diff --git a/.github/ISSUE_TEMPLATE/bug.yml b/.github/ISSUE_TEMPLATE/bug.yml index 65f267f64ec..b1ce85770e5 100644 --- a/.github/ISSUE_TEMPLATE/bug.yml +++ b/.github/ISSUE_TEMPLATE/bug.yml @@ -1,61 +1,50 @@ -name: BUG 提交 -description: 提交产品缺陷帮助我们更好的改进 -title: "[BUG]" -labels: "类型: 缺陷" -assignees: baixin513 +name: 'Bug Report' +description: 'Report an Bug' +title: "[Bug] " +assignees: zyyfit body: - type: markdown - id: contacts_title attributes: - value: "## 联系方式" + value: "## Contact Information" - type: input - id: contacts validations: required: false attributes: - label: "联系方式" - description: "可以快速联系到您的方式:交流群号及昵称、邮箱等" + label: "Contact Information" + description: "The ways to quickly contact you: WeChat group number and nickname, email, etc." - type: markdown - id: environment attributes: - value: "## 环境信息" + value: "## Environment Information" - type: input - id: version validations: required: true attributes: - label: "MaxKB 版本" - description: "登录 MaxKB Web 控制台,在右上角关于页面查看当前版本。" + label: "MaxKB Version" + description: "Log in to the MaxKB Web Console and check the current version on the `About` page in the top right corner." - type: markdown - id: details attributes: - value: "## 详细信息" + value: "## Detailed information" - type: textarea - id: what-happened attributes: - label: "问题描述" - description: "简要描述您碰到的问题" + label: "Problem Description" + description: "Briefly describe the issue you’ve encountered." validations: required: true - type: textarea - id: how-happened attributes: - label: "重现步骤" - description: "如果操作可以重现该问题" + label: "Steps to Reproduce" + description: "How can this issue be reproduced." validations: required: true - type: textarea - id: expect attributes: - label: "期待的正确结果" + label: "The expected correct result" - type: textarea - id: logs attributes: - label: "相关日志输出" - description: "请复制并粘贴任何相关的日志输出。 这将自动格式化为代码,因此无需反引号。" + label: "Related log output" + description: "Please paste any relevant log output here. 
It will automatically be formatted as code, so no backticks are necessary." render: shell - type: textarea - id: additional-information attributes: - label: "附加信息" - description: "如果你还有其他需要提供的信息,可以在这里填写(可以提供截图、视频等)。" + label: "Additional Information" + description: "If you have any additional information to provide, you can include it here (screenshots, videos, etc., are welcome)." diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml index cd5a52fa608..ab410a894f6 100644 --- a/.github/ISSUE_TEMPLATE/config.yml +++ b/.github/ISSUE_TEMPLATE/config.yml @@ -1,5 +1,5 @@ blank_issues_enabled: false contact_links: - - name: 对 MaxKB 项目有其他问题 - url: https://bbs.fit2cloud.com/c/mk/11 - about: 如果你对 MaxKB 有其他想要提问的,我们欢迎到我们的官方社区进行提问。 \ No newline at end of file + - name: Questions & Discussions + url: https://github.com/1Panel-dev/MaxKB/discussions + about: Raise questions about the installation, deployment, use and other aspects of the project. \ No newline at end of file diff --git a/.github/ISSUE_TEMPLATE/feature.yml b/.github/ISSUE_TEMPLATE/feature.yml index 3c015c4e072..2f943cba934 100644 --- a/.github/ISSUE_TEMPLATE/feature.yml +++ b/.github/ISSUE_TEMPLATE/feature.yml @@ -1,36 +1,29 @@ -name: 需求建议 -description: 提出针对本项目的想法和建议 -title: "[FEATURE]" -labels: enhancement +name: 'Feature Request' +description: 'Suggest an idea' +title: '[Feature] ' assignees: baixin513 body: - type: markdown - id: environment attributes: - value: "## 环境信息" + value: "## Environment Information" - type: input - id: version validations: required: true attributes: - label: "MaxKB 版本" - description: "登录 MaxKB Web 控制台,在右上角关于页面查看当前版本。" + label: "MaxKB Version" + description: "Log in to the MaxKB Web Console and check the current version on the `About` page in the top right corner." 
- type: markdown - id: details attributes: - value: "## 详细信息" + value: "## Detailed information" - type: textarea - id: description attributes: - label: "请描述您的需求或者改进建议" + label: "Please describe your needs or suggestions for improvements" validations: required: true - type: textarea - id: solution attributes: - label: "请描述你建议的实现方案" + label: "Please describe the solution you suggest" - type: textarea - id: additional-information attributes: - label: "附加信息" - description: "如果你还有其他需要提供的信息,可以在这里填写(可以提供截图、视频等)。" \ No newline at end of file + label: "Additional Information" + description: "If you have any additional information to provide, you can include it here (screenshots, videos, etc., are welcome)." \ No newline at end of file diff --git a/.github/dependabot.yml b/.github/dependabot.yml new file mode 100644 index 00000000000..a6a922a221b --- /dev/null +++ b/.github/dependabot.yml @@ -0,0 +1,17 @@ +version: 2 +updates: + - package-ecosystem: "pip" + directory: "/" + schedule: + interval: "weekly" + timezone: "Asia/Shanghai" + day: "friday" + target-branch: "v2" + groups: + python-dependencies: + patterns: + - "*" +# ignore: +# - dependency-name: "pymupdf" +# versions: ["*"] + diff --git a/.github/workflows/build-and-push-python-pg.yml b/.github/workflows/build-and-push-python-pg.yml index 1eb12cf415b..bc4dc3f2c77 100644 --- a/.github/workflows/build-and-push-python-pg.yml +++ b/.github/workflows/build-and-push-python-pg.yml @@ -14,7 +14,7 @@ on: - linux/amd64,linux/arm64 jobs: build-and-push-python-pg-to-ghcr: - runs-on: ubuntu-latest + runs-on: ubuntu-22.04 steps: - name: Check Disk Space run: df -h @@ -39,7 +39,7 @@ jobs: run: | DOCKER_IMAGE=ghcr.io/1panel-dev/maxkb-python-pg DOCKER_PLATFORMS=${{ github.event.inputs.architecture }} - TAG_NAME=python3.11-pg15.6 + TAG_NAME=python3.11-pg15.8 DOCKER_IMAGE_TAGS="--tag ${DOCKER_IMAGE}:${TAG_NAME} --tag ${DOCKER_IMAGE}:latest" echo ::set-output name=docker_image::${DOCKER_IMAGE} echo ::set-output 
name=version::${TAG_NAME} @@ -50,6 +50,9 @@ jobs: ${DOCKER_IMAGE_TAGS} . - name: Set up QEMU uses: docker/setup-qemu-action@v3 + with: + # Until https://github.com/tonistiigi/binfmt/issues/215 + image: tonistiigi/binfmt:qemu-v7.0.0-28 - name: Set up Docker Buildx uses: docker/setup-buildx-action@v3 - name: Login to GitHub Container Registry diff --git a/.github/workflows/build-and-push-vector-model.yml b/.github/workflows/build-and-push-vector-model.yml index a054bc7b348..ff667f15850 100644 --- a/.github/workflows/build-and-push-vector-model.yml +++ b/.github/workflows/build-and-push-vector-model.yml @@ -19,7 +19,7 @@ on: jobs: build-and-push-vector-model-to-ghcr: - runs-on: ubuntu-latest + runs-on: ubuntu-22.04 steps: - name: Check Disk Space run: df -h @@ -55,6 +55,9 @@ jobs: ${DOCKER_IMAGE_TAGS} . - name: Set up QEMU uses: docker/setup-qemu-action@v3 + with: + # Until https://github.com/tonistiigi/binfmt/issues/215 + image: tonistiigi/binfmt:qemu-v7.0.0-28 - name: Set up Docker Buildx uses: docker/setup-buildx-action@v3 - name: Login to GitHub Container Registry diff --git a/.github/workflows/build-and-push.yml b/.github/workflows/build-and-push.yml index 602b51387d0..1e1daf2696c 100644 --- a/.github/workflows/build-and-push.yml +++ b/.github/workflows/build-and-push.yml @@ -1,12 +1,19 @@ name: build-and-push +run-name: 构建镜像并推送仓库 ${{ github.event.inputs.dockerImageTag }} (${{ github.event.inputs.registry }}) + on: workflow_dispatch: inputs: dockerImageTag: - description: 'Docker Image Tag' - default: 'v1.1.0-dev' + description: 'Image Tag' + default: 'v1.10.7-dev' + required: true + dockerImageTagWithLatest: + description: '是否发布latest tag(正式发版时选择,测试版本切勿选择)' + default: false required: true + type: boolean architecture: description: 'Architecture' required: true @@ -19,11 +26,11 @@ on: registry: description: 'Push To Registry' required: true - default: 'dockerhub' + default: 'fit2cloud-registry' type: choice options: - - dockerhub - fit2cloud-registry + - 
dockerhub - dockerhub, fit2cloud-registry jobs: @@ -52,16 +59,17 @@ jobs: - name: Prepare id: prepare run: | - DOCKER_IMAGE=registry-hkproxy.fit2cloud.com/maxkb/maxkb + DOCKER_IMAGE=${{ secrets.FIT2CLOUD_REGISTRY_HOST }}/maxkb/maxkb DOCKER_PLATFORMS=${{ github.event.inputs.architecture }} TAG_NAME=${{ github.event.inputs.dockerImageTag }} - if [[ ${TAG_NAME} == *dev* ]]; then - DOCKER_IMAGE_TAGS="--tag ${DOCKER_IMAGE}:${TAG_NAME}" + TAG_NAME_WITH_LATEST=${{ github.event.inputs.dockerImageTagWithLatest }} + if [[ ${TAG_NAME_WITH_LATEST} == 'true' ]]; then + DOCKER_IMAGE_TAGS="--tag ${DOCKER_IMAGE}:${TAG_NAME} --tag ${DOCKER_IMAGE}:${TAG_NAME%%.*}" else - DOCKER_IMAGE_TAGS="--tag ${DOCKER_IMAGE}:${TAG_NAME} --tag ${DOCKER_IMAGE}:latest" + DOCKER_IMAGE_TAGS="--tag ${DOCKER_IMAGE}:${TAG_NAME}" fi - echo ::set-output name=buildx_args::--platform ${DOCKER_PLATFORMS} \ - --build-arg DOCKER_IMAGE_TAG=${{ github.event.inputs.dockerImageTag }} --build-arg BUILD_AT=$(TZ=Asia/Shanghai date +'%Y-%m-%dT%H:%M') --build-arg GITHUB_COMMIT=${GITHUB_SHA::8} --no-cache \ + echo ::set-output name=buildx_args::--platform ${DOCKER_PLATFORMS} --memory-swap -1 \ + --build-arg DOCKER_IMAGE_TAG=${{ github.event.inputs.dockerImageTag }} --build-arg BUILD_AT=$(TZ=Asia/Shanghai date +'%Y-%m-%dT%H:%M') --build-arg GITHUB_COMMIT=`git rev-parse --short HEAD` --no-cache \ ${DOCKER_IMAGE_TAGS} . 
- name: Set up QEMU uses: docker/setup-qemu-action@v3 @@ -76,11 +84,12 @@ jobs: - name: Login to FIT2CLOUD Registry uses: docker/login-action@v3 with: - registry: registry-hkproxy.fit2cloud.com + registry: ${{ secrets.FIT2CLOUD_REGISTRY_HOST }} username: ${{ secrets.FIT2CLOUD_REGISTRY_USERNAME }} password: ${{ secrets.FIT2CLOUD_REGISTRY_PASSWORD }} - name: Docker Buildx (build-and-push) run: | + sudo sync && echo 3 | sudo tee /proc/sys/vm/drop_caches && free -m docker buildx build --output "type=image,push=true" ${{ steps.prepare.outputs.buildx_args }} -f installer/Dockerfile build-and-push-to-dockerhub: @@ -110,14 +119,15 @@ jobs: run: | DOCKER_IMAGE=1panel/maxkb DOCKER_PLATFORMS=${{ github.event.inputs.architecture }} - TAG_NAME=${{ github.event.inputs.dockerImageTag }} - if [[ ${TAG_NAME} == *dev* ]]; then - DOCKER_IMAGE_TAGS="--tag ${DOCKER_IMAGE}:${TAG_NAME}" + TAG_NAME=${{ github.event.inputs.dockerImageTag }} + TAG_NAME_WITH_LATEST=${{ github.event.inputs.dockerImageTagWithLatest }} + if [[ ${TAG_NAME_WITH_LATEST} == 'true' ]]; then + DOCKER_IMAGE_TAGS="--tag ${DOCKER_IMAGE}:${TAG_NAME} --tag ${DOCKER_IMAGE}:${TAG_NAME%%.*}" else - DOCKER_IMAGE_TAGS="--tag ${DOCKER_IMAGE}:${TAG_NAME} --tag ${DOCKER_IMAGE}:latest" + DOCKER_IMAGE_TAGS="--tag ${DOCKER_IMAGE}:${TAG_NAME}" fi - echo ::set-output name=buildx_args::--platform ${DOCKER_PLATFORMS} \ - --build-arg DOCKER_IMAGE_TAG=${{ github.event.inputs.dockerImageTag }} --build-arg BUILD_AT=$(TZ=Asia/Shanghai date +'%Y-%m-%dT%H:%M') --build-arg GITHUB_COMMIT=${GITHUB_SHA::8} --no-cache \ + echo ::set-output name=buildx_args::--platform ${DOCKER_PLATFORMS} --memory-swap -1 \ + --build-arg DOCKER_IMAGE_TAG=${{ github.event.inputs.dockerImageTag }} --build-arg BUILD_AT=$(TZ=Asia/Shanghai date +'%Y-%m-%dT%H:%M') --build-arg GITHUB_COMMIT=`git rev-parse --short HEAD` --no-cache \ ${DOCKER_IMAGE_TAGS} . 
- name: Set up QEMU uses: docker/setup-qemu-action@v3 @@ -136,4 +146,5 @@ jobs: password: ${{ secrets.DOCKERHUB_TOKEN }} - name: Docker Buildx (build-and-push) run: | + sudo sync && echo 3 | sudo tee /proc/sys/vm/drop_caches && free -m docker buildx build --output "type=image,push=true" ${{ steps.prepare.outputs.buildx_args }} -f installer/Dockerfile diff --git a/.github/workflows/create-pr-from-push.yml b/.github/workflows/create-pr-from-push.yml new file mode 100644 index 00000000000..3e5ed9137db --- /dev/null +++ b/.github/workflows/create-pr-from-push.yml @@ -0,0 +1,17 @@ +on: + push: + branches: + - 'pr@**' + - 'repr@**' + +name: 针对特定分支名自动创建 PR + +jobs: + generic_handler: + name: 自动创建 PR + runs-on: ubuntu-latest + steps: + - name: Create pull request + uses: jumpserver/action-generic-handler@master + env: + GITHUB_TOKEN: ${{ secrets.GH_TOKEN }} diff --git a/.github/workflows/issue-translator.yml b/.github/workflows/issue-translator.yml new file mode 100644 index 00000000000..67c4e8b6be3 --- /dev/null +++ b/.github/workflows/issue-translator.yml @@ -0,0 +1,14 @@ +name: Issue Translator +on: + issue_comment: + types: [created] + issues: + types: [opened] +jobs: + build: + runs-on: ubuntu-latest + steps: + - uses: usthe/issues-translate-action@v2.7 + with: + IS_MODIFY_TITLE: true + BOT_GITHUB_TOKEN: ${{ secrets.FIT2CLOUDRD_LLM_CODE_REVIEW_TOKEN }} diff --git a/.github/workflows/llm-code-review.yml b/.github/workflows/llm-code-review.yml new file mode 100644 index 00000000000..df8b6b2de62 --- /dev/null +++ b/.github/workflows/llm-code-review.yml @@ -0,0 +1,28 @@ +name: LLM Code Review + +permissions: + contents: read + pull-requests: write + +on: + pull_request: + types: [opened, reopened, synchronize] + +jobs: + llm-code-review: + runs-on: ubuntu-latest + steps: + - uses: fit2cloud/LLM-CodeReview-Action@main + env: + GITHUB_TOKEN: ${{ secrets.FIT2CLOUDRD_LLM_CODE_REVIEW_TOKEN }} + OPENAI_API_KEY: ${{ secrets.ALIYUN_LLM_API_KEY }} + LANGUAGE: English + 
OPENAI_API_ENDPOINT: https://dashscope.aliyuncs.com/compatible-mode/v1 + MODEL: qwen2.5-coder-3b-instruct + PROMPT: "Please check the following code for any irregularities, potential issues, or optimization suggestions, and provide your answers in English." + top_p: 1 + temperature: 1 + # max_tokens: 10000 + MAX_PATCH_LENGTH: 10000 + IGNORE_PATTERNS: "/node_modules,*.md,/dist,/.github" + FILE_PATTERNS: "*.java,*.go,*.py,*.vue,*.ts,*.js,*.css,*.scss,*.html" diff --git a/.github/workflows/typos_check.yml b/.github/workflows/typos_check.yml index 099b219d3e8..0acbca910d6 100644 --- a/.github/workflows/typos_check.yml +++ b/.github/workflows/typos_check.yml @@ -1,5 +1,10 @@ name: Typos Check -on: [push, pull_request] +on: + push: + branches: + - main + pull_request: + types: [opened, synchronize, reopened] jobs: run: diff --git a/.gitignore b/.gitignore index c6d277f80f0..659c32f5f4c 100644 --- a/.gitignore +++ b/.gitignore @@ -178,6 +178,10 @@ ui/node_modules ui/dist apps/static models/ +apps/xpack +!apps/**/models/ data .dev poetry.lock +apps/setting/models_provider/impl/*/icon/ +tmp/ \ No newline at end of file diff --git a/.typos.toml b/.typos.toml new file mode 100644 index 00000000000..3c485ffe780 --- /dev/null +++ b/.typos.toml @@ -0,0 +1,4 @@ +[files] +extend-exclude = [ + 'apps/setting/models_provider/impl/*/icon/*' +] diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 830ab828b5c..d7663b476c2 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -1,5 +1,10 @@ # Contributing +As a contributor, you should agree that: + +- The producer can adjust the open-source agreement to be more strict or relaxed as deemed necessary. +- Your contributed code may be used for commercial purposes, including but not limited to its cloud business operations. + ## Create pull request PR are always welcome, even if they only contain small fixes like typos or a few lines of code. 
If there will be a significant effort, please document it as an issue and get a discussion going before starting to work on it. @@ -22,4 +27,4 @@ When reporting issues, always include: * Snapshots or log files if needed Because the issues are open to the public, when submitting files, be sure to remove any sensitive information, e.g. user name, password, IP address, and company name. You can -replace those parts with "REDACTED" or other strings like "****". \ No newline at end of file +replace those parts with "REDACTED" or other strings like "****". diff --git a/README.md b/README.md index aab4c89b8d5..7acd92c539c 100644 --- a/README.md +++ b/README.md @@ -1,79 +1,126 @@

MaxKB

-

基于 LLM 大语言模型的知识库问答系统

+

Open-source platform for building enterprise-grade agents

+

强大易用的企业级智能体平台

+

1Panel-dev%2FMaxKB | Trendshift

License: GPL v3 - Codacy Latest release Stars - Download + Download
+ [中文(简体)] | [English]


-MaxKB 是一款基于 LLM 大语言模型的知识库问答系统。MaxKB = Max Knowledge Base,旨在成为企业的最强大脑。 +MaxKB = Max Knowledge Brain, it is an open-source platform for building enterprise-grade agents. MaxKB integrates Retrieval-Augmented Generation (RAG) pipelines, supports robust workflows, and provides advanced MCP tool-use capabilities. MaxKB is widely applied in scenarios such as intelligent customer service, corporate internal knowledge bases, academic research, and education. -- **开箱即用**:支持直接上传文档、自动爬取在线文档,支持文本自动拆分、向量化、RAG(检索增强生成),智能问答交互体验好; -- **无缝嵌入**:支持零编码快速嵌入到第三方业务系统; -- **多模型支持**:支持对接主流的大模型,包括 Ollama 本地私有大模型(如 Meta Llama 3、qwen 等)、通义千问、OpenAI、Azure OpenAI、Kimi、智谱 AI、讯飞星火和百度千帆大模型等。 +- **RAG Pipeline**: Supports direct uploading of documents / automatic crawling of online documents, with features for automatic text splitting, vectorization. This effectively reduces hallucinations in large models, providing a superior smart Q&A interaction experience. +- **Agentic Workflow**: Equipped with a powerful workflow engine, function library and MCP tool-use, enabling the orchestration of AI processes to meet the needs of complex business scenarios. +- **Seamless Integration**: Facilitates zero-coding rapid integration into third-party business systems, quickly equipping existing systems with intelligent Q&A capabilities to enhance user satisfaction. +- **Model-Agnostic**: Supports various large models, including private models (such as DeepSeek, Llama, Qwen, etc.) and public models (like OpenAI, Claude, Gemini, etc.). +- **Multi Modal**: Native support for input and output text, image, audio and video. -## 快速开始 +## Quick start -``` -docker run -d --name=maxkb -p 8080:8080 -v ~/.maxkb:/var/lib/postgresql/data 1panel/maxkb +Execute the script below to start a MaxKB container using Docker: -# 用户名: admin -# 密码: MaxKB@123.. 
+```bash +docker run -d --name=maxkb --restart=always -p 8080:8080 -v ~/.maxkb:/var/lib/postgresql/data -v ~/.python-packages:/opt/maxkb/app/sandbox/python-packages 1panel/maxkb ``` -你也可以通过 [1Panel 应用商店](https://apps.fit2cloud.com/1panel) 快速部署 MaxKB + Ollama + Llama 2,30 分钟内即可上线基于本地大模型的知识库问答系统,并嵌入到第三方业务系统中。 - -你也可以在线体验:[DataEase 小助手](https://dataease.io/docs/v2/),它是基于 MaxKB 搭建的智能问答系统,已经嵌入到 DataEase 产品及在线文档中。 +Access MaxKB web interface at `http://your_server_ip:8080` with default admin credentials: -如你有更多问题,可以查看使用手册,或者通过论坛与我们交流。 +- username: admin +- password: MaxKB@123.. -- [使用手册](https://github.com/1Panel-dev/MaxKB/wiki/1-%E5%AE%89%E8%A3%85%E9%83%A8%E7%BD%B2) -- [演示视频](https://www.bilibili.com/video/BV1BE421M7YM/) -- [论坛求助](https://bbs.fit2cloud.com/c/mk/11) -- 技术交流群 - +中国用户如遇到 Docker 镜像 Pull 失败问题,请参照该 [离线安装文档](https://maxkb.cn/docs/installation/offline_installtion/) 进行安装。 -## UI 展示 +## Screenshots - - + + - - + +
MaxKB Demo1MaxKB Demo2MaxKB Demo1MaxKB Demo2
MaxKB Demo3MaxKB Demo4MaxKB Demo3MaxKB Demo4
-## 技术栈 +## Technical stack -- 前端:[Vue.js](https://cn.vuejs.org/) -- 后端:[Python / Django](https://www.djangoproject.com/) -- LangChain:[LangChain](https://www.langchain.com/) -- 向量数据库:[PostgreSQL / pgvector](https://www.postgresql.org/) -- 大模型:Azure OpenAI、OpenAI、百度千帆大模型、[Ollama](https://github.com/ollama/ollama)、通义千问、Kimi、智谱 AI、讯飞星火 +- Frontend:[Vue.js](https://vuejs.org/) +- Backend:[Python / Django](https://www.djangoproject.com/) +- LLM Framework:[LangChain](https://www.langchain.com/) +- Database:[PostgreSQL + pgvector](https://www.postgresql.org/) -## Star History +## Feature Comparison -[![Star History Chart](https://api.star-history.com/svg?repos=1Panel-dev/MaxKB&type=Date)](https://star-history.com/#1Panel-dev/MaxKB&Date) - -## 我们的其他明星开源项目 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FeatureLangChainDify.AIFlowiseMaxKB
(Built upon LangChain)
Supported LLMsRich VarietyRich VarietyRich VarietyRich Variety
RAG Engine
Agent
Workflow
Observability
SSO/Access control✅ (Pro)
On-premise Deployment
-- [1Panel](https://github.com/1panel-dev/1panel/) - 现代化、开源的 Linux 服务器运维管理面板 -- [Halo](https://github.com/halo-dev/halo/) - 强大易用的开源建站工具 -- [JumpServer](https://github.com/jumpserver/jumpserver/) - 广受欢迎的开源堡垒机 -- [DataEase](https://github.com/dataease/dataease/) - 人人可用的开源数据可视化分析工具 -- [MeterSphere](https://github.com/metersphere/metersphere/) - 现代化、开源的测试管理及接口测试工具 +## Star History +[![Star History Chart](https://api.star-history.com/svg?repos=1Panel-dev/MaxKB&type=Date)](https://star-history.com/#1Panel-dev/MaxKB&Date) ## License -Copyright (c) 2014-2024 飞致云 FIT2CLOUD, All rights reserved. - Licensed under The GNU General Public License version 3 (GPLv3) (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at diff --git a/README_CN.md b/README_CN.md new file mode 100644 index 00000000000..aec9379eea8 --- /dev/null +++ b/README_CN.md @@ -0,0 +1,89 @@ +

MaxKB

+

强大易用的企业级智能体平台

+

+ 1Panel-dev%2FMaxKB | Trendshift +

+

+ English README + License: GPL v3 + Latest release + Stars + Download + Gitee Stars + GitCode Stars +

+
+ +MaxKB = Max Knowledge Brain,是一款强大易用的企业级智能体平台,支持 RAG 检索增强生成、工作流编排、MCP 工具调用能力。MaxKB 支持对接各种主流大语言模型,广泛应用于智能客服、企业内部知识库问答、员工助手、学术研究与教育等场景。 + +- **RAG 检索增强生成**:高效搭建本地 AI 知识库,支持直接上传文档 / 自动爬取在线文档,支持文本自动拆分、向量化,有效减少大模型幻觉,提升问答效果; +- **灵活编排**:内置强大的工作流引擎、函数库和 MCP 工具调用能力,支持编排 AI 工作过程,满足复杂业务场景下的需求; +- **无缝嵌入**:支持零编码快速嵌入到第三方业务系统,让已有系统快速拥有智能问答能力,提高用户满意度; +- **模型中立**:支持对接各种大模型,包括本地私有大模型(DeepSeek R1 / Llama 3 / Qwen 2 等)、国内公共大模型(通义千问 / 腾讯混元 / 字节豆包 / 百度千帆 / 智谱 AI / Kimi 等)和国外公共大模型(OpenAI / Claude / Gemini 等)。 + +MaxKB 三分钟视频介绍:https://www.bilibili.com/video/BV18JypYeEkj/ + +## 快速开始 + +``` +# Linux 机器 +docker run -d --name=maxkb --restart=always -p 8080:8080 -v ~/.maxkb:/var/lib/postgresql/data -v ~/.python-packages:/opt/maxkb/app/sandbox/python-packages registry.fit2cloud.com/maxkb/maxkb + +# Windows 机器 +docker run -d --name=maxkb --restart=always -p 8080:8080 -v C:/maxkb:/var/lib/postgresql/data -v C:/python-packages:/opt/maxkb/app/sandbox/python-packages registry.fit2cloud.com/maxkb/maxkb + +# 用户名: admin +# 密码: MaxKB@123.. +``` + +- 你也可以通过 [1Panel 应用商店](https://apps.fit2cloud.com/1panel) 快速部署 MaxKB; +- 如果是内网环境,推荐使用 [离线安装包](https://community.fit2cloud.com/#/products/maxkb/downloads) 进行安装部署; +- MaxKB 产品版本分为社区版和专业版,详情请参见:[MaxKB 产品版本对比](https://maxkb.cn/pricing.html); +- 如果您需要向团队介绍 MaxKB,可以使用这个 [官方 PPT 材料](https://maxkb.cn/download/introduce-maxkb_202503.pdf)。 + +如你有更多问题,可以查看使用手册,或者通过论坛与我们交流。 + +- [案例展示](USE-CASES.md) +- [使用手册](https://maxkb.cn/docs/) +- [论坛求助](https://bbs.fit2cloud.com/c/mk/11) +- 技术交流群 + + + +## UI 展示 + + + + + + + + + + +
MaxKB Demo1MaxKB Demo2
MaxKB Demo3MaxKB Demo4
+ +## 技术栈 + +- 前端:[Vue.js](https://cn.vuejs.org/) +- 后端:[Python / Django](https://www.djangoproject.com/) +- LangChain:[LangChain](https://www.langchain.com/) +- 向量数据库:[PostgreSQL / pgvector](https://www.postgresql.org/) + +## 飞致云的其他明星项目 + +- [1Panel](https://github.com/1panel-dev/1panel/) - 现代化、开源的 Linux 服务器运维管理面板 +- [JumpServer](https://github.com/jumpserver/jumpserver/) - 广受欢迎的开源堡垒机 +- [DataEase](https://github.com/dataease/dataease/) - 人人可用的开源数据可视化分析工具 +- [MeterSphere](https://github.com/metersphere/metersphere/) - 新一代的开源持续测试工具 +- [Halo](https://github.com/halo-dev/halo/) - 强大易用的开源建站工具 + +## License + +Copyright (c) 2014-2025 飞致云 FIT2CLOUD, All rights reserved. + +Licensed under The GNU General Public License version 3 (GPLv3) (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at + + + +Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. diff --git a/USE-CASES.md b/USE-CASES.md new file mode 100644 index 00000000000..4448488e912 --- /dev/null +++ b/USE-CASES.md @@ -0,0 +1,39 @@ +

MaxKB 应用案例,持续更新中...

+ +------------------------------ + +- [MaxKB 应用案例:中国农业大学-小鹉哥](https://mp.weixin.qq.com/s/4g_gySMBQZCJ9OZ-yBkmvw) +- [MaxKB 应用案例:东北财经大学-小银杏](https://mp.weixin.qq.com/s/3BoxkY7EMomMmmvFYxvDIA) +- [MaxKB 应用案例:中铁水务](https://mp.weixin.qq.com/s/voNAddbK2CJOrJJs1ewZ8g) +- [MaxKB 应用案例:解放军总医院](https://mp.weixin.qq.com/s/ETrZC-vrA4Aap0eF-15EeQ) +- [MaxKB 应用案例:无锡市数据局](https://mp.weixin.qq.com/s/enfUFLevvL_La74PQ0kIXw) +- [MaxKB 应用案例:中核西仪研究院-西仪睿答](https://mp.weixin.qq.com/s/CbKr4mev8qahKLAtV6Dxdg) +- [MaxKB 应用案例:南京中医药大学](https://mp.weixin.qq.com/s/WUmAKYbZjp3272HIecpRFA) +- [MaxKB 应用案例:西北电力设计院-AI数字助理Memex](https://mp.weixin.qq.com/s/ezHFdB7C7AVL9MTtDwYGSA) +- [MaxKB 应用案例:西安国际医院中心医院-国医小助](https://mp.weixin.qq.com/s/DSOUvwrQrxbqQxKBilTCFQ) +- [MaxKB 应用案例:华莱士智能AI客服助手上线啦!](https://www.bilibili.com/video/BV1hQtVeXEBL) +- [MaxKB 应用案例:把医疗行业知识转化为知识库问答助手!](https://www.bilibili.com/video/BV157wme9EgB) +- [MaxKB 应用案例:会展AI智能客服体验](https://www.bilibili.com/video/BV1J7BqY6EKA) +- [MaxKB 应用案例:孩子要上幼儿园了,AI 智能助手择校好帮手](https://www.bilibili.com/video/BV1wKrhYvEer) +- [MaxKB 应用案例:产品使用指南AI助手,新手小白也能轻松搞定!](https://www.bilibili.com/video/BV1Yz6gYtEqX) +- [MaxKB 应用案例:生物医药AI客服智能体验!](https://www.bilibili.com/video/BV13JzvYsE3e) +- [MaxKB 应用案例:高校行政管理AI小助手](https://www.bilibili.com/video/BV1yvBMYvEdy) +- [MaxKB 应用案例:岳阳市人民医院-OA小助手](https://mp.weixin.qq.com/s/O94Qo3UH-MiUtDdWCVg8sQ) +- [MaxKB 应用案例:常熟市第一人民医院](https://mp.weixin.qq.com/s/s5XXGTR3_MUo41NbJ8WzZQ) +- [MaxKB 应用案例:华北水利水电大学](https://mp.weixin.qq.com/s/PoOFAcMCr9qJdvSj8c08qg) +- [MaxKB 应用案例:唐山海事局-“小海”AI语音助手](https://news.qq.com/rain/a/20250223A030BE00) +- [MaxKB 应用案例:湖南汉寿政务](http://hsds.hsdj.gov.cn:19999/ui/chat/a2c976736739aadc) +- [MaxKB 应用案例:广州市妇女儿童医疗中心-AI医疗数据分类分级小助手](https://mp.weixin.qq.com/s/YHUMkUOAaUomBV8bswpK3g) +- [MaxKB 应用案例:苏州热工研究院有限公司-维修大纲评估质量自查AI小助手](https://mp.weixin.qq.com/s/Ts5FQdnv7Tu9Jp7bvofCVA) +- [MaxKB 应用案例:国核自仪系统工程有限公司-NuCON AI帮](https://mp.weixin.qq.com/s/HNPc7u5xVfGLJr8IQz3vjQ) +- [MaxKB 应用案例:深圳通开启Deep 
Seek智能应用新篇章](https://mp.weixin.qq.com/s/SILN0GSescH9LyeQqYP0VQ) +- [MaxKB 应用案例:南通智慧出行领跑长三角!首款接入DeepSeek的"畅行南通"APP上线AI新场景](https://mp.weixin.qq.com/s/WEC9UQ6msY0VS8LhTZh-Ew) +- [MaxKB 应用案例:中船动力人工智能"智慧动力云助手"及首批数字员工正式上线](https://mp.weixin.qq.com/s/OGcEkjh9DzGO1Tkc9nr7qg) +- [MaxKB 应用案例:AI+矿山:DeepSeek助力绿色智慧矿山智慧“升级”](https://mp.weixin.qq.com/s/SZstxTvVoLZg0ECbZbfpIA) +- [MaxKB 应用案例:DeepSeek落地弘盛铜业:国产大模型点亮"黑灯工厂"新引擎](https://mp.weixin.qq.com/s/Eczdx574MS5RMF7WfHN7_A) +- [MaxKB 应用案例:拥抱智能时代!中国五矿以 “AI+”赋能企业发展](https://mp.weixin.qq.com/s/D5vBtlX2E81pWE3_2OgWSw) +- [MaxKB 应用案例:DeepSeek赋能中冶武勘AI智能体](https://mp.weixin.qq.com/s/8m0vxGcWXNdZazziQrLyxg) +- [MaxKB 应用案例:重磅!陕西广电网络“秦岭云”平台实现DeepSeek本地化部署](https://mp.weixin.qq.com/s/ZKmEU_wWShK1YDomKJHQeA) +- [MaxKB 应用案例:粤海集团完成DeepSeek私有化部署,助力集团智能化管理](https://mp.weixin.qq.com/s/2JbVp0-kr9Hfp-0whH4cvg) +- [MaxKB 应用案例:建筑材料工业信息中心完成DeepSeek本地化部署,推动行业数智化转型新发展](https://mp.weixin.qq.com/s/HThGSnND3qDF8ySEqiM4jw) +- [MaxKB 应用案例:一起DeepSeek!福建设计以AI大模型开启新篇章](https://mp.weixin.qq.com/s/m67e-H7iQBg3d24NM82UjA) diff --git a/apps/application/chat_pipeline/I_base_chat_pipeline.py b/apps/application/chat_pipeline/I_base_chat_pipeline.py index 4c894ddbd47..a35bdc39c7f 100644 --- a/apps/application/chat_pipeline/I_base_chat_pipeline.py +++ b/apps/application/chat_pipeline/I_base_chat_pipeline.py @@ -19,7 +19,7 @@ class ParagraphPipelineModel: def __init__(self, _id: str, document_id: str, dataset_id: str, content: str, title: str, status: str, is_active: bool, comprehensive_score: float, similarity: float, dataset_name: str, document_name: str, - hit_handling_method: str, directly_return_similarity: float): + hit_handling_method: str, directly_return_similarity: float, meta: dict = None): self.id = _id self.document_id = document_id self.dataset_id = dataset_id @@ -33,6 +33,7 @@ def __init__(self, _id: str, document_id: str, dataset_id: str, content: str, ti self.document_name = document_name self.hit_handling_method = hit_handling_method 
self.directly_return_similarity = directly_return_similarity + self.meta = meta def to_dict(self): return { @@ -46,7 +47,8 @@ def to_dict(self): 'comprehensive_score': self.comprehensive_score, 'similarity': self.similarity, 'dataset_name': self.dataset_name, - 'document_name': self.document_name + 'document_name': self.document_name, + 'meta': self.meta, } class builder: @@ -58,6 +60,7 @@ def __init__(self): self.dataset_name = None self.hit_handling_method = None self.directly_return_similarity = 0.9 + self.meta = {} def add_paragraph(self, paragraph): if isinstance(paragraph, Paragraph): @@ -97,6 +100,10 @@ def add_similarity(self, similarity: float): self.similarity = similarity return self + def add_meta(self, meta: dict): + self.meta = meta + return self + def build(self): return ParagraphPipelineModel(str(self.paragraph.get('id')), str(self.paragraph.get('document_id')), str(self.paragraph.get('dataset_id')), @@ -104,7 +111,8 @@ def build(self): self.paragraph.get('status'), self.paragraph.get('is_active'), self.comprehensive_score, self.similarity, self.dataset_name, - self.document_name, self.hit_handling_method, self.directly_return_similarity) + self.document_name, self.hit_handling_method, self.directly_return_similarity, + self.meta) class IBaseChatPipelineStep: diff --git a/apps/application/chat_pipeline/pipeline_manage.py b/apps/application/chat_pipeline/pipeline_manage.py index 37d7736b505..7c4acb3a34a 100644 --- a/apps/application/chat_pipeline/pipeline_manage.py +++ b/apps/application/chat_pipeline/pipeline_manage.py @@ -11,14 +11,18 @@ from typing import List, Type, Dict from application.chat_pipeline.I_base_chat_pipeline import IBaseChatPipelineStep +from common.handle.base_to_response import BaseToResponse +from common.handle.impl.response.system_to_response import SystemToResponse class PipelineManage: - def __init__(self, step_list: List[Type[IBaseChatPipelineStep]]): + def __init__(self, step_list: List[Type[IBaseChatPipelineStep]], + 
base_to_response: BaseToResponse = SystemToResponse()): # 步骤执行器 self.step_list = [step() for step in step_list] # 上下文 self.context = {'message_tokens': 0, 'answer_tokens': 0} + self.base_to_response = base_to_response def run(self, context: Dict = None): self.context['start_time'] = time.time() @@ -33,13 +37,21 @@ def get_details(self): filter(lambda r: r is not None, [row.get_details(self) for row in self.step_list])], {}) + def get_base_to_response(self): + return self.base_to_response + class builder: def __init__(self): self.step_list: List[Type[IBaseChatPipelineStep]] = [] + self.base_to_response = SystemToResponse() def append_step(self, step: Type[IBaseChatPipelineStep]): self.step_list.append(step) return self + def add_base_to_response(self, base_to_response: BaseToResponse): + self.base_to_response = base_to_response + return self + def build(self): - return PipelineManage(step_list=self.step_list) + return PipelineManage(step_list=self.step_list, base_to_response=self.base_to_response) diff --git a/apps/application/chat_pipeline/step/chat_step/i_chat_step.py b/apps/application/chat_pipeline/step/chat_step/i_chat_step.py index 8fbac34c9a6..2673c6b7bbd 100644 --- a/apps/application/chat_pipeline/step/chat_step/i_chat_step.py +++ b/apps/application/chat_pipeline/step/chat_step/i_chat_step.py @@ -9,6 +9,7 @@ from abc import abstractmethod from typing import Type, List +from django.utils.translation import gettext_lazy as _ from langchain.chat_models.base import BaseChatModel from langchain.schema import BaseMessage from rest_framework import serializers @@ -23,7 +24,7 @@ class ModelField(serializers.Field): def to_internal_value(self, data): if not isinstance(data, BaseChatModel): - self.fail('模型类型错误', value=data) + self.fail(_('Model type error'), value=data) return data def to_representation(self, value): @@ -33,7 +34,7 @@ def to_representation(self, value): class MessageField(serializers.Field): def to_internal_value(self, data): if not isinstance(data, 
BaseMessage): - self.fail('message类型错误', value=data) + self.fail(_('Message type error'), value=data) return data def to_representation(self, value): @@ -52,33 +53,42 @@ class IChatStep(IBaseChatPipelineStep): class InstanceSerializer(serializers.Serializer): # 对话列表 message_list = serializers.ListField(required=True, child=MessageField(required=True), - error_messages=ErrMessage.list("对话列表")) - # 大语言模型 - chat_model = ModelField(required=False, allow_null=True, error_messages=ErrMessage.list("大语言模型")) + error_messages=ErrMessage.list(_("Conversation list"))) + model_id = serializers.UUIDField(required=False, allow_null=True, error_messages=ErrMessage.uuid(_("Model id"))) # 段落列表 - paragraph_list = serializers.ListField(error_messages=ErrMessage.list("段落列表")) + paragraph_list = serializers.ListField(error_messages=ErrMessage.list(_("Paragraph List"))) # 对话id - chat_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid("对话id")) + chat_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid(_("Conversation ID"))) # 用户问题 - problem_text = serializers.CharField(required=True, error_messages=ErrMessage.uuid("用户问题")) + problem_text = serializers.CharField(required=True, error_messages=ErrMessage.uuid(_("User Questions"))) # 后置处理器 post_response_handler = InstanceField(model_type=PostResponseHandler, - error_messages=ErrMessage.base("用户问题")) + error_messages=ErrMessage.base(_("Post-processor"))) # 补全问题 - padding_problem_text = serializers.CharField(required=False, error_messages=ErrMessage.base("补全问题")) + padding_problem_text = serializers.CharField(required=False, + error_messages=ErrMessage.base(_("Completion Question"))) # 是否使用流的形式输出 - stream = serializers.BooleanField(required=False, error_messages=ErrMessage.base("流式输出")) - client_id = serializers.CharField(required=True, error_messages=ErrMessage.char("客户端id")) - client_type = serializers.CharField(required=True, error_messages=ErrMessage.char("客户端类型")) + stream = 
serializers.BooleanField(required=False, error_messages=ErrMessage.base(_("Streaming Output"))) + client_id = serializers.CharField(required=True, error_messages=ErrMessage.char(_("Client id"))) + client_type = serializers.CharField(required=True, error_messages=ErrMessage.char(_("Client Type"))) # 未查询到引用分段 - no_references_setting = NoReferencesSetting(required=True, error_messages=ErrMessage.base("无引用分段设置")) + no_references_setting = NoReferencesSetting(required=True, + error_messages=ErrMessage.base(_("No reference segment settings"))) + + user_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid(_("User ID"))) + + model_setting = serializers.DictField(required=True, allow_null=True, + error_messages=ErrMessage.dict(_("Model settings"))) + + model_params_setting = serializers.DictField(required=False, allow_null=True, + error_messages=ErrMessage.dict(_("Model parameter settings"))) def is_valid(self, *, raise_exception=False): super().is_valid(raise_exception=True) message_list: List = self.initial_data.get('message_list') for message in message_list: if not isinstance(message, BaseMessage): - raise Exception("message 类型错误") + raise Exception(_("message type error")) def get_step_serializer(self, manage: PipelineManage) -> Type[serializers.Serializer]: return self.InstanceSerializer @@ -91,9 +101,10 @@ def _run(self, manage: PipelineManage): def execute(self, message_list: List[BaseMessage], chat_id, problem_text, post_response_handler: PostResponseHandler, - chat_model: BaseChatModel = None, + model_id: str = None, + user_id: str = None, paragraph_list=None, manage: PipelineManage = None, padding_problem_text: str = None, stream: bool = True, client_id=None, client_type=None, - no_references_setting=None, **kwargs): + no_references_setting=None, model_params_setting=None, model_setting=None, **kwargs): pass diff --git a/apps/application/chat_pipeline/step/chat_step/impl/base_chat_step.py 
b/apps/application/chat_pipeline/step/chat_step/impl/base_chat_step.py index 8d7b9d35e8c..b03f06d80ad 100644 --- a/apps/application/chat_pipeline/step/chat_step/impl/base_chat_step.py +++ b/apps/application/chat_pipeline/step/chat_step/impl/base_chat_step.py @@ -6,7 +6,6 @@ @date:2024/1/9 18:25 @desc: 对话step Base实现 """ -import json import logging import time import traceback @@ -15,28 +14,44 @@ from django.db.models import QuerySet from django.http import StreamingHttpResponse +from django.utils.translation import gettext as _ from langchain.chat_models.base import BaseChatModel from langchain.schema import BaseMessage from langchain.schema.messages import HumanMessage, AIMessage from langchain_core.messages import AIMessageChunk +from rest_framework import status from application.chat_pipeline.I_base_chat_pipeline import ParagraphPipelineModel from application.chat_pipeline.pipeline_manage import PipelineManage from application.chat_pipeline.step.chat_step.i_chat_step import IChatStep, PostResponseHandler +from application.flow.tools import Reasoning from application.models.api_key_model import ApplicationPublicAccessClient from common.constants.authentication_type import AuthenticationType -from common.response import result +from setting.models_provider.tools import get_model_instance_by_model_user_id -def add_access_num(client_id=None, client_type=None): - if client_type == AuthenticationType.APPLICATION_ACCESS_TOKEN.value: - application_public_access_client = QuerySet(ApplicationPublicAccessClient).filter(id=client_id).first() +def add_access_num(client_id=None, client_type=None, application_id=None): + if client_type == AuthenticationType.APPLICATION_ACCESS_TOKEN.value and application_id is not None: + application_public_access_client = (QuerySet(ApplicationPublicAccessClient).filter(client_id=client_id, + application_id=application_id) + .first()) if application_public_access_client is not None: application_public_access_client.access_num = 
application_public_access_client.access_num + 1 application_public_access_client.intraday_access_num = application_public_access_client.intraday_access_num + 1 application_public_access_client.save() +def write_context(step, manage, request_token, response_token, all_text): + step.context['message_tokens'] = request_token + step.context['answer_tokens'] = response_token + current_time = time.time() + step.context['answer_text'] = all_text + step.context['run_time'] = current_time - step.context['start_time'] + manage.context['run_time'] = current_time - manage.context['start_time'] + manage.context['message_tokens'] = manage.context['message_tokens'] + request_token + manage.context['answer_tokens'] = manage.context['answer_tokens'] + response_token + + def event_content(response, chat_id, chat_record_id, @@ -49,14 +64,54 @@ def event_content(response, problem_text: str, padding_problem_text: str = None, client_id=None, client_type=None, - is_ai_chat: bool = None): + is_ai_chat: bool = None, + model_setting=None): + if model_setting is None: + model_setting = {} + reasoning_content_enable = model_setting.get('reasoning_content_enable', False) + reasoning_content_start = model_setting.get('reasoning_content_start', '') + reasoning_content_end = model_setting.get('reasoning_content_end', '') + reasoning = Reasoning(reasoning_content_start, + reasoning_content_end) all_text = '' + reasoning_content = '' try: + response_reasoning_content = False for chunk in response: - all_text += chunk.content - yield 'data: ' + json.dumps({'chat_id': str(chat_id), 'id': str(chat_record_id), 'operate': True, - 'content': chunk.content, 'is_end': False}) + "\n\n" - + reasoning_chunk = reasoning.get_reasoning_content(chunk) + content_chunk = reasoning_chunk.get('content') + if 'reasoning_content' in chunk.additional_kwargs: + response_reasoning_content = True + reasoning_content_chunk = chunk.additional_kwargs.get('reasoning_content', '') + else: + reasoning_content_chunk = 
reasoning_chunk.get('reasoning_content') + all_text += content_chunk + if reasoning_content_chunk is None: + reasoning_content_chunk = '' + reasoning_content += reasoning_content_chunk + yield manage.get_base_to_response().to_stream_chunk_response(chat_id, str(chat_record_id), 'ai-chat-node', + [], content_chunk, + False, + 0, 0, {'node_is_end': False, + 'view_type': 'many_view', + 'node_type': 'ai-chat-node', + 'real_node_id': 'ai-chat-node', + 'reasoning_content': reasoning_content_chunk if reasoning_content_enable else ''}) + reasoning_chunk = reasoning.get_end_reasoning_content() + all_text += reasoning_chunk.get('content') + reasoning_content_chunk = "" + if not response_reasoning_content: + reasoning_content_chunk = reasoning_chunk.get( + 'reasoning_content') + yield manage.get_base_to_response().to_stream_chunk_response(chat_id, str(chat_record_id), 'ai-chat-node', + [], reasoning_chunk.get('content'), + False, + 0, 0, {'node_is_end': False, + 'view_type': 'many_view', + 'node_type': 'ai-chat-node', + 'real_node_id': 'ai-chat-node', + 'reasoning_content' + : reasoning_content_chunk if reasoning_content_enable else ''}) # 获取token if is_ai_chat: try: @@ -68,23 +123,35 @@ def event_content(response, else: request_token = 0 response_token = 0 - step.context['message_tokens'] = request_token - step.context['answer_tokens'] = response_token - current_time = time.time() - step.context['answer_text'] = all_text - step.context['run_time'] = current_time - step.context['start_time'] - manage.context['run_time'] = current_time - manage.context['start_time'] - manage.context['message_tokens'] = manage.context['message_tokens'] + request_token - manage.context['answer_tokens'] = manage.context['answer_tokens'] + response_token + write_context(step, manage, request_token, response_token, all_text) + asker = manage.context.get('form_data', {}).get('asker', None) post_response_handler.handler(chat_id, chat_record_id, paragraph_list, problem_text, - all_text, manage, step, 
padding_problem_text, client_id) - yield 'data: ' + json.dumps({'chat_id': str(chat_id), 'id': str(chat_record_id), 'operate': True, - 'content': '', 'is_end': True}) + "\n\n" - add_access_num(client_id, client_type) + all_text, manage, step, padding_problem_text, client_id, + reasoning_content=reasoning_content if reasoning_content_enable else '' + , asker=asker) + yield manage.get_base_to_response().to_stream_chunk_response(chat_id, str(chat_record_id), 'ai-chat-node', + [], '', True, + request_token, response_token, + {'node_is_end': True, 'view_type': 'many_view', + 'node_type': 'ai-chat-node'}) + add_access_num(client_id, client_type, manage.context.get('application_id')) except Exception as e: logging.getLogger("max_kb_error").error(f'{str(e)}:{traceback.format_exc()}') - yield 'data: ' + json.dumps({'chat_id': str(chat_id), 'id': str(chat_record_id), 'operate': True, - 'content': '异常' + str(e), 'is_end': True}) + "\n\n" + all_text = 'Exception:' + str(e) + write_context(step, manage, 0, 0, all_text) + asker = manage.context.get('form_data', {}).get('asker', None) + post_response_handler.handler(chat_id, chat_record_id, paragraph_list, problem_text, + all_text, manage, step, padding_problem_text, client_id, reasoning_content='', + asker=asker) + add_access_num(client_id, client_type, manage.context.get('application_id')) + yield manage.get_base_to_response().to_stream_chunk_response(chat_id, str(chat_record_id), 'ai-chat-node', + [], all_text, + False, + 0, 0, {'node_is_end': False, + 'view_type': 'many_view', + 'node_type': 'ai-chat-node', + 'real_node_id': 'ai-chat-node', + 'reasoning_content': ''}) class BaseChatStep(IChatStep): @@ -92,22 +159,29 @@ def execute(self, message_list: List[BaseMessage], chat_id, problem_text, post_response_handler: PostResponseHandler, - chat_model: BaseChatModel = None, + model_id: str = None, + user_id: str = None, paragraph_list=None, manage: PipelineManage = None, padding_problem_text: str = None, stream: bool = True, 
client_id=None, client_type=None, no_references_setting=None, + model_params_setting=None, + model_setting=None, **kwargs): + chat_model = get_model_instance_by_model_user_id(model_id, user_id, + **model_params_setting) if model_id is not None else None if stream: return self.execute_stream(message_list, chat_id, problem_text, post_response_handler, chat_model, paragraph_list, - manage, padding_problem_text, client_id, client_type, no_references_setting) + manage, padding_problem_text, client_id, client_type, no_references_setting, + model_setting) else: return self.execute_block(message_list, chat_id, problem_text, post_response_handler, chat_model, paragraph_list, - manage, padding_problem_text, client_id, client_type, no_references_setting) + manage, padding_problem_text, client_id, client_type, no_references_setting, + model_setting) def get_details(self, manage, **kwargs): return { @@ -134,7 +208,8 @@ def reset_message_list(message_list: List[BaseMessage], answer_text): def get_stream_result(message_list: List[BaseMessage], chat_model: BaseChatModel = None, paragraph_list=None, - no_references_setting=None): + no_references_setting=None, + problem_text=None): if paragraph_list is None: paragraph_list = [] directly_return_chunk_list = [AIMessageChunk(content=paragraph.content) @@ -144,9 +219,11 @@ def get_stream_result(message_list: List[BaseMessage], return iter(directly_return_chunk_list), False elif len(paragraph_list) == 0 and no_references_setting.get( 'status') == 'designated_answer': - return iter([AIMessageChunk(content=no_references_setting.get('value'))]), False + return iter( + [AIMessageChunk(content=no_references_setting.get('value').replace('{question}', problem_text))]), False if chat_model is None: - return iter([AIMessageChunk('抱歉,没有配置 AI 模型,无法优化引用分段,请先去应用中设置 AI 模型。')]), False + return iter([AIMessageChunk( + _('Sorry, the AI model is not configured. 
Please go to the application to set up the AI model first.'))]), False else: return chat_model.stream(message_list), True @@ -159,14 +236,15 @@ def execute_stream(self, message_list: List[BaseMessage], manage: PipelineManage = None, padding_problem_text: str = None, client_id=None, client_type=None, - no_references_setting=None): + no_references_setting=None, + model_setting=None): chat_result, is_ai_chat = self.get_stream_result(message_list, chat_model, paragraph_list, - no_references_setting) + no_references_setting, problem_text) chat_record_id = uuid.uuid1() r = StreamingHttpResponse( streaming_content=event_content(chat_result, chat_id, chat_record_id, paragraph_list, post_response_handler, manage, self, chat_model, message_list, problem_text, - padding_problem_text, client_id, client_type, is_ai_chat), + padding_problem_text, client_id, client_type, is_ai_chat, model_setting), content_type='text/event-stream;charset=utf-8') r['Cache-Control'] = 'no-cache' @@ -176,20 +254,21 @@ def execute_stream(self, message_list: List[BaseMessage], def get_block_result(message_list: List[BaseMessage], chat_model: BaseChatModel = None, paragraph_list=None, - no_references_setting=None): + no_references_setting=None, + problem_text=None): if paragraph_list is None: paragraph_list = [] - - directly_return_chunk_list = [AIMessage(content=paragraph.content) - for paragraph in paragraph_list if - paragraph.hit_handling_method == 'directly_return'] + directly_return_chunk_list = [AIMessageChunk(content=paragraph.content) + for paragraph in paragraph_list if ( + paragraph.hit_handling_method == 'directly_return' and paragraph.similarity >= paragraph.directly_return_similarity)] if directly_return_chunk_list is not None and len(directly_return_chunk_list) > 0: return directly_return_chunk_list[0], False elif len(paragraph_list) == 0 and no_references_setting.get( 'status') == 'designated_answer': - return AIMessage(no_references_setting.get('value')), False + return 
AIMessage(no_references_setting.get('value').replace('{question}', problem_text)), False if chat_model is None: - return AIMessage('抱歉,没有配置 AI 模型,无法优化引用分段,请先去应用中设置 AI 模型。'), False + return AIMessage( + _('Sorry, the AI model is not configured. Please go to the application to set up the AI model first.')), False else: return chat_model.invoke(message_list), True @@ -201,26 +280,55 @@ def execute_block(self, message_list: List[BaseMessage], paragraph_list=None, manage: PipelineManage = None, padding_problem_text: str = None, - client_id=None, client_type=None, no_references_setting=None): - # 调用模型 - chat_result, is_ai_chat = self.get_block_result(message_list, chat_model, paragraph_list, no_references_setting) + client_id=None, client_type=None, no_references_setting=None, + model_setting=None): + reasoning_content_enable = model_setting.get('reasoning_content_enable', False) + reasoning_content_start = model_setting.get('reasoning_content_start', '') + reasoning_content_end = model_setting.get('reasoning_content_end', '') + reasoning = Reasoning(reasoning_content_start, + reasoning_content_end) chat_record_id = uuid.uuid1() - if is_ai_chat: - request_token = chat_model.get_num_tokens_from_messages(message_list) - response_token = chat_model.get_num_tokens(chat_result.content) - else: - request_token = 0 - response_token = 0 - self.context['message_tokens'] = request_token - self.context['answer_tokens'] = response_token - current_time = time.time() - self.context['answer_text'] = chat_result.content - self.context['run_time'] = current_time - self.context['start_time'] - manage.context['run_time'] = current_time - manage.context['start_time'] - manage.context['message_tokens'] = manage.context['message_tokens'] + request_token - manage.context['answer_tokens'] = manage.context['answer_tokens'] + response_token - post_response_handler.handler(chat_id, chat_record_id, paragraph_list, problem_text, - chat_result.content, manage, self, padding_problem_text, client_id) - 
add_access_num(client_id, client_type) - return result.success({'chat_id': str(chat_id), 'id': str(chat_record_id), 'operate': True, - 'content': chat_result.content, 'is_end': True}) + # 调用模型 + try: + chat_result, is_ai_chat = self.get_block_result(message_list, chat_model, paragraph_list, + no_references_setting, problem_text) + if is_ai_chat: + request_token = chat_model.get_num_tokens_from_messages(message_list) + response_token = chat_model.get_num_tokens(chat_result.content) + else: + request_token = 0 + response_token = 0 + write_context(self, manage, request_token, response_token, chat_result.content) + reasoning_result = reasoning.get_reasoning_content(chat_result) + reasoning_result_end = reasoning.get_end_reasoning_content() + content = reasoning_result.get('content') + reasoning_result_end.get('content') + if 'reasoning_content' in chat_result.response_metadata: + reasoning_content = chat_result.response_metadata.get('reasoning_content', '') + else: + reasoning_content = reasoning_result.get('reasoning_content') + reasoning_result_end.get( + 'reasoning_content') + asker = manage.context.get('form_data', {}).get('asker', None) + post_response_handler.handler(chat_id, chat_record_id, paragraph_list, problem_text, + content, manage, self, padding_problem_text, client_id, + reasoning_content=reasoning_content if reasoning_content_enable else '', + asker=asker) + add_access_num(client_id, client_type, manage.context.get('application_id')) + return manage.get_base_to_response().to_block_response(str(chat_id), str(chat_record_id), + content, True, + request_token, response_token, + { + 'reasoning_content': reasoning_content if reasoning_content_enable else '', + 'answer_list': [{ + 'content': content, + 'reasoning_content': reasoning_content if reasoning_content_enable else '' + }]}) + except Exception as e: + all_text = 'Exception:' + str(e) + write_context(self, manage, 0, 0, all_text) + asker = manage.context.get('form_data', {}).get('asker', None) + 
post_response_handler.handler(chat_id, chat_record_id, paragraph_list, problem_text, + all_text, manage, self, padding_problem_text, client_id, reasoning_content='', + asker=asker) + add_access_num(client_id, client_type, manage.context.get('application_id')) + return manage.get_base_to_response().to_block_response(str(chat_id), str(chat_record_id), all_text, True, 0, + 0, _status=status.HTTP_500_INTERNAL_SERVER_ERROR) diff --git a/apps/application/chat_pipeline/step/generate_human_message_step/i_generate_human_message_step.py b/apps/application/chat_pipeline/step/generate_human_message_step/i_generate_human_message_step.py index ca2d00e0b59..9e23f2d6c52 100644 --- a/apps/application/chat_pipeline/step/generate_human_message_step/i_generate_human_message_step.py +++ b/apps/application/chat_pipeline/step/generate_human_message_step/i_generate_human_message_step.py @@ -9,6 +9,7 @@ from abc import abstractmethod from typing import Type, List +from django.utils.translation import gettext_lazy as _ from langchain.schema import BaseMessage from rest_framework import serializers @@ -23,24 +24,26 @@ class IGenerateHumanMessageStep(IBaseChatPipelineStep): class InstanceSerializer(serializers.Serializer): # 问题 - problem_text = serializers.CharField(required=True, error_messages=ErrMessage.char("问题")) + problem_text = serializers.CharField(required=True, error_messages=ErrMessage.char(_("question"))) # 段落列表 paragraph_list = serializers.ListField(child=InstanceField(model_type=ParagraphPipelineModel, required=True), - error_messages=ErrMessage.list("段落列表")) + error_messages=ErrMessage.list(_("Paragraph List"))) # 历史对答 history_chat_record = serializers.ListField(child=InstanceField(model_type=ChatRecord, required=True), - error_messages=ErrMessage.list("历史对答")) + error_messages=ErrMessage.list(_("History Questions"))) # 多轮对话数量 - dialogue_number = serializers.IntegerField(required=True, error_messages=ErrMessage.integer("多轮对话数量")) + dialogue_number = 
serializers.IntegerField(required=True, error_messages=ErrMessage.integer(_("Number of multi-round conversations"))) # 最大携带知识库段落长度 max_paragraph_char_number = serializers.IntegerField(required=True, error_messages=ErrMessage.integer( - "最大携带知识库段落长度")) + _("Maximum length of the knowledge base paragraph"))) # 模板 - prompt = serializers.CharField(required=True, error_messages=ErrMessage.char("提示词")) + prompt = serializers.CharField(required=True, error_messages=ErrMessage.char(_("Prompt word"))) + system = serializers.CharField(required=False, allow_null=True, allow_blank=True, + error_messages=ErrMessage.char(_("System prompt words (role)"))) # 补齐问题 - padding_problem_text = serializers.CharField(required=False, error_messages=ErrMessage.char("补齐问题")) + padding_problem_text = serializers.CharField(required=False, error_messages=ErrMessage.char(_("Completion problem"))) # 未查询到引用分段 - no_references_setting = NoReferencesSetting(required=True, error_messages=ErrMessage.base("无引用分段设置")) + no_references_setting = NoReferencesSetting(required=True, error_messages=ErrMessage.base(_("No reference segment settings"))) def get_step_serializer(self, manage: PipelineManage) -> Type[serializers.Serializer]: return self.InstanceSerializer @@ -59,6 +62,7 @@ def execute(self, prompt: str, padding_problem_text: str = None, no_references_setting=None, + system=None, **kwargs) -> List[BaseMessage]: """ @@ -71,6 +75,7 @@ def execute(self, :param padding_problem_text 用户修改文本 :param kwargs: 其他参数 :param no_references_setting: 无引用分段设置 + :param system 系统提示称 :return: """ pass diff --git a/apps/application/chat_pipeline/step/generate_human_message_step/impl/base_generate_human_message_step.py b/apps/application/chat_pipeline/step/generate_human_message_step/impl/base_generate_human_message_step.py index 6664a286c54..68cfbbcb95d 100644 --- a/apps/application/chat_pipeline/step/generate_human_message_step/impl/base_generate_human_message_step.py +++ 
b/apps/application/chat_pipeline/step/generate_human_message_step/impl/base_generate_human_message_step.py @@ -9,6 +9,7 @@ from typing import List, Dict from langchain.schema import BaseMessage, HumanMessage +from langchain_core.messages import SystemMessage from application.chat_pipeline.I_base_chat_pipeline import ParagraphPipelineModel from application.chat_pipeline.step.generate_human_message_step.i_generate_human_message_step import \ @@ -27,6 +28,7 @@ def execute(self, problem_text: str, prompt: str, padding_problem_text: str = None, no_references_setting=None, + system=None, **kwargs) -> List[BaseMessage]: prompt = prompt if (paragraph_list is not None and len(paragraph_list) > 0) else no_references_setting.get( 'value') @@ -35,6 +37,11 @@ def execute(self, problem_text: str, history_message = [[history_chat_record[index].get_human_message(), history_chat_record[index].get_ai_message()] for index in range(start_index if start_index > 0 else 0, len(history_chat_record))] + if system is not None and len(system) > 0: + return [SystemMessage(system), *flat_map(history_message), + self.to_human_message(prompt, exec_problem_text, max_paragraph_char_number, paragraph_list, + no_references_setting)] + return [*flat_map(history_message), self.to_human_message(prompt, exec_problem_text, max_paragraph_char_number, paragraph_list, no_references_setting)] @@ -48,9 +55,9 @@ def to_human_message(prompt: str, if paragraph_list is None or len(paragraph_list) == 0: if no_references_setting.get('status') == 'ai_questioning': return HumanMessage( - content=no_references_setting.get('value').format(**{'question': problem})) + content=no_references_setting.get('value').replace('{question}', problem)) else: - return HumanMessage(content=prompt.format(**{'data': "", 'question': problem})) + return HumanMessage(content=prompt.replace('{data}', "").replace('{question}', problem)) temp_data = "" data_list = [] for p in paragraph_list: @@ -63,4 +70,4 @@ def to_human_message(prompt: 
str, else: data_list.append(f"{content}") data = "\n".join(data_list) - return HumanMessage(content=prompt.format(**{'data': data, 'question': problem})) + return HumanMessage(content=prompt.replace('{data}', data).replace('{question}', problem)) diff --git a/apps/application/chat_pipeline/step/reset_problem_step/i_reset_problem_step.py b/apps/application/chat_pipeline/step/reset_problem_step/i_reset_problem_step.py index ce30d96af3a..f48f5c804fd 100644 --- a/apps/application/chat_pipeline/step/reset_problem_step/i_reset_problem_step.py +++ b/apps/application/chat_pipeline/step/reset_problem_step/i_reset_problem_step.py @@ -9,12 +9,11 @@ from abc import abstractmethod from typing import Type, List -from langchain.chat_models.base import BaseChatModel +from django.utils.translation import gettext_lazy as _ from rest_framework import serializers from application.chat_pipeline.I_base_chat_pipeline import IBaseChatPipelineStep from application.chat_pipeline.pipeline_manage import PipelineManage -from application.chat_pipeline.step.chat_step.i_chat_step import ModelField from application.models import ChatRecord from common.field.common import InstanceField from common.util.field_message import ErrMessage @@ -23,12 +22,16 @@ class IResetProblemStep(IBaseChatPipelineStep): class InstanceSerializer(serializers.Serializer): # 问题文本 - problem_text = serializers.CharField(required=True, error_messages=ErrMessage.float("问题文本")) + problem_text = serializers.CharField(required=True, error_messages=ErrMessage.float(_("question"))) # 历史对答 history_chat_record = serializers.ListField(child=InstanceField(model_type=ChatRecord, required=True), - error_messages=ErrMessage.list("历史对答")) + error_messages=ErrMessage.list(_("History Questions"))) # 大语言模型 - chat_model = ModelField(required=False, allow_null=True, error_messages=ErrMessage.base("大语言模型")) + model_id = serializers.UUIDField(required=False, allow_null=True, error_messages=ErrMessage.uuid(_("Model id"))) + user_id = 
serializers.UUIDField(required=True, error_messages=ErrMessage.uuid(_("User ID"))) + problem_optimization_prompt = serializers.CharField(required=False, max_length=102400, + error_messages=ErrMessage.char( + _("Question completion prompt"))) def get_step_serializer(self, manage: PipelineManage) -> Type[serializers.Serializer]: return self.InstanceSerializer @@ -42,10 +45,13 @@ def _run(self, manage: PipelineManage): manage.context['problem_text'] = source_problem_text manage.context['padding_problem_text'] = padding_problem # 累加tokens - manage.context['message_tokens'] = manage.context['message_tokens'] + self.context.get('message_tokens') - manage.context['answer_tokens'] = manage.context['answer_tokens'] + self.context.get('answer_tokens') + manage.context['message_tokens'] = manage.context.get('message_tokens', 0) + self.context.get('message_tokens', + 0) + manage.context['answer_tokens'] = manage.context.get('answer_tokens', 0) + self.context.get('answer_tokens', 0) @abstractmethod - def execute(self, problem_text: str, history_chat_record: List[ChatRecord] = None, chat_model: BaseChatModel = None, + def execute(self, problem_text: str, history_chat_record: List[ChatRecord] = None, model_id: str = None, + problem_optimization_prompt=None, + user_id=None, **kwargs): pass diff --git a/apps/application/chat_pipeline/step/reset_problem_step/impl/base_reset_problem_step.py b/apps/application/chat_pipeline/step/reset_problem_step/impl/base_reset_problem_step.py index c0595d590fb..ec01daa3444 100644 --- a/apps/application/chat_pipeline/step/reset_problem_step/impl/base_reset_problem_step.py +++ b/apps/application/chat_pipeline/step/reset_problem_step/impl/base_reset_problem_step.py @@ -8,30 +8,33 @@ """ from typing import List -from langchain.chat_models.base import BaseChatModel +from django.utils.translation import gettext as _ from langchain.schema import HumanMessage from application.chat_pipeline.step.reset_problem_step.i_reset_problem_step import 
IResetProblemStep from application.models import ChatRecord from common.util.split_model import flat_map +from setting.models_provider.tools import get_model_instance_by_model_user_id -prompt = ( - '()里面是用户问题,根据上下文回答揣测用户问题({question}) 要求: 输出一个补全问题,并且放在标签中') +prompt = _( + "() contains the user's question. Answer the guessed user's question based on the context ({question}) Requirement: Output a complete question and put it in the tag") class BaseResetProblemStep(IResetProblemStep): - def execute(self, problem_text: str, history_chat_record: List[ChatRecord] = None, chat_model: BaseChatModel = None, + def execute(self, problem_text: str, history_chat_record: List[ChatRecord] = None, model_id: str = None, + problem_optimization_prompt=None, + user_id=None, **kwargs) -> str: + chat_model = get_model_instance_by_model_user_id(model_id, user_id) if model_id is not None else None if chat_model is None: - self.context['message_tokens'] = 0 - self.context['answer_tokens'] = 0 return problem_text start_index = len(history_chat_record) - 3 history_message = [[history_chat_record[index].get_human_message(), history_chat_record[index].get_ai_message()] for index in range(start_index if start_index > 0 else 0, len(history_chat_record))] + reset_prompt = problem_optimization_prompt if problem_optimization_prompt else prompt message_list = [*flat_map(history_message), - HumanMessage(content=prompt.format(**{'question': problem_text}))] + HumanMessage(content=reset_prompt.replace('{question}', problem_text))] response = chat_model.invoke(message_list) padding_problem = problem_text if response.content.__contains__("") and response.content.__contains__(''): @@ -39,6 +42,9 @@ def execute(self, problem_text: str, history_chat_record: List[ChatRecord] = Non response.content.index('') + 6:response.content.index('')] if padding_problem_data is not None and len(padding_problem_data.strip()) > 0: padding_problem = padding_problem_data + elif len(response.content) > 0: + padding_problem = 
response.content + try: request_token = chat_model.get_num_tokens_from_messages(message_list) response_token = chat_model.get_num_tokens(padding_problem) @@ -54,8 +60,8 @@ def get_details(self, manage, **kwargs): 'step_type': 'problem_padding', 'run_time': self.context['run_time'], 'model_id': str(manage.context['model_id']) if 'model_id' in manage.context else None, - 'message_tokens': self.context['message_tokens'], - 'answer_tokens': self.context['answer_tokens'], + 'message_tokens': self.context.get('message_tokens', 0), + 'answer_tokens': self.context.get('answer_tokens', 0), 'cost': 0, 'padding_problem_text': self.context.get('padding_problem_text'), 'problem_text': self.context.get("step_args").get('problem_text'), diff --git a/apps/application/chat_pipeline/step/search_dataset_step/i_search_dataset_step.py b/apps/application/chat_pipeline/step/search_dataset_step/i_search_dataset_step.py index 549abfaf297..7b222cbc279 100644 --- a/apps/application/chat_pipeline/step/search_dataset_step/i_search_dataset_step.py +++ b/apps/application/chat_pipeline/step/search_dataset_step/i_search_dataset_step.py @@ -11,6 +11,7 @@ from typing import List, Type from django.core import validators +from django.utils.translation import gettext_lazy as _ from rest_framework import serializers from application.chat_pipeline.I_base_chat_pipeline import IBaseChatPipelineStep, ParagraphPipelineModel @@ -21,28 +22,30 @@ class ISearchDatasetStep(IBaseChatPipelineStep): class InstanceSerializer(serializers.Serializer): # 原始问题文本 - problem_text = serializers.CharField(required=True, error_messages=ErrMessage.char("问题")) + problem_text = serializers.CharField(required=True, error_messages=ErrMessage.char(_("question"))) # 系统补全问题文本 - padding_problem_text = serializers.CharField(required=False, error_messages=ErrMessage.char("系统补全问题文本")) + padding_problem_text = serializers.CharField(required=False, + error_messages=ErrMessage.char(_("System completes question text"))) # 需要查询的数据集id列表 
dataset_id_list = serializers.ListField(required=True, child=serializers.UUIDField(required=True), - error_messages=ErrMessage.list("数据集id列表")) + error_messages=ErrMessage.list(_("Dataset id list"))) # 需要排除的文档id exclude_document_id_list = serializers.ListField(required=True, child=serializers.UUIDField(required=True), - error_messages=ErrMessage.list("排除的文档id列表")) + error_messages=ErrMessage.list(_("List of document ids to exclude"))) # 需要排除向量id exclude_paragraph_id_list = serializers.ListField(required=True, child=serializers.UUIDField(required=True), - error_messages=ErrMessage.list("排除向量id列表")) + error_messages=ErrMessage.list(_("List of exclusion vector ids"))) # 需要查询的条数 top_n = serializers.IntegerField(required=True, - error_messages=ErrMessage.integer("引用分段数")) + error_messages=ErrMessage.integer(_("Reference segment number"))) # 相似度 0-1之间 similarity = serializers.FloatField(required=True, max_value=1, min_value=0, - error_messages=ErrMessage.float("引用分段数")) + error_messages=ErrMessage.float(_("Similarity"))) search_mode = serializers.CharField(required=True, validators=[ validators.RegexValidator(regex=re.compile("^embedding|keywords|blend$"), - message="类型只支持register|reset_password", code=500) - ], error_messages=ErrMessage.char("检索模式")) + message=_("The type only supports embedding|keywords|blend"), code=500) + ], error_messages=ErrMessage.char(_("Retrieval Mode"))) + user_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid(_("User ID"))) def get_step_serializer(self, manage: PipelineManage) -> Type[InstanceSerializer]: return self.InstanceSerializer @@ -56,6 +59,7 @@ def _run(self, manage: PipelineManage): def execute(self, problem_text: str, dataset_id_list: list[str], exclude_document_id_list: list[str], exclude_paragraph_id_list: list[str], top_n: int, similarity: float, padding_problem_text: str = None, search_mode: str = None, + user_id=None, **kwargs) -> List[ParagraphPipelineModel]: """ 关于 用户和补全问题 说明: 补全问题如果有就使用补全问题去查询 
反之就用用户原始问题查询 @@ -67,6 +71,7 @@ def execute(self, problem_text: str, dataset_id_list: list[str], exclude_documen :param exclude_paragraph_id_list: 需要排除段落id :param padding_problem_text 补全问题 :param search_mode 检索模式 + :param user_id 用户id :return: 段落列表 """ pass diff --git a/apps/application/chat_pipeline/step/search_dataset_step/impl/base_search_dataset_step.py b/apps/application/chat_pipeline/step/search_dataset_step/impl/base_search_dataset_step.py index 3dd9f830008..6591f6d246a 100644 --- a/apps/application/chat_pipeline/step/search_dataset_step/impl/base_search_dataset_step.py +++ b/apps/application/chat_pipeline/step/search_dataset_step/impl/base_search_dataset_step.py @@ -10,25 +10,54 @@ from typing import List, Dict from django.db.models import QuerySet +from django.utils.translation import gettext_lazy as _ +from rest_framework.utils.formatting import lazy_format from application.chat_pipeline.I_base_chat_pipeline import ParagraphPipelineModel from application.chat_pipeline.step.search_dataset_step.i_search_dataset_step import ISearchDatasetStep -from common.config.embedding_config import VectorStore, EmbeddingModel +from common.config.embedding_config import VectorStore, ModelManage from common.db.search import native_search from common.util.file_util import get_file_content -from dataset.models import Paragraph +from dataset.models import Paragraph, DataSet from embedding.models import SearchMode +from setting.models import Model +from setting.models_provider import get_model from smartdoc.conf import PROJECT_DIR +def get_model_by_id(_id, user_id): + model = QuerySet(Model).filter(id=_id).first() + if model is None: + raise Exception(_("Model does not exist")) + if model.permission_type == 'PRIVATE' and str(model.user_id) != str(user_id): + message = lazy_format(_('No permission to use this model {model_name}'), model_name=model.name) + raise Exception(message) + return model + + +def get_embedding_id(dataset_id_list): + dataset_list = 
QuerySet(DataSet).filter(id__in=dataset_id_list) + if len(set([dataset.embedding_mode_id for dataset in dataset_list])) > 1: + raise Exception(_("The vector model of the associated knowledge base is inconsistent and the segmentation cannot be recalled.")) + if len(dataset_list) == 0: + raise Exception(_("The knowledge base setting is wrong, please reset the knowledge base")) + return dataset_list[0].embedding_mode_id + + class BaseSearchDatasetStep(ISearchDatasetStep): def execute(self, problem_text: str, dataset_id_list: list[str], exclude_document_id_list: list[str], exclude_paragraph_id_list: list[str], top_n: int, similarity: float, padding_problem_text: str = None, search_mode: str = None, + user_id=None, **kwargs) -> List[ParagraphPipelineModel]: + if len(dataset_id_list) == 0: + return [] exec_problem_text = padding_problem_text if padding_problem_text is not None else problem_text - embedding_model = EmbeddingModel.get_embedding_model() + model_id = get_embedding_id(dataset_id_list) + model = get_model_by_id(model_id, user_id) + self.context['model_name'] = model.name + embedding_model = ModelManage.get_model(model_id, lambda _id: get_model(model)) embedding_value = embedding_model.embed_query(exec_problem_text) vector = VectorStore.get_embedding_vector() embedding_list = vector.query(exec_problem_text, embedding_value, dataset_id_list, exclude_document_id_list, @@ -53,6 +82,7 @@ def reset_paragraph(paragraph: Dict, embedding_list: List) -> ParagraphPipelineM .add_document_name(paragraph.get('document_name')) .add_hit_handling_method(paragraph.get('hit_handling_method')) .add_directly_return_similarity(paragraph.get('directly_return_similarity')) + .add_meta(paragraph.get('meta')) .build()) @staticmethod @@ -101,7 +131,7 @@ def get_details(self, manage, **kwargs): 'run_time': self.context['run_time'], 'problem_text': step_args.get( 'padding_problem_text') if 'padding_problem_text' in step_args else step_args.get('problem_text'), - 'model_name': 
EmbeddingModel.get_embedding_model().model_name, + 'model_name': self.context.get('model_name'), 'message_tokens': 0, 'answer_tokens': 0, 'cost': 0 diff --git a/apps/application/flow/__init__.py b/apps/application/flow/__init__.py new file mode 100644 index 00000000000..328e8f8ec5f --- /dev/null +++ b/apps/application/flow/__init__.py @@ -0,0 +1,8 @@ +# coding=utf-8 +""" + @project: maxkb + @Author:虎 + @file: __init__.py.py + @date:2024/6/7 14:43 + @desc: +""" diff --git a/apps/application/flow/common.py b/apps/application/flow/common.py new file mode 100644 index 00000000000..f5d4cb9b0f7 --- /dev/null +++ b/apps/application/flow/common.py @@ -0,0 +1,44 @@ +# coding=utf-8 +""" + @project: MaxKB + @Author:虎 + @file: common.py + @date:2024/12/11 17:57 + @desc: +""" + + +class Answer: + def __init__(self, content, view_type, runtime_node_id, chat_record_id, child_node, real_node_id, + reasoning_content): + self.view_type = view_type + self.content = content + self.reasoning_content = reasoning_content + self.runtime_node_id = runtime_node_id + self.chat_record_id = chat_record_id + self.child_node = child_node + self.real_node_id = real_node_id + + def to_dict(self): + return {'view_type': self.view_type, 'content': self.content, 'runtime_node_id': self.runtime_node_id, + 'chat_record_id': self.chat_record_id, + 'child_node': self.child_node, + 'reasoning_content': self.reasoning_content, + 'real_node_id': self.real_node_id} + + +class NodeChunk: + def __init__(self): + self.status = 0 + self.chunk_list = [] + + def add_chunk(self, chunk): + self.chunk_list.append(chunk) + + def end(self, chunk=None): + if chunk is not None: + self.add_chunk(chunk) + self.status = 200 + + def is_end(self): + return self.status == 200 diff --git a/apps/application/flow/default_workflow.json b/apps/application/flow/default_workflow.json new file mode 100644 index 00000000000..48ac23c4dc6 --- /dev/null +++ b/apps/application/flow/default_workflow.json @@ -0,0 +1,451 @@ +{ + "nodes": [ + 
{ + "id": "base-node", + "type": "base-node", + "x": 360, + "y": 2810, + "properties": { + "config": { + + }, + "height": 825.6, + "stepName": "基本信息", + "node_data": { + "desc": "", + "name": "maxkbapplication", + "prologue": "您好,我是 MaxKB 小助手,您可以向我提出 MaxKB 使用问题。\n- MaxKB 主要功能有什么?\n- MaxKB 支持哪些大语言模型?\n- MaxKB 支持哪些文档类型?" + }, + "input_field_list": [ + + ] + } + }, + { + "id": "start-node", + "type": "start-node", + "x": 430, + "y": 3660, + "properties": { + "config": { + "fields": [ + { + "label": "用户问题", + "value": "question" + } + ], + "globalFields": [ + { + "label": "当前时间", + "value": "time" + } + ] + }, + "fields": [ + { + "label": "用户问题", + "value": "question" + } + ], + "height": 276, + "stepName": "开始", + "globalFields": [ + { + "label": "当前时间", + "value": "time" + } + ] + } + }, + { + "id": "b931efe5-5b66-46e0-ae3b-0160cb18eeb5", + "type": "search-dataset-node", + "x": 840, + "y": 3210, + "properties": { + "config": { + "fields": [ + { + "label": "检索结果的分段列表", + "value": "paragraph_list" + }, + { + "label": "满足直接回答的分段列表", + "value": "is_hit_handling_method_list" + }, + { + "label": "检索结果", + "value": "data" + }, + { + "label": "满足直接回答的分段内容", + "value": "directly_return" + } + ] + }, + "height": 794, + "stepName": "知识库检索", + "node_data": { + "dataset_id_list": [ + + ], + "dataset_setting": { + "top_n": 3, + "similarity": 0.6, + "search_mode": "embedding", + "max_paragraph_char_number": 5000 + }, + "question_reference_address": [ + "start-node", + "question" + ], + "source_dataset_id_list": [ + + ] + } + } + }, + { + "id": "fc60863a-dec2-4854-9e5a-7a44b7187a2b", + "type": "condition-node", + "x": 1490, + "y": 3210, + "properties": { + "width": 600, + "config": { + "fields": [ + { + "label": "分支名称", + "value": "branch_name" + } + ] + }, + "height": 543.675, + "stepName": "判断器", + "node_data": { + "branch": [ + { + "id": "1009", + "type": "IF", + "condition": "and", + "conditions": [ + { + "field": [ + "b931efe5-5b66-46e0-ae3b-0160cb18eeb5", + 
"is_hit_handling_method_list" + ], + "value": "1", + "compare": "len_ge" + } + ] + }, + { + "id": "4908", + "type": "ELSE IF 1", + "condition": "and", + "conditions": [ + { + "field": [ + "b931efe5-5b66-46e0-ae3b-0160cb18eeb5", + "paragraph_list" + ], + "value": "1", + "compare": "len_ge" + } + ] + }, + { + "id": "161", + "type": "ELSE", + "condition": "and", + "conditions": [ + + ] + } + ] + }, + "branch_condition_list": [ + { + "index": 0, + "height": 121.225, + "id": "1009" + }, + { + "index": 1, + "height": 121.225, + "id": "4908" + }, + { + "index": 2, + "height": 44, + "id": "161" + } + ] + } + }, + { + "id": "4ffe1086-25df-4c85-b168-979b5bbf0a26", + "type": "reply-node", + "x": 2170, + "y": 2480, + "properties": { + "config": { + "fields": [ + { + "label": "内容", + "value": "answer" + } + ] + }, + "height": 378, + "stepName": "指定回复", + "node_data": { + "fields": [ + "b931efe5-5b66-46e0-ae3b-0160cb18eeb5", + "directly_return" + ], + "content": "", + "reply_type": "referencing", + "is_result": true + } + } + }, + { + "id": "f1f1ee18-5a02-46f6-b4e6-226253cdffbb", + "type": "ai-chat-node", + "x": 2160, + "y": 3200, + "properties": { + "config": { + "fields": [ + { + "label": "AI 回答内容", + "value": "answer" + } + ] + }, + "height": 763, + "stepName": "AI 对话", + "node_data": { + "prompt": "已知信息:\n{{知识库检索.data}}\n问题:\n{{开始.question}}", + "system": "", + "model_id": "", + "dialogue_number": 0, + "is_result": true + } + } + }, + { + "id": "309d0eef-c597-46b5-8d51-b9a28aaef4c7", + "type": "ai-chat-node", + "x": 2160, + "y": 3970, + "properties": { + "config": { + "fields": [ + { + "label": "AI 回答内容", + "value": "answer" + } + ] + }, + "height": 763, + "stepName": "AI 对话1", + "node_data": { + "prompt": "{{开始.question}}", + "system": "", + "model_id": "", + "dialogue_number": 0, + "is_result": true + } + } + } + ], + "edges": [ + { + "id": "7d0f166f-c472-41b2-b9a2-c294f4c83d73", + "type": "app-edge", + "sourceNodeId": "start-node", + "targetNodeId": 
"b931efe5-5b66-46e0-ae3b-0160cb18eeb5", + "startPoint": { + "x": 590, + "y": 3660 + }, + "endPoint": { + "x": 680, + "y": 3210 + }, + "properties": { + + }, + "pointsList": [ + { + "x": 590, + "y": 3660 + }, + { + "x": 700, + "y": 3660 + }, + { + "x": 570, + "y": 3210 + }, + { + "x": 680, + "y": 3210 + } + ], + "sourceAnchorId": "start-node_right", + "targetAnchorId": "b931efe5-5b66-46e0-ae3b-0160cb18eeb5_left" + }, + { + "id": "35cb86dd-f328-429e-a973-12fd7218b696", + "type": "app-edge", + "sourceNodeId": "b931efe5-5b66-46e0-ae3b-0160cb18eeb5", + "targetNodeId": "fc60863a-dec2-4854-9e5a-7a44b7187a2b", + "startPoint": { + "x": 1000, + "y": 3210 + }, + "endPoint": { + "x": 1200, + "y": 3210 + }, + "properties": { + + }, + "pointsList": [ + { + "x": 1000, + "y": 3210 + }, + { + "x": 1110, + "y": 3210 + }, + { + "x": 1090, + "y": 3210 + }, + { + "x": 1200, + "y": 3210 + } + ], + "sourceAnchorId": "b931efe5-5b66-46e0-ae3b-0160cb18eeb5_right", + "targetAnchorId": "fc60863a-dec2-4854-9e5a-7a44b7187a2b_left" + }, + { + "id": "e8f6cfe6-7e48-41cd-abd3-abfb5304d0d8", + "type": "app-edge", + "sourceNodeId": "fc60863a-dec2-4854-9e5a-7a44b7187a2b", + "targetNodeId": "4ffe1086-25df-4c85-b168-979b5bbf0a26", + "startPoint": { + "x": 1780, + "y": 3073.775 + }, + "endPoint": { + "x": 2010, + "y": 2480 + }, + "properties": { + + }, + "pointsList": [ + { + "x": 1780, + "y": 3073.775 + }, + { + "x": 1890, + "y": 3073.775 + }, + { + "x": 1900, + "y": 2480 + }, + { + "x": 2010, + "y": 2480 + } + ], + "sourceAnchorId": "fc60863a-dec2-4854-9e5a-7a44b7187a2b_1009_right", + "targetAnchorId": "4ffe1086-25df-4c85-b168-979b5bbf0a26_left" + }, + { + "id": "994ff325-6f7a-4ebc-b61b-10e15519d6d2", + "type": "app-edge", + "sourceNodeId": "fc60863a-dec2-4854-9e5a-7a44b7187a2b", + "targetNodeId": "f1f1ee18-5a02-46f6-b4e6-226253cdffbb", + "startPoint": { + "x": 1780, + "y": 3203 + }, + "endPoint": { + "x": 2000, + "y": 3200 + }, + "properties": { + + }, + "pointsList": [ + { + "x": 1780, + "y": 3203 + 
}, + { + "x": 1890, + "y": 3203 + }, + { + "x": 1890, + "y": 3200 + }, + { + "x": 2000, + "y": 3200 + } + ], + "sourceAnchorId": "fc60863a-dec2-4854-9e5a-7a44b7187a2b_4908_right", + "targetAnchorId": "f1f1ee18-5a02-46f6-b4e6-226253cdffbb_left" + }, + { + "id": "19270caf-bb9f-4ba7-9bf8-200aa70fecd5", + "type": "app-edge", + "sourceNodeId": "fc60863a-dec2-4854-9e5a-7a44b7187a2b", + "targetNodeId": "309d0eef-c597-46b5-8d51-b9a28aaef4c7", + "startPoint": { + "x": 1780, + "y": 3293.6124999999997 + }, + "endPoint": { + "x": 2000, + "y": 3970 + }, + "properties": { + + }, + "pointsList": [ + { + "x": 1780, + "y": 3293.6124999999997 + }, + { + "x": 1890, + "y": 3293.6124999999997 + }, + { + "x": 1890, + "y": 3970 + }, + { + "x": 2000, + "y": 3970 + } + ], + "sourceAnchorId": "fc60863a-dec2-4854-9e5a-7a44b7187a2b_161_right", + "targetAnchorId": "309d0eef-c597-46b5-8d51-b9a28aaef4c7_left" + } + ] +} \ No newline at end of file diff --git a/apps/application/flow/default_workflow_en.json b/apps/application/flow/default_workflow_en.json new file mode 100644 index 00000000000..7c0194be676 --- /dev/null +++ b/apps/application/flow/default_workflow_en.json @@ -0,0 +1,451 @@ +{ + "nodes": [ + { + "id": "base-node", + "type": "base-node", + "x": 360, + "y": 2810, + "properties": { + "config": { + + }, + "height": 825.6, + "stepName": "Base", + "node_data": { + "desc": "", + "name": "maxkbapplication", + "prologue": "Hello, I am the MaxKB assistant. You can ask me about MaxKB usage issues.\n-What are the main functions of MaxKB?\n-What major language models does MaxKB support?\n-What document types does MaxKB support?" 
+ }, + "input_field_list": [ + + ] + } + }, + { + "id": "start-node", + "type": "start-node", + "x": 430, + "y": 3660, + "properties": { + "config": { + "fields": [ + { + "label": "用户问题", + "value": "question" + } + ], + "globalFields": [ + { + "label": "当前时间", + "value": "time" + } + ] + }, + "fields": [ + { + "label": "用户问题", + "value": "question" + } + ], + "height": 276, + "stepName": "Start", + "globalFields": [ + { + "label": "当前时间", + "value": "time" + } + ] + } + }, + { + "id": "b931efe5-5b66-46e0-ae3b-0160cb18eeb5", + "type": "search-dataset-node", + "x": 840, + "y": 3210, + "properties": { + "config": { + "fields": [ + { + "label": "检索结果的分段列表", + "value": "paragraph_list" + }, + { + "label": "满足直接回答的分段列表", + "value": "is_hit_handling_method_list" + }, + { + "label": "检索结果", + "value": "data" + }, + { + "label": "满足直接回答的分段内容", + "value": "directly_return" + } + ] + }, + "height": 794, + "stepName": "Knowledge Search", + "node_data": { + "dataset_id_list": [ + + ], + "dataset_setting": { + "top_n": 3, + "similarity": 0.6, + "search_mode": "embedding", + "max_paragraph_char_number": 5000 + }, + "question_reference_address": [ + "start-node", + "question" + ], + "source_dataset_id_list": [ + + ] + } + } + }, + { + "id": "fc60863a-dec2-4854-9e5a-7a44b7187a2b", + "type": "condition-node", + "x": 1490, + "y": 3210, + "properties": { + "width": 600, + "config": { + "fields": [ + { + "label": "分支名称", + "value": "branch_name" + } + ] + }, + "height": 543.675, + "stepName": "Conditional Branch", + "node_data": { + "branch": [ + { + "id": "1009", + "type": "IF", + "condition": "and", + "conditions": [ + { + "field": [ + "b931efe5-5b66-46e0-ae3b-0160cb18eeb5", + "is_hit_handling_method_list" + ], + "value": "1", + "compare": "len_ge" + } + ] + }, + { + "id": "4908", + "type": "ELSE IF 1", + "condition": "and", + "conditions": [ + { + "field": [ + "b931efe5-5b66-46e0-ae3b-0160cb18eeb5", + "paragraph_list" + ], + "value": "1", + "compare": "len_ge" + } + ] + }, + { + 
"id": "161", + "type": "ELSE", + "condition": "and", + "conditions": [ + + ] + } + ] + }, + "branch_condition_list": [ + { + "index": 0, + "height": 121.225, + "id": "1009" + }, + { + "index": 1, + "height": 121.225, + "id": "4908" + }, + { + "index": 2, + "height": 44, + "id": "161" + } + ] + } + }, + { + "id": "4ffe1086-25df-4c85-b168-979b5bbf0a26", + "type": "reply-node", + "x": 2170, + "y": 2480, + "properties": { + "config": { + "fields": [ + { + "label": "内容", + "value": "answer" + } + ] + }, + "height": 378, + "stepName": "Specified Reply", + "node_data": { + "fields": [ + "b931efe5-5b66-46e0-ae3b-0160cb18eeb5", + "directly_return" + ], + "content": "", + "reply_type": "referencing", + "is_result": true + } + } + }, + { + "id": "f1f1ee18-5a02-46f6-b4e6-226253cdffbb", + "type": "ai-chat-node", + "x": 2160, + "y": 3200, + "properties": { + "config": { + "fields": [ + { + "label": "AI 回答内容", + "value": "answer" + } + ] + }, + "height": 763, + "stepName": "AI Chat", + "node_data": { + "prompt": "Known information:\n{{Knowledge Search.data}}\nQuestion:\n{{Start.question}}", + "system": "", + "model_id": "", + "dialogue_number": 0, + "is_result": true + } + } + }, + { + "id": "309d0eef-c597-46b5-8d51-b9a28aaef4c7", + "type": "ai-chat-node", + "x": 2160, + "y": 3970, + "properties": { + "config": { + "fields": [ + { + "label": "AI 回答内容", + "value": "answer" + } + ] + }, + "height": 763, + "stepName": "AI Chat1", + "node_data": { + "prompt": "{{Start.question}}", + "system": "", + "model_id": "", + "dialogue_number": 0, + "is_result": true + } + } + } + ], + "edges": [ + { + "id": "7d0f166f-c472-41b2-b9a2-c294f4c83d73", + "type": "app-edge", + "sourceNodeId": "start-node", + "targetNodeId": "b931efe5-5b66-46e0-ae3b-0160cb18eeb5", + "startPoint": { + "x": 590, + "y": 3660 + }, + "endPoint": { + "x": 680, + "y": 3210 + }, + "properties": { + + }, + "pointsList": [ + { + "x": 590, + "y": 3660 + }, + { + "x": 700, + "y": 3660 + }, + { + "x": 570, + "y": 3210 + }, + { + 
"x": 680, + "y": 3210 + } + ], + "sourceAnchorId": "start-node_right", + "targetAnchorId": "b931efe5-5b66-46e0-ae3b-0160cb18eeb5_left" + }, + { + "id": "35cb86dd-f328-429e-a973-12fd7218b696", + "type": "app-edge", + "sourceNodeId": "b931efe5-5b66-46e0-ae3b-0160cb18eeb5", + "targetNodeId": "fc60863a-dec2-4854-9e5a-7a44b7187a2b", + "startPoint": { + "x": 1000, + "y": 3210 + }, + "endPoint": { + "x": 1200, + "y": 3210 + }, + "properties": { + + }, + "pointsList": [ + { + "x": 1000, + "y": 3210 + }, + { + "x": 1110, + "y": 3210 + }, + { + "x": 1090, + "y": 3210 + }, + { + "x": 1200, + "y": 3210 + } + ], + "sourceAnchorId": "b931efe5-5b66-46e0-ae3b-0160cb18eeb5_right", + "targetAnchorId": "fc60863a-dec2-4854-9e5a-7a44b7187a2b_left" + }, + { + "id": "e8f6cfe6-7e48-41cd-abd3-abfb5304d0d8", + "type": "app-edge", + "sourceNodeId": "fc60863a-dec2-4854-9e5a-7a44b7187a2b", + "targetNodeId": "4ffe1086-25df-4c85-b168-979b5bbf0a26", + "startPoint": { + "x": 1780, + "y": 3073.775 + }, + "endPoint": { + "x": 2010, + "y": 2480 + }, + "properties": { + + }, + "pointsList": [ + { + "x": 1780, + "y": 3073.775 + }, + { + "x": 1890, + "y": 3073.775 + }, + { + "x": 1900, + "y": 2480 + }, + { + "x": 2010, + "y": 2480 + } + ], + "sourceAnchorId": "fc60863a-dec2-4854-9e5a-7a44b7187a2b_1009_right", + "targetAnchorId": "4ffe1086-25df-4c85-b168-979b5bbf0a26_left" + }, + { + "id": "994ff325-6f7a-4ebc-b61b-10e15519d6d2", + "type": "app-edge", + "sourceNodeId": "fc60863a-dec2-4854-9e5a-7a44b7187a2b", + "targetNodeId": "f1f1ee18-5a02-46f6-b4e6-226253cdffbb", + "startPoint": { + "x": 1780, + "y": 3203 + }, + "endPoint": { + "x": 2000, + "y": 3200 + }, + "properties": { + + }, + "pointsList": [ + { + "x": 1780, + "y": 3203 + }, + { + "x": 1890, + "y": 3203 + }, + { + "x": 1890, + "y": 3200 + }, + { + "x": 2000, + "y": 3200 + } + ], + "sourceAnchorId": "fc60863a-dec2-4854-9e5a-7a44b7187a2b_4908_right", + "targetAnchorId": "f1f1ee18-5a02-46f6-b4e6-226253cdffbb_left" + }, + { + "id": 
"19270caf-bb9f-4ba7-9bf8-200aa70fecd5", + "type": "app-edge", + "sourceNodeId": "fc60863a-dec2-4854-9e5a-7a44b7187a2b", + "targetNodeId": "309d0eef-c597-46b5-8d51-b9a28aaef4c7", + "startPoint": { + "x": 1780, + "y": 3293.6124999999997 + }, + "endPoint": { + "x": 2000, + "y": 3970 + }, + "properties": { + + }, + "pointsList": [ + { + "x": 1780, + "y": 3293.6124999999997 + }, + { + "x": 1890, + "y": 3293.6124999999997 + }, + { + "x": 1890, + "y": 3970 + }, + { + "x": 2000, + "y": 3970 + } + ], + "sourceAnchorId": "fc60863a-dec2-4854-9e5a-7a44b7187a2b_161_right", + "targetAnchorId": "309d0eef-c597-46b5-8d51-b9a28aaef4c7_left" + } + ] +} \ No newline at end of file diff --git a/apps/application/flow/default_workflow_zh.json b/apps/application/flow/default_workflow_zh.json new file mode 100644 index 00000000000..48ac23c4dc6 --- /dev/null +++ b/apps/application/flow/default_workflow_zh.json @@ -0,0 +1,451 @@ +{ + "nodes": [ + { + "id": "base-node", + "type": "base-node", + "x": 360, + "y": 2810, + "properties": { + "config": { + + }, + "height": 825.6, + "stepName": "基本信息", + "node_data": { + "desc": "", + "name": "maxkbapplication", + "prologue": "您好,我是 MaxKB 小助手,您可以向我提出 MaxKB 使用问题。\n- MaxKB 主要功能有什么?\n- MaxKB 支持哪些大语言模型?\n- MaxKB 支持哪些文档类型?" 
+ }, + "input_field_list": [ + + ] + } + }, + { + "id": "start-node", + "type": "start-node", + "x": 430, + "y": 3660, + "properties": { + "config": { + "fields": [ + { + "label": "用户问题", + "value": "question" + } + ], + "globalFields": [ + { + "label": "当前时间", + "value": "time" + } + ] + }, + "fields": [ + { + "label": "用户问题", + "value": "question" + } + ], + "height": 276, + "stepName": "开始", + "globalFields": [ + { + "label": "当前时间", + "value": "time" + } + ] + } + }, + { + "id": "b931efe5-5b66-46e0-ae3b-0160cb18eeb5", + "type": "search-dataset-node", + "x": 840, + "y": 3210, + "properties": { + "config": { + "fields": [ + { + "label": "检索结果的分段列表", + "value": "paragraph_list" + }, + { + "label": "满足直接回答的分段列表", + "value": "is_hit_handling_method_list" + }, + { + "label": "检索结果", + "value": "data" + }, + { + "label": "满足直接回答的分段内容", + "value": "directly_return" + } + ] + }, + "height": 794, + "stepName": "知识库检索", + "node_data": { + "dataset_id_list": [ + + ], + "dataset_setting": { + "top_n": 3, + "similarity": 0.6, + "search_mode": "embedding", + "max_paragraph_char_number": 5000 + }, + "question_reference_address": [ + "start-node", + "question" + ], + "source_dataset_id_list": [ + + ] + } + } + }, + { + "id": "fc60863a-dec2-4854-9e5a-7a44b7187a2b", + "type": "condition-node", + "x": 1490, + "y": 3210, + "properties": { + "width": 600, + "config": { + "fields": [ + { + "label": "分支名称", + "value": "branch_name" + } + ] + }, + "height": 543.675, + "stepName": "判断器", + "node_data": { + "branch": [ + { + "id": "1009", + "type": "IF", + "condition": "and", + "conditions": [ + { + "field": [ + "b931efe5-5b66-46e0-ae3b-0160cb18eeb5", + "is_hit_handling_method_list" + ], + "value": "1", + "compare": "len_ge" + } + ] + }, + { + "id": "4908", + "type": "ELSE IF 1", + "condition": "and", + "conditions": [ + { + "field": [ + "b931efe5-5b66-46e0-ae3b-0160cb18eeb5", + "paragraph_list" + ], + "value": "1", + "compare": "len_ge" + } + ] + }, + { + "id": "161", + "type": "ELSE", 
+ "condition": "and", + "conditions": [ + + ] + } + ] + }, + "branch_condition_list": [ + { + "index": 0, + "height": 121.225, + "id": "1009" + }, + { + "index": 1, + "height": 121.225, + "id": "4908" + }, + { + "index": 2, + "height": 44, + "id": "161" + } + ] + } + }, + { + "id": "4ffe1086-25df-4c85-b168-979b5bbf0a26", + "type": "reply-node", + "x": 2170, + "y": 2480, + "properties": { + "config": { + "fields": [ + { + "label": "内容", + "value": "answer" + } + ] + }, + "height": 378, + "stepName": "指定回复", + "node_data": { + "fields": [ + "b931efe5-5b66-46e0-ae3b-0160cb18eeb5", + "directly_return" + ], + "content": "", + "reply_type": "referencing", + "is_result": true + } + } + }, + { + "id": "f1f1ee18-5a02-46f6-b4e6-226253cdffbb", + "type": "ai-chat-node", + "x": 2160, + "y": 3200, + "properties": { + "config": { + "fields": [ + { + "label": "AI 回答内容", + "value": "answer" + } + ] + }, + "height": 763, + "stepName": "AI 对话", + "node_data": { + "prompt": "已知信息:\n{{知识库检索.data}}\n问题:\n{{开始.question}}", + "system": "", + "model_id": "", + "dialogue_number": 0, + "is_result": true + } + } + }, + { + "id": "309d0eef-c597-46b5-8d51-b9a28aaef4c7", + "type": "ai-chat-node", + "x": 2160, + "y": 3970, + "properties": { + "config": { + "fields": [ + { + "label": "AI 回答内容", + "value": "answer" + } + ] + }, + "height": 763, + "stepName": "AI 对话1", + "node_data": { + "prompt": "{{开始.question}}", + "system": "", + "model_id": "", + "dialogue_number": 0, + "is_result": true + } + } + } + ], + "edges": [ + { + "id": "7d0f166f-c472-41b2-b9a2-c294f4c83d73", + "type": "app-edge", + "sourceNodeId": "start-node", + "targetNodeId": "b931efe5-5b66-46e0-ae3b-0160cb18eeb5", + "startPoint": { + "x": 590, + "y": 3660 + }, + "endPoint": { + "x": 680, + "y": 3210 + }, + "properties": { + + }, + "pointsList": [ + { + "x": 590, + "y": 3660 + }, + { + "x": 700, + "y": 3660 + }, + { + "x": 570, + "y": 3210 + }, + { + "x": 680, + "y": 3210 + } + ], + "sourceAnchorId": "start-node_right", + 
"targetAnchorId": "b931efe5-5b66-46e0-ae3b-0160cb18eeb5_left" + }, + { + "id": "35cb86dd-f328-429e-a973-12fd7218b696", + "type": "app-edge", + "sourceNodeId": "b931efe5-5b66-46e0-ae3b-0160cb18eeb5", + "targetNodeId": "fc60863a-dec2-4854-9e5a-7a44b7187a2b", + "startPoint": { + "x": 1000, + "y": 3210 + }, + "endPoint": { + "x": 1200, + "y": 3210 + }, + "properties": { + + }, + "pointsList": [ + { + "x": 1000, + "y": 3210 + }, + { + "x": 1110, + "y": 3210 + }, + { + "x": 1090, + "y": 3210 + }, + { + "x": 1200, + "y": 3210 + } + ], + "sourceAnchorId": "b931efe5-5b66-46e0-ae3b-0160cb18eeb5_right", + "targetAnchorId": "fc60863a-dec2-4854-9e5a-7a44b7187a2b_left" + }, + { + "id": "e8f6cfe6-7e48-41cd-abd3-abfb5304d0d8", + "type": "app-edge", + "sourceNodeId": "fc60863a-dec2-4854-9e5a-7a44b7187a2b", + "targetNodeId": "4ffe1086-25df-4c85-b168-979b5bbf0a26", + "startPoint": { + "x": 1780, + "y": 3073.775 + }, + "endPoint": { + "x": 2010, + "y": 2480 + }, + "properties": { + + }, + "pointsList": [ + { + "x": 1780, + "y": 3073.775 + }, + { + "x": 1890, + "y": 3073.775 + }, + { + "x": 1900, + "y": 2480 + }, + { + "x": 2010, + "y": 2480 + } + ], + "sourceAnchorId": "fc60863a-dec2-4854-9e5a-7a44b7187a2b_1009_right", + "targetAnchorId": "4ffe1086-25df-4c85-b168-979b5bbf0a26_left" + }, + { + "id": "994ff325-6f7a-4ebc-b61b-10e15519d6d2", + "type": "app-edge", + "sourceNodeId": "fc60863a-dec2-4854-9e5a-7a44b7187a2b", + "targetNodeId": "f1f1ee18-5a02-46f6-b4e6-226253cdffbb", + "startPoint": { + "x": 1780, + "y": 3203 + }, + "endPoint": { + "x": 2000, + "y": 3200 + }, + "properties": { + + }, + "pointsList": [ + { + "x": 1780, + "y": 3203 + }, + { + "x": 1890, + "y": 3203 + }, + { + "x": 1890, + "y": 3200 + }, + { + "x": 2000, + "y": 3200 + } + ], + "sourceAnchorId": "fc60863a-dec2-4854-9e5a-7a44b7187a2b_4908_right", + "targetAnchorId": "f1f1ee18-5a02-46f6-b4e6-226253cdffbb_left" + }, + { + "id": "19270caf-bb9f-4ba7-9bf8-200aa70fecd5", + "type": "app-edge", + "sourceNodeId": 
"fc60863a-dec2-4854-9e5a-7a44b7187a2b", + "targetNodeId": "309d0eef-c597-46b5-8d51-b9a28aaef4c7", + "startPoint": { + "x": 1780, + "y": 3293.6124999999997 + }, + "endPoint": { + "x": 2000, + "y": 3970 + }, + "properties": { + + }, + "pointsList": [ + { + "x": 1780, + "y": 3293.6124999999997 + }, + { + "x": 1890, + "y": 3293.6124999999997 + }, + { + "x": 1890, + "y": 3970 + }, + { + "x": 2000, + "y": 3970 + } + ], + "sourceAnchorId": "fc60863a-dec2-4854-9e5a-7a44b7187a2b_161_right", + "targetAnchorId": "309d0eef-c597-46b5-8d51-b9a28aaef4c7_left" + } + ] +} \ No newline at end of file diff --git a/apps/application/flow/default_workflow_zh_Hant.json b/apps/application/flow/default_workflow_zh_Hant.json new file mode 100644 index 00000000000..b06301533d2 --- /dev/null +++ b/apps/application/flow/default_workflow_zh_Hant.json @@ -0,0 +1,451 @@ +{ + "nodes": [ + { + "id": "base-node", + "type": "base-node", + "x": 360, + "y": 2810, + "properties": { + "config": { + + }, + "height": 825.6, + "stepName": "基本資訊", + "node_data": { + "desc": "", + "name": "maxkbapplication", + "prologue": "您好,我是MaxKB小助手,您可以向我提出MaxKB使用問題。\n- MaxKB主要功能有什麼?\n- MaxKB支持哪些大語言模型?\n- MaxKB支持哪些文檔類型?" 
+ }, + "input_field_list": [ + + ] + } + }, + { + "id": "start-node", + "type": "start-node", + "x": 430, + "y": 3660, + "properties": { + "config": { + "fields": [ + { + "label": "用户问题", + "value": "question" + } + ], + "globalFields": [ + { + "label": "当前时间", + "value": "time" + } + ] + }, + "fields": [ + { + "label": "用户问题", + "value": "question" + } + ], + "height": 276, + "stepName": "開始", + "globalFields": [ + { + "label": "当前时间", + "value": "time" + } + ] + } + }, + { + "id": "b931efe5-5b66-46e0-ae3b-0160cb18eeb5", + "type": "search-dataset-node", + "x": 840, + "y": 3210, + "properties": { + "config": { + "fields": [ + { + "label": "检索结果的分段列表", + "value": "paragraph_list" + }, + { + "label": "满足直接回答的分段列表", + "value": "is_hit_handling_method_list" + }, + { + "label": "检索结果", + "value": "data" + }, + { + "label": "满足直接回答的分段内容", + "value": "directly_return" + } + ] + }, + "height": 794, + "stepName": "知識庫檢索", + "node_data": { + "dataset_id_list": [ + + ], + "dataset_setting": { + "top_n": 3, + "similarity": 0.6, + "search_mode": "embedding", + "max_paragraph_char_number": 5000 + }, + "question_reference_address": [ + "start-node", + "question" + ], + "source_dataset_id_list": [ + + ] + } + } + }, + { + "id": "fc60863a-dec2-4854-9e5a-7a44b7187a2b", + "type": "condition-node", + "x": 1490, + "y": 3210, + "properties": { + "width": 600, + "config": { + "fields": [ + { + "label": "分支名称", + "value": "branch_name" + } + ] + }, + "height": 543.675, + "stepName": "判斷器", + "node_data": { + "branch": [ + { + "id": "1009", + "type": "IF", + "condition": "and", + "conditions": [ + { + "field": [ + "b931efe5-5b66-46e0-ae3b-0160cb18eeb5", + "is_hit_handling_method_list" + ], + "value": "1", + "compare": "len_ge" + } + ] + }, + { + "id": "4908", + "type": "ELSE IF 1", + "condition": "and", + "conditions": [ + { + "field": [ + "b931efe5-5b66-46e0-ae3b-0160cb18eeb5", + "paragraph_list" + ], + "value": "1", + "compare": "len_ge" + } + ] + }, + { + "id": "161", + "type": "ELSE", 
+ "condition": "and", + "conditions": [ + + ] + } + ] + }, + "branch_condition_list": [ + { + "index": 0, + "height": 121.225, + "id": "1009" + }, + { + "index": 1, + "height": 121.225, + "id": "4908" + }, + { + "index": 2, + "height": 44, + "id": "161" + } + ] + } + }, + { + "id": "4ffe1086-25df-4c85-b168-979b5bbf0a26", + "type": "reply-node", + "x": 2170, + "y": 2480, + "properties": { + "config": { + "fields": [ + { + "label": "内容", + "value": "answer" + } + ] + }, + "height": 378, + "stepName": "指定回覆", + "node_data": { + "fields": [ + "b931efe5-5b66-46e0-ae3b-0160cb18eeb5", + "directly_return" + ], + "content": "", + "reply_type": "referencing", + "is_result": true + } + } + }, + { + "id": "f1f1ee18-5a02-46f6-b4e6-226253cdffbb", + "type": "ai-chat-node", + "x": 2160, + "y": 3200, + "properties": { + "config": { + "fields": [ + { + "label": "AI 回答内容", + "value": "answer" + } + ] + }, + "height": 763, + "stepName": "AI 對話", + "node_data": { + "prompt": "已知資訊:\n{{知識庫檢索.data}}\n問題:\n{{開始.question}}", + "system": "", + "model_id": "", + "dialogue_number": 0, + "is_result": true + } + } + }, + { + "id": "309d0eef-c597-46b5-8d51-b9a28aaef4c7", + "type": "ai-chat-node", + "x": 2160, + "y": 3970, + "properties": { + "config": { + "fields": [ + { + "label": "AI 回答内容", + "value": "answer" + } + ] + }, + "height": 763, + "stepName": "AI 對話1", + "node_data": { + "prompt": "{{開始.question}}", + "system": "", + "model_id": "", + "dialogue_number": 0, + "is_result": true + } + } + } + ], + "edges": [ + { + "id": "7d0f166f-c472-41b2-b9a2-c294f4c83d73", + "type": "app-edge", + "sourceNodeId": "start-node", + "targetNodeId": "b931efe5-5b66-46e0-ae3b-0160cb18eeb5", + "startPoint": { + "x": 590, + "y": 3660 + }, + "endPoint": { + "x": 680, + "y": 3210 + }, + "properties": { + + }, + "pointsList": [ + { + "x": 590, + "y": 3660 + }, + { + "x": 700, + "y": 3660 + }, + { + "x": 570, + "y": 3210 + }, + { + "x": 680, + "y": 3210 + } + ], + "sourceAnchorId": "start-node_right", + 
"targetAnchorId": "b931efe5-5b66-46e0-ae3b-0160cb18eeb5_left" + }, + { + "id": "35cb86dd-f328-429e-a973-12fd7218b696", + "type": "app-edge", + "sourceNodeId": "b931efe5-5b66-46e0-ae3b-0160cb18eeb5", + "targetNodeId": "fc60863a-dec2-4854-9e5a-7a44b7187a2b", + "startPoint": { + "x": 1000, + "y": 3210 + }, + "endPoint": { + "x": 1200, + "y": 3210 + }, + "properties": { + + }, + "pointsList": [ + { + "x": 1000, + "y": 3210 + }, + { + "x": 1110, + "y": 3210 + }, + { + "x": 1090, + "y": 3210 + }, + { + "x": 1200, + "y": 3210 + } + ], + "sourceAnchorId": "b931efe5-5b66-46e0-ae3b-0160cb18eeb5_right", + "targetAnchorId": "fc60863a-dec2-4854-9e5a-7a44b7187a2b_left" + }, + { + "id": "e8f6cfe6-7e48-41cd-abd3-abfb5304d0d8", + "type": "app-edge", + "sourceNodeId": "fc60863a-dec2-4854-9e5a-7a44b7187a2b", + "targetNodeId": "4ffe1086-25df-4c85-b168-979b5bbf0a26", + "startPoint": { + "x": 1780, + "y": 3073.775 + }, + "endPoint": { + "x": 2010, + "y": 2480 + }, + "properties": { + + }, + "pointsList": [ + { + "x": 1780, + "y": 3073.775 + }, + { + "x": 1890, + "y": 3073.775 + }, + { + "x": 1900, + "y": 2480 + }, + { + "x": 2010, + "y": 2480 + } + ], + "sourceAnchorId": "fc60863a-dec2-4854-9e5a-7a44b7187a2b_1009_right", + "targetAnchorId": "4ffe1086-25df-4c85-b168-979b5bbf0a26_left" + }, + { + "id": "994ff325-6f7a-4ebc-b61b-10e15519d6d2", + "type": "app-edge", + "sourceNodeId": "fc60863a-dec2-4854-9e5a-7a44b7187a2b", + "targetNodeId": "f1f1ee18-5a02-46f6-b4e6-226253cdffbb", + "startPoint": { + "x": 1780, + "y": 3203 + }, + "endPoint": { + "x": 2000, + "y": 3200 + }, + "properties": { + + }, + "pointsList": [ + { + "x": 1780, + "y": 3203 + }, + { + "x": 1890, + "y": 3203 + }, + { + "x": 1890, + "y": 3200 + }, + { + "x": 2000, + "y": 3200 + } + ], + "sourceAnchorId": "fc60863a-dec2-4854-9e5a-7a44b7187a2b_4908_right", + "targetAnchorId": "f1f1ee18-5a02-46f6-b4e6-226253cdffbb_left" + }, + { + "id": "19270caf-bb9f-4ba7-9bf8-200aa70fecd5", + "type": "app-edge", + "sourceNodeId": 
"fc60863a-dec2-4854-9e5a-7a44b7187a2b", + "targetNodeId": "309d0eef-c597-46b5-8d51-b9a28aaef4c7", + "startPoint": { + "x": 1780, + "y": 3293.6124999999997 + }, + "endPoint": { + "x": 2000, + "y": 3970 + }, + "properties": { + + }, + "pointsList": [ + { + "x": 1780, + "y": 3293.6124999999997 + }, + { + "x": 1890, + "y": 3293.6124999999997 + }, + { + "x": 1890, + "y": 3970 + }, + { + "x": 2000, + "y": 3970 + } + ], + "sourceAnchorId": "fc60863a-dec2-4854-9e5a-7a44b7187a2b_161_right", + "targetAnchorId": "309d0eef-c597-46b5-8d51-b9a28aaef4c7_left" + } + ] +} \ No newline at end of file diff --git a/apps/application/flow/i_step_node.py b/apps/application/flow/i_step_node.py new file mode 100644 index 00000000000..fcead7a40ad --- /dev/null +++ b/apps/application/flow/i_step_node.py @@ -0,0 +1,256 @@ +# coding=utf-8 +""" + @project: maxkb + @Author:虎 + @file: i_step_node.py + @date:2024/6/3 14:57 + @desc: +""" +import time +import uuid +from abc import abstractmethod +from hashlib import sha1 +from typing import Type, Dict, List + +from django.core import cache +from django.db.models import QuerySet +from rest_framework import serializers +from rest_framework.exceptions import ValidationError, ErrorDetail + +from application.flow.common import Answer, NodeChunk +from application.models import ChatRecord +from application.models.api_key_model import ApplicationPublicAccessClient +from common.constants.authentication_type import AuthenticationType +from common.field.common import InstanceField +from common.util.field_message import ErrMessage + +chat_cache = cache.caches['chat_cache'] + + +def write_context(step_variable: Dict, global_variable: Dict, node, workflow): + if step_variable is not None: + for key in step_variable: + node.context[key] = step_variable[key] + if workflow.is_result(node, NodeResult(step_variable, global_variable)) and 'answer' in step_variable: + answer = step_variable['answer'] + yield answer + node.answer_text = answer + if global_variable is not 
None: + for key in global_variable: + workflow.context[key] = global_variable[key] + node.context['run_time'] = time.time() - node.context['start_time'] + + +def is_interrupt(node, step_variable: Dict, global_variable: Dict): + return node.type == 'form-node' and not node.context.get('is_submit', False) + + +class WorkFlowPostHandler: + def __init__(self, chat_info, client_id, client_type): + self.chat_info = chat_info + self.client_id = client_id + self.client_type = client_type + + def handler(self, chat_id, + chat_record_id, + answer, + workflow): + question = workflow.params['question'] + details = workflow.get_runtime_details() + message_tokens = sum([row.get('message_tokens') for row in details.values() if + 'message_tokens' in row and row.get('message_tokens') is not None]) + answer_tokens = sum([row.get('answer_tokens') for row in details.values() if + 'answer_tokens' in row and row.get('answer_tokens') is not None]) + answer_text_list = workflow.get_answer_text_list() + answer_text = '\n\n'.join( + '\n\n'.join([a.get('content') for a in answer]) for answer in + answer_text_list) + if workflow.chat_record is not None: + chat_record = workflow.chat_record + chat_record.answer_text = answer_text + chat_record.details = details + chat_record.message_tokens = message_tokens + chat_record.answer_tokens = answer_tokens + chat_record.answer_text_list = answer_text_list + chat_record.run_time = time.time() - workflow.context['start_time'] + else: + chat_record = ChatRecord(id=chat_record_id, + chat_id=chat_id, + problem_text=question, + answer_text=answer_text, + details=details, + message_tokens=message_tokens, + answer_tokens=answer_tokens, + answer_text_list=answer_text_list, + run_time=time.time() - workflow.context['start_time'], + index=0) + asker = workflow.context.get('asker', None) + self.chat_info.append_chat_record(chat_record, self.client_id, asker) + # 重新设置缓存 + chat_cache.set(chat_id, + self.chat_info, timeout=60 * 30) + if self.client_type == 
AuthenticationType.APPLICATION_ACCESS_TOKEN.value: + application_public_access_client = (QuerySet(ApplicationPublicAccessClient) + .filter(client_id=self.client_id, + application_id=self.chat_info.application.id).first()) + if application_public_access_client is not None: + application_public_access_client.access_num = application_public_access_client.access_num + 1 + application_public_access_client.intraday_access_num = application_public_access_client.intraday_access_num + 1 + application_public_access_client.save() + + +class NodeResult: + def __init__(self, node_variable: Dict, workflow_variable: Dict, + _write_context=write_context, _is_interrupt=is_interrupt): + self._write_context = _write_context + self.node_variable = node_variable + self.workflow_variable = workflow_variable + self._is_interrupt = _is_interrupt + + def write_context(self, node, workflow): + return self._write_context(self.node_variable, self.workflow_variable, node, workflow) + + def is_assertion_result(self): + return 'branch_id' in self.node_variable + + def is_interrupt_exec(self, current_node): + """ + 是否中断执行 + @param current_node: + @return: + """ + return self._is_interrupt(current_node, self.node_variable, self.workflow_variable) + + +class ReferenceAddressSerializer(serializers.Serializer): + node_id = serializers.CharField(required=True, error_messages=ErrMessage.char("节点id")) + fields = serializers.ListField( + child=serializers.CharField(required=True, error_messages=ErrMessage.char("节点字段")), required=True, + error_messages=ErrMessage.list("节点字段数组")) + + +class FlowParamsSerializer(serializers.Serializer): + # 历史对答 + history_chat_record = serializers.ListField(child=InstanceField(model_type=ChatRecord, required=True), + error_messages=ErrMessage.list("历史对答")) + + question = serializers.CharField(required=True, error_messages=ErrMessage.list("用户问题")) + + chat_id = serializers.CharField(required=True, error_messages=ErrMessage.list("对话id")) + + chat_record_id = 
serializers.CharField(required=True, error_messages=ErrMessage.char("对话记录id")) + + stream = serializers.BooleanField(required=True, error_messages=ErrMessage.boolean("流式输出")) + + client_id = serializers.CharField(required=False, error_messages=ErrMessage.char("客户端id")) + + client_type = serializers.CharField(required=False, error_messages=ErrMessage.char("客户端类型")) + + user_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid("用户id")) + re_chat = serializers.BooleanField(required=True, error_messages=ErrMessage.boolean("换个答案")) + + +class INode: + view_type = 'many_view' + + @abstractmethod + def save_context(self, details, workflow_manage): + pass + + def get_answer_list(self) -> List[Answer] | None: + if self.answer_text is None: + return None + reasoning_content_enable = self.context.get('model_setting', {}).get('reasoning_content_enable', False) + return [ + Answer(self.answer_text, self.view_type, self.runtime_node_id, self.workflow_params['chat_record_id'], {}, + self.runtime_node_id, self.context.get('reasoning_content', '') if reasoning_content_enable else '')] + + def __init__(self, node, workflow_params, workflow_manage, up_node_id_list=None, + get_node_params=lambda node: node.properties.get('node_data')): + # 当前步骤上下文,用于存储当前步骤信息 + self.status = 200 + self.err_message = '' + self.node = node + self.node_params = get_node_params(node) + self.workflow_params = workflow_params + self.workflow_manage = workflow_manage + self.node_params_serializer = None + self.flow_params_serializer = None + self.context = {} + self.answer_text = None + self.id = node.id + if up_node_id_list is None: + up_node_id_list = [] + self.up_node_id_list = up_node_id_list + self.node_chunk = NodeChunk() + self.runtime_node_id = sha1(uuid.NAMESPACE_DNS.bytes + bytes(str(uuid.uuid5(uuid.NAMESPACE_DNS, + "".join([*sorted(up_node_id_list), + node.id]))), + "utf-8")).hexdigest() + + def valid_args(self, node_params, flow_params): + flow_params_serializer_class = 
self.get_flow_params_serializer_class() + node_params_serializer_class = self.get_node_params_serializer_class() + if flow_params_serializer_class is not None and flow_params is not None: + self.flow_params_serializer = flow_params_serializer_class(data=flow_params) + self.flow_params_serializer.is_valid(raise_exception=True) + if node_params_serializer_class is not None: + self.node_params_serializer = node_params_serializer_class(data=node_params) + self.node_params_serializer.is_valid(raise_exception=True) + if self.node.properties.get('status', 200) != 200: + raise ValidationError(ErrorDetail(f'节点{self.node.properties.get("stepName")} 不可用')) + + def get_reference_field(self, fields: List[str]): + return self.get_field(self.context, fields) + + @staticmethod + def get_field(obj, fields: List[str]): + for field in fields: + value = obj.get(field) + if value is None: + return None + else: + obj = value + return obj + + @abstractmethod + def get_node_params_serializer_class(self) -> Type[serializers.Serializer]: + pass + + def get_flow_params_serializer_class(self) -> Type[serializers.Serializer]: + return FlowParamsSerializer + + def get_write_error_context(self, e): + self.status = 500 + self.answer_text = str(e) + self.err_message = str(e) + self.context['run_time'] = time.time() - self.context['start_time'] + + def write_error_context(answer, status=200): + pass + + return write_error_context + + def run(self) -> NodeResult: + """ + :return: 执行结果 + """ + start_time = time.time() + self.context['start_time'] = start_time + result = self._run() + self.context['run_time'] = time.time() - start_time + return result + + def _run(self): + result = self.execute() + return result + + def execute(self, **kwargs) -> NodeResult: + pass + + def get_details(self, index: int, **kwargs): + """ + 运行详情 + :return: 步骤详情 + """ + return {} diff --git a/apps/application/flow/step_node/__init__.py b/apps/application/flow/step_node/__init__.py new file mode 100644 index 
00000000000..0ce1d5fedd1 --- /dev/null +++ b/apps/application/flow/step_node/__init__.py @@ -0,0 +1,42 @@ +# coding=utf-8 +""" + @project: maxkb + @Author:虎 + @file: __init__.py.py + @date:2024/6/7 14:43 + @desc: +""" +from .ai_chat_step_node import * +from .application_node import BaseApplicationNode +from .condition_node import * +from .direct_reply_node import * +from .form_node import * +from .function_lib_node import * +from .function_node import * +from .question_node import * +from .reranker_node import * + +from .document_extract_node import * +from .image_understand_step_node import * +from .image_generate_step_node import * + +from .search_dataset_node import * +from .speech_to_text_step_node import BaseSpeechToTextNode +from .start_node import * +from .text_to_speech_step_node.impl.base_text_to_speech_node import BaseTextToSpeechNode +from .variable_assign_node import BaseVariableAssignNode +from .mcp_node import BaseMcpNode + +node_list = [BaseStartStepNode, BaseChatNode, BaseSearchDatasetNode, BaseQuestionNode, + BaseConditionNode, BaseReplyNode, + BaseFunctionNodeNode, BaseFunctionLibNodeNode, BaseRerankerNode, BaseApplicationNode, + BaseDocumentExtractNode, + BaseImageUnderstandNode, BaseFormNode, BaseSpeechToTextNode, BaseTextToSpeechNode, + BaseImageGenerateNode, BaseVariableAssignNode, BaseMcpNode] + + +def get_node(node_type): + find_list = [node for node in node_list if node.type == node_type] + if len(find_list) > 0: + return find_list[0] + return None diff --git a/apps/application/flow/step_node/ai_chat_step_node/__init__.py b/apps/application/flow/step_node/ai_chat_step_node/__init__.py new file mode 100644 index 00000000000..1929ae2af49 --- /dev/null +++ b/apps/application/flow/step_node/ai_chat_step_node/__init__.py @@ -0,0 +1,9 @@ +# coding=utf-8 +""" + @project: maxkb + @Author:虎 + @file: __init__.py + @date:2024/6/11 15:29 + @desc: +""" +from .impl import * diff --git a/apps/application/flow/step_node/ai_chat_step_node/i_chat_node.py 
b/apps/application/flow/step_node/ai_chat_step_node/i_chat_node.py new file mode 100644 index 00000000000..a83d2ef5771 --- /dev/null +++ b/apps/application/flow/step_node/ai_chat_step_node/i_chat_node.py @@ -0,0 +1,58 @@ +# coding=utf-8 +""" + @project: maxkb + @Author:虎 + @file: i_chat_node.py + @date:2024/6/4 13:58 + @desc: +""" +from typing import Type + +from django.utils.translation import gettext_lazy as _ +from rest_framework import serializers + +from application.flow.i_step_node import INode, NodeResult +from common.util.field_message import ErrMessage + + +class ChatNodeSerializer(serializers.Serializer): + model_id = serializers.CharField(required=True, error_messages=ErrMessage.char(_("Model id"))) + system = serializers.CharField(required=False, allow_blank=True, allow_null=True, + error_messages=ErrMessage.char(_("Role Setting"))) + prompt = serializers.CharField(required=True, error_messages=ErrMessage.char(_("Prompt word"))) + # 多轮对话数量 + dialogue_number = serializers.IntegerField(required=True, error_messages=ErrMessage.integer( + _("Number of multi-round conversations"))) + + is_result = serializers.BooleanField(required=False, + error_messages=ErrMessage.boolean(_('Whether to return content'))) + + model_params_setting = serializers.DictField(required=False, + error_messages=ErrMessage.dict(_("Model parameter settings"))) + model_setting = serializers.DictField(required=False, + error_messages=ErrMessage.dict('Model settings')) + dialogue_type = serializers.CharField(required=False, allow_blank=True, allow_null=True, + error_messages=ErrMessage.char(_("Context Type"))) + mcp_enable = serializers.BooleanField(required=False, + error_messages=ErrMessage.boolean(_("Whether to enable MCP"))) + mcp_servers = serializers.JSONField(required=False, error_messages=ErrMessage.list(_("MCP Server"))) + + +class IChatNode(INode): + type = 'ai-chat-node' + + def get_node_params_serializer_class(self) -> Type[serializers.Serializer]: + return ChatNodeSerializer 
+ + def _run(self): + return self.execute(**self.node_params_serializer.data, **self.flow_params_serializer.data) + + def execute(self, model_id, system, prompt, dialogue_number, history_chat_record, stream, chat_id, + chat_record_id, + model_params_setting=None, + dialogue_type=None, + model_setting=None, + mcp_enable=False, + mcp_servers=None, + **kwargs) -> NodeResult: + pass diff --git a/apps/application/flow/step_node/ai_chat_step_node/impl/__init__.py b/apps/application/flow/step_node/ai_chat_step_node/impl/__init__.py new file mode 100644 index 00000000000..79051a999fb --- /dev/null +++ b/apps/application/flow/step_node/ai_chat_step_node/impl/__init__.py @@ -0,0 +1,9 @@ +# coding=utf-8 +""" + @project: maxkb + @Author:虎 + @file: __init__.py + @date:2024/6/11 15:34 + @desc: +""" +from .base_chat_node import BaseChatNode diff --git a/apps/application/flow/step_node/ai_chat_step_node/impl/base_chat_node.py b/apps/application/flow/step_node/ai_chat_step_node/impl/base_chat_node.py new file mode 100644 index 00000000000..56efa4e54ef --- /dev/null +++ b/apps/application/flow/step_node/ai_chat_step_node/impl/base_chat_node.py @@ -0,0 +1,288 @@ +# coding=utf-8 +""" + @project: maxkb + @Author:虎 + @file: base_question_node.py + @date:2024/6/4 14:30 + @desc: +""" +import asyncio +import json +import re +import time +from functools import reduce +from types import AsyncGeneratorType +from typing import List, Dict + +from django.db.models import QuerySet +from langchain.schema import HumanMessage, SystemMessage +from langchain_core.messages import BaseMessage, AIMessage, AIMessageChunk, ToolMessage +from langchain_mcp_adapters.client import MultiServerMCPClient +from langgraph.prebuilt import create_react_agent + +from application.flow.i_step_node import NodeResult, INode +from application.flow.step_node.ai_chat_step_node.i_chat_node import IChatNode +from application.flow.tools import Reasoning +from setting.models import Model +from setting.models_provider import 
get_model_credential +from setting.models_provider.tools import get_model_instance_by_model_user_id + +tool_message_template = """ +
+ + Called MCP Tool: %s + + +```json +%s +``` +
+ +""" + + +def _write_context(node_variable: Dict, workflow_variable: Dict, node: INode, workflow, answer: str, + reasoning_content: str): + chat_model = node_variable.get('chat_model') + message_tokens = chat_model.get_num_tokens_from_messages(node_variable.get('message_list')) + answer_tokens = chat_model.get_num_tokens(answer) + node.context['message_tokens'] = message_tokens + node.context['answer_tokens'] = answer_tokens + node.context['answer'] = answer + node.context['history_message'] = node_variable['history_message'] + node.context['question'] = node_variable['question'] + node.context['run_time'] = time.time() - node.context['start_time'] + node.context['reasoning_content'] = reasoning_content + if workflow.is_result(node, NodeResult(node_variable, workflow_variable)): + node.answer_text = answer + + +def write_context_stream(node_variable: Dict, workflow_variable: Dict, node: INode, workflow): + """ + 写入上下文数据 (流式) + @param node_variable: 节点数据 + @param workflow_variable: 全局数据 + @param node: 节点 + @param workflow: 工作流管理器 + """ + response = node_variable.get('result') + answer = '' + reasoning_content = '' + model_setting = node.context.get('model_setting', + {'reasoning_content_enable': False, 'reasoning_content_end': '', + 'reasoning_content_start': ''}) + reasoning = Reasoning(model_setting.get('reasoning_content_start', ''), + model_setting.get('reasoning_content_end', '')) + response_reasoning_content = False + + for chunk in response: + reasoning_chunk = reasoning.get_reasoning_content(chunk) + content_chunk = reasoning_chunk.get('content') + if 'reasoning_content' in chunk.additional_kwargs: + response_reasoning_content = True + reasoning_content_chunk = chunk.additional_kwargs.get('reasoning_content', '') + else: + reasoning_content_chunk = reasoning_chunk.get('reasoning_content') + answer += content_chunk + if reasoning_content_chunk is None: + reasoning_content_chunk = '' + reasoning_content += reasoning_content_chunk + yield {'content': 
content_chunk, + 'reasoning_content': reasoning_content_chunk if model_setting.get('reasoning_content_enable', + False) else ''} + + reasoning_chunk = reasoning.get_end_reasoning_content() + answer += reasoning_chunk.get('content') + reasoning_content_chunk = "" + if not response_reasoning_content: + reasoning_content_chunk = reasoning_chunk.get( + 'reasoning_content') + yield {'content': reasoning_chunk.get('content'), + 'reasoning_content': reasoning_content_chunk if model_setting.get('reasoning_content_enable', + False) else ''} + _write_context(node_variable, workflow_variable, node, workflow, answer, reasoning_content) + + +async def _yield_mcp_response(chat_model, message_list, mcp_servers): + async with MultiServerMCPClient(json.loads(mcp_servers)) as client: + agent = create_react_agent(chat_model, client.get_tools()) + response = agent.astream({"messages": message_list}, stream_mode='messages') + async for chunk in response: + if isinstance(chunk[0], ToolMessage): + content = tool_message_template % (chunk[0].name, chunk[0].content) + chunk[0].content = content + yield chunk[0] + if isinstance(chunk[0], AIMessageChunk): + yield chunk[0] + + +def mcp_response_generator(chat_model, message_list, mcp_servers): + loop = asyncio.new_event_loop() + try: + async_gen = _yield_mcp_response(chat_model, message_list, mcp_servers) + while True: + try: + chunk = loop.run_until_complete(anext_async(async_gen)) + yield chunk + except StopAsyncIteration: + break + except Exception as e: + print(f'exception: {e}') + finally: + loop.close() + + +async def anext_async(agen): + return await agen.__anext__() + + +def write_context(node_variable: Dict, workflow_variable: Dict, node: INode, workflow): + """ + 写入上下文数据 + @param node_variable: 节点数据 + @param workflow_variable: 全局数据 + @param node: 节点实例对象 + @param workflow: 工作流管理器 + """ + response = node_variable.get('result') + model_setting = node.context.get('model_setting', + {'reasoning_content_enable': False, 
'reasoning_content_end': '', + 'reasoning_content_start': ''}) + reasoning = Reasoning(model_setting.get('reasoning_content_start'), model_setting.get('reasoning_content_end')) + reasoning_result = reasoning.get_reasoning_content(response) + reasoning_result_end = reasoning.get_end_reasoning_content() + content = reasoning_result.get('content') + reasoning_result_end.get('content') + if 'reasoning_content' in response.response_metadata: + reasoning_content = response.response_metadata.get('reasoning_content', '') + else: + reasoning_content = reasoning_result.get('reasoning_content') + reasoning_result_end.get('reasoning_content') + _write_context(node_variable, workflow_variable, node, workflow, content, reasoning_content) + + +def get_default_model_params_setting(model_id): + model = QuerySet(Model).filter(id=model_id).first() + credential = get_model_credential(model.provider, model.model_type, model.model_name) + model_params_setting = credential.get_model_params_setting_form( + model.model_name).get_default_form_data() + return model_params_setting + + +def get_node_message(chat_record, runtime_node_id): + node_details = chat_record.get_node_details_runtime_node_id(runtime_node_id) + if node_details is None: + return [] + return [HumanMessage(node_details.get('question')), AIMessage(node_details.get('answer'))] + + +def get_workflow_message(chat_record): + return [chat_record.get_human_message(), chat_record.get_ai_message()] + + +def get_message(chat_record, dialogue_type, runtime_node_id): + return get_node_message(chat_record, runtime_node_id) if dialogue_type == 'NODE' else get_workflow_message( + chat_record) + + +class BaseChatNode(IChatNode): + def save_context(self, details, workflow_manage): + self.context['answer'] = details.get('answer') + self.context['question'] = details.get('question') + self.context['reasoning_content'] = details.get('reasoning_content') + if self.node_params.get('is_result', False): + self.answer_text = details.get('answer') + 
+ def execute(self, model_id, system, prompt, dialogue_number, history_chat_record, stream, chat_id, chat_record_id, + model_params_setting=None, + dialogue_type=None, + model_setting=None, + mcp_enable=False, + mcp_servers=None, + **kwargs) -> NodeResult: + if dialogue_type is None: + dialogue_type = 'WORKFLOW' + + if model_params_setting is None: + model_params_setting = get_default_model_params_setting(model_id) + if model_setting is None: + model_setting = {'reasoning_content_enable': False, 'reasoning_content_end': '', + 'reasoning_content_start': ''} + self.context['model_setting'] = model_setting + chat_model = get_model_instance_by_model_user_id(model_id, self.flow_params_serializer.data.get('user_id'), + **model_params_setting) + history_message = self.get_history_message(history_chat_record, dialogue_number, dialogue_type, + self.runtime_node_id) + self.context['history_message'] = history_message + question = self.generate_prompt_question(prompt) + self.context['question'] = question.content + system = self.workflow_manage.generate_prompt(system) + self.context['system'] = system + message_list = self.generate_message_list(system, prompt, history_message) + self.context['message_list'] = message_list + + if mcp_enable and mcp_servers is not None and '"stdio"' not in mcp_servers: + r = mcp_response_generator(chat_model, message_list, mcp_servers) + return NodeResult( + {'result': r, 'chat_model': chat_model, 'message_list': message_list, + 'history_message': history_message, 'question': question.content}, {}, + _write_context=write_context_stream) + + if stream: + r = chat_model.stream(message_list) + return NodeResult({'result': r, 'chat_model': chat_model, 'message_list': message_list, + 'history_message': history_message, 'question': question.content}, {}, + _write_context=write_context_stream) + else: + r = chat_model.invoke(message_list) + return NodeResult({'result': r, 'chat_model': chat_model, 'message_list': message_list, + 'history_message': 
history_message, 'question': question.content}, {}, + _write_context=write_context) + + @staticmethod + def get_history_message(history_chat_record, dialogue_number, dialogue_type, runtime_node_id): + start_index = len(history_chat_record) - dialogue_number + history_message = reduce(lambda x, y: [*x, *y], [ + get_message(history_chat_record[index], dialogue_type, runtime_node_id) + for index in + range(start_index if start_index > 0 else 0, len(history_chat_record))], []) + for message in history_message: + if isinstance(message.content, str): + message.content = re.sub('[\d\D]*?<\/form_rander>', '', message.content) + return history_message + + def generate_prompt_question(self, prompt): + return HumanMessage(self.workflow_manage.generate_prompt(prompt)) + + def generate_message_list(self, system: str, prompt: str, history_message): + if system is not None and len(system) > 0: + return [SystemMessage(self.workflow_manage.generate_prompt(system)), *history_message, + HumanMessage(self.workflow_manage.generate_prompt(prompt))] + else: + return [*history_message, HumanMessage(self.workflow_manage.generate_prompt(prompt))] + + @staticmethod + def reset_message_list(message_list: List[BaseMessage], answer_text): + result = [{'role': 'user' if isinstance(message, HumanMessage) else 'ai', 'content': message.content} for + message + in + message_list] + result.append({'role': 'ai', 'content': answer_text}) + return result + + def get_details(self, index: int, **kwargs): + return { + 'name': self.node.properties.get('stepName'), + "index": index, + 'run_time': self.context.get('run_time'), + 'system': self.context.get('system'), + 'history_message': [{'content': message.content, 'role': message.type} for message in + (self.context.get('history_message') if self.context.get( + 'history_message') is not None else [])], + 'question': self.context.get('question'), + 'answer': self.context.get('answer'), + 'reasoning_content': self.context.get('reasoning_content'), + 'type': 
self.node.type, + 'message_tokens': self.context.get('message_tokens'), + 'answer_tokens': self.context.get('answer_tokens'), + 'status': self.status, + 'err_message': self.err_message + } diff --git a/apps/application/flow/step_node/application_node/__init__.py b/apps/application/flow/step_node/application_node/__init__.py new file mode 100644 index 00000000000..d1ea91ca7f8 --- /dev/null +++ b/apps/application/flow/step_node/application_node/__init__.py @@ -0,0 +1,2 @@ +# coding=utf-8 +from .impl import * diff --git a/apps/application/flow/step_node/application_node/i_application_node.py b/apps/application/flow/step_node/application_node/i_application_node.py new file mode 100644 index 00000000000..6394fa49c7b --- /dev/null +++ b/apps/application/flow/step_node/application_node/i_application_node.py @@ -0,0 +1,86 @@ +# coding=utf-8 +from typing import Type + +from rest_framework import serializers + +from application.flow.i_step_node import INode, NodeResult +from common.util.field_message import ErrMessage + +from django.utils.translation import gettext_lazy as _ + + +class ApplicationNodeSerializer(serializers.Serializer): + application_id = serializers.CharField(required=True, error_messages=ErrMessage.char(_("Application ID"))) + question_reference_address = serializers.ListField(required=True, + error_messages=ErrMessage.list(_("User Questions"))) + api_input_field_list = serializers.ListField(required=False, error_messages=ErrMessage.list(_("API Input Fields"))) + user_input_field_list = serializers.ListField(required=False, + error_messages=ErrMessage.uuid(_("User Input Fields"))) + image_list = serializers.ListField(required=False, error_messages=ErrMessage.list(_("picture"))) + document_list = serializers.ListField(required=False, error_messages=ErrMessage.list(_("document"))) + audio_list = serializers.ListField(required=False, error_messages=ErrMessage.list(_("Audio"))) + child_node = serializers.DictField(required=False, allow_null=True, + 
error_messages=ErrMessage.dict(_("Child Nodes"))) + node_data = serializers.DictField(required=False, allow_null=True, error_messages=ErrMessage.dict(_("Form Data"))) + + +class IApplicationNode(INode): + type = 'application-node' + + def get_node_params_serializer_class(self) -> Type[serializers.Serializer]: + return ApplicationNodeSerializer + + def _run(self): + question = self.workflow_manage.get_reference_field( + self.node_params_serializer.data.get('question_reference_address')[0], + self.node_params_serializer.data.get('question_reference_address')[1:]) + kwargs = {} + for api_input_field in self.node_params_serializer.data.get('api_input_field_list', []): + value = api_input_field.get('value', [''])[0] if api_input_field.get('value') else '' + kwargs[api_input_field['variable']] = self.workflow_manage.get_reference_field(value, + api_input_field['value'][ + 1:]) if value != '' else '' + + for user_input_field in self.node_params_serializer.data.get('user_input_field_list', []): + value = user_input_field.get('value', [''])[0] if user_input_field.get('value') else '' + kwargs[user_input_field['field']] = self.workflow_manage.get_reference_field(value, + user_input_field['value'][ + 1:]) if value != '' else '' + # 判断是否包含这个属性 + app_document_list = self.node_params_serializer.data.get('document_list', []) + if app_document_list and len(app_document_list) > 0: + app_document_list = self.workflow_manage.get_reference_field( + app_document_list[0], + app_document_list[1:]) + for document in app_document_list: + if 'file_id' not in document: + raise ValueError( + _("Parameter value error: The uploaded document lacks file_id, and the document upload fails")) + app_image_list = self.node_params_serializer.data.get('image_list', []) + if app_image_list and len(app_image_list) > 0: + app_image_list = self.workflow_manage.get_reference_field( + app_image_list[0], + app_image_list[1:]) + for image in app_image_list: + if 'file_id' not in image: + raise ValueError( + 
_("Parameter value error: The uploaded image lacks file_id, and the image upload fails")) + + app_audio_list = self.node_params_serializer.data.get('audio_list', []) + if app_audio_list and len(app_audio_list) > 0: + app_audio_list = self.workflow_manage.get_reference_field( + app_audio_list[0], + app_audio_list[1:]) + for audio in app_audio_list: + if 'file_id' not in audio: + raise ValueError( + _("Parameter value error: The uploaded audio lacks file_id, and the audio upload fails.")) + return self.execute(**self.node_params_serializer.data, **self.flow_params_serializer.data, + app_document_list=app_document_list, app_image_list=app_image_list, + app_audio_list=app_audio_list, + message=str(question), **kwargs) + + def execute(self, application_id, message, chat_id, chat_record_id, stream, re_chat, client_id, client_type, + app_document_list=None, app_image_list=None, app_audio_list=None, child_node=None, node_data=None, + **kwargs) -> NodeResult: + pass diff --git a/apps/application/flow/step_node/application_node/impl/__init__.py b/apps/application/flow/step_node/application_node/impl/__init__.py new file mode 100644 index 00000000000..e31a8d885cd --- /dev/null +++ b/apps/application/flow/step_node/application_node/impl/__init__.py @@ -0,0 +1,2 @@ +# coding=utf-8 +from .base_application_node import BaseApplicationNode diff --git a/apps/application/flow/step_node/application_node/impl/base_application_node.py b/apps/application/flow/step_node/application_node/impl/base_application_node.py new file mode 100644 index 00000000000..95445f45612 --- /dev/null +++ b/apps/application/flow/step_node/application_node/impl/base_application_node.py @@ -0,0 +1,267 @@ +# coding=utf-8 +import json +import re +import time +import uuid +from typing import Dict, List + +from application.flow.common import Answer +from application.flow.i_step_node import NodeResult, INode +from application.flow.step_node.application_node.i_application_node import IApplicationNode +from 
application.models import Chat + + +def string_to_uuid(input_str): + return str(uuid.uuid5(uuid.NAMESPACE_DNS, input_str)) + + +def _is_interrupt_exec(node, node_variable: Dict, workflow_variable: Dict): + return node_variable.get('is_interrupt_exec', False) + + +def _write_context(node_variable: Dict, workflow_variable: Dict, node: INode, workflow, answer: str, + reasoning_content: str): + result = node_variable.get('result') + node.context['application_node_dict'] = node_variable.get('application_node_dict') + node.context['node_dict'] = node_variable.get('node_dict', {}) + node.context['is_interrupt_exec'] = node_variable.get('is_interrupt_exec') + node.context['message_tokens'] = result.get('usage', {}).get('prompt_tokens', 0) + node.context['answer_tokens'] = result.get('usage', {}).get('completion_tokens', 0) + node.context['answer'] = answer + node.context['result'] = answer + node.context['reasoning_content'] = reasoning_content + node.context['question'] = node_variable['question'] + node.context['run_time'] = time.time() - node.context['start_time'] + if workflow.is_result(node, NodeResult(node_variable, workflow_variable)): + node.answer_text = answer + + +def write_context_stream(node_variable: Dict, workflow_variable: Dict, node: INode, workflow): + """ + 写入上下文数据 (流式) + @param node_variable: 节点数据 + @param workflow_variable: 全局数据 + @param node: 节点 + @param workflow: 工作流管理器 + """ + response = node_variable.get('result') + answer = '' + reasoning_content = '' + usage = {} + node_child_node = {} + application_node_dict = node.context.get('application_node_dict', {}) + is_interrupt_exec = False + for chunk in response: + # 先把流转成字符串 + response_content = chunk.decode('utf-8')[6:] + response_content = json.loads(response_content) + content = response_content.get('content', '') + runtime_node_id = response_content.get('runtime_node_id', '') + chat_record_id = response_content.get('chat_record_id', '') + child_node = response_content.get('child_node') + 
view_type = response_content.get('view_type') + node_type = response_content.get('node_type') + real_node_id = response_content.get('real_node_id') + node_is_end = response_content.get('node_is_end', False) + _reasoning_content = response_content.get('reasoning_content', '') + if node_type == 'form-node': + is_interrupt_exec = True + answer += content + reasoning_content += _reasoning_content + node_child_node = {'runtime_node_id': runtime_node_id, 'chat_record_id': chat_record_id, + 'child_node': child_node} + + if real_node_id is not None: + application_node = application_node_dict.get(real_node_id, None) + if application_node is None: + + application_node_dict[real_node_id] = {'content': content, + 'runtime_node_id': runtime_node_id, + 'chat_record_id': chat_record_id, + 'child_node': child_node, + 'index': len(application_node_dict), + 'view_type': view_type, + 'reasoning_content': _reasoning_content} + else: + application_node['content'] += content + application_node['reasoning_content'] += _reasoning_content + + yield {'content': content, + 'node_type': node_type, + 'runtime_node_id': runtime_node_id, 'chat_record_id': chat_record_id, + 'reasoning_content': _reasoning_content, + 'child_node': child_node, + 'real_node_id': real_node_id, + 'node_is_end': node_is_end, + 'view_type': view_type} + usage = response_content.get('usage', {}) + node_variable['result'] = {'usage': usage} + node_variable['is_interrupt_exec'] = is_interrupt_exec + node_variable['child_node'] = node_child_node + node_variable['application_node_dict'] = application_node_dict + _write_context(node_variable, workflow_variable, node, workflow, answer, reasoning_content) + + +def write_context(node_variable: Dict, workflow_variable: Dict, node: INode, workflow): + """ + 写入上下文数据 + @param node_variable: 节点数据 + @param workflow_variable: 全局数据 + @param node: 节点实例对象 + @param workflow: 工作流管理器 + """ + response = node_variable.get('result', {}).get('data', {}) + node_variable['result'] = {'usage': 
{'completion_tokens': response.get('completion_tokens'), + 'prompt_tokens': response.get('prompt_tokens')}} + answer = response.get('content', '') or "抱歉,没有查找到相关内容,请重新描述您的问题或提供更多信息。" + reasoning_content = response.get('reasoning_content', '') + answer_list = response.get('answer_list', []) + node_variable['application_node_dict'] = {answer.get('real_node_id'): {**answer, 'index': index} for answer, index + in + zip(answer_list, range(len(answer_list)))} + _write_context(node_variable, workflow_variable, node, workflow, answer, reasoning_content) + + +def reset_application_node_dict(application_node_dict, runtime_node_id, node_data): + try: + if application_node_dict is None: + return + for key in application_node_dict: + application_node = application_node_dict[key] + if application_node.get('runtime_node_id') == runtime_node_id: + content: str = application_node.get('content') + match = re.search('.*?', content) + if match: + form_setting_str = match.group().replace('', '').replace('', '') + form_setting = json.loads(form_setting_str) + form_setting['is_submit'] = True + form_setting['form_data'] = node_data + value = f'{json.dumps(form_setting)}' + res = re.sub('.*?', + '${value}', content) + application_node['content'] = res.replace('${value}', value) + except Exception as e: + pass + + +class BaseApplicationNode(IApplicationNode): + def get_answer_list(self) -> List[Answer] | None: + if self.answer_text is None: + return None + application_node_dict = self.context.get('application_node_dict') + if application_node_dict is None or len(application_node_dict) == 0: + return [ + Answer(self.answer_text, self.view_type, self.runtime_node_id, self.workflow_params['chat_record_id'], + self.context.get('child_node'), self.runtime_node_id, '')] + else: + return [Answer(n.get('content'), n.get('view_type'), self.runtime_node_id, + self.workflow_params['chat_record_id'], {'runtime_node_id': n.get('runtime_node_id'), + 'chat_record_id': n.get('chat_record_id') + , 
'child_node': n.get('child_node')}, n.get('real_node_id'), + n.get('reasoning_content', '')) + for n in + sorted(application_node_dict.values(), key=lambda item: item.get('index'))] + + def save_context(self, details, workflow_manage): + self.context['answer'] = details.get('answer') + self.context['result'] = details.get('answer') + self.context['question'] = details.get('question') + self.context['type'] = details.get('type') + self.context['reasoning_content'] = details.get('reasoning_content') + if self.node_params.get('is_result', False): + self.answer_text = details.get('answer') + + def execute(self, application_id, message, chat_id, chat_record_id, stream, re_chat, client_id, client_type, + app_document_list=None, app_image_list=None, app_audio_list=None, child_node=None, node_data=None, + **kwargs) -> NodeResult: + from application.serializers.chat_message_serializers import ChatMessageSerializer + # 生成嵌入应用的chat_id + current_chat_id = string_to_uuid(chat_id + application_id) + Chat.objects.get_or_create(id=current_chat_id, defaults={ + 'application_id': application_id, + 'abstract': message[0:1024], + 'client_id': client_id, + }) + if app_document_list is None: + app_document_list = [] + if app_image_list is None: + app_image_list = [] + if app_audio_list is None: + app_audio_list = [] + runtime_node_id = None + record_id = None + child_node_value = None + if child_node is not None: + runtime_node_id = child_node.get('runtime_node_id') + record_id = child_node.get('chat_record_id') + child_node_value = child_node.get('child_node') + application_node_dict = self.context.get('application_node_dict') + reset_application_node_dict(application_node_dict, runtime_node_id, node_data) + + response = ChatMessageSerializer( + data={'chat_id': current_chat_id, 'message': message, + 're_chat': re_chat, + 'stream': stream, + 'application_id': application_id, + 'client_id': client_id, + 'client_type': client_type, + 'document_list': app_document_list, + 'image_list': 
app_image_list, + 'audio_list': app_audio_list, + 'runtime_node_id': runtime_node_id, + 'chat_record_id': record_id, + 'child_node': child_node_value, + 'node_data': node_data, + 'form_data': kwargs}).chat() + if response.status_code == 200: + if stream: + content_generator = response.streaming_content + return NodeResult({'result': content_generator, 'question': message}, {}, + _write_context=write_context_stream, _is_interrupt=_is_interrupt_exec) + else: + data = json.loads(response.content) + return NodeResult({'result': data, 'question': message}, {}, + _write_context=write_context, _is_interrupt=_is_interrupt_exec) + + def get_details(self, index: int, **kwargs): + global_fields = [] + for api_input_field in self.node_params_serializer.data.get('api_input_field_list', []): + value = api_input_field.get('value', [''])[0] if api_input_field.get('value') else '' + global_fields.append({ + 'label': api_input_field['variable'], + 'key': api_input_field['variable'], + 'value': self.workflow_manage.get_reference_field( + value, + api_input_field['value'][1:] + ) if value != '' else '' + }) + + for user_input_field in self.node_params_serializer.data.get('user_input_field_list', []): + value = user_input_field.get('value', [''])[0] if user_input_field.get('value') else '' + global_fields.append({ + 'label': user_input_field['label'], + 'key': user_input_field['field'], + 'value': self.workflow_manage.get_reference_field( + value, + user_input_field['value'][1:] + ) if value != '' else '' + }) + return { + 'name': self.node.properties.get('stepName'), + "index": index, + "info": self.node.properties.get('node_data'), + 'run_time': self.context.get('run_time'), + 'question': self.context.get('question'), + 'answer': self.context.get('answer'), + 'reasoning_content': self.context.get('reasoning_content'), + 'type': self.node.type, + 'message_tokens': self.context.get('message_tokens'), + 'answer_tokens': self.context.get('answer_tokens'), + 'status': self.status, + 
'err_message': self.err_message, + 'global_fields': global_fields, + 'document_list': self.workflow_manage.document_list, + 'image_list': self.workflow_manage.image_list, + 'audio_list': self.workflow_manage.audio_list, + 'application_node_dict': self.context.get('application_node_dict') + } diff --git a/apps/application/flow/step_node/condition_node/__init__.py b/apps/application/flow/step_node/condition_node/__init__.py new file mode 100644 index 00000000000..57638504c9e --- /dev/null +++ b/apps/application/flow/step_node/condition_node/__init__.py @@ -0,0 +1,9 @@ +# coding=utf-8 +""" + @project: maxkb + @Author:虎 + @file: __init__.py.py + @date:2024/6/7 14:43 + @desc: +""" +from .impl import * diff --git a/apps/application/flow/step_node/condition_node/compare/__init__.py b/apps/application/flow/step_node/condition_node/compare/__init__.py new file mode 100644 index 00000000000..c015f6fea45 --- /dev/null +++ b/apps/application/flow/step_node/condition_node/compare/__init__.py @@ -0,0 +1,30 @@ +# coding=utf-8 +""" + @project: maxkb + @Author:虎 + @file: __init__.py.py + @date:2024/6/7 14:43 + @desc: +""" + +from .contain_compare import * +from .equal_compare import * +from .ge_compare import * +from .gt_compare import * +from .is_not_null_compare import * +from .is_not_true import IsNotTrueCompare +from .is_null_compare import * +from .is_true import IsTrueCompare +from .le_compare import * +from .len_equal_compare import * +from .len_ge_compare import * +from .len_gt_compare import * +from .len_le_compare import * +from .len_lt_compare import * +from .lt_compare import * +from .not_contain_compare import * + +compare_handle_list = [GECompare(), GTCompare(), ContainCompare(), EqualCompare(), LTCompare(), LECompare(), + LenLECompare(), LenGECompare(), LenEqualCompare(), LenGTCompare(), LenLTCompare(), + IsNullCompare(), + IsNotNullCompare(), NotContainCompare(), IsTrueCompare(), IsNotTrueCompare()] diff --git 
a/apps/application/flow/step_node/condition_node/compare/compare.py b/apps/application/flow/step_node/condition_node/compare/compare.py new file mode 100644 index 00000000000..6cbb4af0732 --- /dev/null +++ b/apps/application/flow/step_node/condition_node/compare/compare.py @@ -0,0 +1,20 @@ +# coding=utf-8 +""" + @project: maxkb + @Author:虎 + @file: compare.py + @date:2024/6/7 14:37 + @desc: +""" +from abc import abstractmethod +from typing import List + + +class Compare: + @abstractmethod + def support(self, node_id, fields: List[str], source_value, compare, target_value): + pass + + @abstractmethod + def compare(self, source_value, compare, target_value): + pass diff --git a/apps/application/flow/step_node/condition_node/compare/contain_compare.py b/apps/application/flow/step_node/condition_node/compare/contain_compare.py new file mode 100644 index 00000000000..6073131a54d --- /dev/null +++ b/apps/application/flow/step_node/condition_node/compare/contain_compare.py @@ -0,0 +1,23 @@ +# coding=utf-8 +""" + @project: maxkb + @Author:虎 + @file: contain_compare.py + @date:2024/6/11 10:02 + @desc: +""" +from typing import List + +from application.flow.step_node.condition_node.compare.compare import Compare + + +class ContainCompare(Compare): + + def support(self, node_id, fields: List[str], source_value, compare, target_value): + if compare == 'contain': + return True + + def compare(self, source_value, compare, target_value): + if isinstance(source_value, str): + return str(target_value) in source_value + return any([str(item) == str(target_value) for item in source_value]) diff --git a/apps/application/flow/step_node/condition_node/compare/equal_compare.py b/apps/application/flow/step_node/condition_node/compare/equal_compare.py new file mode 100644 index 00000000000..0061a82f6e6 --- /dev/null +++ b/apps/application/flow/step_node/condition_node/compare/equal_compare.py @@ -0,0 +1,21 @@ +# coding=utf-8 +""" + @project: maxkb + @Author:虎 + @file: equal_compare.py + 
@date:2024/6/7 14:44 + @desc: +""" +from typing import List + +from application.flow.step_node.condition_node.compare.compare import Compare + + +class EqualCompare(Compare): + + def support(self, node_id, fields: List[str], source_value, compare, target_value): + if compare == 'eq': + return True + + def compare(self, source_value, compare, target_value): + return str(source_value) == str(target_value) diff --git a/apps/application/flow/step_node/condition_node/compare/ge_compare.py b/apps/application/flow/step_node/condition_node/compare/ge_compare.py new file mode 100644 index 00000000000..d4e22cbd696 --- /dev/null +++ b/apps/application/flow/step_node/condition_node/compare/ge_compare.py @@ -0,0 +1,24 @@ +# coding=utf-8 +""" + @project: maxkb + @Author:虎 + @file: lt_compare.py + @date:2024/6/11 9:52 + @desc: 大于比较器 +""" +from typing import List + +from application.flow.step_node.condition_node.compare.compare import Compare + + +class GECompare(Compare): + + def support(self, node_id, fields: List[str], source_value, compare, target_value): + if compare == 'ge': + return True + + def compare(self, source_value, compare, target_value): + try: + return float(source_value) >= float(target_value) + except Exception as e: + return False diff --git a/apps/application/flow/step_node/condition_node/compare/gt_compare.py b/apps/application/flow/step_node/condition_node/compare/gt_compare.py new file mode 100644 index 00000000000..80942abb2f2 --- /dev/null +++ b/apps/application/flow/step_node/condition_node/compare/gt_compare.py @@ -0,0 +1,24 @@ +# coding=utf-8 +""" + @project: maxkb + @Author:虎 + @file: lt_compare.py + @date:2024/6/11 9:52 + @desc: 大于比较器 +""" +from typing import List + +from application.flow.step_node.condition_node.compare.compare import Compare + + +class GTCompare(Compare): + + def support(self, node_id, fields: List[str], source_value, compare, target_value): + if compare == 'gt': + return True + + def compare(self, source_value, compare, 
target_value): + try: + return float(source_value) > float(target_value) + except Exception as e: + return False diff --git a/apps/application/flow/step_node/condition_node/compare/is_not_null_compare.py b/apps/application/flow/step_node/condition_node/compare/is_not_null_compare.py new file mode 100644 index 00000000000..5dec267135b --- /dev/null +++ b/apps/application/flow/step_node/condition_node/compare/is_not_null_compare.py @@ -0,0 +1,21 @@ +# coding=utf-8 +""" + @project: maxkb + @Author:虎 + @file: is_not_null_compare.py + @date:2024/6/28 10:45 + @desc: +""" +from typing import List + +from application.flow.step_node.condition_node.compare import Compare + + +class IsNotNullCompare(Compare): + + def support(self, node_id, fields: List[str], source_value, compare, target_value): + if compare == 'is_not_null': + return True + + def compare(self, source_value, compare, target_value): + return source_value is not None and len(source_value) > 0 diff --git a/apps/application/flow/step_node/condition_node/compare/is_not_true.py b/apps/application/flow/step_node/condition_node/compare/is_not_true.py new file mode 100644 index 00000000000..f8a29f5a126 --- /dev/null +++ b/apps/application/flow/step_node/condition_node/compare/is_not_true.py @@ -0,0 +1,24 @@ +# coding=utf-8 +""" + @project: MaxKB + @Author:虎 + @file: is_not_true.py + @date:2025/4/7 13:44 + @desc: +""" +from typing import List + +from application.flow.step_node.condition_node.compare import Compare + + +class IsNotTrueCompare(Compare): + + def support(self, node_id, fields: List[str], source_value, compare, target_value): + if compare == 'is_not_true': + return True + + def compare(self, source_value, compare, target_value): + try: + return source_value is False + except Exception as e: + return False diff --git a/apps/application/flow/step_node/condition_node/compare/is_null_compare.py b/apps/application/flow/step_node/condition_node/compare/is_null_compare.py new file mode 100644 index 
00000000000..c463f3fda28 --- /dev/null +++ b/apps/application/flow/step_node/condition_node/compare/is_null_compare.py @@ -0,0 +1,21 @@ +# coding=utf-8 +""" + @project: maxkb + @Author:虎 + @file: is_null_compare.py + @date:2024/6/28 10:45 + @desc: +""" +from typing import List + +from application.flow.step_node.condition_node.compare import Compare + + +class IsNullCompare(Compare): + + def support(self, node_id, fields: List[str], source_value, compare, target_value): + if compare == 'is_null': + return True + + def compare(self, source_value, compare, target_value): + return source_value is None or len(source_value) == 0 diff --git a/apps/application/flow/step_node/condition_node/compare/is_true.py b/apps/application/flow/step_node/condition_node/compare/is_true.py new file mode 100644 index 00000000000..166e0993ac0 --- /dev/null +++ b/apps/application/flow/step_node/condition_node/compare/is_true.py @@ -0,0 +1,24 @@ +# coding=utf-8 +""" + @project: MaxKB + @Author:虎 + @file: IsTrue.py + @date:2025/4/7 13:38 + @desc: +""" +from typing import List + +from application.flow.step_node.condition_node.compare import Compare + + +class IsTrueCompare(Compare): + + def support(self, node_id, fields: List[str], source_value, compare, target_value): + if compare == 'is_true': + return True + + def compare(self, source_value, compare, target_value): + try: + return source_value is True + except Exception as e: + return False diff --git a/apps/application/flow/step_node/condition_node/compare/le_compare.py b/apps/application/flow/step_node/condition_node/compare/le_compare.py new file mode 100644 index 00000000000..77a0bca0f5b --- /dev/null +++ b/apps/application/flow/step_node/condition_node/compare/le_compare.py @@ -0,0 +1,24 @@ +# coding=utf-8 +""" + @project: maxkb + @Author:虎 + @file: lt_compare.py + @date:2024/6/11 9:52 + @desc: 小于比较器 +""" +from typing import List + +from application.flow.step_node.condition_node.compare.compare import Compare + + +class 
LECompare(Compare): + + def support(self, node_id, fields: List[str], source_value, compare, target_value): + if compare == 'le': + return True + + def compare(self, source_value, compare, target_value): + try: + return float(source_value) <= float(target_value) + except Exception as e: + return False diff --git a/apps/application/flow/step_node/condition_node/compare/len_equal_compare.py b/apps/application/flow/step_node/condition_node/compare/len_equal_compare.py new file mode 100644 index 00000000000..f2b0764c551 --- /dev/null +++ b/apps/application/flow/step_node/condition_node/compare/len_equal_compare.py @@ -0,0 +1,24 @@ +# coding=utf-8 +""" + @project: maxkb + @Author:虎 + @file: equal_compare.py + @date:2024/6/7 14:44 + @desc: +""" +from typing import List + +from application.flow.step_node.condition_node.compare.compare import Compare + + +class LenEqualCompare(Compare): + + def support(self, node_id, fields: List[str], source_value, compare, target_value): + if compare == 'len_eq': + return True + + def compare(self, source_value, compare, target_value): + try: + return len(source_value) == int(target_value) + except Exception as e: + return False diff --git a/apps/application/flow/step_node/condition_node/compare/len_ge_compare.py b/apps/application/flow/step_node/condition_node/compare/len_ge_compare.py new file mode 100644 index 00000000000..87f11eb2cc5 --- /dev/null +++ b/apps/application/flow/step_node/condition_node/compare/len_ge_compare.py @@ -0,0 +1,24 @@ +# coding=utf-8 +""" + @project: maxkb + @Author:虎 + @file: lt_compare.py + @date:2024/6/11 9:52 + @desc: 大于比较器 +""" +from typing import List + +from application.flow.step_node.condition_node.compare.compare import Compare + + +class LenGECompare(Compare): + + def support(self, node_id, fields: List[str], source_value, compare, target_value): + if compare == 'len_ge': + return True + + def compare(self, source_value, compare, target_value): + try: + return len(source_value) >= int(target_value) + 
except Exception as e: + return False diff --git a/apps/application/flow/step_node/condition_node/compare/len_gt_compare.py b/apps/application/flow/step_node/condition_node/compare/len_gt_compare.py new file mode 100644 index 00000000000..0532d353d74 --- /dev/null +++ b/apps/application/flow/step_node/condition_node/compare/len_gt_compare.py @@ -0,0 +1,24 @@ +# coding=utf-8 +""" + @project: maxkb + @Author:虎 + @file: lt_compare.py + @date:2024/6/11 9:52 + @desc: 大于比较器 +""" +from typing import List + +from application.flow.step_node.condition_node.compare.compare import Compare + + +class LenGTCompare(Compare): + + def support(self, node_id, fields: List[str], source_value, compare, target_value): + if compare == 'len_gt': + return True + + def compare(self, source_value, compare, target_value): + try: + return len(source_value) > int(target_value) + except Exception as e: + return False diff --git a/apps/application/flow/step_node/condition_node/compare/len_le_compare.py b/apps/application/flow/step_node/condition_node/compare/len_le_compare.py new file mode 100644 index 00000000000..d315a754aa6 --- /dev/null +++ b/apps/application/flow/step_node/condition_node/compare/len_le_compare.py @@ -0,0 +1,24 @@ +# coding=utf-8 +""" + @project: maxkb + @Author:虎 + @file: lt_compare.py + @date:2024/6/11 9:52 + @desc: 小于比较器 +""" +from typing import List + +from application.flow.step_node.condition_node.compare.compare import Compare + + +class LenLECompare(Compare): + + def support(self, node_id, fields: List[str], source_value, compare, target_value): + if compare == 'len_le': + return True + + def compare(self, source_value, compare, target_value): + try: + return len(source_value) <= int(target_value) + except Exception as e: + return False diff --git a/apps/application/flow/step_node/condition_node/compare/len_lt_compare.py b/apps/application/flow/step_node/condition_node/compare/len_lt_compare.py new file mode 100644 index 00000000000..c89638cd721 --- /dev/null +++ 
b/apps/application/flow/step_node/condition_node/compare/len_lt_compare.py @@ -0,0 +1,24 @@ +# coding=utf-8 +""" + @project: maxkb + @Author:虎 + @file: lt_compare.py + @date:2024/6/11 9:52 + @desc: 小于比较器 +""" +from typing import List + +from application.flow.step_node.condition_node.compare.compare import Compare + + +class LenLTCompare(Compare): + + def support(self, node_id, fields: List[str], source_value, compare, target_value): + if compare == 'len_lt': + return True + + def compare(self, source_value, compare, target_value): + try: + return len(source_value) < int(target_value) + except Exception as e: + return False diff --git a/apps/application/flow/step_node/condition_node/compare/lt_compare.py b/apps/application/flow/step_node/condition_node/compare/lt_compare.py new file mode 100644 index 00000000000..d2d5be74823 --- /dev/null +++ b/apps/application/flow/step_node/condition_node/compare/lt_compare.py @@ -0,0 +1,24 @@ +# coding=utf-8 +""" + @project: maxkb + @Author:虎 + @file: lt_compare.py + @date:2024/6/11 9:52 + @desc: 小于比较器 +""" +from typing import List + +from application.flow.step_node.condition_node.compare.compare import Compare + + +class LTCompare(Compare): + + def support(self, node_id, fields: List[str], source_value, compare, target_value): + if compare == 'lt': + return True + + def compare(self, source_value, compare, target_value): + try: + return float(source_value) < float(target_value) + except Exception as e: + return False diff --git a/apps/application/flow/step_node/condition_node/compare/not_contain_compare.py b/apps/application/flow/step_node/condition_node/compare/not_contain_compare.py new file mode 100644 index 00000000000..f95b237ddf6 --- /dev/null +++ b/apps/application/flow/step_node/condition_node/compare/not_contain_compare.py @@ -0,0 +1,23 @@ +# coding=utf-8 +""" + @project: maxkb + @Author:虎 + @file: contain_compare.py + @date:2024/6/11 10:02 + @desc: +""" +from typing import List + +from 
application.flow.step_node.condition_node.compare.compare import Compare + + +class NotContainCompare(Compare): + + def support(self, node_id, fields: List[str], source_value, compare, target_value): + if compare == 'not_contain': + return True + + def compare(self, source_value, compare, target_value): + if isinstance(source_value, str): + return str(target_value) not in source_value + return not any([str(item) == str(target_value) for item in source_value]) diff --git a/apps/application/flow/step_node/condition_node/i_condition_node.py b/apps/application/flow/step_node/condition_node/i_condition_node.py new file mode 100644 index 00000000000..a0e9814ff69 --- /dev/null +++ b/apps/application/flow/step_node/condition_node/i_condition_node.py @@ -0,0 +1,39 @@ +# coding=utf-8 +""" + @project: maxkb + @Author:虎 + @file: i_condition_node.py + @date:2024/6/7 9:54 + @desc: +""" +from typing import Type + +from django.utils.translation import gettext_lazy as _ +from rest_framework import serializers + +from application.flow.i_step_node import INode +from common.util.field_message import ErrMessage + + +class ConditionSerializer(serializers.Serializer): + compare = serializers.CharField(required=True, error_messages=ErrMessage.char(_("Comparator"))) + value = serializers.CharField(required=True, error_messages=ErrMessage.char(_("value"))) + field = serializers.ListField(required=True, error_messages=ErrMessage.char(_("Fields"))) + + +class ConditionBranchSerializer(serializers.Serializer): + id = serializers.CharField(required=True, error_messages=ErrMessage.char(_("Branch id"))) + type = serializers.CharField(required=True, error_messages=ErrMessage.char(_("Branch Type"))) + condition = serializers.CharField(required=True, error_messages=ErrMessage.char(_("Condition or|and"))) + conditions = ConditionSerializer(many=True) + + +class ConditionNodeParamsSerializer(serializers.Serializer): + branch = ConditionBranchSerializer(many=True) + + +class IConditionNode(INode): + 
def get_node_params_serializer_class(self) -> Type[serializers.Serializer]: + return ConditionNodeParamsSerializer + + type = 'condition-node' diff --git a/apps/application/flow/step_node/condition_node/impl/__init__.py b/apps/application/flow/step_node/condition_node/impl/__init__.py new file mode 100644 index 00000000000..c21cd3ebb37 --- /dev/null +++ b/apps/application/flow/step_node/condition_node/impl/__init__.py @@ -0,0 +1,9 @@ +# coding=utf-8 +""" + @project: maxkb + @Author:虎 + @file: __init__.py + @date:2024/6/11 15:35 + @desc: +""" +from .base_condition_node import BaseConditionNode diff --git a/apps/application/flow/step_node/condition_node/impl/base_condition_node.py b/apps/application/flow/step_node/condition_node/impl/base_condition_node.py new file mode 100644 index 00000000000..109029be211 --- /dev/null +++ b/apps/application/flow/step_node/condition_node/impl/base_condition_node.py @@ -0,0 +1,62 @@ +# coding=utf-8 +""" + @project: maxkb + @Author:虎 + @file: base_condition_node.py + @date:2024/6/7 11:29 + @desc: +""" +from typing import List + +from application.flow.i_step_node import NodeResult +from application.flow.step_node.condition_node.compare import compare_handle_list +from application.flow.step_node.condition_node.i_condition_node import IConditionNode + + +class BaseConditionNode(IConditionNode): + def save_context(self, details, workflow_manage): + self.context['branch_id'] = details.get('branch_id') + self.context['branch_name'] = details.get('branch_name') + + def execute(self, **kwargs) -> NodeResult: + branch_list = self.node_params_serializer.data['branch'] + branch = self._execute(branch_list) + r = NodeResult({'branch_id': branch.get('id'), 'branch_name': branch.get('type')}, {}) + return r + + def _execute(self, branch_list: List): + for branch in branch_list: + if self.branch_assertion(branch): + return branch + + def branch_assertion(self, branch): + condition_list = [self.assertion(row.get('field'), row.get('compare'), 
row.get('value')) for row in + branch.get('conditions')] + condition = branch.get('condition') + return all(condition_list) if condition == 'and' else any(condition_list) + + def assertion(self, field_list: List[str], compare: str, value): + try: + value = self.workflow_manage.generate_prompt(value) + except Exception as e: + pass + field_value = None + try: + field_value = self.workflow_manage.get_reference_field(field_list[0], field_list[1:]) + except Exception as e: + pass + for compare_handler in compare_handle_list: + if compare_handler.support(field_list[0], field_list[1:], field_value, compare, value): + return compare_handler.compare(field_value, compare, value) + + def get_details(self, index: int, **kwargs): + return { + 'name': self.node.properties.get('stepName'), + "index": index, + 'run_time': self.context.get('run_time'), + 'branch_id': self.context.get('branch_id'), + 'branch_name': self.context.get('branch_name'), + 'type': self.node.type, + 'status': self.status, + 'err_message': self.err_message + } diff --git a/apps/application/flow/step_node/direct_reply_node/__init__.py b/apps/application/flow/step_node/direct_reply_node/__init__.py new file mode 100644 index 00000000000..cf360f95685 --- /dev/null +++ b/apps/application/flow/step_node/direct_reply_node/__init__.py @@ -0,0 +1,9 @@ +# coding=utf-8 +""" + @project: maxkb + @Author:虎 + @file: __init__.py + @date:2024/6/11 17:50 + @desc: +""" +from .impl import * \ No newline at end of file diff --git a/apps/application/flow/step_node/direct_reply_node/i_reply_node.py b/apps/application/flow/step_node/direct_reply_node/i_reply_node.py new file mode 100644 index 00000000000..d60541b18fb --- /dev/null +++ b/apps/application/flow/step_node/direct_reply_node/i_reply_node.py @@ -0,0 +1,48 @@ +# coding=utf-8 +""" + @project: maxkb + @Author:虎 + @file: i_reply_node.py + @date:2024/6/11 16:25 + @desc: +""" +from typing import Type + +from rest_framework import serializers + +from 
application.flow.i_step_node import INode, NodeResult +from common.exception.app_exception import AppApiException +from common.util.field_message import ErrMessage +from django.utils.translation import gettext_lazy as _ + + +class ReplyNodeParamsSerializer(serializers.Serializer): + reply_type = serializers.CharField(required=True, error_messages=ErrMessage.char(_("Response Type"))) + fields = serializers.ListField(required=False, error_messages=ErrMessage.list(_("Reference Field"))) + content = serializers.CharField(required=False, allow_blank=True, allow_null=True, + error_messages=ErrMessage.char(_("Direct answer content"))) + is_result = serializers.BooleanField(required=False, error_messages=ErrMessage.boolean(_('Whether to return content'))) + + def is_valid(self, *, raise_exception=False): + super().is_valid(raise_exception=True) + if self.data.get('reply_type') == 'referencing': + if 'fields' not in self.data: + raise AppApiException(500, _("Reference field cannot be empty")) + if len(self.data.get('fields')) < 2: + raise AppApiException(500, _("Reference field error")) + else: + if 'content' not in self.data or self.data.get('content') is None: + raise AppApiException(500, _("Content cannot be empty")) + + +class IReplyNode(INode): + type = 'reply-node' + + def get_node_params_serializer_class(self) -> Type[serializers.Serializer]: + return ReplyNodeParamsSerializer + + def _run(self): + return self.execute(**self.node_params_serializer.data, **self.flow_params_serializer.data) + + def execute(self, reply_type, stream, fields=None, content=None, **kwargs) -> NodeResult: + pass diff --git a/apps/application/flow/step_node/direct_reply_node/impl/__init__.py b/apps/application/flow/step_node/direct_reply_node/impl/__init__.py new file mode 100644 index 00000000000..3307e90899e --- /dev/null +++ b/apps/application/flow/step_node/direct_reply_node/impl/__init__.py @@ -0,0 +1,9 @@ +# coding=utf-8 +""" + @project: maxkb + @Author:虎 + @file: __init__.py + 
@date:2024/6/11 17:49 + @desc: +""" +from .base_reply_node import * \ No newline at end of file diff --git a/apps/application/flow/step_node/direct_reply_node/impl/base_reply_node.py b/apps/application/flow/step_node/direct_reply_node/impl/base_reply_node.py new file mode 100644 index 00000000000..1d3115e4c67 --- /dev/null +++ b/apps/application/flow/step_node/direct_reply_node/impl/base_reply_node.py @@ -0,0 +1,45 @@ +# coding=utf-8 +""" + @project: maxkb + @Author:虎 + @file: base_reply_node.py + @date:2024/6/11 17:25 + @desc: +""" +from typing import List + +from application.flow.i_step_node import NodeResult +from application.flow.step_node.direct_reply_node.i_reply_node import IReplyNode + + +class BaseReplyNode(IReplyNode): + def save_context(self, details, workflow_manage): + self.context['answer'] = details.get('answer') + if self.node_params.get('is_result', False): + self.answer_text = details.get('answer') + + def execute(self, reply_type, stream, fields=None, content=None, **kwargs) -> NodeResult: + if reply_type == 'referencing': + result = self.get_reference_content(fields) + else: + result = self.generate_reply_content(content) + return NodeResult({'answer': result}, {}) + + def generate_reply_content(self, prompt): + return self.workflow_manage.generate_prompt(prompt) + + def get_reference_content(self, fields: List[str]): + return str(self.workflow_manage.get_reference_field( + fields[0], + fields[1:])) + + def get_details(self, index: int, **kwargs): + return { + 'name': self.node.properties.get('stepName'), + "index": index, + 'run_time': self.context.get('run_time'), + 'type': self.node.type, + 'answer': self.context.get('answer'), + 'status': self.status, + 'err_message': self.err_message + } diff --git a/apps/application/flow/step_node/document_extract_node/__init__.py b/apps/application/flow/step_node/document_extract_node/__init__.py new file mode 100644 index 00000000000..ce8f10f3e24 --- /dev/null +++ 
b/apps/application/flow/step_node/document_extract_node/__init__.py @@ -0,0 +1 @@ +from .impl import * \ No newline at end of file diff --git a/apps/application/flow/step_node/document_extract_node/i_document_extract_node.py b/apps/application/flow/step_node/document_extract_node/i_document_extract_node.py new file mode 100644 index 00000000000..93d2b5b987b --- /dev/null +++ b/apps/application/flow/step_node/document_extract_node/i_document_extract_node.py @@ -0,0 +1,28 @@ +# coding=utf-8 + +from typing import Type + +from django.utils.translation import gettext_lazy as _ +from rest_framework import serializers + +from application.flow.i_step_node import INode, NodeResult +from common.util.field_message import ErrMessage + + +class DocumentExtractNodeSerializer(serializers.Serializer): + document_list = serializers.ListField(required=False, error_messages=ErrMessage.list(_("document"))) + + +class IDocumentExtractNode(INode): + type = 'document-extract-node' + + def get_node_params_serializer_class(self) -> Type[serializers.Serializer]: + return DocumentExtractNodeSerializer + + def _run(self): + res = self.workflow_manage.get_reference_field(self.node_params_serializer.data.get('document_list')[0], + self.node_params_serializer.data.get('document_list')[1:]) + return self.execute(document=res, **self.flow_params_serializer.data) + + def execute(self, document, chat_id, **kwargs) -> NodeResult: + pass diff --git a/apps/application/flow/step_node/document_extract_node/impl/__init__.py b/apps/application/flow/step_node/document_extract_node/impl/__init__.py new file mode 100644 index 00000000000..cf9d55ecde8 --- /dev/null +++ b/apps/application/flow/step_node/document_extract_node/impl/__init__.py @@ -0,0 +1 @@ +from .base_document_extract_node import BaseDocumentExtractNode diff --git a/apps/application/flow/step_node/document_extract_node/impl/base_document_extract_node.py b/apps/application/flow/step_node/document_extract_node/impl/base_document_extract_node.py 
new file mode 100644 index 00000000000..6ddcb6e2fca --- /dev/null +++ b/apps/application/flow/step_node/document_extract_node/impl/base_document_extract_node.py @@ -0,0 +1,94 @@ +# coding=utf-8 +import io +import mimetypes + +from django.core.files.uploadedfile import InMemoryUploadedFile +from django.db.models import QuerySet + +from application.flow.i_step_node import NodeResult +from application.flow.step_node.document_extract_node.i_document_extract_node import IDocumentExtractNode +from dataset.models import File +from dataset.serializers.document_serializers import split_handles, parse_table_handle_list, FileBufferHandle +from dataset.serializers.file_serializers import FileSerializer + + +def bytes_to_uploaded_file(file_bytes, file_name="file.txt"): + content_type, _ = mimetypes.guess_type(file_name) + if content_type is None: + # 如果未能识别,设置为默认的二进制文件类型 + content_type = "application/octet-stream" + # 创建一个内存中的字节流对象 + file_stream = io.BytesIO(file_bytes) + + # 获取文件大小 + file_size = len(file_bytes) + + # 创建 InMemoryUploadedFile 对象 + uploaded_file = InMemoryUploadedFile( + file=file_stream, + field_name=None, + name=file_name, + content_type=content_type, + size=file_size, + charset=None, + ) + return uploaded_file + + +splitter = '\n`-----------------------------------`\n' + +class BaseDocumentExtractNode(IDocumentExtractNode): + def save_context(self, details, workflow_manage): + self.context['content'] = details.get('content') + + + def execute(self, document, chat_id, **kwargs): + get_buffer = FileBufferHandle().get_buffer + + self.context['document_list'] = document + content = [] + if document is None or not isinstance(document, list): + return NodeResult({'content': ''}, {}) + + application = self.workflow_manage.work_flow_post_handler.chat_info.application + + # doc文件中的图片保存 + def save_image(image_list): + for image in image_list: + meta = { + 'debug': False if application.id else True, + 'chat_id': chat_id, + 'application_id': str(application.id) if 
application.id else None, + 'file_id': str(image.id) + } + file = bytes_to_uploaded_file(image.image, image.image_name) + FileSerializer(data={'file': file, 'meta': meta}).upload() + + for doc in document: + file = QuerySet(File).filter(id=doc['file_id']).first() + buffer = io.BytesIO(file.get_byte().tobytes()) + buffer.name = doc['name'] # this is the important line + + for split_handle in (parse_table_handle_list + split_handles): + if split_handle.support(buffer, get_buffer): + # 回到文件头 + buffer.seek(0) + file_content = split_handle.get_content(buffer, save_image) + content.append('### ' + doc['name'] + '\n' + file_content) + break + + return NodeResult({'content': splitter.join(content)}, {}) + + def get_details(self, index: int, **kwargs): + content = self.context.get('content', '').split(splitter) + # 不保存content全部内容,因为content内容可能会很大 + return { + 'name': self.node.properties.get('stepName'), + "index": index, + 'run_time': self.context.get('run_time'), + 'type': self.node.type, + 'content': [file_content[:500] for file_content in content], + 'status': self.status, + 'err_message': self.err_message, + 'document_list': self.context.get('document_list') + } diff --git a/apps/application/flow/step_node/form_node/__init__.py b/apps/application/flow/step_node/form_node/__init__.py new file mode 100644 index 00000000000..ce04b64aea8 --- /dev/null +++ b/apps/application/flow/step_node/form_node/__init__.py @@ -0,0 +1,9 @@ +# coding=utf-8 +""" + @project: MaxKB + @Author:虎 + @file: __init__.py.py + @date:2024/11/4 14:48 + @desc: +""" +from .impl import * \ No newline at end of file diff --git a/apps/application/flow/step_node/form_node/i_form_node.py b/apps/application/flow/step_node/form_node/i_form_node.py new file mode 100644 index 00000000000..7e82494293d --- /dev/null +++ b/apps/application/flow/step_node/form_node/i_form_node.py @@ -0,0 +1,35 @@ +# coding=utf-8 +""" + @project: MaxKB + @Author:虎 + @file: i_form_node.py + @date:2024/11/4 14:48 + @desc: +""" +from 
typing import Type + +from rest_framework import serializers + +from application.flow.i_step_node import INode, NodeResult +from common.util.field_message import ErrMessage +from django.utils.translation import gettext_lazy as _ + + +class FormNodeParamsSerializer(serializers.Serializer): + form_field_list = serializers.ListField(required=True, error_messages=ErrMessage.list(_("Form Configuration"))) + form_content_format = serializers.CharField(required=True, error_messages=ErrMessage.char(_('Form output content'))) + form_data = serializers.DictField(required=False, allow_null=True, error_messages=ErrMessage.dict(_("Form Data"))) + + +class IFormNode(INode): + type = 'form-node' + view_type = 'single_view' + + def get_node_params_serializer_class(self) -> Type[serializers.Serializer]: + return FormNodeParamsSerializer + + def _run(self): + return self.execute(**self.node_params_serializer.data, **self.flow_params_serializer.data) + + def execute(self, form_field_list, form_content_format, form_data, **kwargs) -> NodeResult: + pass diff --git a/apps/application/flow/step_node/form_node/impl/__init__.py b/apps/application/flow/step_node/form_node/impl/__init__.py new file mode 100644 index 00000000000..4cea85e1d9e --- /dev/null +++ b/apps/application/flow/step_node/form_node/impl/__init__.py @@ -0,0 +1,9 @@ +# coding=utf-8 +""" + @project: MaxKB + @Author:虎 + @file: __init__.py.py + @date:2024/11/4 14:49 + @desc: +""" +from .base_form_node import BaseFormNode diff --git a/apps/application/flow/step_node/form_node/impl/base_form_node.py b/apps/application/flow/step_node/form_node/impl/base_form_node.py new file mode 100644 index 00000000000..dcf35dd3cfd --- /dev/null +++ b/apps/application/flow/step_node/form_node/impl/base_form_node.py @@ -0,0 +1,107 @@ +# coding=utf-8 +""" + @project: MaxKB + @Author:虎 + @file: base_form_node.py + @date:2024/11/4 14:52 + @desc: +""" +import json +import time +from typing import Dict, List + +from langchain_core.prompts import 
PromptTemplate + +from application.flow.common import Answer +from application.flow.i_step_node import NodeResult +from application.flow.step_node.form_node.i_form_node import IFormNode + + +def write_context(step_variable: Dict, global_variable: Dict, node, workflow): + if step_variable is not None: + for key in step_variable: + node.context[key] = step_variable[key] + if workflow.is_result(node, NodeResult(step_variable, global_variable)) and 'result' in step_variable: + result = step_variable['result'] + yield result + node.answer_text = result + node.context['run_time'] = time.time() - node.context['start_time'] + + +class BaseFormNode(IFormNode): + def save_context(self, details, workflow_manage): + form_data = details.get('form_data', None) + self.context['result'] = details.get('result') + self.context['form_content_format'] = details.get('form_content_format') + self.context['form_field_list'] = details.get('form_field_list') + self.context['run_time'] = details.get('run_time') + self.context['start_time'] = details.get('start_time') + self.context['form_data'] = form_data + self.context['is_submit'] = details.get('is_submit') + if self.node_params.get('is_result', False): + self.answer_text = details.get('result') + if form_data is not None: + for key in form_data: + self.context[key] = form_data[key] + + def execute(self, form_field_list, form_content_format, form_data, **kwargs) -> NodeResult: + if form_data is not None: + self.context['is_submit'] = True + self.context['form_data'] = form_data + for key in form_data: + self.context[key] = form_data.get(key) + else: + self.context['is_submit'] = False + form_setting = {"form_field_list": form_field_list, "runtime_node_id": self.runtime_node_id, + "chat_record_id": self.flow_params_serializer.data.get("chat_record_id"), + "is_submit": self.context.get("is_submit", False)} + form = f'{json.dumps(form_setting, ensure_ascii=False)}' + context = self.workflow_manage.get_workflow_content() + 
form_content_format = self.workflow_manage.reset_prompt(form_content_format) + prompt_template = PromptTemplate.from_template(form_content_format, template_format='jinja2') + value = prompt_template.format(form=form, context=context) + return NodeResult( + {'result': value, 'form_field_list': form_field_list, 'form_content_format': form_content_format}, {}, + _write_context=write_context) + + def get_answer_list(self) -> List[Answer] | None: + form_content_format = self.context.get('form_content_format') + form_field_list = self.context.get('form_field_list') + form_setting = {"form_field_list": form_field_list, "runtime_node_id": self.runtime_node_id, + "chat_record_id": self.flow_params_serializer.data.get("chat_record_id"), + 'form_data': self.context.get('form_data', {}), + "is_submit": self.context.get("is_submit", False)} + form = f'{json.dumps(form_setting, ensure_ascii=False)}' + context = self.workflow_manage.get_workflow_content() + form_content_format = self.workflow_manage.reset_prompt(form_content_format) + prompt_template = PromptTemplate.from_template(form_content_format, template_format='jinja2') + value = prompt_template.format(form=form, context=context) + return [Answer(value, self.view_type, self.runtime_node_id, self.workflow_params['chat_record_id'], None, + self.runtime_node_id, '')] + + def get_details(self, index: int, **kwargs): + form_content_format = self.context.get('form_content_format') + form_field_list = self.context.get('form_field_list') + form_setting = {"form_field_list": form_field_list, "runtime_node_id": self.runtime_node_id, + "chat_record_id": self.flow_params_serializer.data.get("chat_record_id"), + 'form_data': self.context.get('form_data', {}), + "is_submit": self.context.get("is_submit", False)} + form = f'{json.dumps(form_setting, ensure_ascii=False)}' + context = self.workflow_manage.get_workflow_content() + form_content_format = self.workflow_manage.reset_prompt(form_content_format) + prompt_template = 
PromptTemplate.from_template(form_content_format, template_format='jinja2') + value = prompt_template.format(form=form, context=context) + return { + 'name': self.node.properties.get('stepName'), + "index": index, + "result": value, + "form_content_format": self.context.get('form_content_format'), + "form_field_list": self.context.get('form_field_list'), + 'form_data': self.context.get('form_data'), + 'start_time': self.context.get('start_time'), + 'is_submit': self.context.get('is_submit'), + 'run_time': self.context.get('run_time'), + 'type': self.node.type, + 'status': self.status, + 'err_message': self.err_message + } diff --git a/apps/application/flow/step_node/function_lib_node/__init__.py b/apps/application/flow/step_node/function_lib_node/__init__.py new file mode 100644 index 00000000000..7422965c365 --- /dev/null +++ b/apps/application/flow/step_node/function_lib_node/__init__.py @@ -0,0 +1,9 @@ +# coding=utf-8 +""" + @project: MaxKB + @Author:虎 + @file: __init__.py + @date:2024/8/8 17:45 + @desc: +""" +from .impl import * \ No newline at end of file diff --git a/apps/application/flow/step_node/function_lib_node/i_function_lib_node.py b/apps/application/flow/step_node/function_lib_node/i_function_lib_node.py new file mode 100644 index 00000000000..c84782ff6a9 --- /dev/null +++ b/apps/application/flow/step_node/function_lib_node/i_function_lib_node.py @@ -0,0 +1,48 @@ +# coding=utf-8 +""" + @project: MaxKB + @Author:虎 + @file: i_function_lib_node.py + @date:2024/8/8 16:21 + @desc: +""" +from typing import Type + +from django.db.models import QuerySet +from rest_framework import serializers + +from application.flow.i_step_node import INode, NodeResult +from common.field.common import ObjectField +from common.util.field_message import ErrMessage +from function_lib.models.function import FunctionLib +from django.utils.translation import gettext_lazy as _ + + +class InputField(serializers.Serializer): + name = serializers.CharField(required=True, 
error_messages=ErrMessage.char(_('Variable Name'))) + value = ObjectField(required=True, error_messages=ErrMessage.char(_("Variable Value")), model_type_list=[str, list]) + + +class FunctionLibNodeParamsSerializer(serializers.Serializer): + function_lib_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid(_('Library ID'))) + input_field_list = InputField(required=True, many=True) + is_result = serializers.BooleanField(required=False, error_messages=ErrMessage.boolean(_('Whether to return content'))) + + def is_valid(self, *, raise_exception=False): + super().is_valid(raise_exception=True) + f_lib = QuerySet(FunctionLib).filter(id=self.data.get('function_lib_id')).first() + if f_lib is None: + raise Exception(_('The function has been deleted')) + + +class IFunctionLibNode(INode): + type = 'function-lib-node' + + def get_node_params_serializer_class(self) -> Type[serializers.Serializer]: + return FunctionLibNodeParamsSerializer + + def _run(self): + return self.execute(**self.node_params_serializer.data, **self.flow_params_serializer.data) + + def execute(self, function_lib_id, input_field_list, **kwargs) -> NodeResult: + pass diff --git a/apps/application/flow/step_node/function_lib_node/impl/__init__.py b/apps/application/flow/step_node/function_lib_node/impl/__init__.py new file mode 100644 index 00000000000..96681474f19 --- /dev/null +++ b/apps/application/flow/step_node/function_lib_node/impl/__init__.py @@ -0,0 +1,9 @@ +# coding=utf-8 +""" + @project: MaxKB + @Author:虎 + @file: __init__.py + @date:2024/8/8 17:48 + @desc: +""" +from .base_function_lib_node import BaseFunctionLibNodeNode diff --git a/apps/application/flow/step_node/function_lib_node/impl/base_function_lib_node.py b/apps/application/flow/step_node/function_lib_node/impl/base_function_lib_node.py new file mode 100644 index 00000000000..341bb91da63 --- /dev/null +++ b/apps/application/flow/step_node/function_lib_node/impl/base_function_lib_node.py @@ -0,0 +1,150 @@ +# 
coding=utf-8 +""" + @project: MaxKB + @Author:虎 + @file: base_function_lib_node.py + @date:2024/8/8 17:49 + @desc: +""" +import json +import time +from typing import Dict + +from django.db.models import QuerySet +from django.utils.translation import gettext as _ + +from application.flow.i_step_node import NodeResult +from application.flow.step_node.function_lib_node.i_function_lib_node import IFunctionLibNode +from common.exception.app_exception import AppApiException +from common.util.function_code import FunctionExecutor +from common.util.rsa_util import rsa_long_decrypt +from function_lib.models.function import FunctionLib +from smartdoc.const import CONFIG + +function_executor = FunctionExecutor(CONFIG.get('SANDBOX')) + + +def write_context(step_variable: Dict, global_variable: Dict, node, workflow): + if step_variable is not None: + for key in step_variable: + node.context[key] = step_variable[key] + if workflow.is_result(node, NodeResult(step_variable, global_variable)) and 'result' in step_variable: + result = str(step_variable['result']) + '\n' + yield result + node.answer_text = result + node.context['run_time'] = time.time() - node.context['start_time'] + + +def get_field_value(debug_field_list, name, is_required): + result = [field for field in debug_field_list if field.get('name') == name] + if len(result) > 0: + return result[-1]['value'] + if is_required: + raise AppApiException(500, _('Field: {name} No value set').format(name=name)) + return None + + +def valid_reference_value(_type, value, name): + if _type == 'int': + instance_type = int | float + elif _type == 'float': + instance_type = float | int + elif _type == 'dict': + instance_type = dict + elif _type == 'array': + instance_type = list + elif _type == 'string': + instance_type = str + else: + raise Exception(_('Field: {name} Type: {_type} Value: {value} Unsupported types').format(name=name, + _type=_type)) + if not isinstance(value, instance_type): + raise Exception( + _('Field: {name} Type: 
{_type} Value: {value} Type error').format(name=name, _type=_type, + value=value)) + + +def convert_value(name: str, value, _type, is_required, source, node): + if not is_required and (value is None or (isinstance(value, str) and len(value) == 0)): + return None + if not is_required and source == 'reference' and (value is None or len(value) == 0): + return None + if source == 'reference': + value = node.workflow_manage.get_reference_field( + value[0], + value[1:]) + valid_reference_value(_type, value, name) + if _type == 'int': + return int(value) + if _type == 'float': + return float(value) + return value + try: + if _type == 'int': + return int(value) + if _type == 'float': + return float(value) + if _type == 'dict': + v = json.loads(value) + if isinstance(v, dict): + return v + raise Exception(_('type error')) + if _type == 'array': + v = json.loads(value) + if isinstance(v, list): + return v + raise Exception(_('type error')) + return value + except Exception as e: + raise Exception( + _('Field: {name} Type: {_type} Value: {value} Type error').format(name=name, _type=_type, + value=value)) + + +def valid_function(function_lib, user_id): + if function_lib is None: + raise Exception(_('Function does not exist')) + if function_lib.permission_type == 'PRIVATE' and str(function_lib.user_id) != str(user_id): + raise Exception(_('No permission to use this function {name}').format(name=function_lib.name)) + if not function_lib.is_active: + raise Exception(_('Function {name} is unavailable').format(name=function_lib.name)) + + +class BaseFunctionLibNodeNode(IFunctionLibNode): + def save_context(self, details, workflow_manage): + self.context['result'] = details.get('result') + if self.node_params.get('is_result'): + self.answer_text = str(details.get('result')) + + def execute(self, function_lib_id, input_field_list, **kwargs) -> NodeResult: + function_lib = QuerySet(FunctionLib).filter(id=function_lib_id).first() + valid_function(function_lib, 
self.flow_params_serializer.data.get('user_id')) + params = {field.get('name'): convert_value(field.get('name'), field.get('value'), field.get('type'), + field.get('is_required'), + field.get('source'), self) + for field in + [{'value': get_field_value(input_field_list, field.get('name'), field.get('is_required'), + ), **field} + for field in + function_lib.input_field_list]} + + self.context['params'] = params + # 合并初始化参数 + if function_lib.init_params is not None: + all_params = json.loads(rsa_long_decrypt(function_lib.init_params)) | params + else: + all_params = params + result = function_executor.exec_code(function_lib.code, all_params) + return NodeResult({'result': result}, {}, _write_context=write_context) + + def get_details(self, index: int, **kwargs): + return { + 'name': self.node.properties.get('stepName'), + "index": index, + "result": self.context.get('result'), + "params": self.context.get('params'), + 'run_time': self.context.get('run_time'), + 'type': self.node.type, + 'status': self.status, + 'err_message': self.err_message + } diff --git a/apps/application/flow/step_node/function_node/__init__.py b/apps/application/flow/step_node/function_node/__init__.py new file mode 100644 index 00000000000..ebfbe8d8bb4 --- /dev/null +++ b/apps/application/flow/step_node/function_node/__init__.py @@ -0,0 +1,9 @@ +# coding=utf-8 +""" + @project: MaxKB + @Author:虎 + @file: __init__.py.py + @date:2024/8/13 10:43 + @desc: +""" +from .impl import * \ No newline at end of file diff --git a/apps/application/flow/step_node/function_node/i_function_node.py b/apps/application/flow/step_node/function_node/i_function_node.py new file mode 100644 index 00000000000..bbaae6c73fe --- /dev/null +++ b/apps/application/flow/step_node/function_node/i_function_node.py @@ -0,0 +1,63 @@ +# coding=utf-8 +""" + @project: MaxKB + @Author:虎 + @file: i_function_lib_node.py + @date:2024/8/8 16:21 + @desc: +""" +import re +from typing import Type + +from django.core import validators +from 
rest_framework import serializers + +from application.flow.i_step_node import INode, NodeResult +from common.exception.app_exception import AppApiException +from common.field.common import ObjectField +from common.util.field_message import ErrMessage +from django.utils.translation import gettext_lazy as _ +from rest_framework.utils.formatting import lazy_format + + +class InputField(serializers.Serializer): + name = serializers.CharField(required=True, error_messages=ErrMessage.char(_('Variable Name'))) + is_required = serializers.BooleanField(required=True, error_messages=ErrMessage.boolean(_("Is this field required"))) + type = serializers.CharField(required=True, error_messages=ErrMessage.char(_("type")), validators=[ + validators.RegexValidator(regex=re.compile("^string|int|dict|array|float$"), + message=_("The field only supports string|int|dict|array|float"), code=500) + ]) + source = serializers.CharField(required=True, error_messages=ErrMessage.char(_("source")), validators=[ + validators.RegexValidator(regex=re.compile("^custom|reference$"), + message=_("The field only supports custom|reference"), code=500) + ]) + value = ObjectField(required=True, error_messages=ErrMessage.char(_("Variable Value")), model_type_list=[str, list]) + + def is_valid(self, *, raise_exception=False): + super().is_valid(raise_exception=True) + is_required = self.data.get('is_required') + if is_required and self.data.get('value') is None: + message = lazy_format(_('{field}, this field is required.'), field=self.data.get("name")) + raise AppApiException(500, message) + + +class FunctionNodeParamsSerializer(serializers.Serializer): + input_field_list = InputField(required=True, many=True) + code = serializers.CharField(required=True, error_messages=ErrMessage.char(_("function"))) + is_result = serializers.BooleanField(required=False, error_messages=ErrMessage.boolean(_('Whether to return content'))) + + def is_valid(self, *, raise_exception=False): + 
super().is_valid(raise_exception=True) + + +class IFunctionNode(INode): + type = 'function-node' + + def get_node_params_serializer_class(self) -> Type[serializers.Serializer]: + return FunctionNodeParamsSerializer + + def _run(self): + return self.execute(**self.node_params_serializer.data, **self.flow_params_serializer.data) + + def execute(self, input_field_list, code, **kwargs) -> NodeResult: + pass diff --git a/apps/application/flow/step_node/function_node/impl/__init__.py b/apps/application/flow/step_node/function_node/impl/__init__.py new file mode 100644 index 00000000000..1a096368f84 --- /dev/null +++ b/apps/application/flow/step_node/function_node/impl/__init__.py @@ -0,0 +1,9 @@ +# coding=utf-8 +""" + @project: MaxKB + @Author:虎 + @file: __init__.py.py + @date:2024/8/13 11:19 + @desc: +""" +from .base_function_node import BaseFunctionNodeNode diff --git a/apps/application/flow/step_node/function_node/impl/base_function_node.py b/apps/application/flow/step_node/function_node/impl/base_function_node.py new file mode 100644 index 00000000000..d659227f1ee --- /dev/null +++ b/apps/application/flow/step_node/function_node/impl/base_function_node.py @@ -0,0 +1,108 @@ +# coding=utf-8 +""" + @project: MaxKB + @Author:虎 + @file: base_function_lib_node.py + @date:2024/8/8 17:49 + @desc: +""" +import json +import time + +from typing import Dict + +from application.flow.i_step_node import NodeResult +from application.flow.step_node.function_node.i_function_node import IFunctionNode +from common.exception.app_exception import AppApiException +from common.util.function_code import FunctionExecutor +from smartdoc.const import CONFIG + +function_executor = FunctionExecutor(CONFIG.get('SANDBOX')) + + +def write_context(step_variable: Dict, global_variable: Dict, node, workflow): + if step_variable is not None: + for key in step_variable: + node.context[key] = step_variable[key] + if workflow.is_result(node, NodeResult(step_variable, global_variable)) and 'result' in 
step_variable: + result = str(step_variable['result']) + '\n' + yield result + node.answer_text = result + node.context['run_time'] = time.time() - node.context['start_time'] + + +def valid_reference_value(_type, value, name): + if _type == 'int': + instance_type = int | float + elif _type == 'float': + instance_type = float | int + elif _type == 'dict': + instance_type = dict + elif _type == 'array': + instance_type = list + elif _type == 'string': + instance_type = str + else: + raise Exception(500, f'字段:{name}类型:{_type} 不支持的类型') + if not isinstance(value, instance_type): + raise Exception(f'字段:{name}类型:{_type}值:{value}类型错误') + + +def convert_value(name: str, value, _type, is_required, source, node): + if not is_required and (value is None or (isinstance(value, str) and len(value) == 0)): + return None + if source == 'reference': + value = node.workflow_manage.get_reference_field( + value[0], + value[1:]) + valid_reference_value(_type, value, name) + if _type == 'int': + return int(value) + if _type == 'float': + return float(value) + return value + try: + if _type == 'int': + return int(value) + if _type == 'float': + return float(value) + if _type == 'dict': + v = json.loads(value) + if isinstance(v, dict): + return v + raise Exception("类型错误") + if _type == 'array': + v = json.loads(value) + if isinstance(v, list): + return v + raise Exception("类型错误") + return value + except Exception as e: + raise Exception(f'字段:{name}类型:{_type}值:{value}类型错误') + + +class BaseFunctionNodeNode(IFunctionNode): + def save_context(self, details, workflow_manage): + self.context['result'] = details.get('result') + if self.node_params.get('is_result', False): + self.answer_text = str(details.get('result')) + + def execute(self, input_field_list, code, **kwargs) -> NodeResult: + params = {field.get('name'): convert_value(field.get('name'), field.get('value'), field.get('type'), + field.get('is_required'), field.get('source'), self) + for field in input_field_list} + result = 
function_executor.exec_code(code, params) + self.context['params'] = params + return NodeResult({'result': result}, {}, _write_context=write_context) + + def get_details(self, index: int, **kwargs): + return { + 'name': self.node.properties.get('stepName'), + "index": index, + "result": self.context.get('result'), + "params": self.context.get('params'), + 'run_time': self.context.get('run_time'), + 'type': self.node.type, + 'status': self.status, + 'err_message': self.err_message + } diff --git a/apps/application/flow/step_node/image_generate_step_node/__init__.py b/apps/application/flow/step_node/image_generate_step_node/__init__.py new file mode 100644 index 00000000000..f3feecc9ce2 --- /dev/null +++ b/apps/application/flow/step_node/image_generate_step_node/__init__.py @@ -0,0 +1,3 @@ +# coding=utf-8 + +from .impl import * diff --git a/apps/application/flow/step_node/image_generate_step_node/i_image_generate_node.py b/apps/application/flow/step_node/image_generate_step_node/i_image_generate_node.py new file mode 100644 index 00000000000..56a214cf9b9 --- /dev/null +++ b/apps/application/flow/step_node/image_generate_step_node/i_image_generate_node.py @@ -0,0 +1,45 @@ +# coding=utf-8 + +from typing import Type + +from rest_framework import serializers + +from application.flow.i_step_node import INode, NodeResult +from common.util.field_message import ErrMessage +from django.utils.translation import gettext_lazy as _ + + +class ImageGenerateNodeSerializer(serializers.Serializer): + model_id = serializers.CharField(required=True, error_messages=ErrMessage.char(_("Model id"))) + + prompt = serializers.CharField(required=True, error_messages=ErrMessage.char(_("Prompt word (positive)"))) + + negative_prompt = serializers.CharField(required=False, error_messages=ErrMessage.char(_("Prompt word (negative)")), + allow_null=True, allow_blank=True, ) + # 多轮对话数量 + dialogue_number = serializers.IntegerField(required=False, default=0, + 
error_messages=ErrMessage.integer(_("Number of multi-round conversations"))) + + dialogue_type = serializers.CharField(required=False, default='NODE', + error_messages=ErrMessage.char(_("Conversation storage type"))) + + is_result = serializers.BooleanField(required=False, error_messages=ErrMessage.boolean(_('Whether to return content'))) + + model_params_setting = serializers.JSONField(required=False, default=dict, + error_messages=ErrMessage.json(_("Model parameter settings"))) + + +class IImageGenerateNode(INode): + type = 'image-generate-node' + + def get_node_params_serializer_class(self) -> Type[serializers.Serializer]: + return ImageGenerateNodeSerializer + + def _run(self): + return self.execute(**self.node_params_serializer.data, **self.flow_params_serializer.data) + + def execute(self, model_id, prompt, negative_prompt, dialogue_number, dialogue_type, history_chat_record, chat_id, + model_params_setting, + chat_record_id, + **kwargs) -> NodeResult: + pass diff --git a/apps/application/flow/step_node/image_generate_step_node/impl/__init__.py b/apps/application/flow/step_node/image_generate_step_node/impl/__init__.py new file mode 100644 index 00000000000..14a21a9159c --- /dev/null +++ b/apps/application/flow/step_node/image_generate_step_node/impl/__init__.py @@ -0,0 +1,3 @@ +# coding=utf-8 + +from .base_image_generate_node import BaseImageGenerateNode diff --git a/apps/application/flow/step_node/image_generate_step_node/impl/base_image_generate_node.py b/apps/application/flow/step_node/image_generate_step_node/impl/base_image_generate_node.py new file mode 100644 index 00000000000..16423eafd61 --- /dev/null +++ b/apps/application/flow/step_node/image_generate_step_node/impl/base_image_generate_node.py @@ -0,0 +1,122 @@ +# coding=utf-8 +from functools import reduce +from typing import List + +import requests +from langchain_core.messages import BaseMessage, HumanMessage, AIMessage + +from application.flow.i_step_node import NodeResult +from 
application.flow.step_node.image_generate_step_node.i_image_generate_node import IImageGenerateNode +from common.util.common import bytes_to_uploaded_file +from dataset.serializers.file_serializers import FileSerializer +from setting.models_provider.tools import get_model_instance_by_model_user_id + + +class BaseImageGenerateNode(IImageGenerateNode): + def save_context(self, details, workflow_manage): + self.context['answer'] = details.get('answer') + self.context['question'] = details.get('question') + if self.node_params.get('is_result', False): + self.answer_text = details.get('answer') + + def execute(self, model_id, prompt, negative_prompt, dialogue_number, dialogue_type, history_chat_record, chat_id, + model_params_setting, + chat_record_id, + **kwargs) -> NodeResult: + print(model_params_setting) + application = self.workflow_manage.work_flow_post_handler.chat_info.application + tti_model = get_model_instance_by_model_user_id(model_id, self.flow_params_serializer.data.get('user_id'), + **model_params_setting) + history_message = self.get_history_message(history_chat_record, dialogue_number) + self.context['history_message'] = history_message + question = self.generate_prompt_question(prompt) + self.context['question'] = question + message_list = self.generate_message_list(question, history_message) + self.context['message_list'] = message_list + self.context['dialogue_type'] = dialogue_type + print(message_list) + image_urls = tti_model.generate_image(question, negative_prompt) + # 保存图片 + file_urls = [] + for image_url in image_urls: + file_name = 'generated_image.png' + file = bytes_to_uploaded_file(requests.get(image_url).content, file_name) + meta = { + 'debug': False if application.id else True, + 'chat_id': chat_id, + 'application_id': str(application.id) if application.id else None, + } + file_url = FileSerializer(data={'file': file, 'meta': meta}).upload() + file_urls.append(file_url) + self.context['image_list'] = [{'file_id': path.split('/')[-1], 
'url': path} for path in file_urls] + answer = ' '.join([f"![Image]({path})" for path in file_urls]) + return NodeResult({'answer': answer, 'chat_model': tti_model, 'message_list': message_list, + 'image': [{'file_id': path.split('/')[-1], 'url': path} for path in file_urls], + 'history_message': history_message, 'question': question}, {}) + + def generate_history_ai_message(self, chat_record): + for val in chat_record.details.values(): + if self.node.id == val['node_id'] and 'image_list' in val: + if val['dialogue_type'] == 'WORKFLOW': + return chat_record.get_ai_message() + image_list = val['image_list'] + return AIMessage(content=[ + *[{'type': 'image_url', 'image_url': {'url': f'{file_url}'}} for file_url in image_list] + ]) + return chat_record.get_ai_message() + + def get_history_message(self, history_chat_record, dialogue_number): + start_index = len(history_chat_record) - dialogue_number + history_message = reduce(lambda x, y: [*x, *y], [ + [self.generate_history_human_message(history_chat_record[index]), + self.generate_history_ai_message(history_chat_record[index])] + for index in + range(start_index if start_index > 0 else 0, len(history_chat_record))], []) + return history_message + + def generate_history_human_message(self, chat_record): + + for data in chat_record.details.values(): + if self.node.id == data['node_id'] and 'image_list' in data: + image_list = data['image_list'] + if len(image_list) == 0 or data['dialogue_type'] == 'WORKFLOW': + return HumanMessage(content=chat_record.problem_text) + return HumanMessage(content=data['question']) + return HumanMessage(content=chat_record.problem_text) + + def generate_prompt_question(self, prompt): + return self.workflow_manage.generate_prompt(prompt) + + def generate_message_list(self, question: str, history_message): + return [ + *history_message, + question + ] + + @staticmethod + def reset_message_list(message_list: List[BaseMessage], answer_text): + result = [{'role': 'user' if isinstance(message, 
HumanMessage) else 'ai', 'content': message.content} for + message + in + message_list] + result.append({'role': 'ai', 'content': answer_text}) + return result + + def get_details(self, index: int, **kwargs): + return { + 'name': self.node.properties.get('stepName'), + "index": index, + 'run_time': self.context.get('run_time'), + 'history_message': [{'content': message.content, 'role': message.type} for message in + (self.context.get('history_message') if self.context.get( + 'history_message') is not None else [])], + 'question': self.context.get('question'), + 'answer': self.context.get('answer'), + 'type': self.node.type, + 'message_tokens': self.context.get('message_tokens'), + 'answer_tokens': self.context.get('answer_tokens'), + 'status': self.status, + 'err_message': self.err_message, + 'image_list': self.context.get('image_list'), + 'dialogue_type': self.context.get('dialogue_type') + } diff --git a/apps/application/flow/step_node/image_understand_step_node/__init__.py b/apps/application/flow/step_node/image_understand_step_node/__init__.py new file mode 100644 index 00000000000..f3feecc9ce2 --- /dev/null +++ b/apps/application/flow/step_node/image_understand_step_node/__init__.py @@ -0,0 +1,3 @@ +# coding=utf-8 + +from .impl import * diff --git a/apps/application/flow/step_node/image_understand_step_node/i_image_understand_node.py b/apps/application/flow/step_node/image_understand_step_node/i_image_understand_node.py new file mode 100644 index 00000000000..5ef4c101708 --- /dev/null +++ b/apps/application/flow/step_node/image_understand_step_node/i_image_understand_node.py @@ -0,0 +1,46 @@ +# coding=utf-8 + +from typing import Type + +from rest_framework import serializers + +from application.flow.i_step_node import INode, NodeResult +from common.util.field_message import ErrMessage +from django.utils.translation import gettext_lazy as _ + + +class ImageUnderstandNodeSerializer(serializers.Serializer): + model_id = serializers.CharField(required=True, 
error_messages=ErrMessage.char(_("Model id"))) + system = serializers.CharField(required=False, allow_blank=True, allow_null=True, + error_messages=ErrMessage.char(_("Role Setting"))) + prompt = serializers.CharField(required=True, error_messages=ErrMessage.char(_("Prompt word"))) + # 多轮对话数量 + dialogue_number = serializers.IntegerField(required=True, error_messages=ErrMessage.integer(_("Number of multi-round conversations"))) + + dialogue_type = serializers.CharField(required=True, error_messages=ErrMessage.char(_("Conversation storage type"))) + + is_result = serializers.BooleanField(required=False, error_messages=ErrMessage.boolean(_('Whether to return content'))) + + image_list = serializers.ListField(required=False, error_messages=ErrMessage.list(_("picture"))) + + model_params_setting = serializers.JSONField(required=False, default=dict, + error_messages=ErrMessage.json(_("Model parameter settings"))) + + +class IImageUnderstandNode(INode): + type = 'image-understand-node' + + def get_node_params_serializer_class(self) -> Type[serializers.Serializer]: + return ImageUnderstandNodeSerializer + + def _run(self): + res = self.workflow_manage.get_reference_field(self.node_params_serializer.data.get('image_list')[0], + self.node_params_serializer.data.get('image_list')[1:]) + return self.execute(image=res, **self.node_params_serializer.data, **self.flow_params_serializer.data) + + def execute(self, model_id, system, prompt, dialogue_number, dialogue_type, history_chat_record, stream, chat_id, + model_params_setting, + chat_record_id, + image, + **kwargs) -> NodeResult: + pass diff --git a/apps/application/flow/step_node/image_understand_step_node/impl/__init__.py b/apps/application/flow/step_node/image_understand_step_node/impl/__init__.py new file mode 100644 index 00000000000..ba251283921 --- /dev/null +++ b/apps/application/flow/step_node/image_understand_step_node/impl/__init__.py @@ -0,0 +1,3 @@ +# coding=utf-8 + +from .base_image_understand_node import 
BaseImageUnderstandNode diff --git a/apps/application/flow/step_node/image_understand_step_node/impl/base_image_understand_node.py b/apps/application/flow/step_node/image_understand_step_node/impl/base_image_understand_node.py new file mode 100644 index 00000000000..44765bc4f93 --- /dev/null +++ b/apps/application/flow/step_node/image_understand_step_node/impl/base_image_understand_node.py @@ -0,0 +1,224 @@ +# coding=utf-8 +import base64 +import os +import time +from functools import reduce +from typing import List, Dict + +from django.db.models import QuerySet +from langchain_core.messages import BaseMessage, HumanMessage, SystemMessage, AIMessage + +from application.flow.i_step_node import NodeResult, INode +from application.flow.step_node.image_understand_step_node.i_image_understand_node import IImageUnderstandNode +from dataset.models import File +from setting.models_provider.tools import get_model_instance_by_model_user_id +from imghdr import what + + +def _write_context(node_variable: Dict, workflow_variable: Dict, node: INode, workflow, answer: str): + chat_model = node_variable.get('chat_model') + message_tokens = node_variable['usage_metadata']['output_tokens'] if 'usage_metadata' in node_variable else 0 + answer_tokens = chat_model.get_num_tokens(answer) + node.context['message_tokens'] = message_tokens + node.context['answer_tokens'] = answer_tokens + node.context['answer'] = answer + node.context['history_message'] = node_variable['history_message'] + node.context['question'] = node_variable['question'] + node.context['run_time'] = time.time() - node.context['start_time'] + if workflow.is_result(node, NodeResult(node_variable, workflow_variable)): + node.answer_text = answer + + +def write_context_stream(node_variable: Dict, workflow_variable: Dict, node: INode, workflow): + """ + 写入上下文数据 (流式) + @param node_variable: 节点数据 + @param workflow_variable: 全局数据 + @param node: 节点 + @param workflow: 工作流管理器 + """ + response = node_variable.get('result') + answer 
= '' + for chunk in response: + answer += chunk.content + yield chunk.content + _write_context(node_variable, workflow_variable, node, workflow, answer) + + +def write_context(node_variable: Dict, workflow_variable: Dict, node: INode, workflow): + """ + 写入上下文数据 + @param node_variable: 节点数据 + @param workflow_variable: 全局数据 + @param node: 节点实例对象 + @param workflow: 工作流管理器 + """ + response = node_variable.get('result') + answer = response.content + _write_context(node_variable, workflow_variable, node, workflow, answer) + + +def file_id_to_base64(file_id: str): + file = QuerySet(File).filter(id=file_id).first() + file_bytes = file.get_byte() + base64_image = base64.b64encode(file_bytes).decode("utf-8") + return [base64_image, what(None, file_bytes.tobytes())] + + +class BaseImageUnderstandNode(IImageUnderstandNode): + def save_context(self, details, workflow_manage): + self.context['answer'] = details.get('answer') + self.context['question'] = details.get('question') + if self.node_params.get('is_result', False): + self.answer_text = details.get('answer') + + def execute(self, model_id, system, prompt, dialogue_number, dialogue_type, history_chat_record, stream, chat_id, + model_params_setting, + chat_record_id, + image, + **kwargs) -> NodeResult: + # 处理不正确的参数 + if image is None or not isinstance(image, list): + image = [] + print(model_params_setting) + image_model = get_model_instance_by_model_user_id(model_id, self.flow_params_serializer.data.get('user_id'), **model_params_setting) + # 执行详情中的历史消息不需要图片内容 + history_message = self.get_history_message_for_details(history_chat_record, dialogue_number) + self.context['history_message'] = history_message + question = self.generate_prompt_question(prompt) + self.context['question'] = question.content + # 生成消息列表, 真实的history_message + message_list = self.generate_message_list(image_model, system, prompt, + self.get_history_message(history_chat_record, dialogue_number), image) + self.context['message_list'] = message_list + 
self.context['image_list'] = image + self.context['dialogue_type'] = dialogue_type + if stream: + r = image_model.stream(message_list) + return NodeResult({'result': r, 'chat_model': image_model, 'message_list': message_list, + 'history_message': history_message, 'question': question.content}, {}, + _write_context=write_context_stream) + else: + r = image_model.invoke(message_list) + return NodeResult({'result': r, 'chat_model': image_model, 'message_list': message_list, + 'history_message': history_message, 'question': question.content}, {}, + _write_context=write_context) + + def get_history_message_for_details(self, history_chat_record, dialogue_number): + start_index = len(history_chat_record) - dialogue_number + history_message = reduce(lambda x, y: [*x, *y], [ + [self.generate_history_human_message_for_details(history_chat_record[index]), + self.generate_history_ai_message(history_chat_record[index])] + for index in + range(start_index if start_index > 0 else 0, len(history_chat_record))], []) + return history_message + + def generate_history_ai_message(self, chat_record): + for val in chat_record.details.values(): + if self.node.id == val['node_id'] and 'image_list' in val: + if val['dialogue_type'] == 'WORKFLOW': + return chat_record.get_ai_message() + return AIMessage(content=val['answer']) + return chat_record.get_ai_message() + + def generate_history_human_message_for_details(self, chat_record): + for data in chat_record.details.values(): + if self.node.id == data['node_id'] and 'image_list' in data: + image_list = data['image_list'] + if len(image_list) == 0 or data['dialogue_type'] == 'WORKFLOW': + return HumanMessage(content=chat_record.problem_text) + file_id_list = [image.get('file_id') for image in image_list] + return HumanMessage(content=[ + {'type': 'text', 'text': data['question']}, + *[{'type': 'image_url', 'image_url': {'url': f'/api/file/{file_id}'}} for file_id in file_id_list] + + ]) + return HumanMessage(content=chat_record.problem_text) 
+ + def get_history_message(self, history_chat_record, dialogue_number): + start_index = len(history_chat_record) - dialogue_number + history_message = reduce(lambda x, y: [*x, *y], [ + [self.generate_history_human_message(history_chat_record[index]), + self.generate_history_ai_message(history_chat_record[index])] + for index in + range(start_index if start_index > 0 else 0, len(history_chat_record))], []) + return history_message + + def generate_history_human_message(self, chat_record): + + for data in chat_record.details.values(): + if self.node.id == data['node_id'] and 'image_list' in data: + image_list = data['image_list'] + if len(image_list) == 0 or data['dialogue_type'] == 'WORKFLOW': + return HumanMessage(content=chat_record.problem_text) + image_base64_list = [file_id_to_base64(image.get('file_id')) for image in image_list] + return HumanMessage( + content=[ + {'type': 'text', 'text': data['question']}, + *[{'type': 'image_url', 'image_url': {'url': f'data:image/{base64_image[1]};base64,{base64_image[0]}'}} for + base64_image in image_base64_list] + ]) + return HumanMessage(content=chat_record.problem_text) + + def generate_prompt_question(self, prompt): + return HumanMessage(self.workflow_manage.generate_prompt(prompt)) + + def generate_message_list(self, image_model, system: str, prompt: str, history_message, image): + if image is not None and len(image) > 0: + # 处理多张图片 + images = [] + for img in image: + file_id = img['file_id'] + file = QuerySet(File).filter(id=file_id).first() + image_bytes = file.get_byte() + base64_image = base64.b64encode(image_bytes).decode("utf-8") + image_format = what(None, image_bytes.tobytes()) + images.append({'type': 'image_url', 'image_url': {'url': f'data:image/{image_format};base64,{base64_image}'}}) + messages = [HumanMessage( + content=[ + {'type': 'text', 'text': self.workflow_manage.generate_prompt(prompt)}, + *images + ])] + else: + messages = [HumanMessage(self.workflow_manage.generate_prompt(prompt))] + + if 
system is not None and len(system) > 0: + return [ + SystemMessage(self.workflow_manage.generate_prompt(system)), + *history_message, + *messages + ] + else: + return [ + *history_message, + *messages + ] + + @staticmethod + def reset_message_list(message_list: List[BaseMessage], answer_text): + result = [{'role': 'user' if isinstance(message, HumanMessage) else 'ai', 'content': message.content} for + message + in + message_list] + result.append({'role': 'ai', 'content': answer_text}) + return result + + def get_details(self, index: int, **kwargs): + return { + 'name': self.node.properties.get('stepName'), + "index": index, + 'run_time': self.context.get('run_time'), + 'system': self.node_params.get('system'), + 'history_message': [{'content': message.content, 'role': message.type} for message in + (self.context.get('history_message') if self.context.get( + 'history_message') is not None else [])], + 'question': self.context.get('question'), + 'answer': self.context.get('answer'), + 'type': self.node.type, + 'message_tokens': self.context.get('message_tokens'), + 'answer_tokens': self.context.get('answer_tokens'), + 'status': self.status, + 'err_message': self.err_message, + 'image_list': self.context.get('image_list'), + 'dialogue_type': self.context.get('dialogue_type') + } diff --git a/apps/application/flow/step_node/mcp_node/__init__.py b/apps/application/flow/step_node/mcp_node/__init__.py new file mode 100644 index 00000000000..f3feecc9ce2 --- /dev/null +++ b/apps/application/flow/step_node/mcp_node/__init__.py @@ -0,0 +1,3 @@ +# coding=utf-8 + +from .impl import * diff --git a/apps/application/flow/step_node/mcp_node/i_mcp_node.py b/apps/application/flow/step_node/mcp_node/i_mcp_node.py new file mode 100644 index 00000000000..94cb4da7729 --- /dev/null +++ b/apps/application/flow/step_node/mcp_node/i_mcp_node.py @@ -0,0 +1,35 @@ +# coding=utf-8 + +from typing import Type + +from rest_framework import serializers + +from application.flow.i_step_node import 
INode, NodeResult +from common.util.field_message import ErrMessage +from django.utils.translation import gettext_lazy as _ + + +class McpNodeSerializer(serializers.Serializer): + mcp_servers = serializers.JSONField(required=True, + error_messages=ErrMessage.char(_("Mcp servers"))) + + mcp_server = serializers.CharField(required=True, + error_messages=ErrMessage.char(_("Mcp server"))) + + mcp_tool = serializers.CharField(required=True, error_messages=ErrMessage.char(_("Mcp tool"))) + + tool_params = serializers.DictField(required=True, + error_messages=ErrMessage.char(_("Tool parameters"))) + + +class IMcpNode(INode): + type = 'mcp-node' + + def get_node_params_serializer_class(self) -> Type[serializers.Serializer]: + return McpNodeSerializer + + def _run(self): + return self.execute(**self.node_params_serializer.data, **self.flow_params_serializer.data) + + def execute(self, mcp_servers, mcp_server, mcp_tool, tool_params, **kwargs) -> NodeResult: + pass diff --git a/apps/application/flow/step_node/mcp_node/impl/__init__.py b/apps/application/flow/step_node/mcp_node/impl/__init__.py new file mode 100644 index 00000000000..8c9a5ee197c --- /dev/null +++ b/apps/application/flow/step_node/mcp_node/impl/__init__.py @@ -0,0 +1,3 @@ +# coding=utf-8 + +from .base_mcp_node import BaseMcpNode diff --git a/apps/application/flow/step_node/mcp_node/impl/base_mcp_node.py b/apps/application/flow/step_node/mcp_node/impl/base_mcp_node.py new file mode 100644 index 00000000000..e49ef7019f6 --- /dev/null +++ b/apps/application/flow/step_node/mcp_node/impl/base_mcp_node.py @@ -0,0 +1,61 @@ +# coding=utf-8 +import asyncio +import json +from typing import List + +from langchain_mcp_adapters.client import MultiServerMCPClient + +from application.flow.i_step_node import NodeResult +from application.flow.step_node.mcp_node.i_mcp_node import IMcpNode + + +class BaseMcpNode(IMcpNode): + def save_context(self, details, workflow_manage): + self.context['result'] = details.get('result') + 
self.context['tool_params'] = details.get('tool_params') + self.context['mcp_tool'] = details.get('mcp_tool') + if self.node_params.get('is_result', False): + self.answer_text = details.get('result') + + def execute(self, mcp_servers, mcp_server, mcp_tool, tool_params, **kwargs) -> NodeResult: + servers = json.loads(mcp_servers) + params = json.loads(json.dumps(tool_params)) + params = self.handle_variables(params) + + async def call_tool(s, session, t, a): + async with MultiServerMCPClient(s) as client: + s = await client.sessions[session].call_tool(t, a) + return s + + res = asyncio.run(call_tool(servers, mcp_server, mcp_tool, params)) + return NodeResult( + {'result': [content.text for content in res.content], 'tool_params': params, 'mcp_tool': mcp_tool}, {}) + + def handle_variables(self, tool_params): + # 处理参数中的变量 + for k, v in tool_params.items(): + if type(v) == str: + tool_params[k] = self.workflow_manage.generate_prompt(tool_params[k]) + if type(v) == dict: + self.handle_variables(v) + if (type(v) == list) and (type(v[0]) == str): + tool_params[k] = self.get_reference_content(v) + return tool_params + + def get_reference_content(self, fields: List[str]): + return str(self.workflow_manage.get_reference_field( + fields[0], + fields[1:])) + + def get_details(self, index: int, **kwargs): + return { + 'name': self.node.properties.get('stepName'), + "index": index, + 'run_time': self.context.get('run_time'), + 'status': self.status, + 'err_message': self.err_message, + 'type': self.node.type, + 'mcp_tool': self.context.get('mcp_tool'), + 'tool_params': self.context.get('tool_params'), + 'result': self.context.get('result'), + } diff --git a/apps/application/flow/step_node/question_node/__init__.py b/apps/application/flow/step_node/question_node/__init__.py new file mode 100644 index 00000000000..98a1afcd904 --- /dev/null +++ b/apps/application/flow/step_node/question_node/__init__.py @@ -0,0 +1,9 @@ +# coding=utf-8 +""" + @project: maxkb + @Author:虎 + @file: 
__init__.py + @date:2024/6/11 15:30 + @desc: +""" +from .impl import * diff --git a/apps/application/flow/step_node/question_node/i_question_node.py b/apps/application/flow/step_node/question_node/i_question_node.py new file mode 100644 index 00000000000..57898bf2206 --- /dev/null +++ b/apps/application/flow/step_node/question_node/i_question_node.py @@ -0,0 +1,42 @@ +# coding=utf-8 +""" + @project: maxkb + @Author:虎 + @file: i_chat_node.py + @date:2024/6/4 13:58 + @desc: +""" +from typing import Type + +from rest_framework import serializers + +from application.flow.i_step_node import INode, NodeResult +from common.util.field_message import ErrMessage +from django.utils.translation import gettext_lazy as _ + + +class QuestionNodeSerializer(serializers.Serializer): + model_id = serializers.CharField(required=True, error_messages=ErrMessage.char(_("Model id"))) + system = serializers.CharField(required=False, allow_blank=True, allow_null=True, + error_messages=ErrMessage.char(_("Role Setting"))) + prompt = serializers.CharField(required=True, error_messages=ErrMessage.char(_("Prompt word"))) + # 多轮对话数量 + dialogue_number = serializers.IntegerField(required=True, error_messages=ErrMessage.integer(_("Number of multi-round conversations"))) + + is_result = serializers.BooleanField(required=False, error_messages=ErrMessage.boolean(_('Whether to return content'))) + model_params_setting = serializers.DictField(required=False, error_messages=ErrMessage.integer(_("Model parameter settings"))) + + +class IQuestionNode(INode): + type = 'question-node' + + def get_node_params_serializer_class(self) -> Type[serializers.Serializer]: + return QuestionNodeSerializer + + def _run(self): + return self.execute(**self.node_params_serializer.data, **self.flow_params_serializer.data) + + def execute(self, model_id, system, prompt, dialogue_number, history_chat_record, stream, chat_id, chat_record_id, + model_params_setting=None, + **kwargs) -> NodeResult: + pass diff --git 
a/apps/application/flow/step_node/question_node/impl/__init__.py b/apps/application/flow/step_node/question_node/impl/__init__.py new file mode 100644 index 00000000000..d85aa8724ac --- /dev/null +++ b/apps/application/flow/step_node/question_node/impl/__init__.py @@ -0,0 +1,9 @@ +# coding=utf-8 +""" + @project: maxkb + @Author:虎 + @file: __init__.py + @date:2024/6/11 15:35 + @desc: +""" +from .base_question_node import BaseQuestionNode diff --git a/apps/application/flow/step_node/question_node/impl/base_question_node.py b/apps/application/flow/step_node/question_node/impl/base_question_node.py new file mode 100644 index 00000000000..e1fd5b86069 --- /dev/null +++ b/apps/application/flow/step_node/question_node/impl/base_question_node.py @@ -0,0 +1,159 @@ +# coding=utf-8 +""" + @project: maxkb + @Author:虎 + @file: base_question_node.py + @date:2024/6/4 14:30 + @desc: +""" +import re +import time +from functools import reduce +from typing import List, Dict + +from django.db.models import QuerySet +from langchain.schema import HumanMessage, SystemMessage +from langchain_core.messages import BaseMessage + +from application.flow.i_step_node import NodeResult, INode +from application.flow.step_node.question_node.i_question_node import IQuestionNode +from setting.models import Model +from setting.models_provider import get_model_credential +from setting.models_provider.tools import get_model_instance_by_model_user_id + + +def _write_context(node_variable: Dict, workflow_variable: Dict, node: INode, workflow, answer: str): + chat_model = node_variable.get('chat_model') + message_tokens = chat_model.get_num_tokens_from_messages(node_variable.get('message_list')) + answer_tokens = chat_model.get_num_tokens(answer) + node.context['message_tokens'] = message_tokens + node.context['answer_tokens'] = answer_tokens + node.context['answer'] = answer + node.context['history_message'] = node_variable['history_message'] + node.context['question'] = node_variable['question'] + 
node.context['run_time'] = time.time() - node.context['start_time'] + if workflow.is_result(node, NodeResult(node_variable, workflow_variable)): + node.answer_text = answer + + +def write_context_stream(node_variable: Dict, workflow_variable: Dict, node: INode, workflow): + """ + 写入上下文数据 (流式) + @param node_variable: 节点数据 + @param workflow_variable: 全局数据 + @param node: 节点 + @param workflow: 工作流管理器 + """ + response = node_variable.get('result') + answer = '' + for chunk in response: + answer += chunk.content + yield chunk.content + _write_context(node_variable, workflow_variable, node, workflow, answer) + + +def write_context(node_variable: Dict, workflow_variable: Dict, node: INode, workflow): + """ + 写入上下文数据 + @param node_variable: 节点数据 + @param workflow_variable: 全局数据 + @param node: 节点实例对象 + @param workflow: 工作流管理器 + """ + response = node_variable.get('result') + answer = response.content + _write_context(node_variable, workflow_variable, node, workflow, answer) + + +def get_default_model_params_setting(model_id): + model = QuerySet(Model).filter(id=model_id).first() + credential = get_model_credential(model.provider, model.model_type, model.model_name) + model_params_setting = credential.get_model_params_setting_form( + model.model_name).get_default_form_data() + return model_params_setting + + +class BaseQuestionNode(IQuestionNode): + def save_context(self, details, workflow_manage): + self.context['run_time'] = details.get('run_time') + self.context['question'] = details.get('question') + self.context['answer'] = details.get('answer') + self.context['message_tokens'] = details.get('message_tokens') + self.context['answer_tokens'] = details.get('answer_tokens') + if self.node_params.get('is_result', False): + self.answer_text = details.get('answer') + + def execute(self, model_id, system, prompt, dialogue_number, history_chat_record, stream, chat_id, chat_record_id, + model_params_setting=None, + **kwargs) -> NodeResult: + if model_params_setting is None: + 
model_params_setting = get_default_model_params_setting(model_id) + chat_model = get_model_instance_by_model_user_id(model_id, self.flow_params_serializer.data.get('user_id'), + **model_params_setting) + history_message = self.get_history_message(history_chat_record, dialogue_number) + self.context['history_message'] = history_message + question = self.generate_prompt_question(prompt) + self.context['question'] = question.content + system = self.workflow_manage.generate_prompt(system) + self.context['system'] = system + message_list = self.generate_message_list(system, prompt, history_message) + self.context['message_list'] = message_list + if stream: + r = chat_model.stream(message_list) + return NodeResult({'result': r, 'chat_model': chat_model, 'message_list': message_list, + 'history_message': history_message, 'question': question.content}, {}, + _write_context=write_context_stream) + else: + r = chat_model.invoke(message_list) + return NodeResult({'result': r, 'chat_model': chat_model, 'message_list': message_list, + 'history_message': history_message, 'question': question.content}, {}, + _write_context=write_context) + + @staticmethod + def get_history_message(history_chat_record, dialogue_number): + start_index = len(history_chat_record) - dialogue_number + history_message = reduce(lambda x, y: [*x, *y], [ + [history_chat_record[index].get_human_message(), history_chat_record[index].get_ai_message()] + for index in + range(start_index if start_index > 0 else 0, len(history_chat_record))], []) + for message in history_message: + if isinstance(message.content, str): + message.content = re.sub('[\d\D]*?<\/form_rander>', '', message.content) + return history_message + + def generate_prompt_question(self, prompt): + return HumanMessage(self.workflow_manage.generate_prompt(prompt)) + + def generate_message_list(self, system: str, prompt: str, history_message): + if system is None or len(system) == 0: + return 
[SystemMessage(self.workflow_manage.generate_prompt(system)), *history_message, + HumanMessage(self.workflow_manage.generate_prompt(prompt))] + else: + return [*history_message, HumanMessage(self.workflow_manage.generate_prompt(prompt))] + + @staticmethod + def reset_message_list(message_list: List[BaseMessage], answer_text): + result = [{'role': 'user' if isinstance(message, HumanMessage) else 'ai', 'content': message.content} for + message + in + message_list] + result.append({'role': 'ai', 'content': answer_text}) + return result + + def get_details(self, index: int, **kwargs): + return { + 'name': self.node.properties.get('stepName'), + "index": index, + 'run_time': self.context.get('run_time'), + 'system': self.context.get('system'), + 'history_message': [{'content': message.content, 'role': message.type} for message in + (self.context.get('history_message') if self.context.get( + 'history_message') is not None else [])], + 'question': self.context.get('question'), + 'answer': self.context.get('answer'), + 'type': self.node.type, + 'message_tokens': self.context.get('message_tokens'), + 'answer_tokens': self.context.get('answer_tokens'), + 'status': self.status, + 'err_message': self.err_message + } diff --git a/apps/application/flow/step_node/reranker_node/__init__.py b/apps/application/flow/step_node/reranker_node/__init__.py new file mode 100644 index 00000000000..881d0f8a393 --- /dev/null +++ b/apps/application/flow/step_node/reranker_node/__init__.py @@ -0,0 +1,9 @@ +# coding=utf-8 +""" + @project: MaxKB + @Author:虎 + @file: __init__.py + @date:2024/9/4 11:37 + @desc: +""" +from .impl import * diff --git a/apps/application/flow/step_node/reranker_node/i_reranker_node.py b/apps/application/flow/step_node/reranker_node/i_reranker_node.py new file mode 100644 index 00000000000..3b95e4dd632 --- /dev/null +++ b/apps/application/flow/step_node/reranker_node/i_reranker_node.py @@ -0,0 +1,60 @@ +# coding=utf-8 +""" + @project: MaxKB + @Author:虎 + @file: 
i_reranker_node.py + @date:2024/9/4 10:40 + @desc: +""" +from typing import Type + +from rest_framework import serializers + +from application.flow.i_step_node import INode, NodeResult +from common.util.field_message import ErrMessage +from django.utils.translation import gettext_lazy as _ + + +class RerankerSettingSerializer(serializers.Serializer): + # 需要查询的条数 + top_n = serializers.IntegerField(required=True, + error_messages=ErrMessage.integer(_("Reference segment number"))) + # 相似度 0-1之间 + similarity = serializers.FloatField(required=True, max_value=2, min_value=0, + error_messages=ErrMessage.float(_("Reference segment number"))) + max_paragraph_char_number = serializers.IntegerField(required=True, + error_messages=ErrMessage.float(_("Maximum number of words in a quoted segment"))) + + +class RerankerStepNodeSerializer(serializers.Serializer): + reranker_setting = RerankerSettingSerializer(required=True) + + question_reference_address = serializers.ListField(required=True) + reranker_model_id = serializers.UUIDField(required=True) + reranker_reference_list = serializers.ListField(required=True, child=serializers.ListField(required=True)) + + def is_valid(self, *, raise_exception=False): + super().is_valid(raise_exception=True) + + +class IRerankerNode(INode): + type = 'reranker-node' + + def get_node_params_serializer_class(self) -> Type[serializers.Serializer]: + return RerankerStepNodeSerializer + + def _run(self): + question = self.workflow_manage.get_reference_field( + self.node_params_serializer.data.get('question_reference_address')[0], + self.node_params_serializer.data.get('question_reference_address')[1:]) + reranker_list = [self.workflow_manage.get_reference_field( + reference[0], + reference[1:]) for reference in + self.node_params_serializer.data.get('reranker_reference_list')] + return self.execute(**self.node_params_serializer.data, question=str(question), + + reranker_list=reranker_list) + + def execute(self, question, reranker_setting, 
reranker_list, reranker_model_id, + **kwargs) -> NodeResult: + pass diff --git a/apps/application/flow/step_node/reranker_node/impl/__init__.py b/apps/application/flow/step_node/reranker_node/impl/__init__.py new file mode 100644 index 00000000000..ef5ca80585b --- /dev/null +++ b/apps/application/flow/step_node/reranker_node/impl/__init__.py @@ -0,0 +1,9 @@ +# coding=utf-8 +""" + @project: MaxKB + @Author:虎 + @file: __init__.py + @date:2024/9/4 11:39 + @desc: +""" +from .base_reranker_node import * diff --git a/apps/application/flow/step_node/reranker_node/impl/base_reranker_node.py b/apps/application/flow/step_node/reranker_node/impl/base_reranker_node.py new file mode 100644 index 00000000000..ee92b88a52c --- /dev/null +++ b/apps/application/flow/step_node/reranker_node/impl/base_reranker_node.py @@ -0,0 +1,106 @@ +# coding=utf-8 +""" + @project: MaxKB + @Author:虎 + @file: base_reranker_node.py + @date:2024/9/4 11:41 + @desc: +""" +from typing import List + +from langchain_core.documents import Document + +from application.flow.i_step_node import NodeResult +from application.flow.step_node.reranker_node.i_reranker_node import IRerankerNode +from setting.models_provider.tools import get_model_instance_by_model_user_id + + +def merge_reranker_list(reranker_list, result=None): + if result is None: + result = [] + for document in reranker_list: + if isinstance(document, list): + merge_reranker_list(document, result) + elif isinstance(document, dict): + content = document.get('title', '') + document.get('content', '') + title = document.get("title") + dataset_name = document.get("dataset_name") + document_name = document.get('document_name') + result.append( + Document(page_content=str(document) if len(content) == 0 else content, + metadata={'title': title, 'dataset_name': dataset_name, 'document_name': document_name})) + else: + result.append(Document(page_content=str(document), metadata={})) + return result + + +def filter_result(document_list: List[Document], 
max_paragraph_char_number, top_n, similarity): + use_len = 0 + result = [] + for index in range(len(document_list)): + document = document_list[index] + if use_len >= max_paragraph_char_number or index >= top_n or document.metadata.get( + 'relevance_score') < similarity: + break + content = document.page_content[0:max_paragraph_char_number - use_len] + use_len = use_len + len(content) + result.append({'page_content': content, 'metadata': document.metadata}) + return result + + +def reset_result_list(result_list: List[Document], document_list: List[Document]): + r = [] + document_list = document_list.copy() + for result in result_list: + filter_result_list = [document for document in document_list if document.page_content == result.page_content] + if len(filter_result_list) > 0: + item = filter_result_list[0] + document_list.remove(item) + r.append(Document(page_content=item.page_content, + metadata={**item.metadata, 'relevance_score': result.metadata.get('relevance_score')})) + else: + r.append(result) + return r + + +class BaseRerankerNode(IRerankerNode): + def save_context(self, details, workflow_manage): + self.context['document_list'] = details.get('document_list', []) + self.context['question'] = details.get('question') + self.context['run_time'] = details.get('run_time') + self.context['result_list'] = details.get('result_list') + self.context['result'] = details.get('result') + + def execute(self, question, reranker_setting, reranker_list, reranker_model_id, + **kwargs) -> NodeResult: + documents = merge_reranker_list(reranker_list) + top_n = reranker_setting.get('top_n', 3) + self.context['document_list'] = [{'page_content': document.page_content, 'metadata': document.metadata} for + document in documents] + self.context['question'] = question + reranker_model = get_model_instance_by_model_user_id(reranker_model_id, + self.flow_params_serializer.data.get('user_id'), + top_n=top_n) + result = reranker_model.compress_documents( + documents, + question) + 
similarity = reranker_setting.get('similarity', 0.6) + max_paragraph_char_number = reranker_setting.get('max_paragraph_char_number', 5000) + result = reset_result_list(result, documents) + r = filter_result(result, max_paragraph_char_number, top_n, similarity) + return NodeResult({'result_list': r, 'result': ''.join([item.get('page_content') for item in r])}, {}) + + def get_details(self, index: int, **kwargs): + return { + 'name': self.node.properties.get('stepName'), + "index": index, + 'document_list': self.context.get('document_list'), + "question": self.context.get('question'), + 'run_time': self.context.get('run_time'), + 'type': self.node.type, + 'reranker_setting': self.node_params_serializer.data.get('reranker_setting'), + 'result_list': self.context.get('result_list'), + 'result': self.context.get('result'), + 'status': self.status, + 'err_message': self.err_message + } diff --git a/apps/application/flow/step_node/search_dataset_node/__init__.py b/apps/application/flow/step_node/search_dataset_node/__init__.py new file mode 100644 index 00000000000..98a1afcd904 --- /dev/null +++ b/apps/application/flow/step_node/search_dataset_node/__init__.py @@ -0,0 +1,9 @@ +# coding=utf-8 +""" + @project: maxkb + @Author:虎 + @file: __init__.py + @date:2024/6/11 15:30 + @desc: +""" +from .impl import * diff --git a/apps/application/flow/step_node/search_dataset_node/i_search_dataset_node.py b/apps/application/flow/step_node/search_dataset_node/i_search_dataset_node.py new file mode 100644 index 00000000000..8f15c7a3203 --- /dev/null +++ b/apps/application/flow/step_node/search_dataset_node/i_search_dataset_node.py @@ -0,0 +1,79 @@ +# coding=utf-8 +""" + @project: maxkb + @Author:虎 + @file: i_search_dataset_node.py + @date:2024/6/3 17:52 + @desc: +""" +import re +from typing import Type + +from django.core import validators +from rest_framework import serializers + +from application.flow.i_step_node import INode, NodeResult +from common.util.common import flat_map +from 
common.util.field_message import ErrMessage +from django.utils.translation import gettext_lazy as _ + + +class DatasetSettingSerializer(serializers.Serializer): + # 需要查询的条数 + top_n = serializers.IntegerField(required=True, + error_messages=ErrMessage.integer(_("Reference segment number"))) + # 相似度 0-1之间 + similarity = serializers.FloatField(required=True, max_value=2, min_value=0, + error_messages=ErrMessage.float(_('similarity'))) + search_mode = serializers.CharField(required=True, validators=[ + validators.RegexValidator(regex=re.compile("^embedding|keywords|blend$"), + message=_("The type only supports embedding|keywords|blend"), code=500) + ], error_messages=ErrMessage.char(_("Retrieval Mode"))) + max_paragraph_char_number = serializers.IntegerField(required=True, + error_messages=ErrMessage.float(_("Maximum number of words in a quoted segment"))) + + +class SearchDatasetStepNodeSerializer(serializers.Serializer): + # 需要查询的数据集id列表 + dataset_id_list = serializers.ListField(required=True, child=serializers.UUIDField(required=True), + error_messages=ErrMessage.list(_("Dataset id list"))) + dataset_setting = DatasetSettingSerializer(required=True) + + question_reference_address = serializers.ListField(required=True) + + def is_valid(self, *, raise_exception=False): + super().is_valid(raise_exception=True) + + +def get_paragraph_list(chat_record, node_id): + return flat_map([chat_record.details[key].get('paragraph_list', []) for key in chat_record.details if + (chat_record.details[ + key].get('type', '') == 'search-dataset-node') and chat_record.details[key].get( + 'paragraph_list', []) is not None and key == node_id]) + + +class ISearchDatasetStepNode(INode): + type = 'search-dataset-node' + + def get_node_params_serializer_class(self) -> Type[serializers.Serializer]: + return SearchDatasetStepNodeSerializer + + def _run(self): + question = self.workflow_manage.get_reference_field( + self.node_params_serializer.data.get('question_reference_address')[0], + 
self.node_params_serializer.data.get('question_reference_address')[1:]) + exclude_paragraph_id_list = [] + if self.flow_params_serializer.data.get('re_chat', False): + history_chat_record = self.flow_params_serializer.data.get('history_chat_record', []) + paragraph_id_list = [p.get('id') for p in flat_map( + [get_paragraph_list(chat_record, self.runtime_node_id) for chat_record in history_chat_record if + chat_record.problem_text == question])] + exclude_paragraph_id_list = list(set(paragraph_id_list)) + + return self.execute(**self.node_params_serializer.data, question=str(question), + exclude_paragraph_id_list=exclude_paragraph_id_list) + + def execute(self, dataset_id_list, dataset_setting, question, + exclude_paragraph_id_list=None, + **kwargs) -> NodeResult: + pass diff --git a/apps/application/flow/step_node/search_dataset_node/impl/__init__.py b/apps/application/flow/step_node/search_dataset_node/impl/__init__.py new file mode 100644 index 00000000000..a9cff0d0941 --- /dev/null +++ b/apps/application/flow/step_node/search_dataset_node/impl/__init__.py @@ -0,0 +1,9 @@ +# coding=utf-8 +""" + @project: maxkb + @Author:虎 + @file: __init__.py + @date:2024/6/11 15:35 + @desc: +""" +from .base_search_dataset_node import BaseSearchDatasetNode diff --git a/apps/application/flow/step_node/search_dataset_node/impl/base_search_dataset_node.py b/apps/application/flow/step_node/search_dataset_node/impl/base_search_dataset_node.py new file mode 100644 index 00000000000..5107d4ce2c8 --- /dev/null +++ b/apps/application/flow/step_node/search_dataset_node/impl/base_search_dataset_node.py @@ -0,0 +1,146 @@ +# coding=utf-8 +""" + @project: maxkb + @Author:虎 + @file: base_search_dataset_node.py + @date:2024/6/4 11:56 + @desc: +""" +import os +from typing import List, Dict + +from django.db.models import QuerySet +from django.db import connection +from application.flow.i_step_node import NodeResult +from application.flow.step_node.search_dataset_node.i_search_dataset_node import 
ISearchDatasetStepNode +from common.config.embedding_config import VectorStore +from common.db.search import native_search +from common.util.file_util import get_file_content +from dataset.models import Document, Paragraph, DataSet +from embedding.models import SearchMode +from setting.models_provider.tools import get_model_instance_by_model_user_id +from smartdoc.conf import PROJECT_DIR + + +def get_embedding_id(dataset_id_list): + dataset_list = QuerySet(DataSet).filter(id__in=dataset_id_list) + if len(set([dataset.embedding_mode_id for dataset in dataset_list])) > 1: + raise Exception("关联知识库的向量模型不一致,无法召回分段。") + if len(dataset_list) == 0: + raise Exception("知识库设置错误,请重新设置知识库") + return dataset_list[0].embedding_mode_id + + +def get_none_result(question): + return NodeResult( + {'paragraph_list': [], 'is_hit_handling_method': [], 'question': question, 'data': '', + 'directly_return': ''}, {}) + + +def reset_title(title): + if title is None or len(title.strip()) == 0: + return "" + else: + return f"#### {title}\n" + + +class BaseSearchDatasetNode(ISearchDatasetStepNode): + def save_context(self, details, workflow_manage): + result = details.get('paragraph_list', []) + dataset_setting = self.node_params_serializer.data.get('dataset_setting') + directly_return = '\n'.join( + [f"{paragraph.get('title', '')}:{paragraph.get('content')}" for paragraph in result if + paragraph.get('is_hit_handling_method')]) + self.context['paragraph_list'] = result + self.context['question'] = details.get('question') + self.context['run_time'] = details.get('run_time') + self.context['is_hit_handling_method_list'] = [row for row in result if row.get('is_hit_handling_method')] + self.context['data'] = '\n'.join( + [f"{paragraph.get('title', '')}:{paragraph.get('content')}" for paragraph in + result])[0:dataset_setting.get('max_paragraph_char_number', 5000)] + self.context['directly_return'] = directly_return + + def execute(self, dataset_id_list, dataset_setting, question, + 
exclude_paragraph_id_list=None, + **kwargs) -> NodeResult: + self.context['question'] = question + if len(dataset_id_list) == 0: + return get_none_result(question) + model_id = get_embedding_id(dataset_id_list) + embedding_model = get_model_instance_by_model_user_id(model_id, self.flow_params_serializer.data.get('user_id')) + embedding_value = embedding_model.embed_query(question) + vector = VectorStore.get_embedding_vector() + exclude_document_id_list = [str(document.id) for document in + QuerySet(Document).filter( + dataset_id__in=dataset_id_list, + is_active=False)] + embedding_list = vector.query(question, embedding_value, dataset_id_list, exclude_document_id_list, + exclude_paragraph_id_list, True, dataset_setting.get('top_n'), + dataset_setting.get('similarity'), SearchMode(dataset_setting.get('search_mode'))) + # 手动关闭数据库连接 + connection.close() + if embedding_list is None: + return get_none_result(question) + paragraph_list = self.list_paragraph(embedding_list, vector) + result = [self.reset_paragraph(paragraph, embedding_list) for paragraph in paragraph_list] + result = sorted(result, key=lambda p: p.get('similarity'), reverse=True) + return NodeResult({'paragraph_list': result, + 'is_hit_handling_method_list': [row for row in result if row.get('is_hit_handling_method')], + 'data': '\n'.join( + [f"{reset_title(paragraph.get('title', ''))}{paragraph.get('content')}" for paragraph in + result])[0:dataset_setting.get('max_paragraph_char_number', 5000)], + 'directly_return': '\n'.join( + [paragraph.get('content') for paragraph in + result if + paragraph.get('is_hit_handling_method')]), + 'question': question}, + + {}) + + @staticmethod + def reset_paragraph(paragraph: Dict, embedding_list: List): + filter_embedding_list = [embedding for embedding in embedding_list if + str(embedding.get('paragraph_id')) == str(paragraph.get('id'))] + if filter_embedding_list is not None and len(filter_embedding_list) > 0: + find_embedding = filter_embedding_list[-1] + return { + 
**paragraph, + 'similarity': find_embedding.get('similarity'), + 'is_hit_handling_method': find_embedding.get('similarity') > paragraph.get( + 'directly_return_similarity') and paragraph.get('hit_handling_method') == 'directly_return', + 'update_time': paragraph.get('update_time').strftime("%Y-%m-%d %H:%M:%S"), + 'create_time': paragraph.get('create_time').strftime("%Y-%m-%d %H:%M:%S"), + 'id': str(paragraph.get('id')), + 'dataset_id': str(paragraph.get('dataset_id')), + 'document_id': str(paragraph.get('document_id')) + } + + @staticmethod + def list_paragraph(embedding_list: List, vector): + paragraph_id_list = [row.get('paragraph_id') for row in embedding_list] + if paragraph_id_list is None or len(paragraph_id_list) == 0: + return [] + paragraph_list = native_search(QuerySet(Paragraph).filter(id__in=paragraph_id_list), + get_file_content( + os.path.join(PROJECT_DIR, "apps", "application", 'sql', + 'list_dataset_paragraph_by_paragraph_id.sql')), + with_table_name=True) + # 如果向量库中存在脏数据 直接删除 + if len(paragraph_list) != len(paragraph_id_list): + exist_paragraph_list = [row.get('id') for row in paragraph_list] + for paragraph_id in paragraph_id_list: + if not exist_paragraph_list.__contains__(paragraph_id): + vector.delete_by_paragraph_id(paragraph_id) + return paragraph_list + + def get_details(self, index: int, **kwargs): + return { + 'name': self.node.properties.get('stepName'), + 'question': self.context.get('question'), + "index": index, + 'run_time': self.context.get('run_time'), + 'paragraph_list': self.context.get('paragraph_list'), + 'type': self.node.type, + 'status': self.status, + 'err_message': self.err_message + } diff --git a/apps/application/flow/step_node/speech_to_text_step_node/__init__.py b/apps/application/flow/step_node/speech_to_text_step_node/__init__.py new file mode 100644 index 00000000000..f3feecc9ce2 --- /dev/null +++ b/apps/application/flow/step_node/speech_to_text_step_node/__init__.py @@ -0,0 +1,3 @@ +# coding=utf-8 + +from .impl 
import * diff --git a/apps/application/flow/step_node/speech_to_text_step_node/i_speech_to_text_node.py b/apps/application/flow/step_node/speech_to_text_step_node/i_speech_to_text_node.py new file mode 100644 index 00000000000..154762dca1a --- /dev/null +++ b/apps/application/flow/step_node/speech_to_text_step_node/i_speech_to_text_node.py @@ -0,0 +1,38 @@ +# coding=utf-8 + +from typing import Type + +from rest_framework import serializers + +from application.flow.i_step_node import INode, NodeResult +from common.util.field_message import ErrMessage +from django.utils.translation import gettext_lazy as _ + + +class SpeechToTextNodeSerializer(serializers.Serializer): + stt_model_id = serializers.CharField(required=True, error_messages=ErrMessage.char(_("Model id"))) + + is_result = serializers.BooleanField(required=False, error_messages=ErrMessage.boolean(_('Whether to return content'))) + + audio_list = serializers.ListField(required=True, error_messages=ErrMessage.list(_("The audio file cannot be empty"))) + + +class ISpeechToTextNode(INode): + type = 'speech-to-text-node' + + def get_node_params_serializer_class(self) -> Type[serializers.Serializer]: + return SpeechToTextNodeSerializer + + def _run(self): + res = self.workflow_manage.get_reference_field(self.node_params_serializer.data.get('audio_list')[0], + self.node_params_serializer.data.get('audio_list')[1:]) + for audio in res: + if 'file_id' not in audio: + raise ValueError(_("Parameter value error: The uploaded audio lacks file_id, and the audio upload fails")) + + return self.execute(audio=res, **self.node_params_serializer.data, **self.flow_params_serializer.data) + + def execute(self, stt_model_id, chat_id, + audio, + **kwargs) -> NodeResult: + pass diff --git a/apps/application/flow/step_node/speech_to_text_step_node/impl/__init__.py b/apps/application/flow/step_node/speech_to_text_step_node/impl/__init__.py new file mode 100644 index 00000000000..9d2da615820 --- /dev/null +++ 
b/apps/application/flow/step_node/speech_to_text_step_node/impl/__init__.py @@ -0,0 +1,3 @@ +# coding=utf-8 + +from .base_speech_to_text_node import BaseSpeechToTextNode diff --git a/apps/application/flow/step_node/speech_to_text_step_node/impl/base_speech_to_text_node.py b/apps/application/flow/step_node/speech_to_text_step_node/impl/base_speech_to_text_node.py new file mode 100644 index 00000000000..13b954e4622 --- /dev/null +++ b/apps/application/flow/step_node/speech_to_text_step_node/impl/base_speech_to_text_node.py @@ -0,0 +1,72 @@ +# coding=utf-8 +import os +import tempfile +import time +import io +from typing import List, Dict + +from django.db.models import QuerySet +from pydub import AudioSegment +from concurrent.futures import ThreadPoolExecutor +from application.flow.i_step_node import NodeResult, INode +from application.flow.step_node.speech_to_text_step_node.i_speech_to_text_node import ISpeechToTextNode +from common.util.common import split_and_transcribe, any_to_mp3 +from dataset.models import File +from setting.models_provider.tools import get_model_instance_by_model_user_id + +class BaseSpeechToTextNode(ISpeechToTextNode): + + def save_context(self, details, workflow_manage): + self.context['answer'] = details.get('answer') + if self.node_params.get('is_result', False): + self.answer_text = details.get('answer') + + def execute(self, stt_model_id, chat_id, audio, **kwargs) -> NodeResult: + stt_model = get_model_instance_by_model_user_id(stt_model_id, self.flow_params_serializer.data.get('user_id')) + audio_list = audio + self.context['audio_list'] = audio + + def process_audio_item(audio_item, model): + file = QuerySet(File).filter(id=audio_item['file_id']).first() + # 根据file_name 吧文件转成mp3格式 + file_format = file.file_name.split('.')[-1] + with tempfile.NamedTemporaryFile(delete=False, suffix=f'.{file_format}') as temp_file: + temp_file.write(file.get_byte().tobytes()) + temp_file_path = temp_file.name + with 
tempfile.NamedTemporaryFile(delete=False, suffix='.mp3') as temp_amr_file: + temp_mp3_path = temp_amr_file.name + any_to_mp3(temp_file_path, temp_mp3_path) + try: + transcription = split_and_transcribe(temp_mp3_path, model) + return {file.file_name: transcription} + finally: + os.remove(temp_file_path) + os.remove(temp_mp3_path) + + def process_audio_items(audio_list, model): + with ThreadPoolExecutor(max_workers=5) as executor: + results = list(executor.map(lambda item: process_audio_item(item, model), audio_list)) + return results + + result = process_audio_items(audio_list, stt_model) + content = [] + result_content = [] + for item in result: + for key, value in item.items(): + content.append(f'### {key}\n{value}') + result_content.append(value) + return NodeResult({'answer': '\n'.join(result_content), 'result': '\n'.join(result_content), + 'content': content}, {}) + + def get_details(self, index: int, **kwargs): + return { + 'name': self.node.properties.get('stepName'), + "index": index, + 'run_time': self.context.get('run_time'), + 'answer': self.context.get('answer'), + 'content': self.context.get('content'), + 'type': self.node.type, + 'status': self.status, + 'err_message': self.err_message, + 'audio_list': self.context.get('audio_list'), + } diff --git a/apps/application/flow/step_node/start_node/__init__.py b/apps/application/flow/step_node/start_node/__init__.py new file mode 100644 index 00000000000..98a1afcd904 --- /dev/null +++ b/apps/application/flow/step_node/start_node/__init__.py @@ -0,0 +1,9 @@ +# coding=utf-8 +""" + @project: maxkb + @Author:虎 + @file: __init__.py + @date:2024/6/11 15:30 + @desc: +""" +from .impl import * diff --git a/apps/application/flow/step_node/start_node/i_start_node.py b/apps/application/flow/step_node/start_node/i_start_node.py new file mode 100644 index 00000000000..41d73f21811 --- /dev/null +++ b/apps/application/flow/step_node/start_node/i_start_node.py @@ -0,0 +1,20 @@ +# coding=utf-8 +""" + @project: maxkb + 
@Author:虎 + @file: i_start_node.py + @date:2024/6/3 16:54 + @desc: +""" + +from application.flow.i_step_node import INode, NodeResult + + +class IStarNode(INode): + type = 'start-node' + + def _run(self): + return self.execute(**self.flow_params_serializer.data) + + def execute(self, question, **kwargs) -> NodeResult: + pass diff --git a/apps/application/flow/step_node/start_node/impl/__init__.py b/apps/application/flow/step_node/start_node/impl/__init__.py new file mode 100644 index 00000000000..b68a92d021f --- /dev/null +++ b/apps/application/flow/step_node/start_node/impl/__init__.py @@ -0,0 +1,9 @@ +# coding=utf-8 +""" + @project: maxkb + @Author:虎 + @file: __init__.py + @date:2024/6/11 15:36 + @desc: +""" +from .base_start_node import BaseStartStepNode diff --git a/apps/application/flow/step_node/start_node/impl/base_start_node.py b/apps/application/flow/step_node/start_node/impl/base_start_node.py new file mode 100644 index 00000000000..24b9684714e --- /dev/null +++ b/apps/application/flow/step_node/start_node/impl/base_start_node.py @@ -0,0 +1,92 @@ +# coding=utf-8 +""" + @project: maxkb + @Author:虎 + @file: base_start_node.py + @date:2024/6/3 17:17 + @desc: +""" +import time +from datetime import datetime +from typing import List, Type + +from rest_framework import serializers + +from application.flow.i_step_node import NodeResult +from application.flow.step_node.start_node.i_start_node import IStarNode + + +def get_default_global_variable(input_field_list: List): + return {item.get('variable'): item.get('default_value') for item in input_field_list if + item.get('default_value', None) is not None} + + +def get_global_variable(node): + history_chat_record = node.flow_params_serializer.data.get('history_chat_record', []) + history_context = [{'question': chat_record.problem_text, 'answer': chat_record.answer_text} for chat_record in + history_chat_record] + chat_id = node.flow_params_serializer.data.get('chat_id') + return {'time': 
datetime.now().strftime('%Y-%m-%d %H:%M:%S'), 'start_time': time.time(), + 'history_context': history_context, 'chat_id': str(chat_id), **node.workflow_manage.form_data} + + +class BaseStartStepNode(IStarNode): + def save_context(self, details, workflow_manage): + base_node = self.workflow_manage.get_base_node() + default_global_variable = get_default_global_variable(base_node.properties.get('input_field_list', [])) + workflow_variable = {**default_global_variable, **get_global_variable(self)} + self.context['question'] = details.get('question') + self.context['run_time'] = details.get('run_time') + self.context['document'] = details.get('document_list') + self.context['image'] = details.get('image_list') + self.context['audio'] = details.get('audio_list') + self.context['other'] = details.get('other_list') + self.status = details.get('status') + self.err_message = details.get('err_message') + for key, value in workflow_variable.items(): + workflow_manage.context[key] = value + for item in details.get('global_fields', []): + workflow_manage.context[item.get('key')] = item.get('value') + + def get_node_params_serializer_class(self) -> Type[serializers.Serializer]: + pass + + def execute(self, question, **kwargs) -> NodeResult: + base_node = self.workflow_manage.get_base_node() + default_global_variable = get_default_global_variable(base_node.properties.get('input_field_list', [])) + workflow_variable = {**default_global_variable, **get_global_variable(self)} + """ + 开始节点 初始化全局变量 + """ + node_variable = { + 'question': question, + 'image': self.workflow_manage.image_list, + 'document': self.workflow_manage.document_list, + 'audio': self.workflow_manage.audio_list, + 'other': self.workflow_manage.other_list, + } + return NodeResult(node_variable, workflow_variable) + + def get_details(self, index: int, **kwargs): + global_fields = [] + for field in self.node.properties.get('config')['globalFields']: + key = field['value'] + global_fields.append({ + 'label': 
field['label'], + 'key': key, + 'value': self.workflow_manage.context[key] if key in self.workflow_manage.context else '' + }) + return { + 'name': self.node.properties.get('stepName'), + "index": index, + "question": self.context.get('question'), + 'run_time': self.context.get('run_time'), + 'type': self.node.type, + 'status': self.status, + 'err_message': self.err_message, + 'image_list': self.context.get('image'), + 'document_list': self.context.get('document'), + 'audio_list': self.context.get('audio'), + 'other_list': self.context.get('other'), + 'global_fields': global_fields + } diff --git a/apps/application/flow/step_node/text_to_speech_step_node/__init__.py b/apps/application/flow/step_node/text_to_speech_step_node/__init__.py new file mode 100644 index 00000000000..f3feecc9ce2 --- /dev/null +++ b/apps/application/flow/step_node/text_to_speech_step_node/__init__.py @@ -0,0 +1,3 @@ +# coding=utf-8 + +from .impl import * diff --git a/apps/application/flow/step_node/text_to_speech_step_node/i_text_to_speech_node.py b/apps/application/flow/step_node/text_to_speech_step_node/i_text_to_speech_node.py new file mode 100644 index 00000000000..68b53ea92db --- /dev/null +++ b/apps/application/flow/step_node/text_to_speech_step_node/i_text_to_speech_node.py @@ -0,0 +1,36 @@ +# coding=utf-8 + +from typing import Type + +from rest_framework import serializers + +from application.flow.i_step_node import INode, NodeResult +from common.util.field_message import ErrMessage +from django.utils.translation import gettext_lazy as _ + + +class TextToSpeechNodeSerializer(serializers.Serializer): + tts_model_id = serializers.CharField(required=True, error_messages=ErrMessage.char(_("Model id"))) + + is_result = serializers.BooleanField(required=False, error_messages=ErrMessage.boolean(_('Whether to return content'))) + + content_list = serializers.ListField(required=True, error_messages=ErrMessage.list(_("Text content"))) + model_params_setting = 
serializers.DictField(required=False, + error_messages=ErrMessage.integer(_("Model parameter settings"))) + + +class ITextToSpeechNode(INode): + type = 'text-to-speech-node' + + def get_node_params_serializer_class(self) -> Type[serializers.Serializer]: + return TextToSpeechNodeSerializer + + def _run(self): + content = self.workflow_manage.get_reference_field(self.node_params_serializer.data.get('content_list')[0], + self.node_params_serializer.data.get('content_list')[1:]) + return self.execute(content=content, **self.node_params_serializer.data, **self.flow_params_serializer.data) + + def execute(self, tts_model_id, chat_id, + content, model_params_setting=None, + **kwargs) -> NodeResult: + pass diff --git a/apps/application/flow/step_node/text_to_speech_step_node/impl/__init__.py b/apps/application/flow/step_node/text_to_speech_step_node/impl/__init__.py new file mode 100644 index 00000000000..385b9718f6e --- /dev/null +++ b/apps/application/flow/step_node/text_to_speech_step_node/impl/__init__.py @@ -0,0 +1,3 @@ +# coding=utf-8 + +from .base_text_to_speech_node import BaseTextToSpeechNode diff --git a/apps/application/flow/step_node/text_to_speech_step_node/impl/base_text_to_speech_node.py b/apps/application/flow/step_node/text_to_speech_step_node/impl/base_text_to_speech_node.py new file mode 100644 index 00000000000..97044729596 --- /dev/null +++ b/apps/application/flow/step_node/text_to_speech_step_node/impl/base_text_to_speech_node.py @@ -0,0 +1,76 @@ +# coding=utf-8 +import io +import mimetypes + +from django.core.files.uploadedfile import InMemoryUploadedFile + +from application.flow.i_step_node import NodeResult, INode +from application.flow.step_node.image_understand_step_node.i_image_understand_node import IImageUnderstandNode +from application.flow.step_node.text_to_speech_step_node.i_text_to_speech_node import ITextToSpeechNode +from dataset.models import File +from dataset.serializers.file_serializers import FileSerializer +from 
setting.models_provider.tools import get_model_instance_by_model_user_id + + +def bytes_to_uploaded_file(file_bytes, file_name="generated_audio.mp3"): + content_type, _ = mimetypes.guess_type(file_name) + if content_type is None: + # 如果未能识别,设置为默认的二进制文件类型 + content_type = "application/octet-stream" + # 创建一个内存中的字节流对象 + file_stream = io.BytesIO(file_bytes) + + # 获取文件大小 + file_size = len(file_bytes) + + uploaded_file = InMemoryUploadedFile( + file=file_stream, + field_name=None, + name=file_name, + content_type=content_type, + size=file_size, + charset=None, + ) + return uploaded_file + + +class BaseTextToSpeechNode(ITextToSpeechNode): + def save_context(self, details, workflow_manage): + self.context['answer'] = details.get('answer') + if self.node_params.get('is_result', False): + self.answer_text = details.get('answer') + + def execute(self, tts_model_id, chat_id, + content, model_params_setting=None, + **kwargs) -> NodeResult: + self.context['content'] = content + model = get_model_instance_by_model_user_id(tts_model_id, self.flow_params_serializer.data.get('user_id'), + **model_params_setting) + audio_byte = model.text_to_speech(content) + # 需要把这个音频文件存储到数据库中 + file_name = 'generated_audio.mp3' + file = bytes_to_uploaded_file(audio_byte, file_name) + application = self.workflow_manage.work_flow_post_handler.chat_info.application + meta = { + 'debug': False if application.id else True, + 'chat_id': chat_id, + 'application_id': str(application.id) if application.id else None, + } + file_url = FileSerializer(data={'file': file, 'meta': meta}).upload() + # 拼接一个audio标签的src属性 + audio_label = f'' + file_id = file_url.split('/')[-1] + audio_list = [{'file_id': file_id, 'file_name': file_name, 'url': file_url}] + return NodeResult({'answer': audio_label, 'result': audio_list}, {}) + + def get_details(self, index: int, **kwargs): + return { + 'name': self.node.properties.get('stepName'), + "index": index, + 'run_time': self.context.get('run_time'), + 'type': self.node.type, 
+ 'status': self.status, + 'content': self.context.get('content'), + 'err_message': self.err_message, + 'answer': self.context.get('answer'), + } diff --git a/apps/application/flow/step_node/variable_assign_node/__init__.py b/apps/application/flow/step_node/variable_assign_node/__init__.py new file mode 100644 index 00000000000..2d231e6066d --- /dev/null +++ b/apps/application/flow/step_node/variable_assign_node/__init__.py @@ -0,0 +1,3 @@ +# coding=utf-8 + +from .impl import * \ No newline at end of file diff --git a/apps/application/flow/step_node/variable_assign_node/i_variable_assign_node.py b/apps/application/flow/step_node/variable_assign_node/i_variable_assign_node.py new file mode 100644 index 00000000000..e4594183f35 --- /dev/null +++ b/apps/application/flow/step_node/variable_assign_node/i_variable_assign_node.py @@ -0,0 +1,27 @@ +# coding=utf-8 + +from typing import Type + +from django.utils.translation import gettext_lazy as _ +from rest_framework import serializers + +from application.flow.i_step_node import INode, NodeResult +from common.util.field_message import ErrMessage + + +class VariableAssignNodeParamsSerializer(serializers.Serializer): + variable_list = serializers.ListField(required=True, + error_messages=ErrMessage.list(_("Reference Field"))) + + +class IVariableAssignNode(INode): + type = 'variable-assign-node' + + def get_node_params_serializer_class(self) -> Type[serializers.Serializer]: + return VariableAssignNodeParamsSerializer + + def _run(self): + return self.execute(**self.node_params_serializer.data, **self.flow_params_serializer.data) + + def execute(self, variable_list, stream, **kwargs) -> NodeResult: + pass diff --git a/apps/application/flow/step_node/variable_assign_node/impl/__init__.py b/apps/application/flow/step_node/variable_assign_node/impl/__init__.py new file mode 100644 index 00000000000..7585cdd8fe4 --- /dev/null +++ b/apps/application/flow/step_node/variable_assign_node/impl/__init__.py @@ -0,0 +1,9 @@ +# 
# ---- apps/application/flow/step_node/variable_assign_node/impl/__init__.py ----
# coding=utf-8
"""
    @project: maxkb
    @Author:虎
    @file: __init__.py
    @date:2024/6/11 17:49
    @desc:
"""
from .base_variable_assign_node import *


# ---- apps/application/flow/step_node/variable_assign_node/impl/base_variable_assign_node.py ----
# coding=utf-8
import json
from typing import List

from application.flow.i_step_node import NodeResult
from application.flow.step_node.variable_assign_node.i_variable_assign_node import IVariableAssignNode


class BaseVariableAssignNode(IVariableAssignNode):
    """Assigns custom values or referenced node outputs into the workflow's
    global context."""

    def save_context(self, details, workflow_manage):
        self.context['variable_list'] = details.get('variable_list')
        self.context['result_list'] = details.get('result_list')

    def execute(self, variable_list, stream, **kwargs) -> NodeResult:
        result_list = []
        for variable in variable_list:
            if 'fields' not in variable:
                continue
            # Only variables targeting the 'global' scope are assignable.
            if 'global' == variable['fields'][0]:
                result = {
                    'name': variable['name'],
                    'input_value': self.get_reference_content(variable['fields']),
                }
                if variable['source'] == 'custom':
                    if variable['type'] == 'json':
                        if isinstance(variable['value'], dict) or isinstance(variable['value'], list):
                            val = variable['value']
                        else:
                            val = json.loads(variable['value'])
                        self.workflow_manage.context[variable['fields'][1]] = val
                        result['output_value'] = variable['value'] = val
                    elif variable['type'] == 'string':
                        # Resolve template placeholders, e.g. {{global.xxx}}.
                        val = self.workflow_manage.generate_prompt(variable['value'])
                        self.workflow_manage.context[variable['fields'][1]] = val
                        result['output_value'] = val
                    else:
                        val = variable['value']
                        self.workflow_manage.context[variable['fields'][1]] = val
                        result['output_value'] = val
                else:
                    # Source is a reference to another node's output field.
                    reference = self.get_reference_content(variable['reference'])
                    self.workflow_manage.context[variable['fields'][1]] = reference
                    result['output_value'] = reference
                result_list.append(result)

        return NodeResult({'variable_list': variable_list, 'result_list': result_list}, {})

    def get_reference_content(self, fields: List[str]):
        # Resolve a [node_id, *path] reference to its string value.
        return str(self.workflow_manage.get_reference_field(
            fields[0],
            fields[1:]))

    def get_details(self, index: int, **kwargs):
        return {
            'name': self.node.properties.get('stepName'),
            "index": index,
            'run_time': self.context.get('run_time'),
            'type': self.node.type,
            'variable_list': self.context.get('variable_list'),
            'result_list': self.context.get('result_list'),
            'status': self.status,
            'err_message': self.err_message
        }


# ---- apps/application/flow/tools.py ----
# coding=utf-8
"""
    @project: maxkb
    @Author:虎
    @file: utils.py
    @date:2024/6/6 15:15
    @desc:
"""
import json
from typing import Iterator

from django.http import StreamingHttpResponse
from langchain_core.messages import BaseMessageChunk, BaseMessage

from application.flow.i_step_node import WorkFlowPostHandler
from common.response import result


class Reasoning:
    """Incremental splitter of an LLM stream into 'reasoning' and 'answer' parts,
    delimited by configurable start/end tags (e.g. <think> ... </think>)."""

    def __init__(self, reasoning_content_start, reasoning_content_end):
        self.content = ""
        self.reasoning_content = ""
        self.all_content = ""
        self.reasoning_content_start_tag = reasoning_content_start
        self.reasoning_content_end_tag = reasoning_content_end
        self.reasoning_content_start_tag_len = len(
            reasoning_content_start) if reasoning_content_start is not None else 0
        self.reasoning_content_end_tag_len = len(reasoning_content_end) if reasoning_content_end is not None else 0
        # First character of the end tag, used to detect a possible tag boundary
        # that is split across stream chunks.
        self.reasoning_content_end_tag_prefix = reasoning_content_end[
            0] if self.reasoning_content_end_tag_len > 0 else ''
        self.reasoning_content_is_start = False
        self.reasoning_content_is_end = False
        self.reasoning_content_chunk = ""

    def get_end_reasoning_content(self):
        # Flush whatever is buffered once the stream ends.
        if not self.reasoning_content_is_start and not self.reasoning_content_is_end:
            r = {'content': self.all_content, 'reasoning_content': ''}
            self.reasoning_content_chunk = ""
            return r
        if self.reasoning_content_is_start and not self.reasoning_content_is_end:
            r = {'content': '', 'reasoning_content': self.reasoning_content_chunk}
            self.reasoning_content_chunk = ""
            return r
        return {'content': '', 'reasoning_content': ''}

    def get_reasoning_content(self, chunk):
        # No start tag configured: everything is answer content.
        if self.reasoning_content_start_tag is None or len(self.reasoning_content_start_tag) == 0:
            self.content += chunk.content
            return {'content': chunk.content, 'reasoning_content': ''}
        # No end tag configured: everything is reasoning content.
        if self.reasoning_content_end_tag is None or len(self.reasoning_content_end_tag) == 0:
            return {'content': '', 'reasoning_content': chunk.content}
        self.all_content += chunk.content
        if not self.reasoning_content_is_start and len(self.all_content) >= self.reasoning_content_start_tag_len:
            if self.all_content.startswith(self.reasoning_content_start_tag):
                self.reasoning_content_is_start = True
                self.reasoning_content_chunk = self.all_content[self.reasoning_content_start_tag_len:]
            else:
                # Stream does not open with the start tag: no reasoning section.
                if not self.reasoning_content_is_end:
                    self.reasoning_content_is_end = True
                    self.content += self.all_content
                    return {'content': self.all_content, 'reasoning_content': ''}
        else:
            if self.reasoning_content_is_start:
                self.reasoning_content_chunk += chunk.content
        reasoning_content_end_tag_prefix_index = self.reasoning_content_chunk.find(
            self.reasoning_content_end_tag_prefix)
        if self.reasoning_content_is_end:
            self.content += chunk.content
            return {'content': chunk.content, 'reasoning_content': ''}
        # Does the buffer contain (a prefix of) the end tag?
        if reasoning_content_end_tag_prefix_index > -1:
            if len(self.reasoning_content_chunk) - reasoning_content_end_tag_prefix_index >= self.reasoning_content_end_tag_len:
                reasoning_content_end_tag_index = self.reasoning_content_chunk.find(self.reasoning_content_end_tag)
                if reasoning_content_end_tag_index > -1:
                    reasoning_content_chunk = self.reasoning_content_chunk[0:reasoning_content_end_tag_index]
                    content_chunk = self.reasoning_content_chunk[
                                    reasoning_content_end_tag_index + self.reasoning_content_end_tag_len:]
                    self.reasoning_content += reasoning_content_chunk
                    self.content += content_chunk
                    self.reasoning_content_chunk = ""
                    self.reasoning_content_is_end = True
                    return {'content': content_chunk, 'reasoning_content': reasoning_content_chunk}
                else:
                    reasoning_content_chunk = self.reasoning_content_chunk[0:reasoning_content_end_tag_prefix_index + 1]
                    self.reasoning_content_chunk = self.reasoning_content_chunk.replace(reasoning_content_chunk, '')
                    self.reasoning_content += reasoning_content_chunk
                    return {'content': '', 'reasoning_content': reasoning_content_chunk}
            else:
                # Possible split end tag: hold the buffer until more arrives.
                return {'content': '', 'reasoning_content': ''}

        else:
            if self.reasoning_content_is_end:
                self.content += chunk.content
                return {'content': chunk.content, 'reasoning_content': ''}
            else:
                result = {'content': '', 'reasoning_content': self.reasoning_content_chunk}
                self.reasoning_content += self.reasoning_content_chunk
                self.reasoning_content_chunk = ""
                return result


def event_content(chat_id, chat_record_id, response, workflow,
                  write_context,
                  post_handler: WorkFlowPostHandler):
    """SSE generator for streaming output.

    @param chat_id: chat session id
    @param chat_record_id: chat record id
    @param response: iterable of response chunks
    @param workflow: workflow manager
    @param write_context: callback that writes the node context
    @param post_handler: post-processing handler
    """
    answer = ''
    try:
        for chunk in response:
            answer += chunk.content
            yield 'data: ' + json.dumps({'chat_id': str(chat_id), 'id': str(chat_record_id), 'operate': True,
                                         'content': chunk.content, 'is_end': False}, ensure_ascii=False) + "\n\n"
        write_context(answer, 200)
        post_handler.handler(chat_id, chat_record_id, answer, workflow)
        yield 'data: ' + json.dumps({'chat_id': str(chat_id), 'id': str(chat_record_id), 'operate': True,
                                     'content': '', 'is_end': True}, ensure_ascii=False) + "\n\n"
    except Exception as e:
        # Surface the error as the final SSE event instead of breaking the stream.
        answer = str(e)
        write_context(answer, 500)
        post_handler.handler(chat_id, chat_record_id, answer, workflow)
        yield 'data: ' + json.dumps({'chat_id': str(chat_id), 'id': str(chat_record_id), 'operate': True,
                                     'content': answer, 'is_end': True}, ensure_ascii=False) + "\n\n"


def to_stream_response(chat_id, chat_record_id, response: Iterator[BaseMessageChunk], workflow, write_context,
                       post_handler):
    """Wrap a chunk iterator in a server-sent-events HTTP response.

    @param chat_id: chat session id
    @param chat_record_id: chat record id
    @param response: response chunk iterator
    @param workflow: workflow manager
    @param write_context: callback that writes the node context
    @param post_handler: post-processing handler
    @return: StreamingHttpResponse emitting SSE events
    """
    r = StreamingHttpResponse(
        streaming_content=event_content(chat_id, chat_record_id, response, workflow, write_context, post_handler),
        content_type='text/event-stream;charset=utf-8',
        charset='utf-8')

    r['Cache-Control'] = 'no-cache'
    return r


def to_response(chat_id, chat_record_id, response: BaseMessage, workflow, write_context,
                post_handler: WorkFlowPostHandler):
    """Convert a complete (non-streamed) message into a block HTTP response.

    @param chat_id: chat session id
    @param chat_record_id: chat record id
    @param response: full response message
    @param workflow: workflow manager
    @param write_context: callback that writes the node context
    @param post_handler: post-processing handler
    @return: success result payload
    """
    answer = response.content
    write_context(answer)
    post_handler.handler(chat_id, chat_record_id, answer, workflow)
    return result.success({'chat_id': str(chat_id), 'id': str(chat_record_id), 'operate': True,
                           'content': answer, 'is_end': True})


def to_response_simple(chat_id, chat_record_id, response: BaseMessage, workflow,
                       post_handler: WorkFlowPostHandler):
    # Same as to_response but without writing node context.
    answer = response.content
    post_handler.handler(chat_id, chat_record_id, answer, workflow)
    return result.success({'chat_id': str(chat_id), 'id': str(chat_record_id), 'operate': True,
                           'content': answer, 'is_end': True})


def to_stream_response_simple(stream_event):
    # Wrap an already-prepared event generator in an SSE response.
    r = StreamingHttpResponse(
        streaming_content=stream_event,
        content_type='text/event-stream;charset=utf-8',
        charset='utf-8')

    r['Cache-Control'] = 'no-cache'
    return r


# ---- apps/application/flow/workflow_manage.py (beginning; WorkflowManage class
# ---- itself continues past this chunk and is not reproduced here) ----
# coding=utf-8
"""
    @project: maxkb
    @Author:虎
    @file: workflow_manage.py
    @date:2024/1/9 17:40
    @desc:
"""
import concurrent
import json
import threading
import traceback
from concurrent.futures import ThreadPoolExecutor
from functools import reduce
from typing import List, Dict

from django.db import close_old_connections
from django.db.models import QuerySet
from django.utils import translation
from django.utils.translation import get_language
from django.utils.translation import gettext as _
from langchain_core.prompts import PromptTemplate
from rest_framework import status
from rest_framework.exceptions import ErrorDetail, ValidationError

from application.flow import tools
from application.flow.i_step_node import INode, WorkFlowPostHandler, NodeResult
from application.flow.step_node import get_node
from common.exception.app_exception import AppApiException
from common.handle.base_to_response import BaseToResponse
from common.handle.impl.response.system_to_response import SystemToResponse
from function_lib.models.function import FunctionLib
from setting.models import Model
from setting.models_provider import get_model_credential

# Shared pool for running workflow nodes concurrently.
executor = ThreadPoolExecutor(max_workers=200)


class Edge:
    """An edge of the workflow graph (LogicFlow JSON)."""

    def __init__(self, _id: str, _type: str, sourceNodeId: str, targetNodeId: str, **keywords):
        self.id = _id
        self.type = _type
        self.sourceNodeId = sourceNodeId
        self.targetNodeId = targetNodeId
        for keyword in keywords:
            self.__setattr__(keyword, keywords.get(keyword))


class Node:
    """A node of the workflow graph (LogicFlow JSON)."""

    def __init__(self, _id: str, _type: str, x: int, y: int, properties: dict, **kwargs):
        self.id = _id
        self.type = _type
        self.x = x
        self.y = y
        self.properties = properties
        for keyword in kwargs:
            self.__setattr__(keyword, kwargs.get(keyword))


# Node types that are allowed to terminate a flow (need no outgoing edge).
end_nodes = ['ai-chat-node', 'reply-node', 'function-node', 'function-lib-node', 'application-node',
             'image-understand-node', 'speech-to-text-node', 'text-to-speech-node', 'image-generate-node']


class Flow:
    """Parsed workflow graph plus validation helpers."""

    def __init__(self, nodes: List[Node], edges: List[Edge]):
        self.nodes = nodes
        self.edges = edges

    @staticmethod
    def new_instance(flow_obj: Dict):
        nodes = flow_obj.get('nodes')
        edges = flow_obj.get('edges')
        nodes = [Node(node.get('id'), node.get('type'), **node)
                 for node in nodes]
        edges = [Edge(edge.get('id'), edge.get('type'), **edge) for edge in edges]
        return Flow(nodes, edges)

    def get_start_node(self):
        start_node_list = [node for node in self.nodes if node.id == 'start-node']
        return start_node_list[0]

    def get_search_node(self):
        return [node for node in self.nodes if node.type == 'search-dataset-node']

    def is_valid(self):
        """
        Validate the workflow definition.
        """
        self.is_valid_model_params()
        self.is_valid_start_node()
        self.is_valid_base_node()
        self.is_valid_work_flow()

    @staticmethod
    def is_valid_node_params(node: Node):
        # Instantiating the node class runs its params serializer validation.
        get_node(node.type)(node, None, None)

    def is_valid_node(self, node: Node):
        self.is_valid_node_params(node)
        if node.type == 'condition-node':
            branch_list = node.properties.get('node_data').get('branch')
            for branch in branch_list:
                source_anchor_id = f"{node.id}_{branch.get('id')}_right"
                edge_list = [edge for edge in self.edges if edge.sourceAnchorId == source_anchor_id]
                if len(edge_list) == 0:
                    raise AppApiException(500,
                                          _('The branch {branch} of the {node} node needs to be connected').format(
                                              node=node.properties.get("stepName"), branch=branch.get("type")))

        else:
            edge_list = [edge for edge in self.edges if edge.sourceNodeId == node.id]
            if len(edge_list) == 0 and not end_nodes.__contains__(node.type):
                raise AppApiException(500, _("{node} Nodes cannot be considered as end nodes").format(
                    node=node.properties.get("stepName")))

    def get_next_nodes(self, node: Node):
        edge_list = [edge for edge in self.edges if edge.sourceNodeId == node.id]
        node_list = reduce(lambda x, y: [*x, *y],
                           [[node for node in self.nodes if node.id == edge.targetNodeId] for edge in edge_list],
                           [])
        if len(node_list) == 0 and not end_nodes.__contains__(node.type):
            raise AppApiException(500,
                                  _("The next node that does not exist"))
        return node_list

    def is_valid_work_flow(self, up_node=None):
        # Depth-first validation from the start node.
        if up_node is None:
            up_node = self.get_start_node()
        self.is_valid_node(up_node)
        next_nodes = self.get_next_nodes(up_node)
        for next_node in next_nodes:
            self.is_valid_work_flow(next_node)

    def is_valid_start_node(self):
        start_node_list = [node for node in self.nodes if node.id == 'start-node']
        if len(start_node_list) == 0:
            raise AppApiException(500, _('The starting node is required'))
        if len(start_node_list) > 1:
            raise AppApiException(500, _('There can only be one starting node'))

    def is_valid_model_params(self):
        node_list = [node for node in self.nodes if (node.type == 'ai-chat-node' or node.type == 'question-node')]
        for node in node_list:
            model = QuerySet(Model).filter(id=node.properties.get('node_data', {}).get('model_id')).first()
            if model is None:
                raise ValidationError(ErrorDetail(
                    _('The node {node} model does not exist').format(node=node.properties.get("stepName"))))
            credential = get_model_credential(model.provider, model.model_type, model.model_name)
            model_params_setting = node.properties.get('node_data', {}).get('model_params_setting')
            model_params_setting_form = credential.get_model_params_setting_form(
                model.model_name)
            if model_params_setting is None:
                # Fill in defaults when the node carries no explicit settings.
                model_params_setting = model_params_setting_form.get_default_form_data()
                node.properties.get('node_data', {})['model_params_setting'] = model_params_setting
            if node.properties.get('status', 200) != 200:
                # BUG FIX: the template '{node}' is a *named* field, but the value
                # was passed positionally, so .format() raised KeyError at runtime.
                raise ValidationError(
                    ErrorDetail(_("Node {node} is unavailable").format(node=node.properties.get("stepName"))))
        node_list = [node for node in self.nodes if (node.type == 'function-lib-node')]
        for node in node_list:
            function_lib_id = node.properties.get('node_data', {}).get('function_lib_id')
            if function_lib_id is None:
                raise ValidationError(ErrorDetail(
                    _('The library ID of node {node} cannot be empty').format(node=node.properties.get("stepName"))))
            f_lib = QuerySet(FunctionLib).filter(id=function_lib_id).first()
            if f_lib is None:
                raise ValidationError(ErrorDetail(_("The function library for node {node} is not available").format(
                    node=node.properties.get("stepName"))))

    def is_valid_base_node(self):
        base_node_list = [node for node in self.nodes if node.id == 'base-node']
        if len(base_node_list) == 0:
            raise AppApiException(500, _('Basic information node is required'))
        if len(base_node_list) > 1:
            raise AppApiException(500, _('There can only be one basic information node'))


class NodeResultFuture:
    """Future-like wrapper holding either a node result or the raised error."""

    def __init__(self, r, e, status=200):
        self.r = r
        self.e = e
        self.status = status

    def result(self):
        if self.status == 200:
            return self.r
        else:
            raise self.e


def await_result(result, timeout=1):
    # Returns True while the future is still pending (timeout expired).
    try:
        result.result(timeout)
        return False
    except Exception as e:
        return True


class NodeChunkManage:
    """FIFO of per-node chunk queues feeding the SSE stream."""

    def __init__(self, work_flow):
        self.node_chunk_list = []
        self.current_node_chunk = None
        self.work_flow = work_flow

    def add_node_chunk(self, node_chunk):
        self.node_chunk_list.append(node_chunk)

    def contains(self, node_chunk):
        return self.node_chunk_list.__contains__(node_chunk)

    # NOTE(review): NodeChunkManage.pop (and the WorkflowManage class that
    # follows it) continue past this chunk boundary and are kept as-is there.
self.node_chunk_list.pop(0) + self.current_node_chunk = current_node_chunk + except IndexError as e: + pass + if self.current_node_chunk is not None: + try: + chunk = self.current_node_chunk.chunk_list.pop(0) + return chunk + except IndexError as e: + if self.current_node_chunk.is_end(): + self.current_node_chunk = None + if self.work_flow.answer_is_not_empty(): + chunk = self.work_flow.base_to_response.to_stream_chunk_response( + self.work_flow.params['chat_id'], + self.work_flow.params['chat_record_id'], + '\n\n', False, 0, 0) + self.work_flow.append_answer('\n\n') + return chunk + return self.pop() + return None + + +class WorkflowManage: + def __init__(self, flow: Flow, params, work_flow_post_handler: WorkFlowPostHandler, + base_to_response: BaseToResponse = SystemToResponse(), form_data=None, image_list=None, + document_list=None, + audio_list=None, + other_list=None, + start_node_id=None, + start_node_data=None, chat_record=None, child_node=None): + if form_data is None: + form_data = {} + if image_list is None: + image_list = [] + if document_list is None: + document_list = [] + if audio_list is None: + audio_list = [] + if other_list is None: + other_list = [] + self.start_node_id = start_node_id + self.start_node = None + self.form_data = form_data + self.image_list = image_list + self.document_list = document_list + self.audio_list = audio_list + self.other_list = other_list + self.params = params + self.flow = flow + self.context = {} + self.node_chunk_manage = NodeChunkManage(self) + self.work_flow_post_handler = work_flow_post_handler + self.current_node = None + self.current_result = None + self.answer = "" + self.answer_list = [''] + self.status = 200 + self.base_to_response = base_to_response + self.chat_record = chat_record + self.child_node = child_node + self.future_list = [] + self.lock = threading.Lock() + self.field_list = [] + self.global_field_list = [] + self.init_fields() + if start_node_id is not None: + self.load_node(chat_record, 
start_node_id, start_node_data) + else: + self.node_context = [] + + def init_fields(self): + field_list = [] + global_field_list = [] + for node in self.flow.nodes: + properties = node.properties + node_name = properties.get('stepName') + node_id = node.id + node_config = properties.get('config') + if node_config is not None: + fields = node_config.get('fields') + if fields is not None: + for field in fields: + field_list.append({**field, 'node_id': node_id, 'node_name': node_name}) + global_fields = node_config.get('globalFields') + if global_fields is not None: + for global_field in global_fields: + global_field_list.append({**global_field, 'node_id': node_id, 'node_name': node_name}) + field_list.sort(key=lambda f: len(f.get('node_name')), reverse=True) + global_field_list.sort(key=lambda f: len(f.get('node_name')), reverse=True) + self.field_list = field_list + self.global_field_list = global_field_list + + def append_answer(self, content): + self.answer += content + self.answer_list[-1] += content + + def answer_is_not_empty(self): + return len(self.answer_list[-1]) > 0 + + def load_node(self, chat_record, start_node_id, start_node_data): + self.node_context = [] + self.answer = chat_record.answer_text + self.answer_list = chat_record.answer_text_list + self.answer_list.append('') + for node_details in sorted(chat_record.details.values(), key=lambda d: d.get('index')): + node_id = node_details.get('node_id') + if node_details.get('runtime_node_id') == start_node_id: + def get_node_params(n): + is_result = False + if n.type == 'application-node': + is_result = True + return {**n.properties.get('node_data'), 'form_data': start_node_data, 'node_data': start_node_data, + 'child_node': self.child_node, 'is_result': is_result} + + self.start_node = self.get_node_cls_by_id(node_id, node_details.get('up_node_id_list'), + get_node_params=get_node_params) + self.start_node.valid_args( + {**self.start_node.node_params, 'form_data': start_node_data}, 
self.start_node.workflow_params) + if self.start_node.type == 'application-node': + application_node_dict = node_details.get('application_node_dict', {}) + self.start_node.context['application_node_dict'] = application_node_dict + self.node_context.append(self.start_node) + continue + + node_id = node_details.get('node_id') + node = self.get_node_cls_by_id(node_id, node_details.get('up_node_id_list')) + node.valid_args(node.node_params, node.workflow_params) + node.save_context(node_details, self) + node.node_chunk.end() + self.node_context.append(node) + + def run(self): + close_old_connections() + language = get_language() + if self.params.get('stream'): + return self.run_stream(self.start_node, None, language) + return self.run_block(language) + + def run_block(self, language='zh'): + """ + 非流式响应 + @return: 结果 + """ + self.run_chain_async(None, None, language) + while self.is_run(): + pass + details = self.get_runtime_details() + message_tokens = sum([row.get('message_tokens') for row in details.values() if + 'message_tokens' in row and row.get('message_tokens') is not None]) + answer_tokens = sum([row.get('answer_tokens') for row in details.values() if + 'answer_tokens' in row and row.get('answer_tokens') is not None]) + answer_text_list = self.get_answer_text_list() + answer_text = '\n\n'.join( + '\n\n'.join([a.get('content') for a in answer]) for answer in + answer_text_list) + answer_list = reduce(lambda pre, _n: [*pre, *_n], answer_text_list, []) + self.work_flow_post_handler.handler(self.params['chat_id'], self.params['chat_record_id'], + answer_text, + self) + return self.base_to_response.to_block_response(self.params['chat_id'], + self.params['chat_record_id'], answer_text, True + , message_tokens, answer_tokens, + _status=status.HTTP_200_OK if self.status == 200 else status.HTTP_500_INTERNAL_SERVER_ERROR, + other_params={'answer_list': answer_list}) + + def run_stream(self, current_node, node_result_future, language='zh'): + """ + 流式响应 + @return: + """ 
+ self.run_chain_async(current_node, node_result_future, language) + return tools.to_stream_response_simple(self.await_result()) + + def is_run(self, timeout=0.5): + future_list_len = len(self.future_list) + try: + r = concurrent.futures.wait(self.future_list, timeout) + if len(r.not_done) > 0: + return True + else: + if future_list_len == len(self.future_list): + return False + else: + return True + except Exception as e: + return True + + def await_result(self): + try: + while self.is_run(): + while True: + chunk = self.node_chunk_manage.pop() + if chunk is not None: + yield chunk + else: + break + while True: + chunk = self.node_chunk_manage.pop() + if chunk is None: + break + yield chunk + finally: + while self.is_run(): + pass + details = self.get_runtime_details() + message_tokens = sum([row.get('message_tokens') for row in details.values() if + 'message_tokens' in row and row.get('message_tokens') is not None]) + answer_tokens = sum([row.get('answer_tokens') for row in details.values() if + 'answer_tokens' in row and row.get('answer_tokens') is not None]) + self.work_flow_post_handler.handler(self.params['chat_id'], self.params['chat_record_id'], + self.answer, + self) + yield self.base_to_response.to_stream_chunk_response(self.params['chat_id'], + self.params['chat_record_id'], + '', + [], + '', True, message_tokens, answer_tokens, {}) + + def run_chain_async(self, current_node, node_result_future, language='zh'): + future = executor.submit(self.run_chain_manage, current_node, node_result_future, language) + self.future_list.append(future) + + def run_chain_manage(self, current_node, node_result_future, language='zh'): + translation.activate(language) + if current_node is None: + start_node = self.get_start_node() + current_node = get_node(start_node.type)(start_node, self.params, self) + self.node_chunk_manage.add_node_chunk(current_node.node_chunk) + # 添加节点 + self.append_node(current_node) + result = self.run_chain(current_node, node_result_future) + if 
result is None: + return + node_list = self.get_next_node_list(current_node, result) + if len(node_list) == 1: + self.run_chain_manage(node_list[0], None, language) + elif len(node_list) > 1: + sorted_node_run_list = sorted(node_list, key=lambda n: n.node.y) + # 获取到可执行的子节点 + result_list = [{'node': node, 'future': executor.submit(self.run_chain_manage, node, None, language)} for + node in + sorted_node_run_list] + for r in result_list: + self.future_list.append(r.get('future')) + + def run_chain(self, current_node, node_result_future=None): + if node_result_future is None: + node_result_future = self.run_node_future(current_node) + try: + is_stream = self.params.get('stream', True) + result = self.hand_event_node_result(current_node, + node_result_future) if is_stream else self.hand_node_result( + current_node, node_result_future) + return result + except Exception as e: + traceback.print_exc() + return None + + def hand_node_result(self, current_node, node_result_future): + try: + current_result = node_result_future.result() + result = current_result.write_context(current_node, self) + if result is not None: + # 阻塞获取结果 + list(result) + return current_result + except Exception as e: + traceback.print_exc() + self.status = 500 + current_node.get_write_error_context(e) + self.answer += str(e) + finally: + current_node.node_chunk.end() + + def append_node(self, current_node): + for index in range(len(self.node_context)): + n = self.node_context[index] + if current_node.id == n.node.id and current_node.runtime_node_id == n.runtime_node_id: + self.node_context[index] = current_node + return + self.node_context.append(current_node) + + def hand_event_node_result(self, current_node, node_result_future): + runtime_node_id = current_node.runtime_node_id + real_node_id = current_node.runtime_node_id + child_node = {} + view_type = current_node.view_type + try: + current_result = node_result_future.result() + result = current_result.write_context(current_node, self) + if 
result is not None: + if self.is_result(current_node, current_result): + for r in result: + reasoning_content = '' + content = r + child_node = {} + node_is_end = False + view_type = current_node.view_type + if isinstance(r, dict): + content = r.get('content') + child_node = {'runtime_node_id': r.get('runtime_node_id'), + 'chat_record_id': r.get('chat_record_id') + , 'child_node': r.get('child_node')} + if r.__contains__('real_node_id'): + real_node_id = r.get('real_node_id') + if r.__contains__('node_is_end'): + node_is_end = r.get('node_is_end') + view_type = r.get('view_type') + reasoning_content = r.get('reasoning_content') + chunk = self.base_to_response.to_stream_chunk_response(self.params['chat_id'], + self.params['chat_record_id'], + current_node.id, + current_node.up_node_id_list, + content, False, 0, 0, + {'node_type': current_node.type, + 'runtime_node_id': runtime_node_id, + 'view_type': view_type, + 'child_node': child_node, + 'node_is_end': node_is_end, + 'real_node_id': real_node_id, + 'reasoning_content': reasoning_content}) + current_node.node_chunk.add_chunk(chunk) + chunk = (self.base_to_response + .to_stream_chunk_response(self.params['chat_id'], + self.params['chat_record_id'], + current_node.id, + current_node.up_node_id_list, + '', False, 0, 0, {'node_is_end': True, + 'runtime_node_id': runtime_node_id, + 'node_type': current_node.type, + 'view_type': view_type, + 'child_node': child_node, + 'real_node_id': real_node_id, + 'reasoning_content': ''})) + current_node.node_chunk.add_chunk(chunk) + else: + list(result) + return current_result + except Exception as e: + # 添加节点 + traceback.print_exc() + chunk = self.base_to_response.to_stream_chunk_response(self.params['chat_id'], + self.params['chat_record_id'], + current_node.id, + current_node.up_node_id_list, + 'Exception:' + str(e), False, 0, 0, + {'node_is_end': True, + 'runtime_node_id': current_node.runtime_node_id, + 'node_type': current_node.type, + 'view_type': current_node.view_type, + 
'child_node': {}, + 'real_node_id': real_node_id}) + current_node.node_chunk.add_chunk(chunk) + current_node.get_write_error_context(e) + self.status = 500 + return None + finally: + current_node.node_chunk.end() + + def run_node_async(self, node): + future = executor.submit(self.run_node, node) + return future + + def run_node_future(self, node): + try: + node.valid_args(node.node_params, node.workflow_params) + result = self.run_node(node) + return NodeResultFuture(result, None, 200) + except Exception as e: + return NodeResultFuture(None, e, 500) + + def run_node(self, node): + result = node.run() + return result + + def is_result(self, current_node, current_node_result): + return current_node.node_params.get('is_result', not self._has_next_node( + current_node, current_node_result)) if current_node.node_params is not None else False + + def get_chunk_content(self, chunk, is_end=False): + return 'data: ' + json.dumps( + {'chat_id': self.params['chat_id'], 'id': self.params['chat_record_id'], 'operate': True, + 'content': chunk, 'is_end': is_end}, ensure_ascii=False) + "\n\n" + + def _has_next_node(self, current_node, node_result: NodeResult | None): + """ + 是否有下一个可运行的节点 + """ + if node_result is not None and node_result.is_assertion_result(): + for edge in self.flow.edges: + if (edge.sourceNodeId == current_node.id and + f"{edge.sourceNodeId}_{node_result.node_variable.get('branch_id')}_right" == edge.sourceAnchorId): + return True + else: + for edge in self.flow.edges: + if edge.sourceNodeId == current_node.id: + return True + + def has_next_node(self, node_result: NodeResult | None): + """ + 是否有下一个可运行的节点 + """ + return self._has_next_node(self.get_start_node() if self.current_node is None else self.current_node, + node_result) + + def get_runtime_details(self): + details_result = {} + for index in range(len(self.node_context)): + node = self.node_context[index] + if self.chat_record is not None and self.chat_record.details is not None: + details = 
self.chat_record.details.get(node.runtime_node_id) + if details is not None and self.start_node.runtime_node_id != node.runtime_node_id: + details_result[node.runtime_node_id] = details + continue + details = node.get_details(index) + details['node_id'] = node.id + details['up_node_id_list'] = node.up_node_id_list + details['runtime_node_id'] = node.runtime_node_id + details_result[node.runtime_node_id] = details + return details_result + + def get_answer_text_list(self): + result = [] + answer_list = reduce(lambda x, y: [*x, *y], + [n.get_answer_list() for n in self.node_context if n.get_answer_list() is not None], + []) + up_node = None + for index in range(len(answer_list)): + current_answer = answer_list[index] + if len(current_answer.content) > 0: + if up_node is None or current_answer.view_type == 'single_view' or ( + current_answer.view_type == 'many_view' and up_node.view_type == 'single_view'): + result.append([current_answer]) + else: + if len(result) > 0: + exec_index = len(result) - 1 + if isinstance(result[exec_index], list): + result[exec_index].append(current_answer) + else: + result.insert(0, [current_answer]) + up_node = current_answer + if len(result) == 0: + # 如果没有响应 就响应一个空数据 + return [[]] + return [[item.to_dict() for item in r] for r in result] + + def get_next_node(self): + """ + 获取下一个可运行的所有节点 + """ + if self.current_node is None: + node = self.get_start_node() + node_instance = get_node(node.type)(node, self.params, self) + return node_instance + if self.current_result is not None and self.current_result.is_assertion_result(): + for edge in self.flow.edges: + if (edge.sourceNodeId == self.current_node.id and + f"{edge.sourceNodeId}_{self.current_result.node_variable.get('branch_id')}_right" == edge.sourceAnchorId): + return self.get_node_cls_by_id(edge.targetNodeId) + else: + for edge in self.flow.edges: + if edge.sourceNodeId == self.current_node.id: + return self.get_node_cls_by_id(edge.targetNodeId) + + return None + + @staticmethod + def 
dependent_node(up_node_id, node): + if not node.node_chunk.is_end(): + return False + if node.id == up_node_id: + if node.type == 'form-node': + if node.context.get('form_data', None) is not None: + return True + return False + return True + + def dependent_node_been_executed(self, node_id): + """ + 判断依赖节点是否都已执行 + @param node_id: 需要判断的节点id + @return: + """ + up_node_id_list = [edge.sourceNodeId for edge in self.flow.edges if edge.targetNodeId == node_id] + return all([any([self.dependent_node(up_node_id, node) for node in self.node_context]) for up_node_id in + up_node_id_list]) + + def get_up_node_id_list(self, node_id): + up_node_id_list = [edge.sourceNodeId for edge in self.flow.edges if edge.targetNodeId == node_id] + return up_node_id_list + + def get_next_node_list(self, current_node, current_node_result): + """ + 获取下一个可执行节点列表 + @param current_node: 当前可执行节点 + @param current_node_result: 当前可执行节点结果 + @return: 可执行节点列表 + """ + # 判断是否中断执行 + if current_node_result.is_interrupt_exec(current_node): + return [] + node_list = [] + if current_node_result is not None and current_node_result.is_assertion_result(): + for edge in self.flow.edges: + if (edge.sourceNodeId == current_node.id and + f"{edge.sourceNodeId}_{current_node_result.node_variable.get('branch_id')}_right" == edge.sourceAnchorId): + next_node = [node for node in self.flow.nodes if node.id == edge.targetNodeId] + if len(next_node) == 0: + continue + if next_node[0].properties.get('condition', "AND") == 'AND': + if self.dependent_node_been_executed(edge.targetNodeId): + node_list.append( + self.get_node_cls_by_id(edge.targetNodeId, + [*current_node.up_node_id_list, current_node.node.id])) + else: + node_list.append( + self.get_node_cls_by_id(edge.targetNodeId, + [*current_node.up_node_id_list, current_node.node.id])) + else: + for edge in self.flow.edges: + if edge.sourceNodeId == current_node.id: + next_node = [node for node in self.flow.nodes if node.id == edge.targetNodeId] + if len(next_node) == 0: + 
continue + if next_node[0].properties.get('condition', "AND") == 'AND': + if self.dependent_node_been_executed(edge.targetNodeId): + node_list.append( + self.get_node_cls_by_id(edge.targetNodeId, + [*current_node.up_node_id_list, current_node.node.id])) + else: + node_list.append( + self.get_node_cls_by_id(edge.targetNodeId, + [*current_node.up_node_id_list, current_node.node.id])) + return node_list + + def get_reference_field(self, node_id: str, fields: List[str]): + """ + @param node_id: 节点id + @param fields: 字段 + @return: + """ + if node_id == 'global': + return INode.get_field(self.context, fields) + else: + return self.get_node_by_id(node_id).get_reference_field(fields) + + def get_workflow_content(self): + context = { + 'global': self.context, + } + + for node in self.node_context: + context[node.id] = node.context + return context + + def reset_prompt(self, prompt: str): + placeholder = "{}" + for field in self.field_list: + globeLabel = f"{field.get('node_name')}.{field.get('value')}" + globeValue = f"context.get('{field.get('node_id')}',{placeholder}).get('{field.get('value', '')}','')" + prompt = prompt.replace(globeLabel, globeValue) + for field in self.global_field_list: + globeLabel = f"全局变量.{field.get('value')}" + globeLabelNew = f"global.{field.get('value')}" + globeValue = f"context.get('global').get('{field.get('value', '')}','')" + prompt = prompt.replace(globeLabel, globeValue).replace(globeLabelNew, globeValue) + return prompt + + def generate_prompt(self, prompt: str): + """ + 格式化生成提示词 + @param prompt: 提示词信息 + @return: 格式化后的提示词 + """ + context = self.get_workflow_content() + prompt = self.reset_prompt(prompt) + prompt_template = PromptTemplate.from_template(prompt, template_format='jinja2') + value = prompt_template.format(context=context) + return value + + def get_start_node(self): + """ + 获取启动节点 + @return: + """ + start_node_list = [node for node in self.flow.nodes if node.type == 'start-node'] + return start_node_list[0] + + def 
get_base_node(self): + """ + 获取基础节点 + @return: + """ + base_node_list = [node for node in self.flow.nodes if node.type == 'base-node'] + return base_node_list[0] + + def get_node_cls_by_id(self, node_id, up_node_id_list=None, + get_node_params=lambda node: node.properties.get('node_data')): + for node in self.flow.nodes: + if node.id == node_id: + node_instance = get_node(node.type)(node, + self.params, self, up_node_id_list, get_node_params) + return node_instance + return None + + def get_node_by_id(self, node_id): + for node in self.node_context: + if node.id == node_id: + return node + return None + + def get_node_reference(self, reference_address: Dict): + node = self.get_node_by_id(reference_address.get('node_id')) + return node.context[reference_address.get('node_field')] diff --git a/apps/application/migrations/0007_alter_application_prologue.py b/apps/application/migrations/0007_alter_application_prologue.py new file mode 100644 index 00000000000..27b519cf8bb --- /dev/null +++ b/apps/application/migrations/0007_alter_application_prologue.py @@ -0,0 +1,18 @@ +# Generated by Django 4.1.13 on 2024-05-24 11:00 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('application', '0006_applicationapikey_allow_cross_domain_and_more'), + ] + + operations = [ + migrations.AlterField( + model_name='application', + name='prologue', + field=models.CharField(default='', max_length=4096, verbose_name='开场白'), + ), + ] diff --git a/apps/application/migrations/0008_chat_is_deleted.py b/apps/application/migrations/0008_chat_is_deleted.py new file mode 100644 index 00000000000..5291c3f548f --- /dev/null +++ b/apps/application/migrations/0008_chat_is_deleted.py @@ -0,0 +1,18 @@ +# Generated by Django 4.1.13 on 2024-06-13 11:46 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('application', '0007_alter_application_prologue'), + ] + + operations = [ + 
migrations.AddField( + model_name='chat', + name='is_deleted', + field=models.BooleanField(default=False, verbose_name=''), + ), + ] diff --git a/apps/application/migrations/0009_application_type_application_work_flow_and_more.py b/apps/application/migrations/0009_application_type_application_work_flow_and_more.py new file mode 100644 index 00000000000..5d0bf0c9fda --- /dev/null +++ b/apps/application/migrations/0009_application_type_application_work_flow_and_more.py @@ -0,0 +1,38 @@ +# Generated by Django 4.1.13 on 2024-06-25 16:30 + +from django.db import migrations, models +import django.db.models.deletion +import uuid + + +class Migration(migrations.Migration): + + dependencies = [ + ('application', '0008_chat_is_deleted'), + ] + + operations = [ + migrations.AddField( + model_name='application', + name='type', + field=models.CharField(choices=[('SIMPLE', '简易'), ('WORK_FLOW', '工作流')], default='SIMPLE', max_length=256, verbose_name='应用类型'), + ), + migrations.AddField( + model_name='application', + name='work_flow', + field=models.JSONField(default=dict, verbose_name='工作流数据'), + ), + migrations.CreateModel( + name='WorkFlowVersion', + fields=[ + ('create_time', models.DateTimeField(auto_now_add=True, verbose_name='创建时间')), + ('update_time', models.DateTimeField(auto_now=True, verbose_name='修改时间')), + ('id', models.UUIDField(default=uuid.uuid1, editable=False, primary_key=True, serialize=False, verbose_name='主键id')), + ('work_flow', models.JSONField(default=dict, verbose_name='工作流数据')), + ('application', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='application.application')), + ], + options={ + 'db_table': 'application_work_flow_version', + }, + ), + ] diff --git a/apps/application/migrations/0010_alter_chatrecord_details.py b/apps/application/migrations/0010_alter_chatrecord_details.py new file mode 100644 index 00000000000..874f45928d0 --- /dev/null +++ b/apps/application/migrations/0010_alter_chatrecord_details.py @@ -0,0 +1,19 @@ +# 
Generated by Django 4.2.13 on 2024-07-15 15:52 + +from django.db import migrations, models + +import common.encoder.encoder + + +class Migration(migrations.Migration): + dependencies = [ + ('application', '0009_application_type_application_work_flow_and_more'), + ] + + operations = [ + migrations.AlterField( + model_name='chatrecord', + name='details', + field=models.JSONField(default=dict, encoder=common.encoder.encoder.SystemEncoder, verbose_name='对话详情'), + ), + ] diff --git a/apps/application/migrations/0011_application_model_params_setting.py b/apps/application/migrations/0011_application_model_params_setting.py new file mode 100644 index 00000000000..656b547752d --- /dev/null +++ b/apps/application/migrations/0011_application_model_params_setting.py @@ -0,0 +1,18 @@ +# Generated by Django 4.2.15 on 2024-08-23 14:17 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('application', '0010_alter_chatrecord_details'), + ] + + operations = [ + migrations.AddField( + model_name='application', + name='model_params_setting', + field=models.JSONField(default=dict, verbose_name='模型参数相关设置'), + ), + ] diff --git a/apps/application/migrations/0012_application_stt_model_application_stt_model_enable_and_more.py b/apps/application/migrations/0012_application_stt_model_application_stt_model_enable_and_more.py new file mode 100644 index 00000000000..f50c39d2f87 --- /dev/null +++ b/apps/application/migrations/0012_application_stt_model_application_stt_model_enable_and_more.py @@ -0,0 +1,35 @@ +# Generated by Django 4.2.15 on 2024-09-05 14:35 + +from django.db import migrations, models +import django.db.models.deletion + + +class Migration(migrations.Migration): + + dependencies = [ + ('setting', '0006_alter_model_status'), + ('application', '0011_application_model_params_setting'), + ] + + operations = [ + migrations.AddField( + model_name='application', + name='stt_model', + field=models.ForeignKey(blank=True, 
db_constraint=False, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='stt_model_id', to='setting.model'), + ), + migrations.AddField( + model_name='application', + name='stt_model_enable', + field=models.BooleanField(default=False, verbose_name='语音识别模型是否启用'), + ), + migrations.AddField( + model_name='application', + name='tts_model', + field=models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='tts_model_id', to='setting.model'), + ), + migrations.AddField( + model_name='application', + name='tts_model_enable', + field=models.BooleanField(default=False, verbose_name='语音合成模型是否启用'), + ), + ] diff --git a/apps/application/migrations/0013_application_tts_type.py b/apps/application/migrations/0013_application_tts_type.py new file mode 100644 index 00000000000..c64c8e76d57 --- /dev/null +++ b/apps/application/migrations/0013_application_tts_type.py @@ -0,0 +1,18 @@ +# Generated by Django 4.2.15 on 2024-09-12 11:01 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('application', '0012_application_stt_model_application_stt_model_enable_and_more'), + ] + + operations = [ + migrations.AddField( + model_name='application', + name='tts_type', + field=models.CharField(default='BROWSER', max_length=20, verbose_name='语音播放类型'), + ), + ] diff --git a/apps/application/migrations/0014_application_problem_optimization_prompt.py b/apps/application/migrations/0014_application_problem_optimization_prompt.py new file mode 100644 index 00000000000..e2efc1097ce --- /dev/null +++ b/apps/application/migrations/0014_application_problem_optimization_prompt.py @@ -0,0 +1,18 @@ +# Generated by Django 4.2.15 on 2024-09-13 18:57 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('application', '0013_application_tts_type'), + ] + + operations = [ + migrations.AddField( + 
model_name='application', + name='problem_optimization_prompt', + field=models.CharField(blank=True, default='()里面是用户问题,根据上下文回答揣测用户问题({question}) 要求: 输出一个补全问题,并且放在标签中', max_length=102400, null=True, verbose_name='问题优化提示词'), + ), + ] diff --git a/apps/application/migrations/0015_re_database_index.py b/apps/application/migrations/0015_re_database_index.py new file mode 100644 index 00000000000..740a2a2d241 --- /dev/null +++ b/apps/application/migrations/0015_re_database_index.py @@ -0,0 +1,63 @@ +# Generated by Django 4.2.15 on 2024-09-18 16:14 +import logging + +import psycopg2 +from django.db import migrations +from psycopg2 import extensions + +from smartdoc.const import CONFIG + + +def get_connect(db_name): + conn_params = { + "dbname": db_name, + "user": CONFIG.get('DB_USER'), + "password": CONFIG.get('DB_PASSWORD'), + "host": CONFIG.get('DB_HOST'), + "port": CONFIG.get('DB_PORT') + } + # 建立连接 + connect = psycopg2.connect(**conn_params) + return connect + + +def sql_execute(conn, reindex_sql: str, alter_database_sql: str): + """ + 执行一条sql + @param reindex_sql: + @param conn: + @param alter_database_sql: + """ + conn.set_isolation_level(extensions.ISOLATION_LEVEL_AUTOCOMMIT) + with conn.cursor() as cursor: + cursor.execute(reindex_sql, []) + cursor.execute(alter_database_sql, []) + cursor.close() + + +def re_index(apps, schema_editor): + app_db_name = CONFIG.get('DB_NAME') + try: + re_index_database(app_db_name) + except Exception as e: + logging.error(f'reindex database {app_db_name}发送错误:{str(e)}') + try: + re_index_database('root') + except Exception as e: + logging.error(f'reindex database root 发送错误:{str(e)}') + + +def re_index_database(db_name): + db_conn = get_connect(db_name) + sql_execute(db_conn, f'REINDEX DATABASE "{db_name}";', f'ALTER DATABASE "{db_name}" REFRESH COLLATION VERSION;') + db_conn.close() + + +class Migration(migrations.Migration): + dependencies = [ + ('application', '0014_application_problem_optimization_prompt'), + ] + + operations = [ 
+ migrations.RunPython(re_index, atomic=False) + ] diff --git a/apps/application/migrations/0016_alter_chatrecord_problem_text.py b/apps/application/migrations/0016_alter_chatrecord_problem_text.py new file mode 100644 index 00000000000..edda1e607c5 --- /dev/null +++ b/apps/application/migrations/0016_alter_chatrecord_problem_text.py @@ -0,0 +1,18 @@ +# Generated by Django 4.2.15 on 2024-09-26 13:19 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('application', '0015_re_database_index'), + ] + + operations = [ + migrations.AlterField( + model_name='chatrecord', + name='problem_text', + field=models.CharField(max_length=10240, verbose_name='问题'), + ), + ] diff --git a/apps/application/migrations/0017_application_tts_model_params_setting.py b/apps/application/migrations/0017_application_tts_model_params_setting.py new file mode 100644 index 00000000000..3276ca63234 --- /dev/null +++ b/apps/application/migrations/0017_application_tts_model_params_setting.py @@ -0,0 +1,18 @@ +# Generated by Django 4.2.15 on 2024-10-16 13:10 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('application', '0016_alter_chatrecord_problem_text'), + ] + + operations = [ + migrations.AddField( + model_name='application', + name='tts_model_params_setting', + field=models.JSONField(default=dict, verbose_name='模型参数相关设置'), + ), + ] diff --git a/apps/application/migrations/0018_workflowversion_name.py b/apps/application/migrations/0018_workflowversion_name.py new file mode 100644 index 00000000000..51d0417e7fc --- /dev/null +++ b/apps/application/migrations/0018_workflowversion_name.py @@ -0,0 +1,38 @@ +# Generated by Django 4.2.15 on 2024-10-16 15:17 + +from django.db import migrations, models + +sql = """ +UPDATE "public".application_work_flow_version +SET "name" = TO_CHAR(create_time, 'YYYY-MM-DD HH24:MI:SS'); +""" + + +class Migration(migrations.Migration): + 
dependencies = [ + ('application', '0017_application_tts_model_params_setting'), + ] + + operations = [ + migrations.AddField( + model_name='application', + name='clean_time', + field=models.IntegerField(default=180, verbose_name='清理时间'), + ), + migrations.AddField( + model_name='workflowversion', + name='name', + field=models.CharField(default='', max_length=128, verbose_name='版本名称'), + ), + migrations.RunSQL(sql), + migrations.AddField( + model_name='workflowversion', + name='publish_user_id', + field=models.UUIDField(default=None, null=True, verbose_name='发布者id'), + ), + migrations.AddField( + model_name='workflowversion', + name='publish_user_name', + field=models.CharField(default='', max_length=128, verbose_name='发布者名称'), + ), + ] diff --git a/apps/application/migrations/0019_application_file_upload_enable_and_more.py b/apps/application/migrations/0019_application_file_upload_enable_and_more.py new file mode 100644 index 00000000000..7b12321f114 --- /dev/null +++ b/apps/application/migrations/0019_application_file_upload_enable_and_more.py @@ -0,0 +1,34 @@ +# Generated by Django 4.2.15 on 2024-11-13 10:13 + +import django.contrib.postgres.fields +from django.db import migrations, models + +sql = """ +UPDATE application_chat_record +SET answer_text_list=ARRAY[jsonb_build_object('content',answer_text)] +""" + + +class Migration(migrations.Migration): + dependencies = [ + ('application', '0018_workflowversion_name'), + ] + + operations = [ + migrations.AddField( + model_name='application', + name='file_upload_enable', + field=models.BooleanField(default=False, verbose_name='文件上传是否启用'), + ), + migrations.AddField( + model_name='application', + name='file_upload_setting', + field=models.JSONField(default=dict, verbose_name='文件上传相关设置'), + ), + migrations.AddField( + model_name='chatrecord', + name='answer_text_list', + field=django.contrib.postgres.fields.ArrayField(base_field=models.JSONField(), default=list, size=None, verbose_name='改进标注列表') + ), + 
migrations.RunSQL(sql) + ] diff --git a/apps/application/migrations/0020_application_record_update_time.py b/apps/application/migrations/0020_application_record_update_time.py new file mode 100644 index 00000000000..b3d9c429897 --- /dev/null +++ b/apps/application/migrations/0020_application_record_update_time.py @@ -0,0 +1,22 @@ +from django.db import migrations, connection + +batch_update_update_time = """ +UPDATE application_chat ac +SET update_time = acr_max.max_update_time +FROM ( + SELECT chat_id, MAX(update_time) AS max_update_time + FROM application_chat_record + GROUP BY chat_id +) acr_max +WHERE ac.id = acr_max.chat_id; +""" + + +class Migration(migrations.Migration): + dependencies = [ + ('application', '0019_application_file_upload_enable_and_more'), + ] + + operations = [ + migrations.RunSQL(batch_update_update_time), + ] diff --git a/apps/application/migrations/0021_applicationpublicaccessclient_client_id_and_more.py b/apps/application/migrations/0021_applicationpublicaccessclient_client_id_and_more.py new file mode 100644 index 00000000000..356ff4dffdd --- /dev/null +++ b/apps/application/migrations/0021_applicationpublicaccessclient_client_id_and_more.py @@ -0,0 +1,34 @@ +# Generated by Django 4.2.15 on 2024-12-27 18:42 + +from django.db import migrations, models +import uuid + +run_sql = """ +UPDATE application_public_access_client +SET client_id="id" +""" + + +class Migration(migrations.Migration): + dependencies = [ + ('application', '0020_application_record_update_time'), + ] + + operations = [ + migrations.AddField( + model_name='applicationpublicaccessclient', + name='client_id', + field=models.UUIDField(default=uuid.uuid1, verbose_name='公共访问链接客户端id'), + ), + migrations.AlterField( + model_name='applicationpublicaccessclient', + name='id', + field=models.UUIDField(default=uuid.uuid1, editable=False, primary_key=True, serialize=False, + verbose_name='主键id'), + ), + migrations.AddIndex( + model_name='applicationpublicaccessclient', + 
index=models.Index(fields=['client_id'], name='application_client__4de9af_idx'), + ), + migrations.RunSQL(run_sql) + ] diff --git a/apps/application/migrations/0022_application_tts_autoplay.py b/apps/application/migrations/0022_application_tts_autoplay.py new file mode 100644 index 00000000000..0a4362851c0 --- /dev/null +++ b/apps/application/migrations/0022_application_tts_autoplay.py @@ -0,0 +1,18 @@ +# Generated by Django 4.2.15 on 2025-01-03 14:07 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('application', '0021_applicationpublicaccessclient_client_id_and_more'), + ] + + operations = [ + migrations.AddField( + model_name='application', + name='tts_autoplay', + field=models.BooleanField(default=False, verbose_name='自动播放'), + ), + ] diff --git a/apps/application/migrations/0023_application_stt_autosend.py b/apps/application/migrations/0023_application_stt_autosend.py new file mode 100644 index 00000000000..13453c5a9fd --- /dev/null +++ b/apps/application/migrations/0023_application_stt_autosend.py @@ -0,0 +1,18 @@ +# Generated by Django 4.2.15 on 2025-01-06 10:37 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('application', '0022_application_tts_autoplay'), + ] + + operations = [ + migrations.AddField( + model_name='application', + name='stt_autosend', + field=models.BooleanField(default=False, verbose_name='自动发送'), + ), + ] diff --git a/apps/application/migrations/0024_applicationaccesstoken_language.py b/apps/application/migrations/0024_applicationaccesstoken_language.py new file mode 100644 index 00000000000..0f92d935aec --- /dev/null +++ b/apps/application/migrations/0024_applicationaccesstoken_language.py @@ -0,0 +1,17 @@ +# Generated by Django 4.2.15 on 2025-01-20 06:59 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + dependencies = [ + ('application', '0023_application_stt_autosend'), + 
] + + operations = [ + migrations.AddField( + model_name='applicationaccesstoken', + name='language', + field=models.CharField(default=None, max_length=10, null=True, verbose_name='语言') + ), + ] diff --git a/apps/application/migrations/0025_alter_application_prologue.py b/apps/application/migrations/0025_alter_application_prologue.py new file mode 100644 index 00000000000..8fcae8245cd --- /dev/null +++ b/apps/application/migrations/0025_alter_application_prologue.py @@ -0,0 +1,18 @@ +# Generated by Django 4.2.18 on 2025-01-22 09:53 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('application', '0024_applicationaccesstoken_language'), + ] + + operations = [ + migrations.AlterField( + model_name='application', + name='prologue', + field=models.CharField(default='', max_length=40960, verbose_name='开场白'), + ), + ] diff --git a/apps/application/migrations/0026_chat_asker.py b/apps/application/migrations/0026_chat_asker.py new file mode 100644 index 00000000000..e556f8c3aff --- /dev/null +++ b/apps/application/migrations/0026_chat_asker.py @@ -0,0 +1,20 @@ +# Generated by Django 4.2.18 on 2025-03-18 06:05 + +import application.models.application +import common.encoder.encoder +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('application', '0025_alter_application_prologue'), + ] + + operations = [ + migrations.AddField( + model_name='chat', + name='asker', + field=models.JSONField(default=application.models.application.default_asker, encoder=common.encoder.encoder.SystemEncoder, verbose_name='访问者'), + ), + ] diff --git a/apps/application/models/api_key_model.py b/apps/application/models/api_key_model.py index 965e1f1c4ea..2f64d7f7dde 100644 --- a/apps/application/models/api_key_model.py +++ b/apps/application/models/api_key_model.py @@ -13,9 +13,14 @@ from application.models import Application from common.mixins.app_model_mixin import AppModelMixin 
+from smartdoc.const import CONFIG from users.models import User +def get_language(): + return CONFIG.get_language_code() + + class ApplicationApiKey(AppModelMixin): id = models.UUIDField(primary_key=True, max_length=128, default=uuid.uuid1, editable=False, verbose_name="主键id") secret_key = models.CharField(max_length=1024, verbose_name="秘钥", unique=True) @@ -45,15 +50,21 @@ class ApplicationAccessToken(AppModelMixin): , default=list) show_source = models.BooleanField(default=False, verbose_name="是否显示知识来源") + language = models.CharField(max_length=10, verbose_name="语言", default=None, null=True) + class Meta: db_table = "application_access_token" class ApplicationPublicAccessClient(AppModelMixin): - id = models.UUIDField(max_length=128, primary_key=True, verbose_name="公共访问链接客户端id") + id = models.UUIDField(primary_key=True, max_length=128, default=uuid.uuid1, editable=False, verbose_name="主键id") + client_id = models.UUIDField(max_length=128, default=uuid.uuid1, verbose_name="公共访问链接客户端id") application = models.ForeignKey(Application, on_delete=models.CASCADE, verbose_name="应用id") access_num = models.IntegerField(default=0, verbose_name="访问总次数次数") intraday_access_num = models.IntegerField(default=0, verbose_name="当日访问次数") class Meta: db_table = "application_public_access_client" + indexes = [ + models.Index(fields=['client_id']), + ] diff --git a/apps/application/models/application.py b/apps/application/models/application.py index 6c77937bdef..0032271a70b 100644 --- a/apps/application/models/application.py +++ b/apps/application/models/application.py @@ -11,13 +11,20 @@ from django.contrib.postgres.fields import ArrayField from django.db import models from langchain.schema import HumanMessage, AIMessage - +from django.utils.translation import gettext as _ +from common.encoder.encoder import SystemEncoder from common.mixins.app_model_mixin import AppModelMixin from dataset.models.data_set import DataSet from setting.models.model_management import Model from users.models 
import User +class ApplicationTypeChoices(models.TextChoices): + """订单类型""" + SIMPLE = 'SIMPLE', '简易' + WORK_FLOW = 'WORK_FLOW', '工作流' + + def get_dataset_setting_dict(): return {'top_n': 3, 'similarity': 0.6, 'max_paragraph_char_number': 5000, 'search_mode': 'embedding', 'no_references_setting': { @@ -27,21 +34,47 @@ def get_dataset_setting_dict(): def get_model_setting_dict(): - return {'prompt': Application.get_default_model_prompt()} + return { + 'prompt': Application.get_default_model_prompt(), + 'no_references_prompt': '{question}', + 'reasoning_content_start': '', + 'reasoning_content_end': '', + 'reasoning_content_enable': False, + } class Application(AppModelMixin): id = models.UUIDField(primary_key=True, max_length=128, default=uuid.uuid1, editable=False, verbose_name="主键id") name = models.CharField(max_length=128, verbose_name="应用名称") desc = models.CharField(max_length=512, verbose_name="引用描述", default="") - prologue = models.CharField(max_length=1024, verbose_name="开场白", default="") + prologue = models.CharField(max_length=40960, verbose_name="开场白", default="") dialogue_number = models.IntegerField(default=0, verbose_name="会话数量") user = models.ForeignKey(User, on_delete=models.DO_NOTHING) model = models.ForeignKey(Model, on_delete=models.SET_NULL, db_constraint=False, blank=True, null=True) dataset_setting = models.JSONField(verbose_name="数据集参数设置", default=get_dataset_setting_dict) model_setting = models.JSONField(verbose_name="模型参数相关设置", default=get_model_setting_dict) + model_params_setting = models.JSONField(verbose_name="模型参数相关设置", default=dict) + tts_model_params_setting = models.JSONField(verbose_name="模型参数相关设置", default=dict) problem_optimization = models.BooleanField(verbose_name="问题优化", default=False) icon = models.CharField(max_length=256, verbose_name="应用icon", default="/ui/favicon.ico") + work_flow = models.JSONField(verbose_name="工作流数据", default=dict) + type = models.CharField(verbose_name="应用类型", choices=ApplicationTypeChoices.choices, + 
default=ApplicationTypeChoices.SIMPLE, max_length=256) + problem_optimization_prompt = models.CharField(verbose_name="问题优化提示词", max_length=102400, blank=True, + null=True, + default="()里面是用户问题,根据上下文回答揣测用户问题({question}) 要求: 输出一个补全问题,并且放在标签中") + tts_model = models.ForeignKey(Model, related_name='tts_model_id', on_delete=models.SET_NULL, db_constraint=False, + blank=True, null=True) + stt_model = models.ForeignKey(Model, related_name='stt_model_id', on_delete=models.SET_NULL, db_constraint=False, + blank=True, null=True) + tts_model_enable = models.BooleanField(verbose_name="语音合成模型是否启用", default=False) + stt_model_enable = models.BooleanField(verbose_name="语音识别模型是否启用", default=False) + tts_type = models.CharField(verbose_name="语音播放类型", max_length=20, default="BROWSER") + tts_autoplay = models.BooleanField(verbose_name="自动播放", default=False) + stt_autosend = models.BooleanField(verbose_name="自动发送", default=False) + clean_time = models.IntegerField(verbose_name="清理时间", default=180) + file_upload_enable = models.BooleanField(verbose_name="文件上传是否启用", default=False) + file_upload_setting = models.JSONField(verbose_name="文件上传相关设置", default=dict) @staticmethod def get_default_model_prompt(): @@ -61,6 +94,18 @@ class Meta: db_table = "application" +class WorkFlowVersion(AppModelMixin): + id = models.UUIDField(primary_key=True, max_length=128, default=uuid.uuid1, editable=False, verbose_name="主键id") + application = models.ForeignKey(Application, on_delete=models.CASCADE) + name = models.CharField(verbose_name="版本名称", max_length=128, default="") + publish_user_id = models.UUIDField(verbose_name="发布者id", max_length=128, default=None, null=True) + publish_user_name = models.CharField(verbose_name="发布者名称", max_length=128, default="") + work_flow = models.JSONField(verbose_name="工作流数据", default=dict) + + class Meta: + db_table = "application_work_flow_version" + + class ApplicationDatasetMapping(AppModelMixin): id = models.UUIDField(primary_key=True, max_length=128, 
default=uuid.uuid1, editable=False, verbose_name="主键id") application = models.ForeignKey(Application, on_delete=models.CASCADE) @@ -70,11 +115,17 @@ class Meta: db_table = "application_dataset_mapping" +def default_asker(): + return {'user_name': '游客'} + + class Chat(AppModelMixin): id = models.UUIDField(primary_key=True, max_length=128, default=uuid.uuid1, editable=False, verbose_name="主键id") application = models.ForeignKey(Application, on_delete=models.CASCADE) abstract = models.CharField(max_length=1024, verbose_name="摘要") + asker = models.JSONField(verbose_name="访问者", default=default_asker, encoder=SystemEncoder) client_id = models.UUIDField(verbose_name="客户端id", default=None, null=True) + is_deleted = models.BooleanField(verbose_name="", default=False) class Meta: db_table = "application_chat" @@ -95,12 +146,15 @@ class ChatRecord(AppModelMixin): chat = models.ForeignKey(Chat, on_delete=models.CASCADE) vote_status = models.CharField(verbose_name='投票', max_length=10, choices=VoteChoices.choices, default=VoteChoices.UN_VOTE) - problem_text = models.CharField(max_length=1024, verbose_name="问题") + problem_text = models.CharField(max_length=10240, verbose_name="问题") answer_text = models.CharField(max_length=40960, verbose_name="答案") + answer_text_list = ArrayField(verbose_name="改进标注列表", + base_field=models.JSONField() + , default=list) message_tokens = models.IntegerField(verbose_name="请求token数量", default=0) answer_tokens = models.IntegerField(verbose_name="响应token数量", default=0) const = models.IntegerField(verbose_name="总费用", default=0) - details = models.JSONField(verbose_name="对话详情", default=dict) + details = models.JSONField(verbose_name="对话详情", default=dict, encoder=SystemEncoder) improve_paragraph_id_list = ArrayField(verbose_name="改进标注列表", base_field=models.UUIDField(max_length=128, blank=True) , default=list) @@ -113,7 +167,14 @@ def get_human_message(self): return HumanMessage(content=self.problem_text) def get_ai_message(self): - return 
AIMessage(content=self.answer_text) + answer_text = self.answer_text + if answer_text is None or len(str(answer_text).strip()) == 0: + answer_text = _( + 'Sorry, no relevant content was found. Please re-describe your problem or provide more information. ') + return AIMessage(content=answer_text) + + def get_node_details_runtime_node_id(self, runtime_node_id): + return self.details.get(runtime_node_id, None) class Meta: db_table = "application_chat_record" diff --git a/apps/application/serializers/application_serializers.py b/apps/application/serializers/application_serializers.py index 1827c5eb9b2..9cd06bf2b92 100644 --- a/apps/application/serializers/application_serializers.py +++ b/apps/application/serializers/application_serializers.py @@ -6,52 +6,78 @@ @date:2023/11/7 10:02 @desc: """ +import asyncio +import datetime import hashlib +import json import os +import pickle import re import uuid from functools import reduce -from typing import Dict - +from typing import Dict, List from django.contrib.postgres.fields import ArrayField from django.core import cache, validators from django.core import signing from django.db import transaction, models from django.db.models import QuerySet +from django.db.models.expressions import RawSQL from django.http import HttpResponse from django.template import Template, Context -from rest_framework import serializers +from langchain_mcp_adapters.client import MultiServerMCPClient +from mcp.client.sse import sse_client +from rest_framework import serializers, status +from rest_framework.utils.formatting import lazy_format -from application.models import Application, ApplicationDatasetMapping +from application.flow.workflow_manage import Flow +from application.models import Application, ApplicationDatasetMapping, ApplicationTypeChoices, WorkFlowVersion from application.models.api_key_model import ApplicationAccessToken, ApplicationApiKey -from common.config.embedding_config import VectorStore, EmbeddingModel +from 
common.cache_data.application_access_token_cache import get_application_access_token, del_application_access_token +from common.cache_data.application_api_key_cache import del_application_api_key, get_application_api_key +from common.config.embedding_config import VectorStore from common.constants.authentication_type import AuthenticationType from common.db.search import get_dynamics_model, native_search, native_page_search from common.db.sql_execute import select_list -from common.exception.app_exception import AppApiException, NotFound404, AppUnauthorizedFailed -from common.field.common import UploadedImageField +from common.exception.app_exception import AppApiException, NotFound404, AppUnauthorizedFailed, ChatException +from common.field.common import UploadedImageField, UploadedFileField +from common.models.db_model_manage import DBModelManage +from common.response import result +from common.util.common import valid_license, password_encrypt, restricted_loads from common.util.field_message import ErrMessage from common.util.file_util import get_file_content from dataset.models import DataSet, Document, Image -from dataset.serializers.common_serializers import list_paragraph +from dataset.serializers.common_serializers import list_paragraph, get_embedding_model_by_dataset_id_list from embedding.models import SearchMode -from setting.models import AuthOperate +from function_lib.models.function import FunctionLib, PermissionType, FunctionType +from function_lib.serializers.function_lib_serializer import FunctionLibSerializer, FunctionLibModelSerializer +from setting.models import AuthOperate, TeamMemberPermission from setting.models.model_management import Model +from setting.models_provider import get_model_credential +from setting.models_provider.tools import get_model_instance_by_model_user_id from setting.serializers.provider_serializers import ModelSerializer from smartdoc.conf import PROJECT_DIR +from users.models import User +from django.utils.translation 
import gettext_lazy as _, get_language, to_locale -token_cache = cache.caches['token_cache'] chat_cache = cache.caches['chat_cache'] +class MKInstance: + + def __init__(self, application: dict, function_lib_list: List[dict], version: str): + self.application = application + self.function_lib_list = function_lib_list + self.version = version + + class ModelDatasetAssociation(serializers.Serializer): - user_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid("用户id")) + user_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid(_("User ID"))) model_id = serializers.CharField(required=False, allow_null=True, allow_blank=True, - error_messages=ErrMessage.char("模型id")) + error_messages=ErrMessage.char(_("Model id"))) dataset_id_list = serializers.ListSerializer(required=False, child=serializers.UUIDField(required=True, error_messages=ErrMessage.uuid( - "知识库id")), - error_messages=ErrMessage.list("知识库列表")) + _("Knowledge base id"))), + error_messages=ErrMessage.list(_("Knowledge Base List"))) def is_valid(self, *, raise_exception=True): super().is_valid(raise_exception=True) @@ -59,13 +85,13 @@ def is_valid(self, *, raise_exception=True): user_id = self.data.get('user_id') if model_id is not None and len(model_id) > 0: if not QuerySet(Model).filter(id=model_id).exists(): - raise AppApiException(500, f'模型不存在【{model_id}】') + raise AppApiException(500, f'{_("Model does not exist")}【{model_id}】') dataset_id_list = list(set(self.data.get('dataset_id_list'))) exist_dataset_id_list = [str(dataset.id) for dataset in QuerySet(DataSet).filter(id__in=dataset_id_list, user_id=user_id)] for dataset_id in dataset_id_list: if not exist_dataset_id_list.__contains__(dataset_id): - raise AppApiException(500, f'知识库id不存在【{dataset_id}】') + raise AppApiException(500, f'{_("The knowledge base id does not exist")}【{dataset_id}】') class ApplicationSerializerModel(serializers.ModelSerializer): @@ -82,47 +108,149 @@ class 
NoReferencesChoices(models.TextChoices): class NoReferencesSetting(serializers.Serializer): status = serializers.ChoiceField(required=True, choices=NoReferencesChoices.choices, - error_messages=ErrMessage.char("无引用状态")) - value = serializers.CharField(required=True, error_messages=ErrMessage.char("提示词")) + error_messages=ErrMessage.char(_("No reference status"))) + value = serializers.CharField(required=True, error_messages=ErrMessage.char(_("Prompt word"))) + + +def valid_model_params_setting(model_id, model_params_setting): + if model_id is None or model_params_setting is None or len(model_params_setting.keys()) == 0: + return + model = QuerySet(Model).filter(id=model_id).first() + credential = get_model_credential(model.provider, model.model_type, model.model_name) + credential.get_model_params_setting_form(model.model_name).valid_form(model_params_setting) class DatasetSettingSerializer(serializers.Serializer): - top_n = serializers.FloatField(required=True, max_value=100, min_value=1, - error_messages=ErrMessage.float("引用分段数")) + top_n = serializers.FloatField(required=True, max_value=10000, min_value=1, + error_messages=ErrMessage.float(_("Reference segment number"))) similarity = serializers.FloatField(required=True, max_value=1, min_value=0, - error_messages=ErrMessage.float("相识度")) - max_paragraph_char_number = serializers.IntegerField(required=True, min_value=500, max_value=10000, - error_messages=ErrMessage.integer("最多引用字符数")) + error_messages=ErrMessage.float(_("Acquaintance"))) + max_paragraph_char_number = serializers.IntegerField(required=True, min_value=500, max_value=100000, + error_messages=ErrMessage.integer( + _("Maximum number of quoted characters"))) search_mode = serializers.CharField(required=True, validators=[ validators.RegexValidator(regex=re.compile("^embedding|keywords|blend$"), - message="类型只支持register|reset_password", code=500) - ], error_messages=ErrMessage.char("检索模式")) + message=_("The type only supports embedding|keywords|blend"), 
code=500) + ], error_messages=ErrMessage.char(_("Retrieval Mode"))) - no_references_setting = NoReferencesSetting(required=True, error_messages=ErrMessage.base("未引用分段设置")) + no_references_setting = NoReferencesSetting(required=True, + error_messages=ErrMessage.base(_("Segment settings not referenced"))) class ModelSettingSerializer(serializers.Serializer): - prompt = serializers.CharField(required=True, max_length=2048, error_messages=ErrMessage.char("提示词")) + prompt = serializers.CharField(required=False, allow_null=True, allow_blank=True, max_length=102400, + error_messages=ErrMessage.char(_("Prompt word"))) + system = serializers.CharField(required=False, allow_null=True, allow_blank=True, max_length=102400, + error_messages=ErrMessage.char(_("Role prompts"))) + no_references_prompt = serializers.CharField(required=True, max_length=102400, allow_null=True, allow_blank=True, + error_messages=ErrMessage.char(_("No citation segmentation prompt"))) + reasoning_content_enable = serializers.BooleanField(required=False, + error_messages=ErrMessage.char(_("Thinking process switch"))) + reasoning_content_start = serializers.CharField(required=False, allow_null=True, default="", + allow_blank=True, max_length=256, + trim_whitespace=False, + error_messages=ErrMessage.char( + _("The thinking process begins to mark"))) + reasoning_content_end = serializers.CharField(required=False, allow_null=True, allow_blank=True, default="", + max_length=256, + trim_whitespace=False, + error_messages=ErrMessage.char(_("End of thinking process marker"))) + + +class ApplicationWorkflowSerializer(serializers.Serializer): + name = serializers.CharField(required=True, max_length=64, min_length=1, + error_messages=ErrMessage.char(_("Application Name"))) + desc = serializers.CharField(required=False, allow_null=True, allow_blank=True, + max_length=256, min_length=1, + error_messages=ErrMessage.char(_("Application Description"))) + work_flow = serializers.DictField(required=False, 
error_messages=ErrMessage.dict(_("Workflow Objects"))) + prologue = serializers.CharField(required=False, allow_null=True, allow_blank=True, max_length=102400, + error_messages=ErrMessage.char(_("Opening remarks"))) + + @staticmethod + def to_application_model(user_id: str, application: Dict): + language = get_language() + if application.get('work_flow') is not None: + default_workflow = application.get('work_flow') + else: + workflow_file_path = os.path.join(PROJECT_DIR, "apps", "application", 'flow', + f'default_workflow_{to_locale(language)}.json') + if not os.path.exists(workflow_file_path): + workflow_file_path = os.path.join(PROJECT_DIR, "apps", "application", 'flow', + f'default_workflow_zh.json') + default_workflow_json = get_file_content(workflow_file_path) + default_workflow = json.loads(default_workflow_json) + for node in default_workflow.get('nodes'): + if node.get('id') == 'base-node': + node.get('properties')['node_data']['desc'] = application.get('desc') + node.get('properties')['node_data']['name'] = application.get('name') + node.get('properties')['node_data']['prologue'] = application.get('prologue') + return Application(id=uuid.uuid1(), + name=application.get('name'), + desc=application.get('desc'), + prologue="", + dialogue_number=0, + user_id=user_id, model_id=None, + dataset_setting={}, + model_setting={}, + problem_optimization=False, + type=ApplicationTypeChoices.WORK_FLOW, + stt_model_enable=application.get('stt_model_enable', False), + stt_model_id=application.get('stt_model', None), + tts_model_id=application.get('tts_model', None), + tts_model_enable=application.get('tts_model_enable', False), + tts_model_params_setting=application.get('tts_model_params_setting', {}), + tts_type=application.get('tts_type', None), + file_upload_enable=application.get('file_upload_enable', False), + file_upload_setting=application.get('file_upload_setting', {}), + work_flow=default_workflow + ) + + +def get_base_node_work_flow(work_flow): + node_list = 
work_flow.get('nodes') + base_node_list = [node for node in node_list if node.get('id') == 'base-node'] + if len(base_node_list) > 0: + return base_node_list[-1] + return None class ApplicationSerializer(serializers.Serializer): - name = serializers.CharField(required=True, max_length=64, min_length=1, error_messages=ErrMessage.char("应用名称")) + name = serializers.CharField(required=True, max_length=64, min_length=1, + error_messages=ErrMessage.char(_("application name"))) desc = serializers.CharField(required=False, allow_null=True, allow_blank=True, max_length=256, min_length=1, - error_messages=ErrMessage.char("应用描述")) + error_messages=ErrMessage.char(_("application describe"))) model_id = serializers.CharField(required=False, allow_null=True, allow_blank=True, - error_messages=ErrMessage.char("模型")) - multiple_rounds_dialogue = serializers.BooleanField(required=True, error_messages=ErrMessage.char("多轮对话")) - prologue = serializers.CharField(required=False, allow_null=True, allow_blank=True, max_length=1024, - error_messages=ErrMessage.char("开场白")) + error_messages=ErrMessage.char(_("Model"))) + dialogue_number = serializers.IntegerField(required=True, + min_value=0, + max_value=1024, + error_messages=ErrMessage.integer(_("Historical chat records"))) + prologue = serializers.CharField(required=False, allow_null=True, allow_blank=True, max_length=102400, + error_messages=ErrMessage.char(_("Opening remarks"))) dataset_id_list = serializers.ListSerializer(required=False, child=serializers.UUIDField(required=True), - allow_null=True, error_messages=ErrMessage.list("关联知识库")) + allow_null=True, + error_messages=ErrMessage.list(_("Related Knowledge Base"))) # 数据集相关设置 dataset_setting = DatasetSettingSerializer(required=True) # 模型相关设置 model_setting = ModelSettingSerializer(required=True) # 问题补全 - problem_optimization = serializers.BooleanField(required=True, error_messages=ErrMessage.boolean("问题补全")) + problem_optimization = serializers.BooleanField(required=True, + 
error_messages=ErrMessage.boolean(_("Question completion"))) + problem_optimization_prompt = serializers.CharField(required=False, max_length=102400, + error_messages=ErrMessage.char(_("Question completion prompt"))) + # 应用类型 + type = serializers.CharField(required=True, error_messages=ErrMessage.char(_("Application Type")), + validators=[ + validators.RegexValidator(regex=re.compile("^SIMPLE|WORK_FLOW$"), + message=_( + "Application type only supports SIMPLE|WORK_FLOW"), + code=500) + ] + ) + model_params_setting = serializers.DictField(required=False, error_messages=ErrMessage.dict(_('Model parameters'))) def is_valid(self, *, user_id=None, raise_exception=False): super().is_valid(raise_exception=True) @@ -130,11 +258,13 @@ def is_valid(self, *, user_id=None, raise_exception=False): 'dataset_id_list': self.data.get('dataset_id_list')}).is_valid() class Embed(serializers.Serializer): - host = serializers.CharField(required=True, error_messages=ErrMessage.char("主机")) - protocol = serializers.CharField(required=True, error_messages=ErrMessage.char("协议")) - token = serializers.CharField(required=True, error_messages=ErrMessage.char("token")) + host = serializers.CharField(required=True, error_messages=ErrMessage.char(_("Host"))) + protocol = serializers.CharField(required=True, error_messages=ErrMessage.char(_("protocol"))) + token = serializers.CharField(required=True, error_messages=ErrMessage.char(_("token"))) - def get_embed(self, with_valid=True): + def get_embed(self, with_valid=True, params=None): + if params is None: + params = {} if with_valid: self.is_valid(raise_exception=True) index_path = os.path.join(PROJECT_DIR, 'apps', "application", 'template', 'embed.js') @@ -143,38 +273,92 @@ def get_embed(self, with_valid=True): file.close() application_access_token = QuerySet(ApplicationAccessToken).filter( access_token=self.data.get('token')).first() + is_draggable = 'false' + show_guide = 'true' + float_icon = 
f"{self.data.get('protocol')}://{self.data.get('host')}/ui/MaxKB.gif" + xpack_cache = DBModelManage.get_model('xpack_cache') + X_PACK_LICENSE_IS_VALID = False if xpack_cache is None else xpack_cache.get('XPACK_LICENSE_IS_VALID', False) + # 获取接入的query参数 + query = self.get_query_api_input(application_access_token.application, params) + float_location = {"x": {"type": "right", "value": 0}, "y": {"type": "bottom", "value": 30}} + header_font_color = "rgb(100, 106, 115)" + application_setting_model = DBModelManage.get_model('application_setting') + if application_setting_model is not None and X_PACK_LICENSE_IS_VALID: + application_setting = QuerySet(application_setting_model).filter( + application_id=application_access_token.application_id).first() + if application_setting is not None: + is_draggable = 'true' if application_setting.draggable else 'false' + if application_setting.float_icon is not None and len(application_setting.float_icon) > 0: + float_icon = f"{self.data.get('protocol')}://{self.data.get('host')}{application_setting.float_icon}" + show_guide = 'true' if application_setting.show_guide else 'false' + if application_setting.float_location is not None: + float_location = application_setting.float_location + if application_setting.custom_theme is not None and len( + application_setting.custom_theme.get('header_font_color', 'rgb(100, 106, 115)')) > 0: + header_font_color = application_setting.custom_theme.get('header_font_color', + 'rgb(100, 106, 115)') is_auth = 'true' if application_access_token is not None and application_access_token.is_active else 'false' - application_access_token = QuerySet(ApplicationAccessToken).filter( - access_token=self.data.get('token')).first() t = Template(content) s = t.render( Context( {'is_auth': is_auth, 'protocol': self.data.get('protocol'), 'host': self.data.get('host'), 'token': self.data.get('token'), 'white_list_str': ",".join( - application_access_token.white_list), - 'white_active': 'true' if 
application_access_token.white_active else 'false'})) + application_access_token.white_list if application_access_token.white_list is not None else []), + 'white_active': 'true' if application_access_token.white_active else 'false', + 'is_draggable': is_draggable, + 'float_icon': float_icon, + 'query': query, + 'show_guide': show_guide, + 'x_type': float_location.get('x', {}).get('type', 'right'), + 'x_value': float_location.get('x', {}).get('value', 0), + 'y_type': float_location.get('y', {}).get('type', 'bottom'), + 'y_value': float_location.get('y', {}).get('value', 30), + 'max_kb_id': str(uuid.uuid1()).replace('-', ''), + 'header_font_color': header_font_color})) response = HttpResponse(s, status=200, headers={'Content-Type': 'text/javascript'}) return response + def get_query_api_input(self, application, params): + query = '' + if application.work_flow is not None: + work_flow = application.work_flow + if work_flow is not None: + for node in work_flow.get('nodes', []): + if node['id'] == 'base-node': + input_field_list = node.get('properties', {}).get('api_input_field_list', + node.get('properties', {}).get( + 'input_field_list', [])) + if input_field_list is not None: + for field in input_field_list: + if field['assignment_method'] == 'api_input' and field['variable'] in params: + query += f"&{field['variable']}={params[field['variable']]}" + if 'asker' in params: + query += f"&asker={params.get('asker')}" + return query + class AccessTokenSerializer(serializers.Serializer): - application_id = serializers.UUIDField(required=True, error_messages=ErrMessage.boolean("应用id")) + application_id = serializers.UUIDField(required=True, error_messages=ErrMessage.boolean(_("Application ID"))) class AccessTokenEditSerializer(serializers.Serializer): access_token_reset = serializers.BooleanField(required=False, - error_messages=ErrMessage.boolean("重置Token")) - is_active = serializers.BooleanField(required=False, error_messages=ErrMessage.boolean("是否开启")) + 
error_messages=ErrMessage.boolean(_("Reset Token"))) + is_active = serializers.BooleanField(required=False, error_messages=ErrMessage.boolean(_("Is it enabled"))) access_num = serializers.IntegerField(required=False, max_value=10000, min_value=0, - error_messages=ErrMessage.integer("访问次数")) - white_active = serializers.BooleanField(required=False, error_messages=ErrMessage.boolean("是否开启白名单")) + error_messages=ErrMessage.integer(_("Number of visits"))) + white_active = serializers.BooleanField(required=False, + error_messages=ErrMessage.boolean(_("Whether to enable whitelist"))) white_list = serializers.ListSerializer(required=False, child=serializers.CharField(required=True, error_messages=ErrMessage.char( - "白名单")), - error_messages=ErrMessage.list("白名单列表")), + _("Whitelist"))), + error_messages=ErrMessage.list(_("Whitelist"))), show_source = serializers.BooleanField(required=False, - error_messages=ErrMessage.boolean("是否显示知识来源")) + error_messages=ErrMessage.boolean( + _("Whether to display knowledge sources"))) + language = serializers.CharField(required=False, allow_blank=True, allow_null=True, + error_messages=ErrMessage.char(_("language"))) def edit(self, instance: Dict, with_valid=True): if with_valid: @@ -187,6 +371,7 @@ def edit(self, instance: Dict, with_valid=True): if 'is_active' in instance: application_access_token.is_active = instance.get("is_active") if 'access_token_reset' in instance and instance.get('access_token_reset'): + del_application_access_token(application_access_token.access_token) application_access_token.access_token = hashlib.md5(str(uuid.uuid1()).encode()).hexdigest()[8:24] if 'access_num' in instance and instance.get('access_num') is not None: application_access_token.access_num = instance.get("access_num") @@ -196,7 +381,27 @@ def edit(self, instance: Dict, with_valid=True): application_access_token.white_list = instance.get('white_list') if 'show_source' in instance and instance.get('show_source') is not None: 
application_access_token.show_source = instance.get('show_source') + if 'language' in instance and instance.get('language') is not None: + application_access_token.language = instance.get('language') + if 'language' not in instance or instance.get('language') is None: + application_access_token.language = None application_access_token.save() + application_setting_model = DBModelManage.get_model('application_setting') + xpack_cache = DBModelManage.get_model('xpack_cache') + X_PACK_LICENSE_IS_VALID = False if xpack_cache is None else xpack_cache.get("XPACK_LICENSE_IS_VALID", False) + if application_setting_model is not None and X_PACK_LICENSE_IS_VALID: + application_setting, _ = application_setting_model.objects.get_or_create( + application_id=self.data.get('application_id')) + if application_setting is not None and instance.get('authentication') is not None and instance.get( + 'authentication_value') is not None: + application_setting.authentication = instance.get('authentication') + application_setting.authentication_value = { + "type": "password", + "value": instance.get('authentication_value') + } + application_setting.save() + + get_application_access_token(application_access_token.access_token, False) return self.one(with_valid=False) def one(self, with_valid=True): @@ -217,11 +422,14 @@ def one(self, with_valid=True): 'access_num': application_access_token.access_num, 'white_active': application_access_token.white_active, 'white_list': application_access_token.white_list, - 'show_source': application_access_token.show_source + 'show_source': application_access_token.show_source, + 'language': application_access_token.language } class Authentication(serializers.Serializer): - access_token = serializers.CharField(required=True, error_messages=ErrMessage.char("access_token")) + access_token = serializers.CharField(required=True, error_messages=ErrMessage.char(_("access_token"))) + authentication_value = serializers.JSONField(required=False, allow_null=True, + 
error_messages=ErrMessage.char(_("Certification Information"))) def auth(self, request, with_valid=True): token = request.META.get('HTTP_AUTHORIZATION') @@ -236,51 +444,102 @@ def auth(self, request, with_valid=True): self.is_valid(raise_exception=True) access_token = self.data.get("access_token") application_access_token = QuerySet(ApplicationAccessToken).filter(access_token=access_token).first() + authentication_value = self.data.get('authentication_value', None) + authentication = {} if application_access_token is not None and application_access_token.is_active: if token_details is not None and 'client_id' in token_details and token_details.get( 'client_id') is not None: client_id = token_details.get('client_id') + authentication = token_details.get('authentication', {}) else: client_id = str(uuid.uuid1()) + if authentication_value is not None: + # 认证用户token + self.auth_authentication_value(authentication_value, str(application_access_token.application_id)) + authentication = {'type': authentication_value.get('type'), + 'value': password_encrypt(authentication_value.get('value'))} token = signing.dumps({'application_id': str(application_access_token.application_id), 'user_id': str(application_access_token.application.user.id), 'access_token': application_access_token.access_token, 'type': AuthenticationType.APPLICATION_ACCESS_TOKEN.value, - 'client_id': client_id}) + 'client_id': client_id, + 'authentication': authentication}) return token else: - raise NotFound404(404, "无效的access_token") + raise NotFound404(404, _("Invalid access_token")) + + def auth_authentication_value(self, authentication_value, application_id): + application_setting_model = DBModelManage.get_model('application_setting') + xpack_cache = DBModelManage.get_model('xpack_cache') + X_PACK_LICENSE_IS_VALID = False if xpack_cache is None else xpack_cache.get('XPACK_LICENSE_IS_VALID', False) + if application_setting_model is not None and X_PACK_LICENSE_IS_VALID: + application_setting = 
QuerySet(application_setting_model).filter(application_id=application_id).first() + if application_setting.authentication and authentication_value is not None: + if authentication_value.get('type') == 'password': + if not self.auth_password(authentication_value, application_setting.authentication_value): + raise AppApiException(1005, _("Wrong password")) + return True + + @staticmethod + def auth_password(source_authentication_value, authentication_value): + return source_authentication_value.get('value') == authentication_value.get('value') class Edit(serializers.Serializer): name = serializers.CharField(required=False, max_length=64, min_length=1, - error_messages=ErrMessage.char("应用名称")) + error_messages=ErrMessage.char(_("Application Name"))) desc = serializers.CharField(required=False, max_length=256, min_length=1, allow_null=True, allow_blank=True, - error_messages=ErrMessage.char("应用描述")) + error_messages=ErrMessage.char(_("Application Description"))) model_id = serializers.CharField(required=False, allow_blank=True, allow_null=True, - error_messages=ErrMessage.char("模型")) - multiple_rounds_dialogue = serializers.BooleanField(required=False, - error_messages=ErrMessage.boolean("多轮会话")) - prologue = serializers.CharField(required=False, allow_null=True, allow_blank=True, max_length=1024, - error_messages=ErrMessage.char("开场白")) + error_messages=ErrMessage.char(_("Model"))) + dialogue_number = serializers.IntegerField(required=False, + min_value=0, + max_value=1024, + error_messages=ErrMessage.integer(_("Historical chat records"))) + prologue = serializers.CharField(required=False, allow_null=True, allow_blank=True, max_length=102400, + error_messages=ErrMessage.char(_("Opening remarks"))) dataset_id_list = serializers.ListSerializer(required=False, child=serializers.UUIDField(required=True), - error_messages=ErrMessage.list("关联知识库") + error_messages=ErrMessage.list(_("Related Knowledge Base")) ) # 数据集相关设置 dataset_setting = 
DatasetSettingSerializer(required=False, allow_null=True, - error_messages=ErrMessage.json("数据集设置")) + error_messages=ErrMessage.json(_("Dataset settings"))) # 模型相关设置 model_setting = ModelSettingSerializer(required=False, allow_null=True, - error_messages=ErrMessage.json("模型设置")) + error_messages=ErrMessage.json(_("Model setup"))) # 问题补全 problem_optimization = serializers.BooleanField(required=False, allow_null=True, - error_messages=ErrMessage.boolean("问题补全")) - icon = serializers.CharField(required=False, allow_null=True, error_messages=ErrMessage.char("icon图标")) + error_messages=ErrMessage.boolean(_("Question completion"))) + icon = serializers.CharField(required=False, allow_null=True, error_messages=ErrMessage.char(_("Icon"))) + + model_params_setting = serializers.DictField(required=False, + error_messages=ErrMessage.dict(_('Model parameters'))) class Create(serializers.Serializer): - user_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid("用户id")) + user_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid(_("User ID"))) + @valid_license(model=Application, count=5, + message=_( + 'The community version supports up to 5 applications. 
If you need more applications, please contact us (https://fit2cloud.com/).')) @transaction.atomic def insert(self, application: Dict): + application_type = application.get('type') + if 'WORK_FLOW' == application_type: + return self.insert_workflow(application) + else: + return self.insert_simple(application) + + def insert_workflow(self, application: Dict): + self.is_valid(raise_exception=True) + user_id = self.data.get('user_id') + ApplicationWorkflowSerializer(data=application).is_valid(raise_exception=True) + application_model = ApplicationWorkflowSerializer.to_application_model(user_id, application) + application_model.save() + # 插入认证信息 + ApplicationAccessToken(application_id=application_model.id, + access_token=hashlib.md5(str(uuid.uuid1()).encode()).hexdigest()[8:24]).save() + return ApplicationSerializerModel(application_model).data + + def insert_simple(self, application: Dict): self.is_valid(raise_exception=True) user_id = self.data.get('user_id') ApplicationSerializer(data=application).is_valid(user_id=user_id, raise_exception=True) @@ -296,17 +555,29 @@ def insert(self, application: Dict): access_token=hashlib.md5(str(uuid.uuid1()).encode()).hexdigest()[8:24]).save() # 插入关联数据 QuerySet(ApplicationDatasetMapping).bulk_create(application_dataset_mapping_model_list) - return True + return ApplicationSerializerModel(application_model).data @staticmethod def to_application_model(user_id: str, application: Dict): return Application(id=uuid.uuid1(), name=application.get('name'), desc=application.get('desc'), prologue=application.get('prologue'), - dialogue_number=3 if application.get('multiple_rounds_dialogue') else 0, + dialogue_number=application.get('dialogue_number', 0), user_id=user_id, model_id=application.get('model_id'), dataset_setting=application.get('dataset_setting'), model_setting=application.get('model_setting'), - problem_optimization=application.get('problem_optimization') + problem_optimization=application.get('problem_optimization'), + 
type=ApplicationTypeChoices.SIMPLE, + model_params_setting=application.get('model_params_setting', {}), + problem_optimization_prompt=application.get('problem_optimization_prompt', None), + stt_model_enable=application.get('stt_model_enable', False), + stt_model_id=application.get('stt_model', None), + tts_model_id=application.get('tts_model', None), + tts_model_enable=application.get('tts_model_enable', False), + tts_model_params_setting=application.get('tts_model_params_setting', {}), + tts_type=application.get('tts_type', None), + file_upload_enable=application.get('file_upload_enable', False), + file_upload_setting=application.get('file_upload_setting', {}), + work_flow={} ) @staticmethod @@ -314,22 +585,22 @@ def to_application_dataset_mapping(application_id: str, dataset_id: str): return ApplicationDatasetMapping(id=uuid.uuid1(), application_id=application_id, dataset_id=dataset_id) class HitTest(serializers.Serializer): - id = serializers.CharField(required=True, error_messages=ErrMessage.uuid("应用id")) - user_id = serializers.UUIDField(required=False, error_messages=ErrMessage.uuid("用户id")) - query_text = serializers.CharField(required=True, error_messages=ErrMessage.char("查询文本")) - top_number = serializers.IntegerField(required=True, max_value=10, min_value=1, - error_messages=ErrMessage.integer("topN")) - similarity = serializers.FloatField(required=True, max_value=1, min_value=0, - error_messages=ErrMessage.float("相关度")) + id = serializers.CharField(required=True, error_messages=ErrMessage.uuid(_("Application ID"))) + user_id = serializers.UUIDField(required=False, error_messages=ErrMessage.uuid(_("User ID"))) + query_text = serializers.CharField(required=True, error_messages=ErrMessage.char(_("Query text"))) + top_number = serializers.IntegerField(required=True, max_value=10000, min_value=1, + error_messages=ErrMessage.integer(_("topN"))) + similarity = serializers.FloatField(required=True, max_value=2, min_value=0, + 
error_messages=ErrMessage.float(_("Relevance"))) search_mode = serializers.CharField(required=True, validators=[ validators.RegexValidator(regex=re.compile("^embedding|keywords|blend$"), - message="类型只支持register|reset_password", code=500) - ], error_messages=ErrMessage.char("检索模式")) + message=_("The type only supports embedding|keywords|blend"), code=500) + ], error_messages=ErrMessage.char(_("Retrieval Mode"))) def is_valid(self, *, raise_exception=False): super().is_valid(raise_exception=True) if not QuerySet(Application).filter(id=self.data.get('id')).exists(): - raise AppApiException(500, '不存在的应用id') + raise AppApiException(500, _('Application id does not exist')) def hit_test(self): self.is_valid() @@ -337,39 +608,46 @@ def hit_test(self): dataset_id_list = [ad.dataset_id for ad in QuerySet(ApplicationDatasetMapping).filter( application_id=self.data.get('id'))] - + if len(dataset_id_list) == 0: + return [] exclude_document_id_list = [str(document.id) for document in QuerySet(Document).filter( dataset_id__in=dataset_id_list, is_active=False)] + model = get_embedding_model_by_dataset_id_list(dataset_id_list) # 向量库检索 hit_list = vector.hit_test(self.data.get('query_text'), dataset_id_list, exclude_document_id_list, self.data.get('top_number'), self.data.get('similarity'), SearchMode(self.data.get('search_mode')), - EmbeddingModel.get_embedding_model()) + model) hit_dict = reduce(lambda x, y: {**x, **y}, [{hit.get('paragraph_id'): hit} for hit in hit_list], {}) p_list = list_paragraph([h.get('paragraph_id') for h in hit_list]) return [{**p, 'similarity': hit_dict.get(p.get('id')).get('similarity'), 'comprehensive_score': hit_dict.get(p.get('id')).get('comprehensive_score')} for p in p_list] class Query(serializers.Serializer): - name = serializers.CharField(required=False, error_messages=ErrMessage.char("应用名称")) + name = serializers.CharField(required=False, error_messages=ErrMessage.char(_("Application Name"))) - desc = serializers.CharField(required=False, 
error_messages=ErrMessage.char("应用描述")) + desc = serializers.CharField(required=False, error_messages=ErrMessage.char(_("Application Description"))) - user_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid("用户id")) + user_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid(_("User ID"))) + select_user_id = serializers.UUIDField(required=False, error_messages=ErrMessage.uuid(_("Select User ID"))) def get_query_set(self): user_id = self.data.get("user_id") query_set_dict = {} query_set = QuerySet(model=get_dynamics_model( {'temp_application.name': models.CharField(), 'temp_application.desc': models.CharField(), - 'temp_application.create_time': models.DateTimeField()})) + 'temp_application.create_time': models.DateTimeField(), + 'temp_application.user_id': models.CharField(), })) if "desc" in self.data and self.data.get('desc') is not None: query_set = query_set.filter(**{'temp_application.desc__icontains': self.data.get("desc")}) if "name" in self.data and self.data.get('name') is not None: query_set = query_set.filter(**{'temp_application.name__icontains': self.data.get("name")}) + if 'select_user_id' in self.data and self.data.get('select_user_id') is not None and self.data.get( + 'select_user_id') != 'all': + query_set = query_set.filter(**{'temp_application.user_id__exact': self.data.get('select_user_id')}) query_set = query_set.order_by("-temp_application.create_time") query_set_dict['default_sql'] = query_set @@ -382,8 +660,7 @@ def get_query_set(self): query_set_dict['team_member_permission_custom_sql'] = QuerySet(model=get_dynamics_model( {'user_id': models.CharField(), 'team_member_permission.auth_target_type': models.CharField(), - 'team_member_permission.operate': ArrayField(verbose_name="权限操作列表", - base_field=models.CharField(max_length=256, + 'team_member_permission.operate': ArrayField(base_field=models.CharField(max_length=256, blank=True, choices=AuthOperate.choices, default=AuthOperate.USE) @@ -403,7 
+680,7 @@ def list(self, with_valid=True): @staticmethod def reset_application(application: Dict): application['multiple_rounds_dialogue'] = True if application.get('dialogue_number') > 0 else False - del application['dialogue_number'] + if 'dataset_setting' in application: application['dataset_setting'] = {'search_mode': 'embedding', 'no_references_setting': { 'status': 'ai_questioning', @@ -420,49 +697,210 @@ def page(self, current_page: int, page_size: int, with_valid=True): class ApplicationModel(serializers.ModelSerializer): class Meta: model = Application - fields = ['id', 'name', 'desc', 'prologue', 'dialogue_number', 'icon'] + fields = ['id', 'name', 'desc', 'prologue', 'dialogue_number', 'icon', 'type'] class IconOperate(serializers.Serializer): - application_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid("应用id")) - user_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid("用户id")) - image = UploadedImageField(required=True, error_messages=ErrMessage.image("图片")) + application_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid(_("Application ID"))) + user_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid(_("User ID"))) + image = UploadedImageField(required=True, error_messages=ErrMessage.image(_("picture"))) def edit(self, with_valid=True): if with_valid: self.is_valid(raise_exception=True) application = QuerySet(Application).filter(id=self.data.get('application_id')).first() if application is None: - raise AppApiException(500, '不存在的应用id') + raise AppApiException(500, _('Application id does not exist')) image_id = uuid.uuid1() image = Image(id=image_id, image=self.data.get('image').read(), image_name=self.data.get('image').name) image.save() application.icon = f'/api/image/{image_id}' application.save() + application_access_token = QuerySet(ApplicationAccessToken).filter( + application_id=self.data.get('application_id')).first() + 
get_application_access_token(application_access_token.access_token, False) return {**ApplicationSerializer.Query.reset_application(ApplicationSerializerModel(application).data)} + class Import(serializers.Serializer): + file = UploadedFileField(required=True, error_messages=ErrMessage.image(_("file"))) + user_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid(_("User ID"))) + + @valid_license(model=Application, count=5, + message=_( + 'The community version supports up to 5 applications. If you need more applications, please contact us (https://fit2cloud.com/).')) + @transaction.atomic + def import_(self, with_valid=True): + if with_valid: + self.is_valid() + user_id = self.data.get('user_id') + mk_instance_bytes = self.data.get('file').read() + try: + mk_instance = restricted_loads(mk_instance_bytes) + except Exception as e: + raise AppApiException(1001, _("Unsupported file format")) + application = mk_instance.application + function_lib_list = mk_instance.function_lib_list + if len(function_lib_list) > 0: + function_lib_id_list = [function_lib.get('id') for function_lib in function_lib_list] + exits_function_lib_id_list = [str(function_lib.id) for function_lib in + QuerySet(FunctionLib).filter(id__in=function_lib_id_list)] + # 获取到需要插入的函数 + function_lib_list = [function_lib for function_lib in function_lib_list if + not exits_function_lib_id_list.__contains__(function_lib.get('id'))] + application_model = self.to_application(application, user_id) + function_lib_model_list = [self.to_function_lib(f, user_id) for f in function_lib_list] + application_model.save() + # 插入认证信息 + ApplicationAccessToken(application_id=application_model.id, + access_token=hashlib.md5(str(uuid.uuid1()).encode()).hexdigest()[8:24]).save() + QuerySet(FunctionLib).bulk_create(function_lib_model_list) if len(function_lib_model_list) > 0 else None + return True + + @staticmethod + def to_application(application, user_id): + work_flow = application.get('work_flow') + for 
node in work_flow.get('nodes', []): + if node.get('type') == 'search-dataset-node': + node.get('properties', {}).get('node_data', {})['dataset_id_list'] = [] + return Application(id=uuid.uuid1(), user_id=user_id, name=application.get('name'), + desc=application.get('desc'), + prologue=application.get('prologue'), dialogue_number=application.get('dialogue_number'), + dataset_setting=application.get('dataset_setting'), + model_setting=application.get('model_setting'), + model_params_setting=application.get('model_params_setting'), + tts_model_params_setting=application.get('tts_model_params_setting'), + problem_optimization=application.get('problem_optimization'), + icon="/ui/favicon.ico", + work_flow=work_flow, + type=application.get('type'), + problem_optimization_prompt=application.get('problem_optimization_prompt'), + tts_model_enable=application.get('tts_model_enable'), + stt_model_enable=application.get('stt_model_enable'), + tts_type=application.get('tts_type'), + clean_time=application.get('clean_time'), + file_upload_enable=application.get('file_upload_enable'), + file_upload_setting=application.get('file_upload_setting'), + ) + + @staticmethod + def to_function_lib(function_lib, user_id): + """ + + @param user_id: 用户id + @param function_lib: 函数库 + @return: + """ + return FunctionLib(id=function_lib.get('id'), user_id=user_id, name=function_lib.get('name'), + code=function_lib.get('code'), input_field_list=function_lib.get('input_field_list'), + is_active=function_lib.get('is_active'), + permission_type=PermissionType.PRIVATE) + class Operate(serializers.Serializer): - application_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid("应用id")) - user_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid("用户id")) + application_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid(_("Application ID"))) + user_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid(_("User ID"))) def 
is_valid(self, *, raise_exception=False): super().is_valid(raise_exception=True) if not QuerySet(Application).filter(id=self.data.get('application_id')).exists(): - raise AppApiException(500, '不存在的应用id') + raise AppApiException(500, _('Application id does not exist')) - def list_model(self, with_valid=True): + def list_model(self, model_type=None, with_valid=True): if with_valid: self.is_valid() + if model_type is None: + model_type = "LLM" application = QuerySet(Application).filter(id=self.data.get("application_id")).first() return ModelSerializer.Query( - data={'user_id': application.user_id}).list( + data={'user_id': application.user_id, 'model_type': model_type}).list( with_valid=True) + def list_function_lib(self, with_valid=True): + if with_valid: + self.is_valid(raise_exception=True) + application = QuerySet(Application).filter(id=self.data.get("application_id")).first() + return FunctionLibSerializer.Query( + data={'user_id': application.user_id, 'is_active': True, + 'function_type': FunctionType.PUBLIC} + ).list(with_valid=True) + + def get_function_lib(self, function_lib_id, with_valid=True): + if with_valid: + self.is_valid(raise_exception=True) + application = QuerySet(Application).filter(id=self.data.get("application_id")).first() + return FunctionLibSerializer.Operate(data={'user_id': application.user_id, 'id': function_lib_id}).one( + with_valid=True) + + def get_model_params_form(self, model_id, with_valid=True): + if with_valid: + self.is_valid(raise_exception=True) + application = QuerySet(Application).filter(id=self.data.get("application_id")).first() + return ModelSerializer.ModelParams( + data={'user_id': application.user_id, 'id': model_id}).get_model_params(with_valid=True) + def delete(self, with_valid=True): if with_valid: self.is_valid() QuerySet(Application).filter(id=self.data.get('application_id')).delete() return True + def export(self, with_valid=True): + try: + if with_valid: + self.is_valid() + application_id = 
self.data.get('application_id') + application = QuerySet(Application).filter(id=application_id).first() + function_lib_id_list = [node.get('properties', {}).get('node_data', {}).get('function_lib_id') for node + in + application.work_flow.get('nodes', []) if + node.get('type') == 'function-lib-node'] + function_lib_list = [] + if len(function_lib_id_list) > 0: + function_lib_list = QuerySet(FunctionLib).filter(id__in=function_lib_id_list) + application_dict = ApplicationSerializerModel(application).data + + mk_instance = MKInstance(application_dict, + [FunctionLibModelSerializer(function_lib).data for function_lib in + function_lib_list], 'v1') + application_pickle = pickle.dumps(mk_instance) + response = HttpResponse(content_type='text/plain', content=application_pickle) + response['Content-Disposition'] = f'attachment; filename="{application.name}.mk"' + return response + except Exception as e: + return result.error(str(e), response_status=status.HTTP_500_INTERNAL_SERVER_ERROR) + + @transaction.atomic + def publish(self, instance, with_valid=True): + if with_valid: + self.is_valid() + user_id = self.data.get('user_id') + user = QuerySet(User).filter(id=user_id).first() + application = QuerySet(Application).filter(id=self.data.get("application_id")).first() + work_flow = instance.get('work_flow') + if work_flow is None: + raise AppApiException(500, _("work_flow is a required field")) + Flow.new_instance(work_flow).is_valid() + base_node = get_base_node_work_flow(work_flow) + if base_node is not None: + node_data = base_node.get('properties').get('node_data') + if node_data is not None: + application.name = node_data.get('name') + application.desc = node_data.get('desc') + application.prologue = node_data.get('prologue') + dataset_list = self.list_dataset(with_valid=False) + application_dataset_id_list = [str(dataset.get('id')) for dataset in dataset_list] + dataset_id_list = self.update_reverse_search_node(work_flow, application_dataset_id_list) + 
application.work_flow = work_flow + application.save() + # 插入知识库关联关系 + self.save_application_mapping(application_dataset_id_list, dataset_id_list, application.id) + chat_cache.clear_by_application_id(str(application.id)) + work_flow_version = WorkFlowVersion(work_flow=work_flow, application=application, + name=datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'), + publish_user_id=user_id, + publish_user_name=user.username) + chat_cache.clear_by_application_id(str(application.id)) + work_flow_version.save() + return True + def one(self, with_valid=True): if with_valid: self.is_valid() @@ -474,9 +912,45 @@ def one(self, with_valid=True): dataset_id_list = [d.get('id') for d in list(filter(lambda row: mapping_dataset_id_list.__contains__(row.get('id')), dataset_list))] + self.update_search_node(application.work_flow, [str(dataset.get('id')) for dataset in dataset_list]) return {**ApplicationSerializer.Query.reset_application(ApplicationSerializerModel(application).data), 'dataset_id_list': dataset_id_list} + def get_search_node(self, work_flow): + if work_flow is None: + return [] + return [node for node in work_flow.get('nodes', []) if node.get('type', '') == 'search-dataset-node'] + + def update_search_node(self, work_flow, user_dataset_id_list: List): + search_node_list = self.get_search_node(work_flow) + for search_node in search_node_list: + node_data = search_node.get('properties', {}).get('node_data', {}) + dataset_id_list = node_data.get('dataset_id_list', []) + node_data['source_dataset_id_list'] = dataset_id_list + node_data['dataset_id_list'] = [dataset_id for dataset_id in dataset_id_list if + user_dataset_id_list.__contains__(dataset_id)] + + def update_reverse_search_node(self, work_flow, user_dataset_id_list: List): + search_node_list = self.get_search_node(work_flow) + result_dataset_id_list = [] + for search_node in search_node_list: + node_data = search_node.get('properties', {}).get('node_data', {}) + dataset_id_list = 
node_data.get('dataset_id_list', []) + for dataset_id in dataset_id_list: + if not user_dataset_id_list.__contains__(dataset_id): + message = lazy_format(_('Unknown knowledge base id {dataset_id}, unable to associate'), + dataset_id=dataset_id) + raise AppApiException(500, message) + + source_dataset_id_list = node_data.get('source_dataset_id_list', []) + source_dataset_id_list = [source_dataset_id for source_dataset_id in source_dataset_id_list if + not user_dataset_id_list.__contains__(source_dataset_id)] + source_dataset_id_list = list({*source_dataset_id_list, *dataset_id_list}) + node_data['source_dataset_id_list'] = [] + node_data['dataset_id_list'] = source_dataset_id_list + result_dataset_id_list = [*source_dataset_id_list, *result_dataset_id_list] + return list(set(result_dataset_id_list)) + def profile(self, with_valid=True): if with_valid: self.is_valid() @@ -484,11 +958,67 @@ def profile(self, with_valid=True): application = QuerySet(Application).get(id=application_id) application_access_token = QuerySet(ApplicationAccessToken).filter(application_id=application.id).first() if application_access_token is None: - raise AppUnauthorizedFailed(500, "非法用户") + raise AppUnauthorizedFailed(500, _("Illegal User")) + application_setting_model = DBModelManage.get_model('application_setting') + if application.type == ApplicationTypeChoices.WORK_FLOW: + work_flow_version = QuerySet(WorkFlowVersion).filter(application_id=application.id).order_by( + '-create_time')[0:1].first() + if work_flow_version is not None: + application.work_flow = work_flow_version.work_flow + + xpack_cache = DBModelManage.get_model('xpack_cache') + X_PACK_LICENSE_IS_VALID = False if xpack_cache is None else xpack_cache.get('XPACK_LICENSE_IS_VALID', False) + application_setting_dict = {} + if application_setting_model is not None and X_PACK_LICENSE_IS_VALID: + application_setting = QuerySet(application_setting_model).filter( + application_id=application_access_token.application_id).first() + if 
application_setting is not None: + custom_theme = getattr(application_setting, 'custom_theme', {}) + float_location = getattr(application_setting, 'float_location', {}) + if not custom_theme: + application_setting.custom_theme = { + 'theme_color': '', + 'header_font_color': '' + } + if not float_location: + application_setting.float_location = { + 'x': {'type': '', 'value': ''}, + 'y': {'type': '', 'value': ''} + } + application_setting_dict = {'show_source': application_access_token.show_source, + 'show_history': application_setting.show_history, + 'draggable': application_setting.draggable, + 'show_guide': application_setting.show_guide, + 'avatar': application_setting.avatar, + 'show_avatar': application_setting.show_avatar, + 'float_icon': application_setting.float_icon, + 'authentication': application_setting.authentication, + 'authentication_type': application_setting.authentication_value.get( + 'type', 'password'), + 'disclaimer': application_setting.disclaimer, + 'disclaimer_value': application_setting.disclaimer_value, + 'custom_theme': application_setting.custom_theme, + 'user_avatar': application_setting.user_avatar, + 'show_user_avatar': application_setting.show_user_avatar, + 'float_location': application_setting.float_location} return ApplicationSerializer.Query.reset_application( {**ApplicationSerializer.ApplicationModel(application).data, - 'show_source': application_access_token.show_source}) + 'stt_model_id': application.stt_model_id, + 'tts_model_id': application.tts_model_id, + 'stt_model_enable': application.stt_model_enable, + 'tts_model_enable': application.tts_model_enable, + 'tts_type': application.tts_type, + 'tts_autoplay': application.tts_autoplay, + 'stt_autosend': application.stt_autosend, + 'file_upload_enable': application.file_upload_enable, + 'file_upload_setting': application.file_upload_setting, + 'work_flow': {'nodes': [node for node in ((application.work_flow or {}).get('nodes', []) or []) if + node.get('id') == 'base-node']}, 
+ 'show_source': application_access_token.show_source, + 'language': application_access_token.language, + **application_setting_dict}) + @transaction.atomic def edit(self, instance: Dict, with_valid=True): if with_valid: self.is_valid() @@ -501,19 +1031,50 @@ def edit(self, instance: Dict, with_valid=True): application.model_id = None else: model = QuerySet(Model).filter( - id=instance.get('model_id'), - user_id=application.user_id).first() + id=instance.get('model_id')).first() + if model is None: + raise AppApiException(500, _("Model does not exist")) + if not model.is_permission(application.user_id): + message = lazy_format(_('No permission to use this model:{model_name}'), model_name=model.name) + raise AppApiException(500, message) + if instance.get('stt_model_id') is None or len(instance.get('stt_model_id')) == 0: + application.stt_model_id = None + else: + model = QuerySet(Model).filter( + id=instance.get('stt_model_id')).first() + if model is None: + raise AppApiException(500, _("Model does not exist")) + if not model.is_permission(application.user_id): + message = lazy_format(_('No permission to use this model:{model_name}'), model_name=model.name) + raise AppApiException(500, message) + if instance.get('tts_model_id') is None or len(instance.get('tts_model_id')) == 0: + application.tts_model_id = None + else: + model = QuerySet(Model).filter( + id=instance.get('tts_model_id')).first() if model is None: - raise AppApiException(500, "模型不存在") + raise AppApiException(500, _("Model does not exist")) + if not model.is_permission(application.user_id): + message = lazy_format(_('No permission to use this model:{model_name}'), model_name=model.name) + raise AppApiException(500, message) + if 'work_flow' in instance: + # 当前用户可修改关联的知识库列表 + application_dataset_id_list = [str(dataset_dict.get('id')) for dataset_dict in + self.list_dataset(with_valid=False)] + self.update_reverse_search_node(instance.get('work_flow'), application_dataset_id_list) + # 找到语音配置相关 + 
self.get_work_flow_model(instance) + update_keys = ['name', 'desc', 'model_id', 'multiple_rounds_dialogue', 'prologue', 'status', - 'dataset_setting', 'model_setting', 'problem_optimization', - 'api_key_is_active', 'icon'] + 'dataset_setting', 'model_setting', 'problem_optimization', 'dialogue_number', + 'stt_model_id', 'tts_model_id', 'tts_model_enable', 'stt_model_enable', 'tts_type', + 'tts_autoplay', 'stt_autosend', 'file_upload_enable', 'file_upload_setting', + 'api_key_is_active', 'icon', 'work_flow', 'model_params_setting', 'tts_model_params_setting', + 'problem_optimization_prompt', 'clean_time'] for update_key in update_keys: if update_key in instance and instance.get(update_key) is not None: - if update_key == 'multiple_rounds_dialogue': - application.__setattr__('dialogue_number', 0 if not instance.get(update_key) else 3) - else: - application.__setattr__(update_key, instance.get(update_key)) + application.__setattr__(update_key, instance.get(update_key)) + print(application.name) application.save() if 'dataset_id_list' in instance: @@ -523,17 +1084,31 @@ def edit(self, instance: Dict, with_valid=True): self.list_dataset(with_valid=False)] for dataset_id in dataset_id_list: if not application_dataset_id_list.__contains__(dataset_id): - raise AppApiException(500, f"未知的知识库id${dataset_id},无法关联") - - # 删除已经关联的id - QuerySet(ApplicationDatasetMapping).filter(dataset_id__in=application_dataset_id_list, - application_id=application_id).delete() - # 插入 - QuerySet(ApplicationDatasetMapping).bulk_create( - [ApplicationDatasetMapping(application_id=application_id, dataset_id=dataset_id) for dataset_id in - dataset_id_list]) if len(dataset_id_list) > 0 else None + message = lazy_format(_('Unknown knowledge base id {dataset_id}, unable to associate'), + dataset_id=dataset_id) + raise AppApiException(500, message) + + self.save_application_mapping(application_dataset_id_list, dataset_id_list, application_id) + if application.type == ApplicationTypeChoices.SIMPLE: + 
chat_cache.clear_by_application_id(application_id) + application_access_token = QuerySet(ApplicationAccessToken).filter(application_id=application_id).first() + # 更新缓存数据 + print(application.name) + get_application_access_token(application_access_token.access_token, False) return self.one(with_valid=False) + @staticmethod + def save_application_mapping(application_dataset_id_list, dataset_id_list, application_id): + # 需要排除已删除的数据集 + dataset_id_list = [dataset.id for dataset in QuerySet(DataSet).filter(id__in=dataset_id_list)] + # 删除已经关联的id + QuerySet(ApplicationDatasetMapping).filter(dataset_id__in=application_dataset_id_list, + application_id=application_id).delete() + # 插入 + QuerySet(ApplicationDatasetMapping).bulk_create( + [ApplicationDatasetMapping(application_id=application_id, dataset_id=dataset_id) for dataset_id in + dataset_id_list]) if len(dataset_id_list) > 0 else None + def list_dataset(self, with_valid=True): if with_valid: self.is_valid(raise_exception=True) @@ -543,22 +1118,139 @@ def list_dataset(self, with_valid=True): [self.data.get('user_id') if self.data.get('user_id') == str(application.user_id) else None, application.user_id, self.data.get('user_id')]) + @staticmethod + def get_work_flow_model(instance): + if 'nodes' not in instance.get('work_flow'): + return + nodes = instance.get('work_flow')['nodes'] + for node in nodes: + if node['id'] == 'base-node': + node_data = node['properties']['node_data'] + if 'stt_model_id' in node_data: + instance['stt_model_id'] = node_data['stt_model_id'] + if 'tts_model_id' in node_data: + instance['tts_model_id'] = node_data['tts_model_id'] + if 'stt_model_enable' in node_data: + instance['stt_model_enable'] = node_data['stt_model_enable'] + if 'tts_model_enable' in node_data: + instance['tts_model_enable'] = node_data['tts_model_enable'] + if 'tts_type' in node_data: + instance['tts_type'] = node_data['tts_type'] + if 'tts_autoplay' in node_data: + instance['tts_autoplay'] = node_data['tts_autoplay'] + if 
'stt_autosend' in node_data: + instance['stt_autosend'] = node_data['stt_autosend'] + if 'tts_model_params_setting' in node_data: + instance['tts_model_params_setting'] = node_data['tts_model_params_setting'] + if 'file_upload_enable' in node_data: + instance['file_upload_enable'] = node_data['file_upload_enable'] + if 'file_upload_setting' in node_data: + instance['file_upload_setting'] = node_data['file_upload_setting'] + if 'name' in node_data: + instance['name'] = node_data['name'] + break + + def speech_to_text(self, file, with_valid=True): + if with_valid: + self.is_valid(raise_exception=True) + application_id = self.data.get('application_id') + application = QuerySet(Application).filter(id=application_id).first() + if application.stt_model_enable: + model = get_model_instance_by_model_user_id(application.stt_model_id, application.user_id) + text = model.speech_to_text(file) + return text + + def text_to_speech(self, text, with_valid=True): + if with_valid: + self.is_valid(raise_exception=True) + application_id = self.data.get('application_id') + application = QuerySet(Application).filter(id=application_id).first() + if application.tts_model_enable: + model = get_model_instance_by_model_user_id(application.tts_model_id, application.user_id, + **application.tts_model_params_setting) + + return model.text_to_speech(text) + + def play_demo_text(self, form_data, with_valid=True): + text = '你好,这里是语音播放测试' + if with_valid: + self.is_valid(raise_exception=True) + application_id = self.data.get('application_id') + application = QuerySet(Application).filter(id=application_id).first() + if 'tts_model_id' in form_data: + tts_model_id = form_data.pop('tts_model_id') + model = get_model_instance_by_model_user_id(tts_model_id, application.user_id, **form_data) + return model.text_to_speech(text) + + def application_list(self, with_valid=True): + if with_valid: + self.is_valid(raise_exception=True) + user_id = self.data.get('user_id') + application_id = 
self.data.get('application_id') + application = QuerySet(Application).get(id=application_id) + + application_user_id = user_id if user_id == str(application.user_id) else None + + if application_user_id is not None: + all_applications = Application.objects.filter(user_id=application_user_id).exclude(id=application_id) + else: + all_applications = Application.objects.none() + + # 获取团队共享的应用 + shared_applications = Application.objects.filter( + id__in=TeamMemberPermission.objects.filter( + auth_target_type='APPLICATION', + operate__contains=RawSQL("ARRAY['USE']", []), + member_id__team_id=application.user_id, + member_id__user_id=user_id + ).values('target') + ) + all_applications = all_applications.union(shared_applications) + + # 把应用的type为WORK_FLOW的应用放到最上面 然后再按名称排序 + serialized_data = ApplicationSerializerModel(all_applications, many=True).data + application = sorted(serialized_data, key=lambda x: (x['type'] != 'WORK_FLOW', x['name'])) + return list(application) + + def get_application(self, app_id, with_valid=True): + if with_valid: + self.is_valid(raise_exception=True) + if with_valid: + self.is_valid() + embed_application = QuerySet(Application).filter(id=app_id).first() + if embed_application is None: + raise AppApiException(500, _('Application does not exist')) + if embed_application.type == ApplicationTypeChoices.WORK_FLOW: + work_flow_version = QuerySet(WorkFlowVersion).filter(application_id=embed_application.id).order_by( + '-create_time')[0:1].first() + if work_flow_version is not None: + embed_application.work_flow = work_flow_version.work_flow + dataset_list = self.list_dataset(with_valid=False) + mapping_dataset_id_list = [adm.dataset_id for adm in + QuerySet(ApplicationDatasetMapping).filter(application_id=app_id)] + dataset_id_list = [d.get('id') for d in + list(filter(lambda row: mapping_dataset_id_list.__contains__(row.get('id')), + dataset_list))] + self.update_search_node(embed_application.work_flow, [str(dataset.get('id')) for dataset in 
dataset_list]) + return {**ApplicationSerializer.Query.reset_application(ApplicationSerializerModel(embed_application).data), + 'dataset_id_list': dataset_id_list} + class ApplicationKeySerializerModel(serializers.ModelSerializer): class Meta: model = ApplicationApiKey fields = "__all__" class ApplicationKeySerializer(serializers.Serializer): - user_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid("用户id")) + user_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid(_("User ID"))) - application_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid("应用id")) + application_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid(_("Application ID"))) def is_valid(self, *, raise_exception=False): super().is_valid(raise_exception=True) application_id = self.data.get("application_id") application = QuerySet(Application).filter(id=application_id).first() if application is None: - raise AppApiException(1001, "应用不存在") + raise AppApiException(1001, _("Application does not exist")) def generate(self, with_valid=True): if with_valid: @@ -580,29 +1272,32 @@ def list(self, with_valid=True): QuerySet(ApplicationApiKey).filter(application_id=application_id)] class Edit(serializers.Serializer): - is_active = serializers.BooleanField(required=False, error_messages=ErrMessage.boolean("是否可用")) + is_active = serializers.BooleanField(required=False, error_messages=ErrMessage.boolean(_("Availability"))) allow_cross_domain = serializers.BooleanField(required=False, - error_messages=ErrMessage.boolean("是否允许跨域")) + error_messages=ErrMessage.boolean( + _("Is cross-domain allowed"))) cross_domain_list = serializers.ListSerializer(required=False, child=serializers.CharField(required=True, error_messages=ErrMessage.char( - "跨域列表")), - error_messages=ErrMessage.char("跨域地址")) + _("Cross-domain address"))), + error_messages=ErrMessage.char(_("Cross-domain list"))) class Operate(serializers.Serializer): - application_id = 
serializers.UUIDField(required=True, error_messages=ErrMessage.uuid("应用id")) + application_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid(_("Application ID"))) - api_key_id = serializers.CharField(required=True, error_messages=ErrMessage.char("ApiKeyid")) + api_key_id = serializers.CharField(required=True, error_messages=ErrMessage.char(_("ApiKeyid"))) def delete(self, with_valid=True): if with_valid: self.is_valid(raise_exception=True) api_key_id = self.data.get("api_key_id") application_id = self.data.get('application_id') - QuerySet(ApplicationApiKey).filter(id=api_key_id, - application_id=application_id).delete() + application_api_key = QuerySet(ApplicationApiKey).filter(id=api_key_id, + application_id=application_id).first() + del_application_api_key(application_api_key.secret_key) + application_api_key.delete() def edit(self, instance, with_valid=True): if with_valid: @@ -613,7 +1308,7 @@ def edit(self, instance, with_valid=True): application_api_key = QuerySet(ApplicationApiKey).filter(id=api_key_id, application_id=application_id).first() if application_api_key is None: - raise AppApiException(500, '不存在') + raise AppApiException(500, _('APIKey does not exist')) if 'is_active' in instance and instance.get('is_active') is not None: application_api_key.is_active = instance.get('is_active') if 'allow_cross_domain' in instance and instance.get('allow_cross_domain') is not None: @@ -621,3 +1316,31 @@ def edit(self, instance, with_valid=True): if 'cross_domain_list' in instance and instance.get('cross_domain_list') is not None: application_api_key.cross_domain_list = instance.get('cross_domain_list') application_api_key.save() + # 写入缓存 + get_application_api_key(application_api_key.secret_key, False) + + class McpServers(serializers.Serializer): + mcp_servers = serializers.JSONField(required=True) + + def get_mcp_servers(self, with_valid=True): + if with_valid: + self.is_valid(raise_exception=True) + if '"stdio"' in 
self.data.get('mcp_servers'): + raise AppApiException(500, _('stdio is not supported')) + servers = json.loads(self.data.get('mcp_servers')) + + async def get_mcp_tools(servers): + async with MultiServerMCPClient(servers) as client: + return client.get_tools() + + tools = [] + for server in servers: + tools += [ + { + 'server': server, + 'name': tool.name, + 'description': tool.description, + 'args_schema': tool.args_schema, + } + for tool in asyncio.run(get_mcp_tools({server: servers[server]}))] + return tools diff --git a/apps/application/serializers/application_statistics_serializers.py b/apps/application/serializers/application_statistics_serializers.py index e958cb34dba..d60f3fe7910 100644 --- a/apps/application/serializers/application_statistics_serializers.py +++ b/apps/application/serializers/application_statistics_serializers.py @@ -19,12 +19,13 @@ from common.util.field_message import ErrMessage from common.util.file_util import get_file_content from smartdoc.conf import PROJECT_DIR +from django.utils.translation import gettext_lazy as _ class ApplicationStatisticsSerializer(serializers.Serializer): - application_id = serializers.UUIDField(required=True, error_messages=ErrMessage.char("应用id")) - start_time = serializers.DateField(format='%Y-%m-%d', error_messages=ErrMessage.date("开始时间")) - end_time = serializers.DateField(format='%Y-%m-%d', error_messages=ErrMessage.date("结束时间")) + application_id = serializers.UUIDField(required=True, error_messages=ErrMessage.char(_("Application ID"))) + start_time = serializers.DateField(format='%Y-%m-%d', error_messages=ErrMessage.date(_("Start time"))) + end_time = serializers.DateField(format='%Y-%m-%d', error_messages=ErrMessage.date(_("End time"))) def get_end_time(self): return datetime.datetime.combine( diff --git a/apps/application/serializers/application_version_serializers.py b/apps/application/serializers/application_version_serializers.py new file mode 100644 index 00000000000..6b8df0e8493 --- /dev/null +++ 
b/apps/application/serializers/application_version_serializers.py @@ -0,0 +1,86 @@ +# coding=utf-8 +""" + @project: MaxKB + @Author:虎 + @file: application_version_serializers.py + @date:2024/10/15 16:42 + @desc: +""" +from typing import Dict + +from django.db.models import QuerySet +from django.utils.translation import gettext_lazy as _ +from rest_framework import serializers + +from application.models import WorkFlowVersion +from common.db.search import page_search +from common.exception.app_exception import AppApiException +from common.util.field_message import ErrMessage + + +class ApplicationVersionModelSerializer(serializers.ModelSerializer): + class Meta: + model = WorkFlowVersion + fields = ['id', 'name', 'application_id', 'work_flow', 'publish_user_id', 'publish_user_name', 'create_time', + 'update_time'] + + +class ApplicationVersionEditSerializer(serializers.Serializer): + name = serializers.CharField(required=False, max_length=128, allow_null=True, allow_blank=True, + error_messages=ErrMessage.char(_("Version Name"))) + + +class ApplicationVersionSerializer(serializers.Serializer): + class Query(serializers.Serializer): + application_id = serializers.UUIDField(required=True, error_messages=ErrMessage.char(_("Application ID"))) + name = serializers.CharField(required=False, allow_null=True, allow_blank=True, + error_messages=ErrMessage.char(_("summary"))) + + def get_query_set(self): + query_set = QuerySet(WorkFlowVersion).filter(application_id=self.data.get('application_id')) + if 'name' in self.data and self.data.get('name') is not None: + query_set = query_set.filter(name__contains=self.data.get('name')) + return query_set.order_by("-create_time") + + def list(self, with_valid=True): + if with_valid: + self.is_valid(raise_exception=True) + query_set = self.get_query_set() + return [ApplicationVersionModelSerializer(v).data for v in query_set] + + def page(self, current_page, page_size, with_valid=True): + if with_valid: + 
self.is_valid(raise_exception=True) + return page_search(current_page, page_size, + self.get_query_set(), + post_records_handler=lambda v: ApplicationVersionModelSerializer(v).data) + + class Operate(serializers.Serializer): + application_id = serializers.UUIDField(required=True, error_messages=ErrMessage.char(_("Application ID"))) + work_flow_version_id = serializers.UUIDField(required=True, + error_messages=ErrMessage.uuid(_("Workflow version id"))) + + def one(self, with_valid=True): + if with_valid: + self.is_valid(raise_exception=True) + work_flow_version = QuerySet(WorkFlowVersion).filter(application_id=self.data.get('application_id'), + id=self.data.get('work_flow_version_id')).first() + if work_flow_version is not None: + return ApplicationVersionModelSerializer(work_flow_version).data + else: + raise AppApiException(500, _('Workflow version does not exist')) + + def edit(self, instance: Dict, with_valid=True): + if with_valid: + self.is_valid(raise_exception=True) + ApplicationVersionEditSerializer(data=instance).is_valid(raise_exception=True) + work_flow_version = QuerySet(WorkFlowVersion).filter(application_id=self.data.get('application_id'), + id=self.data.get('work_flow_version_id')).first() + if work_flow_version is not None: + name = instance.get('name', None) + if name is not None and len(name) > 0: + work_flow_version.name = name + work_flow_version.save() + return ApplicationVersionModelSerializer(work_flow_version).data + else: + raise AppApiException(500, _('Workflow version does not exist')) diff --git a/apps/application/serializers/chat_message_serializers.py b/apps/application/serializers/chat_message_serializers.py index f8c80a865c9..2ce5273f73d 100644 --- a/apps/application/serializers/chat_message_serializers.py +++ b/apps/application/serializers/chat_message_serializers.py @@ -6,13 +6,13 @@ @date:2023/11/14 13:51 @desc: """ -import json -from typing import List +import uuid +from datetime import datetime +from typing import List, Dict 
from uuid import UUID from django.core.cache import caches from django.db.models import QuerySet -from langchain.chat_models.base import BaseChatModel from rest_framework import serializers from application.chat_pipeline.pipeline_manage import PipelineManage @@ -22,44 +22,67 @@ BaseGenerateHumanMessageStep from application.chat_pipeline.step.reset_problem_step.impl.base_reset_problem_step import BaseResetProblemStep from application.chat_pipeline.step.search_dataset_step.impl.base_search_dataset_step import BaseSearchDatasetStep -from application.models import ChatRecord, Chat, Application, ApplicationDatasetMapping +from application.flow.common import Answer +from application.flow.i_step_node import WorkFlowPostHandler +from application.flow.workflow_manage import WorkflowManage, Flow +from application.models import ChatRecord, Chat, Application, ApplicationDatasetMapping, ApplicationTypeChoices, \ + WorkFlowVersion from application.models.api_key_model import ApplicationPublicAccessClient, ApplicationAccessToken from common.constants.authentication_type import AuthenticationType -from common.exception.app_exception import AppApiException, AppChatNumOutOfBoundsFailed +from common.exception.app_exception import AppChatNumOutOfBoundsFailed, ChatException +from common.handle.base_to_response import BaseToResponse +from common.handle.impl.response.openai_to_response import OpenaiToResponse +from common.handle.impl.response.system_to_response import SystemToResponse from common.util.field_message import ErrMessage -from common.util.rsa_util import rsa_long_decrypt from common.util.split_model import flat_map from dataset.models import Paragraph, Document from setting.models import Model, Status -from setting.models_provider.constants.model_provider_constants import ModelProvideConstants +from setting.models_provider import get_model_credential +from django.utils.translation import gettext_lazy as _ -chat_cache = caches['model_cache'] +chat_cache = caches['chat_cache'] 
class ChatInfo: def __init__(self, chat_id: str, - chat_model: BaseChatModel, dataset_id_list: List[str], exclude_document_id_list: list[str], - application: Application): + application: Application, + work_flow_version: WorkFlowVersion = None): """ :param chat_id: 对话id - :param chat_model: 对话模型 :param dataset_id_list: 数据集列表 :param exclude_document_id_list: 排除的文档 :param application: 应用信息 """ self.chat_id = chat_id self.application = application - self.chat_model = chat_model self.dataset_id_list = dataset_id_list self.exclude_document_id_list = exclude_document_id_list self.chat_record_list: List[ChatRecord] = [] + self.work_flow_version = work_flow_version + + @staticmethod + def get_no_references_setting(dataset_setting, model_setting): + no_references_setting = dataset_setting.get( + 'no_references_setting', { + 'status': 'ai_questioning', + 'value': '{question}'}) + if no_references_setting.get('status') == 'ai_questioning': + no_references_prompt = model_setting.get('no_references_prompt', '{question}') + no_references_setting['value'] = no_references_prompt if len(no_references_prompt) > 0 else "{question}" + return no_references_setting def to_base_pipeline_manage_params(self): dataset_setting = self.application.dataset_setting model_setting = self.application.model_setting + model_id = self.application.model.id if self.application.model is not None else None + model_params_setting = None + if model_id is not None: + model = QuerySet(Model).filter(id=model_id).first() + credential = get_model_credential(model.provider, model.model_type, model.model_name) + model_params_setting = credential.get_model_params_setting_form(model.model_name).get_default_form_data() return { 'dataset_id_list': self.dataset_id_list, 'exclude_document_id_list': self.exclude_document_id_list, @@ -71,36 +94,64 @@ def to_base_pipeline_manage_params(self): 'history_chat_record': self.chat_record_list, 'chat_id': self.chat_id, 'dialogue_number': self.application.dialogue_number, + 
'problem_optimization_prompt': self.application.problem_optimization_prompt if self.application.problem_optimization_prompt is not None and len( + self.application.problem_optimization_prompt) > 0 else _( + "() contains the user's question. Answer the guessed user's question based on the context ({question}) Requirement: Output a complete question and put it in the tag"), 'prompt': model_setting.get( - 'prompt') if 'prompt' in model_setting else Application.get_default_model_prompt(), - 'chat_model': self.chat_model, - 'model_id': self.application.model.id if self.application.model is not None else None, + 'prompt') if 'prompt' in model_setting and len(model_setting.get( + 'prompt')) > 0 else Application.get_default_model_prompt(), + 'system': model_setting.get( + 'system', None), + 'model_id': model_id, 'problem_optimization': self.application.problem_optimization, 'stream': True, + 'model_setting': model_setting, + 'model_params_setting': model_params_setting if self.application.model_params_setting is None or len( + self.application.model_params_setting.keys()) == 0 else self.application.model_params_setting, 'search_mode': self.application.dataset_setting.get( 'search_mode') if 'search_mode' in self.application.dataset_setting else 'embedding', - 'no_references_setting': self.application.dataset_setting.get( - 'no_references_setting') if 'no_references_setting' in self.application.dataset_setting else { - 'status': 'ai_questioning', - 'value': '{question}'} - + 'no_references_setting': self.get_no_references_setting(self.application.dataset_setting, model_setting), + 'user_id': self.application.user_id, + 'application_id': self.application.id } def to_pipeline_manage_params(self, problem_text: str, post_response_handler: PostResponseHandler, - exclude_paragraph_id_list, client_id: str, client_type, stream=True): + exclude_paragraph_id_list, client_id: str, client_type, stream=True, form_data=None): + if form_data is None: + form_data = {} params = 
self.to_base_pipeline_manage_params() return {**params, 'problem_text': problem_text, 'post_response_handler': post_response_handler, 'exclude_paragraph_id_list': exclude_paragraph_id_list, 'stream': stream, 'client_id': client_id, - 'client_type': client_type} + 'client_type': client_type, 'form_data': form_data} - def append_chat_record(self, chat_record: ChatRecord, client_id=None): + def append_chat_record(self, chat_record: ChatRecord, client_id=None, asker=None): + chat_record.problem_text = chat_record.problem_text[0:10240] if chat_record.problem_text is not None else "" + chat_record.answer_text = chat_record.answer_text[0:40960] if chat_record.problem_text is not None else "" + is_save = True # 存入缓存中 - self.chat_record_list.append(chat_record) + for index in range(len(self.chat_record_list)): + record = self.chat_record_list[index] + if record.id == chat_record.id: + self.chat_record_list[index] = chat_record + is_save = False + if is_save: + self.chat_record_list.append(chat_record) if self.application.id is not None: # 插入数据库 if not QuerySet(Chat).filter(id=self.chat_id).exists(): - Chat(id=self.chat_id, application_id=self.application.id, abstract=chat_record.problem_text, - client_id=client_id).save() + asker_dict = {'user_name': '游客'} + if asker is not None: + if isinstance(asker, str): + asker_dict = { + 'user_name': asker + } + elif isinstance(asker, dict): + asker_dict = asker + + Chat(id=self.chat_id, application_id=self.application.id, abstract=chat_record.problem_text[0:1024], + client_id=client_id, asker=asker_dict, update_time=datetime.now()).save() + else: + Chat.objects.filter(id=self.chat_id).update(update_time=datetime.now()) # 插入会话记录 chat_record.save() @@ -119,6 +170,8 @@ def handler(self, padding_problem_text: str = None, client_id=None, **kwargs): + answer_list = [[Answer(answer_text, 'ai-chat-node', 'ai-chat-node', 'ai-chat-node', {}, 'ai-chat-node', + kwargs.get('reasoning_content', '')).to_dict()]] chat_record = 
ChatRecord(id=chat_record_id, chat_id=chat_id, problem_text=problem_text, @@ -126,9 +179,11 @@ def handler(self, details=manage.get_details(), message_tokens=manage.context['message_tokens'], answer_tokens=manage.context['answer_tokens'], + answer_text_list=answer_list, run_time=manage.context['run_time'], index=len(chat_info.chat_record_list) + 1) - chat_info.append_chat_record(chat_record, client_id) + asker = kwargs.get("asker", None) + chat_info.append_chat_record(chat_record, client_id, asker=asker) # 重新设置缓存 chat_cache.set(chat_id, chat_info, timeout=60 * 30) @@ -136,21 +191,119 @@ def handler(self, return PostHandler() +class OpenAIMessage(serializers.Serializer): + content = serializers.CharField(required=True, error_messages=ErrMessage.char(_('content'))) + role = serializers.CharField(required=True, error_messages=ErrMessage.char(_('Role'))) + + +class OpenAIInstanceSerializer(serializers.Serializer): + messages = serializers.ListField(child=OpenAIMessage()) + chat_id = serializers.UUIDField(required=False, error_messages=ErrMessage.char(_("Conversation ID"))) + re_chat = serializers.BooleanField(required=False, error_messages=ErrMessage.boolean(_("Regenerate"))) + stream = serializers.BooleanField(required=False, error_messages=ErrMessage.boolean(_("Streaming Output"))) + + +class OpenAIChatSerializer(serializers.Serializer): + application_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid(_("Application ID"))) + client_id = serializers.CharField(required=True, error_messages=ErrMessage.char(_("Client id"))) + client_type = serializers.CharField(required=True, error_messages=ErrMessage.char(_("Client Type"))) + + @staticmethod + def get_message(instance): + return instance.get('messages')[-1].get('content') + + @staticmethod + def generate_chat(chat_id, application_id, message, client_id, asker=None): + if chat_id is None: + chat_id = str(uuid.uuid1()) + chat = QuerySet(Chat).filter(id=chat_id).first() + if chat is None: + asker_dict 
= {'user_name': '游客'} + if asker is not None: + if isinstance(asker, str): + asker_dict = { + 'user_name': asker + } + elif isinstance(asker, dict): + asker_dict = asker + Chat(id=chat_id, application_id=application_id, abstract=message[0:1024], client_id=client_id, + asker=asker_dict).save() + return chat_id + + def chat(self, instance: Dict, with_valid=True): + if with_valid: + self.is_valid(raise_exception=True) + OpenAIInstanceSerializer(data=instance).is_valid(raise_exception=True) + chat_id = instance.get('chat_id') + message = self.get_message(instance) + re_chat = instance.get('re_chat', False) + stream = instance.get('stream', False) + application_id = self.data.get('application_id') + client_id = self.data.get('client_id') + client_type = self.data.get('client_type') + chat_id = self.generate_chat(chat_id, application_id, message, client_id, + asker=instance.get('form_data', {}).get("asker")) + return ChatMessageSerializer( + data={ + 'chat_id': chat_id, 'message': message, + 're_chat': re_chat, + 'stream': stream, + 'application_id': application_id, + 'client_id': client_id, + 'client_type': client_type, + 'form_data': instance.get('form_data', {}), + 'image_list': instance.get('image_list', []), + 'document_list': instance.get('document_list', []), + 'audio_list': instance.get('audio_list', []), + 'other_list': instance.get('other_list', []), + } + ).chat(base_to_response=OpenaiToResponse()) + + class ChatMessageSerializer(serializers.Serializer): - chat_id = serializers.UUIDField(required=True, error_messages=ErrMessage.char("对话id")) - message = serializers.CharField(required=True, error_messages=ErrMessage.char("用户问题"), max_length=1024) - stream = serializers.BooleanField(required=True, error_messages=ErrMessage.char("是否流式回答")) - re_chat = serializers.BooleanField(required=True, error_messages=ErrMessage.char("是否重新回答")) - application_id = serializers.UUIDField(required=False, allow_null=True, error_messages=ErrMessage.uuid("应用id")) - client_id = 
serializers.CharField(required=True, error_messages=ErrMessage.char("客户端id")) - client_type = serializers.CharField(required=True, error_messages=ErrMessage.char("客户端类型")) - - def is_valid(self, *, raise_exception=False): - super().is_valid(raise_exception=True) + chat_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid(_("Conversation ID"))) + message = serializers.CharField(required=True, error_messages=ErrMessage.char(_("User Questions"))) + stream = serializers.BooleanField(required=True, + error_messages=ErrMessage.char(_("Is the answer in streaming mode"))) + re_chat = serializers.BooleanField(required=True, error_messages=ErrMessage.char(_("Do you want to reply again"))) + chat_record_id = serializers.UUIDField(required=False, allow_null=True, + error_messages=ErrMessage.uuid(_("Conversation record id"))) + + node_id = serializers.CharField(required=False, allow_null=True, allow_blank=True, + error_messages=ErrMessage.char(_("Node id"))) + + runtime_node_id = serializers.CharField(required=False, allow_null=True, allow_blank=True, + error_messages=ErrMessage.char(_("Runtime node id"))) + + node_data = serializers.DictField(required=False, allow_null=True, + error_messages=ErrMessage.char(_("Node parameters"))) + application_id = serializers.UUIDField(required=False, allow_null=True, + error_messages=ErrMessage.uuid(_("Application ID"))) + client_id = serializers.CharField(required=True, error_messages=ErrMessage.char(_("Client id"))) + client_type = serializers.CharField(required=True, error_messages=ErrMessage.char(_("Client Type"))) + form_data = serializers.DictField(required=False, error_messages=ErrMessage.char(_("Global variables"))) + image_list = serializers.ListField(required=False, error_messages=ErrMessage.list(_("picture"))) + document_list = serializers.ListField(required=False, error_messages=ErrMessage.list(_("document"))) + audio_list = serializers.ListField(required=False, error_messages=ErrMessage.list(_("Audio"))) + 
other_list = serializers.ListField(required=False, error_messages=ErrMessage.list(_("Other"))) + child_node = serializers.DictField(required=False, allow_null=True, + error_messages=ErrMessage.dict(_("Child Nodes"))) + + def is_valid_application_workflow(self, *, raise_exception=False): + self.is_valid_intraday_access_num() + + def is_valid_chat_id(self, chat_info: ChatInfo): + if self.data.get('application_id') is not None and self.data.get('application_id') != str( + chat_info.application.id): + raise ChatException(500, _("Conversation does not exist")) + + def is_valid_intraday_access_num(self): if self.data.get('client_type') == AuthenticationType.APPLICATION_ACCESS_TOKEN.value: - access_client = QuerySet(ApplicationPublicAccessClient).filter(id=self.data.get('client_id')).first() + access_client = QuerySet(ApplicationPublicAccessClient).filter(client_id=self.data.get('client_id'), + application_id=self.data.get( + 'application_id')).first() if access_client is None: - access_client = ApplicationPublicAccessClient(id=self.data.get('client_id'), + access_client = ApplicationPublicAccessClient(client_id=self.data.get('client_id'), application_id=self.data.get('application_id'), access_num=0, intraday_access_num=0) @@ -159,13 +312,10 @@ def is_valid(self, *, raise_exception=False): application_access_token = QuerySet(ApplicationAccessToken).filter( application_id=self.data.get('application_id')).first() if application_access_token.access_num <= access_client.intraday_access_num: - raise AppChatNumOutOfBoundsFailed(1002, "访问次数超过今日访问量") - chat_id = self.data.get('chat_id') - chat_info: ChatInfo = chat_cache.get(chat_id) - if chat_info is None: - chat_info = self.re_open_chat(chat_id) - chat_cache.set(chat_id, - chat_info, timeout=60 * 30) + raise AppChatNumOutOfBoundsFailed(1002, _("The number of visits exceeds today's visits")) + + def is_valid_application_simple(self, *, chat_info: ChatInfo, raise_exception=False): + self.is_valid_intraday_access_num() model = 
chat_info.application.model if model is None: return chat_info @@ -173,19 +323,18 @@ def is_valid(self, *, raise_exception=False): if model is None: return chat_info if model.status == Status.ERROR: - raise AppApiException(500, "当前模型不可用") + raise ChatException(500, _("The current model is not available")) if model.status == Status.DOWNLOAD: - raise AppApiException(500, "模型正在下载中,请稍后再发起对话") + raise ChatException(500, _("The model is downloading, please try again later")) return chat_info - def chat(self): - self.is_valid(raise_exception=True) + def chat_simple(self, chat_info: ChatInfo, base_to_response): message = self.data.get('message') re_chat = self.data.get('re_chat') stream = self.data.get('stream') client_id = self.data.get('client_id') client_type = self.data.get('client_type') - chat_info = self.is_valid(raise_exception=True) + form_data = self.data.get("form_data") pipeline_manage_builder = PipelineManage.builder() # 如果开启了问题优化,则添加上问题优化步骤 if chat_info.application.problem_optimization: @@ -194,6 +343,7 @@ def chat(self): pipeline_message = (pipeline_manage_builder.append_step(BaseSearchDatasetStep) .append_step(BaseGenerateHumanMessageStep) .append_step(BaseChatStep) + .add_base_to_response(base_to_response) .build()) exclude_paragraph_id_list = [] # 相同问题是否需要排除已经查询到的段落 @@ -206,27 +356,92 @@ def chat(self): exclude_paragraph_id_list = list(set(paragraph_id_list)) # 构建运行参数 params = chat_info.to_pipeline_manage_params(message, get_post_handler(chat_info), exclude_paragraph_id_list, - client_id, client_type, stream) + client_id, client_type, stream, form_data) # 运行流水线作业 pipeline_message.run(params) return pipeline_message.context['chat_result'] @staticmethod - def re_open_chat(chat_id: str): + def get_chat_record(chat_info, chat_record_id): + if chat_info is not None: + chat_record_list = [chat_record for chat_record in chat_info.chat_record_list if + str(chat_record.id) == str(chat_record_id)] + if chat_record_list is not None and len(chat_record_list): + 
return chat_record_list[-1] + chat_record = QuerySet(ChatRecord).filter(id=chat_record_id, chat_id=chat_info.chat_id).first() + if chat_record is None: + raise ChatException(500, _("Conversation record does not exist")) + chat_record = QuerySet(ChatRecord).filter(id=chat_record_id).first() + return chat_record + + def chat_work_flow(self, chat_info: ChatInfo, base_to_response): + message = self.data.get('message') + re_chat = self.data.get('re_chat') + stream = self.data.get('stream') + client_id = self.data.get('client_id') + client_type = self.data.get('client_type') + form_data = self.data.get('form_data') + image_list = self.data.get('image_list') + document_list = self.data.get('document_list') + audio_list = self.data.get('audio_list') + other_list = self.data.get('other_list') + user_id = chat_info.application.user_id + chat_record_id = self.data.get('chat_record_id') + chat_record = None + history_chat_record = chat_info.chat_record_list + if chat_record_id is not None: + chat_record = self.get_chat_record(chat_info, chat_record_id) + history_chat_record = [r for r in chat_info.chat_record_list if str(r.id) != chat_record_id] + work_flow_manage = WorkflowManage(Flow.new_instance(chat_info.work_flow_version.work_flow), + {'history_chat_record': history_chat_record, 'question': message, + 'chat_id': chat_info.chat_id, 'chat_record_id': str( + uuid.uuid1()) if chat_record is None else chat_record.id, + 'stream': stream, + 're_chat': re_chat, + 'client_id': client_id, + 'client_type': client_type, + 'user_id': user_id}, WorkFlowPostHandler(chat_info, client_id, client_type), + base_to_response, form_data, image_list, document_list, audio_list, other_list, + self.data.get('runtime_node_id'), + self.data.get('node_data'), chat_record, self.data.get('child_node')) + r = work_flow_manage.run() + return r + + def chat(self, base_to_response: BaseToResponse = SystemToResponse()): + super().is_valid(raise_exception=True) + chat_info = self.get_chat_info() + 
self.is_valid_chat_id(chat_info) + if chat_info.application.type == ApplicationTypeChoices.SIMPLE: + self.is_valid_application_simple(raise_exception=True, chat_info=chat_info), + return self.chat_simple(chat_info, base_to_response) + else: + self.is_valid_application_workflow(raise_exception=True) + return self.chat_work_flow(chat_info, base_to_response) + + def get_chat_info(self): + self.is_valid(raise_exception=True) + chat_id = self.data.get('chat_id') + chat_info: ChatInfo = chat_cache.get(chat_id) + if chat_info is None: + chat_info: ChatInfo = self.re_open_chat(chat_id) + chat_cache.set(chat_id, + chat_info, timeout=60 * 30) + return chat_info + + def re_open_chat(self, chat_id: str): chat = QuerySet(Chat).filter(id=chat_id).first() if chat is None: - raise AppApiException(500, "会话不存在") + raise ChatException(500, _("Conversation does not exist")) application = QuerySet(Application).filter(id=chat.application_id).first() if application is None: - raise AppApiException(500, "应用不存在") - model = QuerySet(Model).filter(id=application.model_id).first() - chat_model = None - if model is not None: - # 对话模型 - chat_model = ModelProvideConstants[model.provider].value.get_model(model.model_type, model.model_name, - json.loads( - rsa_long_decrypt(model.credential)), - streaming=True) + raise ChatException(500, _("Application does not exist")) + if application.type == ApplicationTypeChoices.SIMPLE: + return self.re_open_chat_simple(chat_id, application) + else: + return self.re_open_chat_work_flow(chat_id, application) + + @staticmethod + def re_open_chat_simple(chat_id, application): # 数据集id列表 dataset_id_list = [str(row.dataset_id) for row in QuerySet(ApplicationDatasetMapping).filter( @@ -237,4 +452,23 @@ def re_open_chat(chat_id: str): QuerySet(Document).filter( dataset_id__in=dataset_id_list, is_active=False)] - return ChatInfo(chat_id, chat_model, dataset_id_list, exclude_document_id_list, application) + chat_info = ChatInfo(chat_id, dataset_id_list, 
exclude_document_id_list, application) + chat_record_list = list(QuerySet(ChatRecord).filter(chat_id=chat_id).order_by('-create_time')[0:5]) + chat_record_list.sort(key=lambda r: r.create_time) + for chat_record in chat_record_list: + chat_info.chat_record_list.append(chat_record) + return chat_info + + @staticmethod + def re_open_chat_work_flow(chat_id, application): + work_flow_version = QuerySet(WorkFlowVersion).filter(application_id=application.id).order_by( + '-create_time')[0:1].first() + if work_flow_version is None: + raise ChatException(500, _("The application has not been published. Please use it after publishing.")) + + chat_info = ChatInfo(chat_id, [], [], application, work_flow_version) + chat_record_list = list(QuerySet(ChatRecord).filter(chat_id=chat_id).order_by('-create_time')[0:5]) + chat_record_list.sort(key=lambda r: r.create_time) + for chat_record in chat_record_list: + chat_info.chat_record_list.append(chat_record) + return chat_info diff --git a/apps/application/serializers/chat_serializers.py b/apps/application/serializers/chat_serializers.py index d8a3e648b98..ea43c6c5793 100644 --- a/apps/application/serializers/chat_serializers.py +++ b/apps/application/serializers/chat_serializers.py @@ -7,48 +7,90 @@ @desc: """ import datetime -import json import os import re import uuid from functools import reduce +from io import BytesIO from typing import Dict - -import xlwt +import pytz +import openpyxl from django.core import validators from django.core.cache import caches from django.db import transaction, models from django.db.models import QuerySet, Q -from django.http import HttpResponse +from django.http import StreamingHttpResponse +from django.utils.translation import gettext_lazy as _, gettext +from openpyxl.cell.cell import ILLEGAL_CHARACTERS_RE from rest_framework import serializers +from rest_framework.utils.formatting import lazy_format -from application.models import Chat, Application, ApplicationDatasetMapping, VoteChoices, 
ChatRecord +from application.flow.workflow_manage import Flow +from application.models import Chat, Application, ApplicationDatasetMapping, VoteChoices, ChatRecord, WorkFlowVersion, \ + ApplicationTypeChoices from application.models.api_key_model import ApplicationAccessToken from application.serializers.application_serializers import ModelDatasetAssociation, DatasetSettingSerializer, \ ModelSettingSerializer from application.serializers.chat_message_serializers import ChatInfo from common.constants.permission_constants import RoleConstants from common.db.search import native_search, native_page_search, page_search, get_dynamics_model -from common.event import ListenerManagement from common.exception.app_exception import AppApiException from common.util.common import post from common.util.field_message import ErrMessage from common.util.file_util import get_file_content from common.util.lock import try_lock, un_lock -from common.util.rsa_util import rsa_long_decrypt from dataset.models import Document, Problem, Paragraph, ProblemParagraphMapping +from dataset.serializers.common_serializers import get_embedding_model_id_by_dataset_id, update_document_char_length from dataset.serializers.paragraph_serializers import ParagraphSerializers +from embedding.task import embedding_by_paragraph, embedding_by_paragraph_list from setting.models import Model -from setting.models_provider.constants.model_provider_constants import ModelProvideConstants +from setting.models_provider import get_model_credential from smartdoc.conf import PROJECT_DIR +from smartdoc.settings import TIME_ZONE + +chat_cache = caches['chat_cache'] + + +class WorkFlowSerializers(serializers.Serializer): + nodes = serializers.ListSerializer(child=serializers.DictField(), error_messages=ErrMessage.uuid(_("node"))) + edges = serializers.ListSerializer(child=serializers.DictField(), error_messages=ErrMessage.uuid(_("Connection"))) -chat_cache = caches['model_cache'] + +def valid_model_params_setting(model_id, 
model_params_setting): + if model_id is None: + return + model = QuerySet(Model).filter(id=model_id).first() + credential = get_model_credential(model.provider, model.model_type, model.model_name) + model_params_setting_form = credential.get_model_params_setting_form(model.model_name) + if model_params_setting is None or len(model_params_setting.keys()) == 0: + model_params_setting = model_params_setting_form.get_default_form_data() + credential.get_model_params_setting_form(model.model_name).valid_form(model_params_setting) + + +class ReAbstractInstanceSerializers(serializers.Serializer): + abstract = serializers.CharField(required=True, error_messages=ErrMessage.char(_("abstract"))) class ChatSerializers(serializers.Serializer): class Operate(serializers.Serializer): - chat_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid("对话id")) - application_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid("应用id")) + chat_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid(_("Conversation ID"))) + application_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid(_("Application ID"))) + + def logic_delete(self, with_valid=True): + if with_valid: + self.is_valid(raise_exception=True) + QuerySet(Chat).filter(id=self.data.get('chat_id'), application_id=self.data.get('application_id')).update( + is_deleted=True) + return True + + def re_abstract(self, instance, with_valid=True): + if with_valid: + self.is_valid(raise_exception=True) + ReAbstractInstanceSerializers(data=instance).is_valid(raise_exception=True) + + QuerySet(Chat).filter(id=self.data.get('chat_id'), application_id=self.data.get('application_id')).update( + abstract=instance.get('abstract')) + return True def delete(self, with_valid=True): if with_valid: @@ -56,38 +98,63 @@ def delete(self, with_valid=True): QuerySet(Chat).filter(id=self.data.get('chat_id'), application_id=self.data.get('application_id')).delete() return True + 
class ClientChatHistory(serializers.Serializer): + application_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid(_("Application ID"))) + client_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid(_("Client id"))) + + def page(self, current_page: int, page_size: int, with_valid=True): + if with_valid: + self.is_valid(raise_exception=True) + queryset = QuerySet(Chat).filter(client_id=self.data.get('client_id'), + application_id=self.data.get('application_id'), + is_deleted=False) + queryset = queryset.order_by('-create_time') + return page_search(current_page, page_size, queryset, lambda row: ChatSerializerModel(row).data) + class Query(serializers.Serializer): - abstract = serializers.CharField(required=False, error_messages=ErrMessage.char("摘要")) - history_day = serializers.IntegerField(required=True, error_messages=ErrMessage.integer("历史天数")) - user_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid("用户id")) - application_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid("应用id")) + abstract = serializers.CharField(required=False, error_messages=ErrMessage.char(_("summary"))) + start_time = serializers.DateField(format='%Y-%m-%d', error_messages=ErrMessage.date(_("Start time"))) + end_time = serializers.DateField(format='%Y-%m-%d', error_messages=ErrMessage.date(_("End time"))) + user_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid(_("User ID"))) + application_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid(_("Application ID"))) min_star = serializers.IntegerField(required=False, min_value=0, - error_messages=ErrMessage.integer("最小点赞数")) + error_messages=ErrMessage.integer(_("Minimum number of likes"))) min_trample = serializers.IntegerField(required=False, min_value=0, - error_messages=ErrMessage.integer("最小点踩数")) - comparer = serializers.CharField(required=False, error_messages=ErrMessage.char("比较器"), validators=[ + 
error_messages=ErrMessage.integer(_("Minimum number of clicks"))) + comparer = serializers.CharField(required=False, error_messages=ErrMessage.char(_("Comparator")), validators=[ validators.RegexValidator(regex=re.compile("^and|or$"), - message="只支持and|or", code=500) + message=_("Only supports and|or"), code=500) ]) def get_end_time(self): - history_day = self.data.get('history_day') - return datetime.datetime.now() - datetime.timedelta(days=history_day) + return datetime.datetime.combine( + datetime.datetime.strptime(self.data.get('end_time'), '%Y-%m-%d'), + datetime.datetime.max.time()) + + def get_start_time(self): + return self.data.get('start_time') - def get_query_set(self): + def get_query_set(self, select_ids=None): end_time = self.get_end_time() + start_time = self.get_start_time() query_set = QuerySet(model=get_dynamics_model( {'application_chat.application_id': models.CharField(), 'application_chat.abstract': models.CharField(), "star_num": models.IntegerField(), 'trample_num': models.IntegerField(), 'comparer': models.CharField(), - 'application_chat.create_time': models.DateTimeField()})) + 'application_chat.update_time': models.DateTimeField(), + 'application_chat.id': models.UUIDField(), })) base_query_dict = {'application_chat.application_id': self.data.get("application_id"), - 'application_chat.create_time__gte': end_time} + 'application_chat.update_time__gte': start_time, + 'application_chat.update_time__lte': end_time, + } if 'abstract' in self.data and self.data.get('abstract') is not None: base_query_dict['application_chat.abstract__icontains'] = self.data.get('abstract') + + if select_ids is not None and len(select_ids) > 0: + base_query_dict['application_chat.id__in'] = select_ids base_condition = Q(**base_query_dict) min_star_query = None min_trample_query = None @@ -107,7 +174,14 @@ def get_query_set(self): condition = base_condition & min_trample_query else: condition = base_condition - return 
query_set.filter(condition).order_by("-application_chat.create_time") + inner_queryset = QuerySet(Chat).filter(application_id=self.data.get("application_id")) + if 'abstract' in self.data and self.data.get('abstract') is not None: + inner_queryset = inner_queryset.filter(abstract__icontains=self.data.get('abstract')) + + return { + 'inner_queryset': inner_queryset, + 'default_queryset': query_set.filter(condition).order_by("-application_chat.update_time") + } def list(self, with_valid=True): if with_valid: @@ -116,52 +190,97 @@ def list(self, with_valid=True): os.path.join(PROJECT_DIR, "apps", "application", 'sql', 'list_application_chat.sql')), with_table_name=False) + @staticmethod + def paragraph_list_to_string(paragraph_list): + return "\n**********\n".join( + [f"{paragraph.get('title')}:\n{paragraph.get('content')}" for paragraph in + paragraph_list] if paragraph_list is not None else '') + @staticmethod def to_row(row: Dict): details = row.get('details') - padding_problem_text = details.get('problem_padding').get( - 'padding_problem_text') if 'problem_padding' in details and 'padding_problem_text' in details.get( - 'problem_padding') else "" - paragraph_list = details.get('search_step').get( - 'paragraph_list') if 'search_step' in details and 'paragraph_list' in details.get('search_step') else [] + padding_problem_text = ' '.join(node.get("answer", "") for key, node in details.items() if + node.get("type") == 'question-node') + search_dataset_node_list = [(key, node) for key, node in details.items() if + node.get("type") == 'search-dataset-node' or node.get( + "step_type") == 'search_step'] + reference_paragraph_len = '\n'.join([str(len(node.get('paragraph_list', + []))) if key == 'search_step' else node.get( + 'name') + ':' + str( + len(node.get('paragraph_list', [])) if node.get('paragraph_list', []) is not None else '0') for + key, node in search_dataset_node_list]) + reference_paragraph = '\n----------\n'.join( + 
[ChatSerializers.Query.paragraph_list_to_string(node.get('paragraph_list', + [])) if key == 'search_step' else node.get( + 'name') + ':\n' + ChatSerializers.Query.paragraph_list_to_string(node.get('paragraph_list', + [])) for + key, node in search_dataset_node_list]) improve_paragraph_list = row.get('improve_paragraph_list') vote_status_map = {'-1': '未投票', '0': '赞同', '1': '反对'} return [str(row.get('chat_id')), row.get('abstract'), row.get('problem_text'), padding_problem_text, - row.get('answer_text'), vote_status_map.get(row.get('vote_status')), len(paragraph_list), "\n".join( - [f"{index}、{paragraph_list[index].get('title')}\n{paragraph_list[index].get('content')}" for index - in - range(len(paragraph_list))]), + row.get('answer_text'), vote_status_map.get(row.get('vote_status')), reference_paragraph_len, + reference_paragraph, "\n".join([ f"{improve_paragraph_list[index].get('title')}\n{improve_paragraph_list[index].get('content')}" for index in range(len(improve_paragraph_list))]), + row.get('asker').get('user_name'), row.get('message_tokens') + row.get('answer_tokens'), row.get('run_time'), - str(row.get('create_time'))] + str(row.get('create_time').astimezone(pytz.timezone(TIME_ZONE)).strftime('%Y-%m-%d %H:%M:%S') + )] - def export(self, with_valid=True): + def export(self, data, with_valid=True): if with_valid: self.is_valid(raise_exception=True) - data_list = native_search(self.get_query_set(), select_string=get_file_content( - os.path.join(PROJECT_DIR, "apps", "application", 'sql', 'export_application_chat.sql')), + + data_list = native_search(self.get_query_set(data.get('select_ids')), + select_string=get_file_content( + os.path.join(PROJECT_DIR, "apps", "application", 'sql', + 'export_application_chat.sql')), with_table_name=False) - # 创建工作簿对象 - workbook = xlwt.Workbook(encoding='utf-8') - # 添加工作表 - worksheet = workbook.add_sheet('Sheet1') - data = [ - ['会话ID', '摘要', '用户问题', '优化后问题', '回答', '用户反馈', '引用分段数', '分段标题+内容', - '标注', '消耗tokens', '耗时(s)', '提问时间'], 
- *[self.to_row(row) for row in data_list] - ] - # 写入数据到工作表 - for row_idx, row in enumerate(data): - for col_idx, col in enumerate(row): - worksheet.write(row_idx, col_idx, col) - # 创建HttpResponse对象返回Excel文件 - response = HttpResponse(content_type='application/vnd.ms-excel') - response['Content-Disposition'] = 'attachment; filename="data.xls"' - - workbook.save(response) + batch_size = 500 + + def stream_response(): + workbook = openpyxl.Workbook() + worksheet = workbook.active + worksheet.title = 'Sheet1' + + headers = [gettext('Conversation ID'), gettext('summary'), gettext('User Questions'), + gettext('Problem after optimization'), + gettext('answer'), gettext('User feedback'), + gettext('Reference segment number'), + gettext('Section title + content'), + gettext('Annotation'), gettext('USER'), gettext('Consuming tokens'), + gettext('Time consumed (s)'), + gettext('Question Time')] + for col_idx, header in enumerate(headers, 1): + cell = worksheet.cell(row=1, column=col_idx) + cell.value = header + + for i in range(0, len(data_list), batch_size): + batch_data = data_list[i:i + batch_size] + + for row_idx, row in enumerate(batch_data, start=i + 2): + for col_idx, value in enumerate(self.to_row(row), 1): + cell = worksheet.cell(row=row_idx, column=col_idx) + if isinstance(value, str): + value = re.sub(ILLEGAL_CHARACTERS_RE, '', value) + if isinstance(value, datetime.datetime): + eastern = pytz.timezone(TIME_ZONE) + c = datetime.timezone(eastern._utcoffset) + value = value.astimezone(c) + cell.value = value + + output = BytesIO() + workbook.save(output) + output.seek(0) + yield output.getvalue() + output.close() + workbook.close() + + response = StreamingHttpResponse(stream_response(), + content_type='application/vnd.open.xmlformats-officedocument.spreadsheetml.sheet') + response['Content-Disposition'] = 'attachment; filename="data.xlsx"' return response def page(self, current_page: int, page_size: int, with_valid=True): @@ -172,36 +291,50 @@ def page(self, 
current_page: int, page_size: int, with_valid=True): with_table_name=False) class OpenChat(serializers.Serializer): - user_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid("用户id")) + user_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid(_("User ID"))) - application_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid("应用id")) + application_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid(_("Application ID"))) def is_valid(self, *, raise_exception=False): super().is_valid(raise_exception=True) user_id = self.data.get('user_id') application_id = self.data.get('application_id') if not QuerySet(Application).filter(id=application_id, user_id=user_id).exists(): - raise AppApiException(500, '应用不存在') + raise AppApiException(500, gettext('Application does not exist')) def open(self): self.is_valid(raise_exception=True) application_id = self.data.get('application_id') application = QuerySet(Application).get(id=application_id) - model = QuerySet(Model).filter(id=application.model_id).first() + if application.type == ApplicationTypeChoices.SIMPLE: + return self.open_simple(application) + else: + return self.open_work_flow(application) + + def open_work_flow(self, application): + self.is_valid(raise_exception=True) + application_id = self.data.get('application_id') + chat_id = str(uuid.uuid1()) + work_flow_version = QuerySet(WorkFlowVersion).filter(application_id=application_id).order_by( + '-create_time')[0:1].first() + if work_flow_version is None: + raise AppApiException(500, + gettext( + "The application has not been published. 
Please use it after publishing.")) + chat_cache.set(chat_id, + ChatInfo(chat_id, [], + [], + application, work_flow_version), timeout=60 * 30) + return chat_id + + def open_simple(self, application): + application_id = self.data.get('application_id') dataset_id_list = [str(row.dataset_id) for row in QuerySet(ApplicationDatasetMapping).filter( application_id=application_id)] - chat_model = None - if model is not None: - chat_model = ModelProvideConstants[model.provider].value.get_model(model.model_type, model.model_name, - json.loads( - rsa_long_decrypt( - model.credential)), - streaming=True) - chat_id = str(uuid.uuid1()) chat_cache.set(chat_id, - ChatInfo(chat_id, chat_model, dataset_id_list, + ChatInfo(chat_id, dataset_id_list, [str(document.id) for document in QuerySet(Document).filter( dataset_id__in=dataset_id_list, @@ -209,25 +342,53 @@ def open(self): application), timeout=60 * 30) return chat_id + class OpenWorkFlowChat(serializers.Serializer): + work_flow = WorkFlowSerializers(error_messages=ErrMessage.uuid(_("Workflow"))) + user_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid(_("User ID"))) + + def open(self): + self.is_valid(raise_exception=True) + work_flow = self.data.get('work_flow') + Flow.new_instance(work_flow).is_valid() + chat_id = str(uuid.uuid1()) + application = Application(id=None, dialogue_number=3, model=None, + dataset_setting={}, + model_setting={}, + problem_optimization=None, + type=ApplicationTypeChoices.WORK_FLOW, + user_id=self.data.get('user_id') + ) + work_flow_version = WorkFlowVersion(work_flow=work_flow) + chat_cache.set(chat_id, + ChatInfo(chat_id, [], + [], + application, work_flow_version), timeout=60 * 30) + return chat_id + class OpenTempChat(serializers.Serializer): - user_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid("用户id")) + user_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid(_("User ID"))) id = serializers.UUIDField(required=False, 
allow_null=True, - error_messages=ErrMessage.uuid("应用id")) + error_messages=ErrMessage.uuid(_("Application ID"))) model_id = serializers.CharField(required=False, allow_null=True, allow_blank=True, - error_messages=ErrMessage.uuid("模型id")) + error_messages=ErrMessage.uuid(_("Model id"))) multiple_rounds_dialogue = serializers.BooleanField(required=True, - error_messages=ErrMessage.boolean("多轮会话")) + error_messages=ErrMessage.boolean( + _("Multi-round conversation"))) dataset_id_list = serializers.ListSerializer(required=False, child=serializers.UUIDField(required=True), - error_messages=ErrMessage.list("关联数据集")) + error_messages=ErrMessage.list(_("Related Datasets"))) # 数据集相关设置 dataset_setting = DatasetSettingSerializer(required=True) # 模型相关设置 model_setting = ModelSettingSerializer(required=True) # 问题补全 - problem_optimization = serializers.BooleanField(required=True, error_messages=ErrMessage.boolean("问题补全")) + problem_optimization = serializers.BooleanField(required=True, + error_messages=ErrMessage.boolean(_("Question completion"))) + # 模型相关设置 + model_params_setting = serializers.JSONField(required=False, + error_messages=ErrMessage.dict(_("Model parameter settings"))) def is_valid(self, *, raise_exception=False): super().is_valid(raise_exception=True) @@ -241,7 +402,7 @@ def get_user_id(self): if 'id' in self.data and self.data.get('id') is not None: application = QuerySet(Application).filter(id=self.data.get('id')).first() if application is None: - raise AppApiException(500, "应用不存在") + raise AppApiException(500, gettext("Application does not exist")) return application.user_id return self.data.get('user_id') @@ -249,23 +410,16 @@ def open(self): user_id = self.is_valid(raise_exception=True) chat_id = str(uuid.uuid1()) model_id = self.data.get('model_id') - if model_id is not None and len(model_id) > 0: - model = QuerySet(Model).filter(user_id=user_id, id=self.data.get('model_id')).first() - chat_model = 
ModelProvideConstants[model.provider].value.get_model(model.model_type, model.model_name, - json.loads( - rsa_long_decrypt( - model.credential)), - streaming=True) - else: - model = None - chat_model = None dataset_id_list = self.data.get('dataset_id_list') - application = Application(id=None, dialogue_number=3, model=model, + dialogue_number = 3 if self.data.get('multiple_rounds_dialogue', False) else 0 + application = Application(id=None, dialogue_number=dialogue_number, model_id=model_id, dataset_setting=self.data.get('dataset_setting'), model_setting=self.data.get('model_setting'), - problem_optimization=self.data.get('problem_optimization')) + problem_optimization=self.data.get('problem_optimization'), + model_params_setting=self.data.get('model_params_setting'), + user_id=user_id) chat_cache.set(chat_id, - ChatInfo(chat_id, chat_model, dataset_id_list, + ChatInfo(chat_id, dataset_id_list, [str(document.id) for document in QuerySet(Document).filter( dataset_id__in=dataset_id_list, @@ -279,23 +433,31 @@ class Meta: model = ChatRecord fields = ['id', 'chat_id', 'vote_status', 'problem_text', 'answer_text', 'message_tokens', 'answer_tokens', 'const', 'improve_paragraph_id_list', 'run_time', 'index', + 'answer_text_list', 'create_time', 'update_time'] +class ChatSerializerModel(serializers.ModelSerializer): + class Meta: + model = Chat + fields = ['id', 'application_id', 'abstract', 'client_id'] + + class ChatRecordSerializer(serializers.Serializer): class Operate(serializers.Serializer): - chat_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid("对话id")) - application_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid("应用id")) - chat_record_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid("对话记录id")) + chat_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid(_("Conversation ID"))) + application_id = serializers.UUIDField(required=True, 
error_messages=ErrMessage.uuid(_("Application ID"))) + chat_record_id = serializers.UUIDField(required=True, + error_messages=ErrMessage.uuid(_("Conversation record id"))) def is_valid(self, *, current_role=None, raise_exception=False): super().is_valid(raise_exception=True) application_access_token = QuerySet(ApplicationAccessToken).filter( application_id=self.data.get('application_id')).first() if application_access_token is None: - raise AppApiException(500, '不存在的应用认证信息') + raise AppApiException(500, gettext('Application authentication information does not exist')) if not application_access_token.show_source and current_role == RoleConstants.APPLICATION_ACCESS_TOKEN.value: - raise AppApiException(500, '未开启显示知识来源') + raise AppApiException(500, gettext('Displaying knowledge sources is not enabled')) def get_chat_record(self): chat_record_id = self.data.get('chat_record_id') @@ -303,7 +465,7 @@ def get_chat_record(self): chat_info: ChatInfo = chat_cache.get(chat_id) if chat_info is not None: chat_record_list = [chat_record for chat_record in chat_info.chat_record_list if - chat_record.id == uuid.UUID(chat_record_id)] + str(chat_record.id) == str(chat_record_id)] if chat_record_list is not None and len(chat_record_list): return chat_record_list[-1] return QuerySet(ChatRecord).filter(id=chat_record_id, chat_id=chat_id).first() @@ -313,24 +475,28 @@ def one(self, current_role: RoleConstants, with_valid=True): self.is_valid(current_role=current_role, raise_exception=True) chat_record = self.get_chat_record() if chat_record is None: - raise AppApiException(500, "对话不存在") + raise AppApiException(500, gettext("Conversation does not exist")) return ChatRecordSerializer.Query.reset_chat_record(chat_record) class Query(serializers.Serializer): application_id = serializers.UUIDField(required=True) chat_id = serializers.UUIDField(required=True) + order_asc = serializers.BooleanField(required=False, allow_null=True) def list(self, with_valid=True): if with_valid: 
self.is_valid(raise_exception=True) QuerySet(ChatRecord).filter(chat_id=self.data.get('chat_id')) + order_by = 'create_time' if self.data.get('order_asc') is None or self.data.get( + 'order_asc') else '-create_time' return [ChatRecordSerializerModel(chat_record).data for chat_record in - QuerySet(ChatRecord).filter(chat_id=self.data.get('chat_id')).order_by("create_time")] + QuerySet(ChatRecord).filter(chat_id=self.data.get('chat_id')).order_by(order_by)] @staticmethod def reset_chat_record(chat_record): dataset_list = [] paragraph_list = [] + if 'search_step' in chat_record.details and chat_record.details.get('search_step').get( 'paragraph_list') is not None: paragraph_list = chat_record.details.get('search_step').get( @@ -342,41 +508,56 @@ def reset_chat_record(chat_record): row in paragraph_list], {}).items()] + if len(chat_record.improve_paragraph_id_list) > 0: + paragraph_model_list = QuerySet(Paragraph).filter(id__in=chat_record.improve_paragraph_id_list) + if len(paragraph_model_list) < len(chat_record.improve_paragraph_id_list): + paragraph_model_id_list = [str(p.id) for p in paragraph_model_list] + chat_record.improve_paragraph_id_list = list( + filter(lambda p_id: paragraph_model_id_list.__contains__(p_id), + chat_record.improve_paragraph_id_list)) + chat_record.save() return { **ChatRecordSerializerModel(chat_record).data, 'padding_problem_text': chat_record.details.get('problem_padding').get( 'padding_problem_text') if 'problem_padding' in chat_record.details else None, 'dataset_list': dataset_list, - 'paragraph_list': paragraph_list + 'paragraph_list': paragraph_list, + 'execution_details': [chat_record.details[key] for key in chat_record.details] } def page(self, current_page: int, page_size: int, with_valid=True): if with_valid: self.is_valid(raise_exception=True) + order_by = '-create_time' if self.data.get('order_asc') is None or self.data.get( + 'order_asc') else 'create_time' page = page_search(current_page, page_size, - 
QuerySet(ChatRecord).filter(chat_id=self.data.get('chat_id')).order_by("create_time"), + QuerySet(ChatRecord).filter(chat_id=self.data.get('chat_id')).order_by(order_by), post_records_handler=lambda chat_record: self.reset_chat_record(chat_record)) return page class Vote(serializers.Serializer): - chat_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid("对话id")) + chat_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid(_("Conversation ID"))) - chat_record_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid("对话记录id")) + chat_record_id = serializers.UUIDField(required=True, + error_messages=ErrMessage.uuid(_("Conversation record id"))) - vote_status = serializers.ChoiceField(choices=VoteChoices.choices, error_messages=ErrMessage.uuid("投标状态")) + vote_status = serializers.ChoiceField(choices=VoteChoices.choices, + error_messages=ErrMessage.uuid(_("Bidding Status"))) @transaction.atomic def vote(self, with_valid=True): if with_valid: self.is_valid(raise_exception=True) if not try_lock(self.data.get('chat_record_id')): - raise AppApiException(500, "正在对当前会话纪要进行投票中,请勿重复发送请求") + raise AppApiException(500, + gettext( + "Voting on the current session minutes, please do not send repeated requests")) try: chat_record_details_model = QuerySet(ChatRecord).get(id=self.data.get('chat_record_id'), chat_id=self.data.get('chat_id')) if chat_record_details_model is None: - raise AppApiException(500, "不存在的对话 chat_record_id") + raise AppApiException(500, gettext("Non-existent conversation chat_record_id")) vote_status = self.data.get("vote_status") if chat_record_details_model.vote_status == VoteChoices.UN_VOTE: if vote_status == VoteChoices.STAR: @@ -393,15 +574,18 @@ def vote(self, with_valid=True): chat_record_details_model.vote_status = VoteChoices.UN_VOTE chat_record_details_model.save() else: - raise AppApiException(500, "已经投票过,请先取消后再进行投票") + raise AppApiException(500, gettext("Already voted, please cancel first and 
then vote again")) finally: un_lock(self.data.get('chat_record_id')) return True class ImproveSerializer(serializers.Serializer): - title = serializers.CharField(required=False, allow_null=True, allow_blank=True, - error_messages=ErrMessage.char("段落标题")) - content = serializers.CharField(required=True, error_messages=ErrMessage.char("段落内容")) + title = serializers.CharField(required=False, max_length=256, allow_null=True, allow_blank=True, + error_messages=ErrMessage.char(_("Section title"))) + content = serializers.CharField(required=True, error_messages=ErrMessage.char(_("Paragraph content"))) + + problem_text = serializers.CharField(required=False, max_length=256, allow_null=True, allow_blank=True, + error_messages=ErrMessage.char(_("question"))) class ParagraphModel(serializers.ModelSerializer): class Meta: @@ -409,9 +593,10 @@ class Meta: fields = "__all__" class ChatRecordImprove(serializers.Serializer): - chat_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid("对话id")) + chat_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid(_("Conversation ID"))) - chat_record_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid("对话记录id")) + chat_record_id = serializers.UUIDField(required=True, + error_messages=ErrMessage.uuid(_("Conversation record id"))) def get(self, with_valid=True): if with_valid: @@ -420,7 +605,7 @@ def get(self, with_valid=True): chat_id = self.data.get('chat_id') chat_record = QuerySet(ChatRecord).filter(id=chat_record_id, chat_id=chat_id).first() if chat_record is None: - raise AppApiException(500, '不存在的对话记录') + raise AppApiException(500, gettext('Conversation record does not exist')) if chat_record.improve_paragraph_id_list is None or len(chat_record.improve_paragraph_id_list) == 0: return [] @@ -434,24 +619,26 @@ def get(self, with_valid=True): return [ChatRecordSerializer.ParagraphModel(p).data for p in paragraph_model_list] class Improve(serializers.Serializer): - chat_id = 
serializers.UUIDField(required=True, error_messages=ErrMessage.uuid("对话id")) + chat_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid(_("Conversation ID"))) - chat_record_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid("对话记录id")) + chat_record_id = serializers.UUIDField(required=True, + error_messages=ErrMessage.uuid(_("Conversation record id"))) - dataset_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid("知识库id")) + dataset_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid(_("Knowledge base id"))) - document_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid("文档id")) + document_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid(_("Document id"))) def is_valid(self, *, raise_exception=False): super().is_valid(raise_exception=True) if not QuerySet(Document).filter(id=self.data.get('document_id'), dataset_id=self.data.get('dataset_id')).exists(): - raise AppApiException(500, "文档id不正确") + raise AppApiException(500, gettext("The document id is incorrect")) @staticmethod - def post_embedding_paragraph(chat_record, paragraph_id): + def post_embedding_paragraph(chat_record, paragraph_id, dataset_id): + model_id = get_embedding_model_id_by_dataset_id(dataset_id) # 发送向量化事件 - ListenerManagement.embedding_by_paragraph_signal.send(paragraph_id) + embedding_by_paragraph(paragraph_id, model_id) return chat_record @post(post_function=post_embedding_paragraph) @@ -464,7 +651,7 @@ def improve(self, instance: Dict, with_valid=True): chat_id = self.data.get('chat_id') chat_record = QuerySet(ChatRecord).filter(id=chat_record_id, chat_id=chat_id).first() if chat_record is None: - raise AppApiException(500, '不存在的对话记录') + raise AppApiException(500, gettext('Conversation record does not exist')) document_id = self.data.get("document_id") dataset_id = self.data.get("dataset_id") @@ -473,33 +660,34 @@ def improve(self, instance: Dict, with_valid=True): 
content=instance.get("content"), dataset_id=dataset_id, title=instance.get("title") if 'title' in instance else '') - - problem = Problem(id=uuid.uuid1(), content=chat_record.problem_text, dataset_id=dataset_id) + problem_text = instance.get('problem_text') if instance.get( + 'problem_text') is not None else chat_record.problem_text + problem, _ = Problem.objects.get_or_create(content=problem_text, dataset_id=dataset_id) problem_paragraph_mapping = ProblemParagraphMapping(id=uuid.uuid1(), dataset_id=dataset_id, document_id=document_id, problem_id=problem.id, paragraph_id=paragraph.id) - # 插入问题 - problem.save() # 插入段落 paragraph.save() # 插入关联问题 problem_paragraph_mapping.save() chat_record.improve_paragraph_id_list.append(paragraph.id) + update_document_char_length(document_id) # 添加标注 chat_record.save() - return ChatRecordSerializerModel(chat_record).data, paragraph.id + return ChatRecordSerializerModel(chat_record).data, paragraph.id, dataset_id class Operate(serializers.Serializer): - chat_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid("对话id")) + chat_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid(_("Conversation ID"))) - chat_record_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid("对话记录id")) + chat_record_id = serializers.UUIDField(required=True, + error_messages=ErrMessage.uuid(_("Conversation record id"))) - dataset_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid("知识库id")) + dataset_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid(_("Knowledge base id"))) - document_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid("文档id")) + document_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid(_("Document id"))) - paragraph_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid("段落id")) + paragraph_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid(_("Paragraph 
id"))) def delete(self, with_valid=True): if with_valid: @@ -512,9 +700,12 @@ def delete(self, with_valid=True): paragraph_id = self.data.get('paragraph_id') chat_record = QuerySet(ChatRecord).filter(id=chat_record_id, chat_id=chat_id).first() if chat_record is None: - raise AppApiException(500, '不存在的对话记录') + raise AppApiException(500, gettext('Conversation record does not exist')) if not chat_record.improve_paragraph_id_list.__contains__(uuid.UUID(paragraph_id)): - raise AppApiException(500, f'段落id错误,当前对话记录不存在【{paragraph_id}】段落id') + message = lazy_format( + _('The paragraph id is wrong. The current conversation record does not exist. [{paragraph_id}] paragraph id'), + paragraph_id=paragraph_id) + raise AppApiException(500, message) chat_record.improve_paragraph_id_list = [row for row in chat_record.improve_paragraph_id_list if str(row) != paragraph_id] chat_record.save() @@ -522,3 +713,68 @@ def delete(self, with_valid=True): data={"dataset_id": dataset_id, 'document_id': document_id, "paragraph_id": paragraph_id}) o.is_valid(raise_exception=True) return o.delete() + + class PostImprove(serializers.Serializer): + dataset_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid(_("Knowledge base id"))) + document_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid(_("Document id"))) + chat_ids = serializers.ListSerializer(child=serializers.UUIDField(), required=True, + error_messages=ErrMessage.list(_("Conversation ID"))) + + def is_valid(self, *, raise_exception=False): + super().is_valid(raise_exception=True) + if not Document.objects.filter(id=self.data['document_id'], dataset_id=self.data['dataset_id']).exists(): + raise AppApiException(500, gettext("The document id is incorrect")) + + @staticmethod + def post_embedding_paragraph(paragraph_ids, dataset_id): + model_id = get_embedding_model_id_by_dataset_id(dataset_id) + embedding_by_paragraph_list(paragraph_ids, model_id) + + @post(post_function=post_embedding_paragraph) + 
@transaction.atomic + def post_improve(self, instance: Dict): + ChatRecordSerializer.PostImprove(data=instance).is_valid(raise_exception=True) + + chat_ids = instance['chat_ids'] + document_id = instance['document_id'] + dataset_id = instance['dataset_id'] + + # 获取所有聊天记录 + chat_record_list = list(ChatRecord.objects.filter(chat_id__in=chat_ids)) + if len(chat_record_list) < len(chat_ids): + raise AppApiException(500, gettext("Conversation records that do not exist")) + + # 批量创建段落和问题映射 + paragraphs = [] + paragraph_ids = [] + problem_paragraph_mappings = [] + for chat_record in chat_record_list: + paragraph = Paragraph( + id=uuid.uuid1(), + document_id=document_id, + content=chat_record.answer_text, + dataset_id=dataset_id, + title=chat_record.problem_text + ) + problem, _ = Problem.objects.get_or_create(content=chat_record.problem_text, dataset_id=dataset_id) + problem_paragraph_mapping = ProblemParagraphMapping( + id=uuid.uuid1(), + dataset_id=dataset_id, + document_id=document_id, + problem_id=problem.id, + paragraph_id=paragraph.id + ) + paragraphs.append(paragraph) + paragraph_ids.append(paragraph.id) + problem_paragraph_mappings.append(problem_paragraph_mapping) + chat_record.improve_paragraph_id_list.append(paragraph.id) + + # 批量保存段落和问题映射 + Paragraph.objects.bulk_create(paragraphs) + ProblemParagraphMapping.objects.bulk_create(problem_paragraph_mappings) + + # 批量保存聊天记录 + ChatRecord.objects.bulk_update(chat_record_list, ['improve_paragraph_id_list']) + update_document_char_length(document_id) + + return paragraph_ids, dataset_id diff --git a/apps/application/sql/export_application_chat.sql b/apps/application/sql/export_application_chat.sql index dc580847e21..de9c7ee6ef7 100644 --- a/apps/application/sql/export_application_chat.sql +++ b/apps/application/sql/export_application_chat.sql @@ -10,7 +10,8 @@ SELECT application_chat_record_temp."index" as "index", application_chat_record_temp.improve_paragraph_list as improve_paragraph_list, 
application_chat_record_temp.vote_status as vote_status, - application_chat_record_temp.create_time as create_time + application_chat_record_temp.create_time as create_time, + to_json(application_chat.asker) as asker FROM application_chat application_chat LEFT JOIN ( @@ -22,6 +23,8 @@ FROM chat_id FROM application_chat_record + WHERE chat_id IN ( + SELECT id FROM application_chat ${inner_queryset}) GROUP BY application_chat_record.chat_id ) chat_record_temp ON application_chat."id" = chat_record_temp.chat_id @@ -34,4 +37,5 @@ FROM END as improve_paragraph_list FROM application_chat_record application_chat_record - ) application_chat_record_temp ON application_chat_record_temp.chat_id = application_chat."id" \ No newline at end of file + ) application_chat_record_temp ON application_chat_record_temp.chat_id = application_chat."id" + ${default_queryset} \ No newline at end of file diff --git a/apps/application/sql/list_application.sql b/apps/application/sql/list_application.sql index b7aa0fbe9ba..4a4cde55675 100644 --- a/apps/application/sql/list_application.sql +++ b/apps/application/sql/list_application.sql @@ -1,4 +1,4 @@ -SELECT *,to_json(dataset_setting) as dataset_setting,to_json(model_setting) as model_setting FROM ( SELECT * FROM application ${application_custom_sql} UNION +SELECT *,to_json(dataset_setting) as dataset_setting,to_json(model_setting) as model_setting,to_json(work_flow) as work_flow FROM ( SELECT * FROM application ${application_custom_sql} UNION SELECT * FROM diff --git a/apps/application/sql/list_application_chat.sql b/apps/application/sql/list_application_chat.sql index bf269d005b6..c9f83c6b7c3 100644 --- a/apps/application/sql/list_application_chat.sql +++ b/apps/application/sql/list_application_chat.sql @@ -1,5 +1,5 @@ SELECT - * + *,to_json(asker) as asker FROM application_chat application_chat LEFT JOIN ( @@ -11,6 +11,9 @@ FROM chat_id FROM application_chat_record + WHERE chat_id IN ( + SELECT id FROM application_chat ${inner_queryset}) 
GROUP BY application_chat_record.chat_id - ) chat_record_temp ON application_chat."id" = chat_record_temp.chat_id \ No newline at end of file + ) chat_record_temp ON application_chat."id" = chat_record_temp.chat_id +${default_queryset} \ No newline at end of file diff --git a/apps/application/sql/list_dataset_paragraph_by_paragraph_id.sql b/apps/application/sql/list_dataset_paragraph_by_paragraph_id.sql index 2bacd53e17a..803b6307ecb 100644 --- a/apps/application/sql/list_dataset_paragraph_by_paragraph_id.sql +++ b/apps/application/sql/list_dataset_paragraph_by_paragraph_id.sql @@ -2,6 +2,7 @@ SELECT paragraph.*, dataset."name" AS "dataset_name", "document"."name" AS "document_name", + "document"."meta" AS "meta", "document"."hit_handling_method" AS "hit_handling_method", "document"."directly_return_similarity" as "directly_return_similarity" FROM diff --git a/apps/application/swagger_api/application_api.py b/apps/application/swagger_api/application_api.py index 4bacc5831cd..a2f08f0eae8 100644 --- a/apps/application/swagger_api/application_api.py +++ b/apps/application/swagger_api/application_api.py @@ -9,6 +9,7 @@ from drf_yasg import openapi from common.mixins.api_mixin import ApiMixin +from django.utils.translation import gettext_lazy as _ class ApplicationApi(ApiMixin): @@ -20,7 +21,7 @@ def get_request_params_api(): in_=openapi.IN_FORM, type=openapi.TYPE_FILE, required=True, - description='上传文件') + description=_('Upload files')) ] class Authentication(ApiMixin): @@ -30,43 +31,75 @@ def get_request_body_api(): type=openapi.TYPE_OBJECT, required=['access_token', ], properties={ - 'access_token': openapi.Schema(type=openapi.TYPE_STRING, title="应用认证token", - description="应用认证token"), + 'access_token': openapi.Schema(type=openapi.TYPE_STRING, + title=_("Application authentication token"), + description=_("Application authentication token")), } ) + @staticmethod + def get_response_body_api(): + return openapi.Schema( + type=openapi.TYPE_STRING, + 
title=_("Application authentication token"), + description=_("Application authentication token"), + default="token" + ) + @staticmethod def get_response_body_api(): return openapi.Schema( type=openapi.TYPE_OBJECT, - required=['id', 'name', 'desc', 'model_id', 'multiple_rounds_dialogue', 'user_id', 'status', 'create_time', + required=['id', 'name', 'desc', 'model_id', 'dialogue_number', 'user_id', 'status', 'create_time', 'update_time'], properties={ - 'id': openapi.Schema(type=openapi.TYPE_STRING, title="", description="主键id"), - 'name': openapi.Schema(type=openapi.TYPE_STRING, title="应用名称", description="应用名称"), - 'desc': openapi.Schema(type=openapi.TYPE_STRING, title="应用描述", description="应用描述"), - 'model_id': openapi.Schema(type=openapi.TYPE_STRING, title="模型id", description="模型id"), - "multiple_rounds_dialogue": openapi.Schema(type=openapi.TYPE_BOOLEAN, title="是否开启多轮对话", - description="是否开启多轮对话"), - 'prologue': openapi.Schema(type=openapi.TYPE_STRING, title="开场白", description="开场白"), + 'id': openapi.Schema(type=openapi.TYPE_STRING, title="", description=_("Primary key id")), + 'name': openapi.Schema(type=openapi.TYPE_STRING, title=_("Application Name"), + description=_("Application Name")), + 'desc': openapi.Schema(type=openapi.TYPE_STRING, title=_("Application Description"), + description=_("Application Description")), + 'model_id': openapi.Schema(type=openapi.TYPE_STRING, title=_("Model id"), description=_("Model id")), + "dialogue_number": openapi.Schema(type=openapi.TYPE_NUMBER, + title=_("Number of multi-round conversations"), + description=_("Number of multi-round conversations")), + 'prologue': openapi.Schema(type=openapi.TYPE_STRING, title=_("Opening remarks"), + description=_("Opening remarks")), 'example': openapi.Schema(type=openapi.TYPE_ARRAY, items=openapi.Schema(type=openapi.TYPE_STRING), - title="示例列表", description="示例列表"), - 'user_id': openapi.Schema(type=openapi.TYPE_STRING, title="所属用户", description="所属用户"), + title=_("Example List"), 
description=_("Example List")), + 'user_id': openapi.Schema(type=openapi.TYPE_STRING, title=_("Affiliation user"), + description=_("Affiliation user")), - 'status': openapi.Schema(type=openapi.TYPE_BOOLEAN, title="是否发布", description='是否发布'), + 'status': openapi.Schema(type=openapi.TYPE_BOOLEAN, title=_("Is publish"), description=_('Is publish')), - 'create_time': openapi.Schema(type=openapi.TYPE_STRING, title="创建时间", description='创建时间'), + 'create_time': openapi.Schema(type=openapi.TYPE_STRING, title=_("Creation time"), + description=_('Creation time')), - 'update_time': openapi.Schema(type=openapi.TYPE_STRING, title="修改时间", description='修改时间'), + 'update_time': openapi.Schema(type=openapi.TYPE_STRING, title=_("Modification time"), + description=_('Modification time')), 'dataset_id_list': openapi.Schema(type=openapi.TYPE_ARRAY, items=openapi.Schema(type=openapi.TYPE_STRING), - title="关联知识库Id列表", - description="关联知识库Id列表(查询详情的时候返回)") + title=_("List of associated knowledge base IDs"), + description=_( + "List of associated knowledge base IDs (returned when querying details)")) } ) + class Model(ApiMixin): + @staticmethod + def get_request_params_api(): + return [openapi.Parameter(name='application_id', + in_=openapi.IN_PATH, + type=openapi.TYPE_STRING, + required=True, + description=_('Application ID')), + openapi.Parameter(name='model_type', in_=openapi.IN_QUERY, + type=openapi.TYPE_STRING, + required=False, + description=_('Model Type')), + ] + class ApiKey(ApiMixin): @staticmethod def get_request_params_api(): @@ -74,7 +107,7 @@ def get_request_params_api(): in_=openapi.IN_PATH, type=openapi.TYPE_STRING, required=True, - description='应用id') + description=_('Application ID')) ] @@ -85,12 +118,12 @@ def get_request_params_api(): in_=openapi.IN_PATH, type=openapi.TYPE_STRING, required=True, - description='应用id'), + description=_('Application ID')), openapi.Parameter(name='api_key_id', in_=openapi.IN_PATH, type=openapi.TYPE_STRING, required=True, - 
description='应用api_key id') + description=_('Application api_key id')) ] @staticmethod @@ -99,11 +132,33 @@ def get_request_body_api(): type=openapi.TYPE_OBJECT, required=[], properties={ - 'is_active': openapi.Schema(type=openapi.TYPE_BOOLEAN, title="是否激活", - description="是否激活"), - 'allow_cross_domain': openapi.Schema(type=openapi.TYPE_BOOLEAN, title="是否允许跨域", - description="是否允许跨域"), - 'cross_domain_list': openapi.Schema(type=openapi.TYPE_ARRAY, title='跨域列表', + 'is_active': openapi.Schema(type=openapi.TYPE_BOOLEAN, title=_("Is activation"), + description=_("Is activation")), + 'allow_cross_domain': openapi.Schema(type=openapi.TYPE_BOOLEAN, + title=_("Is cross-domain allowed"), + description=_("Is cross-domain allowed")), + 'cross_domain_list': openapi.Schema(type=openapi.TYPE_ARRAY, title=_('Cross-domain list'), + items=openapi.Schema(type=openapi.TYPE_STRING)) + } + ) + + @staticmethod + def get_response_body_api(): + return openapi.Schema( + type=openapi.TYPE_OBJECT, + properties={ + 'id': openapi.Schema(type=openapi.TYPE_STRING, title=_("Primary key id"), + description=_("Primary key id")), + 'secret_key': openapi.Schema(type=openapi.TYPE_STRING, title=_("Secret key"), + description=_("Secret key")), + 'is_active': openapi.Schema(type=openapi.TYPE_BOOLEAN, title=_("Is activation"), + description=_("Is activation")), + 'application_id': openapi.Schema(type=openapi.TYPE_STRING, title=_("Application ID"), + description=_("Application ID")), + 'allow_cross_domain': openapi.Schema(type=openapi.TYPE_BOOLEAN, + title=_("Is cross-domain allowed"), + description=_("Is cross-domain allowed")), + 'cross_domain_list': openapi.Schema(type=openapi.TYPE_ARRAY, title=_('Cross-domain list'), items=openapi.Schema(type=openapi.TYPE_STRING)) } ) @@ -115,7 +170,7 @@ def get_request_params_api(): in_=openapi.IN_PATH, type=openapi.TYPE_STRING, required=True, - description='应用id') + description=_('Application ID')) ] @@ -125,18 +180,55 @@ def get_request_body_api(): 
type=openapi.TYPE_OBJECT, required=[], properties={ - 'access_token_reset': openapi.Schema(type=openapi.TYPE_BOOLEAN, title="重置Token", - description="重置Token"), + 'access_token_reset': openapi.Schema(type=openapi.TYPE_BOOLEAN, title=_("Reset Token"), + description=_("Reset Token")), - 'is_active': openapi.Schema(type=openapi.TYPE_BOOLEAN, title="是否激活", description="是否激活"), - 'access_num': openapi.Schema(type=openapi.TYPE_NUMBER, title="访问次数", description="访问次数"), - 'white_active': openapi.Schema(type=openapi.TYPE_BOOLEAN, title="是否开启白名单", - description="是否开启白名单"), + 'is_active': openapi.Schema(type=openapi.TYPE_BOOLEAN, title=_("Is activation"), + description=_("Is activation")), + 'access_num': openapi.Schema(type=openapi.TYPE_NUMBER, title=_("Number of visits"), + description=_("Number of visits")), + 'white_active': openapi.Schema(type=openapi.TYPE_BOOLEAN, title=_("Whether to enable whitelist"), + description=_("Whether to enable whitelist")), 'white_list': openapi.Schema(type=openapi.TYPE_ARRAY, - items=openapi.Schema(type=openapi.TYPE_STRING), title="白名单列表", - description="白名单列表"), - 'show_source': openapi.Schema(type=openapi.TYPE_BOOLEAN, title="是否显示知识来源", - description="是否显示知识来源"), + items=openapi.Schema(type=openapi.TYPE_STRING), title=_("Whitelist"), + description=_("Whitelist")), + 'show_source': openapi.Schema(type=openapi.TYPE_BOOLEAN, + title=_("Whether to display knowledge sources"), + description=_("Whether to display knowledge sources")), + 'language': openapi.Schema(type=openapi.TYPE_STRING, + title=_("language"), + description=_("language")) + } + ) + + @staticmethod + def get_response_body_api(): + return openapi.Schema( + type=openapi.TYPE_OBJECT, + required=[], + properties={ + 'id': openapi.Schema(type=openapi.TYPE_STRING, title=_("Primary key id"), + description=_("Primary key id")), + 'access_token': openapi.Schema(type=openapi.TYPE_STRING, title=_("Access Token"), + description=_("Access Token")), + 'access_token_reset': 
openapi.Schema(type=openapi.TYPE_BOOLEAN, title=_("Reset Token"), + description=_("Reset Token")), + + 'is_active': openapi.Schema(type=openapi.TYPE_BOOLEAN, title=_("Is activation"), + description=_("Is activation")), + 'access_num': openapi.Schema(type=openapi.TYPE_NUMBER, title=_("Number of visits"), + description=_("Number of visits")), + 'white_active': openapi.Schema(type=openapi.TYPE_BOOLEAN, title=_("Whether to enable whitelist"), + description=_("Whether to enable whitelist")), + 'white_list': openapi.Schema(type=openapi.TYPE_ARRAY, + items=openapi.Schema(type=openapi.TYPE_STRING), title=_("Whitelist"), + description=_("Whitelist")), + 'show_source': openapi.Schema(type=openapi.TYPE_BOOLEAN, + title=_("Whether to display knowledge sources"), + description=_("Whether to display knowledge sources")), + 'language': openapi.Schema(type=openapi.TYPE_STRING, + title=_("language"), + description=_("language")) } ) @@ -147,21 +239,63 @@ def get_request_body_api(): type=openapi.TYPE_OBJECT, required=[], properties={ - 'name': openapi.Schema(type=openapi.TYPE_STRING, title="应用名称", description="应用名称"), - 'desc': openapi.Schema(type=openapi.TYPE_STRING, title="应用描述", description="应用描述"), - 'model_id': openapi.Schema(type=openapi.TYPE_STRING, title="模型id", description="模型id"), - "multiple_rounds_dialogue": openapi.Schema(type=openapi.TYPE_BOOLEAN, title="是否开启多轮对话", - description="是否开启多轮对话"), - 'prologue': openapi.Schema(type=openapi.TYPE_STRING, title="开场白", description="开场白"), + 'name': openapi.Schema(type=openapi.TYPE_STRING, title=_("Application Name"), + description=_("Application Name")), + 'desc': openapi.Schema(type=openapi.TYPE_STRING, title=_("Application Description"), + description=_("Application Description")), + 'model_id': openapi.Schema(type=openapi.TYPE_STRING, title=_("Model id"), + description=_("Model id")), + "dialogue_number": openapi.Schema(type=openapi.TYPE_NUMBER, + title=_("Number of multi-round conversations"), + description=_("Number of 
multi-round conversations")), + 'prologue': openapi.Schema(type=openapi.TYPE_STRING, title=_("Opening remarks"), + description=_("Opening remarks")), 'dataset_id_list': openapi.Schema(type=openapi.TYPE_ARRAY, items=openapi.Schema(type=openapi.TYPE_STRING), - title="关联知识库Id列表", description="关联知识库Id列表"), + title=_("List of associated knowledge base IDs"), + description=_("List of associated knowledge base IDs")), 'dataset_setting': ApplicationApi.DatasetSetting.get_request_body_api(), 'model_setting': ApplicationApi.ModelSetting.get_request_body_api(), - 'problem_optimization': openapi.Schema(type=openapi.TYPE_BOOLEAN, title="问题优化", - description="是否开启问题优化", default=True), + 'problem_optimization': openapi.Schema(type=openapi.TYPE_BOOLEAN, title=_("Problem Optimization"), + description=_("Whether to enable problem optimization"), + default=True), 'icon': openapi.Schema(type=openapi.TYPE_STRING, title="icon", - description="icon", default="/ui/favicon.ico") + description="icon", default="/ui/favicon.ico"), + 'type': openapi.Schema(type=openapi.TYPE_STRING, title=_("Application Type"), + description=_("Application Type SIMPLE | WORK_FLOW")), + 'work_flow': ApplicationApi.WorkFlow.get_request_body_api(), + 'problem_optimization_prompt': openapi.Schema(type=openapi.TYPE_STRING, + title=_('Question optimization tips'), + description=_("Question optimization tips"), + default=_( + "() contains the user's question. 
Answer the guessed user's question based on the context ({question}) Requirement: Output a complete question and put it in the tag")), + 'tts_model_id': openapi.Schema(type=openapi.TYPE_STRING, title=_("Text-to-speech model ID"), + description=_("Text-to-speech model ID")), + 'stt_model_id': openapi.Schema(type=openapi.TYPE_STRING, title=_("Speech-to-text model id"), + description=_("Speech-to-text model id")), + 'stt_model_enable': openapi.Schema(type=openapi.TYPE_STRING, title=_("Is speech-to-text enabled"), + description=_("Is speech-to-text enabled")), + 'tts_model_enable': openapi.Schema(type=openapi.TYPE_STRING, title=_("Is text-to-speech enabled"), + description=_("Is text-to-speech enabled")), + 'tts_type': openapi.Schema(type=openapi.TYPE_STRING, title=_("Text-to-speech type"), + description=_("Text-to-speech type")) + + } + ) + + class WorkFlow(ApiMixin): + @staticmethod + def get_request_body_api(): + return openapi.Schema( + type=openapi.TYPE_OBJECT, + required=[''], + properties={ + 'nodes': openapi.Schema(type=openapi.TYPE_ARRAY, items=openapi.Schema(type=openapi.TYPE_OBJECT), + title=_("Node List"), description=_("Node List"), + default=[]), + 'edges': openapi.Schema(type=openapi.TYPE_ARRAY, items=openapi.Schema(type=openapi.TYPE_OBJECT), + title=_('Connection List'), description=_("Connection List"), + default=[]), } ) @@ -173,24 +307,31 @@ def get_request_body_api(): type=openapi.TYPE_OBJECT, required=[''], properties={ - 'top_n': openapi.Schema(type=openapi.TYPE_NUMBER, title="引用分段数", description="引用分段数", + 'top_n': openapi.Schema(type=openapi.TYPE_NUMBER, title=_("Reference segment number"), + description=_("Reference segment number"), default=5), - 'similarity': openapi.Schema(type=openapi.TYPE_NUMBER, title='相似度', description="相似度", + 'similarity': openapi.Schema(type=openapi.TYPE_NUMBER, title=_('Similarity'), + description=_("Similarity"), default=0.6), - 'max_paragraph_char_number': openapi.Schema(type=openapi.TYPE_NUMBER, title='最多引用字符数', - 
description="最多引用字符数", default=3000), - 'search_mode': openapi.Schema(type=openapi.TYPE_STRING, title='检索模式', + 'max_paragraph_char_number': openapi.Schema(type=openapi.TYPE_NUMBER, + title=_('Maximum number of quoted characters'), + description=_("Maximum number of quoted characters"), + default=3000), + 'search_mode': openapi.Schema(type=openapi.TYPE_STRING, title=_('Retrieval Mode'), description="embedding|keywords|blend", default='embedding'), - 'no_references_setting': openapi.Schema(type=openapi.TYPE_OBJECT, title='检索模式', + 'no_references_setting': openapi.Schema(type=openapi.TYPE_OBJECT, + title=_('No reference segment settings'), required=['status', 'value'], properties={ 'status': openapi.Schema(type=openapi.TYPE_STRING, - title="状态", - description="ai作答:ai_questioning,指定回答:designated_answer", + title=_("state"), + description=_( + "ai_questioning|designated_answer"), default='ai_questioning'), 'value': openapi.Schema(type=openapi.TYPE_STRING, - title="值", - description="ai作答:就是题词,指定回答:就是指定回答内容", + title=_("value"), + description=_( + "ai_questioning: is the title, designated_answer: is the designated answer content"), default='{question}'), }), } @@ -203,19 +344,38 @@ def get_request_body_api(): type=openapi.TYPE_OBJECT, required=['prompt'], properties={ - 'prompt': openapi.Schema(type=openapi.TYPE_STRING, title="提示词", description="提示词", - default=('已知信息:' - '\n{data}' - '\n回答要求:' - '\n- 如果你不知道答案或者没有从获取答案,请回答“没有在知识库中查找到相关信息,建议咨询相关技术支持或参考官方文档进行操作”。' - '\n- 避免提及你是从中获得的知识。' - '\n- 请保持答案与中描述的一致。' - '\n- 请使用markdown 语法优化答案的格式。' - '\n- 中的图片链接、链接地址和脚本语言请完整返回。' - '\n- 请使用与问题相同的语言来回答。' - '\n问题:' - '\n{question}')), + 'prompt': openapi.Schema(type=openapi.TYPE_STRING, title=_("Prompt word"), + description=_("Prompt word"), + default=_(("Known information:\n" + "{data}\n" + "Answer requirements:\n" + "- If you don't know the answer or don't get the answer, please answer \"No relevant information found in the knowledge base, it is recommended to consult relevant 
technical support or refer to official documents for operation\".\n" + "- Avoid mentioning that you got the knowledge from .\n" + "- Please keep the answer consistent with the description in .\n" + "- Please use markdown syntax to optimize the format of the answer.\n" + "- Please return the image link, link address and script language in completely.\n" + "- Please answer in the same language as the question.\n" + "Question:\n" + "{question}"))), + 'system': openapi.Schema(type=openapi.TYPE_STRING, title=_("System prompt words (role)"), + description=_("System prompt words (role)")), + 'no_references_prompt': openapi.Schema(type=openapi.TYPE_STRING, + title=_("No citation segmentation prompt"), + default="{question}", + description=_("No citation segmentation prompt")) + + } + ) + + class Publish(ApiMixin): + @staticmethod + def get_request_body_api(): + return openapi.Schema( + type=openapi.TYPE_OBJECT, + required=[], + properties={ + 'work_flow': ApplicationApi.WorkFlow.get_request_body_api() } ) @@ -224,23 +384,97 @@ class Create(ApiMixin): def get_request_body_api(): return openapi.Schema( type=openapi.TYPE_OBJECT, - required=['name', 'desc', 'model_id', 'multiple_rounds_dialogue', 'dataset_setting', 'model_setting', - 'problem_optimization'], + required=['name', 'desc', 'model_id', 'dialogue_number', 'dataset_setting', 'model_setting', + 'problem_optimization', 'stt_model_enable', 'stt_model_enable', 'tts_type', + 'work_flow'], properties={ - 'name': openapi.Schema(type=openapi.TYPE_STRING, title="应用名称", description="应用名称"), - 'desc': openapi.Schema(type=openapi.TYPE_STRING, title="应用描述", description="应用描述"), - 'model_id': openapi.Schema(type=openapi.TYPE_STRING, title="模型id", description="模型id"), - "multiple_rounds_dialogue": openapi.Schema(type=openapi.TYPE_BOOLEAN, title="是否开启多轮对话", - description="是否开启多轮对话"), - 'prologue': openapi.Schema(type=openapi.TYPE_STRING, title="开场白", description="开场白"), + 'name': openapi.Schema(type=openapi.TYPE_STRING, 
title=_("Application Name"), + description=_("Application Name")), + 'desc': openapi.Schema(type=openapi.TYPE_STRING, title=_("Application Description"), + description=_("Application Description")), + 'model_id': openapi.Schema(type=openapi.TYPE_STRING, title=_("Model id"), + description=_("Model id")), + "dialogue_number": openapi.Schema(type=openapi.TYPE_NUMBER, + title=_("Number of multi-round conversations"), + description=_("Number of multi-round conversations")), + 'prologue': openapi.Schema(type=openapi.TYPE_STRING, title=_("Opening remarks"), + description=_("Opening remarks")), 'dataset_id_list': openapi.Schema(type=openapi.TYPE_ARRAY, items=openapi.Schema(type=openapi.TYPE_STRING), - title="关联知识库Id列表", description="关联知识库Id列表"), + title=_("List of associated knowledge base IDs"), + description=_("List of associated knowledge base IDs")), 'dataset_setting': ApplicationApi.DatasetSetting.get_request_body_api(), 'model_setting': ApplicationApi.ModelSetting.get_request_body_api(), - 'problem_optimization': openapi.Schema(type=openapi.TYPE_BOOLEAN, title="问题优化", - description="是否开启问题优化", default=True) + 'problem_optimization': openapi.Schema(type=openapi.TYPE_BOOLEAN, title=_("Problem Optimization"), + description=_("Problem Optimization"), default=True), + 'type': openapi.Schema(type=openapi.TYPE_STRING, title=_("Application Type"), + description=_("Application Type SIMPLE | WORK_FLOW")), + 'problem_optimization_prompt': openapi.Schema(type=openapi.TYPE_STRING, + title=_('Question optimization tips'), + description=_("Question optimization tips"), + default=_( + "() contains the user's question. 
Answer the guessed user's question based on the context ({question}) Requirement: Output a complete question and put it in the tag")), + 'tts_model_id': openapi.Schema(type=openapi.TYPE_STRING, title=_("Text-to-speech model ID"), + description=_("Text-to-speech model ID")), + 'stt_model_id': openapi.Schema(type=openapi.TYPE_STRING, title=_("Speech-to-text model id"), + description=_("Speech-to-text model id")), + 'stt_model_enable': openapi.Schema(type=openapi.TYPE_STRING, title=_("Is speech-to-text enabled"), + description=_("Is speech-to-text enabled")), + 'tts_model_enable': openapi.Schema(type=openapi.TYPE_STRING, title=_("Is text-to-speech enabled"), + description=_("Is text-to-speech enabled")), + 'tts_type': openapi.Schema(type=openapi.TYPE_STRING, title=_("Text-to-speech type"), + description=_("Text-to-speech type")), + 'work_flow': ApplicationApi.WorkFlow.get_request_body_api(), + } + ) + @staticmethod + def get_response_body_api(): + return openapi.Schema( + type=openapi.TYPE_OBJECT, + required=['id', 'name', 'desc', 'model_id', 'dialogue_number', 'dataset_setting', 'model_setting', + 'problem_optimization', 'stt_model_enable', 'stt_model_enable', 'tts_type', + 'work_flow'], + properties={ + 'id': openapi.Schema(type=openapi.TYPE_STRING, title=_("Primary key id"), + description=_("Primary key id")), + 'name': openapi.Schema(type=openapi.TYPE_STRING, title=_("Application Name"), + description=_("Application Name")), + 'desc': openapi.Schema(type=openapi.TYPE_STRING, title=_("Application Description"), + description=_("Application Description")), + 'model_id': openapi.Schema(type=openapi.TYPE_STRING, title=_("Model id"), + description=_("Model id")), + "dialogue_number": openapi.Schema(type=openapi.TYPE_NUMBER, + title=_("Number of multi-round conversations"), + description=_("Number of multi-round conversations")), + 'prologue': openapi.Schema(type=openapi.TYPE_STRING, title=_("Opening remarks"), + description=_("Opening remarks")), + 'dataset_id_list': 
openapi.Schema(type=openapi.TYPE_ARRAY, + items=openapi.Schema(type=openapi.TYPE_STRING), + title=_("List of associated knowledge base IDs"), + description=_("List of associated knowledge base IDs")), + 'dataset_setting': ApplicationApi.DatasetSetting.get_request_body_api(), + 'model_setting': ApplicationApi.ModelSetting.get_request_body_api(), + 'problem_optimization': openapi.Schema(type=openapi.TYPE_BOOLEAN, title=_("Problem Optimization"), + description=_("Problem Optimization"), default=True), + 'type': openapi.Schema(type=openapi.TYPE_STRING, title=_("Application Type"), + description=_("Application Type SIMPLE | WORK_FLOW")), + 'problem_optimization_prompt': openapi.Schema(type=openapi.TYPE_STRING, + title=_('Question optimization tips'), + description=_("Question optimization tips"), + default=_( + "() contains the user's question. Answer the guessed user's question based on the context ({question}) Requirement: Output a complete question and put it in the tag")), + 'tts_model_id': openapi.Schema(type=openapi.TYPE_STRING, title=_("Text-to-speech model ID"), + description=_("Text-to-speech model ID")), + 'stt_model_id': openapi.Schema(type=openapi.TYPE_STRING, title=_("Speech-to-text model id"), + description=_("Speech-to-text model id")), + 'stt_model_enable': openapi.Schema(type=openapi.TYPE_STRING, title=_("Is speech-to-text enabled"), + description=_("Is speech-to-text enabled")), + 'tts_model_enable': openapi.Schema(type=openapi.TYPE_STRING, title=_("Is text-to-speech enabled"), + description=_("Is text-to-speech enabled")), + 'tts_type': openapi.Schema(type=openapi.TYPE_STRING, title=_("Text-to-speech type"), + description=_("Text-to-speech type")), + 'work_flow': ApplicationApi.WorkFlow.get_request_body_api(), } ) @@ -251,12 +485,33 @@ def get_request_params_api(): in_=openapi.IN_QUERY, type=openapi.TYPE_STRING, required=False, - description='应用名称'), + description=_('Application Name')), openapi.Parameter(name='desc', in_=openapi.IN_QUERY, 
type=openapi.TYPE_STRING, required=False, - description='应用描述') + description=_('Application Description')) + ] + + class Export(ApiMixin): + @staticmethod + def get_request_params_api(): + return [openapi.Parameter(name='application_id', + in_=openapi.IN_PATH, + type=openapi.TYPE_STRING, + required=True, + description=_('Application ID')), + + ] + + class Import(ApiMixin): + @staticmethod + def get_request_params_api(): + return [openapi.Parameter(name='file', + in_=openapi.IN_FORM, + type=openapi.TYPE_FILE, + required=True, + description=_('Upload image files')) ] class Operate(ApiMixin): @@ -266,6 +521,28 @@ def get_request_params_api(): in_=openapi.IN_PATH, type=openapi.TYPE_STRING, required=True, - description='应用id'), + description=_('Application ID')), ] + + class TextToSpeech(ApiMixin): + @staticmethod + def get_request_params_api(): + return [openapi.Parameter(name='application_id', + in_=openapi.IN_PATH, + type=openapi.TYPE_STRING, + required=True, + description=_('Application ID')), + + ] + + @staticmethod + def get_request_body_api(): + return openapi.Schema( + type=openapi.TYPE_OBJECT, + required=[], + properties={ + 'text': openapi.Schema(type=openapi.TYPE_STRING, title=_("Text"), + description=_("Text")), + } + ) diff --git a/apps/application/swagger_api/application_statistics_api.py b/apps/application/swagger_api/application_statistics_api.py index 87fde1098bb..7824a34c51d 100644 --- a/apps/application/swagger_api/application_statistics_api.py +++ b/apps/application/swagger_api/application_statistics_api.py @@ -9,7 +9,7 @@ from drf_yasg import openapi from common.mixins.api_mixin import ApiMixin - +from django.utils.translation import gettext_lazy as _ class ApplicationStatisticsApi(ApiMixin): @staticmethod @@ -18,17 +18,17 @@ def get_request_params_api(): in_=openapi.IN_PATH, type=openapi.TYPE_STRING, required=True, - description='应用id'), + description=_('Application ID')), openapi.Parameter(name='start_time', in_=openapi.IN_QUERY, 
type=openapi.TYPE_STRING, required=True, - description='开始时间'), + description=_('Start time')), openapi.Parameter(name='end_time', in_=openapi.IN_QUERY, type=openapi.TYPE_STRING, required=True, - description='结束时间'), + description=_('End time')), ] class ChatRecordAggregate(ApiMixin): @@ -38,21 +38,21 @@ def get_response_body_api(): type=openapi.TYPE_OBJECT, required=['star_num', 'trample_num', 'tokens_num', 'chat_record_count'], properties={ - 'star_num': openapi.Schema(type=openapi.TYPE_NUMBER, title="点赞数量", - description="点赞数量"), + 'star_num': openapi.Schema(type=openapi.TYPE_NUMBER, title=_("Number of Likes"), + description=_("Number of Likes")), - 'trample_num': openapi.Schema(type=openapi.TYPE_NUMBER, title="点踩数量", description="点踩数量"), - 'tokens_num': openapi.Schema(type=openapi.TYPE_NUMBER, title="token使用数量", - description="token使用数量"), - 'chat_record_count': openapi.Schema(type=openapi.TYPE_NUMBER, title="对话次数", - description="对话次数"), - 'customer_num': openapi.Schema(type=openapi.TYPE_NUMBER, title="客户数量", - description="客户数量"), - 'customer_added_count': openapi.Schema(type=openapi.TYPE_NUMBER, title="客户新增数量", - description="客户新增数量"), + 'trample_num': openapi.Schema(type=openapi.TYPE_NUMBER, title=_("Number of thumbs-downs"), description=_("Number of thumbs-downs")), + 'tokens_num': openapi.Schema(type=openapi.TYPE_NUMBER, title=_("Number of tokens used"), + description=_("Number of tokens used")), + 'chat_record_count': openapi.Schema(type=openapi.TYPE_NUMBER, title=_("Number of conversations"), + description=_("Number of conversations")), + 'customer_num': openapi.Schema(type=openapi.TYPE_NUMBER, title=_("Number of customers"), + description=_("Number of customers")), + 'customer_added_count': openapi.Schema(type=openapi.TYPE_NUMBER, title=_("Number of new customers"), + description=_("Number of new customers")), 'day': openapi.Schema(type=openapi.TYPE_STRING, - title="日期", - description="日期,只有查询趋势的时候才有该字段"), + title=_("time"), + description=_("Time, this 
field is only available when querying trends")), } ) @@ -63,11 +63,11 @@ def get_response_body_api(): type=openapi.TYPE_OBJECT, required=['added_count'], properties={ - 'added_count': openapi.Schema(type=openapi.TYPE_NUMBER, title="新增数量", description="新增数量"), + 'added_count': openapi.Schema(type=openapi.TYPE_NUMBER, title=_("New quantity"), description=_("New quantity")), 'day': openapi.Schema(type=openapi.TYPE_STRING, - title="时间", - description="时间"), + title=_("time"), + description=_("time")), } ) @@ -78,9 +78,9 @@ def get_response_body_api(): type=openapi.TYPE_OBJECT, required=['added_count'], properties={ - 'today_added_count': openapi.Schema(type=openapi.TYPE_NUMBER, title="今日新增数量", - description="今日新增数量"), - 'added_count': openapi.Schema(type=openapi.TYPE_NUMBER, title="新增数量", description="新增数量"), + 'today_added_count': openapi.Schema(type=openapi.TYPE_NUMBER, title=_("Today's new quantity"), + description=_("Today's new quantity")), + 'added_count': openapi.Schema(type=openapi.TYPE_NUMBER, title=_("New quantity"), description=_("New quantity")), } ) diff --git a/apps/application/swagger_api/application_version_api.py b/apps/application/swagger_api/application_version_api.py new file mode 100644 index 00000000000..d7edb9a7dd8 --- /dev/null +++ b/apps/application/swagger_api/application_version_api.py @@ -0,0 +1,73 @@ +# coding=utf-8 +""" + @project: MaxKB + @Author:虎 + @file: application_version_api.py + @date:2024/10/15 17:18 + @desc: +""" +from drf_yasg import openapi + +from common.mixins.api_mixin import ApiMixin +from django.utils.translation import gettext_lazy as _ + + +class ApplicationVersionApi(ApiMixin): + @staticmethod + def get_response_body_api(): + return openapi.Schema( + type=openapi.TYPE_OBJECT, + required=['id', 'name', 'work_flow', 'create_time', 'update_time'], + properties={ + 'id': openapi.Schema(type=openapi.TYPE_NUMBER, title=_("Primary key id"), + description=_("Primary key id")), + 'name': openapi.Schema(type=openapi.TYPE_NUMBER, 
title=_("Version Name"), + description=_("Version Name")), + 'work_flow': openapi.Schema(type=openapi.TYPE_STRING, title=_("Workflow data"), + description=_('Workflow data')), + 'create_time': openapi.Schema(type=openapi.TYPE_STRING, title=_("Creation time"), + description=_('Creation time')), + 'update_time': openapi.Schema(type=openapi.TYPE_STRING, title=_("Modification time"), + description=_('Modification time')) + } + ) + + class Query(ApiMixin): + @staticmethod + def get_request_params_api(): + return [openapi.Parameter(name='application_id', + in_=openapi.IN_PATH, + type=openapi.TYPE_STRING, + required=True, + description=_('Application ID')), + openapi.Parameter(name='name', + in_=openapi.IN_QUERY, + type=openapi.TYPE_STRING, + required=False, + description=_('Version Name'))] + + class Operate(ApiMixin): + @staticmethod + def get_request_params_api(): + return [openapi.Parameter(name='application_id', + in_=openapi.IN_PATH, + type=openapi.TYPE_STRING, + required=True, + description=_('Application ID')), + openapi.Parameter(name='work_flow_version_id', + in_=openapi.IN_PATH, + type=openapi.TYPE_STRING, + required=True, + description=_('Application version id')), ] + + class Edit(ApiMixin): + @staticmethod + def get_request_body_api(): + return openapi.Schema( + type=openapi.TYPE_OBJECT, + required=[], + properties={ + 'name': openapi.Schema(type=openapi.TYPE_STRING, title=_("Version Name"), + description=_("Version Name")) + } + ) diff --git a/apps/application/swagger_api/chat_api.py b/apps/application/swagger_api/chat_api.py index 29f60c3d0ca..c5fa614c964 100644 --- a/apps/application/swagger_api/chat_api.py +++ b/apps/application/swagger_api/chat_api.py @@ -10,6 +10,155 @@ from application.swagger_api.application_api import ApplicationApi from common.mixins.api_mixin import ApiMixin +from django.utils.translation import gettext_lazy as _ + + +class ChatClientHistoryApi(ApiMixin): + @staticmethod + def get_request_params_api(): + return 
[openapi.Parameter(name='application_id', + in_=openapi.IN_PATH, + type=openapi.TYPE_STRING, + required=True, + description=_('Application ID')) + ] + + class Operate(ApiMixin): + @staticmethod + def get_request_params_api(): + return [openapi.Parameter(name='application_id', + in_=openapi.IN_PATH, + type=openapi.TYPE_STRING, + required=True, + description=_('Application ID')), + openapi.Parameter(name='chat_id', + in_=openapi.IN_PATH, + type=openapi.TYPE_STRING, + required=True, + description=_('Conversation ID')), + ] + + class ReAbstract(ApiMixin): + @staticmethod + def get_request_body_api(): + return openapi.Schema( + type=openapi.TYPE_OBJECT, + required=['abstract'], + properties={ + 'abstract': openapi.Schema(type=openapi.TYPE_STRING, title=_("abstract"), + description=_("abstract")) + + } + ) + + +class OpenAIChatApi(ApiMixin): + @staticmethod + def get_response_body_api(): + return openapi.Responses(responses={ + 200: openapi.Response(description=_('response parameters'), + schema=openapi.Schema(type=openapi.TYPE_OBJECT, + required=['id', + 'choices'], + properties={ + 'id': openapi.Schema( + type=openapi.TYPE_STRING, + title=_( + "Conversation ID")), + 'choices': openapi.Schema( + type=openapi.TYPE_ARRAY, + items=openapi.Schema( + type=openapi.TYPE_OBJECT, + required=[ + 'message'], + properties={ + 'finish_reason': openapi.Schema( + type=openapi.TYPE_STRING, ), + 'index': openapi.Schema( + type=openapi.TYPE_INTEGER), + 'answer_list': openapi.Schema( + type=openapi.TYPE_ARRAY, + items=openapi.Schema( + type=openapi.TYPE_OBJECT, + required=[ + 'content'], + properties={ + 'content': openapi.Schema( + type=openapi.TYPE_STRING), + 'view_type': openapi.Schema( + type=openapi.TYPE_STRING), + 'runtime_node_id': openapi.Schema( + type=openapi.TYPE_STRING), + 'chat_record_id': openapi.Schema( + type=openapi.TYPE_STRING), + 'reasoning_content': openapi.Schema( + type=openapi.TYPE_STRING), + } + )), + 'message': openapi.Schema( + type=openapi.TYPE_OBJECT, + 
required=[ + 'content'], + properties={ + 'content': openapi.Schema( + type=openapi.TYPE_STRING), + 'role': openapi.Schema( + type=openapi.TYPE_STRING) + + }), + + } + )), + 'created': openapi.Schema( + type=openapi.TYPE_INTEGER), + 'model': openapi.Schema( + type=openapi.TYPE_STRING), + 'object': openapi.Schema( + type=openapi.TYPE_STRING), + 'usage': openapi.Schema( + type=openapi.TYPE_OBJECT, + required=[ + 'completion_tokens', + 'prompt_tokens', + 'total_tokens'], + properties={ + 'completion_tokens': openapi.Schema( + type=openapi.TYPE_INTEGER), + 'prompt_tokens': openapi.Schema( + type=openapi.TYPE_INTEGER), + 'total_tokens': openapi.Schema( + type=openapi.TYPE_INTEGER) + }) + + }))}) + + @staticmethod + def get_request_body_api(): + return openapi.Schema(type=openapi.TYPE_OBJECT, + required=['message'], + properties={ + 'messages': openapi.Schema(type=openapi.TYPE_ARRAY, title=_("problem"), + description=_("problem"), + items=openapi.Schema(type=openapi.TYPE_OBJECT, + required=['role', 'content'], + properties={ + 'content': openapi.Schema( + type=openapi.TYPE_STRING, + title=_("Question content"), + default=''), + 'role': openapi.Schema( + type=openapi.TYPE_STRING, + title=_('role'), + default="user") + } + )), + 'chat_id': openapi.Schema(type=openapi.TYPE_STRING, title=_("Conversation ID")), + 're_chat': openapi.Schema(type=openapi.TYPE_BOOLEAN, title=_("regenerate"), + default=False), + 'stream': openapi.Schema(type=openapi.TYPE_BOOLEAN, title=_("Stream Output"), + default=True) + + }) class ChatApi(ApiMixin): @@ -19,9 +168,68 @@ def get_request_body_api(): type=openapi.TYPE_OBJECT, required=['message'], properties={ - 'message': openapi.Schema(type=openapi.TYPE_STRING, title="问题", description="问题"), - 're_chat': openapi.Schema(type=openapi.TYPE_BOOLEAN, title="重新生成", default=False), - 'stream': openapi.Schema(type=openapi.TYPE_BOOLEAN, title="重新生成", default=True) + 'message': openapi.Schema(type=openapi.TYPE_STRING, title=_("problem"), 
description=_("problem")), + 're_chat': openapi.Schema(type=openapi.TYPE_BOOLEAN, title=_("regenerate"), default=False), + 'stream': openapi.Schema(type=openapi.TYPE_BOOLEAN, title=_("Is it streaming output"), default=True), + + 'form_data': openapi.Schema(type=openapi.TYPE_OBJECT, title=_("Form data"), + description=_("Form data"), + default={}), + 'image_list': openapi.Schema( + type=openapi.TYPE_ARRAY, + title=_("Image list"), + description=_("Image list"), + items=openapi.Schema( + type=openapi.TYPE_OBJECT, + properties={ + 'name': openapi.Schema(type=openapi.TYPE_STRING, + title=_("Image name")), + 'url': openapi.Schema(type=openapi.TYPE_STRING, + title=_("Image URL")), + 'file_id': openapi.Schema(type=openapi.TYPE_STRING), + } + ), + default=[] + ), + 'document_list': openapi.Schema(type=openapi.TYPE_ARRAY, title=_("Document list"), + description=_("Document list"), + items=openapi.Schema( + type=openapi.TYPE_OBJECT, + properties={ + # 定义对象的具体属性 + 'name': openapi.Schema(type=openapi.TYPE_STRING, + title=_("Document name")), + 'url': openapi.Schema(type=openapi.TYPE_STRING, + title=_("Document URL")), + 'file_id': openapi.Schema(type=openapi.TYPE_STRING), + } + ), + default=[]), + 'audio_list': openapi.Schema(type=openapi.TYPE_ARRAY, title=_("Audio list"), + description=_("Audio list"), + items=openapi.Schema( + type=openapi.TYPE_OBJECT, + properties={ + 'name': openapi.Schema(type=openapi.TYPE_STRING, + title=_("Audio name")), + 'url': openapi.Schema(type=openapi.TYPE_STRING, + title=_("Audio URL")), + 'file_id': openapi.Schema(type=openapi.TYPE_STRING), + } + ), + default=[]), + 'runtime_node_id': openapi.Schema(type=openapi.TYPE_STRING, title=_("Runtime node id"), + description=_("Runtime node id"), + default=""), + 'node_data': openapi.Schema(type=openapi.TYPE_OBJECT, title=_("Node data"), + description=_("Node data"), + default={}), + 'chat_record_id': openapi.Schema(type=openapi.TYPE_STRING, title=_("Conversation record id"), + 
description=_("Conversation record id"), + default=""), + 'child_node': openapi.Schema(type=openapi.TYPE_STRING, title=_("Child node"), + description=_("Child node"), + default={}), } ) @@ -35,26 +243,26 @@ def get_response_body_api(): properties={ 'id': openapi.Schema(type=openapi.TYPE_STRING, title="id", description="id", default="xx"), - 'application_id': openapi.Schema(type=openapi.TYPE_STRING, title="应用id", - description="应用id", default='应用id'), - 'abstract': openapi.Schema(type=openapi.TYPE_STRING, title="摘要", - description="摘要", default='摘要'), - 'chat_id': openapi.Schema(type=openapi.TYPE_STRING, title="对话id", - description="对话id", default="对话id"), - 'chat_record_count': openapi.Schema(type=openapi.TYPE_STRING, title="对话提问数量", - description="对话提问数量", - default="对话提问数量"), - 'mark_sum': openapi.Schema(type=openapi.TYPE_STRING, title="标记数量", - description="标记数量", default=1), - 'star_num': openapi.Schema(type=openapi.TYPE_STRING, title="点赞数量", - description="点赞数量", default=1), - 'trample_num': openapi.Schema(type=openapi.TYPE_NUMBER, title="点踩数量", - description="点踩数量", default=1), - 'update_time': openapi.Schema(type=openapi.TYPE_STRING, title="修改时间", - description="修改时间", + 'application_id': openapi.Schema(type=openapi.TYPE_STRING, title=_("Application ID"), + description=_("Application ID"), default=_('Application ID')), + 'abstract': openapi.Schema(type=openapi.TYPE_STRING, title=_("abstract"), + description=_("abstract"), default=_('abstract')), + 'chat_id': openapi.Schema(type=openapi.TYPE_STRING, title=_("Conversation ID"), + description=_("Conversation ID"), default=_("Conversation ID")), + 'chat_record_count': openapi.Schema(type=openapi.TYPE_STRING, title=_("Number of dialogue questions"), + description=_("Number of dialogue questions"), + default=0), + 'mark_sum': openapi.Schema(type=openapi.TYPE_STRING, title=_("Number of tags"), + description=_("Number of tags"), default=1), + 'star_num': openapi.Schema(type=openapi.TYPE_STRING, title=_("Number of 
likes"), + description=_("Number of likes"), default=1), + 'trample_num': openapi.Schema(type=openapi.TYPE_NUMBER, title=_("Number of clicks"), + description=_("Number of clicks"), default=1), + 'update_time': openapi.Schema(type=openapi.TYPE_STRING, title=_("Change time"), + description=_("Change time"), default="1970-01-01 00:00:00"), - 'create_time': openapi.Schema(type=openapi.TYPE_STRING, title="创建时间", - description="创建时间", + 'create_time': openapi.Schema(type=openapi.TYPE_STRING, title=_("Creation time"), + description=_("Creation time"), default="1970-01-01 00:00:00" ) } @@ -67,10 +275,21 @@ def get_request_params_api(): in_=openapi.IN_PATH, type=openapi.TYPE_STRING, required=True, - description='应用id'), + description=_('Application ID')), ] + class OpenWorkFlowTemp(ApiMixin): + @staticmethod + def get_request_body_api(): + return openapi.Schema( + type=openapi.TYPE_OBJECT, + required=[], + properties={ + 'work_flow': ApplicationApi.WorkFlow.get_request_body_api() + } + ) + class OpenTempChat(ApiMixin): @staticmethod def get_request_body_api(): @@ -79,41 +298,64 @@ def get_request_body_api(): required=['model_id', 'multiple_rounds_dialogue', 'dataset_setting', 'model_setting', 'problem_optimization'], properties={ - 'id': openapi.Schema(type=openapi.TYPE_STRING, title="应用id", - description="应用id,修改的时候传,创建的时候不传"), - 'model_id': openapi.Schema(type=openapi.TYPE_STRING, title="模型id", description="模型id"), + 'id': openapi.Schema(type=openapi.TYPE_STRING, title=_("Application ID"), + description=_( + "Application ID, pass when modifying, do not pass when creating")), + 'model_id': openapi.Schema(type=openapi.TYPE_STRING, title=_("Model ID"), + description=_("Model ID")), 'dataset_id_list': openapi.Schema(type=openapi.TYPE_ARRAY, items=openapi.Schema(type=openapi.TYPE_STRING), - title="关联知识库Id列表", description="关联知识库Id列表"), - 'multiple_rounds_dialogue': openapi.Schema(type=openapi.TYPE_BOOLEAN, title="是否开启多轮会话", - description="是否开启多轮会话"), + title=_("List of 
associated knowledge base IDs"), + description=_("List of associated knowledge base IDs")), + 'multiple_rounds_dialogue': openapi.Schema(type=openapi.TYPE_BOOLEAN, + title=_("Do you want to initiate multiple sessions"), + description=_( + "Do you want to initiate multiple sessions")), 'dataset_setting': ApplicationApi.DatasetSetting.get_request_body_api(), 'model_setting': ApplicationApi.ModelSetting.get_request_body_api(), - 'problem_optimization': openapi.Schema(type=openapi.TYPE_BOOLEAN, title="问题优化", - description="是否开启问题优化", default=True) + 'problem_optimization': openapi.Schema(type=openapi.TYPE_BOOLEAN, title=_("Problem optimization"), + description=_("Do you want to enable problem optimization"), + default=True) } ) + @staticmethod + def get_response_body_api(): + return openapi.Schema( + type=openapi.TYPE_STRING, + title=_("Conversation ID"), + description=_("Conversation ID"), + default="chat_id" + ) + @staticmethod def get_request_params_api(): return [openapi.Parameter(name='application_id', in_=openapi.IN_PATH, type=openapi.TYPE_STRING, required=True, - description='应用id'), + description=_('Application ID')), openapi.Parameter(name='history_day', in_=openapi.IN_QUERY, type=openapi.TYPE_NUMBER, required=True, - description='历史天数'), + description=_('Historical days')), openapi.Parameter(name='abstract', in_=openapi.IN_QUERY, type=openapi.TYPE_STRING, required=False, - description="摘要"), + description=_("abstract")), openapi.Parameter(name='min_star', in_=openapi.IN_QUERY, type=openapi.TYPE_INTEGER, required=False, - description="最小点赞数"), + description=_("Minimum number of likes")), openapi.Parameter(name='min_trample', in_=openapi.IN_QUERY, type=openapi.TYPE_INTEGER, required=False, - description="最小点踩数"), + description=_("Minimum number of clicks")), openapi.Parameter(name='comparer', in_=openapi.IN_QUERY, type=openapi.TYPE_STRING, required=False, - description="or|and 比较器") + description=_("or|and comparator")), + openapi.Parameter(name='start_time', 
in_=openapi.IN_QUERY, + type=openapi.TYPE_STRING, + required=True, + description=_('start time')), + openapi.Parameter(name='end_time', in_=openapi.IN_QUERY, + type=openapi.TYPE_STRING, + required=True, + description=_('End time')), ] @@ -124,12 +366,17 @@ def get_request_params_api(): in_=openapi.IN_PATH, type=openapi.TYPE_STRING, required=True, - description='应用id'), + description=_('Application ID')), openapi.Parameter(name='chat_id', in_=openapi.IN_PATH, type=openapi.TYPE_STRING, required=True, - description='对话id'), + description=_('Conversation ID')), + openapi.Parameter(name='order_asc', + in_=openapi.IN_QUERY, + type=openapi.TYPE_BOOLEAN, + required=False, + description=_('Is it ascending order')), ] @staticmethod @@ -142,34 +389,39 @@ def get_response_body_api(): properties={ 'id': openapi.Schema(type=openapi.TYPE_STRING, title="id", description="id", default="xx"), - 'chat': openapi.Schema(type=openapi.TYPE_STRING, title="会话日志id", - description="会话日志id", default='会话日志id'), - 'vote_status': openapi.Schema(type=openapi.TYPE_STRING, title="投票状态", - description="投票状态", default="投票状态"), - 'dataset': openapi.Schema(type=openapi.TYPE_STRING, title="数据集id", description="数据集id", - default="数据集id"), - 'paragraph': openapi.Schema(type=openapi.TYPE_STRING, title="段落id", - description="段落id", default=1), - 'source_id': openapi.Schema(type=openapi.TYPE_STRING, title="资源id", - description="资源id", default=1), - 'source_type': openapi.Schema(type=openapi.TYPE_STRING, title="资源类型", - description="资源类型", default='xxx'), - 'message_tokens': openapi.Schema(type=openapi.TYPE_INTEGER, title="问题消耗token数量", - description="问题消耗token数量", default=0), - 'answer_tokens': openapi.Schema(type=openapi.TYPE_INTEGER, title="答案消耗token数量", - description="答案消耗token数量", default=0), - 'improve_paragraph_id_list': openapi.Schema(type=openapi.TYPE_STRING, title="改进标注列表", - description="改进标注列表", + 'chat': openapi.Schema(type=openapi.TYPE_STRING, title=_("Session log id"), + 
description=_("Conversation log id"), default=_('Conversation log id')), + 'vote_status': openapi.Schema(type=openapi.TYPE_STRING, title=_("Voting Status"), + description=_("Voting Status"), default=_("Voting Status")), + 'dataset': openapi.Schema(type=openapi.TYPE_STRING, title=_("Dataset id"), description=_("Dataset id"), + default=_("Dataset id")), + 'paragraph': openapi.Schema(type=openapi.TYPE_STRING, title=_("Paragraph id"), + description=_("Paragraph id"), default=1), + 'source_id': openapi.Schema(type=openapi.TYPE_STRING, title=_("Resource ID"), + description=_("Resource ID"), default=1), + 'source_type': openapi.Schema(type=openapi.TYPE_STRING, title=_("Resource Type"), + description=_("Resource Type"), default='xxx'), + 'message_tokens': openapi.Schema(type=openapi.TYPE_INTEGER, + title=_("Number of tokens consumed by the question"), + description=_("Number of tokens consumed by the question"), default=0), + 'answer_tokens': openapi.Schema(type=openapi.TYPE_INTEGER, + title=_("The number of tokens consumed by the answer"), + description=_("The number of tokens consumed by the answer"), + default=0), + 'improve_paragraph_id_list': openapi.Schema(type=openapi.TYPE_STRING, + title=_("Improved annotation list"), + description=_("Improved annotation list"), default=[]), - 'index': openapi.Schema(type=openapi.TYPE_STRING, title="对应会话 对应下标", - description="对应会话id对应下标", - default="对应会话id对应下标" + 'index': openapi.Schema(type=openapi.TYPE_STRING, + title=_("Corresponding session Corresponding subscript"), + description=_("Corresponding session id corresponding subscript"), + default=0 ), - 'update_time': openapi.Schema(type=openapi.TYPE_STRING, title="修改时间", - description="修改时间", + 'update_time': openapi.Schema(type=openapi.TYPE_STRING, title=_("Modification time"), + description=_("Modification time"), default="1970-01-01 00:00:00"), - 'create_time': openapi.Schema(type=openapi.TYPE_STRING, title="创建时间", - description="创建时间", + 'create_time': 
openapi.Schema(type=openapi.TYPE_STRING, title=_("Creation time"), + description=_("Creation time"), default="1970-01-01 00:00:00" ) } @@ -183,27 +435,27 @@ def get_request_params_api(): in_=openapi.IN_PATH, type=openapi.TYPE_STRING, required=True, - description='应用id'), + description=_('Application ID')), openapi.Parameter(name='chat_id', in_=openapi.IN_PATH, type=openapi.TYPE_STRING, required=True, - description='会话id'), + description=_('Conversation ID')), openapi.Parameter(name='chat_record_id', in_=openapi.IN_PATH, type=openapi.TYPE_STRING, required=True, - description='会话记录id'), + description=_('Conversation record id')), openapi.Parameter(name='dataset_id', in_=openapi.IN_PATH, type=openapi.TYPE_STRING, required=True, - description='知识库id'), + description=_('Knowledge base id')), openapi.Parameter(name='document_id', in_=openapi.IN_PATH, type=openapi.TYPE_STRING, required=True, - description='文档id'), + description=_('Document id')), ] @staticmethod @@ -212,14 +464,46 @@ def get_request_body_api(): type=openapi.TYPE_OBJECT, required=['content'], properties={ - 'title': openapi.Schema(type=openapi.TYPE_STRING, title="段落标题", - description="段落标题"), - 'content': openapi.Schema(type=openapi.TYPE_STRING, title="段落内容", - description="段落内容") + 'title': openapi.Schema(type=openapi.TYPE_STRING, title=_("Section title"), + description=_("Section title")), + 'content': openapi.Schema(type=openapi.TYPE_STRING, title=_("Paragraph content"), + description=_("Paragraph content")) } ) + @staticmethod + def get_request_body_api_post(): + return openapi.Schema( + type=openapi.TYPE_OBJECT, + required=['dataset_id', 'document_id', 'chat_ids'], + properties={ + 'dataset_id': openapi.Schema(type=openapi.TYPE_STRING, title=_("Knowledge base id"), + description=_("Knowledge base id")), + 'document_id': openapi.Schema(type=openapi.TYPE_STRING, title=_("Document id"), + description=_("Document id")), + 'chat_ids': openapi.Schema(type=openapi.TYPE_ARRAY, title=_("Conversation id list"), 
+ description=_("Conversation id list"), + items=openapi.Schema(type=openapi.TYPE_STRING)) + + } + ) + + @staticmethod + def get_request_params_api_post(): + return [openapi.Parameter(name='application_id', + in_=openapi.IN_PATH, + type=openapi.TYPE_STRING, + required=True, + description=_('Application ID')), + openapi.Parameter(name='dataset_id', + in_=openapi.IN_PATH, + type=openapi.TYPE_STRING, + required=True, + description=_('Knowledge base id')), + + ] + class VoteApi(ApiMixin): @staticmethod @@ -228,17 +512,17 @@ def get_request_params_api(): in_=openapi.IN_PATH, type=openapi.TYPE_STRING, required=True, - description='应用id'), + description=_('Application ID')), openapi.Parameter(name='chat_id', in_=openapi.IN_PATH, type=openapi.TYPE_STRING, required=True, - description='会话id'), + description=_('Conversation ID')), openapi.Parameter(name='chat_record_id', in_=openapi.IN_PATH, type=openapi.TYPE_STRING, required=True, - description='会话记录id') + description=_('Conversation record id')) ] @staticmethod @@ -247,8 +531,8 @@ def get_request_body_api(): type=openapi.TYPE_OBJECT, required=['vote_status'], properties={ - 'vote_status': openapi.Schema(type=openapi.TYPE_STRING, title="投票状态", - description="-1:取消投票|0:赞同|1:反对"), + 'vote_status': openapi.Schema(type=openapi.TYPE_STRING, title=_("Voting Status"), + description=_("-1: Cancel vote | 0: Agree | 1: Oppose")), } ) @@ -261,17 +545,17 @@ def get_request_body_api(): in_=openapi.IN_PATH, type=openapi.TYPE_STRING, required=True, - description='应用id'), + description=_('Application ID')), openapi.Parameter(name='chat_id', in_=openapi.IN_PATH, type=openapi.TYPE_STRING, required=True, - description='会话id'), + description=_('Conversation ID')), openapi.Parameter(name='chat_record_id', in_=openapi.IN_PATH, type=openapi.TYPE_STRING, required=True, - description='会话记录id') + description=_('Conversation record id')) ] @staticmethod @@ -284,27 +568,28 @@ def get_response_body_api(): properties={ 'id': 
openapi.Schema(type=openapi.TYPE_STRING, title="id", description="id", default="xx"), - 'content': openapi.Schema(type=openapi.TYPE_STRING, title="段落内容", - description="段落内容", default='段落内容'), - 'title': openapi.Schema(type=openapi.TYPE_STRING, title="标题", - description="标题", default="xxx的描述"), - 'hit_num': openapi.Schema(type=openapi.TYPE_INTEGER, title="命中数量", description="命中数量", + 'content': openapi.Schema(type=openapi.TYPE_STRING, title=_("Paragraph content"), + description=_("Paragraph content"), default=_('Paragraph content')), + 'title': openapi.Schema(type=openapi.TYPE_STRING, title=_("title"), + description=_("title"), default=_("Description of xxx")), + 'hit_num': openapi.Schema(type=openapi.TYPE_INTEGER, title=_("Number of hits"), + description=_("Number of hits"), default=1), - 'star_num': openapi.Schema(type=openapi.TYPE_INTEGER, title="点赞数量", - description="点赞数量", default=1), - 'trample_num': openapi.Schema(type=openapi.TYPE_INTEGER, title="点踩数量", - description="点踩数", default=1), - 'dataset_id': openapi.Schema(type=openapi.TYPE_STRING, title="知识库id", - description="知识库id", default='xxx'), - 'document_id': openapi.Schema(type=openapi.TYPE_STRING, title="文档id", - description="文档id", default='xxx'), - 'is_active': openapi.Schema(type=openapi.TYPE_BOOLEAN, title="是否可用", - description="是否可用", default=True), - 'update_time': openapi.Schema(type=openapi.TYPE_STRING, title="修改时间", - description="修改时间", + 'star_num': openapi.Schema(type=openapi.TYPE_INTEGER, title=_("Number of Likes"), + description=_("Number of Likes"), default=1), + 'trample_num': openapi.Schema(type=openapi.TYPE_INTEGER, title=_("Number of thumbs-downs"), + description=_("Number of thumbs-downs"), default=1), + 'dataset_id': openapi.Schema(type=openapi.TYPE_STRING, title=_("Knowledge base id"), + description=_("Knowledge base id"), default='xxx'), + 'document_id': openapi.Schema(type=openapi.TYPE_STRING, title=_("Document id"), + description=_("Document id"), default='xxx'), + 'is_active': 
openapi.Schema(type=openapi.TYPE_BOOLEAN, title=_("Availability"), + description=_("Availability"), default=True), + 'update_time': openapi.Schema(type=openapi.TYPE_STRING, title=_("Modification time"), + description=_("Modification time"), default="1970-01-01 00:00:00"), - 'create_time': openapi.Schema(type=openapi.TYPE_STRING, title="创建时间", - description="创建时间", + 'create_time': openapi.Schema(type=openapi.TYPE_STRING, title=_("Creation time"), + description=_("Creation time"), default="1970-01-01 00:00:00" ) } diff --git a/apps/application/task/__init__.py b/apps/application/task/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/apps/application/template/embed.js b/apps/application/template/embed.js index 33545af47d6..168bc3b4ba7 100644 --- a/apps/application/template/embed.js +++ b/apps/application/template/embed.js @@ -1,3 +1,4 @@ +(function() { const guideHtml=`
@@ -19,54 +20,31 @@ const guideHtml=`
` const chatButtonHtml= -`
- - - - - - - - - - - - - - - - - - - - - - - - - - - +`
+
` - -const getChatContainerHtml=(protocol,host,token)=>{ + +const getChatContainerHtml=(protocol,host,token,query)=>{ return `
- -
+ +
+ +
- -
-
- - + + +
+
+ +
` } /** * 初始化引导 - * @param {*} root + * @param {*} root */ const initGuide=(root)=>{ root.insertAdjacentHTML("beforeend",guideHtml) @@ -84,16 +62,32 @@ const initChat=(root)=>{ // 添加对话icon root.insertAdjacentHTML("beforeend",chatButtonHtml) // 添加对话框 - root.insertAdjacentHTML('beforeend',getChatContainerHtml('{{protocol}}','{{host}}','{{token}}')) + root.insertAdjacentHTML('beforeend',getChatContainerHtml('{{protocol}}','{{host}}','{{token}}','{{query}}')) // 按钮元素 const chat_button=root.querySelector('.maxkb-chat-button') + const chat_button_img=root.querySelector('.maxkb-chat-button > img') // 对话框元素 const chat_container=root.querySelector('#maxkb-chat-container') + // 引导层 + const mask_content = root.querySelector('.maxkb-mask > .maxkb-content') + const mask_tips = root.querySelector('.maxkb-tips') + chat_button_img.onload=(event)=>{ + if(mask_content){ + mask_content.style.width = chat_button_img.width + 'px' + mask_content.style.height = chat_button_img.height + 'px' + if('{{x_type}}'=='left'){ + mask_tips.style.marginLeft = (chat_button_img.naturalWidth>500?500:chat_button_img.naturalWidth)-64 + 'px' + }else{ + mask_tips.style.marginRight = (chat_button_img.naturalWidth>500?500:chat_button_img.naturalWidth)-64 + 'px' + } + } + } const viewport=root.querySelector('.maxkb-openviewport') const closeviewport=root.querySelector('.maxkb-closeviewport') const close_func=()=>{ chat_container.style['display']=chat_container.style['display']=='block'?'none':'block' + chat_button.style['display']=chat_container.style['display']=='block'?'none':'block' } close_icon=chat_container.querySelector('.maxkb-chat-close') chat_button.onclick = close_func @@ -108,6 +102,26 @@ const initChat=(root)=>{ viewport.classList.add('maxkb-viewportnone') closeviewport.classList.remove('maxkb-viewportnone') } + } + const drag=(e)=>{ + if (['touchmove','touchstart'].includes(e.type)) { + chat_button.style.top=(e.touches[0].clientY-chat_button_img.naturalHeight/2)+'px' + 
chat_button.style.left=(e.touches[0].clientX-chat_button_img.naturalWidth/2)+'px' + } else { + chat_button.style.top=(e.y-chat_button_img.naturalHeight/2)+'px' + chat_button.style.left=(e.x-chat_button_img.naturalWidth/2)+'px' + } + chat_button.style.width =chat_button_img.naturalWidth+'px' + chat_button.style.height =chat_button_img.naturalHeight+'px' + } + if({{is_draggable}}){ + chat_button.addEventListener("drag",drag) + chat_button.addEventListener("dragover",(e)=>{ + e.preventDefault() + }) + chat_button.addEventListener("dragend",drag) + chat_button.addEventListener("touchstart",drag) + chat_button.addEventListener("touchmove",drag) } viewport.onclick=viewport_func closeviewport.onclick=viewport_func @@ -118,20 +132,21 @@ const initChat=(root)=>{ function initMaxkb(){ const maxkb=document.createElement('div') const root=document.createElement('div') - root.id="maxkb" - initMaxkbStyle(maxkb) + const maxkbId = 'maxkb-'+'{{max_kb_id}}' + root.id=maxkbId + initMaxkbStyle(maxkb, maxkbId) maxkb.appendChild(root) document.body.appendChild(maxkb) const maxkbMaskTip=localStorage.getItem('maxkbMaskTip') - if(maxkbMaskTip==null){ + if(maxkbMaskTip==null && {{show_guide}}){ initGuide(root) } initChat(root) } - + // 初始化全局样式 -function initMaxkbStyle(root){ +function initMaxkbStyle(root, maxkbId){ style=document.createElement('style') style.type='text/css' style.innerText= ` @@ -155,7 +170,7 @@ function initMaxkbStyle(root){ #maxkb .maxkb-mask { position: fixed; - z-index: 999; + z-index: 10001; background-color: transparent; height: 100%; width: 100%; @@ -163,25 +178,24 @@ function initMaxkbStyle(root){ left: 0; } #maxkb .maxkb-mask .maxkb-content { - width: 45px; - height: 48px; - box-shadow: 1px 1px 1px 2000px rgba(0,0,0,.6); - border-radius: 50% 0 0 50%; + width: 64px; + height: 64px; + box-shadow: 1px 1px 1px 9999px rgba(0,0,0,.6); position: absolute; - right: 0; - bottom: 38px; - z-index: 1000; + {{x_type}}: {{x_value}}px; + {{y_type}}: {{y_value}}px; + z-index: 
10001; } #maxkb .maxkb-tips { position: fixed; - bottom: 30px; - right: 60px; + {{x_type}}:calc({{x_value}}px + 75px); + {{y_type}}: calc({{y_value}}px + 0px); padding: 22px 24px 24px; border-radius: 6px; color: #ffffff; font-size: 14px; background: #3370FF; - z-index: 1000; + z-index: 10001; } #maxkb .maxkb-tips .maxkb-arrow { position: absolute; @@ -192,8 +206,8 @@ function initMaxkbStyle(root){ transform: rotate(45deg); box-sizing: border-box; /* left */ - right: -5px; - bottom: 33px; + {{x_type}}: -5px; + {{y_type}}: 33px; border-left-color: transparent; border-bottom-color: transparent } @@ -226,7 +240,7 @@ function initMaxkbStyle(root){ } #maxkb-chat-container { - width: 420px; + width: 450px; height: 600px; display:none; } @@ -240,9 +254,10 @@ function initMaxkbStyle(root){ #maxkb .maxkb-chat-button{ position: fixed; - bottom: 30px; - right: 0; + {{x_type}}: {{x_value}}px; + {{y_type}}: {{y_value}}px; cursor: pointer; + z-index:10000; } #maxkb #maxkb-chat-container{ z-index:10000;position: relative; @@ -250,15 +265,16 @@ function initMaxkbStyle(root){ border: 1px solid #ffffff; background: linear-gradient(188deg, rgba(235, 241, 255, 0.20) 39.6%, rgba(231, 249, 255, 0.20) 94.3%), #EFF0F1; box-shadow: 0px 4px 8px 0px rgba(31, 35, 41, 0.10); - position: fixed;bottom: 20px;right: 45px;overflow: hidden; + position: fixed;bottom: 16px;right: 16px;overflow: hidden; } #maxkb #maxkb-chat-container .maxkb-operate{ - top: 15px; - right: 10px; + top: 18px; + right: 15px; position: absolute; display: flex; align-items: center; + line-height: 18px; } #maxkb #maxkb-chat-container .maxkb-operate .maxkb-chat-close{ margin-left:15px; @@ -292,6 +308,7 @@ function initMaxkbStyle(root){ height: 600px; } }` + .replaceAll('#maxkb ',`#${maxkbId} `) root.appendChild(style) } @@ -304,4 +321,5 @@ function embedChatbot() { initMaxkb() } else console.error('invalid parameter') } -window.onload = embedChatbot +window.addEventListener('load',embedChatbot) +})(); \ No newline at end of 
file diff --git a/apps/application/urls.py b/apps/application/urls.py index 30866c81a22..b294289541e 100644 --- a/apps/application/urls.py +++ b/apps/application/urls.py @@ -5,10 +5,14 @@ app_name = "application" urlpatterns = [ path('application', views.Application.as_view(), name="application"), - path('application/profile', views.Application.Profile.as_view()), + path('application/import', views.Application.Import.as_view()), + path('application/profile', views.Application.Profile.as_view(), name='application/profile'), path('application/embed', views.Application.Embed.as_view()), path('application/authentication', views.Application.Authentication.as_view()), + path('application/mcp_servers', views.Application.McpServers.as_view()), + path('application//publish', views.Application.Publish.as_view()), path('application//edit_icon', views.Application.EditIcon.as_view()), + path('application//export', views.Application.Export.as_view()), path('application//statistics/customer_count', views.ApplicationStatistics.CustomerCount.as_view()), path('application//statistics/customer_count_trend', @@ -18,6 +22,14 @@ path('application//statistics/chat_record_aggregate_trend', views.ApplicationStatistics.ChatRecordAggregateTrend.as_view()), path('application//model', views.Application.Model.as_view()), + path('application//function_lib', views.Application.FunctionLib.as_view()), + path('application//function_lib/', + views.Application.FunctionLib.Operate.as_view()), + path('application//application', views.Application.Application.as_view()), + path('application//application/', + views.Application.Application.Operate.as_view()), + path('application//model_params_form/', + views.Application.ModelParamsForm.as_view()), path('application//hit_test', views.Application.HitTest.as_view()), path('application//api_key', views.Application.ApplicationKey.as_view()), path("application//api_key/", @@ -28,13 +40,21 @@ path('application//access_token', 
views.Application.AccessToken.as_view(), name='application/access_token'), path('application//', views.Application.Page.as_view(), name='application_page'), - path('application//chat/open', views.ChatView.Open.as_view()), + path('application//chat/open', views.ChatView.Open.as_view(), name='application/open'), path("application/chat/open", views.ChatView.OpenTemp.as_view()), + path("application/chat_workflow/open", views.ChatView.OpenWorkFlowTemp.as_view()), + path("application//chat/client//", + views.ChatView.ClientChatHistoryPage.as_view()), + path("application//chat/client/", + views.ChatView.ClientChatHistoryPage.Operate.as_view()), path('application//chat/export', views.ChatView.Export.as_view(), name='export'), + path('application//chat/completions', views.Openai.as_view(), + name='application/chat_completions'), path('application//chat', views.ChatView.as_view(), name='chats'), path('application//chat//', views.ChatView.Page.as_view()), path('application//chat/', views.ChatView.Operate.as_view()), path('application//chat//chat_record/', views.ChatView.ChatRecord.as_view()), + path('application//chat//upload_file', views.ChatView.UploadFile.as_view()), path('application//chat//chat_record//', views.ChatView.ChatRecord.Page.as_view()), path('application//chat//chat_record/', @@ -46,11 +66,27 @@ 'application//chat//chat_record//dataset//document_id//improve', views.ChatView.ChatRecord.Improve.as_view(), name=''), + path( + 'application//dataset//improve', + views.ChatView.ChatRecord.Improve.as_view(), + name=''), path('application//chat//chat_record//improve', views.ChatView.ChatRecord.ChatRecordImprove.as_view()), - path('application/chat_message/', views.ChatView.Message.as_view()), + path('application/chat_message/', views.ChatView.Message.as_view(), name='application/message'), path( 'application//chat//chat_record//dataset//document_id//improve/', views.ChatView.ChatRecord.Improve.Operate.as_view(), - name='') + name=''), + 
path('application//speech_to_text', views.Application.SpeechToText.as_view(), + name='application/audio'), + path('application//text_to_speech', views.Application.TextToSpeech.as_view(), + name='application/audio'), + path('application//work_flow_version', views.ApplicationVersionView.as_view()), + path('application//work_flow_version//', + views.ApplicationVersionView.Page.as_view()), + path('application//work_flow_version/', + views.ApplicationVersionView.Operate.as_view()), + path('application//play_demo_text', views.Application.PlayDemoText.as_view(), + name='application/audio') + ] diff --git a/apps/application/views/__init__.py b/apps/application/views/__init__.py index 52d004041eb..24569c17e3b 100644 --- a/apps/application/views/__init__.py +++ b/apps/application/views/__init__.py @@ -8,3 +8,4 @@ """ from .application_views import * from .chat_views import * +from .application_version_views import * diff --git a/apps/application/views/application_version_views.py b/apps/application/views/application_version_views.py new file mode 100644 index 00000000000..de900936268 --- /dev/null +++ b/apps/application/views/application_version_views.py @@ -0,0 +1,94 @@ +# coding=utf-8 +""" + @project: MaxKB + @Author:虎 + @file: application_version_views.py + @date:2024/10/15 16:49 + @desc: +""" +from drf_yasg.utils import swagger_auto_schema +from rest_framework.decorators import action +from rest_framework.request import Request +from rest_framework.views import APIView + +from application.serializers.application_version_serializers import ApplicationVersionSerializer +from application.swagger_api.application_version_api import ApplicationVersionApi +from application.views import get_application_operation_object +from common.auth import has_permissions, TokenAuth +from common.constants.permission_constants import PermissionConstants, CompareConstants, ViewPermission, RoleConstants, \ + Permission, Group, Operate +from common.log.log import log +from common.response import 
result +from django.utils.translation import gettext_lazy as _ + + +class ApplicationVersionView(APIView): + authentication_classes = [TokenAuth] + + @action(methods=['GET'], detail=False) + @swagger_auto_schema(operation_summary=_("Get the application list"), + operation_id=_("Get the application list"), + manual_parameters=ApplicationVersionApi.Query.get_request_params_api(), + responses=result.get_api_array_response(ApplicationVersionApi.get_response_body_api()), + tags=[_('Application/Version')]) + @has_permissions(PermissionConstants.APPLICATION_READ, compare=CompareConstants.AND) + def get(self, request: Request, application_id: str): + return result.success( + ApplicationVersionSerializer.Query( + data={'name': request.query_params.get('name'), 'user_id': request.user.id, + 'application_id': application_id}).list()) + + class Page(APIView): + authentication_classes = [TokenAuth] + + @action(methods=['GET'], detail=False) + @swagger_auto_schema(operation_summary=_("Get the list of application versions by page"), + operation_id=_("Get the list of application versions by page"), + manual_parameters=result.get_page_request_params( + ApplicationVersionApi.Query.get_request_params_api()), + responses=result.get_page_api_response(ApplicationVersionApi.get_response_body_api()), + tags=[_('Application/Version')]) + @has_permissions(PermissionConstants.APPLICATION_READ, compare=CompareConstants.AND) + def get(self, request: Request, application_id: str, current_page: int, page_size: int): + return result.success( + ApplicationVersionSerializer.Query( + data={'name': request.query_params.get('name'), 'user_id': request.user, + 'application_id': application_id}).page( + current_page, page_size)) + + class Operate(APIView): + authentication_classes = [TokenAuth] + + @action(methods=['GET'], detail=False) + @swagger_auto_schema(operation_summary=_("Get application version details"), + operation_id=_("Get application version details"), + 
manual_parameters=ApplicationVersionApi.Operate.get_request_params_api(), + responses=result.get_api_response(ApplicationVersionApi.get_response_body_api()), + tags=[_('Application/Version')]) + @has_permissions(PermissionConstants.APPLICATION_READ, compare=CompareConstants.AND) + def get(self, request: Request, application_id: str, work_flow_version_id: str): + return result.success( + ApplicationVersionSerializer.Operate( + data={'user_id': request.user, + 'application_id': application_id, 'work_flow_version_id': work_flow_version_id}).one()) + + @action(methods=['PUT'], detail=False) + @swagger_auto_schema(operation_summary=_("Modify application version information"), + operation_id=_("Modify application version information"), + manual_parameters=ApplicationVersionApi.Operate.get_request_params_api(), + request_body=ApplicationVersionApi.Edit.get_request_body_api(), + responses=result.get_api_response(ApplicationVersionApi.get_response_body_api()), + tags=[_('Application/Version')]) + @has_permissions(ViewPermission( + [RoleConstants.ADMIN, RoleConstants.USER], + [lambda r, keywords: Permission(group=Group.APPLICATION, operate=Operate.MANAGE, + dynamic_tag=keywords.get('application_id'))], + compare=CompareConstants.AND)) + @log(menu='Application', operate="Modify application version information", + get_operation_object=lambda r, k: get_application_operation_object(k.get('application_id'))) + def put(self, request: Request, application_id: str, work_flow_version_id: str): + return result.success( + ApplicationVersionSerializer.Operate( + data={'application_id': application_id, 'work_flow_version_id': work_flow_version_id, + 'user_id': request.user.id}).edit( + request.data)) diff --git a/apps/application/views/application_views.py b/apps/application/views/application_views.py index 3ebed08995c..2628644f1d1 100644 --- a/apps/application/views/application_views.py +++ b/apps/application/views/application_views.py @@ -7,8 +7,11 @@ @desc: """ +from django.core 
import cache from django.http import HttpResponse +from django.utils.translation import gettext_lazy as _, gettext from drf_yasg.utils import swagger_auto_schema +from langchain_core.prompts import PromptTemplate from rest_framework.decorators import action from rest_framework.parsers import MultiPartParser from rest_framework.request import Request @@ -18,24 +21,28 @@ from application.serializers.application_statistics_serializers import ApplicationStatisticsSerializer from application.swagger_api.application_api import ApplicationApi from application.swagger_api.application_statistics_api import ApplicationStatisticsApi +from application.views.common import get_application_operation_object from common.auth import TokenAuth, has_permissions from common.constants.permission_constants import CompareConstants, PermissionConstants, Permission, Group, Operate, \ ViewPermission, RoleConstants from common.exception.app_exception import AppAuthenticationFailed +from common.log.log import log from common.response import result from common.swagger_api.common_api import CommonApi from common.util.common import query_params_to_single_dict from dataset.serializers.dataset_serializers import DataSetSerializers +chat_cache = cache.caches['chat_cache'] + class ApplicationStatistics(APIView): class CustomerCount(APIView): authentication_classes = [TokenAuth] @action(methods=["GET"], detail=False) - @swagger_auto_schema(operation_summary="用户统计", - operation_id="用户统计", - tags=["应用/统计"], + @swagger_auto_schema(operation_summary=_("User Statistics"), + operation_id=_("User Statistics"), + tags=[_("Application/Statistics")], manual_parameters=ApplicationStatisticsApi.get_request_params_api(), responses=result.get_api_response( ApplicationStatisticsApi.CustomerCount.get_response_body_api()) @@ -58,9 +65,9 @@ class CustomerCountTrend(APIView): authentication_classes = [TokenAuth] @action(methods=["GET"], detail=False) - @swagger_auto_schema(operation_summary="用户统计趋势", - 
operation_id="用户统计趋势", - tags=["应用/统计"], + @swagger_auto_schema(operation_summary=_("User demographic trends"), + operation_id=_("User demographic trends"), + tags=[_("Application/Statistics")], manual_parameters=ApplicationStatisticsApi.get_request_params_api(), responses=result.get_api_array_response( ApplicationStatisticsApi.CustomerCountTrend.get_response_body_api())) @@ -82,9 +89,9 @@ class ChatRecordAggregate(APIView): authentication_classes = [TokenAuth] @action(methods=["GET"], detail=False) - @swagger_auto_schema(operation_summary="对话相关统计", - operation_id="对话相关统计", - tags=["应用/统计"], + @swagger_auto_schema(operation_summary=_("Conversation statistics"), + operation_id=_("Conversation statistics"), + tags=[_("Application/Statistics")], manual_parameters=ApplicationStatisticsApi.get_request_params_api(), responses=result.get_api_response( ApplicationStatisticsApi.ChatRecordAggregate.get_response_body_api()) @@ -107,9 +114,9 @@ class ChatRecordAggregateTrend(APIView): authentication_classes = [TokenAuth] @action(methods=["GET"], detail=False) - @swagger_auto_schema(operation_summary="对话相关统计趋势", - operation_id="对话相关统计趋势", - tags=["应用/统计"], + @swagger_auto_schema(operation_summary=_("Dialogue-related statistical trends"), + operation_id=_("Dialogue-related statistical trends"), + tags=[_("Application/Statistics")], manual_parameters=ApplicationStatisticsApi.get_request_params_api(), responses=result.get_api_array_response( ApplicationStatisticsApi.ChatRecordAggregate.get_response_body_api()) @@ -137,9 +144,9 @@ class EditIcon(APIView): parser_classes = [MultiPartParser] @action(methods=['PUT'], detail=False) - @swagger_auto_schema(operation_summary="修改应用icon", - operation_id="修改应用icon", - tags=['应用'], + @swagger_auto_schema(operation_summary=_("Modify application icon"), + operation_id=_("Modify application icon"), + tags=[_('Application')], manual_parameters=ApplicationApi.EditApplicationIcon.get_request_params_api(), 
request_body=ApplicationApi.Operate.get_request_body_api()) @has_permissions(ViewPermission( @@ -148,31 +155,137 @@ class EditIcon(APIView): dynamic_tag=keywords.get('application_id'))], compare=CompareConstants.AND), PermissionConstants.APPLICATION_EDIT, compare=CompareConstants.AND) + @log(menu='Application', operate="Modify application icon", + get_operation_object=lambda r, k: get_application_operation_object(k.get('application_id'))) def put(self, request: Request, application_id: str): return result.success( ApplicationSerializer.IconOperate( data={'application_id': application_id, 'user_id': request.user.id, 'image': request.FILES.get('file')}).edit(request.data)) + class Import(APIView): + authentication_classes = [TokenAuth] + parser_classes = [MultiPartParser] + + @action(methods="POST", detail=False) + @swagger_auto_schema(operation_summary=_("Import Application"), operation_id=_("Import Application"), + manual_parameters=ApplicationApi.Import.get_request_params_api(), + tags=[_("Application")] + ) + @has_permissions(RoleConstants.ADMIN, RoleConstants.USER) + @log(menu='Application', operate="Import Application") + def post(self, request: Request): + return result.success(ApplicationSerializer.Import( + data={'user_id': request.user.id, 'file': request.FILES.get('file')}).import_()) + + class Export(APIView): + authentication_classes = [TokenAuth] + + @action(methods="GET", detail=False) + @swagger_auto_schema(operation_summary=_("Export Application"), operation_id=_("Export Application"), + manual_parameters=ApplicationApi.Export.get_request_params_api(), + tags=[_("Application")] + ) + @has_permissions(lambda r, keywords: Permission(group=Group.APPLICATION, operate=Operate.MANAGE, + dynamic_tag=keywords.get('application_id'))) + @log(menu='Application', operate="Export Application", + get_operation_object=lambda r, k: get_application_operation_object(k.get('application_id'))) + def get(self, request: Request, application_id: str): + return 
ApplicationSerializer.Operate( + data={'application_id': application_id, 'user_id': request.user.id}).export() + class Embed(APIView): @action(methods=["GET"], detail=False) - @swagger_auto_schema(operation_summary="获取嵌入js", - operation_id="获取嵌入js", - tags=["应用"], + @swagger_auto_schema(operation_summary=_("Get embedded js"), + operation_id=_("Get embedded js"), + tags=[_("Application")], manual_parameters=ApplicationApi.ApiKey.get_request_params_api()) def get(self, request: Request): return ApplicationSerializer.Embed( data={'protocol': request.query_params.get('protocol'), 'token': request.query_params.get('token'), - 'host': request.query_params.get('host'), }).get_embed() + 'host': request.query_params.get('host'), }).get_embed(params=request.query_params) class Model(APIView): authentication_classes = [TokenAuth] @action(methods=["GET"], detail=False) - @swagger_auto_schema(operation_summary="获取模型列表", - operation_id="获取模型列表", - tags=["应用"], - manual_parameters=ApplicationApi.ApiKey.get_request_params_api()) + @swagger_auto_schema(operation_summary=_("Get a list of models"), + operation_id=_("Get a list of models"), + tags=[_("Application")], + manual_parameters=ApplicationApi.Model.get_request_params_api()) + @has_permissions(ViewPermission( + [RoleConstants.ADMIN, RoleConstants.USER], + [lambda r, keywords: Permission(group=Group.APPLICATION, operate=Operate.USE, + dynamic_tag=keywords.get('application_id'))], + compare=CompareConstants.AND)) + def get(self, request: Request, application_id: str): + return result.success( + ApplicationSerializer.Operate( + data={'application_id': application_id, + 'user_id': request.user.id}).list_model(request.query_params.get('model_type'))) + + class ModelParamsForm(APIView): + authentication_classes = [TokenAuth] + + @action(methods=['GET'], detail=False) + @swagger_auto_schema(operation_summary=_("Get model parameter form"), + operation_id=_("Get model parameter form"), + tags=[_("Application")]) + 
@has_permissions(ViewPermission( + [RoleConstants.ADMIN, RoleConstants.USER], + [lambda r, keywords: Permission(group=Group.APPLICATION, operate=Operate.USE, + dynamic_tag=keywords.get('application_id'))], + compare=CompareConstants.AND)) + def get(self, request: Request, application_id: str, model_id: str): + return result.success( + ApplicationSerializer.Operate( + data={'application_id': application_id, + 'user_id': request.user.id}).get_model_params_form(model_id)) + + class FunctionLib(APIView): + authentication_classes = [TokenAuth] + + @action(methods=["GET"], detail=False) + @swagger_auto_schema(operation_summary=_("Get a list of function libraries"), + operation_id=_("Get a list of function libraries"), + tags=[_("Application")]) + @has_permissions(ViewPermission( + [RoleConstants.ADMIN, RoleConstants.USER], + [lambda r, keywords: Permission(group=Group.APPLICATION, operate=Operate.USE, + dynamic_tag=keywords.get('application_id'))], + compare=CompareConstants.AND)) + def get(self, request: Request, application_id: str): + return result.success( + ApplicationSerializer.Operate( + data={'application_id': application_id, + 'user_id': request.user.id}).list_function_lib()) + + class Operate(APIView): + authentication_classes = [TokenAuth] + + @action(methods=["GET"], detail=False) + @swagger_auto_schema(operation_summary=_("Get library details"), + operation_id=_("Get library details"), + tags=[_("Application")], + ) + @has_permissions(ViewPermission( + [RoleConstants.ADMIN, RoleConstants.USER], + [lambda r, keywords: Permission(group=Group.APPLICATION, operate=Operate.USE, + dynamic_tag=keywords.get('application_id'))], + compare=CompareConstants.AND)) + def get(self, request: Request, application_id: str, function_lib_id: str): + return result.success( + ApplicationSerializer.Operate( + data={'application_id': application_id, + 'user_id': request.user.id}).get_function_lib(function_lib_id)) + + class Application(APIView): + authentication_classes = 
[TokenAuth] + + @action(methods=['GET'], detail=False) + @swagger_auto_schema(operation_summary=_("Get the list of apps created by the current user"), + operation_id=_("Get the list of apps created by the current user"), + tags=[_("Application/Chat")]) @has_permissions(ViewPermission( [RoleConstants.ADMIN, RoleConstants.USER], [lambda r, keywords: Permission(group=Group.APPLICATION, operate=Operate.USE, @@ -182,45 +295,65 @@ def get(self, request: Request, application_id: str): return result.success( ApplicationSerializer.Operate( data={'application_id': application_id, - 'user_id': request.user.id}).list_model()) + 'user_id': request.user.id}).application_list()) + + class Operate(APIView): + authentication_classes = [TokenAuth] + + @action(methods=["GET"], detail=False) + @swagger_auto_schema(operation_summary=_("Get application data"), + operation_id=_("Get application data"), + tags=[_("Application")], + ) + @has_permissions(ViewPermission( + [RoleConstants.ADMIN, RoleConstants.USER], + [lambda r, keywords: Permission(group=Group.APPLICATION, operate=Operate.USE, + dynamic_tag=keywords.get('application_id'))], + compare=CompareConstants.AND)) + def get(self, request: Request, application_id: str, app_id: str): + return result.success( + ApplicationSerializer.Operate( + data={'application_id': application_id, + 'user_id': request.user.id}).get_application(app_id)) class Profile(APIView): authentication_classes = [TokenAuth] @action(methods=['GET'], detail=False) - @swagger_auto_schema(operation_summary="获取应用相关信息", - operation_id="获取应用相关信息", - tags=["应用/会话"]) + @swagger_auto_schema(operation_summary=_("Get application related information"), + operation_id=_("Get application related information"), + tags=[_("Application/Chat")]) def get(self, request: Request): if 'application_id' in request.auth.keywords: return result.success(ApplicationSerializer.Operate( data={'application_id': request.auth.keywords.get('application_id'), 'user_id': 
request.user.id}).profile()) - else: - raise AppAuthenticationFailed(401, "身份异常") + raise AppAuthenticationFailed(401, "身份异常") class ApplicationKey(APIView): authentication_classes = [TokenAuth] @action(methods=['POST'], detail=False) - @swagger_auto_schema(operation_summary="新增ApiKey", - operation_id="新增ApiKey", - tags=['应用/API_KEY'], + @swagger_auto_schema(operation_summary=_("Add ApiKey"), + operation_id=_("Add ApiKey"), + tags=[_('Application/API_KEY')], manual_parameters=ApplicationApi.ApiKey.get_request_params_api()) @has_permissions(ViewPermission( [RoleConstants.ADMIN, RoleConstants.USER], [lambda r, keywords: Permission(group=Group.APPLICATION, operate=Operate.MANAGE, dynamic_tag=keywords.get('application_id'))], compare=CompareConstants.AND)) + @log(menu='Application', operate="Add ApiKey", + get_operation_object=lambda r, k: get_application_operation_object(k.get('application_id'))) def post(self, request: Request, application_id: str): return result.success( ApplicationSerializer.ApplicationKeySerializer( data={'application_id': application_id, 'user_id': request.user.id}).generate()) @action(methods=['GET'], detail=False) - @swagger_auto_schema(operation_summary="获取应用API_KEY列表", - operation_id="获取应用API_KEY列表", - tags=['应用/API_KEY'], + @swagger_auto_schema(operation_summary=_("Get the application API_KEY list"), + operation_id=_("Get the application API_KEY list"), + tags=[_('Application/API_KEY')], manual_parameters=ApplicationApi.ApiKey.get_request_params_api() ) @has_permissions(ViewPermission( @@ -236,17 +369,20 @@ class Operate(APIView): authentication_classes = [TokenAuth] @action(methods=['PUT'], detail=False) - @swagger_auto_schema(operation_summary="修改应用API_KEY", - operation_id="修改应用API_KEY", - tags=['应用/API_KEY'], + @swagger_auto_schema(operation_summary=_("Modify application API_KEY"), + operation_id=_("Modify application API_KEY"), + tags=[_('Application/API_KEY')], manual_parameters=ApplicationApi.ApiKey.Operate.get_request_params_api(), - 
request_body=ApplicationApi.ApiKey.Operate.get_request_body_api()) + request_body=ApplicationApi.ApiKey.Operate.get_request_body_api(), + responses=result.get_api_response(ApplicationApi.ApiKey.Operate.get_response_body_api())) @has_permissions(ViewPermission( [RoleConstants.ADMIN, RoleConstants.USER], [lambda r, keywords: Permission(group=Group.APPLICATION, operate=Operate.MANAGE, dynamic_tag=keywords.get('application_id'))], compare=CompareConstants.AND), PermissionConstants.APPLICATION_EDIT, compare=CompareConstants.AND) + @log(menu='Application', operate="Modify application API_KEY", + get_operation_object=lambda r, k: get_application_operation_object(k.get('application_id'))) def put(self, request: Request, application_id: str, api_key_id: str): return result.success( ApplicationSerializer.ApplicationKeySerializer.Operate( @@ -254,9 +390,9 @@ def put(self, request: Request, application_id: str, api_key_id: str): 'api_key_id': api_key_id}).edit(request.data)) @action(methods=['DELETE'], detail=False) - @swagger_auto_schema(operation_summary="删除应用API_KEY", - operation_id="删除应用API_KEY", - tags=['应用/API_KEY'], + @swagger_auto_schema(operation_summary=_("Delete Application API_KEY"), + operation_id=_("Delete Application API_KEY"), + tags=[_('Application/API_KEY')], manual_parameters=ApplicationApi.ApiKey.Operate.get_request_params_api()) @has_permissions(ViewPermission( [RoleConstants.ADMIN, RoleConstants.USER], @@ -264,6 +400,8 @@ def put(self, request: Request, application_id: str, api_key_id: str): dynamic_tag=keywords.get('application_id'))], compare=CompareConstants.AND), PermissionConstants.APPLICATION_DELETE, compare=CompareConstants.AND) + @log(menu='Application', operate="Delete Application API_KEY", + get_operation_object=lambda r, k: get_application_operation_object(k.get('application_id'))) def delete(self, request: Request, application_id: str, api_key_id: str): return result.success( ApplicationSerializer.ApplicationKeySerializer.Operate( @@ -274,25 
+412,29 @@ class AccessToken(APIView): authentication_classes = [TokenAuth] @action(methods=['PUT'], detail=False) - @swagger_auto_schema(operation_summary="修改 应用AccessToken", - operation_id="修改 应用AccessToken", - tags=['应用/公开访问'], + @swagger_auto_schema(operation_summary=_("Modify Application AccessToken"), + operation_id=_("Modify Application AccessToken"), + tags=[_('Application/Public Access')], manual_parameters=ApplicationApi.AccessToken.get_request_params_api(), - request_body=ApplicationApi.AccessToken.get_request_body_api()) + request_body=ApplicationApi.AccessToken.get_request_body_api(), + responses=result.get_api_response(ApplicationApi.AccessToken.get_response_body_api())) @has_permissions(ViewPermission( [RoleConstants.ADMIN, RoleConstants.USER], [lambda r, keywords: Permission(group=Group.APPLICATION, operate=Operate.MANAGE, dynamic_tag=keywords.get('application_id'))], compare=CompareConstants.AND)) + @log(menu='Application', operate="Modify Application AccessToken", + get_operation_object=lambda r, k: get_application_operation_object(k.get('application_id'))) def put(self, request: Request, application_id: str): return result.success( - ApplicationSerializer.AccessTokenSerializer(data={'application_id': application_id}).edit(request.data)) + ApplicationSerializer.AccessTokenSerializer(data={'application_id': application_id}).edit( + request.data)) @action(methods=['GET'], detail=False) - @swagger_auto_schema(operation_summary="获取应用 AccessToken信息", - operation_id="获取应用 AccessToken信息", + @swagger_auto_schema(operation_summary=_("Get the application AccessToken information"), + operation_id=_("Get the application AccessToken information"), manual_parameters=ApplicationApi.AccessToken.get_request_params_api(), - tags=['应用/公开访问'], + tags=[_('Application/Public Access')], ) @has_permissions(ViewPermission( [RoleConstants.ADMIN, RoleConstants.USER], @@ -306,19 +448,23 @@ def get(self, request: Request, application_id: str): class Authentication(APIView): 
@action(methods=['OPTIONS'], detail=False) def options(self, request, *args, **kwargs): - return HttpResponse(headers={"Access-Control-Allow-Origin": "*", "Access-Control-Allow-Credentials": "true", - "Access-Control-Allow-Methods": "POST", - "Access-Control-Allow-Headers": "Origin,Content-Type,Cookie,Accept,Token"}, ) + return HttpResponse( + headers={"Access-Control-Allow-Origin": "*", "Access-Control-Allow-Credentials": "true", + "Access-Control-Allow-Methods": "POST", + "Access-Control-Allow-Headers": "Origin,Content-Type,Cookie,Accept,Token"}, ) @action(methods=['POST'], detail=False) - @swagger_auto_schema(operation_summary="应用认证", - operation_id="应用认证", + @swagger_auto_schema(operation_summary=_("Application Certification"), + operation_id=_("Application Certification"), request_body=ApplicationApi.Authentication.get_request_body_api(), - tags=["应用/认证"], + responses=result.get_api_response(ApplicationApi.Authentication.get_response_body_api()), + tags=[_("Application/Certification")], security=[]) def post(self, request: Request): return result.success( - ApplicationSerializer.Authentication(data={'access_token': request.data.get("access_token")}).auth( + ApplicationSerializer.Authentication(data={'access_token': request.data.get("access_token"), + 'authentication_value': request.data.get( + 'authentication_value')}).auth( request), headers={"Access-Control-Allow-Origin": "*", "Access-Control-Allow-Credentials": "true", "Access-Control-Allow-Methods": "POST", @@ -326,21 +472,23 @@ def post(self, request: Request): ) @action(methods=['POST'], detail=False) - @swagger_auto_schema(operation_summary="创建应用", - operation_id="创建应用", + @swagger_auto_schema(operation_summary=_("Create an application"), + operation_id=_("Create an application"), request_body=ApplicationApi.Create.get_request_body_api(), - tags=['应用']) + responses=result.get_api_response(ApplicationApi.Create.get_response_body_api()), + tags=[_('Application')]) 
@has_permissions(PermissionConstants.APPLICATION_CREATE, compare=CompareConstants.AND) + @log(menu='Application', operate="Create an application", + get_operation_object=lambda r, k: {'name': r.data.get('name')}) def post(self, request: Request): - ApplicationSerializer.Create(data={'user_id': request.user.id}).insert(request.data) - return result.success(True) + return result.success(ApplicationSerializer.Create(data={'user_id': request.user.id}).insert(request.data)) @action(methods=['GET'], detail=False) - @swagger_auto_schema(operation_summary="获取应用列表", - operation_id="获取应用列表", + @swagger_auto_schema(operation_summary=_("Get the application list"), + operation_id=_("Get the application list"), manual_parameters=ApplicationApi.Query.get_request_params_api(), responses=result.get_api_array_response(ApplicationApi.get_response_body_api()), - tags=['应用']) + tags=[_('Application')]) @has_permissions(PermissionConstants.APPLICATION_READ, compare=CompareConstants.AND) def get(self, request: Request): return result.success( @@ -351,10 +499,10 @@ class HitTest(APIView): authentication_classes = [TokenAuth] @action(methods="GET", detail=False) - @swagger_auto_schema(operation_summary="命中测试列表", operation_id="命中测试列表", + @swagger_auto_schema(operation_summary=_("Hit Test List"), operation_id=_("Hit Test List"), manual_parameters=CommonApi.HitTestApi.get_request_params_api(), responses=result.get_api_array_response(CommonApi.HitTestApi.get_response_body_api()), - tags=["应用"]) + tags=[_("Application")]) @has_permissions(ViewPermission( [RoleConstants.ADMIN, RoleConstants.USER, RoleConstants.APPLICATION_ACCESS_TOKEN, RoleConstants.APPLICATION_KEY], @@ -367,18 +515,41 @@ def get(self, request: Request, application_id: str): "query_text": request.query_params.get("query_text"), "top_number": request.query_params.get("top_number"), 'similarity': request.query_params.get('similarity'), - 'search_mode': request.query_params.get('search_mode')}).hit_test( + 'search_mode': 
request.query_params.get( + 'search_mode')}).hit_test( )) + class Publish(APIView): + authentication_classes = [TokenAuth] + + @action(methods=['PUT'], detail=False) + @swagger_auto_schema(operation_summary=_("Publishing an application"), + operation_id=_("Publishing an application"), + manual_parameters=ApplicationApi.Operate.get_request_params_api(), + request_body=ApplicationApi.Publish.get_request_body_api(), + responses=result.get_default_response(), + tags=[_('Application')]) + @has_permissions(ViewPermission( + [RoleConstants.ADMIN, RoleConstants.USER], + [lambda r, keywords: Permission(group=Group.APPLICATION, operate=Operate.MANAGE, + dynamic_tag=keywords.get('application_id'))], + compare=CompareConstants.AND)) + @log(menu='Application', operate="Publishing an application", + get_operation_object=lambda r, k: get_application_operation_object(k.get('application_id'))) + def put(self, request: Request, application_id: str): + return result.success( + ApplicationSerializer.Operate( + data={'application_id': application_id, 'user_id': request.user.id}).publish(request.data)) + class Operate(APIView): authentication_classes = [TokenAuth] @action(methods=['DELETE'], detail=False) - @swagger_auto_schema(operation_summary="删除应用", - operation_id="删除应用", + @swagger_auto_schema(operation_summary=_("Deleting application"), + operation_id=_("Deleting application"), manual_parameters=ApplicationApi.Operate.get_request_params_api(), responses=result.get_default_response(), - tags=['应用']) + tags=[_('Application')]) @has_permissions(ViewPermission( [RoleConstants.ADMIN, RoleConstants.USER], [lambda r, keywords: Permission(group=Group.APPLICATION, operate=Operate.MANAGE, @@ -386,34 +557,39 @@ class Operate(APIView): compare=CompareConstants.AND), lambda r, k: Permission(group=Group.APPLICATION, operate=Operate.DELETE, dynamic_tag=k.get('application_id')), compare=CompareConstants.AND) + @log(menu='Application', operate="Deleting application", + get_operation_object=lambda 
r, k: get_application_operation_object(k.get('application_id'))) def delete(self, request: Request, application_id: str): return result.success(ApplicationSerializer.Operate( data={'application_id': application_id, 'user_id': request.user.id}).delete( with_valid=True)) @action(methods=['PUT'], detail=False) - @swagger_auto_schema(operation_summary="修改应用", - operation_id="修改应用", + @swagger_auto_schema(operation_summary=_("Modify the application"), + operation_id=_("Modify the application"), manual_parameters=ApplicationApi.Operate.get_request_params_api(), request_body=ApplicationApi.Edit.get_request_body_api(), responses=result.get_api_array_response(ApplicationApi.get_response_body_api()), - tags=['应用']) + tags=[_('Application')]) @has_permissions(ViewPermission( [RoleConstants.ADMIN, RoleConstants.USER], [lambda r, keywords: Permission(group=Group.APPLICATION, operate=Operate.MANAGE, dynamic_tag=keywords.get('application_id'))], compare=CompareConstants.AND)) + @log(menu='Application', operate="Modify the application", + get_operation_object=lambda r, k: get_application_operation_object(k.get('application_id'))) def put(self, request: Request, application_id: str): return result.success( - ApplicationSerializer.Operate(data={'application_id': application_id, 'user_id': request.user.id}).edit( + ApplicationSerializer.Operate( + data={'application_id': application_id, 'user_id': request.user.id}).edit( request.data)) @action(methods=['GET'], detail=False) - @swagger_auto_schema(operation_summary="获取应用详情", - operation_id="获取应用详情", + @swagger_auto_schema(operation_summary=_("Get application details"), + operation_id=_("Get application details"), manual_parameters=ApplicationApi.Operate.get_request_params_api(), responses=result.get_api_array_response(ApplicationApi.get_response_body_api()), - tags=['应用']) + tags=[_('Application')]) @has_permissions(ViewPermission( [RoleConstants.ADMIN, RoleConstants.USER, RoleConstants.APPLICATION_ACCESS_TOKEN, 
RoleConstants.APPLICATION_KEY], @@ -428,14 +604,17 @@ class ListApplicationDataSet(APIView): authentication_classes = [TokenAuth] @action(methods=['GET'], detail=False) - @swagger_auto_schema(operation_summary="获取当前应用可使用的知识库", - operation_id="获取当前应用可使用的知识库", + @swagger_auto_schema(operation_summary=_("Get the knowledge base available to the current application"), + operation_id=_("Get the knowledge base available to the current application"), manual_parameters=ApplicationApi.Operate.get_request_params_api(), - responses=result.get_api_array_response(DataSetSerializers.Query.get_response_body_api()), - tags=['应用']) + responses=result.get_api_array_response( + DataSetSerializers.Query.get_response_body_api()), + tags=[_('Application')]) @has_permissions(ViewPermission([RoleConstants.ADMIN, RoleConstants.USER], - [lambda r, keywords: Permission(group=Group.APPLICATION, operate=Operate.USE, - dynamic_tag=keywords.get('application_id'))], + [lambda r, keywords: Permission(group=Group.APPLICATION, + operate=Operate.USE, + dynamic_tag=keywords.get( + 'application_id'))], compare=CompareConstants.AND)) def get(self, request: Request, application_id: str): return result.success(ApplicationSerializer.Operate( @@ -445,15 +624,83 @@ class Page(APIView): authentication_classes = [TokenAuth] @action(methods=['GET'], detail=False) - @swagger_auto_schema(operation_summary="分页获取应用列表", - operation_id="分页获取应用列表", + @swagger_auto_schema(operation_summary=_("Get the application list by page"), + operation_id=_("Get the application list by page"), manual_parameters=result.get_page_request_params( ApplicationApi.Query.get_request_params_api()), responses=result.get_page_api_response(ApplicationApi.get_response_body_api()), - tags=['应用']) + tags=[_('Application')]) @has_permissions(PermissionConstants.APPLICATION_READ, compare=CompareConstants.AND) def get(self, request: Request, current_page: int, page_size: int): return result.success( ApplicationSerializer.Query( 
data={**query_params_to_single_dict(request.query_params), 'user_id': request.user.id}).page( current_page, page_size)) + + class SpeechToText(APIView): + authentication_classes = [TokenAuth] + + @action(methods=['POST'], detail=False) + @has_permissions( + ViewPermission([RoleConstants.ADMIN, RoleConstants.USER, RoleConstants.APPLICATION_ACCESS_TOKEN], + [lambda r, keywords: Permission(group=Group.APPLICATION, + operate=Operate.USE, + dynamic_tag=keywords.get( + 'application_id'))], + compare=CompareConstants.AND)) + def post(self, request: Request, application_id: str): + return result.success( + ApplicationSerializer.Operate(data={'application_id': application_id, 'user_id': request.user.id}) + .speech_to_text(request.FILES.getlist('file')[0])) + + class TextToSpeech(APIView): + authentication_classes = [TokenAuth] + + @action(methods=['POST'], detail=False) + @swagger_auto_schema(operation_summary=_("text to speech"), + operation_id=_("text to speech"), + manual_parameters=ApplicationApi.TextToSpeech.get_request_params_api(), + request_body=ApplicationApi.TextToSpeech.get_request_body_api(), + responses=result.get_default_response(), + tags=[_('Application')]) + @has_permissions( + ViewPermission([RoleConstants.ADMIN, RoleConstants.USER, RoleConstants.APPLICATION_ACCESS_TOKEN], + [lambda r, keywords: Permission(group=Group.APPLICATION, + operate=Operate.USE, + dynamic_tag=keywords.get( + 'application_id'))], + compare=CompareConstants.AND)) + def post(self, request: Request, application_id: str): + byte_data = ApplicationSerializer.Operate( + data={'application_id': application_id, 'user_id': request.user.id}).text_to_speech( + request.data.get('text')) + return HttpResponse(byte_data, status=200, headers={'Content-Type': 'audio/mp3', + 'Content-Disposition': 'attachment; filename="abc.mp3"'}) + + class PlayDemoText(APIView): + authentication_classes = [TokenAuth] + + @action(methods=['POST'], detail=False) + @has_permissions( + 
ViewPermission([RoleConstants.ADMIN, RoleConstants.USER, RoleConstants.APPLICATION_ACCESS_TOKEN], + [lambda r, keywords: Permission(group=Group.APPLICATION, + operate=Operate.USE, + dynamic_tag=keywords.get( + 'application_id'))], + compare=CompareConstants.AND)) + @log(menu='Application', operate="trial listening", + get_operation_object=lambda r, k: get_application_operation_object(k.get('application_id'))) + def post(self, request: Request, application_id: str): + byte_data = ApplicationSerializer.Operate( + data={'application_id': application_id, 'user_id': request.user.id}).play_demo_text(request.data) + return HttpResponse(byte_data, status=200, headers={'Content-Type': 'audio/mp3', + 'Content-Disposition': 'attachment; filename="abc.mp3"'}) + + class McpServers(APIView): + authentication_classes = [TokenAuth] + + @action(methods=['GET'], detail=False) + @has_permissions(PermissionConstants.APPLICATION_READ, compare=CompareConstants.AND) + def get(self, request: Request): + return result.success(ApplicationSerializer.McpServers( + data={'mcp_servers': request.query_params.get('mcp_servers')}).get_mcp_servers()) diff --git a/apps/application/views/chat_views.py b/apps/application/views/chat_views.py index 2d6ef10f11e..e8d402b9ae9 100644 --- a/apps/application/views/chat_views.py +++ b/apps/application/views/chat_views.py @@ -6,20 +6,42 @@ @date:2023/11/14 9:53 @desc: """ + +from django.utils.translation import gettext_lazy as _ +from drf_yasg import openapi from drf_yasg.utils import swagger_auto_schema from rest_framework.decorators import action +from rest_framework.parsers import MultiPartParser from rest_framework.request import Request from rest_framework.views import APIView -from application.serializers.chat_message_serializers import ChatMessageSerializer +from application.serializers.chat_message_serializers import ChatMessageSerializer, OpenAIChatSerializer from application.serializers.chat_serializers import ChatSerializers, ChatRecordSerializer 
-from application.swagger_api.chat_api import ChatApi, VoteApi, ChatRecordApi, ImproveApi, ChatRecordImproveApi -from common.auth import TokenAuth, has_permissions +from application.swagger_api.chat_api import ChatApi, VoteApi, ChatRecordApi, ImproveApi, ChatRecordImproveApi, \ + ChatClientHistoryApi, OpenAIChatApi +from application.views import get_application_operation_object +from common.auth import TokenAuth, has_permissions, OpenAIKeyAuth from common.constants.authentication_type import AuthenticationType from common.constants.permission_constants import Permission, Group, Operate, \ RoleConstants, ViewPermission, CompareConstants +from common.log.log import log from common.response import result from common.util.common import query_params_to_single_dict +from dataset.serializers.file_serializers import FileSerializer + + +class Openai(APIView): + authentication_classes = [OpenAIKeyAuth] + + @action(methods=['POST'], detail=False) + @swagger_auto_schema(operation_summary=_("OpenAI Interface Dialogue"), + operation_id=_("OpenAI Interface Dialogue"), + request_body=OpenAIChatApi.get_request_body_api(), + responses=OpenAIChatApi.get_response_body_api(), + tags=[_("OpenAI Dialogue")]) + def post(self, request: Request, application_id: str): + return OpenAIChatSerializer(data={'application_id': application_id, 'client_id': request.auth.client_id, + 'client_type': request.auth.client_type}).chat(request.data) class ChatView(APIView): @@ -28,30 +50,32 @@ class ChatView(APIView): class Export(APIView): authentication_classes = [TokenAuth] - @action(methods=['GET'], detail=False) - @swagger_auto_schema(operation_summary="导出对话", - operation_id="导出对话", + @action(methods=['POST'], detail=False) + @swagger_auto_schema(operation_summary=_("Export conversation"), + operation_id=_("Export conversation"), manual_parameters=ChatApi.get_request_params_api(), - tags=["应用/对话日志"] + tags=[_("Application/Conversation Log")] ) @has_permissions( ViewPermission([RoleConstants.ADMIN, 
RoleConstants.USER, RoleConstants.APPLICATION_KEY], [lambda r, keywords: Permission(group=Group.APPLICATION, operate=Operate.USE, dynamic_tag=keywords.get('application_id'))]) ) - def get(self, request: Request, application_id: str): + @log(menu='Conversation Log', operate="Export conversation", + get_operation_object=lambda r, k: get_application_operation_object(k.get('application_id'))) + def post(self, request: Request, application_id: str): return ChatSerializers.Query( data={**query_params_to_single_dict(request.query_params), 'application_id': application_id, - 'user_id': request.user.id}).export() + 'user_id': request.user.id}).export(request.data) class Open(APIView): authentication_classes = [TokenAuth] @action(methods=['GET'], detail=False) - @swagger_auto_schema(operation_summary="获取会话id,根据应用id", - operation_id="获取会话id,根据应用id", + @swagger_auto_schema(operation_summary=_("Get the session id according to the application id"), + operation_id=_("Get the session id according to the application id"), manual_parameters=ChatApi.OpenChat.get_request_params_api(), - tags=["应用/会话"]) + tags=[_("Application/Chat")]) @has_permissions( ViewPermission([RoleConstants.ADMIN, RoleConstants.USER, RoleConstants.APPLICATION_ACCESS_TOKEN, RoleConstants.APPLICATION_KEY], @@ -63,14 +87,28 @@ def get(self, request: Request, application_id: str): return result.success(ChatSerializers.OpenChat( data={'user_id': request.user.id, 'application_id': application_id}).open()) + class OpenWorkFlowTemp(APIView): + authentication_classes = [TokenAuth] + + @action(methods=['POST'], detail=False) + @swagger_auto_schema(operation_summary=_("Get the workflow temporary session id"), + operation_id=_("Get the workflow temporary session id"), + request_body=ChatApi.OpenWorkFlowTemp.get_request_body_api(), + responses=result.get_api_response(ChatApi.OpenTempChat.get_response_body_api()), + tags=[_("Application/Chat")]) + def post(self, request: Request): + return 
result.success(ChatSerializers.OpenWorkFlowChat( + data={'user_id': request.user.id, **request.data}).open()) + class OpenTemp(APIView): authentication_classes = [TokenAuth] @action(methods=['POST'], detail=False) - @swagger_auto_schema(operation_summary="获取会话id(根据模型id,知识库列表,是否多轮会话)", - operation_id="获取会话id", + @swagger_auto_schema(operation_summary=_("Get a temporary session id"), + operation_id=_("Get a temporary session id"), request_body=ChatApi.OpenTempChat.get_request_body_api(), - tags=["应用/会话"]) + responses=result.get_api_response(ChatApi.OpenTempChat.get_response_body_api()), + tags=[_("Application/Chat")]) @has_permissions(RoleConstants.ADMIN, RoleConstants.USER) def post(self, request: Request): return result.success(ChatSerializers.OpenTempChat( @@ -80,10 +118,10 @@ class Message(APIView): authentication_classes = [TokenAuth] @action(methods=['POST'], detail=False) - @swagger_auto_schema(operation_summary="对话", - operation_id="对话", + @swagger_auto_schema(operation_summary=_("dialogue"), + operation_id=_("dialogue"), request_body=ChatApi.get_request_body_api(), - tags=["应用/会话"]) + tags=[_("Application/Chat")]) @has_permissions( ViewPermission([RoleConstants.ADMIN, RoleConstants.USER, RoleConstants.APPLICATION_KEY, RoleConstants.APPLICATION_ACCESS_TOKEN], @@ -99,14 +137,31 @@ def post(self, request: Request, chat_id: str): 'application_id': (request.auth.keywords.get( 'application_id') if request.auth.client_type == AuthenticationType.APPLICATION_ACCESS_TOKEN.value else None), 'client_id': request.auth.client_id, - 'client_type': request.auth.client_type}).chat() + 'form_data': (request.data.get( + 'form_data') if 'form_data' in request.data else {}), + + 'image_list': request.data.get( + 'image_list') if 'image_list' in request.data else [], + 'document_list': request.data.get( + 'document_list') if 'document_list' in request.data else [], + 'audio_list': request.data.get( + 'audio_list') if 'audio_list' in request.data else [], + 'other_list': 
request.data.get( + 'other_list') if 'other_list' in request.data else [], + 'client_type': request.auth.client_type, + 'node_id': request.data.get('node_id', None), + 'runtime_node_id': request.data.get('runtime_node_id', None), + 'node_data': request.data.get('node_data', {}), + 'chat_record_id': request.data.get('chat_record_id'), + 'child_node': request.data.get('child_node')} + ).chat() @action(methods=['GET'], detail=False) - @swagger_auto_schema(operation_summary="获取对话列表", - operation_id="获取对话列表", + @swagger_auto_schema(operation_summary=_("Get the conversation list"), + operation_id=_("Get the conversation list"), manual_parameters=ChatApi.get_request_params_api(), responses=result.get_api_array_response(ChatApi.get_response_body_api()), - tags=["应用/对话日志"] + tags=[_("Application/Conversation Log")] ) @has_permissions( ViewPermission([RoleConstants.ADMIN, RoleConstants.USER, RoleConstants.APPLICATION_KEY], @@ -122,30 +177,95 @@ class Operate(APIView): authentication_classes = [TokenAuth] @action(methods=['DELETE'], detail=False) - @swagger_auto_schema(operation_summary="删除对话", - operation_id="删除对话", - tags=["应用/对话日志"]) + @swagger_auto_schema(operation_summary=_("Delete a conversation"), + operation_id=_("Delete a conversation"), + tags=[_("Application/Conversation Log")]) @has_permissions(ViewPermission( [RoleConstants.ADMIN, RoleConstants.USER], [lambda r, keywords: Permission(group=Group.APPLICATION, operate=Operate.MANAGE, dynamic_tag=keywords.get('application_id'))], compare=CompareConstants.AND), compare=CompareConstants.AND) + @log(menu='Conversation Log', operate="Delete a conversation", + get_operation_object=lambda r, k: get_application_operation_object(k.get('application_id'))) def delete(self, request: Request, application_id: str, chat_id: str): return result.success( ChatSerializers.Operate( data={'application_id': application_id, 'user_id': request.user.id, 'chat_id': chat_id}).delete()) + class ClientChatHistoryPage(APIView): + 
authentication_classes = [TokenAuth] + + @action(methods=['GET'], detail=False) + @swagger_auto_schema(operation_summary=_("Get client conversation list by paging"), + operation_id=_("Get client conversation list by paging"), + manual_parameters=result.get_page_request_params( + ChatClientHistoryApi.get_request_params_api()), + responses=result.get_page_api_response(ChatApi.get_response_body_api()), + tags=[_("Application/Conversation Log")] + ) + @has_permissions( + ViewPermission([RoleConstants.APPLICATION_ACCESS_TOKEN], + [lambda r, keywords: Permission(group=Group.APPLICATION, operate=Operate.USE, + dynamic_tag=keywords.get('application_id'))]) + ) + def get(self, request: Request, application_id: str, current_page: int, page_size: int): + return result.success(ChatSerializers.ClientChatHistory( + data={'client_id': request.auth.client_id, 'application_id': application_id}).page( + current_page=current_page, + page_size=page_size)) + + class Operate(APIView): + authentication_classes = [TokenAuth] + + @action(methods=['DELETE'], detail=False) + @swagger_auto_schema(operation_summary=_("Client deletes conversation"), + operation_id=_("Client deletes conversation"), + tags=[_("Application/Conversation Log")]) + @has_permissions(ViewPermission( + [RoleConstants.APPLICATION_ACCESS_TOKEN], + [lambda r, keywords: Permission(group=Group.APPLICATION, operate=Operate.USE, + dynamic_tag=keywords.get('application_id'))], + compare=CompareConstants.AND), + compare=CompareConstants.AND) + @log(menu='Conversation Log', operate="Client deletes conversation", + get_operation_object=lambda r, k: get_application_operation_object(k.get('application_id'))) + def delete(self, request: Request, application_id: str, chat_id: str): + return result.success( + ChatSerializers.Operate( + data={'application_id': application_id, 'user_id': request.user.id, + 'chat_id': chat_id}).logic_delete()) + + @action(methods=['PUT'], detail=False) + @swagger_auto_schema(operation_summary=_("Client 
modifies dialogue summary"), + operation_id=_("Client modifies dialogue summary"), + request_body=ChatClientHistoryApi.Operate.ReAbstract.get_request_body_api(), + responses=result.get_default_response(), + tags=[_("Application/Conversation Log")]) + @has_permissions(ViewPermission( + [RoleConstants.APPLICATION_ACCESS_TOKEN, RoleConstants.ADMIN, RoleConstants.USER], + [lambda r, keywords: Permission(group=Group.APPLICATION, operate=Operate.USE, + dynamic_tag=keywords.get('application_id'))], + compare=CompareConstants.AND), + compare=CompareConstants.AND) + @log(menu='Conversation Log', operate="Client modifies dialogue summary", + get_operation_object=lambda r, k: get_application_operation_object(k.get('application_id'))) + def put(self, request: Request, application_id: str, chat_id: str): + return result.success( + ChatSerializers.Operate( + data={'application_id': application_id, 'user_id': request.user.id, + 'chat_id': chat_id}).re_abstract(request.data)) + class Page(APIView): authentication_classes = [TokenAuth] @action(methods=['GET'], detail=False) - @swagger_auto_schema(operation_summary="分页获取对话列表", - operation_id="分页获取对话列表", + @swagger_auto_schema(operation_summary=_("Get the conversation list by page"), + operation_id=_("Get the conversation list by page"), manual_parameters=result.get_page_request_params(ChatApi.get_request_params_api()), responses=result.get_page_api_response(ChatApi.get_response_body_api()), - tags=["应用/对话日志"] + tags=[_("Application/Conversation Log")] ) @has_permissions( ViewPermission([RoleConstants.ADMIN, RoleConstants.USER, RoleConstants.APPLICATION_KEY], @@ -165,11 +285,11 @@ class Operate(APIView): authentication_classes = [TokenAuth] @action(methods=['GET'], detail=False) - @swagger_auto_schema(operation_summary="获取对话记录详情", - operation_id="获取对话记录详情", + @swagger_auto_schema(operation_summary=_("Get conversation record details"), + operation_id=_("Get conversation record details"), 
manual_parameters=ChatRecordApi.get_request_params_api(), responses=result.get_api_array_response(ChatRecordApi.get_response_body_api()), - tags=["应用/对话日志"] + tags=[_("Application/Conversation Log")] ) @has_permissions( ViewPermission([RoleConstants.ADMIN, RoleConstants.USER, RoleConstants.APPLICATION_KEY, @@ -184,11 +304,11 @@ def get(self, request: Request, application_id: str, chat_id: str, chat_record_i 'chat_record_id': chat_record_id}).one(request.auth.current_role)) @action(methods=['GET'], detail=False) - @swagger_auto_schema(operation_summary="获取对话记录列表", - operation_id="获取对话记录列表", + @swagger_auto_schema(operation_summary=_("Get a list of conversation records"), + operation_id=_("Get a list of conversation records"), manual_parameters=ChatRecordApi.get_request_params_api(), responses=result.get_api_array_response(ChatRecordApi.get_response_body_api()), - tags=["应用/对话日志"] + tags=[_("Application/Conversation Log")] ) @has_permissions( ViewPermission([RoleConstants.ADMIN, RoleConstants.USER, RoleConstants.APPLICATION_KEY], @@ -198,18 +318,18 @@ def get(self, request: Request, application_id: str, chat_id: str, chat_record_i def get(self, request: Request, application_id: str, chat_id: str): return result.success(ChatRecordSerializer.Query( data={'application_id': application_id, - 'chat_id': chat_id}).list()) + 'chat_id': chat_id, 'order_asc': request.query_params.get('order_asc')}).list()) class Page(APIView): authentication_classes = [TokenAuth] @action(methods=['GET'], detail=False) - @swagger_auto_schema(operation_summary="获取对话记录列表", - operation_id="获取对话记录列表", + @swagger_auto_schema(operation_summary=_("Get the conversation history list by page"), + operation_id=_("Get the conversation history list by page"), manual_parameters=result.get_page_request_params( ChatRecordApi.get_request_params_api()), responses=result.get_page_api_response(ChatRecordApi.get_response_body_api()), - tags=["应用/对话日志"] + tags=[_("Application/Conversation Log")] ) @has_permissions( 
ViewPermission([RoleConstants.ADMIN, RoleConstants.USER, RoleConstants.APPLICATION_KEY], @@ -219,18 +339,19 @@ class Page(APIView): def get(self, request: Request, application_id: str, chat_id: str, current_page: int, page_size: int): return result.success(ChatRecordSerializer.Query( data={'application_id': application_id, - 'chat_id': chat_id}).page(current_page, page_size)) + 'chat_id': chat_id, 'order_asc': request.query_params.get('order_asc')}).page(current_page, + page_size)) class Vote(APIView): authentication_classes = [TokenAuth] @action(methods=['PUT'], detail=False) - @swagger_auto_schema(operation_summary="点赞,点踩", - operation_id="点赞,点踩", + @swagger_auto_schema(operation_summary=_("Like, Dislike"), + operation_id=_("Like, Dislike"), manual_parameters=VoteApi.get_request_params_api(), request_body=VoteApi.get_request_body_api(), responses=result.get_default_response(), - tags=["应用/会话"] + tags=[_("Application/Chat")] ) @has_permissions( ViewPermission([RoleConstants.ADMIN, RoleConstants.USER, RoleConstants.APPLICATION_KEY, @@ -238,6 +359,8 @@ class Vote(APIView): [lambda r, keywords: Permission(group=Group.APPLICATION, operate=Operate.USE, dynamic_tag=keywords.get('application_id'))]) ) + @log(menu='Conversation Log', operate="Like, Dislike", + get_operation_object=lambda r, k: get_application_operation_object(k.get('application_id'))) def put(self, request: Request, application_id: str, chat_id: str, chat_record_id: str): return result.success(ChatRecordSerializer.Vote( data={'vote_status': request.data.get('vote_status'), 'chat_id': chat_id, @@ -247,11 +370,11 @@ class ChatRecordImprove(APIView): authentication_classes = [TokenAuth] @action(methods=['GET'], detail=False) - @swagger_auto_schema(operation_summary="获取标注段落列表信息", - operation_id="获取标注段落列表信息", + @swagger_auto_schema(operation_summary=_("Get the list of marked paragraphs"), + operation_id=_("Get the list of marked paragraphs"), manual_parameters=ChatRecordImproveApi.get_request_params_api(), 
responses=result.get_api_response(ChatRecordImproveApi.get_response_body_api()), - tags=["应用/对话日志/标注"] + tags=[_("Application/Conversation Log/Annotation")] ) @has_permissions( ViewPermission([RoleConstants.ADMIN, RoleConstants.USER], @@ -266,12 +389,12 @@ class Improve(APIView): authentication_classes = [TokenAuth] @action(methods=['PUT'], detail=False) - @swagger_auto_schema(operation_summary="标注", - operation_id="标注", + @swagger_auto_schema(operation_summary=_("Annotation"), + operation_id=_("Annotation"), manual_parameters=ImproveApi.get_request_params_api(), request_body=ImproveApi.get_request_body_api(), responses=result.get_api_response(ChatRecordApi.get_response_body_api()), - tags=["应用/对话日志/标注"] + tags=[_("Application/Conversation Log/Annotation")] ) @has_permissions( ViewPermission([RoleConstants.ADMIN, RoleConstants.USER], @@ -285,21 +408,48 @@ class Improve(APIView): 'dataset_id'))], compare=CompareConstants.AND ), compare=CompareConstants.AND) + @log(menu='Conversation Log', operate="Annotation", + get_operation_object=lambda r, k: get_application_operation_object(k.get('application_id'))) def put(self, request: Request, application_id: str, chat_id: str, chat_record_id: str, dataset_id: str, document_id: str): return result.success(ChatRecordSerializer.Improve( data={'chat_id': chat_id, 'chat_record_id': chat_record_id, 'dataset_id': dataset_id, 'document_id': document_id}).improve(request.data)) + @action(methods=['POST'], detail=False) + @swagger_auto_schema(operation_summary=_("Add to Knowledge Base"), + operation_id=_("Add to Knowledge Base"), + manual_parameters=ImproveApi.get_request_params_api_post(), + request_body=ImproveApi.get_request_body_api_post(), + responses=result.get_default_response(), + tags=[_("Application/Conversation Log/Add to Knowledge Base")] + ) + @has_permissions( + ViewPermission([RoleConstants.ADMIN, RoleConstants.USER], + [lambda r, keywords: Permission(group=Group.APPLICATION, operate=Operate.USE, + 
dynamic_tag=keywords.get('application_id'))], + + ), ViewPermission([RoleConstants.ADMIN, RoleConstants.USER], + [lambda r, keywords: Permission(group=Group.DATASET, + operate=Operate.MANAGE, + dynamic_tag=keywords.get( + 'dataset_id'))], + compare=CompareConstants.AND + ), compare=CompareConstants.AND) + @log(menu='Conversation Log', operate="Add to Knowledge Base", + get_operation_object=lambda r, k: get_application_operation_object(k.get('application_id'))) + def post(self, request: Request, application_id: str, dataset_id: str): + return result.success(ChatRecordSerializer.PostImprove().post_improve(request.data)) + class Operate(APIView): authentication_classes = [TokenAuth] @action(methods=['DELETE'], detail=False) - @swagger_auto_schema(operation_summary="标注", - operation_id="标注", + @swagger_auto_schema(operation_summary=_("Delete a Annotation"), + operation_id=_("Delete a Annotation"), manual_parameters=ImproveApi.get_request_params_api(), responses=result.get_api_response(ChatRecordApi.get_response_body_api()), - tags=["应用/对话日志/标注"] + tags=[_("Application/Conversation Log/Annotation")] ) @has_permissions( ViewPermission([RoleConstants.ADMIN, RoleConstants.USER], @@ -313,6 +463,8 @@ class Operate(APIView): 'dataset_id'))], compare=CompareConstants.AND ), compare=CompareConstants.AND) + @log(menu='Conversation Log', operate="Delete a Annotation", + get_operation_object=lambda r, k: get_application_operation_object(k.get('application_id'))) def delete(self, request: Request, application_id: str, chat_id: str, chat_record_id: str, dataset_id: str, document_id: str, paragraph_id: str): @@ -320,3 +472,45 @@ def delete(self, request: Request, application_id: str, chat_id: str, chat_recor data={'chat_id': chat_id, 'chat_record_id': chat_record_id, 'dataset_id': dataset_id, 'document_id': document_id, 'paragraph_id': paragraph_id}).delete()) + + class UploadFile(APIView): + authentication_classes = [TokenAuth] + parser_classes = [MultiPartParser] + + 
@action(methods=['POST'], detail=False) + @swagger_auto_schema(operation_summary=_("Upload files"), + operation_id=_("Upload files"), + manual_parameters=[ + openapi.Parameter(name='application_id', + in_=openapi.IN_PATH, + type=openapi.TYPE_STRING, + required=True, + description=_('Application ID')), + openapi.Parameter(name='chat_id', + in_=openapi.IN_PATH, + type=openapi.TYPE_STRING, + required=True, + description=_('Conversation ID')), + openapi.Parameter(name='file', + in_=openapi.IN_FORM, + type=openapi.TYPE_FILE, + required=True, + description=_('Upload file')) + ], + tags=[_("Application/Conversation Log")] + ) + @has_permissions( + ViewPermission([RoleConstants.ADMIN, RoleConstants.USER, RoleConstants.APPLICATION_KEY, + RoleConstants.APPLICATION_ACCESS_TOKEN], + [lambda r, keywords: Permission(group=Group.APPLICATION, operate=Operate.USE, + dynamic_tag=keywords.get('application_id'))]) + ) + def post(self, request: Request, application_id: str, chat_id: str): + files = request.FILES.getlist('file') + file_ids = [] + debug = request.data.get("debug", "false").lower() == "true" + meta = {'application_id': application_id, 'chat_id': chat_id, 'debug': debug} + for file in files: + file_url = FileSerializer(data={'file': file, 'meta': meta}).upload() + file_ids.append({'name': file.name, 'url': file_url, 'file_id': file_url.split('/')[-1]}) + return result.success(file_ids) diff --git a/apps/application/views/common.py b/apps/application/views/common.py new file mode 100644 index 00000000000..7773039c23b --- /dev/null +++ b/apps/application/views/common.py @@ -0,0 +1,21 @@ +# coding=utf-8 +""" + @project: MaxKB + @Author:虎 + @file: common.py + @date:2025/3/25 16:56 + @desc: +""" + +from django.db.models import QuerySet + +from application.models import Application + + +def get_application_operation_object(application_id): + application_model = QuerySet(model=Application).filter(id=application_id).first() + if application_model is not None: + return { + "name": 
application_model.name + } + return {} diff --git a/apps/common/auth/authenticate.py b/apps/common/auth/authenticate.py index 3d2a2258ea0..6eddb76b4c3 100644 --- a/apps/common/auth/authenticate.py +++ b/apps/common/auth/authenticate.py @@ -7,16 +7,16 @@ @desc: 认证类 """ import traceback +from importlib import import_module +from django.conf import settings from django.core import cache from django.core import signing from rest_framework.authentication import TokenAuthentication -from common.auth.handle.impl.application_key import ApplicationKey -from common.auth.handle.impl.public_access_token import PublicAccessToken -from common.auth.handle.impl.user_token import UserToken -from common.exception.app_exception import AppAuthenticationFailed, AppEmbedIdentityFailed, AppChatNumOutOfBoundsFailed - +from common.exception.app_exception import AppAuthenticationFailed, AppEmbedIdentityFailed, AppChatNumOutOfBoundsFailed, \ + ChatException, AppApiException +from django.utils.translation import gettext_lazy as _ token_cache = cache.caches['token_cache'] @@ -25,7 +25,16 @@ def authenticate(self, request): return None, None -handles = [UserToken(), PublicAccessToken(), ApplicationKey()] +def new_instance_by_class_path(class_path: str): + parts = class_path.rpartition('.') + package_path = parts[0] + class_name = parts[2] + module = import_module(package_path) + HandlerClass = getattr(module, class_name) + return HandlerClass() + + +handles = [new_instance_by_class_path(class_path) for class_path in settings.AUTH_HANDLES] class TokenDetails: @@ -44,21 +53,43 @@ def get_token_details(self): return self.token_details +class OpenAIKeyAuth(TokenAuthentication): + def authenticate(self, request): + auth = request.META.get('HTTP_AUTHORIZATION') + auth = auth.replace('Bearer ', '') + # 未认证 + if auth is None: + raise AppAuthenticationFailed(1003, _('Not logged in, please log in first')) + try: + token_details = TokenDetails(auth) + for handle in handles: + if handle.support(request, 
auth, token_details.get_token_details): + return handle.handle(request, auth, token_details.get_token_details) + raise AppAuthenticationFailed(1002, _('Authentication information is incorrect! illegal user')) + except Exception as e: + traceback.format_exc() + if isinstance(e, AppEmbedIdentityFailed) or isinstance(e, AppChatNumOutOfBoundsFailed) or isinstance(e, + AppApiException): + raise e + raise AppAuthenticationFailed(1002, _('Authentication information is incorrect! illegal user')) + + class TokenAuth(TokenAuthentication): # 重新 authenticate 方法,自定义认证规则 def authenticate(self, request): auth = request.META.get('HTTP_AUTHORIZATION') # 未认证 if auth is None: - raise AppAuthenticationFailed(1003, '未登录,请先登录') + raise AppAuthenticationFailed(1003, _('Not logged in, please log in first')) try: token_details = TokenDetails(auth) for handle in handles: if handle.support(request, auth, token_details.get_token_details): return handle.handle(request, auth, token_details.get_token_details) - raise AppAuthenticationFailed(1002, "身份验证信息不正确!非法用户") + raise AppAuthenticationFailed(1002, _('Authentication information is incorrect! illegal user')) except Exception as e: traceback.format_exc() - if isinstance(e, AppEmbedIdentityFailed) or isinstance(e, AppChatNumOutOfBoundsFailed): + if isinstance(e, AppEmbedIdentityFailed) or isinstance(e, AppChatNumOutOfBoundsFailed) or isinstance(e, + AppApiException): raise e - raise AppAuthenticationFailed(1002, "身份验证信息不正确!非法用户") + raise AppAuthenticationFailed(1002, _('Authentication information is incorrect! 
illegal user')) diff --git a/apps/common/auth/authentication.py b/apps/common/auth/authentication.py index b27e1d1eabc..e11c9d552fb 100644 --- a/apps/common/auth/authentication.py +++ b/apps/common/auth/authentication.py @@ -11,7 +11,7 @@ from common.constants.permission_constants import ViewPermission, CompareConstants, RoleConstants, PermissionConstants, \ Permission from common.exception.app_exception import AppUnauthorizedFailed - +from django.utils.translation import gettext_lazy as _ def exist_permissions_by_permission_constants(user_permission: List[PermissionConstants], permission_list: List[PermissionConstants]): @@ -59,11 +59,11 @@ def exist_permissions(user_role: List[RoleConstants], user_permission: List[Perm **kwargs): if isinstance(permission, ViewPermission): return exist_permissions_by_view_permission(user_role, user_permission, permission, request, **kwargs) - elif isinstance(permission, RoleConstants): + if isinstance(permission, RoleConstants): return exist_role_by_role_constants(user_role, [permission]) - elif isinstance(permission, PermissionConstants): + if isinstance(permission, PermissionConstants): return exist_permissions_by_permission_constants(user_permission, [permission]) - elif isinstance(permission, Permission): + if isinstance(permission, Permission): return user_permission.__contains__(permission) return False @@ -72,8 +72,7 @@ def exist(user_role: List[RoleConstants], user_permission: List[PermissionConsta if callable(permission): p = permission(request, kwargs) return exist_permissions(user_role, user_permission, p, request) - else: - return exist_permissions(user_role, user_permission, permission, request, **kwargs) + return exist_permissions(user_role, user_permission, permission, request, **kwargs) def has_permissions(*permission, compare=CompareConstants.OR): @@ -92,8 +91,7 @@ def run(view, request, **kwargs): # 判断是否有权限 if any(exit_list) if compare == CompareConstants.OR else all(exit_list): return func(view, request, 
**kwargs) - else: - raise AppUnauthorizedFailed(403, "没有权限访问") + raise AppUnauthorizedFailed(403, _('No permission to access')) return run diff --git a/apps/common/auth/handle/impl/application_key.py b/apps/common/auth/handle/impl/application_key.py index b35ef80fc80..bddcfd43a09 100644 --- a/apps/common/auth/handle/impl/application_key.py +++ b/apps/common/auth/handle/impl/application_key.py @@ -13,15 +13,16 @@ from common.constants.authentication_type import AuthenticationType from common.constants.permission_constants import Permission, Group, Operate, RoleConstants, Auth from common.exception.app_exception import AppAuthenticationFailed +from django.utils.translation import gettext_lazy as _ class ApplicationKey(AuthBaseHandle): def handle(self, request, token: str, get_token_details): application_api_key = QuerySet(ApplicationApiKey).filter(secret_key=token).first() if application_api_key is None: - raise AppAuthenticationFailed(500, "secret_key 无效") + raise AppAuthenticationFailed(500, _('Secret key is invalid')) if not application_api_key.is_active: - raise AppAuthenticationFailed(500, "secret_key 无效") + raise AppAuthenticationFailed(500, _('Secret key is invalid')) permission_list = [Permission(group=Group.APPLICATION, operate=Operate.USE, dynamic_tag=str( diff --git a/apps/common/auth/handle/impl/public_access_token.py b/apps/common/auth/handle/impl/public_access_token.py index 1655187a83b..fdcff4021fe 100644 --- a/apps/common/auth/handle/impl/public_access_token.py +++ b/apps/common/auth/handle/impl/public_access_token.py @@ -12,8 +12,10 @@ from common.auth.handle.auth_base_handle import AuthBaseHandle from common.constants.authentication_type import AuthenticationType from common.constants.permission_constants import RoleConstants, Permission, Group, Operate, Auth -from common.exception.app_exception import AppAuthenticationFailed - +from common.exception.app_exception import AppAuthenticationFailed, ChatException +from common.models.db_model_manage 
import DBModelManage +from common.util.common import password_encrypt +from django.utils.translation import gettext_lazy as _ class PublicAccessToken(AuthBaseHandle): def support(self, request, token: str, get_token_details): @@ -29,12 +31,27 @@ def handle(self, request, token: str, get_token_details): auth_details = get_token_details() application_access_token = QuerySet(ApplicationAccessToken).filter( application_id=auth_details.get('application_id')).first() + if request.path != '/api/application/profile': + application_setting_model = DBModelManage.get_model('application_setting') + xpack_cache = DBModelManage.get_model('xpack_cache') + X_PACK_LICENSE_IS_VALID = False if xpack_cache is None else xpack_cache.get('XPACK_LICENSE_IS_VALID', False) + if application_setting_model is not None and X_PACK_LICENSE_IS_VALID: + application_setting = QuerySet(application_setting_model).filter(application_id=str( + application_access_token.application_id)).first() + if application_setting.authentication: + authentication = auth_details.get('authentication', {}) + if authentication is None: + authentication = {} + if application_setting.authentication_value.get('type') != authentication.get( + 'type') or password_encrypt( + application_setting.authentication_value.get('value')) != authentication.get('value'): + raise ChatException(1002, _('Authentication information is incorrect')) if application_access_token is None: - raise AppAuthenticationFailed(1002, "身份验证信息不正确") + raise AppAuthenticationFailed(1002, _('Authentication information is incorrect')) if not application_access_token.is_active: - raise AppAuthenticationFailed(1002, "身份验证信息不正确") + raise AppAuthenticationFailed(1002, _('Authentication information is incorrect')) if not application_access_token.access_token == auth_details.get('access_token'): - raise AppAuthenticationFailed(1002, "身份验证信息不正确") + raise AppAuthenticationFailed(1002, _('Authentication information is incorrect')) return 
application_access_token.application.user, Auth( role_list=[RoleConstants.APPLICATION_ACCESS_TOKEN], diff --git a/apps/common/auth/handle/impl/user_token.py b/apps/common/auth/handle/impl/user_token.py index 6559797ba3f..dbb6bd2b51a 100644 --- a/apps/common/auth/handle/impl/user_token.py +++ b/apps/common/auth/handle/impl/user_token.py @@ -17,7 +17,7 @@ from django.core import cache from users.models.user import get_user_dynamics_permission - +from django.utils.translation import gettext_lazy as _ token_cache = cache.caches['token_cache'] @@ -31,7 +31,7 @@ def support(self, request, token: str, get_token_details): def handle(self, request, token: str, get_token_details): cache_token = token_cache.get(token) if cache_token is None: - raise AppAuthenticationFailed(1002, "登录过期") + raise AppAuthenticationFailed(1002, _('Login expired')) auth_details = get_token_details() user = QuerySet(User).get(id=auth_details['id']) # 续期 diff --git a/apps/common/cache/file_cache.py b/apps/common/cache/file_cache.py index 72b1201d104..45b5a73497e 100644 --- a/apps/common/cache/file_cache.py +++ b/apps/common/cache/file_cache.py @@ -12,7 +12,7 @@ import time from diskcache import Cache -from django.core.cache.backends.base import DEFAULT_TIMEOUT, BaseCache +from django.core.cache.backends.base import BaseCache class FileCache(BaseCache): @@ -29,35 +29,58 @@ def _createdir(self): finally: os.umask(old_umask) - def add(self, key, value, timeout=DEFAULT_TIMEOUT, version=None): + def add(self, key, value, timeout=None, version=None): expire = timeout if isinstance(timeout, int) or isinstance(timeout, - float) else timeout.total_seconds() - return self.cache.add(key, value=value, expire=expire) + float) or timeout is None else timeout.total_seconds() + return self.cache.add(self.get_key(key, version), value=value, expire=expire) - def set(self, key, value, timeout=DEFAULT_TIMEOUT, version=None): + def set(self, key, value, timeout=None, version=None): expire = timeout if 
isinstance(timeout, int) or isinstance(timeout, - float) else timeout.total_seconds() - return self.cache.set(key, value=value, expire=expire) + float) or timeout is None else timeout.total_seconds() + return self.cache.set(self.get_key(key, version), value=value, expire=expire) def get(self, key, default=None, version=None): - return self.cache.get(key, default=default) + return self.cache.get(self.get_key(key, version), default=default) + + @staticmethod + def get_key(key, version): + if version is None: + return f"default:{key}" + return f"{version}:{key}" def delete(self, key, version=None): - return self.cache.delete(key) + return self.cache.delete(self.get_key(key, version)) - def touch(self, key, timeout=DEFAULT_TIMEOUT, version=None): + def touch(self, key, timeout=None, version=None): expire = timeout if isinstance(timeout, int) or isinstance(timeout, float) else timeout.total_seconds() - return self.cache.touch(key, expire=expire) + return self.cache.touch(self.get_key(key, version), expire=expire) - def ttl(self, key): + def ttl(self, key, version=None): """ 获取key的剩余时间 :param key: key :return: 剩余时间 + @param version: """ - value, expire_time = self.cache.get(key, expire_time=True) + value, expire_time = self.cache.get(self.get_key(key, version), expire_time=True) if value is None: return None return datetime.timedelta(seconds=math.ceil(expire_time - time.time())) + + def clear_by_application_id(self, application_id): + delete_keys = [] + for key in self.cache.iterkeys(): + value = self.cache.get(key) + if (hasattr(value, + 'application') and value.application is not None and value.application.id is not None and + str( + value.application.id) == application_id): + delete_keys.append(key) + for key in delete_keys: + self.cache.delete(key) + + def clear_timeout_data(self): + for key in self.cache.iterkeys(): + self.get(key) diff --git a/apps/common/cache/mem_cache.py b/apps/common/cache/mem_cache.py index 9bb6c45ba1b..5afb1e56265 100644 --- 
a/apps/common/cache/mem_cache.py +++ b/apps/common/cache/mem_cache.py @@ -29,3 +29,19 @@ def get(self, key, default=None, version=None): pickled = self._cache[key] self._cache.move_to_end(key, last=False) return pickled + + def clear_by_application_id(self, application_id): + delete_keys = [] + for key in self._cache.keys(): + value = self._cache.get(key) + if (hasattr(value, + 'application') and value.application is not None and value.application.id is not None and + str( + value.application.id) == application_id): + delete_keys.append(key) + for key in delete_keys: + self._delete(key) + + def clear_timeout_data(self): + for key in self._cache.keys(): + self.get(key) diff --git a/apps/common/cache_data/application_access_token_cache.py b/apps/common/cache_data/application_access_token_cache.py new file mode 100644 index 00000000000..54f2a7e5405 --- /dev/null +++ b/apps/common/cache_data/application_access_token_cache.py @@ -0,0 +1,31 @@ +# coding=utf-8 +""" + @project: MaxKB + @Author:虎 + @file: application_access_token_cache.py + @date:2024/7/25 11:34 + @desc: +""" +from django.core.cache import cache +from django.db.models import QuerySet + +from application.models.api_key_model import ApplicationAccessToken +from common.constants.cache_code_constants import CacheCodeConstants +from common.util.cache_util import get_cache + + +@get_cache(cache_key=lambda access_token, use_get_data: access_token, + use_get_data=lambda access_token, use_get_data: use_get_data, + version=CacheCodeConstants.APPLICATION_ACCESS_TOKEN_CACHE.value) +def get_application_access_token(access_token, use_get_data): + application_access_token = QuerySet(ApplicationAccessToken).filter(access_token=access_token).first() + if application_access_token is None: + return None + return {'white_active': application_access_token.white_active, + 'white_list': application_access_token.white_list, + 'application_icon': application_access_token.application.icon, + 'application_name': 
application_access_token.application.name} + + +def del_application_access_token(access_token): + cache.delete(access_token, version=CacheCodeConstants.APPLICATION_ACCESS_TOKEN_CACHE.value) diff --git a/apps/common/cache_data/application_api_key_cache.py b/apps/common/cache_data/application_api_key_cache.py new file mode 100644 index 00000000000..a7d810cee38 --- /dev/null +++ b/apps/common/cache_data/application_api_key_cache.py @@ -0,0 +1,27 @@ +# coding=utf-8 +""" + @project: MaxKB + @Author:虎 + @file: application_api_key_cache.py + @date:2024/7/25 11:30 + @desc: +""" +from django.core.cache import cache +from django.db.models import QuerySet + +from application.models.api_key_model import ApplicationApiKey +from common.constants.cache_code_constants import CacheCodeConstants +from common.util.cache_util import get_cache + + +@get_cache(cache_key=lambda secret_key, use_get_data: secret_key, + use_get_data=lambda secret_key, use_get_data: use_get_data, + version=CacheCodeConstants.APPLICATION_API_KEY_CACHE.value) +def get_application_api_key(secret_key, use_get_data): + application_api_key = QuerySet(ApplicationApiKey).filter(secret_key=secret_key).first() + return {'allow_cross_domain': application_api_key.allow_cross_domain, + 'cross_domain_list': application_api_key.cross_domain_list} + + +def del_application_api_key(secret_key): + cache.delete(secret_key, version=CacheCodeConstants.APPLICATION_API_KEY_CACHE.value) diff --git a/apps/common/cache_data/static_resource_cache.py b/apps/common/cache_data/static_resource_cache.py new file mode 100644 index 00000000000..1bb84e96729 --- /dev/null +++ b/apps/common/cache_data/static_resource_cache.py @@ -0,0 +1,19 @@ +# coding=utf-8 +""" + @project: MaxKB + @Author:虎 + @file: static_resource_cache.py + @date:2024/7/25 11:30 + @desc: +""" +from common.constants.cache_code_constants import CacheCodeConstants +from common.util.cache_util import get_cache + + +@get_cache(cache_key=lambda index_path: index_path, + 
version=CacheCodeConstants.STATIC_RESOURCE_CACHE.value) +def get_index_html(index_path): + file = open(index_path, "r", encoding='utf-8') + content = file.read() + file.close() + return content diff --git a/apps/common/chunk/__init__.py b/apps/common/chunk/__init__.py new file mode 100644 index 00000000000..a4babde7680 --- /dev/null +++ b/apps/common/chunk/__init__.py @@ -0,0 +1,18 @@ +# coding=utf-8 +""" + @project: MaxKB + @Author:虎 + @file: __init__.py + @date:2024/7/23 17:03 + @desc: +""" +from common.chunk.impl.mark_chunk_handle import MarkChunkHandle + +handles = [MarkChunkHandle()] + + +def text_to_chunk(text: str): + chunk_list = [text] + for handle in handles: + chunk_list = handle.handle(chunk_list) + return chunk_list diff --git a/apps/common/chunk/i_chunk_handle.py b/apps/common/chunk/i_chunk_handle.py new file mode 100644 index 00000000000..d53575d11a8 --- /dev/null +++ b/apps/common/chunk/i_chunk_handle.py @@ -0,0 +1,16 @@ +# coding=utf-8 +""" + @project: MaxKB + @Author:虎 + @file: i_chunk_handle.py + @date:2024/7/23 16:51 + @desc: +""" +from abc import ABC, abstractmethod +from typing import List + + +class IChunkHandle(ABC): + @abstractmethod + def handle(self, chunk_list: List[str]): + pass diff --git a/apps/common/chunk/impl/mark_chunk_handle.py b/apps/common/chunk/impl/mark_chunk_handle.py new file mode 100644 index 00000000000..5bca2f4450b --- /dev/null +++ b/apps/common/chunk/impl/mark_chunk_handle.py @@ -0,0 +1,40 @@ +# coding=utf-8 +""" + @project: MaxKB + @Author:虎 + @file: mark_chunk_handle.py + @date:2024/7/23 16:52 + @desc: +""" +import re +from typing import List + +from common.chunk.i_chunk_handle import IChunkHandle + +max_chunk_len = 256 +split_chunk_pattern = r'.{1,%d}[。| |\\.|!|;|;|!|\n]' % max_chunk_len +max_chunk_pattern = r'.{1,%d}' % max_chunk_len + + +class MarkChunkHandle(IChunkHandle): + def handle(self, chunk_list: List[str]): + result = [] + for chunk in chunk_list: + chunk_result = re.findall(split_chunk_pattern, chunk, 
flags=re.DOTALL) + for c_r in chunk_result: + if len(c_r.strip()) > 0: + result.append(c_r.strip()) + + other_chunk_list = re.split(split_chunk_pattern, chunk, flags=re.DOTALL) + for other_chunk in other_chunk_list: + if len(other_chunk) > 0: + if len(other_chunk) < max_chunk_len: + if len(other_chunk.strip()) > 0: + result.append(other_chunk.strip()) + else: + max_chunk_list = re.findall(max_chunk_pattern, other_chunk, flags=re.DOTALL) + for m_c in max_chunk_list: + if len(m_c.strip()) > 0: + result.append(m_c.strip()) + + return result diff --git a/apps/common/config/embedding_config.py b/apps/common/config/embedding_config.py index 367dce7f877..69081be055d 100644 --- a/apps/common/config/embedding_config.py +++ b/apps/common/config/embedding_config.py @@ -6,30 +6,60 @@ @date:2023/10/23 16:03 @desc: """ -from langchain_community.embeddings import HuggingFaceEmbeddings +import threading +import time -from smartdoc.const import CONFIG +from common.cache.mem_cache import MemCache +_lock = threading.Lock() +locks = {} -class EmbeddingModel: - instance = None + +class ModelManage: + cache = MemCache('model', {}) + up_clear_time = time.time() + + @staticmethod + def _get_lock(_id): + lock = locks.get(_id) + if lock is None: + with _lock: + lock = locks.get(_id) + if lock is None: + lock = threading.Lock() + locks[_id] = lock + + return lock + + @staticmethod + def get_model(_id, get_model): + model_instance = ModelManage.cache.get(_id) + if model_instance is None: + lock = ModelManage._get_lock(_id) + with lock: + model_instance = ModelManage.cache.get(_id) + if model_instance is None: + model_instance = get_model(_id) + ModelManage.cache.set(_id, model_instance, timeout=60 * 60 * 8) + else: + if model_instance.is_cache_model(): + ModelManage.cache.touch(_id, timeout=60 * 60 * 8) + else: + model_instance = get_model(_id) + ModelManage.cache.set(_id, model_instance, timeout=60 * 60 * 8) + ModelManage.clear_timeout_cache() + return model_instance + + @staticmethod + def 
clear_timeout_cache(): + if time.time() - ModelManage.up_clear_time > 60 * 60: + threading.Thread(target=lambda: ModelManage.cache.clear_timeout_data()).start() + ModelManage.up_clear_time = time.time() @staticmethod - def get_embedding_model(): - """ - 获取向量化模型 - :return: - """ - if EmbeddingModel.instance is None: - model_name = CONFIG.get('EMBEDDING_MODEL_NAME') - cache_folder = CONFIG.get('EMBEDDING_MODEL_PATH') - device = CONFIG.get('EMBEDDING_DEVICE') - e = HuggingFaceEmbeddings( - model_name=model_name, - cache_folder=cache_folder, - model_kwargs={'device': device}) - EmbeddingModel.instance = e - return EmbeddingModel.instance + def delete_key(_id): + if ModelManage.cache.has_key(_id): + ModelManage.cache.delete(_id) class VectorStore: diff --git a/apps/common/config/swagger_conf.py b/apps/common/config/swagger_conf.py index b3ba2720de5..d17486d532d 100644 --- a/apps/common/config/swagger_conf.py +++ b/apps/common/config/swagger_conf.py @@ -6,7 +6,7 @@ @date:2023/9/5 14:01 @desc: 用于swagger 分组 """ - +from drf_yasg.generators import OpenAPISchemaGenerator from drf_yasg.inspectors import SwaggerAutoSchema tags_dict = { @@ -20,3 +20,10 @@ def get_tags(self, operation_keys=None): if "api" in tags and operation_keys: return [tags_dict.get(operation_keys[1]) if operation_keys[1] in tags_dict else operation_keys[1]] return tags + + +class CustomOpenAPISchemaGenerator(OpenAPISchemaGenerator): + def get_schema(self, request=None, public=False): + schema = super().get_schema(request, public) + schema.schemes = ['https', 'http'] + return schema diff --git a/apps/common/constants/authentication_type.py b/apps/common/constants/authentication_type.py index 33163003efa..83586ee3b84 100644 --- a/apps/common/constants/authentication_type.py +++ b/apps/common/constants/authentication_type.py @@ -16,3 +16,5 @@ class AuthenticationType(Enum): APPLICATION_ACCESS_TOKEN = "APPLICATION_ACCESS_TOKEN" # key API API_KEY = "API_KEY" + # 第三方对接 + PLATFORM = 'PLATFORM' diff --git 
a/apps/common/constants/cache_code_constants.py b/apps/common/constants/cache_code_constants.py new file mode 100644 index 00000000000..dd64805f0fd --- /dev/null +++ b/apps/common/constants/cache_code_constants.py @@ -0,0 +1,18 @@ +# coding=utf-8 +""" + @project: MaxKB + @Author:虎 + @file: cache_code_constants.py + @date:2024/7/24 18:20 + @desc: +""" +from enum import Enum + + +class CacheCodeConstants(Enum): + # 应用ACCESS_TOKEN缓存 + APPLICATION_ACCESS_TOKEN_CACHE = 'APPLICATION_ACCESS_TOKEN_CACHE' + # 静态资源缓存 + STATIC_RESOURCE_CACHE = 'STATIC_RESOURCE_CACHE' + # 应用API_KEY缓存 + APPLICATION_API_KEY_CACHE = 'APPLICATION_API_KEY_CACHE' diff --git a/apps/common/constants/exception_code_constants.py b/apps/common/constants/exception_code_constants.py index ba7a8105f14..821318d239e 100644 --- a/apps/common/constants/exception_code_constants.py +++ b/apps/common/constants/exception_code_constants.py @@ -9,6 +9,7 @@ from enum import Enum from common.exception.app_exception import AppApiException +from django.utils.translation import gettext_lazy as _ class ExceptionCodeConstantsValue: @@ -27,13 +28,16 @@ def to_app_api_exception(self): class ExceptionCodeConstants(Enum): - INCORRECT_USERNAME_AND_PASSWORD = ExceptionCodeConstantsValue(1000, "用户名或者密码不正确") - NOT_AUTHENTICATION = ExceptionCodeConstantsValue(1001, "请先登录,并携带用户Token") - EMAIL_SEND_ERROR = ExceptionCodeConstantsValue(1002, "邮件发送失败") - EMAIL_FORMAT_ERROR = ExceptionCodeConstantsValue(1003, "邮箱格式错误") - EMAIL_IS_EXIST = ExceptionCodeConstantsValue(1004, "邮箱已经被注册,请勿重复注册") - EMAIL_IS_NOT_EXIST = ExceptionCodeConstantsValue(1005, "邮箱尚未注册,请先注册") - CODE_ERROR = ExceptionCodeConstantsValue(1005, "验证码不正确,或者验证码过期") - USERNAME_IS_EXIST = ExceptionCodeConstantsValue(1006, "用户名已被使用,请使用其他用户名") - USERNAME_ERROR = ExceptionCodeConstantsValue(1006, "用户名不能为空,并且长度在6-20") - PASSWORD_NOT_EQ_RE_PASSWORD = ExceptionCodeConstantsValue(1007, "密码与确认密码不一致") + INCORRECT_USERNAME_AND_PASSWORD = ExceptionCodeConstantsValue(1000, _('The username or 
password is incorrect')) + NOT_AUTHENTICATION = ExceptionCodeConstantsValue(1001, _('Please log in first and bring the user Token')) + EMAIL_SEND_ERROR = ExceptionCodeConstantsValue(1002, _('Email sending failed')) + EMAIL_FORMAT_ERROR = ExceptionCodeConstantsValue(1003, _('Email format error')) + EMAIL_IS_EXIST = ExceptionCodeConstantsValue(1004, _('The email has been registered, please log in directly')) + EMAIL_IS_NOT_EXIST = ExceptionCodeConstantsValue(1005, _('The email is not registered, please register first')) + CODE_ERROR = ExceptionCodeConstantsValue(1005, + _('The verification code is incorrect or the verification code has expired')) + USERNAME_IS_EXIST = ExceptionCodeConstantsValue(1006, _('The username has been registered, please log in directly')) + USERNAME_ERROR = ExceptionCodeConstantsValue(1006, + _('The username cannot be empty and must be between 6 and 20 characters long.')) + PASSWORD_NOT_EQ_RE_PASSWORD = ExceptionCodeConstantsValue(1007, + _('Password and confirmation password are inconsistent')) diff --git a/apps/common/constants/permission_constants.py b/apps/common/constants/permission_constants.py index 04f86bbc796..a5c198da7e5 100644 --- a/apps/common/constants/permission_constants.py +++ b/apps/common/constants/permission_constants.py @@ -7,7 +7,7 @@ """ from enum import Enum from typing import List - +from django.utils.translation import gettext_lazy as _ class Group(Enum): """ @@ -58,10 +58,10 @@ def __init__(self, name: str, decs: str, group: RoleGroup): class RoleConstants(Enum): - ADMIN = Role("管理员", "管理员,预制目前不会使用", RoleGroup.USER) - USER = Role("用户", "用户所有权限", RoleGroup.USER) - APPLICATION_ACCESS_TOKEN = Role("会话", "只拥有应用会话框接口权限", RoleGroup.APPLICATION_ACCESS_TOKEN), - APPLICATION_KEY = Role("应用私钥", "应用私钥", RoleGroup.APPLICATION_KEY) + ADMIN = Role(_("ADMIN"), _('Admin, prefabs are not currently used'), RoleGroup.USER) + USER = Role(_("USER"), _('All user permissions'), RoleGroup.USER) + APPLICATION_ACCESS_TOKEN = Role(_('chat'), 
_('Only has application dialog interface permissions'), RoleGroup.APPLICATION_ACCESS_TOKEN), + APPLICATION_KEY = Role(_('Apply private key'), _('Apply private key'), RoleGroup.APPLICATION_KEY) class Permission: diff --git a/apps/common/db/compiler.py b/apps/common/db/compiler.py index 9a65f93e1b7..69640c8a082 100644 --- a/apps/common/db/compiler.py +++ b/apps/common/db/compiler.py @@ -7,9 +7,10 @@ @desc: """ -from django.core.exceptions import EmptyResultSet +from django.core.exceptions import EmptyResultSet, FullResultSet from django.db import NotSupportedError from django.db.models.sql.compiler import SQLCompiler +from django.db.transaction import TransactionManagementError class AppSQLCompiler(SQLCompiler): @@ -19,15 +20,16 @@ def __init__(self, query, connection, using, elide_empty=True, field_replace_dic field_replace_dict = {} self.field_replace_dict = field_replace_dict - def get_query_str(self, with_limits=True, with_table_name=False): + def get_query_str(self, with_limits=True, with_table_name=False, with_col_aliases=False): refcounts_before = self.query.alias_refcount.copy() try: - extra_select, order_by, group_by = self.pre_sql_setup() + combinator = self.query.combinator + extra_select, order_by, group_by = self.pre_sql_setup( + with_col_aliases=with_col_aliases or bool(combinator), + ) for_update_part = None # Is a LIMIT/OFFSET clause needed? 
- with_limit_offset = with_limits and ( - self.query.high_mark is not None or self.query.low_mark - ) + with_limit_offset = with_limits and self.query.is_sliced combinator = self.query.combinator features = self.connection.features if combinator: @@ -40,8 +42,14 @@ def get_query_str(self, with_limits=True, with_table_name=False): result, params = self.get_combinator_sql( combinator, self.query.combinator_all ) + elif self.qualify: + result, params = self.get_qualify_sql() + order_by = None else: distinct_fields, distinct_params = self.get_distinct() + # This must come after 'select', 'ordering', and 'distinct' + # (see docstring of get_from_clause() for details). + from_, f_params = self.get_from_clause() try: where, w_params = ( self.compile(self.where) if self.where is not None else ("", []) @@ -51,11 +59,92 @@ def get_query_str(self, with_limits=True, with_table_name=False): raise # Use a predicate that's always False. where, w_params = "0 = 1", [] - having, h_params = ( - self.compile(self.having) if self.having is not None else ("", []) - ) + except FullResultSet: + where, w_params = "", [] + try: + having, h_params = ( + self.compile(self.having) + if self.having is not None + else ("", []) + ) + except FullResultSet: + having, h_params = "", [] result = [] params = [] + + if self.query.distinct: + distinct_result, distinct_params = self.connection.ops.distinct_sql( + distinct_fields, + distinct_params, + ) + result += distinct_result + params += distinct_params + + out_cols = [] + for _, (s_sql, s_params), alias in self.select + extra_select: + if alias: + s_sql = "%s AS %s" % ( + s_sql, + self.connection.ops.quote_name(alias), + ) + params.extend(s_params) + out_cols.append(s_sql) + + params.extend(f_params) + + if self.query.select_for_update and features.has_select_for_update: + if ( + self.connection.get_autocommit() + # Don't raise an exception when database doesn't + # support transactions, as it's a noop. 
+ and features.supports_transactions + ): + raise TransactionManagementError( + "select_for_update cannot be used outside of a transaction." + ) + + if ( + with_limit_offset + and not features.supports_select_for_update_with_limit + ): + raise NotSupportedError( + "LIMIT/OFFSET is not supported with " + "select_for_update on this database backend." + ) + nowait = self.query.select_for_update_nowait + skip_locked = self.query.select_for_update_skip_locked + of = self.query.select_for_update_of + no_key = self.query.select_for_no_key_update + # If it's a NOWAIT/SKIP LOCKED/OF/NO KEY query but the + # backend doesn't support it, raise NotSupportedError to + # prevent a possible deadlock. + if nowait and not features.has_select_for_update_nowait: + raise NotSupportedError( + "NOWAIT is not supported on this database backend." + ) + elif skip_locked and not features.has_select_for_update_skip_locked: + raise NotSupportedError( + "SKIP LOCKED is not supported on this database backend." + ) + elif of and not features.has_select_for_update_of: + raise NotSupportedError( + "FOR UPDATE OF is not supported on this database backend." + ) + elif no_key and not features.has_select_for_no_key_update: + raise NotSupportedError( + "FOR NO KEY UPDATE is not supported on this " + "database backend." 
+ ) + for_update_part = self.connection.ops.for_update_sql( + nowait=nowait, + skip_locked=skip_locked, + of=self.get_select_for_update_of_arguments(), + no_key=no_key, + ) + + if for_update_part and features.for_update_after_from: + result.append(for_update_part) + if where: result.append("WHERE %s" % where) params.extend(w_params) @@ -91,7 +180,11 @@ def get_query_str(self, with_limits=True, with_table_name=False): for _, (o_sql, o_params, _) in order_by: ordering.append(o_sql) params.extend(o_params) - result.append("ORDER BY %s" % ", ".join(ordering)) + order_by_sql = "ORDER BY %s" % ", ".join(ordering) + if combinator and features.requires_compound_order_by_subquery: + result = ["SELECT * FROM (", *result, ")", order_by_sql] + else: + result.append(order_by_sql) if with_limit_offset: result.append( @@ -102,6 +195,7 @@ def get_query_str(self, with_limits=True, with_table_name=False): if for_update_part and not features.for_update_after_from: result.append(for_update_part) + from_, f_params = self.get_from_clause() sql = " ".join(result) if not with_table_name: diff --git a/apps/common/db/search.py b/apps/common/db/search.py index 76366715439..bef42a1414a 100644 --- a/apps/common/db/search.py +++ b/apps/common/db/search.py @@ -12,7 +12,7 @@ from django.db.models import QuerySet from common.db.compiler import AppSQLCompiler -from common.db.sql_execute import select_one, select_list +from common.db.sql_execute import select_one, select_list, update_execute from common.response.result import Page @@ -109,6 +109,24 @@ def native_search(queryset: QuerySet | Dict[str, QuerySet], select_string: str, return select_list(exec_sql, exec_params) +def native_update(queryset: QuerySet | Dict[str, QuerySet], select_string: str, + field_replace_dict: None | Dict[str, Dict[str, str]] | Dict[str, str] = None, + with_table_name=False): + """ + 复杂查询 + :param with_table_name: 生成sql是否包含表名 + :param queryset: 查询条件构造器 + :param select_string: 查询前缀 不包括 where limit 等信息 + :param 
field_replace_dict: 需要替换的字段 + :return: 查询结果 + """ + if isinstance(queryset, Dict): + exec_sql, exec_params = generate_sql_by_query_dict(queryset, select_string, field_replace_dict, with_table_name) + else: + exec_sql, exec_params = generate_sql_by_query(queryset, select_string, field_replace_dict, with_table_name) + return update_execute(exec_sql, exec_params) + + def page_search(current_page: int, page_size: int, queryset: QuerySet, post_records_handler): """ 分页查询 diff --git a/apps/common/db/sql_execute.py b/apps/common/db/sql_execute.py index 79e7de46a18..b12297e1f9a 100644 --- a/apps/common/db/sql_execute.py +++ b/apps/common/db/sql_execute.py @@ -36,8 +36,9 @@ def update_execute(sql: str, params): """ with connection.cursor() as cursor: cursor.execute(sql, params) + affected_rows = cursor.rowcount cursor.close() - return None + return affected_rows def select_list(sql: str, params: List): diff --git a/apps/common/encoder/encoder.py b/apps/common/encoder/encoder.py new file mode 100644 index 00000000000..02d0ec88cc9 --- /dev/null +++ b/apps/common/encoder/encoder.py @@ -0,0 +1,30 @@ +# coding=utf-8 +""" + @project: MaxKB + @Author:虎 + @file: SystemEncoder.py + @date:2025/3/17 16:38 + @desc: +""" +import datetime +import decimal +import json +import uuid + +from django.core.files.uploadedfile import InMemoryUploadedFile, TemporaryUploadedFile + + +class SystemEncoder(json.JSONEncoder): + def default(self, obj): + if isinstance(obj, uuid.UUID): + return str(obj) + if isinstance(obj, datetime.datetime): + return obj.strftime("%Y-%m-%d %H:%M:%S") + if isinstance(obj, decimal.Decimal): + return float(obj) + if isinstance(obj, InMemoryUploadedFile): + return {'name': obj.name, 'size': obj.size} + if isinstance(obj, TemporaryUploadedFile): + return {'name': obj.name, 'size': obj.size} + else: + return json.JSONEncoder.default(self, obj) diff --git a/apps/common/event/__init__.py b/apps/common/event/__init__.py index 909740b7ab2..ddf6dd6f9f2 100644 --- 
a/apps/common/event/__init__.py +++ b/apps/common/event/__init__.py @@ -9,10 +9,25 @@ import setting.models from setting.models import Model from .listener_manage import * +from django.utils.translation import gettext as _ + +from ..db.sql_execute import update_execute +from common.lock.impl.file_lock import FileLock + +lock = FileLock() +update_document_status_sql = """ +UPDATE "public"."document" +SET status ="replace"("replace"("replace"(status, '1', '3'), '0', '3'), '4', '3') +WHERE status ~ '1|0|4' +""" def run(): - listener_manage.ListenerManagement().run() - QuerySet(Document).filter(status=Status.embedding).update(**{'status': Status.error}) - QuerySet(Model).filter(status=setting.models.Status.DOWNLOAD).update(status=setting.models.Status.ERROR, - meta={'message': "下载程序被中断,请重试"}) + if lock.try_lock('event_init', 30 * 30): + try: + QuerySet(Model).filter(status=setting.models.Status.DOWNLOAD).update(status=setting.models.Status.ERROR, + meta={'message': _( + 'The download process was interrupted, please try again')}) + update_execute(update_document_status_sql, []) + finally: + lock.un_lock('event_init') diff --git a/apps/common/event/common.py b/apps/common/event/common.py index e35123758cf..a54d24df23c 100644 --- a/apps/common/event/common.py +++ b/apps/common/event/common.py @@ -8,20 +8,43 @@ """ from concurrent.futures import ThreadPoolExecutor +from django.core.cache.backends.locmem import LocMemCache + work_thread_pool = ThreadPoolExecutor(5) embedding_thread_pool = ThreadPoolExecutor(3) +memory_cache = LocMemCache('task', {"OPTIONS": {"MAX_ENTRIES": 1000}}) + def poxy(poxy_function): - def inner(args): - work_thread_pool.submit(poxy_function, args) + def inner(args, **keywords): + work_thread_pool.submit(poxy_function, args, **keywords) return inner +def get_cache_key(poxy_function, args): + return poxy_function.__name__ + str(args) + + +def get_cache_poxy_function(poxy_function, cache_key): + def fun(args, **keywords): + try: + poxy_function(args, 
**keywords) + finally: + memory_cache.delete(cache_key) + + return fun + + def embedding_poxy(poxy_function): - def inner(args): - embedding_thread_pool.submit(poxy_function, args) + def inner(*args, **keywords): + key = get_cache_key(poxy_function, args) + if memory_cache.has_key(key): + return + memory_cache.add(key, None) + f = get_cache_poxy_function(poxy_function, key) + embedding_thread_pool.submit(f, args, **keywords) return inner diff --git a/apps/common/event/listener_manage.py b/apps/common/event/listener_manage.py index 415d20562ec..dd2a54a0c67 100644 --- a/apps/common/event/listener_manage.py +++ b/apps/common/event/listener_manage.py @@ -8,25 +8,29 @@ """ import logging import os +import threading +import datetime import traceback from typing import List import django.db.models -from blinker import signal from django.db.models import QuerySet +from django.db.models.functions import Substr, Reverse +from langchain_core.embeddings import Embeddings -from common.config.embedding_config import VectorStore, EmbeddingModel -from common.db.search import native_search, get_dynamics_model -from common.event.common import poxy, embedding_poxy +from common.config.embedding_config import VectorStore +from common.db.search import native_search, get_dynamics_model, native_update from common.util.file_util import get_file_content -from common.util.fork import ForkManage, Fork from common.util.lock import try_lock, un_lock -from dataset.models import Paragraph, Status, Document, ProblemParagraphMapping -from embedding.models import SourceType +from common.util.page_utils import page_desc +from dataset.models import Paragraph, Status, Document, ProblemParagraphMapping, TaskType, State +from embedding.models import SourceType, SearchMode from smartdoc.conf import PROJECT_DIR +from django.utils.translation import gettext_lazy as _ -max_kb_error = logging.getLogger("max_kb_error") -max_kb = logging.getLogger("max_kb") +max_kb_error = logging.getLogger(__file__) +max_kb = 
logging.getLogger(__file__) +lock = threading.Lock() class SyncWebDatasetArgs: @@ -45,9 +49,10 @@ def __init__(self, source_url_list: List[str], selector: str, handler): class UpdateProblemArgs: - def __init__(self, problem_id: str, problem_content: str): + def __init__(self, problem_id: str, problem_content: str, embedding_model: Embeddings): self.problem_id = problem_id self.problem_content = problem_content + self.embedding_model = embedding_model class UpdateEmbeddingDatasetIdArgs: @@ -57,45 +62,67 @@ def __init__(self, paragraph_id_list: List[str], target_dataset_id: str): class UpdateEmbeddingDocumentIdArgs: - def __init__(self, paragraph_id_list: List[str], target_document_id: str, target_dataset_id: str): + def __init__(self, paragraph_id_list: List[str], target_document_id: str, target_dataset_id: str, + target_embedding_model: Embeddings = None): self.paragraph_id_list = paragraph_id_list self.target_document_id = target_document_id self.target_dataset_id = target_dataset_id + self.target_embedding_model = target_embedding_model class ListenerManagement: - embedding_by_problem_signal = signal("embedding_by_problem") - embedding_by_paragraph_signal = signal("embedding_by_paragraph") - embedding_by_dataset_signal = signal("embedding_by_dataset") - embedding_by_document_signal = signal("embedding_by_document") - delete_embedding_by_document_signal = signal("delete_embedding_by_document") - delete_embedding_by_document_list_signal = signal("delete_embedding_by_document_list") - delete_embedding_by_dataset_signal = signal("delete_embedding_by_dataset") - delete_embedding_by_paragraph_signal = signal("delete_embedding_by_paragraph") - delete_embedding_by_source_signal = signal("delete_embedding_by_source") - enable_embedding_by_paragraph_signal = signal('enable_embedding_by_paragraph') - disable_embedding_by_paragraph_signal = signal('disable_embedding_by_paragraph') - init_embedding_model_signal = signal('init_embedding_model') - sync_web_dataset_signal = 
signal('sync_web_dataset') - sync_web_document_signal = signal('sync_web_document') - update_problem_signal = signal('update_problem') - delete_embedding_by_source_ids_signal = signal('delete_embedding_by_source_ids') - delete_embedding_by_dataset_id_list_signal = signal("delete_embedding_by_dataset_id_list") @staticmethod - def embedding_by_problem(args): - VectorStore.get_embedding_vector().save(**args) + def embedding_by_problem(args, embedding_model: Embeddings): + VectorStore.get_embedding_vector().save(**args, embedding=embedding_model) @staticmethod - @embedding_poxy - def embedding_by_paragraph(paragraph_id): + def embedding_by_paragraph_list(paragraph_id_list, embedding_model: Embeddings): + try: + data_list = native_search( + {'problem': QuerySet(get_dynamics_model({'paragraph.id': django.db.models.CharField()})).filter( + **{'paragraph.id__in': paragraph_id_list}), + 'paragraph': QuerySet(Paragraph).filter(id__in=paragraph_id_list)}, + select_string=get_file_content( + os.path.join(PROJECT_DIR, "apps", "common", 'sql', 'list_embedding_text.sql'))) + ListenerManagement.embedding_by_paragraph_data_list(data_list, paragraph_id_list=paragraph_id_list, + embedding_model=embedding_model) + except Exception as e: + max_kb_error.error(_('Query vector data: {paragraph_id_list} error {error} {traceback}').format( + paragraph_id_list=paragraph_id_list, error=str(e), traceback=traceback.format_exc())) + + @staticmethod + def embedding_by_paragraph_data_list(data_list, paragraph_id_list, embedding_model: Embeddings): + max_kb.info(_('Start--->Embedding paragraph: {paragraph_id_list}').format(paragraph_id_list=paragraph_id_list)) + status = Status.success + try: + # 删除段落 + VectorStore.get_embedding_vector().delete_by_paragraph_ids(paragraph_id_list) + + def is_save_function(): + return QuerySet(Paragraph).filter(id__in=paragraph_id_list).exists() + + # 批量向量化 + VectorStore.get_embedding_vector().batch_save(data_list, embedding_model, is_save_function) + except 
Exception as e: + max_kb_error.error(_('Vectorized paragraph: {paragraph_id_list} error {error} {traceback}').format( + paragraph_id_list=paragraph_id_list, error=str(e), traceback=traceback.format_exc())) + status = Status.error + finally: + QuerySet(Paragraph).filter(id__in=paragraph_id_list).update(**{'status': status}) + max_kb.info( + _('End--->Embedding paragraph: {paragraph_id_list}').format(paragraph_id_list=paragraph_id_list)) + + @staticmethod + def embedding_by_paragraph(paragraph_id, embedding_model: Embeddings): """ 向量化段落 根据段落id - :param paragraph_id: 段落id - :return: None + @param paragraph_id: 段落id + @param embedding_model: 向量模型 """ - max_kb.info(f"开始--->向量化段落:{paragraph_id}") - status = Status.success + max_kb.info(_('Start--->Embedding paragraph: {paragraph_id}').format(paragraph_id=paragraph_id)) + # 更新到开始状态 + ListenerManagement.update_status(QuerySet(Paragraph).filter(id=paragraph_id), TaskType.EMBEDDING, State.STARTED) try: data_list = native_search( {'problem': QuerySet(get_dynamics_model({'paragraph.id': django.db.models.CharField()})).filter( @@ -105,64 +132,184 @@ def embedding_by_paragraph(paragraph_id): os.path.join(PROJECT_DIR, "apps", "common", 'sql', 'list_embedding_text.sql'))) # 删除段落 VectorStore.get_embedding_vector().delete_by_paragraph_id(paragraph_id) + + def is_the_task_interrupted(): + _paragraph = QuerySet(Paragraph).filter(id=paragraph_id).first() + if _paragraph is None or Status(_paragraph.status)[TaskType.EMBEDDING] == State.REVOKE: + return True + return False + # 批量向量化 - VectorStore.get_embedding_vector().batch_save(data_list) + VectorStore.get_embedding_vector().batch_save(data_list, embedding_model, is_the_task_interrupted) + # 更新到开始状态 + ListenerManagement.update_status(QuerySet(Paragraph).filter(id=paragraph_id), TaskType.EMBEDDING, + State.SUCCESS) except Exception as e: - max_kb_error.error(f'向量化段落:{paragraph_id}出现错误{str(e)}{traceback.format_exc()}') - status = Status.error + max_kb_error.error(_('Vectorized paragraph: 
{paragraph_id} error {error} {traceback}').format( + paragraph_id=paragraph_id, error=str(e), traceback=traceback.format_exc())) + ListenerManagement.update_status(QuerySet(Paragraph).filter(id=paragraph_id), TaskType.EMBEDDING, + State.FAILURE) finally: - QuerySet(Paragraph).filter(id=paragraph_id).update(**{'status': status}) - max_kb.info(f'结束--->向量化段落:{paragraph_id}') + max_kb.info(_('End--->Embedding paragraph: {paragraph_id}').format(paragraph_id=paragraph_id)) @staticmethod - @embedding_poxy - def embedding_by_document(document_id): + def embedding_by_data_list(data_list: List, embedding_model: Embeddings): + # 批量向量化 + VectorStore.get_embedding_vector().batch_save(data_list, embedding_model, lambda: True) + + @staticmethod + def get_embedding_paragraph_apply(embedding_model, is_the_task_interrupted, post_apply=lambda: None): + def embedding_paragraph_apply(paragraph_list): + for paragraph in paragraph_list: + if is_the_task_interrupted(): + break + ListenerManagement.embedding_by_paragraph(str(paragraph.get('id')), embedding_model) + post_apply() + + return embedding_paragraph_apply + + @staticmethod + def get_aggregation_document_status(document_id): + def aggregation_document_status(): + pass + sql = get_file_content( + os.path.join(PROJECT_DIR, "apps", "dataset", 'sql', 'update_document_status_meta.sql')) + native_update({'document_custom_sql': QuerySet(Document).filter(id=document_id)}, sql, with_table_name=True) + + return aggregation_document_status + + @staticmethod + def get_aggregation_document_status_by_dataset_id(dataset_id): + def aggregation_document_status(): + sql = get_file_content( + os.path.join(PROJECT_DIR, "apps", "dataset", 'sql', 'update_document_status_meta.sql')) + native_update({'document_custom_sql': QuerySet(Document).filter(dataset_id=dataset_id)}, sql, + with_table_name=True) + + return aggregation_document_status + + @staticmethod + def get_aggregation_document_status_by_query_set(queryset): + def aggregation_document_status(): 
+ sql = get_file_content( + os.path.join(PROJECT_DIR, "apps", "dataset", 'sql', 'update_document_status_meta.sql')) + native_update({'document_custom_sql': queryset}, sql, with_table_name=True) + + return aggregation_document_status + + @staticmethod + def post_update_document_status(document_id, task_type: TaskType): + _document = QuerySet(Document).filter(id=document_id).first() + + status = Status(_document.status) + if status[task_type] == State.REVOKE: + status[task_type] = State.REVOKED + else: + status[task_type] = State.SUCCESS + for item in _document.status_meta.get('aggs', []): + agg_status = item.get('status') + agg_count = item.get('count') + if Status(agg_status)[task_type] == State.FAILURE and agg_count > 0: + status[task_type] = State.FAILURE + ListenerManagement.update_status(QuerySet(Document).filter(id=document_id), task_type, status[task_type]) + + ListenerManagement.update_status(QuerySet(Paragraph).annotate( + reversed_status=Reverse('status'), + task_type_status=Substr('reversed_status', task_type.value, + task_type.value), + ).filter(task_type_status=State.REVOKE.value).filter(document_id=document_id).values('id'), + task_type, + State.REVOKED) + + @staticmethod + def update_status(query_set: QuerySet, taskType: TaskType, state: State): + exec_sql = get_file_content( + os.path.join(PROJECT_DIR, "apps", "dataset", 'sql', 'update_paragraph_status.sql')) + bit_number = len(TaskType) + up_index = taskType.value - 1 + next_index = taskType.value + 1 + current_index = taskType.value + status_number = state.value + current_time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f') + '+00' + params_dict = {'${bit_number}': bit_number, '${up_index}': up_index, + '${status_number}': status_number, '${next_index}': next_index, + '${table_name}': query_set.model._meta.db_table, '${current_index}': current_index, + '${current_time}': current_time} + for key in params_dict: + _value_ = params_dict[key] + exec_sql = exec_sql.replace(key, str(_value_)) 
+ with lock: + native_update(query_set, exec_sql) + + @staticmethod + def embedding_by_document(document_id, embedding_model: Embeddings, state_list=None): """ 向量化文档 - :param document_id: 文档id + @param state_list: + @param document_id: 文档id + @param embedding_model 向量模型 :return: None """ - max_kb.info(f"开始--->向量化文档:{document_id}") - status = Status.success + if state_list is None: + state_list = [State.PENDING, State.SUCCESS, State.FAILURE, State.REVOKE, State.REVOKED] + if not try_lock('embedding' + str(document_id)): + return try: - data_list = native_search( - {'problem': QuerySet( - get_dynamics_model({'paragraph.document_id': django.db.models.CharField()})).filter( - **{'paragraph.document_id': document_id}), - 'paragraph': QuerySet(Paragraph).filter(document_id=document_id)}, - select_string=get_file_content( - os.path.join(PROJECT_DIR, "apps", "common", 'sql', 'list_embedding_text.sql'))) - # 删除文档向量数据 - VectorStore.get_embedding_vector().delete_by_document_id(document_id) - # 批量向量化 - VectorStore.get_embedding_vector().batch_save(data_list) + def is_the_task_interrupted(): + document = QuerySet(Document).filter(id=document_id).first() + if document is None or Status(document.status)[TaskType.EMBEDDING] == State.REVOKE: + return True + return False + + if is_the_task_interrupted(): + return + max_kb.info(_('Start--->Embedding document: {document_id}').format(document_id=document_id) + ) + # 批量修改状态为PADDING + ListenerManagement.update_status(QuerySet(Document).filter(id=document_id), TaskType.EMBEDDING, + State.STARTED) + + # 根据段落进行向量化处理 + page_desc(QuerySet(Paragraph) + .annotate( + reversed_status=Reverse('status'), + task_type_status=Substr('reversed_status', TaskType.EMBEDDING.value, + 1), + ).filter(task_type_status__in=state_list, document_id=document_id) + .values('id'), 5, + ListenerManagement.get_embedding_paragraph_apply(embedding_model, is_the_task_interrupted, + ListenerManagement.get_aggregation_document_status( + document_id)), + 
is_the_task_interrupted) except Exception as e: - max_kb_error.error(f'向量化文档:{document_id}出现错误{str(e)}{traceback.format_exc()}') - status = Status.error + max_kb_error.error(_('Vectorized document: {document_id} error {error} {traceback}').format( + document_id=document_id, error=str(e), traceback=traceback.format_exc())) finally: - # 修改状态 - QuerySet(Document).filter(id=document_id).update(**{'status': status}) - QuerySet(Paragraph).filter(document_id=document_id).update(**{'status': status}) - max_kb.info(f"结束--->向量化文档:{document_id}") + ListenerManagement.post_update_document_status(document_id, TaskType.EMBEDDING) + ListenerManagement.get_aggregation_document_status(document_id)() + max_kb.info(_('End--->Embedding document: {document_id}').format(document_id=document_id)) + un_lock('embedding' + str(document_id)) @staticmethod - @embedding_poxy - def embedding_by_dataset(dataset_id): + def embedding_by_dataset(dataset_id, embedding_model: Embeddings): """ 向量化知识库 - :param dataset_id: 知识库id + @param dataset_id: 知识库id + @param embedding_model 向量模型 :return: None """ - max_kb.info(f"开始--->向量化数据集:{dataset_id}") + max_kb.info(_('Start--->Embedding dataset: {dataset_id}').format(dataset_id=dataset_id)) try: + ListenerManagement.delete_embedding_by_dataset(dataset_id) document_list = QuerySet(Document).filter(dataset_id=dataset_id) - max_kb.info(f"数据集文档:{[d.name for d in document_list]}") + max_kb.info(_('Start--->Embedding document: {document_list}').format(document_list=document_list)) for document in document_list: - ListenerManagement.embedding_by_document(document.id) + ListenerManagement.embedding_by_document(document.id, embedding_model=embedding_model) except Exception as e: - max_kb_error.error(f'向量化数据集:{dataset_id}出现错误{str(e)}{traceback.format_exc()}') + max_kb_error.error(_('Vectorized dataset: {dataset_id} error {error} {traceback}').format( + dataset_id=dataset_id, error=str(e), traceback=traceback.format_exc())) finally: - 
max_kb.info(f"结束--->向量化数据集:{dataset_id}") + max_kb.info(_('End--->Embedding dataset: {dataset_id}').format(dataset_id=dataset_id)) @staticmethod def delete_embedding_by_document(document_id): @@ -170,7 +317,7 @@ def delete_embedding_by_document(document_id): @staticmethod def delete_embedding_by_document_list(document_id_list: List[str]): - VectorStore.get_embedding_vector().delete_bu_document_id_list(document_id_list) + VectorStore.get_embedding_vector().delete_by_document_id_list(document_id_list) @staticmethod def delete_embedding_by_dataset(dataset_id): @@ -192,29 +339,10 @@ def disable_embedding_by_paragraph(paragraph_id): def enable_embedding_by_paragraph(paragraph_id): VectorStore.get_embedding_vector().update_by_paragraph_id(paragraph_id, {'is_active': True}) - @staticmethod - @poxy - def sync_web_document(args: SyncWebDocumentArgs): - for source_url in args.source_url_list: - result = Fork(base_fork_url=source_url, selector_list=args.selector.split(' ')).fork() - args.handler(source_url, args.selector, result) - - @staticmethod - @poxy - def sync_web_dataset(args: SyncWebDatasetArgs): - if try_lock('sync_web_dataset' + args.lock_key): - try: - ForkManage(args.url, args.selector.split(" ") if args.selector is not None else []).fork(2, set(), - args.handler) - except Exception as e: - logging.getLogger("max_kb_error").error(f'{str(e)}:{traceback.format_exc()}') - finally: - un_lock('sync_web_dataset' + args.lock_key) - @staticmethod def update_problem(args: UpdateProblemArgs): problem_paragraph_mapping_list = QuerySet(ProblemParagraphMapping).filter(problem_id=args.problem_id) - embed_value = VectorStore.get_embedding_vector().embed_query(args.problem_content) + embed_value = args.embedding_model.embed_query(args.problem_content) VectorStore.get_embedding_vector().update_by_source_ids([v.id for v in problem_paragraph_mapping_list], {'embedding': embed_value}) @@ -225,9 +353,13 @@ def update_embedding_dataset_id(args: UpdateEmbeddingDatasetIdArgs): 
@staticmethod def update_embedding_document_id(args: UpdateEmbeddingDocumentIdArgs): - VectorStore.get_embedding_vector().update_by_paragraph_ids(args.paragraph_id_list, - {'document_id': args.target_document_id, - 'dataset_id': args.target_dataset_id}) + if args.target_embedding_model is None: + VectorStore.get_embedding_vector().update_by_paragraph_ids(args.paragraph_id_list, + {'document_id': args.target_document_id, + 'dataset_id': args.target_dataset_id}) + else: + ListenerManagement.embedding_by_paragraph_list(args.paragraph_id_list, + embedding_model=args.target_embedding_model) @staticmethod def delete_embedding_by_source_ids(source_ids: List[str]): @@ -242,43 +374,9 @@ def delete_embedding_by_dataset_id_list(source_ids: List[str]): VectorStore.get_embedding_vector().delete_by_dataset_id_list(source_ids) @staticmethod - @poxy - def init_embedding_model(ages): - EmbeddingModel.get_embedding_model() - - def run(self): - # 添加向量 根据问题id - ListenerManagement.embedding_by_problem_signal.connect(self.embedding_by_problem) - # 添加向量 根据段落id - ListenerManagement.embedding_by_paragraph_signal.connect(self.embedding_by_paragraph) - # 添加向量 根据知识库id - ListenerManagement.embedding_by_dataset_signal.connect( - self.embedding_by_dataset) - # 添加向量 根据文档id - ListenerManagement.embedding_by_document_signal.connect( - self.embedding_by_document) - # 删除 向量 根据文档 - ListenerManagement.delete_embedding_by_document_signal.connect(self.delete_embedding_by_document) - # 删除 向量 根据文档id列表 - ListenerManagement.delete_embedding_by_document_list_signal.connect(self.delete_embedding_by_document_list) - # 删除 向量 根据知识库id - ListenerManagement.delete_embedding_by_dataset_signal.connect(self.delete_embedding_by_dataset) - # 删除向量 根据段落id - ListenerManagement.delete_embedding_by_paragraph_signal.connect( - self.delete_embedding_by_paragraph) - # 删除向量 根据资源id - ListenerManagement.delete_embedding_by_source_signal.connect(self.delete_embedding_by_source) - # 禁用段落 - 
ListenerManagement.disable_embedding_by_paragraph_signal.connect(self.disable_embedding_by_paragraph) - # 启动段落向量 - ListenerManagement.enable_embedding_by_paragraph_signal.connect(self.enable_embedding_by_paragraph) - # 初始化向量化模型 - ListenerManagement.init_embedding_model_signal.connect(self.init_embedding_model) - # 同步web站点知识库 - ListenerManagement.sync_web_dataset_signal.connect(self.sync_web_dataset) - # 同步web站点 文档 - ListenerManagement.sync_web_document_signal.connect(self.sync_web_document) - # 更新问题向量 - ListenerManagement.update_problem_signal.connect(self.update_problem) - ListenerManagement.delete_embedding_by_source_ids_signal.connect(self.delete_embedding_by_source_ids) - ListenerManagement.delete_embedding_by_dataset_id_list_signal.connect(self.delete_embedding_by_dataset_id_list) + def hit_test(query_text, dataset_id: list[str], exclude_document_id_list: list[str], top_number: int, + similarity: float, + search_mode: SearchMode, + embedding: Embeddings): + return VectorStore.get_embedding_vector().hit_test(query_text, dataset_id, exclude_document_id_list, top_number, + similarity, search_mode, embedding) diff --git a/apps/common/exception/app_exception.py b/apps/common/exception/app_exception.py index 3646efb0cbc..b8f5602e705 100644 --- a/apps/common/exception/app_exception.py +++ b/apps/common/exception/app_exception.py @@ -73,3 +73,11 @@ class AppChatNumOutOfBoundsFailed(AppApiException): def __init__(self, code, message): self.code = code self.message = message + + +class ChatException(AppApiException): + status_code = 500 + + def __init__(self, code, message): + self.code = code + self.message = message diff --git a/apps/common/field/common.py b/apps/common/field/common.py index c615e587a1a..61c852d357d 100644 --- a/apps/common/field/common.py +++ b/apps/common/field/common.py @@ -7,6 +7,21 @@ @desc: """ from rest_framework import serializers +from django.utils.translation import gettext_lazy as _ + +class ObjectField(serializers.Field): + def 
__init__(self, model_type_list, **kwargs): + self.model_type_list = model_type_list + super().__init__(**kwargs) + + def to_internal_value(self, data): + for model_type in self.model_type_list: + if isinstance(data, model_type): + return data + self.fail(_('Message type error'), value=data) + + def to_representation(self, value): + return value class InstanceField(serializers.Field): @@ -16,7 +31,7 @@ def __init__(self, model_type, **kwargs): def to_internal_value(self, data): if not isinstance(data, self.model_type): - self.fail('message类型错误', value=data) + self.fail(_('Message type error'), value=data) return data def to_representation(self, value): @@ -27,7 +42,7 @@ class FunctionField(serializers.Field): def to_internal_value(self, data): if not callable(data): - self.fail('不是一个函數', value=data) + self.fail(_('not a function'), value=data) return data def to_representation(self, value): @@ -40,3 +55,11 @@ def __init__(self, **kwargs): def to_representation(self, value): return value + + +class UploadedFileField(serializers.FileField): + def __init__(self, **kwargs): + super().__init__(**kwargs) + + def to_representation(self, value): + return value diff --git a/apps/common/forms/__init__.py b/apps/common/forms/__init__.py index cda6fe04046..251f01df092 100644 --- a/apps/common/forms/__init__.py +++ b/apps/common/forms/__init__.py @@ -20,3 +20,6 @@ from .radio_button_field import * from .table_checkbox import * from .radio_card_field import * +from .label import * +from .slider_field import * +from .switch_field import * diff --git a/apps/common/forms/base_field.py b/apps/common/forms/base_field.py index d12ae77a723..b0cf0f20240 100644 --- a/apps/common/forms/base_field.py +++ b/apps/common/forms/base_field.py @@ -9,6 +9,10 @@ from enum import Enum from typing import List, Dict +from common.exception.app_exception import AppApiException +from common.forms.label.base_label import BaseLabel +from django.utils.translation import gettext_lazy as _ + class 
TriggerType(Enum): # 执行函数获取 OptionList数据 @@ -20,7 +24,7 @@ class TriggerType(Enum): class BaseField: def __init__(self, input_type: str, - label: str, + label: str or BaseLabel, required: bool = False, default_value: object = None, relation_show_field_dict: Dict = None, @@ -53,10 +57,16 @@ def __init__(self, self.required = required self.trigger_type = trigger_type - def to_dict(self): + def is_valid(self, value): + field_label = self.label.label if hasattr(self.label, 'to_dict') else self.label + if self.required and value is None: + raise AppApiException(500, + _('The field {field_label} is required').format(field_label=field_label)) + + def to_dict(self, **kwargs): return { 'input_type': self.input_type, - 'label': self.label, + 'label': self.label.to_dict(**kwargs) if hasattr(self.label, 'to_dict') else self.label, 'required': self.required, 'default_value': self.default_value, 'relation_show_field_dict': self.relation_show_field_dict, @@ -64,6 +74,7 @@ def to_dict(self): 'trigger_type': self.trigger_type.value, 'attrs': self.attrs, 'props_info': self.props_info, + **kwargs } @@ -97,8 +108,8 @@ def __init__(self, input_type: str, self.value_field = value_field self.option_list = option_list - def to_dict(self): - return {**super().to_dict(), 'text_field': self.text_field, 'value_field': self.value_field, + def to_dict(self, **kwargs): + return {**super().to_dict(**kwargs), 'text_field': self.text_field, 'value_field': self.value_field, 'option_list': self.option_list} @@ -141,6 +152,6 @@ def __init__(self, self.provider = provider self.method = method - def to_dict(self): - return {**super().to_dict(), 'text_field': self.text_field, 'value_field': self.value_field, + def to_dict(self, **kwargs): + return {**super().to_dict(**kwargs), 'text_field': self.text_field, 'value_field': self.value_field, 'provider': self.provider, 'method': self.method} diff --git a/apps/common/forms/base_form.py b/apps/common/forms/base_form.py index 93984b8c61b..5ef92c5c1e6 100644 
--- a/apps/common/forms/base_form.py +++ b/apps/common/forms/base_form.py @@ -6,11 +6,25 @@ @date:2023/11/1 16:04 @desc: """ +from typing import Dict + from common.forms import BaseField class BaseForm: - def to_form_list(self): - return [{**self.__getattribute__(key).to_dict(), 'field': key} for key in + def to_form_list(self, **kwargs): + return [{**self.__getattribute__(key).to_dict(**kwargs), 'field': key} for key in list(filter(lambda key: isinstance(self.__getattribute__(key), BaseField), [attr for attr in vars(self.__class__) if not attr.startswith("__")]))] + + def valid_form(self, form_data): + field_keys = list(filter(lambda key: isinstance(self.__getattribute__(key), BaseField), + [attr for attr in vars(self.__class__) if not attr.startswith("__")])) + for field_key in field_keys: + self.__getattribute__(field_key).is_valid(form_data.get(field_key)) + + def get_default_form_data(self): + return {key: self.__getattribute__(key).default_value for key in + [attr for attr in vars(self.__class__) if not attr.startswith("__")] if + isinstance(self.__getattribute__(key), BaseField) and self.__getattribute__( + key).default_value is not None} diff --git a/apps/common/forms/label/__init__.py b/apps/common/forms/label/__init__.py new file mode 100644 index 00000000000..81c1b329874 --- /dev/null +++ b/apps/common/forms/label/__init__.py @@ -0,0 +1,10 @@ +# coding=utf-8 +""" + @project: MaxKB + @Author:虎 + @file: __init__.py.py + @date:2024/8/22 17:19 + @desc: +""" +from .base_label import * +from .tooltip_label import * diff --git a/apps/common/forms/label/base_label.py b/apps/common/forms/label/base_label.py new file mode 100644 index 00000000000..59e4d372267 --- /dev/null +++ b/apps/common/forms/label/base_label.py @@ -0,0 +1,28 @@ +# coding=utf-8 +""" + @project: MaxKB + @Author:虎 + @file: base_label.py + @date:2024/8/22 17:11 + @desc: +""" + + +class BaseLabel: + def __init__(self, + input_type: str, + label: str, + attrs=None, + props_info=None): + 
self.input_type = input_type + self.label = label + self.attrs = attrs + self.props_info = props_info + + def to_dict(self, **kwargs): + return { + 'input_type': self.input_type, + 'label': self.label, + 'attrs': {} if self.attrs is None else self.attrs, + 'props_info': {} if self.props_info is None else self.props_info, + } diff --git a/apps/common/forms/label/tooltip_label.py b/apps/common/forms/label/tooltip_label.py new file mode 100644 index 00000000000..885345dafbc --- /dev/null +++ b/apps/common/forms/label/tooltip_label.py @@ -0,0 +1,14 @@ +# coding=utf-8 +""" + @project: MaxKB + @Author:虎 + @file: tooltip_label.py + @date:2024/8/22 17:19 + @desc: +""" +from common.forms.label.base_label import BaseLabel + + +class TooltipLabel(BaseLabel): + def __init__(self, label, tooltip): + super().__init__('TooltipLabel', label, attrs={'tooltip': tooltip}, props_info={}) diff --git a/apps/common/forms/single_select_field.py b/apps/common/forms/single_select_field.py index cf3d5040965..21bd5de5750 100644 --- a/apps/common/forms/single_select_field.py +++ b/apps/common/forms/single_select_field.py @@ -8,6 +8,7 @@ """ from typing import List, Dict +from common.forms import BaseLabel from common.forms.base_field import TriggerType, BaseExecField @@ -17,7 +18,7 @@ class SingleSelect(BaseExecField): """ def __init__(self, - label: str, + label: str or BaseLabel, text_field: str, value_field: str, option_list: List[str:object], diff --git a/apps/common/forms/slider_field.py b/apps/common/forms/slider_field.py new file mode 100644 index 00000000000..3919891fda4 --- /dev/null +++ b/apps/common/forms/slider_field.py @@ -0,0 +1,65 @@ +# coding=utf-8 +""" + @project: MaxKB + @Author:虎 + @file: slider_field.py + @date:2024/8/22 17:06 + @desc: +""" +from typing import Dict + +from common.exception.app_exception import AppApiException +from common.forms import BaseField, TriggerType, BaseLabel +from django.utils.translation import gettext_lazy as _ + + +class SliderField(BaseField): 
+ """ + 滑块输入框 + """ + + def __init__(self, label: str or BaseLabel, + _min, + _max, + _step, + precision, + required: bool = False, + default_value=None, + relation_show_field_dict: Dict = None, + attrs=None, props_info=None): + """ + @param label: 提示 + @param _min: 最小值 + @param _max: 最大值 + @param _step: 步长 + @param precision: 保留多少小数 + @param required: 是否必填 + @param default_value: 默认值 + @param relation_show_field_dict: + @param attrs: + @param props_info: + """ + _attrs = {'min': _min, 'max': _max, 'step': _step, + 'precision': precision, 'show-input-controls': False, 'show-input': True} + if attrs is not None: + _attrs.update(attrs) + super().__init__('Slider', label, required, default_value, relation_show_field_dict, + {}, + TriggerType.OPTION_LIST, _attrs, props_info) + + def is_valid(self, value): + super().is_valid(value) + field_label = self.label.label if hasattr(self.label, 'to_dict') else self.label + if value is not None: + if value < self.attrs.get('min'): + raise AppApiException(500, + _("The {field_label} cannot be less than {min}").format(field_label=field_label, + min=self.attrs.get( + 'min'))) + + if value > self.attrs.get('max'): + raise AppApiException(500, + _("The {field_label} cannot be greater than {max}").format( + field_label=field_label, + max=self.attrs.get( + 'max'))) diff --git a/apps/common/forms/switch_field.py b/apps/common/forms/switch_field.py new file mode 100644 index 00000000000..ea119c3ecfb --- /dev/null +++ b/apps/common/forms/switch_field.py @@ -0,0 +1,33 @@ +""" + @project: MaxKB + @Author:虎 + @file: switch_field.py + @date:2024/10/13 19:43 + @desc: +""" +from typing import Dict +from common.forms import BaseField, TriggerType, BaseLabel + + +class SwitchField(BaseField): + """ + 滑块输入框 + """ + + def __init__(self, label: str or BaseLabel, + required: bool = False, + default_value=None, + relation_show_field_dict: Dict = None, + + attrs=None, props_info=None): + """ + @param required: 是否必填 + @param default_value: 默认值 + @param 
relation_show_field_dict: + @param attrs: + @param props_info: + """ + + super().__init__('SwitchInput', label, required, default_value, relation_show_field_dict, + {}, + TriggerType.OPTION_LIST, attrs, props_info) diff --git a/apps/common/forms/text_input_field.py b/apps/common/forms/text_input_field.py index 28a821e1570..2b8b2ce04a5 100644 --- a/apps/common/forms/text_input_field.py +++ b/apps/common/forms/text_input_field.py @@ -8,6 +8,7 @@ """ from typing import Dict +from common.forms import BaseLabel from common.forms.base_field import BaseField, TriggerType @@ -16,7 +17,7 @@ class TextInputField(BaseField): 文本输入框 """ - def __init__(self, label: str, + def __init__(self, label: str or BaseLabel, required: bool = False, default_value=None, relation_show_field_dict: Dict = None, diff --git a/apps/common/handle/base_parse_qa_handle.py b/apps/common/handle/base_parse_qa_handle.py new file mode 100644 index 00000000000..8cd1cd1cdb2 --- /dev/null +++ b/apps/common/handle/base_parse_qa_handle.py @@ -0,0 +1,52 @@ +# coding=utf-8 +""" + @project: maxkb + @Author:虎 + @file: base_parse_qa_handle.py + @date:2024/5/21 14:56 + @desc: +""" +from abc import ABC, abstractmethod + + +def get_row_value(row, title_row_index_dict, field): + index = title_row_index_dict.get(field) + if index is None: + return None + if (len(row) - 1) >= index: + return row[index] + return None + + +def get_title_row_index_dict(title_row_list): + title_row_index_dict = {} + if len(title_row_list) == 1: + title_row_index_dict['content'] = 0 + elif len(title_row_list) == 1: + title_row_index_dict['title'] = 0 + title_row_index_dict['content'] = 1 + else: + title_row_index_dict['title'] = 0 + title_row_index_dict['content'] = 1 + title_row_index_dict['problem_list'] = 2 + for index in range(len(title_row_list)): + title_row = title_row_list[index] + if title_row is None: + title_row = '' + if title_row.startswith('分段标题'): + title_row_index_dict['title'] = index + if title_row.startswith('分段内容'): + 
title_row_index_dict['content'] = index + if title_row.startswith('问题'): + title_row_index_dict['problem_list'] = index + return title_row_index_dict + + +class BaseParseQAHandle(ABC): + @abstractmethod + def support(self, file, get_buffer): + pass + + @abstractmethod + def handle(self, file, get_buffer, save_image): + pass diff --git a/apps/common/handle/base_parse_table_handle.py b/apps/common/handle/base_parse_table_handle.py new file mode 100644 index 00000000000..65eaf897f1f --- /dev/null +++ b/apps/common/handle/base_parse_table_handle.py @@ -0,0 +1,23 @@ +# coding=utf-8 +""" + @project: maxkb + @Author:虎 + @file: base_parse_qa_handle.py + @date:2024/5/21 14:56 + @desc: +""" +from abc import ABC, abstractmethod + + +class BaseParseTableHandle(ABC): + @abstractmethod + def support(self, file, get_buffer): + pass + + @abstractmethod + def handle(self, file, get_buffer,save_image): + pass + + @abstractmethod + def get_content(self, file, save_image): + pass \ No newline at end of file diff --git a/apps/common/handle/base_split_handle.py b/apps/common/handle/base_split_handle.py index f9b573f0f79..bedaad5e10c 100644 --- a/apps/common/handle/base_split_handle.py +++ b/apps/common/handle/base_split_handle.py @@ -18,3 +18,7 @@ def support(self, file, get_buffer): @abstractmethod def handle(self, file, pattern_list: List, with_filter: bool, limit: int, get_buffer, save_image): pass + + @abstractmethod + def get_content(self, file, save_image): + pass diff --git a/apps/common/handle/base_to_response.py b/apps/common/handle/base_to_response.py new file mode 100644 index 00000000000..376d1a9ddd7 --- /dev/null +++ b/apps/common/handle/base_to_response.py @@ -0,0 +1,30 @@ +# coding=utf-8 +""" + @project: MaxKB + @Author:虎 + @file: base_to_response.py + @date:2024/9/6 16:04 + @desc: +""" +from abc import ABC, abstractmethod + +from rest_framework import status + + +class BaseToResponse(ABC): + + @abstractmethod + def to_block_response(self, chat_id, chat_record_id, 
content, is_end, completion_tokens, + prompt_tokens, other_params: dict = None, + _status=status.HTTP_200_OK): + pass + + @abstractmethod + def to_stream_chunk_response(self, chat_id, chat_record_id, node_id, up_node_id_list, content, is_end, + completion_tokens, + prompt_tokens, other_params: dict = None): + pass + + @staticmethod + def format_stream_chunk(response_str): + return 'data: ' + response_str + '\n\n' diff --git a/apps/common/handle/handle_exception.py b/apps/common/handle/handle_exception.py index d3e86401bc2..21e8c8ef1e9 100644 --- a/apps/common/handle/handle_exception.py +++ b/apps/common/handle/handle_exception.py @@ -14,7 +14,7 @@ from common.exception.app_exception import AppApiException from common.response import result - +from django.utils.translation import gettext_lazy as _ def to_result(key, args, parent_key=None): """ @@ -27,7 +27,7 @@ def to_result(key, args, parent_key=None): error_detail = list(filter( lambda d: True if isinstance(d, ErrorDetail) else True if isinstance(d, dict) and len( d.keys()) > 0 else False, - (args[0] if len(args) > 0 else {key: [ErrorDetail('未知异常', code='unknown')]}).get(key)))[0] + (args[0] if len(args) > 0 else {key: [ErrorDetail(_('Unknown exception'), code='unknown')]}).get(key)))[0] if isinstance(error_detail, dict): return list(map(lambda k: to_result(k, args=[error_detail], @@ -63,13 +63,15 @@ def find_err_detail(exc_detail): _value = exc_detail[key] if isinstance(_value, list): return find_err_detail(_value) - elif isinstance(_value, ErrorDetail): + if isinstance(_value, ErrorDetail): return _value - elif isinstance(_value, dict): + if isinstance(_value, dict) and len(_value.keys()) > 0: return find_err_detail(_value) if isinstance(exc_detail, list): for v in exc_detail: - return find_err_detail(v) + r = find_err_detail(v) + if r is not None: + return r def handle_exception(exc, context): diff --git a/apps/common/handle/impl/csv_split_handle.py b/apps/common/handle/impl/csv_split_handle.py new file mode 
100644 index 00000000000..3ea690e0e65 --- /dev/null +++ b/apps/common/handle/impl/csv_split_handle.py @@ -0,0 +1,72 @@ +# coding=utf-8 +""" + @project: maxkb + @Author:虎 + @file: csv_parse_qa_handle.py + @date:2024/5/21 14:59 + @desc: +""" +import csv +import io +import os +from typing import List + +from charset_normalizer import detect + +from common.handle.base_split_handle import BaseSplitHandle + + +def post_cell(cell_value): + return cell_value.replace('\n', '
').replace('|', '|') + + +def row_to_md(row): + return '| ' + ' | '.join( + [post_cell(cell) if cell is not None else '' for cell in row]) + ' |\n' + + +class CsvSplitHandle(BaseSplitHandle): + def handle(self, file, pattern_list: List, with_filter: bool, limit: int, get_buffer, save_image): + buffer = get_buffer(file) + paragraphs = [] + file_name = os.path.basename(file.name) + result = {'name': file_name, 'content': paragraphs} + try: + reader = csv.reader(io.TextIOWrapper(io.BytesIO(buffer), encoding=detect(buffer)['encoding'])) + try: + title_row_list = reader.__next__() + title_md_content = row_to_md(title_row_list) + title_md_content += '| ' + ' | '.join( + ['---' if cell is not None else '' for cell in title_row_list]) + ' |\n' + except Exception as e: + return result + if len(title_row_list) == 0: + return result + result_item_content = '' + for row in reader: + next_md_content = row_to_md(row) + next_md_content_len = len(next_md_content) + result_item_content_len = len(result_item_content) + if len(result_item_content) == 0: + result_item_content += title_md_content + result_item_content += next_md_content + else: + if result_item_content_len + next_md_content_len < limit: + result_item_content += next_md_content + else: + paragraphs.append({'content': result_item_content, 'title': ''}) + result_item_content = title_md_content + next_md_content + if len(result_item_content) > 0: + paragraphs.append({'content': result_item_content, 'title': ''}) + return result + except Exception as e: + return result + + def get_content(self, file, save_image): + pass + + def support(self, file, get_buffer): + file_name: str = file.name.lower() + if file_name.endswith(".csv"): + return True + return False diff --git a/apps/common/handle/impl/doc_split_handle.py b/apps/common/handle/impl/doc_split_handle.py index 25f5d694af9..4161f13a19d 100644 --- a/apps/common/handle/impl/doc_split_handle.py +++ b/apps/common/handle/impl/doc_split_handle.py @@ -7,18 +7,22 @@ @desc: """ 
import io +import os import re import traceback import uuid +from functools import reduce from typing import List from docx import Document, ImagePart +from docx.oxml import ns from docx.table import Table from docx.text.paragraph import Paragraph from common.handle.base_split_handle import BaseSplitHandle from common.util.split_model import SplitModel from dataset.models import Image +from django.utils.translation import gettext_lazy as _ default_pattern_list = [re.compile('(?<=^)# .*|(?<=\\n)# .*'), re.compile('(?<=\\n)(? 0: + for image in _images: + images.append({'image': image, 'get_image_id_handle': get_image_id_handle}) + except Exception as e: + pass + return images + + +def images_to_string(images, doc: Document, images_list, get_image_id): + return "".join( + [item for item in [image_to_mode(image, doc, images_list, get_image_id) for image in images] if + item is not None]) + + def get_paragraph_element_txt(paragraph_element, doc: Document, images_list, get_image_id): try: - images = paragraph_element.xpath(".//pic:pic") + images = get_paragraph_element_images(paragraph_element, doc, images_list, get_image_id) if len(images) > 0: - return "".join( - [item for item in [image_to_mode(image, doc, images_list, get_image_id) for image in images] if - item is not None]) + return images_to_string(images, doc, images_list, get_image_id) elif paragraph_element.text is not None: return paragraph_element.text return "" @@ -83,14 +110,49 @@ def get_image_id(image_id): return get_image_id +title_font_list = [ + [36, 100], + [30, 36] +] + + +def get_title_level(paragraph: Paragraph): + try: + if paragraph.style is not None: + psn = paragraph.style.name + if psn.startswith('Heading') or psn.startswith('TOC 标题') or psn.startswith('标题'): + return int(psn.replace("Heading ", '').replace('TOC 标题', '').replace('标题', + '')) + if len(paragraph.runs) == 1: + font_size = paragraph.runs[0].font.size + pt = font_size.pt + if pt >= 30: + for _value, index in zip(title_font_list, 
range(len(title_font_list))): + if pt >= _value[0] and pt < _value[1]: + return index + 1 + except Exception as e: + pass + return None + + class DocSplitHandle(BaseSplitHandle): @staticmethod def paragraph_to_md(paragraph: Paragraph, doc: Document, images_list, get_image_id): try: - psn = paragraph.style.name - if psn.startswith('Heading'): - return "".join(["#" for i in range(int(psn.replace("Heading ", '')))]) + " " + paragraph.text + title_level = get_title_level(paragraph) + if title_level is not None: + title = "".join(["#" for i in range(title_level)]) + " " + paragraph.text + images = reduce(lambda x, y: [*x, *y], + [get_paragraph_element_images(e, doc, images_list, get_image_id) for e in + paragraph._element], + []) + if len(images) > 0: + return title + '\n' + images_to_string(images, doc, images_list, get_image_id) if len( + paragraph.text) > 0 else images_to_string(images, doc, images_list, get_image_id) + return title + except Exception as e: + traceback.print_exc() return paragraph.text return get_paragraph_txt(paragraph, doc, images_list, get_image_id) @@ -110,11 +172,12 @@ def table_to_md(table, doc: Document, images_list, get_image_id): def to_md(self, doc, images_list, get_image_id): elements = [] for element in doc.element.body: - if element.tag.endswith('tbl'): + tag = str(element.tag) + if tag.endswith('tbl'): # 处理表格 table = Table(element, doc) elements.append(table) - elif element.tag.endswith('p'): + elif tag.endswith('p'): # 处理段落 paragraph = Paragraph(element, doc) elements.append(paragraph) @@ -128,6 +191,7 @@ def to_md(self, doc, images_list, get_image_id): in elements]) def handle(self, file, pattern_list: List, with_filter: bool, limit: int, get_buffer, save_image): + file_name = os.path.basename(file.name) try: image_list = [] buffer = get_buffer(file) @@ -141,14 +205,29 @@ def handle(self, file, pattern_list: List, with_filter: bool, limit: int, get_bu split_model = SplitModel(default_pattern_list, with_filter=with_filter, limit=limit) 
except BaseException as e: traceback.print_exception(e) - return {'name': file.name, + return {'name': file_name, 'content': []} - return {'name': file.name, + return {'name': file_name, 'content': split_model.parse(content) } def support(self, file, get_buffer): file_name: str = file.name.lower() - if file_name.endswith(".docx") or file_name.endswith(".doc"): + if file_name.endswith(".docx") or file_name.endswith(".doc") or file_name.endswith( + ".DOC") or file_name.endswith(".DOCX"): return True return False + + def get_content(self, file, save_image): + try: + image_list = [] + buffer = file.read() + doc = Document(io.BytesIO(buffer)) + content = self.to_md(doc, image_list, get_image_id_func()) + if len(image_list) > 0: + content = content.replace('/api/image/', '/api/file/') + save_image(image_list) + return content + except BaseException as e: + traceback.print_exception(e) + return f'{e}' diff --git a/apps/common/handle/impl/html_split_handle.py b/apps/common/handle/impl/html_split_handle.py new file mode 100644 index 00000000000..90e59ebcb5c --- /dev/null +++ b/apps/common/handle/impl/html_split_handle.py @@ -0,0 +1,73 @@ +# coding=utf-8 +""" + @project: maxkb + @Author:虎 + @file: html_split_handle.py + @date:2024/5/23 10:58 + @desc: +""" +import re +import traceback +from typing import List + +from bs4 import BeautifulSoup +from charset_normalizer import detect +from html2text import html2text + +from common.handle.base_split_handle import BaseSplitHandle +from common.util.split_model import SplitModel + +default_pattern_list = [re.compile('(?<=^)# .*|(?<=\\n)# .*'), + re.compile('(?<=\\n)(? 
0: + charset = charset_list[0] + return charset + return detect(buffer)['encoding'] + + +class HTMLSplitHandle(BaseSplitHandle): + def support(self, file, get_buffer): + file_name: str = file.name.lower() + if file_name.endswith(".html") or file_name.endswith(".HTML"): + return True + return False + + def handle(self, file, pattern_list: List, with_filter: bool, limit: int, get_buffer, save_image): + buffer = get_buffer(file) + + if pattern_list is not None and len(pattern_list) > 0: + split_model = SplitModel(pattern_list, with_filter, limit) + else: + split_model = SplitModel(default_pattern_list, with_filter=with_filter, limit=limit) + try: + encoding = get_encoding(buffer) + content = buffer.decode(encoding) + content = html2text(content) + except BaseException as e: + return {'name': file.name, + 'content': []} + return {'name': file.name, + 'content': split_model.parse(content) + } + + def get_content(self, file, save_image): + buffer = file.read() + + try: + encoding = get_encoding(buffer) + content = buffer.decode(encoding) + return html2text(content) + except BaseException as e: + traceback.print_exception(e) + return f'{e}' \ No newline at end of file diff --git a/apps/common/handle/impl/pdf_split_handle.py b/apps/common/handle/impl/pdf_split_handle.py index 4d835ca11a4..abdac5e19c9 100644 --- a/apps/common/handle/impl/pdf_split_handle.py +++ b/apps/common/handle/impl/pdf_split_handle.py @@ -6,13 +6,20 @@ @date:2024/3/27 18:19 @desc: """ +import logging +import os import re +import tempfile +import time +import traceback from typing import List import fitz +from langchain_community.document_loaders import PyPDFLoader from common.handle.base_split_handle import BaseSplitHandle from common.util.split_model import SplitModel +from django.utils.translation import gettext_lazy as _ default_pattern_list = [re.compile('(?<=^)# .*|(?<=\\n)# .*'), re.compile('(?<=\\n)(? 
0: + return {'name': file.name, 'content': result} + + # 没有目录的pdf + content = self.handle_pdf_content(file, pdf_document) + if pattern_list is not None and len(pattern_list) > 0: split_model = SplitModel(pattern_list, with_filter, limit) else: split_model = SplitModel(default_pattern_list, with_filter=with_filter, limit=limit) except BaseException as e: + max_kb.error(f"File: {file.name}, error: {e}") return {'name': file.name, 'content': []} + finally: + pdf_document.close() + # 处理完后可以删除临时文件 + os.remove(temp_file_path) + return {'name': file.name, 'content': split_model.parse(content) } + @staticmethod + def handle_pdf_content(file, pdf_document): + content = "" + for page_num in range(len(pdf_document)): + start_time = time.time() + page = pdf_document.load_page(page_num) + text = page.get_text() + + if text and text.strip(): # 如果页面中有文本内容 + page_content = text + else: + try: + new_doc = fitz.open() + new_doc.insert_pdf(pdf_document, from_page=page_num, to_page=page_num) + page_num_pdf = tempfile.gettempdir() + f"/{file.name}_{page_num}.pdf" + new_doc.save(page_num_pdf) + new_doc.close() + + loader = PyPDFLoader(page_num_pdf, extract_images=True) + page_content = "\n" + loader.load()[0].page_content + except NotImplementedError as e: + # 文件格式不支持,直接退出 + raise e + except BaseException as e: + # 当页出错继续进行下一页,防止一个页面出错导致整个文件解析失败 + max_kb.error(f"File: {file.name}, Page: {page_num + 1}, error: {e}") + continue + finally: + os.remove(page_num_pdf) + + content += page_content + + # Null characters are not allowed. 
+ content = content.replace('\0', '') + + elapsed_time = time.time() - start_time + max_kb.debug( + f"File: {file.name}, Page: {page_num + 1}, Time : {elapsed_time: .3f}s, content-length: {len(page_content)}") + + return content + + @staticmethod + def handle_toc(doc, limit): + # 找到目录 + toc = doc.get_toc() + if toc is None or len(toc) == 0: + return None + + # 创建存储章节内容的数组 + chapters = [] + + # 遍历目录并按章节提取文本 + for i, entry in enumerate(toc): + level, title, start_page = entry + start_page -= 1 # PyMuPDF 页码从 0 开始,书签页码从 1 开始 + chapter_title = title + # 确定结束页码,如果是最后一个章节则到文档末尾 + if i + 1 < len(toc): + end_page = toc[i + 1][2] - 1 + else: + end_page = doc.page_count - 1 + + # 去掉标题中的符号 + title = PdfSplitHandle.handle_chapter_title(title) + + # 提取该章节的文本内容 + chapter_text = "" + for page_num in range(start_page, end_page + 1): + page = doc.load_page(page_num) # 加载页面 + text = page.get_text("text") + text = re.sub(r'(? -1: + text = text[idx + len(title):] + + if i + 1 < len(toc): + l, next_title, next_start_page = toc[i + 1] + next_title = PdfSplitHandle.handle_chapter_title(next_title) + # print(f'next_title: {next_title}') + idx = text.find(next_title) + if idx > -1: + text = text[:idx] + + chapter_text += text # 提取文本 + + # Null characters are not allowed. 
+ chapter_text = chapter_text.replace('\0', '') + # 限制标题长度 + real_chapter_title = chapter_title[:256] + # 限制章节内容长度 + if 0 < limit < len(chapter_text): + split_text = PdfSplitHandle.split_text(chapter_text, limit) + for text in split_text: + chapters.append({"title": real_chapter_title, "content": text}) + else: + chapters.append({"title": real_chapter_title, "content": chapter_text if chapter_text else real_chapter_title}) + # 保存章节内容和章节标题 + return chapters + + @staticmethod + def handle_links(doc, pattern_list, with_filter, limit): + # 检查文档是否包含内部链接 + if not check_links_in_pdf(doc): + return + # 创建存储章节内容的数组 + chapters = [] + toc_start_page = -1 + page_content = "" + handle_pre_toc = True + # 遍历 PDF 的每一页,查找带有目录链接的页 + for page_num in range(doc.page_count): + page = doc.load_page(page_num) + links = page.get_links() + # 如果目录开始页码未设置,则设置为当前页码 + if len(links) > 0: + toc_start_page = page_num + if toc_start_page < 0: + page_content += page.get_text('text') + # 检查该页是否包含内部链接(即指向文档内部的页面) + for num in range(len(links)): + link = links[num] + if link['kind'] == 1: # 'kind' 为 1 表示内部链接 + # 获取链接目标的页面 + dest_page = link['page'] + rect = link['from'] # 获取链接的矩形区域 + # 如果目录开始页码包括前言部分,则不处理前言部分 + if dest_page < toc_start_page: + handle_pre_toc = False + + # 提取链接区域的文本作为标题 + link_title = page.get_text("text", clip=rect).strip().split("\n")[0].replace('.', '').strip() + # print(f'link_title: {link_title}') + # 提取目标页面内容作为章节开始 + start_page = dest_page + end_page = dest_page + # 下一个link + next_link = links[num + 1] if num + 1 < len(links) else None + next_link_title = None + if next_link is not None and next_link['kind'] == 1: + rect = next_link['from'] + next_link_title = page.get_text("text", clip=rect).strip() \ + .split("\n")[0].replace('.', '').strip() + # print(f'next_link_title: {next_link_title}') + end_page = next_link['page'] + + # 提取章节内容 + chapter_text = "" + for p_num in range(start_page, end_page + 1): + p = doc.load_page(p_num) + text = p.get_text("text") + text = re.sub(r'(? 
-1: + text = text[idx + len(link_title):] + + if next_link_title is not None: + idx = text.find(next_link_title) + if idx > -1: + text = text[:idx] + chapter_text += text + + # Null characters are not allowed. + chapter_text = chapter_text.replace('\0', '') + + # 限制章节内容长度 + if 0 < limit < len(chapter_text): + split_text = PdfSplitHandle.split_text(chapter_text, limit) + for text in split_text: + chapters.append({"title": link_title, "content": text}) + else: + # 保存章节信息 + chapters.append({"title": link_title, "content": chapter_text}) + + # 目录中没有前言部分,手动处理 + if handle_pre_toc: + pre_toc = [] + lines = page_content.strip().split('\n') + try: + for line in lines: + if re.match(r'^前\s*言', line): + pre_toc.append({'title': line, 'content': ''}) + else: + pre_toc[-1]['content'] += line + for i in range(len(pre_toc)): + pre_toc[i]['content'] = re.sub(r'(? 0: + split_model = SplitModel(pattern_list, with_filter, limit) + else: + split_model = SplitModel(default_pattern_list, with_filter=with_filter, limit=limit) + # 插入目录前的部分 + page_content = re.sub(r'(?= length: + # 查找最近的句号 + last_period_index = current_segment.rfind('.') + if last_period_index != -1: + segments.append(current_segment[:last_period_index + 1]) + current_segment = current_segment[last_period_index + 1:] # 更新当前段落 + else: + segments.append(current_segment) + current_segment = "" + + # 处理剩余的部分 + if current_segment: + segments.append(current_segment) + + return segments + + @staticmethod + def handle_chapter_title(title): + title = re.sub(r'[一二三四五六七八九十\s*]、\s*', '', title) + title = re.sub(r'第[一二三四五六七八九十]章\s*', '', title) + return title + def support(self, file, get_buffer): file_name: str = file.name.lower() - if file_name.endswith(".pdf"): + if file_name.endswith(".pdf") or file_name.endswith(".PDF"): return True return False + + def get_content(self, file, save_image): + with tempfile.NamedTemporaryFile(delete=False) as temp_file: + # 将上传的文件保存到临时文件中 + temp_file.write(file.read()) + # 获取临时文件的路径 + temp_file_path 
= temp_file.name + + pdf_document = fitz.open(temp_file_path) + try: + return self.handle_pdf_content(file, pdf_document) + except BaseException as e: + traceback.print_exception(e) + return f'{e}' \ No newline at end of file diff --git a/apps/common/handle/impl/qa/csv_parse_qa_handle.py b/apps/common/handle/impl/qa/csv_parse_qa_handle.py new file mode 100644 index 00000000000..75c22cbdafd --- /dev/null +++ b/apps/common/handle/impl/qa/csv_parse_qa_handle.py @@ -0,0 +1,59 @@ +# coding=utf-8 +""" + @project: maxkb + @Author:虎 + @file: csv_parse_qa_handle.py + @date:2024/5/21 14:59 + @desc: +""" +import csv +import io + +from charset_normalizer import detect + +from common.handle.base_parse_qa_handle import BaseParseQAHandle, get_title_row_index_dict, get_row_value + + +def read_csv_standard(file_path): + data = [] + with open(file_path, 'r') as file: + reader = csv.reader(file) + for row in reader: + data.append(row) + return data + + +class CsvParseQAHandle(BaseParseQAHandle): + def support(self, file, get_buffer): + file_name: str = file.name.lower() + if file_name.endswith(".csv"): + return True + return False + + def handle(self, file, get_buffer, save_image): + buffer = get_buffer(file) + try: + reader = csv.reader(io.TextIOWrapper(io.BytesIO(buffer), encoding=detect(buffer)['encoding'])) + try: + title_row_list = reader.__next__() + except Exception as e: + return [{'name': file.name, 'paragraphs': []}] + if len(title_row_list) == 0: + return [{'name': file.name, 'paragraphs': []}] + title_row_index_dict = get_title_row_index_dict(title_row_list) + paragraph_list = [] + for row in reader: + content = get_row_value(row, title_row_index_dict, 'content') + if content is None: + continue + problem = get_row_value(row, title_row_index_dict, 'problem_list') + problem = str(problem) if problem is not None else '' + problem_list = [{'content': p[0:255]} for p in problem.split('\n') if len(p.strip()) > 0] + title = get_row_value(row, title_row_index_dict, 'title') + 
title = str(title) if title is not None else '' + paragraph_list.append({'title': title[0:255], + 'content': content[0:102400], + 'problem_list': problem_list}) + return [{'name': file.name, 'paragraphs': paragraph_list}] + except Exception as e: + return [{'name': file.name, 'paragraphs': []}] diff --git a/apps/common/handle/impl/qa/xls_parse_qa_handle.py b/apps/common/handle/impl/qa/xls_parse_qa_handle.py new file mode 100644 index 00000000000..06edb1fb300 --- /dev/null +++ b/apps/common/handle/impl/qa/xls_parse_qa_handle.py @@ -0,0 +1,61 @@ +# coding=utf-8 +""" + @project: maxkb + @Author:虎 + @file: xls_parse_qa_handle.py + @date:2024/5/21 14:59 + @desc: +""" + +import xlrd + +from common.handle.base_parse_qa_handle import BaseParseQAHandle, get_title_row_index_dict, get_row_value + + +def handle_sheet(file_name, sheet): + rows = iter([sheet.row_values(i) for i in range(sheet.nrows)]) + try: + title_row_list = next(rows) + except Exception as e: + return {'name': file_name, 'paragraphs': []} + if len(title_row_list) == 0: + return {'name': file_name, 'paragraphs': []} + title_row_index_dict = get_title_row_index_dict(title_row_list) + paragraph_list = [] + for row in rows: + content = get_row_value(row, title_row_index_dict, 'content') + if content is None: + continue + problem = get_row_value(row, title_row_index_dict, 'problem_list') + problem = str(problem) if problem is not None else '' + problem_list = [{'content': p[0:255]} for p in problem.split('\n') if len(p.strip()) > 0] + title = get_row_value(row, title_row_index_dict, 'title') + title = str(title) if title is not None else '' + content = str(content) + paragraph_list.append({'title': title[0:255], + 'content': content[0:102400], + 'problem_list': problem_list}) + return {'name': file_name, 'paragraphs': paragraph_list} + + +class XlsParseQAHandle(BaseParseQAHandle): + def support(self, file, get_buffer): + file_name: str = file.name.lower() + buffer = get_buffer(file) + if file_name.endswith(".xls") 
and xlrd.inspect_format(content=buffer): + return True + return False + + def handle(self, file, get_buffer, save_image): + buffer = get_buffer(file) + try: + workbook = xlrd.open_workbook(file_contents=buffer) + worksheets = workbook.sheets() + worksheets_size = len(worksheets) + return [row for row in + [handle_sheet(file.name, + sheet) if worksheets_size == 1 and sheet.name == 'Sheet1' else handle_sheet( + sheet.name, sheet) for sheet + in worksheets] if row is not None] + except Exception as e: + return [{'name': file.name, 'paragraphs': []}] diff --git a/apps/common/handle/impl/qa/xlsx_parse_qa_handle.py b/apps/common/handle/impl/qa/xlsx_parse_qa_handle.py new file mode 100644 index 00000000000..c3ee40d5360 --- /dev/null +++ b/apps/common/handle/impl/qa/xlsx_parse_qa_handle.py @@ -0,0 +1,72 @@ +# coding=utf-8 +""" + @project: maxkb + @Author:虎 + @file: xlsx_parse_qa_handle.py + @date:2024/5/21 14:59 + @desc: +""" +import io + +import openpyxl + +from common.handle.base_parse_qa_handle import BaseParseQAHandle, get_title_row_index_dict, get_row_value +from common.handle.impl.tools import xlsx_embed_cells_images + + +def handle_sheet(file_name, sheet, image_dict): + rows = sheet.rows + try: + title_row_list = next(rows) + title_row_list = [row.value for row in title_row_list] + except Exception as e: + return {'name': file_name, 'paragraphs': []} + if len(title_row_list) == 0: + return {'name': file_name, 'paragraphs': []} + title_row_index_dict = get_title_row_index_dict(title_row_list) + paragraph_list = [] + for row in rows: + content = get_row_value(row, title_row_index_dict, 'content') + if content is None or content.value is None: + continue + problem = get_row_value(row, title_row_index_dict, 'problem_list') + problem = str(problem.value) if problem is not None and problem.value is not None else '' + problem_list = [{'content': p[0:255]} for p in problem.split('\n') if len(p.strip()) > 0] + title = get_row_value(row, title_row_index_dict, 'title') + title 
= str(title.value) if title is not None and title.value is not None else '' + content = str(content.value) + image = image_dict.get(content, None) + if image is not None: + content = f'![](/api/image/{image.id})' + paragraph_list.append({'title': title[0:255], + 'content': content[0:102400], + 'problem_list': problem_list}) + return {'name': file_name, 'paragraphs': paragraph_list} + + +class XlsxParseQAHandle(BaseParseQAHandle): + def support(self, file, get_buffer): + file_name: str = file.name.lower() + if file_name.endswith(".xlsx"): + return True + return False + + def handle(self, file, get_buffer, save_image): + buffer = get_buffer(file) + try: + workbook = openpyxl.load_workbook(io.BytesIO(buffer)) + try: + image_dict: dict = xlsx_embed_cells_images(io.BytesIO(buffer)) + save_image([item for item in image_dict.values()]) + except Exception as e: + image_dict = {} + worksheets = workbook.worksheets + worksheets_size = len(worksheets) + return [row for row in + [handle_sheet(file.name, + sheet, + image_dict) if worksheets_size == 1 and sheet.title == 'Sheet1' else handle_sheet( + sheet.title, sheet, image_dict) for sheet + in worksheets] if row is not None] + except Exception as e: + return [{'name': file.name, 'paragraphs': []}] diff --git a/apps/common/handle/impl/qa/zip_parse_qa_handle.py b/apps/common/handle/impl/qa/zip_parse_qa_handle.py new file mode 100644 index 00000000000..6f2763516fb --- /dev/null +++ b/apps/common/handle/impl/qa/zip_parse_qa_handle.py @@ -0,0 +1,163 @@ +# coding=utf-8 +""" + @project: maxkb + @Author:虎 + @file: text_split_handle.py + @date:2024/3/27 18:19 + @desc: +""" +import io +import os +import re +import uuid +import zipfile +from typing import List +from urllib.parse import urljoin + +from django.db.models import QuerySet + +from common.handle.base_parse_qa_handle import BaseParseQAHandle +from common.handle.impl.qa.csv_parse_qa_handle import CsvParseQAHandle +from common.handle.impl.qa.xls_parse_qa_handle import 
XlsParseQAHandle +from common.handle.impl.qa.xlsx_parse_qa_handle import XlsxParseQAHandle +from common.util.common import parse_md_image +from dataset.models import Image +from django.utils.translation import gettext_lazy as _ + +class FileBufferHandle: + buffer = None + + def get_buffer(self, file): + if self.buffer is None: + self.buffer = file.read() + return self.buffer + + +split_handles = [XlsParseQAHandle(), XlsxParseQAHandle(), CsvParseQAHandle()] + + +def save_inner_image(image_list): + """ + 子模块插入图片逻辑 + @param image_list: + @return: + """ + if image_list is not None and len(image_list) > 0: + QuerySet(Image).bulk_create(image_list) + + +def file_to_paragraph(file): + """ + 文件转换为段落列表 + @param file: 文件 + @return: { + name:文件名 + paragraphs:段落列表 + } + """ + get_buffer = FileBufferHandle().get_buffer + for split_handle in split_handles: + if split_handle.support(file, get_buffer): + return split_handle.handle(file, get_buffer, save_inner_image) + raise Exception(_("Unsupported file format")) + + +def is_valid_uuid(uuid_str: str): + """ + 校验字符串是否是uuid + @param uuid_str: 需要校验的字符串 + @return: bool + """ + try: + uuid.UUID(uuid_str) + except ValueError: + return False + return True + + +def get_image_list(result_list: list, zip_files: List[str]): + """ + 获取图片文件列表 + @param result_list: + @param zip_files: + @return: + """ + image_file_list = [] + for result in result_list: + for p in result.get('paragraphs', []): + content: str = p.get('content', '') + image_list = parse_md_image(content) + for image in image_list: + search = re.search("\(.*\)", image) + if search: + new_image_id = str(uuid.uuid1()) + source_image_path = search.group().replace('(', '').replace(')', '') + image_path = urljoin(result.get('name'), '.' 
+ source_image_path if source_image_path.startswith( + '/') else source_image_path) + if not zip_files.__contains__(image_path): + continue + if image_path.startswith('api/file/') or image_path.startswith('api/image/'): + image_id = image_path.replace('api/file/', '').replace('api/image/', '') + if is_valid_uuid(image_id): + image_file_list.append({'source_file': image_path, + 'image_id': image_id}) + else: + image_file_list.append({'source_file': image_path, + 'image_id': new_image_id}) + content = content.replace(source_image_path, f'/api/image/{new_image_id}') + p['content'] = content + else: + image_file_list.append({'source_file': image_path, + 'image_id': new_image_id}) + content = content.replace(source_image_path, f'/api/image/{new_image_id}') + p['content'] = content + + return image_file_list + + +def filter_image_file(result_list: list, image_list): + image_source_file_list = [image.get('source_file') for image in image_list] + return [r for r in result_list if not image_source_file_list.__contains__(r.get('name', ''))] + + +class ZipParseQAHandle(BaseParseQAHandle): + + def handle(self, file, get_buffer, save_image): + buffer = get_buffer(file) + bytes_io = io.BytesIO(buffer) + result = [] + # 打开zip文件 + with zipfile.ZipFile(bytes_io, 'r') as zip_ref: + # 获取压缩包中的文件名列表 + files = zip_ref.namelist() + # 读取压缩包中的文件内容 + for file in files: + # 跳过 macOS 特有的元数据目录和文件 + if file.endswith('/') or file.startswith('__MACOSX'): + continue + with zip_ref.open(file) as f: + # 对文件内容进行处理 + try: + value = file_to_paragraph(f) + if isinstance(value, list): + result = [*result, *value] + else: + result.append(value) + except Exception: + pass + image_list = get_image_list(result, files) + result = filter_image_file(result, image_list) + image_mode_list = [] + for image in image_list: + with zip_ref.open(image.get('source_file')) as f: + i = Image(id=image.get('image_id'), image=f.read(), + image_name=os.path.basename(image.get('source_file'))) + image_mode_list.append(i) + 
save_image(image_mode_list) + return result + + def support(self, file, get_buffer): + file_name: str = file.name.lower() + if file_name.endswith(".zip") or file_name.endswith(".ZIP"): + return True + return False diff --git a/apps/common/handle/impl/response/openai_to_response.py b/apps/common/handle/impl/response/openai_to_response.py new file mode 100644 index 00000000000..f2b69384e50 --- /dev/null +++ b/apps/common/handle/impl/response/openai_to_response.py @@ -0,0 +1,52 @@ +# coding=utf-8 +""" + @project: MaxKB + @Author:虎 + @file: openai_to_response.py + @date:2024/9/6 16:08 + @desc: +""" +import datetime + +from django.http import JsonResponse +from openai.types import CompletionUsage +from openai.types.chat import ChatCompletionChunk, ChatCompletionMessage, ChatCompletion +from openai.types.chat.chat_completion import Choice as BlockChoice +from openai.types.chat.chat_completion_chunk import Choice, ChoiceDelta +from rest_framework import status + +from common.handle.base_to_response import BaseToResponse + + +class OpenaiToResponse(BaseToResponse): + def to_block_response(self, chat_id, chat_record_id, content, is_end, completion_tokens, prompt_tokens, + other_params: dict = None, + _status=status.HTTP_200_OK): + if other_params is None: + other_params = {} + data = ChatCompletion(id=chat_record_id, choices=[ + BlockChoice(finish_reason='stop', index=0, chat_id=chat_id, + answer_list=other_params.get('answer_list', ""), + message=ChatCompletionMessage(role='assistant', content=content))], + created=datetime.datetime.now().second, model='', object='chat.completion', + usage=CompletionUsage(completion_tokens=completion_tokens, + prompt_tokens=prompt_tokens, + total_tokens=completion_tokens + prompt_tokens) + ).dict() + return JsonResponse(data=data, status=_status) + + def to_stream_chunk_response(self, chat_id, chat_record_id, node_id, up_node_id_list, content, is_end, + completion_tokens, + prompt_tokens, other_params: dict = None): + if other_params is 
None: + other_params = {} + chunk = ChatCompletionChunk(id=chat_record_id, model='', object='chat.completion.chunk', + created=datetime.datetime.now().second, choices=[ + Choice(delta=ChoiceDelta(content=content, reasoning_content=other_params.get('reasoning_content', ""), + chat_id=chat_id), + finish_reason='stop' if is_end else None, + index=0)], + usage=CompletionUsage(completion_tokens=completion_tokens, + prompt_tokens=prompt_tokens, + total_tokens=completion_tokens + prompt_tokens)).json() + return super().format_stream_chunk(chunk) diff --git a/apps/common/handle/impl/response/system_to_response.py b/apps/common/handle/impl/response/system_to_response.py new file mode 100644 index 00000000000..8df5ce1394b --- /dev/null +++ b/apps/common/handle/impl/response/system_to_response.py @@ -0,0 +1,41 @@ +# coding=utf-8 +""" + @project: MaxKB + @Author:虎 + @file: system_to_response.py + @date:2024/9/6 18:03 + @desc: +""" +import json + +from rest_framework import status + +from common.handle.base_to_response import BaseToResponse +from common.response import result + + +class SystemToResponse(BaseToResponse): + def to_block_response(self, chat_id, chat_record_id, content, is_end, completion_tokens, + prompt_tokens, other_params: dict = None, + _status=status.HTTP_200_OK): + if other_params is None: + other_params = {} + return result.success({'chat_id': str(chat_id), 'id': str(chat_record_id), 'operate': True, + 'content': content, 'is_end': is_end, **other_params, + 'completion_tokens': completion_tokens, 'prompt_tokens': prompt_tokens}, + response_status=_status, + code=_status) + + def to_stream_chunk_response(self, chat_id, chat_record_id, node_id, up_node_id_list, content, is_end, + completion_tokens, + prompt_tokens, other_params: dict = None): + if other_params is None: + other_params = {} + chunk = json.dumps({'chat_id': str(chat_id), 'chat_record_id': str(chat_record_id), 'operate': True, + 'content': content, 'node_id': node_id, 'up_node_id_list': 
up_node_id_list, + 'is_end': is_end, + 'usage': {'completion_tokens': completion_tokens, + 'prompt_tokens': prompt_tokens, + 'total_tokens': completion_tokens + prompt_tokens}, + **other_params}) + return super().format_stream_chunk(chunk) diff --git a/apps/common/handle/impl/table/csv_parse_table_handle.py b/apps/common/handle/impl/table/csv_parse_table_handle.py new file mode 100644 index 00000000000..e2fc7ce863e --- /dev/null +++ b/apps/common/handle/impl/table/csv_parse_table_handle.py @@ -0,0 +1,44 @@ +# coding=utf-8 +import logging + +from charset_normalizer import detect + +from common.handle.base_parse_table_handle import BaseParseTableHandle + +max_kb = logging.getLogger("max_kb") + + +class CsvSplitHandle(BaseParseTableHandle): + def support(self, file, get_buffer): + file_name: str = file.name.lower() + if file_name.endswith(".csv"): + return True + return False + + def handle(self, file, get_buffer,save_image): + buffer = get_buffer(file) + try: + content = buffer.decode(detect(buffer)['encoding']) + except BaseException as e: + max_kb.error(f'csv split handle error: {e}') + return [{'name': file.name, 'paragraphs': []}] + + csv_model = content.split('\n') + paragraphs = [] + # 第一行为标题 + title = csv_model[0].split(',') + for row in csv_model[1:]: + if not row: + continue + line = '; '.join([f'{key}:{value}' for key, value in zip(title, row.split(','))]) + paragraphs.append({'title': '', 'content': line}) + + return [{'name': file.name, 'paragraphs': paragraphs}] + + def get_content(self, file, save_image): + buffer = file.read() + try: + return buffer.decode(detect(buffer)['encoding']) + except BaseException as e: + max_kb.error(f'csv split handle error: {e}') + return f'error: {e}' \ No newline at end of file diff --git a/apps/common/handle/impl/table/xls_parse_table_handle.py b/apps/common/handle/impl/table/xls_parse_table_handle.py new file mode 100644 index 00000000000..897e347e8a8 --- /dev/null +++ 
b/apps/common/handle/impl/table/xls_parse_table_handle.py @@ -0,0 +1,94 @@ +# coding=utf-8 +import logging + +import xlrd + +from common.handle.base_parse_table_handle import BaseParseTableHandle + +max_kb = logging.getLogger("max_kb") + + +class XlsSplitHandle(BaseParseTableHandle): + def support(self, file, get_buffer): + file_name: str = file.name.lower() + buffer = get_buffer(file) + if file_name.endswith(".xls") and xlrd.inspect_format(content=buffer): + return True + return False + + def handle(self, file, get_buffer, save_image): + buffer = get_buffer(file) + try: + wb = xlrd.open_workbook(file_contents=buffer, formatting_info=True) + result = [] + sheets = wb.sheets() + for sheet in sheets: + # 获取合并单元格的范围信息 + merged_cells = sheet.merged_cells + print(merged_cells) + data = [] + paragraphs = [] + # 获取第一行作为标题行 + headers = [sheet.cell_value(0, col_idx) for col_idx in range(sheet.ncols)] + # 从第二行开始遍历每一行(跳过标题行) + for row_idx in range(1, sheet.nrows): + row_data = {} + for col_idx in range(sheet.ncols): + cell_value = sheet.cell_value(row_idx, col_idx) + + # 检查是否为空单元格,如果为空检查是否在合并区域中 + if cell_value == "": + # 检查当前单元格是否在合并区域 + for (rlo, rhi, clo, chi) in merged_cells: + if rlo <= row_idx < rhi and clo <= col_idx < chi: + # 使用合并区域的左上角单元格的值 + cell_value = sheet.cell_value(rlo, clo) + break + + # 将标题作为键,单元格的值作为值存入字典 + row_data[headers[col_idx]] = cell_value + data.append(row_data) + + for row in data: + row_output = "; ".join([f"{key}: {value}" for key, value in row.items()]) + # print(row_output) + paragraphs.append({'title': '', 'content': row_output}) + + result.append({'name': sheet.name, 'paragraphs': paragraphs}) + + except BaseException as e: + max_kb.error(f'excel split handle error: {e}') + return [{'name': file.name, 'paragraphs': []}] + return result + + def get_content(self, file, save_image): + # 打开 .xls 文件 + try: + workbook = xlrd.open_workbook(file_contents=file.read(), formatting_info=True) + sheets = workbook.sheets() + md_tables = '' + for sheet in 
sheets: + # 过滤空白的sheet + if sheet.nrows == 0 or sheet.ncols == 0: + continue + + # 获取表头和内容 + headers = sheet.row_values(0) + data = [sheet.row_values(row_idx) for row_idx in range(1, sheet.nrows)] + + # 构建 Markdown 表格 + md_table = '| ' + ' | '.join(headers) + ' |\n' + md_table += '| ' + ' | '.join(['---'] * len(headers)) + ' |\n' + for row in data: + # 将每个单元格中的内容替换换行符为
<br> 以保留原始格式 + md_table += '| ' + ' | '.join( + [str(cell) + .replace('\r\n', '<br>
<br>') + .replace('\n', '<br>
') + if cell else '' for cell in row]) + ' |\n' + md_tables += md_table + '\n\n' + + return md_tables + except Exception as e: + max_kb.error(f'excel split handle error: {e}') + return f'error: {e}' diff --git a/apps/common/handle/impl/table/xlsx_parse_table_handle.py b/apps/common/handle/impl/table/xlsx_parse_table_handle.py new file mode 100644 index 00000000000..a68eb14f1a1 --- /dev/null +++ b/apps/common/handle/impl/table/xlsx_parse_table_handle.py @@ -0,0 +1,107 @@ +# coding=utf-8 +import io +import logging + +from openpyxl import load_workbook + +from common.handle.base_parse_table_handle import BaseParseTableHandle +from common.handle.impl.tools import xlsx_embed_cells_images + +max_kb = logging.getLogger("max_kb") + + +class XlsxSplitHandle(BaseParseTableHandle): + def support(self, file, get_buffer): + file_name: str = file.name.lower() + if file_name.endswith('.xlsx'): + return True + return False + + def fill_merged_cells(self, sheet, image_dict): + data = [] + # 从第二行开始遍历每一行 + for row in sheet.iter_rows(values_only=False): + row_data = [] + for col_idx, cell in enumerate(row): + cell_value = cell.value + image = image_dict.get(cell_value, None) + if image is not None: + cell_value = f'![](/api/image/{image.id})' + + # 使用标题作为键,单元格的值作为值存入字典 + row_data.insert(col_idx, cell_value) + data.append(row_data) + + for merged_range in sheet.merged_cells.ranges: + cell_value = data[merged_range.min_row - 1][merged_range.min_col - 1] + for row_index in range(merged_range.min_row, merged_range.max_row + 1): + for col_index in range(merged_range.min_col, merged_range.max_col + 1): + data[row_index - 1][col_index - 1] = cell_value + return data + + def handle(self, file, get_buffer, save_image): + buffer = get_buffer(file) + try: + wb = load_workbook(io.BytesIO(buffer)) + try: + image_dict: dict = xlsx_embed_cells_images(io.BytesIO(buffer)) + save_image([item for item in image_dict.values()]) + except Exception as e: + image_dict = {} + result = [] + for sheetname in 
wb.sheetnames: + paragraphs = [] + ws = wb[sheetname] + data = self.fill_merged_cells(ws, image_dict) + if len(data) >= 2: + head_list = data[0] + for row_index in range(1, len(data)): + row_output = "; ".join( + [f"{head_list[col_index]}: {data[row_index][col_index]}" for col_index in + range(0, len(data[row_index]))]) + paragraphs.append({'title': '', 'content': row_output}) + + result.append({'name': sheetname, 'paragraphs': paragraphs}) + + except BaseException as e: + max_kb.error(f'excel split handle error: {e}') + return [{'name': file.name, 'paragraphs': []}] + return result + + def get_content(self, file, save_image): + try: + # 加载 Excel 文件 + workbook = load_workbook(file) + try: + image_dict: dict = xlsx_embed_cells_images(file) + if len(image_dict) > 0: + save_image(image_dict.values()) + except Exception as e: + print(f'{e}') + image_dict = {} + md_tables = '' + # 如果未指定 sheet_name,则使用第一个工作表 + for sheetname in workbook.sheetnames: + sheet = workbook[sheetname] if sheetname else workbook.active + data = self.fill_merged_cells(sheet, image_dict) + if len(data) == 0: + continue + # 提取表头和内容 + + headers = [f"{value}" for value in data[0]] + + # 构建 Markdown 表格 + md_table = '| ' + ' | '.join(headers) + ' |\n' + md_table += '| ' + ' | '.join(['---'] * len(headers)) + ' |\n' + for row_index in range(1, len(data)): + r = [f'{value}' for value in data[row_index]] + md_table += '| ' + ' | '.join( + [str(cell).replace('\n', '
') if cell is not None else '' for cell in r]) + ' |\n' + + md_tables += md_table + '\n\n' + + md_tables = md_tables.replace('/api/image/', '/api/file/') + return md_tables + except Exception as e: + max_kb.error(f'excel split handle error: {e}') + return f'error: {e}' diff --git a/apps/common/handle/impl/text_split_handle.py b/apps/common/handle/impl/text_split_handle.py index a773b3bbb56..9d91d874d3d 100644 --- a/apps/common/handle/impl/text_split_handle.py +++ b/apps/common/handle/impl/text_split_handle.py @@ -7,6 +7,7 @@ @desc: """ import re +import traceback from typing import List from charset_normalizer import detect @@ -26,7 +27,8 @@ class TextSplitHandle(BaseSplitHandle): def support(self, file, get_buffer): buffer = get_buffer(file) file_name: str = file.name.lower() - if file_name.endswith(".md") or file_name.endswith('.txt'): + if file_name.endswith(".md") or file_name.endswith('.txt') or file_name.endswith('.TXT') or file_name.endswith( + '.MD'): return True result = detect(buffer) if result['encoding'] is not None and result['confidence'] is not None and result['encoding'] != 'ascii' and \ @@ -48,3 +50,11 @@ def handle(self, file, pattern_list: List, with_filter: bool, limit: int, get_bu return {'name': file.name, 'content': split_model.parse(content) } + + def get_content(self, file, save_image): + buffer = file.read() + try: + return buffer.decode(detect(buffer)['encoding']) + except BaseException as e: + traceback.print_exception(e) + return f'{e}' \ No newline at end of file diff --git a/apps/common/handle/impl/tools.py b/apps/common/handle/impl/tools.py new file mode 100644 index 00000000000..d041397a7ee --- /dev/null +++ b/apps/common/handle/impl/tools.py @@ -0,0 +1,118 @@ +# coding=utf-8 +""" + @project: MaxKB + @Author:虎 + @file: tools.py + @date:2024/9/11 16:41 + @desc: +""" +import io +import uuid +from functools import reduce +from io import BytesIO +from xml.etree.ElementTree import fromstring +from zipfile import ZipFile + +from PIL 
import Image as PILImage +from openpyxl.drawing.image import Image as openpyxl_Image +from openpyxl.packaging.relationship import get_rels_path, get_dependents +from openpyxl.xml.constants import SHEET_DRAWING_NS, REL_NS, SHEET_MAIN_NS + +from common.handle.base_parse_qa_handle import get_title_row_index_dict, get_row_value +from dataset.models import Image + + +def parse_element(element) -> {}: + data = {} + xdr_namespace = "{%s}" % SHEET_DRAWING_NS + targets = level_order_traversal(element, xdr_namespace + "nvPicPr") + for target in targets: + cNvPr = embed = "" + for child in target: + if child.tag == xdr_namespace + "nvPicPr": + cNvPr = child[0].attrib["name"] + elif child.tag == xdr_namespace + "blipFill": + _rel_embed = "{%s}embed" % REL_NS + embed = child[0].attrib[_rel_embed] + if cNvPr: + data[cNvPr] = embed + return data + + +def parse_element_sheet_xml(element) -> []: + data = [] + xdr_namespace = "{%s}" % SHEET_MAIN_NS + targets = level_order_traversal(element, xdr_namespace + "f") + for target in targets: + for child in target: + if child.tag == xdr_namespace + "f": + data.append(child.text) + return data + + +def level_order_traversal(root, flag: str) -> []: + queue = [root] + targets = [] + while queue: + node = queue.pop(0) + children = [child.tag for child in node] + if flag in children: + targets.append(node) + continue + for child in node: + queue.append(child) + return targets + + +def handle_images(deps, archive: ZipFile) -> []: + images = [] + if not PILImage: # Pillow not installed, drop images + return images + for dep in deps: + try: + image_io = archive.read(dep.target) + image = openpyxl_Image(BytesIO(image_io)) + except Exception as e: + print(e) + continue + image.embed = dep.id # 文件rId + image.target = dep.target # 文件地址 + images.append(image) + return images + + +def xlsx_embed_cells_images(buffer) -> {}: + archive = ZipFile(buffer) + # 解析cellImage.xml文件 + deps = get_dependents(archive, get_rels_path("xl/cellimages.xml")) + image_rel = 
handle_images(deps=deps, archive=archive) + # 工作表及其中图片ID + sheet_list = {} + for item in archive.namelist(): + if not item.startswith('xl/worksheets/sheet'): + continue + key = item.split('/')[-1].split('.')[0].split('sheet')[-1] + sheet_list[key] = parse_element_sheet_xml(fromstring(archive.read(item))) + cell_images_xml = parse_element(fromstring(archive.read("xl/cellimages.xml"))) + cell_images_rel = {} + for image in image_rel: + cell_images_rel[image.embed] = image + for cnv, embed in cell_images_xml.items(): + cell_images_xml[cnv] = cell_images_rel.get(embed) + result = {} + for key, img in cell_images_xml.items(): + image_excel_id_list = [_xl for _xl in + reduce(lambda x, y: [*x, *y], [sheet for sheet_id, sheet in sheet_list.items()], []) if + key in _xl] + if len(image_excel_id_list) > 0: + image_excel_id = image_excel_id_list[-1] + f = archive.open(img.target) + img_byte = io.BytesIO() + im = PILImage.open(f).convert('RGB') + im.save(img_byte, format='JPEG') + image = Image(id=uuid.uuid1(), image=img_byte.getvalue(), image_name=img.path) + result['=' + image_excel_id] = image + archive.close() + return result + + diff --git a/apps/common/handle/impl/xls_split_handle.py b/apps/common/handle/impl/xls_split_handle.py new file mode 100644 index 00000000000..dbdcc95506d --- /dev/null +++ b/apps/common/handle/impl/xls_split_handle.py @@ -0,0 +1,80 @@ +# coding=utf-8 +""" + @project: maxkb + @Author:虎 + @file: xls_parse_qa_handle.py + @date:2024/5/21 14:59 + @desc: +""" +from typing import List + +import xlrd + +from common.handle.base_split_handle import BaseSplitHandle + + +def post_cell(cell_value): + return cell_value.replace('\r\n', '
<br>').replace('\n', '<br>
').replace('|', '|') + + +def row_to_md(row): + return '| ' + ' | '.join( + [post_cell(str(cell)) if cell is not None else '' for cell in row]) + ' |\n' + + +def handle_sheet(file_name, sheet, limit: int): + rows = iter([sheet.row_values(i) for i in range(sheet.nrows)]) + paragraphs = [] + result = {'name': file_name, 'content': paragraphs} + try: + title_row_list = next(rows) + title_md_content = row_to_md(title_row_list) + title_md_content += '| ' + ' | '.join( + ['---' if cell is not None else '' for cell in title_row_list]) + ' |\n' + except Exception as e: + return result + if len(title_row_list) == 0: + return result + result_item_content = '' + for row in rows: + next_md_content = row_to_md(row) + next_md_content_len = len(next_md_content) + result_item_content_len = len(result_item_content) + if len(result_item_content) == 0: + result_item_content += title_md_content + result_item_content += next_md_content + else: + if result_item_content_len + next_md_content_len < limit: + result_item_content += next_md_content + else: + paragraphs.append({'content': result_item_content, 'title': ''}) + result_item_content = title_md_content + next_md_content + if len(result_item_content) > 0: + paragraphs.append({'content': result_item_content, 'title': ''}) + return result + + +class XlsSplitHandle(BaseSplitHandle): + def handle(self, file, pattern_list: List, with_filter: bool, limit: int, get_buffer, save_image): + buffer = get_buffer(file) + try: + workbook = xlrd.open_workbook(file_contents=buffer) + worksheets = workbook.sheets() + worksheets_size = len(worksheets) + return [row for row in + [handle_sheet(file.name, + sheet, limit) if worksheets_size == 1 and sheet.name == 'Sheet1' else handle_sheet( + sheet.name, sheet, limit) for sheet + in worksheets] if row is not None] + except Exception as e: + return [{'name': file.name, 'content': []}] + + def get_content(self, file, save_image): + pass + + def support(self, file, get_buffer): + file_name: str = 
file.name.lower() + buffer = get_buffer(file) + if file_name.endswith(".xls") and xlrd.inspect_format(content=buffer): + return True + return False diff --git a/apps/common/handle/impl/xlsx_split_handle.py b/apps/common/handle/impl/xlsx_split_handle.py new file mode 100644 index 00000000000..22ad23146f4 --- /dev/null +++ b/apps/common/handle/impl/xlsx_split_handle.py @@ -0,0 +1,92 @@ +# coding=utf-8 +""" + @project: maxkb + @Author:虎 + @file: xlsx_parse_qa_handle.py + @date:2024/5/21 14:59 + @desc: +""" +import io +from typing import List + +import openpyxl + +from common.handle.base_split_handle import BaseSplitHandle +from common.handle.impl.tools import xlsx_embed_cells_images + + +def post_cell(image_dict, cell_value): + image = image_dict.get(cell_value, None) + if image is not None: + return f'![](/api/image/{image.id})' + return cell_value.replace('\n', '
').replace('|', '|') + + +def row_to_md(row, image_dict): + return '| ' + ' | '.join( + [post_cell(image_dict, str(cell.value if cell.value is not None else '')) if cell is not None else '' for cell + in row]) + ' |\n' + + +def handle_sheet(file_name, sheet, image_dict, limit: int): + rows = sheet.rows + paragraphs = [] + result = {'name': file_name, 'content': paragraphs} + try: + title_row_list = next(rows) + title_md_content = row_to_md(title_row_list, image_dict) + title_md_content += '| ' + ' | '.join( + ['---' if cell is not None else '' for cell in title_row_list]) + ' |\n' + except Exception as e: + return result + if len(title_row_list) == 0: + return result + result_item_content = '' + for row in rows: + next_md_content = row_to_md(row, image_dict) + next_md_content_len = len(next_md_content) + result_item_content_len = len(result_item_content) + if len(result_item_content) == 0: + result_item_content += title_md_content + result_item_content += next_md_content + else: + if result_item_content_len + next_md_content_len < limit: + result_item_content += next_md_content + else: + paragraphs.append({'content': result_item_content, 'title': ''}) + result_item_content = title_md_content + next_md_content + if len(result_item_content) > 0: + paragraphs.append({'content': result_item_content, 'title': ''}) + return result + + +class XlsxSplitHandle(BaseSplitHandle): + def handle(self, file, pattern_list: List, with_filter: bool, limit: int, get_buffer, save_image): + buffer = get_buffer(file) + try: + workbook = openpyxl.load_workbook(io.BytesIO(buffer)) + try: + image_dict: dict = xlsx_embed_cells_images(io.BytesIO(buffer)) + save_image([item for item in image_dict.values()]) + except Exception as e: + image_dict = {} + worksheets = workbook.worksheets + worksheets_size = len(worksheets) + return [row for row in + [handle_sheet(file.name, + sheet, + image_dict, + limit) if worksheets_size == 1 and sheet.title == 'Sheet1' else handle_sheet( + sheet.title, sheet, 
image_dict, limit) for sheet + in worksheets] if row is not None] + except Exception as e: + return [{'name': file.name, 'content': []}] + + def get_content(self, file, save_image): + pass + + def support(self, file, get_buffer): + file_name: str = file.name.lower() + if file_name.endswith(".xlsx"): + return True + return False diff --git a/apps/common/handle/impl/zip_split_handle.py b/apps/common/handle/impl/zip_split_handle.py new file mode 100644 index 00000000000..8d931c9e78e --- /dev/null +++ b/apps/common/handle/impl/zip_split_handle.py @@ -0,0 +1,161 @@ +# coding=utf-8 +""" + @project: maxkb + @Author:虎 + @file: text_split_handle.py + @date:2024/3/27 18:19 + @desc: +""" +import io +import os +import re +import uuid +import zipfile +from typing import List +from urllib.parse import urljoin + +from charset_normalizer import detect +from django.db.models import QuerySet + +from common.handle.base_split_handle import BaseSplitHandle +from common.handle.impl.csv_split_handle import CsvSplitHandle +from common.handle.impl.doc_split_handle import DocSplitHandle +from common.handle.impl.html_split_handle import HTMLSplitHandle +from common.handle.impl.pdf_split_handle import PdfSplitHandle +from common.handle.impl.text_split_handle import TextSplitHandle +from common.handle.impl.xls_split_handle import XlsSplitHandle +from common.handle.impl.xlsx_split_handle import XlsxSplitHandle +from common.util.common import parse_md_image +from dataset.models import Image +from django.utils.translation import gettext_lazy as _ + + +class FileBufferHandle: + buffer = None + + def get_buffer(self, file): + if self.buffer is None: + self.buffer = file.read() + return self.buffer + + +default_split_handle = TextSplitHandle() +split_handles = [HTMLSplitHandle(), DocSplitHandle(), PdfSplitHandle(), XlsxSplitHandle(), XlsSplitHandle(), + CsvSplitHandle(), + default_split_handle] + + +def save_inner_image(image_list): + if image_list is not None and len(image_list) > 0: + 
QuerySet(Image).bulk_create(image_list) + + +def file_to_paragraph(file, pattern_list: List, with_filter: bool, limit: int): + get_buffer = FileBufferHandle().get_buffer + for split_handle in split_handles: + if split_handle.support(file, get_buffer): + return split_handle.handle(file, pattern_list, with_filter, limit, get_buffer, save_inner_image) + raise Exception(_('Unsupported file format')) + + +def is_valid_uuid(uuid_str: str): + try: + uuid.UUID(uuid_str) + except ValueError: + return False + return True + + +def get_image_list(result_list: list, zip_files: List[str]): + image_file_list = [] + for result in result_list: + for p in result.get('content', []): + content: str = p.get('content', '') + image_list = parse_md_image(content) + for image in image_list: + search = re.search("\(.*\)", image) + if search: + new_image_id = str(uuid.uuid1()) + source_image_path = search.group().replace('(', '').replace(')', '') + source_image_path = source_image_path.strip().split(" ")[0] + image_path = urljoin(result.get('name'), '.' 
+ source_image_path if source_image_path.startswith( + '/') else source_image_path) + if not zip_files.__contains__(image_path): + continue + if image_path.startswith('api/file/') or image_path.startswith('api/image/'): + image_id = image_path.replace('api/file/', '').replace('api/image/', '') + if is_valid_uuid(image_id): + image_file_list.append({'source_file': image_path, + 'image_id': image_id}) + else: + image_file_list.append({'source_file': image_path, + 'image_id': new_image_id}) + content = content.replace(source_image_path, f'/api/image/{new_image_id}') + p['content'] = content + else: + image_file_list.append({'source_file': image_path, + 'image_id': new_image_id}) + content = content.replace(source_image_path, f'/api/image/{new_image_id}') + p['content'] = content + + return image_file_list + + +def get_file_name(file_name): + try: + file_name_code = file_name.encode('cp437') + charset = detect(file_name_code)['encoding'] + return file_name_code.decode(charset) + except Exception as e: + return file_name + + +def filter_image_file(result_list: list, image_list): + image_source_file_list = [image.get('source_file') for image in image_list] + return [r for r in result_list if not image_source_file_list.__contains__(r.get('name', ''))] + + +class ZipSplitHandle(BaseSplitHandle): + def handle(self, file, pattern_list: List, with_filter: bool, limit: int, get_buffer, save_image): + buffer = get_buffer(file) + bytes_io = io.BytesIO(buffer) + result = [] + # 打开zip文件 + with zipfile.ZipFile(bytes_io, 'r') as zip_ref: + # 获取压缩包中的文件名列表 + files = zip_ref.namelist() + # 读取压缩包中的文件内容 + for file in files: + if file.endswith('/') or file.startswith('__MACOSX'): + continue + with zip_ref.open(file) as f: + # 对文件内容进行处理 + try: + # 处理一下文件名 + f.name = get_file_name(f.name) + value = file_to_paragraph(f, pattern_list, with_filter, limit) + if isinstance(value, list): + result = [*result, *value] + else: + result.append(value) + except Exception: + pass + image_list = 
get_image_list(result, files) + result = filter_image_file(result, image_list) + image_mode_list = [] + for image in image_list: + with zip_ref.open(image.get('source_file')) as f: + i = Image(id=image.get('image_id'), image=f.read(), + image_name=os.path.basename(image.get('source_file'))) + image_mode_list.append(i) + save_image(image_mode_list) + return result + + def support(self, file, get_buffer): + file_name: str = file.name.lower() + if file_name.endswith(".zip") or file_name.endswith(".ZIP"): + return True + return False + + def get_content(self, file, save_image): + return "" diff --git a/apps/common/init/init_doc.py b/apps/common/init/init_doc.py new file mode 100644 index 00000000000..d66b0666370 --- /dev/null +++ b/apps/common/init/init_doc.py @@ -0,0 +1,93 @@ +# coding=utf-8 +""" + @project: maxkb + @Author:虎 + @file: init_doc.py + @date:2024/5/24 14:11 + @desc: +""" +import hashlib + +from django.urls import re_path, path, URLPattern +from drf_yasg import openapi +from drf_yasg.views import get_schema_view +from rest_framework import permissions + +from common.auth import AnonymousAuthentication +from smartdoc.const import CONFIG +from django.utils.translation import gettext_lazy as _ + + +def init_app_doc(application_urlpatterns): + schema_view = get_schema_view( + openapi.Info( + title="Python API", + default_version='v1', + description=_('Intelligent customer service platform'), + ), + public=True, + permission_classes=[permissions.AllowAny], + authentication_classes=[AnonymousAuthentication] + ) + application_urlpatterns += [ + re_path(r'^doc(?P\.json|\.yaml)$', schema_view.without_ui(cache_timeout=0), + name='schema-json'), # 导出 + path('doc/', schema_view.with_ui('swagger', cache_timeout=0), name='schema-swagger-ui'), + path('redoc/', schema_view.with_ui('redoc', cache_timeout=0), name='schema-redoc'), + ] + + +def init_chat_doc(application_urlpatterns, patterns): + chat_schema_view = get_schema_view( + openapi.Info( + title="Python API", + 
default_version='/chat', + description=_('Intelligent customer service platform'), + ), + public=True, + permission_classes=[permissions.AllowAny], + authentication_classes=[AnonymousAuthentication], + patterns=[ + URLPattern(pattern='api/' + str(url.pattern), callback=url.callback, default_args=url.default_args, + name=url.name) + for url in patterns if + url.name is not None and ['application/message', 'application/open', + 'application/profile'].__contains__( + url.name)] + ) + + application_urlpatterns += [ + path('doc/chat/', chat_schema_view.with_ui('swagger', cache_timeout=0), name='schema-swagger-ui'), + path('redoc/chat/', chat_schema_view.with_ui('redoc', cache_timeout=0), name='schema-redoc'), + ] + + +def encrypt(text): + md5 = hashlib.md5() + md5.update(text.encode()) + result = md5.hexdigest() + return result + + +def get_call(application_urlpatterns, patterns, params, func): + def run(): + if params['valid'](): + func(*params['get_params'](application_urlpatterns, patterns)) + + return run + + +init_list = [(init_app_doc, {'valid': lambda: CONFIG.get('DOC_PASSWORD') is not None and encrypt( + CONFIG.get('DOC_PASSWORD')) == 'd4fc097197b4b90a122b92cbd5bbe867', + 'get_call': get_call, + 'get_params': lambda application_urlpatterns, patterns: (application_urlpatterns,)}), + (init_chat_doc, {'valid': lambda: CONFIG.get('DOC_PASSWORD') is not None and encrypt( + CONFIG.get('DOC_PASSWORD')) == 'd4fc097197b4b90a122b92cbd5bbe867' or True, 'get_call': get_call, + 'get_params': lambda application_urlpatterns, patterns: ( + application_urlpatterns, patterns)})] + + +def init_doc(application_urlpatterns, patterns): + for init, params in init_list: + if params['valid'](): + get_call(application_urlpatterns, patterns, params, init)() diff --git a/apps/common/job/__init__.py b/apps/common/job/__init__.py index 895bf7f5d97..286c81cae74 100644 --- a/apps/common/job/__init__.py +++ b/apps/common/job/__init__.py @@ -7,7 +7,11 @@ @desc: """ from .client_access_num_job 
import * +from .clean_chat_job import * +from .clean_debug_file_job import * def run(): client_access_num_job.run() + clean_chat_job.run() + clean_debug_file_job.run() diff --git a/apps/common/job/clean_chat_job.py b/apps/common/job/clean_chat_job.py new file mode 100644 index 00000000000..fb95c3a9fd0 --- /dev/null +++ b/apps/common/job/clean_chat_job.py @@ -0,0 +1,83 @@ +# coding=utf-8 + +import logging +import datetime + +from django.db import transaction +from django.utils import timezone +from apscheduler.schedulers.background import BackgroundScheduler +from django_apscheduler.jobstores import DjangoJobStore +from application.models import Application, Chat, ChatRecord +from django.db.models import Q, Max +from common.lock.impl.file_lock import FileLock +from dataset.models import File + + +from django.db import connection + +scheduler = BackgroundScheduler() +scheduler.add_jobstore(DjangoJobStore(), "default") +lock = FileLock() + + +def clean_chat_log_job(): + from django.utils.translation import gettext_lazy as _ + logging.getLogger("max_kb").info(_('start clean chat log')) + now = timezone.now() + + applications = Application.objects.all().values('id', 'clean_time') + cutoff_dates = { + app['id']: now - datetime.timedelta(days=app['clean_time'] or 180) + for app in applications + } + + query_conditions = Q() + for app_id, cutoff_date in cutoff_dates.items(): + query_conditions |= Q(chat__application_id=app_id, create_time__lt=cutoff_date) + batch_size = 500 + while True: + with transaction.atomic(): + chat_records = ChatRecord.objects.filter(query_conditions).select_related('chat').only('id', 'chat_id', + 'create_time')[ + :batch_size] + if not chat_records: + break + chat_record_ids = [record.id for record in chat_records] + chat_ids = {record.chat_id for record in chat_records} + + # 计算每个 chat_id 的最大 create_time + max_create_times = ChatRecord.objects.filter(id__in=chat_record_ids).values('chat_id').annotate( + max_create_time=Max('create_time')) + + # 
收集需要删除的文件 + files_to_delete = [] + for record in chat_records: + max_create_time = next( + (item['max_create_time'] for item in max_create_times if item['chat_id'] == record.chat_id), None) + if max_create_time: + files_to_delete.extend( + File.objects.filter(meta__chat_id=str(record.chat_id), create_time__lt=max_create_time) + ) + # 删除 ChatRecord + deleted_count = ChatRecord.objects.filter(id__in=chat_record_ids).delete()[0] + + # 删除没有关联 ChatRecord 的 Chat + Chat.objects.filter(chatrecord__isnull=True, id__in=chat_ids).delete() + File.objects.filter(loid__in=[file.loid for file in files_to_delete]).delete() + + if deleted_count < batch_size: + break + + logging.getLogger("max_kb").info(_('end clean chat log')) + + +def run(): + if lock.try_lock('clean_chat_log_job', 30 * 30): + try: + scheduler.start() + existing_job = scheduler.get_job(job_id='clean_chat_log') + if existing_job is not None: + existing_job.remove() + scheduler.add_job(clean_chat_log_job, 'cron', hour='0', minute='5', id='clean_chat_log') + finally: + lock.un_lock('clean_chat_log_job') diff --git a/apps/common/job/clean_debug_file_job.py b/apps/common/job/clean_debug_file_job.py new file mode 100644 index 00000000000..c701dd7d2f0 --- /dev/null +++ b/apps/common/job/clean_debug_file_job.py @@ -0,0 +1,37 @@ +# coding=utf-8 + +import logging +from datetime import timedelta + +from apscheduler.schedulers.background import BackgroundScheduler +from django.db.models import Q +from django.utils import timezone +from django_apscheduler.jobstores import DjangoJobStore + +from common.lock.impl.file_lock import FileLock +from dataset.models import File + +scheduler = BackgroundScheduler() +scheduler.add_jobstore(DjangoJobStore(), "default") +lock = FileLock() + + +def clean_debug_file(): + from django.utils.translation import gettext_lazy as _ + logging.getLogger("max_kb").info(_('start clean debug file')) + two_hours_ago = timezone.now() - timedelta(hours=2) + # 删除对应的文件 + 
File.objects.filter(Q(create_time__lt=two_hours_ago) & Q(meta__debug=True)).delete() + logging.getLogger("max_kb").info(_('end clean debug file')) + + +def run(): + if lock.try_lock('clean_debug_file', 30 * 30): + try: + scheduler.start() + clean_debug_file_job = scheduler.get_job(job_id='clean_debug_file') + if clean_debug_file_job is not None: + clean_debug_file_job.remove() + scheduler.add_job(clean_debug_file, 'cron', hour='2', minute='0', second='0', id='clean_debug_file') + finally: + lock.un_lock('clean_debug_file') diff --git a/apps/common/job/client_access_num_job.py b/apps/common/job/client_access_num_job.py index 4c03fd2100a..6488a602555 100644 --- a/apps/common/job/client_access_num_job.py +++ b/apps/common/job/client_access_num_job.py @@ -13,21 +13,28 @@ from django_apscheduler.jobstores import DjangoJobStore from application.models.api_key_model import ApplicationPublicAccessClient +from common.lock.impl.file_lock import FileLock scheduler = BackgroundScheduler() scheduler.add_jobstore(DjangoJobStore(), "default") +lock = FileLock() def client_access_num_reset_job(): - logging.getLogger("max_kb").info('开始重置access_num') + from django.utils.translation import gettext_lazy as _ + logging.getLogger("max_kb").info(_('start reset access_num')) QuerySet(ApplicationPublicAccessClient).update(intraday_access_num=0) - logging.getLogger("max_kb").info('结束重置access_num') + logging.getLogger("max_kb").info(_('end reset access_num')) def run(): - scheduler.start() - access_num_reset = scheduler.get_job(job_id='access_num_reset') - if access_num_reset is not None: - access_num_reset.remove() - scheduler.add_job(client_access_num_reset_job, 'cron', hour='0', minute='0', second='0', - id='access_num_reset') + if lock.try_lock('client_access_num_reset_job', 30 * 30): + try: + scheduler.start() + access_num_reset = scheduler.get_job(job_id='access_num_reset') + if access_num_reset is not None: + access_num_reset.remove() + scheduler.add_job(client_access_num_reset_job, 
'cron', hour='0', minute='0', second='0', + id='access_num_reset') + finally: + lock.un_lock('client_access_num_reset_job') diff --git a/apps/common/lock/base_lock.py b/apps/common/lock/base_lock.py new file mode 100644 index 00000000000..2ca5b21dada --- /dev/null +++ b/apps/common/lock/base_lock.py @@ -0,0 +1,20 @@ +# coding=utf-8 +""" + @project: MaxKB + @Author:虎 + @file: base_lock.py + @date:2024/8/20 10:33 + @desc: +""" + +from abc import ABC, abstractmethod + + +class BaseLock(ABC): + @abstractmethod + def try_lock(self, key, timeout): + pass + + @abstractmethod + def un_lock(self, key): + pass diff --git a/apps/common/lock/impl/file_lock.py b/apps/common/lock/impl/file_lock.py new file mode 100644 index 00000000000..f8ea6396cf5 --- /dev/null +++ b/apps/common/lock/impl/file_lock.py @@ -0,0 +1,77 @@ +# coding=utf-8 +""" + @project: MaxKB + @Author:虎 + @file: file_lock.py + @date:2024/8/20 10:48 + @desc: +""" +import errno +import hashlib +import os +import time + +import six + +from common.lock.base_lock import BaseLock +from smartdoc.const import PROJECT_DIR + + +def key_to_lock_name(key): + """ + Combine part of a key with its hash to prevent very long filenames + """ + MAX_LENGTH = 50 + key_hash = hashlib.md5(six.b(key)).hexdigest() + lock_name = key[:MAX_LENGTH - len(key_hash) - 1] + '_' + key_hash + return lock_name + + +class FileLock(BaseLock): + """ + File locking backend. + """ + + def __init__(self, settings=None): + if settings is None: + settings = {} + self.location = settings.get('location') + if self.location is None: + self.location = os.path.join(PROJECT_DIR, 'data', 'lock') + try: + os.makedirs(self.location) + except OSError as error: + # Directory exists? 
+ if error.errno != errno.EEXIST: + # Re-raise unexpected OSError + raise + + def _get_lock_path(self, key): + lock_name = key_to_lock_name(key) + return os.path.join(self.location, lock_name) + + def try_lock(self, key, timeout): + lock_path = self._get_lock_path(key) + try: + # 创建锁文件,如果没创建成功则拿不到 + fd = os.open(lock_path, os.O_CREAT | os.O_EXCL) + except OSError as error: + if error.errno == errno.EEXIST: + # File already exists, check its modification time + mtime = os.path.getmtime(lock_path) + ttl = mtime + timeout - time.time() + if ttl > 0: + return False + else: + # 如果超时时间已到,直接上锁成功继续执行 + os.utime(lock_path, None) + return True + else: + return False + else: + os.close(fd) + return True + + def un_lock(self, key): + lock_path = self._get_lock_path(key) + os.remove(lock_path) diff --git a/apps/common/log/log.py b/apps/common/log/log.py new file mode 100644 index 00000000000..1942b4f994e --- /dev/null +++ b/apps/common/log/log.py @@ -0,0 +1,100 @@ +# coding=utf-8 +""" + @project: MaxKB + @Author:虎 + @file: log.py + @date:2025/3/14 16:09 + @desc: +""" +from gettext import gettext + +from setting.models.log_management import Log + + +def _get_ip_address(request): + """ + 获取ip地址 + @param request: + @return: + """ + x_forwarded_for = request.META.get('HTTP_X_FORWARDED_FOR') + if x_forwarded_for: + ip = x_forwarded_for.split(',')[0] + else: + ip = request.META.get('REMOTE_ADDR') + return ip + + +def _get_user(request): + """ + 获取用户 + @param request: + @return: + """ + user = request.user + if user is None: + return { + + } + return { + "id": str(user.id), + "email": user.email, + "phone": user.phone, + "nick_name": user.nick_name, + "username": user.username, + "role": user.role, + } + + +def _get_details(request): + path = request.path + body = request.data + query = request.query_params + return { + 'path': path, + 'body': body, + 'query': query + } + + +def log(menu: str, operate, get_user=_get_user, get_ip_address=_get_ip_address, get_details=_get_details, + 
get_operation_object=None): + """ + 记录审计日志 + @param menu: 操作菜单 str + @param operate: 操作 str|func 如果是一个函数 入参将是一个request 响应为str def operate(request): return "操作菜单" + @param get_user: 获取用户 + @param get_ip_address:获取IP地址 + @param get_details: 获取执行详情 + @param get_operation_object: 获取操作对象 + @return: + """ + + def inner(func): + def run(view, request, **kwargs): + status = 200 + operation_object = {} + try: + if get_operation_object is not None: + operation_object = get_operation_object(request, kwargs) + except Exception as e: + pass + try: + return func(view, request, **kwargs) + except Exception as e: + status = 500 + raise e + finally: + ip = get_ip_address(request) + user = get_user(request) + details = get_details(request) + _operate = operate + if callable(operate): + _operate = operate(request) + # 插入审计日志 + Log(menu=menu, operate=_operate, user=user, status=status, ip_address=ip, details=details, + operation_object=operation_object).save() + + return run + + return inner diff --git a/apps/common/management/__init__.py b/apps/common/management/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/apps/common/management/commands/__init__.py b/apps/common/management/commands/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/apps/common/management/commands/celery.py b/apps/common/management/commands/celery.py new file mode 100644 index 00000000000..a26b43597de --- /dev/null +++ b/apps/common/management/commands/celery.py @@ -0,0 +1,46 @@ +# coding=utf-8 +""" + @project: MaxKB + @Author:虎 + @file: celery.py + @date:2024/8/19 11:57 + @desc: +""" +import os +import subprocess + +from django.core.management.base import BaseCommand + +from smartdoc.const import BASE_DIR + + +class Command(BaseCommand): + help = 'celery' + + def add_arguments(self, parser): + parser.add_argument( + 'service', nargs='+', type=str, choices=("celery", "model"), help='Service', + ) + + def handle(self, *args, **options): + service = 
options.get('service') + os.environ.setdefault('CELERY_NAME', ','.join(service)) + server_hostname = os.environ.get("SERVER_HOSTNAME") + if hasattr(os, 'getuid') and os.getuid() == 0: + os.environ.setdefault('C_FORCE_ROOT', '1') + if not server_hostname: + server_hostname = '%h' + cmd = [ + 'celery', + '-A', 'ops', + 'worker', + '-P', 'threads', + '-l', 'info', + '-c', '10', + '-Q', ','.join(service), + '--heartbeat-interval', '10', + '-n', f'{",".join(service)}@{server_hostname}', + '--without-mingle', + ] + kwargs = {'cwd': BASE_DIR} + subprocess.run(cmd, **kwargs) diff --git a/apps/common/management/commands/restart.py b/apps/common/management/commands/restart.py new file mode 100644 index 00000000000..57285f9c993 --- /dev/null +++ b/apps/common/management/commands/restart.py @@ -0,0 +1,6 @@ +from .services.command import BaseActionCommand, Action + + +class Command(BaseActionCommand): + help = 'Restart services' + action = Action.restart.value diff --git a/apps/common/management/commands/services/__init__.py b/apps/common/management/commands/services/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/apps/common/management/commands/services/command.py b/apps/common/management/commands/services/command.py new file mode 100644 index 00000000000..0c97d4af378 --- /dev/null +++ b/apps/common/management/commands/services/command.py @@ -0,0 +1,134 @@ +import math + +from django.core.management.base import BaseCommand +from django.db.models import TextChoices + +from .hands import * +from .utils import ServicesUtil +import os + + +class Services(TextChoices): + gunicorn = 'gunicorn', 'gunicorn' + celery_default = 'celery_default', 'celery_default' + local_model = 'local_model', 'local_model' + web = 'web', 'web' + celery = 'celery', 'celery' + celery_model = 'celery_model', 'celery_model' + task = 'task', 'task' + all = 'all', 'all' + + @classmethod + def get_service_object_class(cls, name): + from . 
import services + services_map = { + cls.gunicorn.value: services.GunicornService, + cls.celery_default: services.CeleryDefaultService, + cls.local_model: services.GunicornLocalModelService + } + return services_map.get(name) + + @classmethod + def web_services(cls): + return [cls.gunicorn, cls.local_model] + + @classmethod + def celery_services(cls): + return [cls.celery_default, cls.celery_model] + + @classmethod + def task_services(cls): + return cls.celery_services() + + @classmethod + def all_services(cls): + return cls.web_services() + cls.task_services() + + @classmethod + def export_services_values(cls): + return [cls.all.value, cls.web.value, cls.task.value] + [s.value for s in cls.all_services()] + + @classmethod + def get_service_objects(cls, service_names, **kwargs): + services = set() + for name in service_names: + method_name = f'{name}_services' + if hasattr(cls, method_name): + _services = getattr(cls, method_name)() + elif hasattr(cls, name): + _services = [getattr(cls, name)] + else: + continue + services.update(set(_services)) + + service_objects = [] + for s in services: + service_class = cls.get_service_object_class(s.value) + if not service_class: + continue + kwargs.update({ + 'name': s.value + }) + service_object = service_class(**kwargs) + service_objects.append(service_object) + return service_objects + + +class Action(TextChoices): + start = 'start', 'start' + status = 'status', 'status' + stop = 'stop', 'stop' + restart = 'restart', 'restart' + + +class BaseActionCommand(BaseCommand): + help = 'Service Base Command' + + action = None + util = None + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + def add_arguments(self, parser): + parser.add_argument( + 'services', nargs='+', choices=Services.export_services_values(), help='Service', + ) + parser.add_argument('-d', '--daemon', nargs="?", const=True) + parser.add_argument('-w', '--worker', type=int, nargs="?", + default=3 if os.cpu_count() > 6 else 
math.floor(os.cpu_count() / 2)) + parser.add_argument('-f', '--force', nargs="?", const=True) + + def initial_util(self, *args, **options): + service_names = options.get('services') + service_kwargs = { + 'worker_gunicorn': options.get('worker') + } + services = Services.get_service_objects(service_names=service_names, **service_kwargs) + + kwargs = { + 'services': services, + 'run_daemon': options.get('daemon', False), + 'stop_daemon': self.action == Action.stop.value and Services.all.value in service_names, + 'force_stop': options.get('force') or False, + } + self.util = ServicesUtil(**kwargs) + + def handle(self, *args, **options): + self.initial_util(*args, **options) + assert self.action in Action.values, f'The action {self.action} is not in the optional list' + _handle = getattr(self, f'_handle_{self.action}', lambda: None) + _handle() + + def _handle_start(self): + self.util.start_and_watch() + os._exit(0) + + def _handle_stop(self): + self.util.stop() + + def _handle_restart(self): + self.util.restart() + + def _handle_status(self): + self.util.show_status() diff --git a/apps/common/management/commands/services/hands.py b/apps/common/management/commands/services/hands.py new file mode 100644 index 00000000000..82447024ef2 --- /dev/null +++ b/apps/common/management/commands/services/hands.py @@ -0,0 +1,26 @@ +import logging +import os +import sys + +from smartdoc.const import CONFIG, PROJECT_DIR + +try: + from apps.smartdoc import const + + __version__ = const.VERSION +except ImportError as e: + print("Not found __version__: {}".format(e)) + print("Python is: ") + logging.info(sys.executable) + __version__ = 'Unknown' + sys.exit(1) + +HTTP_HOST = '0.0.0.0' +HTTP_PORT = CONFIG.HTTP_LISTEN_PORT or 8080 +DEBUG = CONFIG.DEBUG or False + +LOG_DIR = os.path.join(PROJECT_DIR, 'data', 'logs') +APPS_DIR = os.path.join(PROJECT_DIR, 'apps') +TMP_DIR = os.path.join(PROJECT_DIR, 'tmp') +if not os.path.exists(TMP_DIR): + os.makedirs(TMP_DIR) diff --git 
a/apps/common/management/commands/services/services/__init__.py b/apps/common/management/commands/services/services/__init__.py new file mode 100644 index 00000000000..1027392060c --- /dev/null +++ b/apps/common/management/commands/services/services/__init__.py @@ -0,0 +1,3 @@ +from .celery_default import * +from .gunicorn import * +from .local_model import * \ No newline at end of file diff --git a/apps/common/management/commands/services/services/base.py b/apps/common/management/commands/services/services/base.py new file mode 100644 index 00000000000..ddcb4feca3b --- /dev/null +++ b/apps/common/management/commands/services/services/base.py @@ -0,0 +1,207 @@ +import abc +import time +import shutil +import psutil +import datetime +import threading +import subprocess +from ..hands import * + + +class BaseService(object): + + def __init__(self, **kwargs): + self.name = kwargs['name'] + self._process = None + self.STOP_TIMEOUT = 10 + self.max_retry = 0 + self.retry = 3 + self.LOG_KEEP_DAYS = 7 + self.EXIT_EVENT = threading.Event() + + @property + @abc.abstractmethod + def cmd(self): + return [] + + @property + @abc.abstractmethod + def cwd(self): + return '' + + @property + def is_running(self): + if self.pid == 0: + return False + try: + os.kill(self.pid, 0) + except (OSError, ProcessLookupError): + return False + else: + return True + + def show_status(self): + if self.is_running: + msg = f'{self.name} is running: {self.pid}.' + else: + msg = f'{self.name} is stopped.' 
+ if DEBUG: + msg = '\033[31m{} is stopped.\033[0m\nYou can manual start it to find the error: \n' \ + ' $ cd {}\n' \ + ' $ {}'.format(self.name, self.cwd, ' '.join(self.cmd)) + + print(msg) + + # -- log -- + @property + def log_filename(self): + return f'{self.name}.log' + + @property + def log_filepath(self): + return os.path.join(LOG_DIR, self.log_filename) + + @property + def log_file(self): + return open(self.log_filepath, 'a') + + @property + def log_dir(self): + return os.path.dirname(self.log_filepath) + # -- end log -- + + # -- pid -- + @property + def pid_filepath(self): + return os.path.join(TMP_DIR, f'{self.name}.pid') + + @property + def pid(self): + if not os.path.isfile(self.pid_filepath): + return 0 + with open(self.pid_filepath) as f: + try: + pid = int(f.read().strip()) + except ValueError: + pid = 0 + return pid + + def write_pid(self): + with open(self.pid_filepath, 'w') as f: + f.write(str(self.process.pid)) + + def remove_pid(self): + if os.path.isfile(self.pid_filepath): + os.unlink(self.pid_filepath) + # -- end pid -- + + # -- process -- + @property + def process(self): + if not self._process: + try: + self._process = psutil.Process(self.pid) + except: + pass + return self._process + + # -- end process -- + + # -- action -- + def open_subprocess(self): + kwargs = {'cwd': self.cwd, 'stderr': self.log_file, 'stdout': self.log_file} + self._process = subprocess.Popen(self.cmd, **kwargs) + + def start(self): + if self.is_running: + self.show_status() + return + self.remove_pid() + self.open_subprocess() + self.write_pid() + self.start_other() + + def start_other(self): + pass + + def stop(self, force=False): + if not self.is_running: + self.show_status() + # self.remove_pid() + return + + print(f'Stop service: {self.name}', end='') + sig = 9 if force else 15 + os.kill(self.pid, sig) + + if self.process is None: + print("\033[31m No process found\033[0m") + return + try: + self.process.wait(1) + except: + pass + + for i in 
range(self.STOP_TIMEOUT): + if i == self.STOP_TIMEOUT - 1: + print("\033[31m Error\033[0m") + if not self.is_running: + print("\033[32m Ok\033[0m") + self.remove_pid() + break + else: + continue + + def watch(self): + self._check() + if not self.is_running: + self._restart() + self._rotate_log() + + def _check(self): + now = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S') + print(f"{now} Check service status: {self.name} -> ", end='') + if self.process: + try: + self.process.wait(1) # 不wait,子进程可能无法回收 + except: + pass + + if self.is_running: + print(f'running at {self.pid}') + else: + print(f'stopped at {self.pid}') + + def _restart(self): + if self.retry > self.max_retry: + logging.info("Service start failed, exit: {}".format(self.name)) + self.EXIT_EVENT.set() + return + self.retry += 1 + logging.info(f'> Find {self.name} stopped, retry {self.retry}, {self.pid}') + self.start() + + def _rotate_log(self): + now = datetime.datetime.now() + _time = now.strftime('%H:%M') + if _time != '23:59': + return + + backup_date = now.strftime('%Y-%m-%d') + backup_log_dir = os.path.join(self.log_dir, backup_date) + if not os.path.exists(backup_log_dir): + os.mkdir(backup_log_dir) + + backup_log_path = os.path.join(backup_log_dir, self.log_filename) + if os.path.isfile(self.log_filepath) and not os.path.isfile(backup_log_path): + logging.info(f'Rotate log file: {self.log_filepath} => {backup_log_path}') + shutil.copy(self.log_filepath, backup_log_path) + with open(self.log_filepath, 'w') as f: + pass + + to_delete_date = now - datetime.timedelta(days=self.LOG_KEEP_DAYS) + to_delete_dir = os.path.join(LOG_DIR, to_delete_date.strftime('%Y-%m-%d')) + if os.path.exists(to_delete_dir): + logging.info(f'Remove old log: {to_delete_dir}') + shutil.rmtree(to_delete_dir, ignore_errors=True) + # -- end action -- diff --git a/apps/common/management/commands/services/services/celery_base.py b/apps/common/management/commands/services/services/celery_base.py new file mode 100644 index 
00000000000..0ae219bd5eb --- /dev/null +++ b/apps/common/management/commands/services/services/celery_base.py @@ -0,0 +1,45 @@ +from django.conf import settings + +from .base import BaseService +from ..hands import * + + +class CeleryBaseService(BaseService): + + def __init__(self, queue, num=10, **kwargs): + super().__init__(**kwargs) + self.queue = queue + self.num = num + + @property + def cmd(self): + print('\n- Start Celery as Distributed Task Queue: {}'.format(self.queue.capitalize())) + + os.environ.setdefault('LC_ALL', 'C.UTF-8') + os.environ.setdefault('PYTHONOPTIMIZE', '1') + os.environ.setdefault('ANSIBLE_FORCE_COLOR', 'True') + os.environ.setdefault('PYTHONPATH', settings.APPS_DIR) + + if os.getuid() == 0: + os.environ.setdefault('C_FORCE_ROOT', '1') + server_hostname = os.environ.get("SERVER_HOSTNAME") + if not server_hostname: + server_hostname = '%h' + + cmd = [ + 'celery', + '-A', 'ops', + 'worker', + '-P', 'threads', + '-l', 'error', + '-c', str(self.num), + '-Q', self.queue, + '--heartbeat-interval', '10', + '-n', f'{self.queue}@{server_hostname}', + '--without-mingle', + ] + return cmd + + @property + def cwd(self): + return APPS_DIR diff --git a/apps/common/management/commands/services/services/celery_default.py b/apps/common/management/commands/services/services/celery_default.py new file mode 100644 index 00000000000..5d3e6d7b8a4 --- /dev/null +++ b/apps/common/management/commands/services/services/celery_default.py @@ -0,0 +1,10 @@ +from .celery_base import CeleryBaseService + +__all__ = ['CeleryDefaultService'] + + +class CeleryDefaultService(CeleryBaseService): + + def __init__(self, **kwargs): + kwargs['queue'] = 'celery' + super().__init__(**kwargs) diff --git a/apps/common/management/commands/services/services/gunicorn.py b/apps/common/management/commands/services/services/gunicorn.py new file mode 100644 index 00000000000..cc42c4f7cb3 --- /dev/null +++ b/apps/common/management/commands/services/services/gunicorn.py @@ -0,0 +1,36 @@ 
+from .base import BaseService +from ..hands import * + +__all__ = ['GunicornService'] + + +class GunicornService(BaseService): + + def __init__(self, **kwargs): + self.worker = kwargs['worker_gunicorn'] + super().__init__(**kwargs) + + @property + def cmd(self): + print("\n- Start Gunicorn WSGI HTTP Server") + + log_format = '%(h)s %(t)s %(L)ss "%(r)s" %(s)s %(b)s ' + bind = f'{HTTP_HOST}:{HTTP_PORT}' + cmd = [ + 'gunicorn', 'smartdoc.wsgi:application', + '-b', bind, + '-k', 'gthread', + '--threads', '200', + '-w', str(self.worker), + '--max-requests', '10240', + '--max-requests-jitter', '2048', + '--access-logformat', log_format, + '--access-logfile', '-' + ] + if DEBUG: + cmd.append('--reload') + return cmd + + @property + def cwd(self): + return APPS_DIR diff --git a/apps/common/management/commands/services/services/local_model.py b/apps/common/management/commands/services/services/local_model.py new file mode 100644 index 00000000000..05f4f561009 --- /dev/null +++ b/apps/common/management/commands/services/services/local_model.py @@ -0,0 +1,45 @@ +# coding=utf-8 +""" + @project: MaxKB + @Author:虎 + @file: local_model.py + @date:2024/8/21 13:28 + @desc: +""" +from .base import BaseService +from ..hands import * + +__all__ = ['GunicornLocalModelService'] + + +class GunicornLocalModelService(BaseService): + + def __init__(self, **kwargs): + self.worker = kwargs['worker_gunicorn'] + super().__init__(**kwargs) + + @property + def cmd(self): + print("\n- Start Gunicorn Local Model WSGI HTTP Server") + os.environ.setdefault('SERVER_NAME', 'local_model') + log_format = '%(h)s %(t)s %(L)ss "%(r)s" %(s)s %(b)s ' + bind = f'{CONFIG.get("LOCAL_MODEL_HOST")}:{CONFIG.get("LOCAL_MODEL_PORT")}' + worker = CONFIG.get("LOCAL_MODEL_HOST_WORKER", 1) + cmd = [ + 'gunicorn', 'smartdoc.wsgi:application', + '-b', bind, + '-k', 'gthread', + '--threads', '200', + '-w', str(worker), + '--max-requests', '10240', + '--max-requests-jitter', '2048', + '--access-logformat', log_format, + 
'--access-logfile', '-' + ] + if DEBUG: + cmd.append('--reload') + return cmd + + @property + def cwd(self): + return APPS_DIR diff --git a/apps/common/management/commands/services/utils.py b/apps/common/management/commands/services/utils.py new file mode 100644 index 00000000000..2426758b8de --- /dev/null +++ b/apps/common/management/commands/services/utils.py @@ -0,0 +1,140 @@ +import threading +import signal +import time +import daemon +from daemon import pidfile +from .hands import * +from .hands import __version__ +from .services.base import BaseService + + +class ServicesUtil(object): + + def __init__(self, services, run_daemon=False, force_stop=False, stop_daemon=False): + self._services = services + self.run_daemon = run_daemon + self.force_stop = force_stop + self.stop_daemon = stop_daemon + self.EXIT_EVENT = threading.Event() + self.check_interval = 30 + self.files_preserve_map = {} + + def restart(self): + self.stop() + time.sleep(5) + self.start_and_watch() + + def start_and_watch(self): + logging.info(time.ctime()) + logging.info(f'MaxKB version {__version__}, more see https://www.jumpserver.org') + self.start() + if self.run_daemon: + self.show_status() + with self.daemon_context: + self.watch() + else: + self.watch() + + def start(self): + for service in self._services: + service: BaseService + service.start() + self.files_preserve_map[service.name] = service.log_file + + time.sleep(1) + + def stop(self): + for service in self._services: + service: BaseService + service.stop(force=self.force_stop) + + if self.stop_daemon: + self._stop_daemon() + + # -- watch -- + def watch(self): + while not self.EXIT_EVENT.is_set(): + try: + _exit = self._watch() + if _exit: + break + time.sleep(self.check_interval) + except KeyboardInterrupt: + print('Start stop services') + break + self.clean_up() + + def _watch(self): + for service in self._services: + service: BaseService + service.watch() + if service.EXIT_EVENT.is_set(): + self.EXIT_EVENT.set() + return True + 
return False + # -- end watch -- + + def clean_up(self): + if not self.EXIT_EVENT.is_set(): + self.EXIT_EVENT.set() + self.stop() + + def show_status(self): + for service in self._services: + service: BaseService + service.show_status() + + # -- daemon -- + def _stop_daemon(self): + if self.daemon_pid and self.daemon_is_running: + os.kill(self.daemon_pid, 15) + self.remove_daemon_pid() + + def remove_daemon_pid(self): + if os.path.isfile(self.daemon_pid_filepath): + os.unlink(self.daemon_pid_filepath) + + @property + def daemon_pid(self): + if not os.path.isfile(self.daemon_pid_filepath): + return 0 + with open(self.daemon_pid_filepath) as f: + try: + pid = int(f.read().strip()) + except ValueError: + pid = 0 + return pid + + @property + def daemon_is_running(self): + try: + os.kill(self.daemon_pid, 0) + except (OSError, ProcessLookupError): + return False + else: + return True + + @property + def daemon_pid_filepath(self): + return os.path.join(TMP_DIR, 'mk.pid') + + @property + def daemon_log_filepath(self): + return os.path.join(LOG_DIR, 'mk.log') + + @property + def daemon_context(self): + daemon_log_file = open(self.daemon_log_filepath, 'a') + context = daemon.DaemonContext( + pidfile=pidfile.TimeoutPIDLockFile(self.daemon_pid_filepath), + signal_map={ + signal.SIGTERM: lambda x, y: self.clean_up(), + signal.SIGHUP: 'terminate', + }, + stdout=daemon_log_file, + stderr=daemon_log_file, + files_preserve=list(self.files_preserve_map.values()), + detach_process=True, + ) + return context + # -- end daemon -- diff --git a/apps/common/management/commands/start.py b/apps/common/management/commands/start.py new file mode 100644 index 00000000000..4c078a8769a --- /dev/null +++ b/apps/common/management/commands/start.py @@ -0,0 +1,6 @@ +from .services.command import BaseActionCommand, Action + + +class Command(BaseActionCommand): + help = 'Start services' + action = Action.start.value diff --git a/apps/common/management/commands/status.py 
b/apps/common/management/commands/status.py new file mode 100644 index 00000000000..36f0d36080e --- /dev/null +++ b/apps/common/management/commands/status.py @@ -0,0 +1,6 @@ +from .services.command import BaseActionCommand, Action + + +class Command(BaseActionCommand): + help = 'Show services status' + action = Action.status.value diff --git a/apps/common/management/commands/stop.py b/apps/common/management/commands/stop.py new file mode 100644 index 00000000000..a79a5335c8f --- /dev/null +++ b/apps/common/management/commands/stop.py @@ -0,0 +1,6 @@ +from .services.command import BaseActionCommand, Action + + +class Command(BaseActionCommand): + help = 'Stop services' + action = Action.stop.value diff --git a/apps/common/middleware/cross_domain_middleware.py b/apps/common/middleware/cross_domain_middleware.py index d116dd7b7b8..06c0a6aba71 100644 --- a/apps/common/middleware/cross_domain_middleware.py +++ b/apps/common/middleware/cross_domain_middleware.py @@ -6,11 +6,10 @@ @date:2024/5/8 13:36 @desc: """ -from django.db.models import QuerySet from django.http import HttpResponse from django.utils.deprecation import MiddlewareMixin -from application.models.api_key_model import ApplicationApiKey +from common.cache_data.application_api_key_cache import get_application_api_key class CrossDomainMiddleware(MiddlewareMixin): @@ -27,13 +26,15 @@ def process_response(self, request, response): auth = request.META.get('HTTP_AUTHORIZATION') origin = request.META.get('HTTP_ORIGIN') if auth is not None and str(auth).startswith("application-") and origin is not None: - application_api_key = QuerySet(ApplicationApiKey).filter(secret_key=auth).first() - if application_api_key.allow_cross_domain: + application_api_key = get_application_api_key(str(auth), True) + cross_domain_list = application_api_key.get('cross_domain_list', []) + allow_cross_domain = application_api_key.get('allow_cross_domain', False) + if allow_cross_domain: response['Access-Control-Allow-Methods'] = 
'GET,POST,DELETE,PUT' response[ 'Access-Control-Allow-Headers'] = "Origin,X-Requested-With,Content-Type,Accept,Authorization,token" - if application_api_key.cross_domain_list is None or len(application_api_key.cross_domain_list) == 0: + if cross_domain_list is None or len(cross_domain_list) == 0: response['Access-Control-Allow-Origin'] = "*" - elif application_api_key.cross_domain_list.__contains__(origin): + elif cross_domain_list.__contains__(origin): response['Access-Control-Allow-Origin'] = origin return response diff --git a/apps/common/middleware/doc_headers_middleware.py b/apps/common/middleware/doc_headers_middleware.py new file mode 100644 index 00000000000..d818b842ca5 --- /dev/null +++ b/apps/common/middleware/doc_headers_middleware.py @@ -0,0 +1,62 @@ +# coding=utf-8 +""" + @project: maxkb + @Author:虎 + @file: static_headers_middleware.py + @date:2024/3/13 18:26 + @desc: +""" +from django.http import HttpResponse +from django.utils.deprecation import MiddlewareMixin + +content = """ + + + + + + + Document + + + + + +""" + + +class DocHeadersMiddleware(MiddlewareMixin): + def process_response(self, request, response): + if request.path.startswith('/doc/') or request.path.startswith('/doc/chat/'): + HTTP_REFERER = request.META.get('HTTP_REFERER') + if HTTP_REFERER is None: + return HttpResponse(content) + if HTTP_REFERER == request._current_scheme_host + request.path: + return response + return response diff --git a/apps/common/middleware/gzip.py b/apps/common/middleware/gzip.py new file mode 100644 index 00000000000..92c7cea3829 --- /dev/null +++ b/apps/common/middleware/gzip.py @@ -0,0 +1,84 @@ +# coding=utf-8 +""" + @project: MaxKB + @Author:虎 + @file: gzip.py + @date:2025/2/27 10:03 + @desc: +""" +from django.utils.cache import patch_vary_headers +from django.utils.deprecation import MiddlewareMixin +from django.utils.regex_helper import _lazy_re_compile +from django.utils.text import compress_sequence, compress_string + +re_accepts_gzip = 
_lazy_re_compile(r"\bgzip\b") + + +class GZipMiddleware(MiddlewareMixin): + """ + Compress content if the browser allows gzip compression. + Set the Vary header accordingly, so that caches will base their storage + on the Accept-Encoding header. + """ + + max_random_bytes = 100 + + def process_response(self, request, response): + if request.method != 'GET' or request.path.startswith('/api'): + return response + # It's not worth attempting to compress really short responses. + if not response.streaming and len(response.content) < 200: + return response + + # Avoid gzipping if we've already got a content-encoding. + if response.has_header("Content-Encoding"): + return response + + patch_vary_headers(response, ("Accept-Encoding",)) + + ae = request.META.get("HTTP_ACCEPT_ENCODING", "") + if not re_accepts_gzip.search(ae): + return response + + if response.streaming: + if response.is_async: + # pull to lexical scope to capture fixed reference in case + # streaming_content is set again later. + original_iterator = response.streaming_content + + async def gzip_wrapper(): + async for chunk in original_iterator: + yield compress_string( + chunk, + max_random_bytes=self.max_random_bytes, + ) + + response.streaming_content = gzip_wrapper() + else: + response.streaming_content = compress_sequence( + response.streaming_content, + max_random_bytes=self.max_random_bytes, + ) + # Delete the `Content-Length` header for streaming content, because + # we won't know the compressed size until we stream it. + del response.headers["Content-Length"] + else: + # Return the compressed content only if it's actually shorter. 
+ compressed_content = compress_string( + response.content, + max_random_bytes=self.max_random_bytes, + ) + if len(compressed_content) >= len(response.content): + return response + response.content = compressed_content + response.headers["Content-Length"] = str(len(response.content)) + + # If there is a strong ETag, make it weak to fulfill the requirements + # of RFC 9110 Section 8.8.1 while also allowing conditional request + # matches on ETags. + etag = response.get("ETag") + if etag and etag.startswith('"'): + response.headers["ETag"] = "W/" + etag + response.headers["Content-Encoding"] = "gzip" + + return response diff --git a/apps/common/middleware/static_headers_middleware.py b/apps/common/middleware/static_headers_middleware.py index 79b799a70a0..f5afcfb7c93 100644 --- a/apps/common/middleware/static_headers_middleware.py +++ b/apps/common/middleware/static_headers_middleware.py @@ -6,25 +6,28 @@ @date:2024/3/13 18:26 @desc: """ -from django.db.models import QuerySet from django.utils.deprecation import MiddlewareMixin -from application.models.api_key_model import ApplicationAccessToken +from common.cache_data.application_access_token_cache import get_application_access_token class StaticHeadersMiddleware(MiddlewareMixin): def process_response(self, request, response): if request.path.startswith('/ui/chat/'): access_token = request.path.replace('/ui/chat/', '') - application_access_token = QuerySet(ApplicationAccessToken).filter(access_token=access_token).first() + application_access_token = get_application_access_token(access_token, True) if application_access_token is not None: - if application_access_token.white_active: + white_active = application_access_token.get('white_active', False) + white_list = application_access_token.get('white_list', []) + application_icon = application_access_token.get('application_icon') + application_name = application_access_token.get('application_name') + if white_active: # 添加自定义的响应头 response[ - 'Content-Security-Policy'] 
= f'frame-ancestors {" ".join(application_access_token.white_list)}' + 'Content-Security-Policy'] = f'frame-ancestors {" ".join(white_list)}' response.content = (response.content.decode('utf-8').replace( '', - f'') - .replace('MaxKB', f'{application_access_token.application.name}').encode( + f'') + .replace('MaxKB', f'{application_name}').encode( "utf-8")) return response diff --git a/apps/common/models/db_model_manage.py b/apps/common/models/db_model_manage.py new file mode 100644 index 00000000000..80ce0f55b54 --- /dev/null +++ b/apps/common/models/db_model_manage.py @@ -0,0 +1,35 @@ +# coding=utf-8 +""" + @project: MaxKB + @Author:虎 + @file: db_model_manage.py + @date:2024/7/22 17:00 + @desc: +""" +from importlib import import_module +from django.conf import settings + + +def new_instance_by_class_path(class_path: str): + parts = class_path.rpartition('.') + package_path = parts[0] + class_name = parts[2] + module = import_module(package_path) + HandlerClass = getattr(module, class_name) + return HandlerClass() + + +class DBModelManage: + model_dict = {} + + @staticmethod + def get_model(model_name): + return DBModelManage.model_dict.get(model_name) + + @staticmethod + def init(): + handles = [new_instance_by_class_path(class_path) for class_path in + (settings.MODEL_HANDLES if hasattr(settings, 'MODEL_HANDLES') else [])] + for h in handles: + model_dict = h.get_model_dict() + DBModelManage.model_dict = {**DBModelManage.model_dict, **model_dict} diff --git a/apps/common/models/handle/base_handle.py b/apps/common/models/handle/base_handle.py new file mode 100644 index 00000000000..17389673e2b --- /dev/null +++ b/apps/common/models/handle/base_handle.py @@ -0,0 +1,15 @@ +# coding=utf-8 +""" + @project: MaxKB + @Author:虎 + @file: base_handle.py + @date:2024/7/22 17:02 + @desc: +""" +from abc import ABC, abstractmethod + + +class IBaseModelHandle(ABC): + @abstractmethod + def get_model_dict(self): + pass diff --git 
a/apps/common/models/handle/impl/default_base_model_handle.py b/apps/common/models/handle/impl/default_base_model_handle.py new file mode 100644 index 00000000000..b1ed7051a53 --- /dev/null +++ b/apps/common/models/handle/impl/default_base_model_handle.py @@ -0,0 +1,14 @@ +# coding=utf-8 +""" + @project: MaxKB + @Author:虎 + @file: default_base_model_handle.py + @date:2024/7/22 17:06 + @desc: +""" +from common.models.handle.base_handle import IBaseModelHandle + + +class DefaultBaseModelHandle(IBaseModelHandle): + def get_model_dict(self): + return {} diff --git a/apps/common/response/result.py b/apps/common/response/result.py index d1cf6a3ad5b..d2cc37e7755 100644 --- a/apps/common/response/result.py +++ b/apps/common/response/result.py @@ -3,6 +3,7 @@ from django.http import JsonResponse from drf_yasg import openapi from rest_framework import status +from django.utils.translation import gettext_lazy as _ class Page(dict): @@ -15,11 +16,12 @@ def __init__(self, total: int, records: List, current_page: int, page_size: int, class Result(JsonResponse): + charset = 'utf-8' """ 接口统一返回对象 """ - def __init__(self, code=200, message="成功", data=None, response_status=status.HTTP_200_OK, **kwargs): + def __init__(self, code=200, message=_('Success'), data=None, response_status=status.HTTP_200_OK, **kwargs): back_info_dict = {"code": code, "message": message, 'data': data} super().__init__(data=back_info_dict, status=response_status, **kwargs) @@ -31,13 +33,13 @@ def get_page_request_params(other_request_params=None): in_=openapi.IN_PATH, type=openapi.TYPE_INTEGER, required=True, - description='当前页') + description=_('current page')) page_size = openapi.Parameter(name='page_size', in_=openapi.IN_PATH, type=openapi.TYPE_INTEGER, required=True, - description='每页大小') + description=_('page size')) result = [current_page, page_size] for other_request_param in other_request_params: result.append(other_request_param) @@ -48,41 +50,41 @@ def get_page_api_response(response_data_schema: 
openapi.Schema): """ 获取统一返回 响应Api """ - return openapi.Responses(responses={200: openapi.Response(description="响应参数", + return openapi.Responses(responses={200: openapi.Response(description=_('response parameters'), schema=openapi.Schema( type=openapi.TYPE_OBJECT, properties={ 'code': openapi.Schema( type=openapi.TYPE_INTEGER, - title="响应码", + title=_('response code'), default=200, - description="成功:200 失败:其他"), + description=_('success:200 fail:other')), "message": openapi.Schema( type=openapi.TYPE_STRING, - title="提示", - default='成功', - description="错误提示"), + title=_('prompt'), + default=_('success'), + description=_('error prompt')), "data": openapi.Schema( type=openapi.TYPE_OBJECT, properties={ 'total': openapi.Schema( type=openapi.TYPE_INTEGER, - title="总条数", + title=_('total number of data'), default=1, - description="数据总条数"), + description=_('total number of data')), "records": openapi.Schema( type=openapi.TYPE_ARRAY, items=response_data_schema), "current": openapi.Schema( type=openapi.TYPE_INTEGER, - title="当前页", + title=_('current page'), default=1, - description="当前页"), + description=_('current page')), "size": openapi.Schema( type=openapi.TYPE_INTEGER, - title="每页大小", + title=_('page size'), default=10, - description="每页大小") + description=_('page size')) } ) @@ -96,20 +98,20 @@ def get_api_response(response_data_schema: openapi.Schema): """ 获取统一返回 响应Api """ - return openapi.Responses(responses={200: openapi.Response(description="响应参数", + return openapi.Responses(responses={200: openapi.Response(description=_('response parameters'), schema=openapi.Schema( type=openapi.TYPE_OBJECT, properties={ 'code': openapi.Schema( type=openapi.TYPE_INTEGER, - title="响应码", + title=_('response code'), default=200, - description="成功:200 失败:其他"), + description=_('success:200 fail:other')), "message": openapi.Schema( type=openapi.TYPE_STRING, - title="提示", - default='成功', - description="错误提示"), + title=_('prompt'), + default=_('success'), + description=_('error prompt')), 
"data": response_data_schema } @@ -125,20 +127,20 @@ def get_api_array_response(response_data_schema: openapi.Schema): """ 获取统一返回 响应Api """ - return openapi.Responses(responses={200: openapi.Response(description="响应参数", + return openapi.Responses(responses={200: openapi.Response(description=_('response parameters'), schema=openapi.Schema( type=openapi.TYPE_OBJECT, properties={ 'code': openapi.Schema( type=openapi.TYPE_INTEGER, - title="响应码", + title=_('response code'), default=200, - description="成功:200 失败:其他"), + description=_('success:200 fail:other')), "message": openapi.Schema( type=openapi.TYPE_STRING, - title="提示", - default='成功', - description="错误提示"), + title=_('prompt'), + default=_('success'), + description=_('error prompt')), "data": openapi.Schema(type=openapi.TYPE_ARRAY, items=response_data_schema) @@ -156,10 +158,10 @@ def success(data, **kwargs): return Result(data=data, **kwargs) -def error(message): +def error(message, **kwargs): """ 获取一个失败的响应对象 :param message: 错误提示 :return: 接口响应对象 """ - return Result(code=500, message=message) + return Result(code=500, message=message, **kwargs) diff --git a/apps/common/sql/list_embedding_text.sql b/apps/common/sql/list_embedding_text.sql index 74f3b224b42..ac0dc7b311e 100644 --- a/apps/common/sql/list_embedding_text.sql +++ b/apps/common/sql/list_embedding_text.sql @@ -19,9 +19,7 @@ SELECT paragraph."id" AS paragraph_id, paragraph.dataset_id AS dataset_id, 1 AS source_type, - concat_ws(' -',concat_ws(' -',paragraph.title,paragraph."content"),paragraph.title) AS "text", + concat_ws(E'\n',paragraph.title,paragraph."content") AS "text", paragraph.is_active AS is_active FROM paragraph paragraph diff --git a/apps/common/swagger_api/common_api.py b/apps/common/swagger_api/common_api.py index c3d8be6ca6f..3134db0d083 100644 --- a/apps/common/swagger_api/common_api.py +++ b/apps/common/swagger_api/common_api.py @@ -9,6 +9,7 @@ from drf_yasg import openapi from common.mixins.api_mixin import ApiMixin +from 
django.utils.translation import gettext_lazy as _ class CommonApi: @@ -16,31 +17,31 @@ class HitTestApi(ApiMixin): @staticmethod def get_request_params_api(): return [ - openapi.Parameter(name='query_text', - in_=openapi.IN_QUERY, - type=openapi.TYPE_STRING, - required=True, - description='问题文本'), - openapi.Parameter(name='top_number', - in_=openapi.IN_QUERY, - type=openapi.TYPE_NUMBER, - default=10, - required=True, - description='topN'), - openapi.Parameter(name='similarity', - in_=openapi.IN_QUERY, - type=openapi.TYPE_NUMBER, - default=0.6, - required=True, - description='相关性'), - openapi.Parameter(name='search_mode', - in_=openapi.IN_QUERY, - type=openapi.TYPE_STRING, - default="embedding", - required=True, - description='检索模式embedding|keywords|blend' - ) - ] + openapi.Parameter(name='query_text', + in_=openapi.IN_QUERY, + type=openapi.TYPE_STRING, + required=True, + description=_('query text')), + openapi.Parameter(name='top_number', + in_=openapi.IN_QUERY, + type=openapi.TYPE_NUMBER, + default=10, + required=True, + description='topN'), + openapi.Parameter(name='similarity', + in_=openapi.IN_QUERY, + type=openapi.TYPE_NUMBER, + default=0.6, + required=True, + description=_('similarity')), + openapi.Parameter(name='search_mode', + in_=openapi.IN_QUERY, + type=openapi.TYPE_STRING, + default="embedding", + required=True, + description=_('Retrieval pattern embedding|keywords|blend') + ) + ] @staticmethod def get_response_body_api(): @@ -53,31 +54,32 @@ def get_response_body_api(): properties={ 'id': openapi.Schema(type=openapi.TYPE_STRING, title="id", description="id", default="xx"), - 'content': openapi.Schema(type=openapi.TYPE_STRING, title="段落内容", - description="段落内容", default='段落内容'), - 'title': openapi.Schema(type=openapi.TYPE_STRING, title="标题", - description="标题", default="xxx的描述"), - 'hit_num': openapi.Schema(type=openapi.TYPE_INTEGER, title="命中数量", description="命中数量", + 'content': openapi.Schema(type=openapi.TYPE_STRING, title=_('Paragraph content'), + 
description=_('Paragraph content'), default=_('Paragraph content')), + 'title': openapi.Schema(type=openapi.TYPE_STRING, title=_('title'), + description=_('title'), default=_('Description of xxx')), + 'hit_num': openapi.Schema(type=openapi.TYPE_INTEGER, title=_('Number of hits'), + description=_('Number of hits'), default=1), - 'star_num': openapi.Schema(type=openapi.TYPE_INTEGER, title="点赞数量", - description="点赞数量", default=1), - 'trample_num': openapi.Schema(type=openapi.TYPE_INTEGER, title="点踩数量", - description="点踩数", default=1), - 'dataset_id': openapi.Schema(type=openapi.TYPE_STRING, title="知识库id", - description="知识库id", default='xxx'), - 'document_id': openapi.Schema(type=openapi.TYPE_STRING, title="文档id", - description="文档id", default='xxx'), - 'is_active': openapi.Schema(type=openapi.TYPE_BOOLEAN, title="是否可用", - description="是否可用", default=True), - 'similarity': openapi.Schema(type=openapi.TYPE_NUMBER, title="相关性得分", - description="相关性得分", default=True), - 'comprehensive_score': openapi.Schema(type=openapi.TYPE_NUMBER, title="综合得分,用于排序", - description="综合得分,用于排序", default=True), - 'update_time': openapi.Schema(type=openapi.TYPE_STRING, title="修改时间", - description="修改时间", + 'star_num': openapi.Schema(type=openapi.TYPE_INTEGER, title=_('Number of likes'), + description=_('Number of likes'), default=1), + 'trample_num': openapi.Schema(type=openapi.TYPE_INTEGER, title=_('Number of clicks and dislikes'), + description=_('Number of clicks and dislikes'), default=1), + 'dataset_id': openapi.Schema(type=openapi.TYPE_STRING, title=_('dataset id'), + description=_('dataset id'), default='xxx'), + 'document_id': openapi.Schema(type=openapi.TYPE_STRING, title=_('document id'), + description=_('document id'), default='xxx'), + 'is_active': openapi.Schema(type=openapi.TYPE_BOOLEAN, title=_('Is active'), + description=_('Is active'), default=True), + 'similarity': openapi.Schema(type=openapi.TYPE_NUMBER, title=_('relevance score'), + description=_('relevance score'), 
default=True), + 'comprehensive_score': openapi.Schema(type=openapi.TYPE_NUMBER, title=_('Comprehensive score, used for ranking'), + description=_('Comprehensive score, used for ranking'), default=True), + 'update_time': openapi.Schema(type=openapi.TYPE_STRING, title=_('Update time'), + description=_('Update time'), default="1970-01-01 00:00:00"), - 'create_time': openapi.Schema(type=openapi.TYPE_STRING, title="创建时间", - description="创建时间", + 'create_time': openapi.Schema(type=openapi.TYPE_STRING, title=_('Create time'), + description=_('Create time'), default="1970-01-01 00:00:00" ), diff --git a/apps/common/task/__init__.py b/apps/common/task/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/apps/common/template/email_template_en.html b/apps/common/template/email_template_en.html new file mode 100644 index 00000000000..5d5515c449b --- /dev/null +++ b/apps/common/template/email_template_en.html @@ -0,0 +1,122 @@ + + + + + + + + + + +
+ + + + + + + + + +
+
+ Intelligent Knowledge Q&A System +
+
+
+

+ + + + Dear user: + +

+ +

+ ${code}  This is your dynamic verification code. Please fill it in within 30 minutes. To protect the security of your account, please do not provide this verification code to anyone. +

+
+ +
+
+

Intelligent knowledge base project team

+
+

+ Please do not reply to this system email
+

+ +
+
+
+
+
+ + diff --git a/apps/common/template/email_template.html b/apps/common/template/email_template_zh.html similarity index 92% rename from apps/common/template/email_template.html rename to apps/common/template/email_template_zh.html index 4b4f913ef9c..29c1fc65dad 100644 --- a/apps/common/template/email_template.html +++ b/apps/common/template/email_template_zh.html @@ -22,7 +22,6 @@ border-bottom-right-radius: 5px; border-bottom-left-radius: 5px; font-size: 14px; - font-family: 微软雅黑, 黑体; line-height: 1.5; box-shadow: rgb(153, 153, 153) 0px 0px 5px; border-collapse: collapse; @@ -56,17 +55,16 @@
- MaxKB 智能知识库 + 智能知识库问答系统
@@ -105,11 +103,10 @@

text-align: right; " > -

飞致云 - MaxKB 项目组

+

智能知识库项目组


此为系统邮件,请勿回复
- Please do not reply to this system email

diff --git a/apps/common/template/email_template_zh_Hant.html b/apps/common/template/email_template_zh_Hant.html new file mode 100644 index 00000000000..d54d4023138 --- /dev/null +++ b/apps/common/template/email_template_zh_Hant.html @@ -0,0 +1,123 @@ + + + + + + + + + + +
+ + + + + + + + + +
+
+ + 智慧知識庫問答系統 +
+
+
+

+ + + + 尊敬的用戶: + +

+ +

+ ${code}  為您的動態驗證碼,請於30分鐘內填寫,為保障帳戶安全,請勿向任何人提供此驗證碼。 +

+
+ +
+
+

智慧知識庫專案組

+
+

+ 此為系統郵件,請勿回覆
+

+ +
+
+
+
+
+ + diff --git a/apps/common/util/cache_util.py b/apps/common/util/cache_util.py new file mode 100644 index 00000000000..3d97a47cd14 --- /dev/null +++ b/apps/common/util/cache_util.py @@ -0,0 +1,68 @@ +# coding=utf-8 +""" + @project: MaxKB + @Author:虎 + @file: cache_util.py + @date:2024/7/24 19:23 + @desc: +""" +from django.core.cache import caches + +cache = caches['default_file'] + + +def get_data_by_default_cache(key: str, get_data, cache_instance=cache, version=None, kwargs=None): + """ + 获取数据, 先从缓存中获取,如果获取不到再调用get_data 获取数据 + @param kwargs: get_data所需参数 + @param key: key + @param get_data: 获取数据函数 + @param cache_instance: cache实例 + @param version: 版本用于隔离 + @return: + """ + if kwargs is None: + kwargs = {} + if cache_instance.has_key(key, version=version): + return cache_instance.get(key, version=version) + data = get_data(**kwargs) + cache_instance.add(key, data, version=version) + return data + + +def set_data_by_default_cache(key: str, get_data, cache_instance=cache, version=None): + data = get_data() + cache_instance.set(key, data, version=version) + return data + + +def get_cache(cache_key, use_get_data: any = True, cache_instance=cache, version=None): + def inner(get_data): + def run(*args, **kwargs): + key = cache_key(*args, **kwargs) if callable(cache_key) else cache_key + is_use_get_data = use_get_data(*args, **kwargs) if callable(use_get_data) else use_get_data + if is_use_get_data: + if cache_instance.has_key(key, version=version): + return cache_instance.get(key, version=version) + data = get_data(*args, **kwargs) + cache_instance.add(key, data, timeout=None, version=version) + return data + data = get_data(*args, **kwargs) + cache_instance.set(key, data, timeout=None, version=version) + return data + + return run + + return inner + + +def del_cache(cache_key, cache_instance=cache, version=None): + def inner(func): + def run(*args, **kwargs): + key = cache_key(*args, **kwargs) if callable(cache_key) else cache_key + func(*args, **kwargs) + 
cache_instance.delete(key, version=version) + + return run + + return inner diff --git a/apps/common/util/common.py b/apps/common/util/common.py index 52d90ec8548..8583a1c989f 100644 --- a/apps/common/util/common.py +++ b/apps/common/util/common.py @@ -6,10 +6,70 @@ @date:2023/10/16 16:42 @desc: """ +import hashlib import importlib +import io +import mimetypes +import pickle +import random +import re +import shutil from functools import reduce from typing import Dict, List +from django.core.files.uploadedfile import InMemoryUploadedFile +from django.db.models import QuerySet +from django.utils.translation import gettext as _ +from pydub import AudioSegment + +from ..exception.app_exception import AppApiException +from ..models.db_model_manage import DBModelManage + +safe_builtins = { + 'MKInstance' +} + +ALLOWED_CLASSES = { + ("builtins", "dict"), + ('uuid', 'UUID'), + ("application.serializers.application_serializers", "MKInstance"), + ("function_lib.serializers.function_lib_serializer", "FlibInstance") +} + + +class RestrictedUnpickler(pickle.Unpickler): + + def find_class(self, module, name): + if (module, name) in ALLOWED_CLASSES: + return super().find_class(module, name) + raise pickle.UnpicklingError("global '%s.%s' is forbidden" % + (module, name)) + + +def restricted_loads(s): + """Helper function analogous to pickle.loads().""" + return RestrictedUnpickler(io.BytesIO(s)).load() + + +def encryption(message: str): + """ + 加密敏感字段数据 加密方式是 如果密码是 1234567890 那么给前端则是 123******890 + :param message: + :return: + """ + max_pre_len = 8 + max_post_len = 4 + message_len = len(message) + pre_len = int(message_len / 5 * 2) + post_len = int(message_len / 5 * 1) + pre_str = "".join([message[index] for index in + range(0, max_pre_len if pre_len > max_pre_len else 1 if pre_len <= 0 else int(pre_len))]) + end_str = "".join( + [message[index] for index in + range(message_len - (int(post_len) if pre_len < max_post_len else max_post_len), message_len)]) + content = 
"***************" + return pre_str + content + end_str + def sub_array(array: List, item_num=10): result = [] @@ -45,6 +105,30 @@ def get_exec_method(clazz_: str, method_: str): return getattr(getattr(package_model, clazz_name), method_) +def flat_map(array: List[List]): + """ + 将二位数组转为一维数组 + :param array: 二维数组 + :return: 一维数组 + """ + result = [] + for e in array: + result += e + return result + + +def password_encrypt(raw_password): + """ + 密码 md5加密 + :param raw_password: 密码 + :return: 加密后密码 + """ + md5 = hashlib.md5() # 2,实例化md5() 方法 + md5.update(raw_password.encode()) # 3,对字符串的字节类型加密 + result = md5.hexdigest() # 4,加密 + return result + + def post(post_function): def inner(func): def run(*args, **kwargs): @@ -54,3 +138,174 @@ def run(*args, **kwargs): return run return inner + + +def valid_license(model=None, count=None, message=None): + def inner(func): + def run(*args, **kwargs): + xpack_cache = DBModelManage.get_model('xpack_cache') + is_license_valid = xpack_cache.get('XPACK_LICENSE_IS_VALID', False) if xpack_cache is not None else False + record_count = QuerySet(model).count() + + if not is_license_valid and record_count >= count: + error_message = message or f'超出限制{count}, 请联系我们(https://fit2cloud.com/)。' + raise AppApiException(400, error_message) + + return func(*args, **kwargs) + + return run + + return inner + + +def parse_image(content: str): + matches = re.finditer("!\[.*?\]\(\/api\/(image|file)\/.*?\)", content) + image_list = [match.group() for match in matches] + return image_list + + +def parse_md_image(content: str): + matches = re.finditer("!\[.*?\]\(.*?\)", content) + image_list = [match.group() for match in matches] + return image_list + + +def bulk_create_in_batches(model, data, batch_size=1000): + if len(data) == 0: + return + for i in range(0, len(data), batch_size): + batch = data[i:i + batch_size] + model.objects.bulk_create(batch) + + +def bytes_to_uploaded_file(file_bytes, file_name="file.txt"): + content_type, _ = 
mimetypes.guess_type(file_name) + if content_type is None: + # 如果未能识别,设置为默认的二进制文件类型 + content_type = "application/octet-stream" + # 创建一个内存中的字节流对象 + file_stream = io.BytesIO(file_bytes) + + # 获取文件大小 + file_size = len(file_bytes) + + # 创建 InMemoryUploadedFile 对象 + uploaded_file = InMemoryUploadedFile( + file=file_stream, + field_name=None, + name=file_name, + content_type=content_type, + size=file_size, + charset=None, + ) + return uploaded_file + + +def any_to_amr(any_path, amr_path): + """ + 把任意格式转成amr文件 + """ + if any_path.endswith(".amr"): + shutil.copy2(any_path, amr_path) + return + if any_path.endswith(".sil") or any_path.endswith(".silk") or any_path.endswith(".slk"): + raise NotImplementedError("Not support file type: {}".format(any_path)) + audio = AudioSegment.from_file(any_path) + audio = audio.set_frame_rate(8000) # only support 8000 + audio.export(amr_path, format="amr") + return audio.duration_seconds * 1000 + + +def any_to_mp3(any_path, mp3_path): + """ + 把任意格式转成mp3文件 + """ + if any_path.endswith(".mp3"): + shutil.copy2(any_path, mp3_path) + return + if any_path.endswith(".sil") or any_path.endswith(".silk") or any_path.endswith(".slk"): + sil_to_wav(any_path, any_path) + any_path = mp3_path + audio = AudioSegment.from_file(any_path) + audio = audio.set_frame_rate(16000) + audio.export(mp3_path, format="mp3") + + +def sil_to_wav(silk_path, wav_path, rate: int = 24000): + """ + silk 文件转 wav + """ + try: + import pysilk + except ImportError: + raise AppApiException("import pysilk failed, wechaty voice message will not be supported.") + wav_data = pysilk.decode_file(silk_path, to_wav=True, sample_rate=rate) + with open(wav_path, "wb") as f: + f.write(wav_data) + + +def split_and_transcribe(file_path, model, max_segment_length_ms=59000, audio_format="mp3"): + audio_data = AudioSegment.from_file(file_path, format=audio_format) + audio_length_ms = len(audio_data) + + if audio_length_ms <= max_segment_length_ms: + return 
model.speech_to_text(io.BytesIO(audio_data.export(format=audio_format).read())) + + full_text = [] + for start_ms in range(0, audio_length_ms, max_segment_length_ms): + end_ms = min(audio_length_ms, start_ms + max_segment_length_ms) + segment = audio_data[start_ms:end_ms] + text = model.speech_to_text(io.BytesIO(segment.export(format=audio_format).read())) + if isinstance(text, str): + full_text.append(text) + return ' '.join(full_text) + + +def _remove_empty_lines(text): + if not isinstance(text, str): + raise AppApiException(500, _('Text-to-speech node, the text content must be of string type')) + if not text: + raise AppApiException(500, _('Text-to-speech node, the text content cannot be empty')) + result = '\n'.join(line for line in text.split('\n') if line.strip()) + return markdown_to_plain_text(result) + + +def markdown_to_plain_text(md: str) -> str: + # 移除图片 ![alt](url) + text = re.sub(r'!\[.*?\]\(.*?\)', '', md) + # 移除链接 [text](url) + text = re.sub(r'\[([^\]]+)\]\([^)]+\)', r'\1', text) + # 移除 Markdown 标题符号 (#, ##, ###) + text = re.sub(r'^#{1,6}\s+', '', text, flags=re.MULTILINE) + # 移除加粗 **text** 或 __text__ + text = re.sub(r'\*\*(.*?)\*\*', r'\1', text) + text = re.sub(r'__(.*?)__', r'\1', text) + # 移除斜体 *text* 或 _text_ + text = re.sub(r'\*(.*?)\*', r'\1', text) + text = re.sub(r'_(.*?)_', r'\1', text) + # 移除行内代码 `code` + text = re.sub(r'`(.*?)`', r'\1', text) + # 移除代码块 ```code``` + text = re.sub(r'```[\s\S]*?```', '', text) + # 移除多余的换行符 + text = re.sub(r'\n{2,}', '\n', text) + # 使用正则表达式去除所有 HTML 标签 + text = re.sub(r'<[^>]+>', '', text) + # 去除多余的空白字符(包括换行符、制表符等) + text = re.sub(r'\s+', ' ', text) + # 去除表单渲染 + re.sub(r'[\s\S]*?<\/form_rander>', '', text) + # 去除首尾空格 + text = text.strip() + return text + + +SAFE_CHAR_SET = ( + [chr(i) for i in range(65, 91) if chr(i) not in {'I', 'O'}] + # 大写字母 A-H, J-N, P-Z + [chr(i) for i in range(97, 123) if chr(i) not in {'i', 'l', 'o'}] + # 小写字母 a-h, j-n, p-z + [str(i) for i in range(10) if str(i) not in {'0', '1', 
'7'}] # 数字 2-6, 8-9 +) + + +def get_random_chars(number=4): + return ''.join(random.choices(SAFE_CHAR_SET, k=number)) diff --git a/apps/common/util/field_message.py b/apps/common/util/field_message.py index 93b51b92097..409100b485b 100644 --- a/apps/common/util/field_message.py +++ b/apps/common/util/field_message.py @@ -6,101 +6,67 @@ @date:2024/3/1 14:30 @desc: """ -from django.utils.translation import gettext_lazy +from django.utils.functional import lazy +from rest_framework import serializers + + +def value_(field, value): + return f"【{field}】 {value}" + + +def reset_messages(field, messages): + return {key: lazy(value_, str)(field, messages.get(key)) for key in messages} + + +def reset_message_by_field(field_text, field): + return reset_messages(field_text, {**field.default_error_messages, **field.__bases__[0].default_error_messages}) class ErrMessage: @staticmethod def char(field: str): - return { - 'invalid': gettext_lazy("【%s】不是有效的字符串。" % field), - 'blank': gettext_lazy("【%s】此字段不能为空字符串。" % field), - 'max_length': gettext_lazy("【%s】请确保此字段的字符数不超过 {max_length} 个。" % field), - 'min_length': gettext_lazy("【%s】请确保此字段至少包含 {min_length} 个字符。" % field), - 'required': gettext_lazy('【%s】此字段必填。' % field), - 'null': gettext_lazy('【%s】此字段不能为null。' % field) - } + return reset_message_by_field(field, serializers.CharField) @staticmethod def uuid(field: str): - return {'required': gettext_lazy('【%s】此字段必填。' % field), - 'null': gettext_lazy('【%s】此字段不能为null。' % field), - 'invalid': gettext_lazy("【%s】必须是有效的UUID。" % field), - } + return reset_messages(field, serializers.UUIDField.default_error_messages) @staticmethod def integer(field: str): - return {'invalid': gettext_lazy('【%s】必须是有效的integer。' % field), - 'max_value': gettext_lazy('【%s】请确保此值小于或等于 {max_value} 。' % field), - 'min_value': gettext_lazy('【%s】请确保此值大于或等于 {min_value} 。' % field), - 'max_string_length': gettext_lazy('【%s】字符串值太大。') % field, - 'required': gettext_lazy('【%s】此字段必填。' % field), - 'null': 
gettext_lazy('【%s】此字段不能为null。' % field), - } + return reset_messages(field, serializers.IntegerField.default_error_messages) @staticmethod def list(field: str): - return {'not_a_list': gettext_lazy('【%s】应为列表,但得到的类型为 "{input_type}".' % field), - 'empty': gettext_lazy('【%s】此列表不能为空。' % field), - 'min_length': gettext_lazy('【%s】请确保此字段至少包含 {min_length} 个元素。' % field), - 'max_length': gettext_lazy('【%s】请确保此字段的元素不超过 {max_length} 个。' % field), - 'required': gettext_lazy('【%s】此字段必填。' % field), - 'null': gettext_lazy('【%s】此字段不能为null。' % field), - } + return reset_messages(field, serializers.ListField.default_error_messages) @staticmethod def boolean(field: str): - return {'invalid': gettext_lazy('【%s】必须是有效的布尔值。' % field), - 'required': gettext_lazy('【%s】此字段必填。' % field), - 'null': gettext_lazy('【%s】此字段不能为null。' % field)} + return reset_messages(field, serializers.BooleanField.default_error_messages) @staticmethod def dict(field: str): - return {'not_a_dict': gettext_lazy('【%s】应为字典,但得到的类型为 "{input_type}' % field), - 'empty': gettext_lazy('【%s】能是空的。' % field), - 'required': gettext_lazy('【%s】此字段必填。' % field), - 'null': gettext_lazy('【%s】此字段不能为null。' % field), - } + return reset_messages(field, serializers.DictField.default_error_messages) @staticmethod def float(field: str): - return {'invalid': gettext_lazy('【%s】需要一个有效的数字。' % field), - 'max_value': gettext_lazy('【%s】请确保此值小于或等于 {max_value}。' % field), - 'min_value': gettext_lazy('【%s】请确保此值大于或等于 {min_value}。' % field), - 'max_string_length': gettext_lazy('【%s】字符串值太大。' % field), - 'required': gettext_lazy('【%s】此字段必填。' % field), - 'null': gettext_lazy('【%s】此字段不能为null。' % field), - } + return reset_messages(field, serializers.FloatField.default_error_messages) @staticmethod def json(field: str): - return { - 'invalid': gettext_lazy('【%s】值必须是有效的JSON。' % field), - 'required': gettext_lazy('【%s】此字段必填。' % field), - 'null': gettext_lazy('【%s】此字段不能为null。' % field), - } + return reset_messages(field, 
serializers.JSONField.default_error_messages) @staticmethod def base(field: str): - return { - 'required': gettext_lazy('【%s】此字段必填。' % field), - 'null': gettext_lazy('【%s】此字段不能为null。' % field), - } + return reset_messages(field, serializers.Field.default_error_messages) @staticmethod def date(field: str): - return { - 'required': gettext_lazy('【%s】此字段必填。' % field), - 'null': gettext_lazy('【%s】此字段不能为null。' % field), - 'invalid': gettext_lazy('【%s】日期格式错误。请改用以下格式之一: {format}。'), - 'datetime': gettext_lazy('【%s】应为日期,但得到的是日期时间。') - } + return reset_messages(field, serializers.DateField.default_error_messages) @staticmethod def image(field: str): - return { - 'required': gettext_lazy('【%s】此字段必填。' % field), - 'null': gettext_lazy('【%s】此字段不能为null。' % field), - 'invalid_image': gettext_lazy('【%s】上载有效的图像。您上载的文件不是图像或图像已损坏。' % field), - 'max_length': gettext_lazy('请确保此文件名最多包含 {max_length} 个字符(长度为 {length})。') - } + return reset_messages(field, serializers.ImageField.default_error_messages) + + @staticmethod + def file(field: str): + return reset_messages(field, serializers.FileField.default_error_messages) diff --git a/apps/common/util/fork.py b/apps/common/util/fork.py index ee30f696e82..4405b9b76e4 100644 --- a/apps/common/util/fork.py +++ b/apps/common/util/fork.py @@ -142,7 +142,10 @@ def get_beautiful_soup(response): if len(charset_list) > 0: charset = charset_list[0] if charset != encoding: - html_content = response.content.decode(charset) + try: + html_content = response.content.decode(charset) + except Exception as e: + logging.getLogger("max_kb").error(f'{e}') return BeautifulSoup(html_content, "html.parser") return beautiful_soup diff --git a/apps/common/util/function_code.py b/apps/common/util/function_code.py new file mode 100644 index 00000000000..3a877a62367 --- /dev/null +++ b/apps/common/util/function_code.py @@ -0,0 +1,99 @@ +# coding=utf-8 +""" + @project: MaxKB + @Author:虎 + @file: function_code.py + @date:2024/8/7 16:11 + @desc: +""" +import os +import 
pickle +import subprocess +import sys +import uuid +from textwrap import dedent + +from smartdoc.const import BASE_DIR +from smartdoc.const import PROJECT_DIR + +python_directory = sys.executable + + +class FunctionExecutor: + def __init__(self, sandbox=False): + self.sandbox = sandbox + if sandbox: + self.sandbox_path = '/opt/maxkb/app/sandbox' + self.user = 'sandbox' + else: + self.sandbox_path = os.path.join(PROJECT_DIR, 'data', 'sandbox') + self.user = None + self._createdir() + if self.sandbox: + os.system(f"chown -R {self.user}:root {self.sandbox_path}") + + def _createdir(self): + old_mask = os.umask(0o077) + try: + os.makedirs(self.sandbox_path, 0o700, exist_ok=True) + os.makedirs(os.path.join(self.sandbox_path, 'execute'), 0o700, exist_ok=True) + os.makedirs(os.path.join(self.sandbox_path, 'result'), 0o700, exist_ok=True) + finally: + os.umask(old_mask) + + def exec_code(self, code_str, keywords): + _id = str(uuid.uuid1()) + success = '{"code":200,"msg":"成功","data":exec_result}' + err = '{"code":500,"msg":str(e),"data":None}' + result_path = f'{self.sandbox_path}/result/{_id}.result' + _exec_code = f""" +try: + import os + import pickle + env = dict(os.environ) + for key in list(env.keys()): + if key in os.environ and (key.startswith('MAXKB') or key.startswith('POSTGRES') or key.startswith('PG')): + del os.environ[key] + locals_v={'{}'} + keywords={keywords} + globals_v=globals() + exec({dedent(code_str)!a}, globals_v, locals_v) + f_name, f = locals_v.popitem() + for local in locals_v: + globals_v[local] = locals_v[local] + exec_result=f(**keywords) + with open({result_path!a}, 'wb') as file: + file.write(pickle.dumps({success})) +except Exception as e: + with open({result_path!a}, 'wb') as file: + file.write(pickle.dumps({err})) +""" + if self.sandbox: + subprocess_result = self._exec_sandbox(_exec_code, _id) + else: + subprocess_result = self._exec(_exec_code) + if subprocess_result.returncode == 1: + raise Exception(subprocess_result.stderr) + with 
open(result_path, 'rb') as file: + result = pickle.loads(file.read()) + os.remove(result_path) + if result.get('code') == 200: + return result.get('data') + raise Exception(result.get('msg')) + + def _exec_sandbox(self, _code, _id): + exec_python_file = f'{self.sandbox_path}/execute/{_id}.py' + with open(exec_python_file, 'w') as file: + file.write(_code) + os.system(f"chown {self.user}:root {exec_python_file}") + kwargs = {'cwd': BASE_DIR} + subprocess_result = subprocess.run( + ['su', '-s', python_directory, '-c', "exec(open('" + exec_python_file + "').read())", self.user], + text=True, + capture_output=True, **kwargs) + os.remove(exec_python_file) + return subprocess_result + + @staticmethod + def _exec(_code): + return subprocess.run([python_directory, '-c', _code], text=True, capture_output=True) diff --git a/apps/common/util/page_utils.py b/apps/common/util/page_utils.py new file mode 100644 index 00000000000..61c52920d9a --- /dev/null +++ b/apps/common/util/page_utils.py @@ -0,0 +1,47 @@ +# coding=utf-8 +""" + @project: MaxKB + @Author:虎 + @file: page_utils.py + @date:2024/11/21 10:32 + @desc: +""" +from math import ceil + + +def page(query_set, page_size, handler, is_the_task_interrupted=lambda: False): + """ + + @param query_set: 查询query_set + @param page_size: 每次查询大小 + @param handler: 数据处理器 + @param is_the_task_interrupted: 任务是否被中断 + @return: + """ + query = query_set.order_by("id") + count = query_set.count() + for i in range(0, ceil(count / page_size)): + if is_the_task_interrupted(): + return + offset = i * page_size + paragraph_list = query.all()[offset: offset + page_size] + handler(paragraph_list) + + +def page_desc(query_set, page_size, handler, is_the_task_interrupted=lambda: False): + """ + + @param query_set: 查询query_set + @param page_size: 每次查询大小 + @param handler: 数据处理器 + @param is_the_task_interrupted: 任务是否被中断 + @return: + """ + query = query_set.order_by("id") + count = query_set.count() + for i in sorted(range(0, ceil(count / page_size)), 
reverse=True): + if is_the_task_interrupted(): + return + offset = i * page_size + paragraph_list = query.all()[offset: offset + page_size] + handler(paragraph_list) diff --git a/apps/common/util/rsa_util.py b/apps/common/util/rsa_util.py index 00301867208..452ca678d9e 100644 --- a/apps/common/util/rsa_util.py +++ b/apps/common/util/rsa_util.py @@ -40,15 +40,12 @@ def generate(): def get_key_pair(): rsa_value = rsa_cache.get(cache_key) if rsa_value is None: - lock.acquire() - rsa_value = rsa_cache.get(cache_key) - if rsa_value is not None: - return rsa_value - try: + with lock: + rsa_value = rsa_cache.get(cache_key) + if rsa_value is not None: + return rsa_value rsa_value = get_key_pair_by_sql() rsa_cache.set(cache_key, rsa_value) - finally: - lock.release() return rsa_value diff --git a/apps/common/util/split_model.py b/apps/common/util/split_model.py index 19b265fc6ea..81b253531c9 100644 --- a/apps/common/util/split_model.py +++ b/apps/common/util/split_model.py @@ -19,6 +19,7 @@ def get_level_block(text, level_content_list, level_content_index, cursor): :param text: 文本 :param level_content_list: 拆分的title数组 :param level_content_index: 指定的下标 + :param cursor: 开始的下标位置 :return: 拆分后的文本数据 """ start_content: str = level_content_list[level_content_index].get('content') @@ -26,7 +27,7 @@ def get_level_block(text, level_content_list, level_content_index, cursor): level_content_list) else None start_index = text.index(start_content, cursor) end_index = text.index(next_content, start_index + 1) if next_content is not None else len(text) - return text[start_index:end_index].lstrip(level_content_list[level_content_index]['content']), end_index + return text[start_index + len(start_content):end_index], end_index def to_tree_obj(content, state='title'): @@ -148,10 +149,10 @@ def to_block_paragraph(tree_data_list: List[dict]): def parse_title_level(text, content_level_pattern: List, index): - if len(content_level_pattern) == index: + if index >= len(content_level_pattern): return 
[] result = parse_level(text, content_level_pattern[index]) - if len(result) == 0 and len(content_level_pattern) > index + 1: + if len(result) == 0 and len(content_level_pattern) > index: return parse_title_level(text, content_level_pattern, index + 1) return result @@ -163,7 +164,7 @@ def parse_level(text, pattern: str): :param pattern: 正则 :return: 符合正则的文本 """ - level_content_list = list(map(to_tree_obj, re_findall(pattern, text))) + level_content_list = list(map(to_tree_obj, [r[0:255] for r in re_findall(pattern, text) if r is not None])) return list(map(filter_special_symbol, level_content_list)) @@ -213,51 +214,57 @@ def group_by(list_source: List, key): return result -def result_tree_to_paragraph(result_tree: List[dict], result, parent_chain): +def result_tree_to_paragraph(result_tree: List[dict], result, parent_chain, with_filter: bool): """ 转换为分段对象 :param result_tree: 解析文本的树 :param result: 传[] 用于递归 :param parent_chain: 传[] 用户递归存储数据 + :param with_filter: 是否过滤block :return: List[{'problem':'xx','content':'xx'}] """ for item in result_tree: if item.get('state') == 'block': - result.append({'title': " ".join(parent_chain), 'content': item.get("content")}) + result.append({'title': " ".join(parent_chain), + 'content': filter_special_char(item.get("content")) if with_filter else item.get("content")}) children = item.get("children") if children is not None and len(children) > 0: - result_tree_to_paragraph(children, result, [*parent_chain, item.get('content')]) + result_tree_to_paragraph(children, result, + [*parent_chain, remove_special_symbol(item.get('content'))], with_filter) return result -def post_handler_paragraph(content: str, limit: int, with_filter: bool): +def post_handler_paragraph(content: str, limit: int): + """ + 根据文本的最大字符分段 + :param content: 需要分段的文本字段 + :param limit: 最大分段字符 + :return: 分段后数据 """ - 根据文本的最大字符分段 - :param with_filter: 是否过滤特殊字符 - :param content: 需要分段的文本字段 - :param limit: 最大分段字符 - :return: 分段后数据 - """ - split_list = content.split('\n') 
result = [] - temp_char = '' - for split in split_list: + temp_char, start = '', 0 + while (pos := content.find("\n", start)) != -1: + split, start = content[start:pos + 1], pos + 1 if len(temp_char + split) > limit: + if len(temp_char) > 4096: + pass result.append(temp_char) temp_char = '' - temp_char = temp_char + split + '\n' + temp_char = temp_char + split + temp_char = temp_char + content[start:] if len(temp_char) > 0: + if len(temp_char) > 4096: + pass result.append(temp_char) + pattern = "[\\S\\s]{1," + str(limit) + '}' # 如果\n 单段超过限制,则继续拆分 - s = list(map(lambda row: filter_special_char(row) if with_filter else row, list( - reduce(lambda x, y: [*x, *y], list(map(lambda row: list(re.findall(pattern, row)), result)), [])))) - return s + return reduce(lambda x, y: [*x, *y], map(lambda row: re.findall(pattern, row), result), []) replace_map = { re.compile('\n+'): '\n', - re.compile('\\s+'): ' ', + re.compile(' +'): ' ', re.compile('#+'): "", re.compile("\t+"): '' } @@ -277,11 +284,11 @@ def filter_special_char(content: str): class SplitModel: - def __init__(self, content_level_pattern, with_filter=True, limit=4096): + def __init__(self, content_level_pattern, with_filter=True, limit=100000): self.content_level_pattern = content_level_pattern self.with_filter = with_filter - if limit is None or limit > 4096: - limit = 4096 + if limit is None or limit > 100000: + limit = 100000 if limit < 50: limit = 50 self.limit = limit @@ -293,40 +300,29 @@ def parse_to_tree(self, text: str, index=0): :param index: 从那个正则开始解析 :return: 解析后的树形结果数据 """ - if len(self.content_level_pattern) == index: - return - level_content_list = parse_title_level(text, self.content_level_pattern, 0) + level_content_list = parse_title_level(text, self.content_level_pattern, index) + if len(level_content_list) == 0: + return [to_tree_obj(row, 'block') for row in post_handler_paragraph(text, limit=self.limit)] + if index == 0 and text.lstrip().index(level_content_list[0]["content"].lstrip()) != 0: + 
level_content_list.insert(0, to_tree_obj("")) + cursor = 0 - for i in range(len(level_content_list)): - block, cursor = get_level_block(text, level_content_list, i, cursor) - children = self.parse_to_tree(text=block, - index=index + 1) - if children is not None and len(children) > 0: - level_content_list[i]['children'] = children - else: - if len(block) > 0: - level_content_list[i]['children'] = list( - map(lambda row: to_tree_obj(row, 'block'), - post_handler_paragraph(block, with_filter=self.with_filter, limit=self.limit))) - if len(level_content_list) > 0: - end_index = text.index(level_content_list[0].get('content')) - if end_index == 0: - return level_content_list - other_content = text[0:end_index] - children = self.parse_to_tree(text=other_content, - index=index) - if len(children) > 0: - level_content_list = [*level_content_list, *children] - else: - if len(other_content.strip()) > 0: - level_content_list = [*level_content_list, *list( - map(lambda row: to_tree_obj(row, 'block'), - post_handler_paragraph(other_content, with_filter=self.with_filter, limit=self.limit)))] - else: - if len(text.strip()) > 0: - level_content_list = [*level_content_list, *list( - map(lambda row: to_tree_obj(row, 'block'), - post_handler_paragraph(text, with_filter=self.with_filter, limit=self.limit)))] + level_title_content_list = [item for item in level_content_list if item.get('state') == 'title'] + for i in range(len(level_title_content_list)): + start_content: str = level_title_content_list[i].get('content') + if cursor < text.index(start_content, cursor): + for row in post_handler_paragraph(text[cursor: text.index(start_content, cursor)], limit=self.limit): + level_content_list.insert(0, to_tree_obj(row, 'block')) + + block, cursor = get_level_block(text, level_title_content_list, i, cursor) + if len(block) == 0: + continue + children = self.parse_to_tree(text=block, index=index + 1) + level_title_content_list[i]['children'] = children + first_child_idx_in_block = 
block.lstrip().index(children[0]["content"].lstrip()) + if first_child_idx_in_block != 0: + inner_children = self.parse_to_tree(block[:first_child_idx_in_block], index + 1) + level_title_content_list[i]['children'].extend(inner_children) return level_content_list def parse(self, text: str): @@ -335,17 +331,22 @@ def parse(self, text: str): :param text: 文本数据 :return: 解析后数据 {content:段落数据,keywords:[‘段落关键词’],parent_chain:['段落父级链路']} """ + text = text.replace('\r\n', '\n') text = text.replace('\r', '\n') text = text.replace("\0", '') result_tree = self.parse_to_tree(text, 0) - result = result_tree_to_paragraph(result_tree, [], []) - return [item for item in [self.post_reset_paragraph(row) for row in result] if + result = result_tree_to_paragraph(result_tree, [], [], self.with_filter) + for e in result: + if len(e['content']) > 4096: + pass + title_list = list(set([row.get('title') for row in result])) + return [item for item in [self.post_reset_paragraph(row, title_list) for row in result] if 'content' in item and len(item.get('content').strip()) > 0] - def post_reset_paragraph(self, paragraph: Dict): - result = self.filter_title_special_characters(paragraph) + def post_reset_paragraph(self, paragraph: Dict, title_list: List[str]): + result = self.content_is_null(paragraph, title_list) + result = self.filter_title_special_characters(result) result = self.sub_title(result) - result = self.content_is_null(result) return result @staticmethod @@ -357,11 +358,14 @@ def sub_title(paragraph: Dict): return paragraph @staticmethod - def content_is_null(paragraph: Dict): + def content_is_null(paragraph: Dict, title_list: List[str]): if 'title' in paragraph: title = paragraph.get('title') content = paragraph.get('content') if (content is None or len(content.strip()) == 0) and (title is not None and len(title) > 0): + find = [t for t in title_list if t.__contains__(title) and t != title] + if find: + return {'title': '', 'content': ''} return {'title': '', 'content': title} return 
paragraph @@ -387,7 +391,7 @@ def filter_title_special_characters(paragraph: Dict): } -def get_split_model(filename: str, with_filter: bool = False, limit: int = 4096): +def get_split_model(filename: str, with_filter: bool = False, limit: int = 100000): """ 根据文件名称获取分段模型 :param limit: 每段大小 diff --git a/apps/common/util/test.py b/apps/common/util/test.py index a9536ba9ca7..bfcab42d4f1 100644 --- a/apps/common/util/test.py +++ b/apps/common/util/test.py @@ -13,11 +13,12 @@ from django.core.cache import cache # alg使用的算法 -HEADER = {'typ': 'JWP', 'alg': 'default'} +HEADER = {'type': 'JWP', 'alg': 'default'} TOKEN_KEY = 'solomon_world_token' TOKEN_SALT = 'solomonwanc@gmail.com' TIME_OUT = 30 * 60 + # 加密 def encrypt(obj): value = signing.dumps(obj, key=TOKEN_KEY, salt=TOKEN_SALT) @@ -29,7 +30,6 @@ def encrypt(obj): def decrypt(src): src = signing.b64_decode(src.encode()).decode() raw = signing.loads(src, key=TOKEN_KEY, salt=TOKEN_SALT) - print(type(raw)) return raw @@ -74,5 +74,3 @@ def check_token(token): if last_token: return last_token == token return False - - diff --git a/apps/common/util/ts_vecto_util.py b/apps/common/util/ts_vecto_util.py index 451d87bf870..37d03d4b544 100644 --- a/apps/common/util/ts_vecto_util.py +++ b/apps/common/util/ts_vecto_util.py @@ -12,9 +12,6 @@ import jieba import jieba.posseg -from jieba import analyse - -from common.util.split_model import group_by jieba_word_list_cache = [chr(item) for item in range(38, 84)] @@ -49,14 +46,16 @@ def get_word_list(text: str): def replace_word(word_dict, text: str): for key in word_dict: - text = re.sub('(?= 0]) + result = jieba.lcut(text, cut_all=True) + return " ".join(result) def to_query(text: str): - # 获取不分词的数据 - word_list = get_word_list(text) - # 获取关键词关系 - word_dict = to_word_dict(word_list, text) - # 替换字符串 - text = replace_word(word_dict, text) - extract_tags = analyse.extract_tags(text, topK=5, withWeight=True, allowPOS=('ns', 'n', 'vn', 'v', 'eng')) - result = " 
".join([get_key_by_word_dict(word, word_dict) for word, score in extract_tags if - not remove_chars.__contains__(word)]) - # 删除词库 - for word in word_list: - jieba.del_word(word) + extract_tags = jieba.lcut(text, cut_all=True) + result = " ".join(extract_tags) return result diff --git a/apps/dataset/migrations/0005_file.py b/apps/dataset/migrations/0005_file.py new file mode 100644 index 00000000000..3c74fc8dbbe --- /dev/null +++ b/apps/dataset/migrations/0005_file.py @@ -0,0 +1,30 @@ +# Generated by Django 4.2.13 on 2024-07-05 18:59 + +from django.db import migrations, models +import uuid + +from smartdoc.const import CONFIG + + +class Migration(migrations.Migration): + dependencies = [ + ('dataset', '0004_document_directly_return_similarity'), + ] + + operations = [ + migrations.RunSQL(f"grant execute on function lo_from_bytea to {CONFIG.get('DB_USER')}"), + migrations.CreateModel( + name='File', + fields=[ + ('create_time', models.DateTimeField(auto_now_add=True, verbose_name='创建时间')), + ('update_time', models.DateTimeField(auto_now=True, verbose_name='修改时间')), + ('id', models.UUIDField(default=uuid.uuid1, editable=False, primary_key=True, serialize=False, + verbose_name='主键id')), + ('file_name', models.CharField(default='', max_length=256, verbose_name='文件名称')), + ('loid', models.IntegerField(verbose_name='loid')), + ], + options={ + 'db_table': 'file', + }, + ), + ] diff --git a/apps/dataset/migrations/0006_dataset_embedding_mode.py b/apps/dataset/migrations/0006_dataset_embedding_mode.py new file mode 100644 index 00000000000..2248d8e3634 --- /dev/null +++ b/apps/dataset/migrations/0006_dataset_embedding_mode.py @@ -0,0 +1,21 @@ +# Generated by Django 4.2.13 on 2024-07-17 13:56 + +import dataset.models.data_set +from django.db import migrations, models +import django.db.models.deletion + + +class Migration(migrations.Migration): + + dependencies = [ + ('setting', '0005_model_permission_type'), + ('dataset', '0005_file'), + ] + + operations = [ + 
migrations.AddField( + model_name='dataset', + name='embedding_mode', + field=models.ForeignKey(default=dataset.models.data_set.default_model, on_delete=django.db.models.deletion.DO_NOTHING, to='setting.model', verbose_name='向量模型'), + ), + ] diff --git a/apps/dataset/migrations/0007_alter_paragraph_content.py b/apps/dataset/migrations/0007_alter_paragraph_content.py new file mode 100644 index 00000000000..ab654b1a1e3 --- /dev/null +++ b/apps/dataset/migrations/0007_alter_paragraph_content.py @@ -0,0 +1,18 @@ +# Generated by Django 4.2.14 on 2024-07-24 14:35 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('dataset', '0006_dataset_embedding_mode'), + ] + + operations = [ + migrations.AlterField( + model_name='paragraph', + name='content', + field=models.CharField(max_length=102400, verbose_name='段落内容'), + ), + ] diff --git a/apps/dataset/migrations/0008_alter_document_status_alter_paragraph_status.py b/apps/dataset/migrations/0008_alter_document_status_alter_paragraph_status.py new file mode 100644 index 00000000000..3380d7b928a --- /dev/null +++ b/apps/dataset/migrations/0008_alter_document_status_alter_paragraph_status.py @@ -0,0 +1,23 @@ +# Generated by Django 4.2.14 on 2024-07-29 15:37 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('dataset', '0007_alter_paragraph_content'), + ] + + operations = [ + migrations.AlterField( + model_name='document', + name='status', + field=models.CharField(choices=[('0', '导入中'), ('1', '已完成'), ('2', '导入失败'), ('3', '排队中')], default='3', max_length=1, verbose_name='状态'), + ), + migrations.AlterField( + model_name='paragraph', + name='status', + field=models.CharField(choices=[('0', '导入中'), ('1', '已完成'), ('2', '导入失败'), ('3', '排队中')], default='0', max_length=1, verbose_name='状态'), + ), + ] diff --git a/apps/dataset/migrations/0009_alter_document_status_alter_paragraph_status.py 
b/apps/dataset/migrations/0009_alter_document_status_alter_paragraph_status.py new file mode 100644 index 00000000000..7c138a609e0 --- /dev/null +++ b/apps/dataset/migrations/0009_alter_document_status_alter_paragraph_status.py @@ -0,0 +1,23 @@ +# Generated by Django 4.2.15 on 2024-10-15 14:49 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('dataset', '0008_alter_document_status_alter_paragraph_status'), + ] + + operations = [ + migrations.AlterField( + model_name='document', + name='status', + field=models.CharField(choices=[('0', '导入中'), ('1', '已完成'), ('2', '导入失败'), ('3', '排队中'), ('4', '生成问题中')], default='3', max_length=1, verbose_name='状态'), + ), + migrations.AlterField( + model_name='paragraph', + name='status', + field=models.CharField(choices=[('0', '导入中'), ('1', '已完成'), ('2', '导入失败'), ('3', '排队中'), ('4', '生成问题中')], default='0', max_length=1, verbose_name='状态'), + ), + ] diff --git a/apps/dataset/migrations/0010_file_meta.py b/apps/dataset/migrations/0010_file_meta.py new file mode 100644 index 00000000000..6e28e3eecc3 --- /dev/null +++ b/apps/dataset/migrations/0010_file_meta.py @@ -0,0 +1,18 @@ +# Generated by Django 4.2.15 on 2024-11-07 15:32 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('dataset', '0009_alter_document_status_alter_paragraph_status'), + ] + + operations = [ + migrations.AddField( + model_name='file', + name='meta', + field=models.JSONField(default=dict, verbose_name='文件关联数据'), + ), + ] diff --git a/apps/dataset/migrations/0011_document_status_meta_paragraph_status_meta_and_more.py b/apps/dataset/migrations/0011_document_status_meta_paragraph_status_meta_and_more.py new file mode 100644 index 00000000000..7c5d6375506 --- /dev/null +++ b/apps/dataset/migrations/0011_document_status_meta_paragraph_status_meta_and_more.py @@ -0,0 +1,54 @@ +# Generated by Django 4.2.15 on 2024-11-22 14:44 +from django.db.models 
import QuerySet + +from django.db import migrations, models + +import dataset +from common.event import ListenerManagement +from dataset.models import State, TaskType + +sql = """ +UPDATE "document" +SET status ="replace"("replace"("replace"(status, '2', '3'),'0','3'),'1','2') +""" +sql_paragraph = """ +UPDATE "paragraph" +SET status ="replace"("replace"("replace"(status, '2', '3'),'0','3'),'1','2') +""" + + +def updateDocumentStatus(apps, schema_editor): + DocumentModel = apps.get_model('dataset', 'Document') + ListenerManagement.get_aggregation_document_status_by_query_set(QuerySet(DocumentModel))() + + +class Migration(migrations.Migration): + dependencies = [ + ('dataset', '0010_file_meta'), + ] + + operations = [ + migrations.AddField( + model_name='document', + name='status_meta', + field=models.JSONField(default=dataset.models.data_set.default_status_meta, verbose_name='状态统计数据'), + ), + migrations.AddField( + model_name='paragraph', + name='status_meta', + field=models.JSONField(default=dataset.models.data_set.default_status_meta, verbose_name='状态数据'), + ), + migrations.AlterField( + model_name='document', + name='status', + field=models.CharField(default=dataset.models.data_set.Status.__str__, max_length=20, verbose_name='状态'), + ), + migrations.AlterField( + model_name='paragraph', + name='status', + field=models.CharField(default=dataset.models.data_set.Status.__str__, max_length=20, verbose_name='状态'), + ), + migrations.RunSQL(sql_paragraph), + migrations.RunSQL(sql), + migrations.RunPython(updateDocumentStatus) + ] diff --git a/apps/dataset/models/data_set.py b/apps/dataset/models/data_set.py index d0f56a017fc..dd2f1b7c9a7 100644 --- a/apps/dataset/models/data_set.py +++ b/apps/dataset/models/data_set.py @@ -7,18 +7,74 @@ @desc: 数据集 """ import uuid +from enum import Enum from django.db import models +from django.db.models.signals import pre_delete +from django.dispatch import receiver +from common.db.sql_execute import select_one from 
common.mixins.app_model_mixin import AppModelMixin +from setting.models import Model from users.models import User -class Status(models.TextChoices): - """订单类型""" - embedding = 0, '导入中' - success = 1, '已完成' - error = 2, '导入失败' +class TaskType(Enum): + # 向量 + EMBEDDING = 1 + # 生成问题 + GENERATE_PROBLEM = 2 + # 同步 + SYNC = 3 + + +class State(Enum): + # 等待 + PENDING = '0' + # 执行中 + STARTED = '1' + # 成功 + SUCCESS = '2' + # 失败 + FAILURE = '3' + # 取消任务 + REVOKE = '4' + # 取消成功 + REVOKED = '5' + # 忽略 + IGNORED = 'n' + + +class Status: + type_cls = TaskType + state_cls = State + + def __init__(self, status: str = None): + self.task_status = {} + status_list = list(status[::-1] if status is not None else '') + for _type in self.type_cls: + index = _type.value - 1 + _state = self.state_cls(status_list[index] if len(status_list) > index else 'n') + self.task_status[_type] = _state + + @staticmethod + def of(status: str): + return Status(status) + + def __str__(self): + result = [] + for _type in sorted(self.type_cls, key=lambda item: item.value, reverse=True): + result.insert(len(self.type_cls) - _type.value, self.task_status[_type].value) + return ''.join(result) + + def __setitem__(self, key, value): + self.task_status[key] = value + + def __getitem__(self, item): + return self.task_status[item] + + def update_status(self, task_type: TaskType, state: State): + self.task_status[task_type] = state class Type(models.TextChoices): @@ -26,12 +82,23 @@ class Type(models.TextChoices): web = 1, 'web站点类型' + lark = 2, '飞书类型' + yuque = 3, '语雀类型' + class HitHandlingMethod(models.TextChoices): optimization = 'optimization', '模型优化' directly_return = 'directly_return', '直接返回' +def default_model(): + return uuid.UUID('42f63a3d-427e-11ef-b3ec-a8a1595801ab') + + +def default_status_meta(): + return {"state_time": {}} + + class DataSet(AppModelMixin): """ 数据集表 @@ -42,7 +109,8 @@ class DataSet(AppModelMixin): user = models.ForeignKey(User, on_delete=models.DO_NOTHING, verbose_name="所属用户") type = 
models.CharField(verbose_name='类型', max_length=1, choices=Type.choices, default=Type.base) - + embedding_mode = models.ForeignKey(Model, on_delete=models.DO_NOTHING, verbose_name="向量模型", + default=default_model) meta = models.JSONField(verbose_name="元数据", default=dict) class Meta: @@ -57,8 +125,8 @@ class Document(AppModelMixin): dataset = models.ForeignKey(DataSet, on_delete=models.DO_NOTHING) name = models.CharField(max_length=150, verbose_name="文档名称") char_length = models.IntegerField(verbose_name="文档字符数 冗余字段") - status = models.CharField(verbose_name='状态', max_length=1, choices=Status.choices, - default=Status.embedding) + status = models.CharField(verbose_name='状态', max_length=20, default=Status('').__str__) + status_meta = models.JSONField(verbose_name="状态统计数据", default=default_status_meta) is_active = models.BooleanField(default=True) type = models.CharField(verbose_name='类型', max_length=1, choices=Type.choices, @@ -81,10 +149,10 @@ class Paragraph(AppModelMixin): id = models.UUIDField(primary_key=True, max_length=128, default=uuid.uuid1, editable=False, verbose_name="主键id") document = models.ForeignKey(Document, on_delete=models.DO_NOTHING, db_constraint=False) dataset = models.ForeignKey(DataSet, on_delete=models.DO_NOTHING) - content = models.CharField(max_length=4096, verbose_name="段落内容") + content = models.CharField(max_length=102400, verbose_name="段落内容") title = models.CharField(max_length=256, verbose_name="标题", default="") - status = models.CharField(verbose_name='状态', max_length=1, choices=Status.choices, - default=Status.embedding) + status = models.CharField(verbose_name='状态', max_length=20, default=Status('').__str__) + status_meta = models.JSONField(verbose_name="状态数据", default=default_status_meta) hit_num = models.IntegerField(verbose_name="命中次数", default=0) is_active = models.BooleanField(default=True) @@ -123,3 +191,32 @@ class Image(AppModelMixin): class Meta: db_table = "image" + + +class File(AppModelMixin): + id = 
models.UUIDField(primary_key=True, max_length=128, default=uuid.uuid1, editable=False, verbose_name="主键id") + + file_name = models.CharField(max_length=256, verbose_name="文件名称", default="") + + loid = models.IntegerField(verbose_name="loid") + + meta = models.JSONField(verbose_name="文件关联数据", default=dict) + + class Meta: + db_table = "file" + + def save( + self, bytea=None, force_insert=False, force_update=False, using=None, update_fields=None + ): + result = select_one("SELECT lo_from_bytea(%s, %s::bytea) as loid", [0, bytea]) + self.loid = result['loid'] + super().save() + + def get_byte(self): + result = select_one(f'SELECT lo_get({self.loid}) as "data"', []) + return result['data'] + + +@receiver(pre_delete, sender=File) +def on_delete_file(sender, instance, **kwargs): + select_one(f'SELECT lo_unlink({instance.loid})', []) diff --git a/apps/dataset/serializers/common_serializers.py b/apps/dataset/serializers/common_serializers.py index 16d33e66202..856f3da1584 100644 --- a/apps/dataset/serializers/common_serializers.py +++ b/apps/dataset/serializers/common_serializers.py @@ -7,12 +7,16 @@ @desc: """ import os +import re +import uuid +import zipfile from typing import List from django.db.models import QuerySet from drf_yasg import openapi from rest_framework import serializers +from common.config.embedding_config import ModelManage from common.db.search import native_search from common.db.sql_execute import update_execute from common.exception.app_exception import AppApiException @@ -20,8 +24,63 @@ from common.util.field_message import ErrMessage from common.util.file_util import get_file_content from common.util.fork import Fork -from dataset.models import Paragraph +from dataset.models import Paragraph, Problem, ProblemParagraphMapping, DataSet, File, Image +from setting.models_provider import get_model from smartdoc.conf import PROJECT_DIR +from django.utils.translation import gettext_lazy as _ + + +def zip_dir(zip_path, output=None): + output = output or 
os.path.basename(zip_path) + '.zip' + zip = zipfile.ZipFile(output, 'w', zipfile.ZIP_DEFLATED) + for root, dirs, files in os.walk(zip_path): + relative_root = '' if root == zip_path else root.replace(zip_path, '') + os.sep + for filename in files: + zip.write(os.path.join(root, filename), relative_root + filename) + zip.close() + + +def is_valid_uuid(s): + try: + uuid.UUID(s) + return True + except ValueError: + return False + + +def write_image(zip_path: str, image_list: List[str]): + for image in image_list: + search = re.search("\(.*\)", image) + if search: + text = search.group() + if text.startswith('(/api/file/'): + r = text.replace('(/api/file/', '').replace(')', '') + r = r.strip().split(" ")[0] + if not is_valid_uuid(r): + break + file = QuerySet(File).filter(id=r).first() + if file is None: + break + zip_inner_path = os.path.join('api', 'file', r) + file_path = os.path.join(zip_path, zip_inner_path) + if not os.path.exists(os.path.dirname(file_path)): + os.makedirs(os.path.dirname(file_path)) + with open(os.path.join(zip_path, file_path), 'wb') as f: + f.write(file.get_byte()) + else: + r = text.replace('(/api/image/', '').replace(')', '') + r = r.strip().split(" ")[0] + if not is_valid_uuid(r): + break + image_model = QuerySet(Image).filter(id=r).first() + if image_model is None: + break + zip_inner_path = os.path.join('api', 'image', r) + file_path = os.path.join(zip_path, zip_inner_path) + if not os.path.exists(os.path.dirname(file_path)): + os.makedirs(os.path.dirname(file_path)) + with open(file_path, 'wb') as f: + f.write(image_model.image) def update_document_char_length(document_id: str): @@ -39,16 +98,16 @@ def list_paragraph(paragraph_list: List[str]): class MetaSerializer(serializers.Serializer): class WebMeta(serializers.Serializer): - source_url = serializers.CharField(required=True, error_messages=ErrMessage.char("文档地址")) + source_url = serializers.CharField(required=True, error_messages=ErrMessage.char(_('source url'))) selector = 
serializers.CharField(required=False, allow_null=True, allow_blank=True, - error_messages=ErrMessage.char("选择器")) + error_messages=ErrMessage.char(_('selector'))) def is_valid(self, *, raise_exception=False): super().is_valid(raise_exception=True) source_url = self.data.get('source_url') response = Fork(source_url, []).fork() if response.status == 500: - raise AppApiException(500, f"url错误,无法解析【{source_url}】") + raise AppApiException(500, _('URL error, cannot parse [{source_url}]').format(source_url=source_url)) class BaseMeta(serializers.Serializer): def is_valid(self, *, raise_exception=False): @@ -57,7 +116,7 @@ def is_valid(self, *, raise_exception=False): class BatchSerializer(ApiMixin, serializers.Serializer): id_list = serializers.ListField(required=True, child=serializers.UUIDField(required=True), - error_messages=ErrMessage.char("id列表")) + error_messages=ErrMessage.char(_('id list'))) def is_valid(self, *, model=None, raise_exception=False): super().is_valid(raise_exception=True) @@ -67,7 +126,8 @@ def is_valid(self, *, model=None, raise_exception=False): if len(model_list) != len(id_list): model_id_list = [str(m.id) for m in model_list] error_id_list = list(filter(lambda row_id: not model_id_list.__contains__(row_id), id_list)) - raise AppApiException(500, f"id不正确:{error_id_list}") + raise AppApiException(500, _('The following id does not exist: {error_id_list}').format( + error_id_list=error_id_list)) @staticmethod def get_request_body_api(): @@ -75,7 +135,113 @@ def get_request_body_api(): type=openapi.TYPE_OBJECT, properties={ 'id_list': openapi.Schema(type=openapi.TYPE_ARRAY, items=openapi.Schema(type=openapi.TYPE_STRING), - title="主键id列表", - description="主键id列表") + title=_('id list'), + description=_('id list')) + } + ) + + +class ProblemParagraphObject: + def __init__(self, dataset_id: str, document_id: str, paragraph_id: str, problem_content: str): + self.dataset_id = dataset_id + self.document_id = document_id + self.paragraph_id = paragraph_id + 
self.problem_content = problem_content + + +def or_get(exists_problem_list, content, dataset_id, document_id, paragraph_id, problem_content_dict): + if content in problem_content_dict: + return problem_content_dict.get(content)[0], document_id, paragraph_id + exists = [row for row in exists_problem_list if row.content == content] + if len(exists) > 0: + problem_content_dict[content] = exists[0], False + return exists[0], document_id, paragraph_id + else: + problem = Problem(id=uuid.uuid1(), content=content, dataset_id=dataset_id) + problem_content_dict[content] = problem, True + return problem, document_id, paragraph_id + + +class ProblemParagraphManage: + def __init__(self, problemParagraphObjectList: [ProblemParagraphObject], dataset_id): + self.dataset_id = dataset_id + self.problemParagraphObjectList = problemParagraphObjectList + + def to_problem_model_list(self): + problem_list = [item.problem_content for item in self.problemParagraphObjectList] + exists_problem_list = [] + if len(self.problemParagraphObjectList) > 0: + # 查询到已存在的问题列表 + exists_problem_list = QuerySet(Problem).filter(dataset_id=self.dataset_id, + content__in=problem_list).all() + problem_content_dict = {} + problem_model_list = [ + or_get(exists_problem_list, problemParagraphObject.problem_content, problemParagraphObject.dataset_id, + problemParagraphObject.document_id, problemParagraphObject.paragraph_id, problem_content_dict) for + problemParagraphObject in self.problemParagraphObjectList] + + problem_paragraph_mapping_list = [ + ProblemParagraphMapping(id=uuid.uuid1(), document_id=document_id, problem_id=problem_model.id, + paragraph_id=paragraph_id, + dataset_id=self.dataset_id) for + problem_model, document_id, paragraph_id in problem_model_list] + + result = [problem_model for problem_model, is_create in problem_content_dict.values() if + is_create], problem_paragraph_mapping_list + return result + + +def get_embedding_model_by_dataset_id_list(dataset_id_list: List): + dataset_list = 
QuerySet(DataSet).filter(id__in=dataset_id_list) + if len(set([dataset.embedding_mode_id for dataset in dataset_list])) > 1: + raise Exception(_('The knowledge base is inconsistent with the vector model')) + if len(dataset_list) == 0: + raise Exception(_('Knowledge base setting error, please reset the knowledge base')) + return ModelManage.get_model(str(dataset_list[0].embedding_mode_id), + lambda _id: get_model(dataset_list[0].embedding_mode)) + + +def get_embedding_model_by_dataset_id(dataset_id: str): + dataset = QuerySet(DataSet).select_related('embedding_mode').filter(id=dataset_id).first() + return ModelManage.get_model(str(dataset.embedding_mode_id), lambda _id: get_model(dataset.embedding_mode)) + + +def get_embedding_model_by_dataset(dataset): + return ModelManage.get_model(str(dataset.embedding_mode_id), lambda _id: get_model(dataset.embedding_mode)) + + +def get_embedding_model_id_by_dataset_id(dataset_id): + dataset = QuerySet(DataSet).select_related('embedding_mode').filter(id=dataset_id).first() + return str(dataset.embedding_mode_id) + + +def get_embedding_model_id_by_dataset_id_list(dataset_id_list: List): + dataset_list = QuerySet(DataSet).filter(id__in=dataset_id_list) + if len(set([dataset.embedding_mode_id for dataset in dataset_list])) > 1: + raise Exception(_('The knowledge base is inconsistent with the vector model')) + if len(dataset_list) == 0: + raise Exception(_('Knowledge base setting error, please reset the knowledge base')) + return str(dataset_list[0].embedding_mode_id) + + +class GenerateRelatedSerializer(ApiMixin, serializers.Serializer): + model_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid(_('Model id'))) + prompt = serializers.CharField(required=True, error_messages=ErrMessage.uuid(_('Prompt word'))) + state_list = serializers.ListField(required=False, child=serializers.CharField(required=True), + error_messages=ErrMessage.list("state list")) + + @staticmethod + def get_request_body_api(): + return 
openapi.Schema( + type=openapi.TYPE_OBJECT, + properties={ + 'model_id': openapi.Schema(type=openapi.TYPE_STRING, + title=_('Model id'), + description=_('Model id')), + 'prompt': openapi.Schema(type=openapi.TYPE_STRING, title=_('Prompt word'), + description=_("Prompt word")), + 'state_list': openapi.Schema(type=openapi.TYPE_ARRAY, + items=openapi.Schema(type=openapi.TYPE_STRING), + title=_('state list')) } ) diff --git a/apps/dataset/serializers/dataset_serializers.py b/apps/dataset/serializers/dataset_serializers.py index d3f5af73a4c..895443d997f 100644 --- a/apps/dataset/serializers/dataset_serializers.py +++ b/apps/dataset/serializers/dataset_serializers.py @@ -6,40 +6,52 @@ @date:2023/9/21 16:14 @desc: """ +import io import logging import os.path import re import traceback import uuid +import zipfile from functools import reduce -from typing import Dict +from tempfile import TemporaryDirectory +from typing import Dict, List from urllib.parse import urlparse +from celery_once import AlreadyQueued from django.contrib.postgres.fields import ArrayField from django.core import validators from django.db import transaction, models -from django.db.models import QuerySet, Q +from django.db.models import QuerySet +from django.db.models.functions import Reverse, Substr +from django.http import HttpResponse from drf_yasg import openapi from rest_framework import serializers from application.models import ApplicationDatasetMapping -from common.config.embedding_config import VectorStore, EmbeddingModel +from common.config.embedding_config import VectorStore from common.db.search import get_dynamics_model, native_page_search, native_search from common.db.sql_execute import select_list -from common.event import ListenerManagement, SyncWebDatasetArgs +from common.event import ListenerManagement from common.exception.app_exception import AppApiException from common.mixins.api_mixin import ApiMixin -from common.util.common import post +from common.util.common import post, 
flat_map, valid_license, parse_image from common.util.field_message import ErrMessage from common.util.file_util import get_file_content from common.util.fork import ChildLink, Fork from common.util.split_model import get_split_model -from dataset.models.data_set import DataSet, Document, Paragraph, Problem, Type, ProblemParagraphMapping -from dataset.serializers.common_serializers import list_paragraph, MetaSerializer +from dataset.models.data_set import DataSet, Document, Paragraph, Problem, Type, ProblemParagraphMapping, TaskType, \ + State, File, Image +from dataset.serializers.common_serializers import list_paragraph, MetaSerializer, ProblemParagraphManage, \ + get_embedding_model_by_dataset_id, get_embedding_model_id_by_dataset_id, write_image, zip_dir, \ + GenerateRelatedSerializer from dataset.serializers.document_serializers import DocumentSerializers, DocumentInstanceSerializer +from dataset.task import sync_web_dataset, sync_replace_web_dataset, generate_related_by_dataset_id from embedding.models import SearchMode -from setting.models import AuthOperate +from embedding.task import embedding_by_dataset, delete_embedding_by_dataset +from setting.models import AuthOperate, Model from smartdoc.conf import PROJECT_DIR +from django.utils.translation import gettext_lazy as _ """ # __exact 精确等于 like ‘aaa’ @@ -69,9 +81,9 @@ class Meta: fields = ['id', 'name', 'desc', 'meta', 'create_time', 'update_time'] class Application(ApiMixin, serializers.Serializer): - user_id = serializers.UUIDField(required=True, error_messages=ErrMessage.char("用户id")) + user_id = serializers.UUIDField(required=True, error_messages=ErrMessage.char(_('user id'))) - dataset_id = serializers.UUIDField(required=True, error_messages=ErrMessage.char("数据集id")) + dataset_id = serializers.UUIDField(required=True, error_messages=ErrMessage.char(_('dataset id'))) @staticmethod def get_request_params_api(): @@ -80,7 +92,7 @@ def get_request_params_api(): in_=openapi.IN_PATH, 
type=openapi.TYPE_STRING, required=True, - description='知识库id') + description=_('dataset id')), ] @staticmethod @@ -91,22 +103,31 @@ def get_response_body_api(): 'create_time', 'update_time'], properties={ - 'id': openapi.Schema(type=openapi.TYPE_STRING, title="", description="主键id"), - 'name': openapi.Schema(type=openapi.TYPE_STRING, title="应用名称", description="应用名称"), - 'desc': openapi.Schema(type=openapi.TYPE_STRING, title="应用描述", description="应用描述"), - 'model_id': openapi.Schema(type=openapi.TYPE_STRING, title="模型id", description="模型id"), - "multiple_rounds_dialogue": openapi.Schema(type=openapi.TYPE_BOOLEAN, title="是否开启多轮对话", - description="是否开启多轮对话"), - 'prologue': openapi.Schema(type=openapi.TYPE_STRING, title="开场白", description="开场白"), + 'id': openapi.Schema(type=openapi.TYPE_STRING, title="", description=_('id')), + 'name': openapi.Schema(type=openapi.TYPE_STRING, title=_('application name'), + description=_('application name')), + 'desc': openapi.Schema(type=openapi.TYPE_STRING, title="_('application description')", + description="_('application description')"), + 'model_id': openapi.Schema(type=openapi.TYPE_STRING, title=_('model id'), + description=_('model id')), + "multiple_rounds_dialogue": openapi.Schema(type=openapi.TYPE_BOOLEAN, + title=_('Whether to start multiple rounds of dialogue'), + description=_( + 'Whether to start multiple rounds of dialogue')), + 'prologue': openapi.Schema(type=openapi.TYPE_STRING, title=_('opening remarks'), + description=_('opening remarks')), 'example': openapi.Schema(type=openapi.TYPE_ARRAY, items=openapi.Schema(type=openapi.TYPE_STRING), - title="示例列表", description="示例列表"), - 'user_id': openapi.Schema(type=openapi.TYPE_STRING, title="所属用户", description="所属用户"), + title=_('example'), description=_('example')), + 'user_id': openapi.Schema(type=openapi.TYPE_STRING, title=_('User id'), description=_('User id')), - 'status': openapi.Schema(type=openapi.TYPE_BOOLEAN, title="是否发布", description='是否发布'), + 'status': 
openapi.Schema(type=openapi.TYPE_BOOLEAN, title=_('Whether to publish'), + description=_('Whether to publish')), - 'create_time': openapi.Schema(type=openapi.TYPE_STRING, title="创建时间", description='创建时间'), + 'create_time': openapi.Schema(type=openapi.TYPE_STRING, title=_('create time'), + description=_('create time')), - 'update_time': openapi.Schema(type=openapi.TYPE_STRING, title="修改时间", description='修改时间') + 'update_time': openapi.Schema(type=openapi.TYPE_STRING, title=_('update time'), + description=_('update time')) } ) @@ -115,29 +136,33 @@ class Query(ApiMixin, serializers.Serializer): 查询对象 """ name = serializers.CharField(required=False, - error_messages=ErrMessage.char("知识库名称"), + error_messages=ErrMessage.char(_('dataset name')), max_length=64, min_length=1) desc = serializers.CharField(required=False, - error_messages=ErrMessage.char("知识库描述"), + error_messages=ErrMessage.char(_('dataset description')), max_length=256, min_length=1, ) user_id = serializers.CharField(required=True) + select_user_id = serializers.CharField(required=False) def get_query_set(self): user_id = self.data.get("user_id") query_set_dict = {} query_set = QuerySet(model=get_dynamics_model( {'temp.name': models.CharField(), 'temp.desc': models.CharField(), - "document_temp.char_length": models.IntegerField(), 'temp.create_time': models.DateTimeField()})) + "document_temp.char_length": models.IntegerField(), 'temp.create_time': models.DateTimeField(), + 'temp.user_id': models.CharField(), 'temp.id': models.CharField()})) if "desc" in self.data and self.data.get('desc') is not None: query_set = query_set.filter(**{'temp.desc__icontains': self.data.get("desc")}) if "name" in self.data and self.data.get('name') is not None: query_set = query_set.filter(**{'temp.name__icontains': self.data.get("name")}) - query_set = query_set.order_by("-temp.create_time") + if "select_user_id" in self.data and self.data.get('select_user_id') is not None: + query_set = 
query_set.filter(**{'temp.user_id__exact': self.data.get("select_user_id")}) + query_set = query_set.order_by("-temp.create_time", "temp.id") query_set_dict['default_sql'] = query_set query_set_dict['dataset_custom_sql'] = QuerySet(model=get_dynamics_model( @@ -149,7 +174,7 @@ def get_query_set(self): query_set_dict['team_member_permission_custom_sql'] = QuerySet(model=get_dynamics_model( {'user_id': models.CharField(), 'team_member_permission.auth_target_type': models.CharField(), - 'team_member_permission.operate': ArrayField(verbose_name="权限操作列表", + 'team_member_permission.operate': ArrayField(verbose_name=_('permission'), base_field=models.CharField(max_length=256, blank=True, choices=AuthOperate.choices, @@ -175,12 +200,12 @@ def get_request_params_api(): in_=openapi.IN_QUERY, type=openapi.TYPE_STRING, required=False, - description='知识库名称'), + description=_('dataset name')), openapi.Parameter(name='desc', in_=openapi.IN_QUERY, type=openapi.TYPE_STRING, required=False, - description='知识库描述') + description=_('dataset description')) ] @staticmethod @@ -188,52 +213,134 @@ def get_response_body_api(): return DataSetSerializers.Operate.get_response_body_api() class Create(ApiMixin, serializers.Serializer): - user_id = serializers.UUIDField(required=True, error_messages=ErrMessage.char("用户id"), ) + user_id = serializers.UUIDField(required=True, error_messages=ErrMessage.char(_('user id')), ) class CreateBaseSerializers(ApiMixin, serializers.Serializer): """ 创建通用数据集序列化对象 """ name = serializers.CharField(required=True, - error_messages=ErrMessage.char("知识库名称"), + error_messages=ErrMessage.char(_('dataset name')), max_length=64, min_length=1) desc = serializers.CharField(required=True, - error_messages=ErrMessage.char("知识库描述"), + error_messages=ErrMessage.char(_('dataset description')), max_length=256, min_length=1) + embedding_mode_id = serializers.UUIDField(required=True, + error_messages=ErrMessage.uuid(_('embedding mode'))) + documents = 
DocumentInstanceSerializer(required=False, many=True) def is_valid(self, *, raise_exception=False): super().is_valid(raise_exception=True) return True + class CreateQASerializers(serializers.Serializer): + """ + 创建web站点序列化对象 + """ + name = serializers.CharField(required=True, + error_messages=ErrMessage.char(_('dataset name')), + max_length=64, + min_length=1) + + desc = serializers.CharField(required=True, + error_messages=ErrMessage.char(_('dataset description')), + max_length=256, + min_length=1) + + embedding_mode_id = serializers.UUIDField(required=True, + error_messages=ErrMessage.uuid(_('embedding mode'))) + + file_list = serializers.ListSerializer(required=True, + error_messages=ErrMessage.list(_('file list')), + child=serializers.FileField(required=True, + error_messages=ErrMessage.file( + _('file list')))) + + @staticmethod + def get_request_params_api(): + return [openapi.Parameter(name='file', + in_=openapi.IN_FORM, + type=openapi.TYPE_ARRAY, + items=openapi.Items(type=openapi.TYPE_FILE), + required=True, + description=_('upload files ')), + openapi.Parameter(name='name', + in_=openapi.IN_FORM, + required=True, + type=openapi.TYPE_STRING, title=_('dataset name'), + description=_('dataset name')), + openapi.Parameter(name='desc', + in_=openapi.IN_FORM, + required=True, + type=openapi.TYPE_STRING, title=_('dataset description'), + description=_('dataset description')), + ] + + @staticmethod + def get_response_body_api(): + return openapi.Schema( + type=openapi.TYPE_OBJECT, + required=['id', 'name', 'desc', 'user_id', 'char_length', 'document_count', + 'update_time', 'create_time', 'document_list'], + properties={ + 'id': openapi.Schema(type=openapi.TYPE_STRING, title="id", + description="id", default="xx"), + 'name': openapi.Schema(type=openapi.TYPE_STRING, title=_('dataset name'), + description=_('dataset name'), default=_('dataset name')), + 'desc': openapi.Schema(type=openapi.TYPE_STRING, title=_('dataset description'), + description=_('dataset 
description'), default=_('dataset description')), + 'user_id': openapi.Schema(type=openapi.TYPE_STRING, title=_('user id'), + description=_('user id'), default="user_xxxx"), + 'char_length': openapi.Schema(type=openapi.TYPE_STRING, title=_('char length'), + description=_('char length'), default=10), + 'document_count': openapi.Schema(type=openapi.TYPE_STRING, title=_('document count'), + description=_('document count'), default=1), + 'update_time': openapi.Schema(type=openapi.TYPE_STRING, title=_('update time'), + description=_('update time'), + default="1970-01-01 00:00:00"), + 'create_time': openapi.Schema(type=openapi.TYPE_STRING, title=_('create time'), + description=_('create time'), + default="1970-01-01 00:00:00" + ), + 'document_list': openapi.Schema(type=openapi.TYPE_ARRAY, title=_('document list'), + description=_('document list'), + items=DocumentSerializers.Operate.get_response_body_api()) + } + ) + class CreateWebSerializers(serializers.Serializer): """ 创建web站点序列化对象 """ name = serializers.CharField(required=True, - error_messages=ErrMessage.char("知识库名称"), + error_messages=ErrMessage.char(_('dataset name')), max_length=64, min_length=1) desc = serializers.CharField(required=True, - error_messages=ErrMessage.char("知识库描述"), + error_messages=ErrMessage.char(_('dataset description')), max_length=256, min_length=1) - source_url = serializers.CharField(required=True, error_messages=ErrMessage.char("Web 根地址"), ) + source_url = serializers.CharField(required=True, error_messages=ErrMessage.char(_('web source url')), ) + + embedding_mode_id = serializers.UUIDField(required=True, + error_messages=ErrMessage.uuid(_('embedding mode'))) selector = serializers.CharField(required=False, allow_null=True, allow_blank=True, - error_messages=ErrMessage.char("选择器")) + error_messages=ErrMessage.char(_('selector'))) def is_valid(self, *, raise_exception=False): super().is_valid(raise_exception=True) source_url = self.data.get('source_url') response = Fork(source_url, 
[]).fork() if response.status == 500: - raise AppApiException(500, f"url错误,无法解析【{source_url}】") + raise AppApiException(500, + _('URL error, cannot parse [{source_url}]').format(source_url=source_url)) return True @staticmethod @@ -245,25 +352,25 @@ def get_response_body_api(): properties={ 'id': openapi.Schema(type=openapi.TYPE_STRING, title="id", description="id", default="xx"), - 'name': openapi.Schema(type=openapi.TYPE_STRING, title="名称", - description="名称", default="测试知识库"), - 'desc': openapi.Schema(type=openapi.TYPE_STRING, title="描述", - description="描述", default="测试知识库描述"), - 'user_id': openapi.Schema(type=openapi.TYPE_STRING, title="所属用户id", - description="所属用户id", default="user_xxxx"), - 'char_length': openapi.Schema(type=openapi.TYPE_STRING, title="字符数", - description="字符数", default=10), - 'document_count': openapi.Schema(type=openapi.TYPE_STRING, title="文档数量", - description="文档数量", default=1), - 'update_time': openapi.Schema(type=openapi.TYPE_STRING, title="修改时间", - description="修改时间", + 'name': openapi.Schema(type=openapi.TYPE_STRING, title=_('dataset name'), + description=_('dataset name'), default=_('dataset name')), + 'desc': openapi.Schema(type=openapi.TYPE_STRING, title=_('dataset description'), + description=_('dataset description'), default=_('dataset description')), + 'user_id': openapi.Schema(type=openapi.TYPE_STRING, title=_('user id'), + description=_('user id'), default="user_xxxx"), + 'char_length': openapi.Schema(type=openapi.TYPE_STRING, title=_('char length'), + description=_('char length'), default=10), + 'document_count': openapi.Schema(type=openapi.TYPE_STRING, title=_('document count'), + description=_('document count'), default=1), + 'update_time': openapi.Schema(type=openapi.TYPE_STRING, title=_('update time'), + description=_('update time'), default="1970-01-01 00:00:00"), - 'create_time': openapi.Schema(type=openapi.TYPE_STRING, title="创建时间", - description="创建时间", + 'create_time': openapi.Schema(type=openapi.TYPE_STRING, 
title=_('create time'), + description=_('create time'), default="1970-01-01 00:00:00" ), - 'document_list': openapi.Schema(type=openapi.TYPE_ARRAY, title="文档列表", - description="文档列表", + 'document_list': openapi.Schema(type=openapi.TYPE_ARRAY, title=_('document list'), + description=_('document list'), items=DocumentSerializers.Operate.get_response_body_api()) } ) @@ -274,20 +381,39 @@ def get_request_body_api(): type=openapi.TYPE_OBJECT, required=['name', 'desc', 'url'], properties={ - 'name': openapi.Schema(type=openapi.TYPE_STRING, title="知识库名称", description="知识库名称"), - 'desc': openapi.Schema(type=openapi.TYPE_STRING, title="知识库描述", description="知识库描述"), - 'source_url': openapi.Schema(type=openapi.TYPE_STRING, title="web站点url", - description="web站点url"), - 'selector': openapi.Schema(type=openapi.TYPE_STRING, title="选择器", description="选择器") + 'name': openapi.Schema(type=openapi.TYPE_STRING, title=_('dataset name'), + description=_('dataset name')), + 'desc': openapi.Schema(type=openapi.TYPE_STRING, title=_('dataset description'), + description=_('dataset description')), + 'embedding_mode_id': openapi.Schema(type=openapi.TYPE_STRING, title=_('embedding mode'), + description=_('embedding mode')), + 'source_url': openapi.Schema(type=openapi.TYPE_STRING, title=_('web source url'), + description=_('web source url')), + 'selector': openapi.Schema(type=openapi.TYPE_STRING, title=_('selector'), + description=_('selector')) } ) @staticmethod def post_embedding_dataset(document_list, dataset_id): + model_id = get_embedding_model_id_by_dataset_id(dataset_id) # 发送向量化事件 - ListenerManagement.embedding_by_dataset_signal.send(dataset_id) + embedding_by_dataset.delay(dataset_id, model_id) return document_list + def save_qa(self, instance: Dict, with_valid=True): + if with_valid: + self.is_valid(raise_exception=True) + self.CreateQASerializers(data=instance).is_valid() + file_list = instance.get('file_list') + document_list = flat_map([DocumentSerializers.Create.parse_qa_file(file) 
for file in file_list]) + dataset_instance = {'name': instance.get('name'), 'desc': instance.get('desc'), 'documents': document_list, + 'embedding_mode_id': instance.get('embedding_mode_id')} + return self.save(dataset_instance, with_valid=True) + + @valid_license(model=DataSet, count=50, + message=_( + 'The community version supports up to 50 knowledge bases. If you need more knowledge bases, please contact us (https://fit2cloud.com/).')) @post(post_function=post_embedding_dataset) @transaction.atomic def save(self, instance: Dict, with_valid=True): @@ -297,14 +423,14 @@ def save(self, instance: Dict, with_valid=True): dataset_id = uuid.uuid1() user_id = self.data.get('user_id') if QuerySet(DataSet).filter(user_id=user_id, name=instance.get('name')).exists(): - raise AppApiException(500, "知识库名称重复!") + raise AppApiException(500, _('Knowledge base name duplicate!')) dataset = DataSet( - **{'id': dataset_id, 'name': instance.get("name"), 'desc': instance.get('desc'), 'user_id': user_id}) + **{'id': dataset_id, 'name': instance.get("name"), 'desc': instance.get('desc'), 'user_id': user_id, + 'embedding_mode_id': instance.get('embedding_mode_id')}) document_model_list = [] paragraph_model_list = [] - problem_model_list = [] - problem_paragraph_mapping_list = [] + problem_paragraph_object_list = [] # 插入文档 for document in instance.get('documents') if 'documents' in instance else []: document_paragraph_dict_model = DocumentSerializers.Create.get_document_paragraph_model(dataset_id, @@ -312,12 +438,12 @@ def save(self, instance: Dict, with_valid=True): document_model_list.append(document_paragraph_dict_model.get('document')) for paragraph in document_paragraph_dict_model.get('paragraph_model_list'): paragraph_model_list.append(paragraph) - for problem in document_paragraph_dict_model.get('problem_model_list'): - problem_model_list.append(problem) - for problem_paragraph_mapping in document_paragraph_dict_model.get('problem_paragraph_mapping_list'): - 
problem_paragraph_mapping_list.append(problem_paragraph_mapping) - problem_model_list, problem_paragraph_mapping_list = DocumentSerializers.Create.reset_problem_model( - problem_model_list, problem_paragraph_mapping_list) + for problem_paragraph_object in document_paragraph_dict_model.get('problem_paragraph_object_list'): + problem_paragraph_object_list.append(problem_paragraph_object) + + problem_model_list, problem_paragraph_mapping_list = (ProblemParagraphManage(problem_paragraph_object_list, + dataset_id) + .to_problem_model_list()) # 插入知识库 dataset.save() # 插入文档 @@ -329,11 +455,13 @@ def save(self, instance: Dict, with_valid=True): # 批量插入关联问题 QuerySet(ProblemParagraphMapping).bulk_create(problem_paragraph_mapping_list) if len( problem_paragraph_mapping_list) > 0 else None - # 响应数据 return {**DataSetSerializers(dataset).data, - 'document_list': DocumentSerializers.Query(data={'dataset_id': dataset_id}).list( - with_valid=True)}, dataset_id + 'user_id': user_id, + 'document_list': document_model_list, + "document_count": len(document_model_list), + "char_length": reduce(lambda x, y: x + y, [d.char_length for d in document_model_list], + 0)}, dataset_id @staticmethod def get_last_url_path(url): @@ -343,39 +471,22 @@ def get_last_url_path(url): else: return parsed_url.path.split("/")[-1] - @staticmethod - def get_save_handler(dataset_id, selector): - def handler(child_link: ChildLink, response: Fork.Response): - if response.status == 200: - try: - document_name = child_link.tag.text if child_link.tag is not None and len( - child_link.tag.text.strip()) > 0 else child_link.url - paragraphs = get_split_model('web.md').parse(response.content) - DocumentSerializers.Create(data={'dataset_id': dataset_id}).save( - {'name': document_name, 'paragraphs': paragraphs, - 'meta': {'source_url': child_link.url, 'selector': selector}, - 'type': Type.web}, with_valid=True) - except Exception as e: - logging.getLogger("max_kb_error").error(f'{str(e)}:{traceback.format_exc()}') - - 
return handler - def save_web(self, instance: Dict, with_valid=True): if with_valid: self.is_valid(raise_exception=True) self.CreateWebSerializers(data=instance).is_valid(raise_exception=True) user_id = self.data.get('user_id') if QuerySet(DataSet).filter(user_id=user_id, name=instance.get('name')).exists(): - raise AppApiException(500, "知识库名称重复!") + raise AppApiException(500, _('Knowledge base name duplicate!')) dataset_id = uuid.uuid1() dataset = DataSet( **{'id': dataset_id, 'name': instance.get("name"), 'desc': instance.get('desc'), 'user_id': user_id, 'type': Type.web, - 'meta': {'source_url': instance.get('source_url'), 'selector': instance.get('selector')}}) + 'embedding_mode_id': instance.get('embedding_mode_id'), + 'meta': {'source_url': instance.get('source_url'), 'selector': instance.get('selector'), + 'embedding_mode_id': instance.get('embedding_mode_id')}}) dataset.save() - ListenerManagement.sync_web_dataset_signal.send( - SyncWebDatasetArgs(str(dataset_id), instance.get('source_url'), instance.get('selector'), - self.get_save_handler(dataset_id, instance.get('selector')))) + sync_web_dataset.delay(str(dataset_id), instance.get('source_url'), instance.get('selector')) return {**DataSetSerializers(dataset).data, 'document_list': []} @@ -388,25 +499,25 @@ def get_response_body_api(): properties={ 'id': openapi.Schema(type=openapi.TYPE_STRING, title="id", description="id", default="xx"), - 'name': openapi.Schema(type=openapi.TYPE_STRING, title="名称", - description="名称", default="测试知识库"), - 'desc': openapi.Schema(type=openapi.TYPE_STRING, title="描述", - description="描述", default="测试知识库描述"), - 'user_id': openapi.Schema(type=openapi.TYPE_STRING, title="所属用户id", - description="所属用户id", default="user_xxxx"), - 'char_length': openapi.Schema(type=openapi.TYPE_STRING, title="字符数", - description="字符数", default=10), - 'document_count': openapi.Schema(type=openapi.TYPE_STRING, title="文档数量", - description="文档数量", default=1), - 'update_time': 
openapi.Schema(type=openapi.TYPE_STRING, title="修改时间", - description="修改时间", + 'name': openapi.Schema(type=openapi.TYPE_STRING, title=_('dataset name'), + description=_('dataset name'), default=_('dataset name')), + 'desc': openapi.Schema(type=openapi.TYPE_STRING, title=_('dataset description'), + description=_('dataset description'), default=_('dataset description')), + 'user_id': openapi.Schema(type=openapi.TYPE_STRING, title=_('user id'), + description=_('user id'), default="user_xxxx"), + 'char_length': openapi.Schema(type=openapi.TYPE_STRING, title=_('char length'), + description=_('char length'), default=10), + 'document_count': openapi.Schema(type=openapi.TYPE_STRING, title=_('document count'), + description=_('document count'), default=1), + 'update_time': openapi.Schema(type=openapi.TYPE_STRING, title=_('update time'), + description=_('update time'), default="1970-01-01 00:00:00"), - 'create_time': openapi.Schema(type=openapi.TYPE_STRING, title="创建时间", - description="创建时间", + 'create_time': openapi.Schema(type=openapi.TYPE_STRING, title=_('create time'), + description=_('create time'), default="1970-01-01 00:00:00" ), - 'document_list': openapi.Schema(type=openapi.TYPE_ARRAY, title="文档列表", - description="文档列表", + 'document_list': openapi.Schema(type=openapi.TYPE_ARRAY, title=_('document list'), + description=_('document list'), items=DocumentSerializers.Operate.get_response_body_api()) } ) @@ -417,9 +528,14 @@ def get_request_body_api(): type=openapi.TYPE_OBJECT, required=['name', 'desc'], properties={ - 'name': openapi.Schema(type=openapi.TYPE_STRING, title="知识库名称", description="知识库名称"), - 'desc': openapi.Schema(type=openapi.TYPE_STRING, title="知识库描述", description="知识库描述"), - 'documents': openapi.Schema(type=openapi.TYPE_ARRAY, title="文档数据", description="文档数据", + 'name': openapi.Schema(type=openapi.TYPE_STRING, title=_('dataset name'), + description=_('dataset name')), + 'desc': openapi.Schema(type=openapi.TYPE_STRING, title=_('dataset description'), + 
description=_('dataset description')), + 'embedding_mode_id': openapi.Schema(type=openapi.TYPE_STRING, title=_('embedding mode'), + description=_('embedding mode')), + 'documents': openapi.Schema(type=openapi.TYPE_ARRAY, title=_('documents'), + description=_('documents'), items=DocumentSerializers().Create.get_request_body_api() ) } @@ -427,14 +543,14 @@ def get_request_body_api(): class Edit(serializers.Serializer): name = serializers.CharField(required=False, max_length=64, min_length=1, - error_messages=ErrMessage.char("知识库名称")) + error_messages=ErrMessage.char(_('dataset name'))) desc = serializers.CharField(required=False, max_length=256, min_length=1, - error_messages=ErrMessage.char("知识库描述")) + error_messages=ErrMessage.char(_('dataset description'))) meta = serializers.DictField(required=False) application_id_list = serializers.ListSerializer(required=False, child=serializers.UUIDField(required=True, error_messages=ErrMessage.char( - "应用id")), - error_messages=ErrMessage.char("应用列表")) + _('application id'))), + error_messages=ErrMessage.char(_('application id list'))) @staticmethod def get_dataset_meta_valid_map(): @@ -453,21 +569,21 @@ def is_valid(self, *, dataset: DataSet = None): class HitTest(ApiMixin, serializers.Serializer): id = serializers.CharField(required=True, error_messages=ErrMessage.char("id")) - user_id = serializers.UUIDField(required=False, error_messages=ErrMessage.char("用户id")) - query_text = serializers.CharField(required=True, error_messages=ErrMessage.char("查询文本")) - top_number = serializers.IntegerField(required=True, max_value=10, min_value=1, - error_messages=ErrMessage.char("响应Top")) - similarity = serializers.FloatField(required=True, max_value=1, min_value=0, - error_messages=ErrMessage.char("相似度")) + user_id = serializers.UUIDField(required=False, error_messages=ErrMessage.char(_('user id'))) + query_text = serializers.CharField(required=True, error_messages=ErrMessage.char(_('query text'))) + top_number = 
serializers.IntegerField(required=True, max_value=10000, min_value=1, + error_messages=ErrMessage.char("top number")) + similarity = serializers.FloatField(required=True, max_value=2, min_value=0, + error_messages=ErrMessage.char(_('similarity'))) search_mode = serializers.CharField(required=True, validators=[ validators.RegexValidator(regex=re.compile("^embedding|keywords|blend$"), - message="类型只支持register|reset_password", code=500) - ], error_messages=ErrMessage.char("检索模式")) + message=_('The type only supports embedding|keywords|blend'), code=500) + ], error_messages=ErrMessage.char(_('search mode'))) def is_valid(self, *, raise_exception=True): super().is_valid(raise_exception=True) if not QuerySet(DataSet).filter(id=self.data.get("id")).exists(): - raise AppApiException(300, "id不存在") + raise AppApiException(300, _('id does not exist')) def hit_test(self): self.is_valid() @@ -476,12 +592,13 @@ def hit_test(self): QuerySet(Document).filter( dataset_id=self.data.get('id'), is_active=False)] + model = get_embedding_model_by_dataset_id(self.data.get('id')) # 向量库检索 hit_list = vector.hit_test(self.data.get('query_text'), [self.data.get('id')], exclude_document_id_list, self.data.get('top_number'), self.data.get('similarity'), SearchMode(self.data.get('search_mode')), - EmbeddingModel.get_embedding_model()) + model) hit_dict = reduce(lambda x, y: {**x, **y}, [{hit.get('paragraph_id'): hit} for hit in hit_list], {}) p_list = list_paragraph([h.get('paragraph_id') for h in hit_list]) return [{**p, 'similarity': hit_dict.get(p.get('id')).get('similarity'), @@ -489,22 +606,22 @@ def hit_test(self): class SyncWeb(ApiMixin, serializers.Serializer): id = serializers.CharField(required=True, error_messages=ErrMessage.char( - "知识库id")) + _('dataset id'))) user_id = serializers.UUIDField(required=False, error_messages=ErrMessage.char( - "用户id")) + _('user id'))) sync_type = serializers.CharField(required=True, error_messages=ErrMessage.char( - "同步类型"), validators=[ + _(_('sync 
type'))), validators=[ validators.RegexValidator(regex=re.compile("^replace|complete$"), - message="同步类型只支持:replace|complete", code=500) + message=_('The synchronization type only supports:replace|complete'), code=500) ]) def is_valid(self, *, raise_exception=False): super().is_valid(raise_exception=True) first = QuerySet(DataSet).filter(id=self.data.get("id")).first() if first is None: - raise AppApiException(300, "id不存在") + raise AppApiException(300, _('id does not exist')) if first.type != Type.web: - raise AppApiException(500, "只有web站点类型才支持同步") + raise AppApiException(500, _('Synchronization is only supported for web site types')) def sync(self, with_valid=True): if with_valid: @@ -523,7 +640,9 @@ def handler(child_link: ChildLink, response: Fork.Response): document_name = child_link.tag.text if child_link.tag is not None and len( child_link.tag.text.strip()) > 0 else child_link.url paragraphs = get_split_model('web.md').parse(response.content) - first = QuerySet(Document).filter(meta__source_url=child_link.url, dataset=dataset).first() + print(child_link.url.strip()) + first = QuerySet(Document).filter(meta__source_url=child_link.url.strip(), + dataset=dataset).first() if first is not None: # 如果存在,使用文档同步 DocumentSerializers.Sync(data={'document_id': first.id}).sync() @@ -531,7 +650,8 @@ def handler(child_link: ChildLink, response: Fork.Response): # 插入 DocumentSerializers.Create(data={'dataset_id': dataset.id}).save( {'name': document_name, 'paragraphs': paragraphs, - 'meta': {'source_url': child_link.url, 'selector': dataset.meta.get('selector')}, + 'meta': {'source_url': child_link.url.strip(), + 'selector': dataset.meta.get('selector')}, 'type': Type.web}, with_valid=True) except Exception as e: logging.getLogger("max_kb_error").error(f'{str(e)}:{traceback.format_exc()}') @@ -545,9 +665,7 @@ def replace_sync(self, dataset): """ url = dataset.meta.get('source_url') selector = dataset.meta.get('selector') if 'selector' in dataset.meta else None - 
ListenerManagement.sync_web_dataset_signal.send( - SyncWebDatasetArgs(str(dataset.id), url, selector, - self.get_sync_handler(dataset))) + sync_replace_web_dataset.delay(str(dataset.id), url, selector) def complete_sync(self, dataset): """ @@ -561,7 +679,7 @@ def complete_sync(self, dataset): # 删除段落 QuerySet(Paragraph).filter(dataset=dataset).delete() # 删除向量 - ListenerManagement.delete_embedding_by_dataset_signal.send(self.data.get('id')) + delete_embedding_by_dataset(self.data.get('id')) # 同步 self.replace_sync(dataset) @@ -571,24 +689,96 @@ def get_request_params_api(): in_=openapi.IN_PATH, type=openapi.TYPE_STRING, required=True, - description='知识库id'), + description=_('dataset id')), openapi.Parameter(name='sync_type', in_=openapi.IN_QUERY, type=openapi.TYPE_STRING, required=True, - description='同步类型->replace:替换同步,complete:完整同步') + description=_( + 'Synchronization type->replace: replacement synchronization, complete: complete synchronization')) ] class Operate(ApiMixin, serializers.Serializer): id = serializers.CharField(required=True, error_messages=ErrMessage.char( - "知识库id")) + _('dataset id'))) user_id = serializers.UUIDField(required=False, error_messages=ErrMessage.char( - "用户id")) + _('user id'))) def is_valid(self, *, raise_exception=True): super().is_valid(raise_exception=True) if not QuerySet(DataSet).filter(id=self.data.get("id")).exists(): - raise AppApiException(300, "id不存在") + raise AppApiException(300, _('id does not exist')) + + def export_excel(self, with_valid=True): + if with_valid: + self.is_valid(raise_exception=True) + document_list = QuerySet(Document).filter(dataset_id=self.data.get('id')) + paragraph_list = native_search(QuerySet(Paragraph).filter(dataset_id=self.data.get("id")), get_file_content( + os.path.join(PROJECT_DIR, "apps", "dataset", 'sql', 'list_paragraph_document_name.sql'))) + problem_mapping_list = native_search( + QuerySet(ProblemParagraphMapping).filter(dataset_id=self.data.get("id")), get_file_content( + 
os.path.join(PROJECT_DIR, "apps", "dataset", 'sql', 'list_problem_mapping.sql')), + with_table_name=True) + data_dict, document_dict = DocumentSerializers.Operate.merge_problem(paragraph_list, problem_mapping_list, + document_list) + workbook = DocumentSerializers.Operate.get_workbook(data_dict, document_dict) + response = HttpResponse(content_type='application/vnd.ms-excel') + response['Content-Disposition'] = 'attachment; filename="dataset.xlsx"' + workbook.save(response) + return response + + def export_zip(self, with_valid=True): + if with_valid: + self.is_valid(raise_exception=True) + document_list = QuerySet(Document).filter(dataset_id=self.data.get('id')) + paragraph_list = native_search(QuerySet(Paragraph).filter(dataset_id=self.data.get("id")), get_file_content( + os.path.join(PROJECT_DIR, "apps", "dataset", 'sql', 'list_paragraph_document_name.sql'))) + problem_mapping_list = native_search( + QuerySet(ProblemParagraphMapping).filter(dataset_id=self.data.get("id")), get_file_content( + os.path.join(PROJECT_DIR, "apps", "dataset", 'sql', 'list_problem_mapping.sql')), + with_table_name=True) + data_dict, document_dict = DocumentSerializers.Operate.merge_problem(paragraph_list, problem_mapping_list, + document_list) + res = [parse_image(paragraph.get('content')) for paragraph in paragraph_list] + + workbook = DocumentSerializers.Operate.get_workbook(data_dict, document_dict) + response = HttpResponse(content_type='application/zip') + response['Content-Disposition'] = 'attachment; filename="archive.zip"' + zip_buffer = io.BytesIO() + with TemporaryDirectory() as tempdir: + dataset_file = os.path.join(tempdir, 'dataset.xlsx') + workbook.save(dataset_file) + for r in res: + write_image(tempdir, r) + zip_dir(tempdir, zip_buffer) + response.write(zip_buffer.getvalue()) + return response + + @staticmethod + def merge_problem(paragraph_list: List[Dict], problem_mapping_list: List[Dict]): + result = {} + document_dict = {} + + for paragraph in paragraph_list: + 
problem_list = [problem_mapping.get('content') for problem_mapping in problem_mapping_list if + problem_mapping.get('paragraph_id') == paragraph.get('id')] + document_sheet = result.get(paragraph.get('document_id')) + d = document_dict.get(paragraph.get('document_name')) + if d is None: + document_dict[paragraph.get('document_name')] = {paragraph.get('document_id')} + else: + d.add(paragraph.get('document_id')) + + if document_sheet is None: + result[paragraph.get('document_id')] = [[paragraph.get('title'), paragraph.get('content'), + '\n'.join(problem_list)]] + else: + document_sheet.append([paragraph.get('title'), paragraph.get('content'), '\n'.join(problem_list)]) + result_document_dict = {} + for d_name in document_dict: + for index, d_id in enumerate(document_dict.get(d_name)): + result_document_dict[d_id] = d_name if index == 0 else d_name + str(index) + return result, result_document_dict @transaction.atomic def delete(self): @@ -599,9 +789,60 @@ def delete(self): QuerySet(Paragraph).filter(dataset=dataset).delete() QuerySet(Problem).filter(dataset=dataset).delete() dataset.delete() - ListenerManagement.delete_embedding_by_dataset_signal.send(self.data.get('id')) + delete_embedding_by_dataset(self.data.get('id')) return True + @transaction.atomic + def re_embedding(self, with_valid=True): + if with_valid: + self.is_valid(raise_exception=True) + dataset_id = self.data.get('id') + dataset = QuerySet(DataSet).filter(id=dataset_id).first() + embedding_model_id = dataset.embedding_mode_id + dataset_user_id = dataset.user_id + embedding_model = QuerySet(Model).filter(id=embedding_model_id).first() + if embedding_model is None: + raise AppApiException(500, _('Model does not exist')) + if embedding_model.permission_type == 'PRIVATE' and dataset_user_id != embedding_model.user_id: + raise AppApiException(500, _('No permission to use this model') + f"{embedding_model.name}") + ListenerManagement.update_status(QuerySet(Document).filter(dataset_id=self.data.get('id')), 
+ TaskType.EMBEDDING, + State.PENDING) + ListenerManagement.update_status(QuerySet(Paragraph).filter(dataset_id=self.data.get('id')), + TaskType.EMBEDDING, + State.PENDING) + ListenerManagement.get_aggregation_document_status_by_dataset_id(self.data.get('id'))() + embedding_model_id = get_embedding_model_id_by_dataset_id(self.data.get('id')) + try: + embedding_by_dataset.delay(dataset_id, embedding_model_id) + except AlreadyQueued as e: + raise AppApiException(500, _('Failed to send the vectorization task, please try again later!')) + + def generate_related(self, instance: Dict, with_valid=True): + if with_valid: + self.is_valid(raise_exception=True) + GenerateRelatedSerializer(data=instance).is_valid(raise_exception=True) + dataset_id = self.data.get('id') + model_id = instance.get("model_id") + prompt = instance.get("prompt") + state_list = instance.get('state_list') + ListenerManagement.update_status(QuerySet(Document).filter(dataset_id=dataset_id), + TaskType.GENERATE_PROBLEM, + State.PENDING) + ListenerManagement.update_status(QuerySet(Paragraph).annotate( + reversed_status=Reverse('status'), + task_type_status=Substr('reversed_status', TaskType.GENERATE_PROBLEM.value, + 1), + ).filter(task_type_status__in=state_list, dataset_id=dataset_id) + .values('id'), + TaskType.GENERATE_PROBLEM, + State.PENDING) + ListenerManagement.get_aggregation_document_status_by_dataset_id(dataset_id)() + try: + generate_related_by_dataset_id.delay(dataset_id, model_id, prompt, state_list) + except AlreadyQueued as e: + raise AppApiException(500, _('Failed to send the vectorization task, please try again later!')) + def list_application(self, with_valid=True): if with_valid: self.is_valid(raise_exception=True) @@ -622,7 +863,7 @@ def one(self, user_id, with_valid=True): ), 'team_member_permission_custom_sql': QuerySet( model=get_dynamics_model({'user_id': models.CharField(), 'team_member_permission.operate': ArrayField( - verbose_name="权限操作列表", + verbose_name=_('permission'), 
base_field=models.CharField(max_length=256, blank=True, choices=AuthOperate.choices, @@ -639,6 +880,7 @@ def one(self, user_id, with_valid=True): QuerySet(ApplicationDatasetMapping).filter( dataset_id=self.data.get('id'))]))} + @transaction.atomic def edit(self, dataset: Dict, user_id: str): """ 修改知识库 @@ -649,9 +891,11 @@ def edit(self, dataset: Dict, user_id: str): self.is_valid() if QuerySet(DataSet).filter(user_id=user_id, name=dataset.get('name')).exclude( id=self.data.get('id')).exists(): - raise AppApiException(500, "知识库名称重复!") + raise AppApiException(500, _('Knowledge base name duplicate!')) _dataset = QuerySet(DataSet).get(id=self.data.get("id")) DataSetSerializers.Edit(data=dataset).is_valid(dataset=_dataset) + if 'embedding_mode_id' in dataset: + _dataset.embedding_mode_id = dataset.get('embedding_mode_id') if "name" in dataset: _dataset.name = dataset.get("name") if 'desc' in dataset: @@ -665,7 +909,9 @@ def edit(self, dataset: Dict, user_id: str): self.list_application(with_valid=False)] for dataset_id in application_id_list: if not application_dataset_id_list.__contains__(dataset_id): - raise AppApiException(500, f"未知的应用id${dataset_id},无法关联") + raise AppApiException(500, + _('Unknown application id {dataset_id}, cannot be associated').format( + dataset_id=dataset_id)) # 删除已经关联的id QuerySet(ApplicationDatasetMapping).filter(application_id__in=application_dataset_id_list, @@ -687,12 +933,15 @@ def get_request_body_api(): type=openapi.TYPE_OBJECT, required=['name', 'desc'], properties={ - 'name': openapi.Schema(type=openapi.TYPE_STRING, title="知识库名称", description="知识库名称"), - 'desc': openapi.Schema(type=openapi.TYPE_STRING, title="知识库描述", description="知识库描述"), - 'meta': openapi.Schema(type=openapi.TYPE_OBJECT, title="知识库元数据", - description="知识库元数据->web:{source_url:xxx,selector:'xxx'},base:{}"), - 'application_id_list': openapi.Schema(type=openapi.TYPE_ARRAY, title="应用id列表", - description="应用id列表", + 'name': openapi.Schema(type=openapi.TYPE_STRING, 
title=_('dataset name'), + description=_('dataset name')), + 'desc': openapi.Schema(type=openapi.TYPE_STRING, title=_('dataset description'), + description=_('dataset description')), + 'meta': openapi.Schema(type=openapi.TYPE_OBJECT, title=_('meta'), + description=_( + 'Knowledge base metadata->web:{source_url:xxx,selector:\'xxx\'},base:{}')), + 'application_id_list': openapi.Schema(type=openapi.TYPE_ARRAY, title=_('application id list'), + description=_('application id list'), items=openapi.Schema(type=openapi.TYPE_STRING)) } ) @@ -706,21 +955,21 @@ def get_response_body_api(): properties={ 'id': openapi.Schema(type=openapi.TYPE_STRING, title="id", description="id", default="xx"), - 'name': openapi.Schema(type=openapi.TYPE_STRING, title="名称", - description="名称", default="测试知识库"), - 'desc': openapi.Schema(type=openapi.TYPE_STRING, title="描述", - description="描述", default="测试知识库描述"), - 'user_id': openapi.Schema(type=openapi.TYPE_STRING, title="所属用户id", - description="所属用户id", default="user_xxxx"), - 'char_length': openapi.Schema(type=openapi.TYPE_STRING, title="字符数", - description="字符数", default=10), - 'document_count': openapi.Schema(type=openapi.TYPE_STRING, title="文档数量", - description="文档数量", default=1), - 'update_time': openapi.Schema(type=openapi.TYPE_STRING, title="修改时间", - description="修改时间", + 'name': openapi.Schema(type=openapi.TYPE_STRING, title=_('dataset name'), + description=_('dataset name'), default=_('dataset name')), + 'desc': openapi.Schema(type=openapi.TYPE_STRING, title=_('dataset description'), + description=_('dataset description'), default=_('dataset description')), + 'user_id': openapi.Schema(type=openapi.TYPE_STRING, title=_('user id'), + description=_('user id'), default="user_xxxx"), + 'char_length': openapi.Schema(type=openapi.TYPE_STRING, title=_('char length'), + description=_('char length'), default=10), + 'document_count': openapi.Schema(type=openapi.TYPE_STRING, title=_('document count'), + description=_('document count'), default=1), 
+ 'update_time': openapi.Schema(type=openapi.TYPE_STRING, title=_('update time'), + description=_('update time'), default="1970-01-01 00:00:00"), - 'create_time': openapi.Schema(type=openapi.TYPE_STRING, title="创建时间", - description="创建时间", + 'create_time': openapi.Schema(type=openapi.TYPE_STRING, title=_('create time'), + description=_('create time'), default="1970-01-01 00:00:00" ) } @@ -732,5 +981,5 @@ def get_request_params_api(): in_=openapi.IN_PATH, type=openapi.TYPE_STRING, required=True, - description='知识库id') + description=_('dataset id')), ] diff --git a/apps/dataset/serializers/document_serializers.py b/apps/dataset/serializers/document_serializers.py index b659ea9781a..3b92a7e60fa 100644 --- a/apps/dataset/serializers/document_serializers.py +++ b/apps/dataset/serializers/document_serializers.py @@ -6,58 +6,127 @@ @date:2023/9/22 13:43 @desc: """ +import io import logging import os import re import traceback import uuid from functools import reduce +from tempfile import TemporaryDirectory from typing import List, Dict +import openpyxl +from celery_once import AlreadyQueued from django.core import validators -from django.db import transaction -from django.db.models import QuerySet +from django.db import transaction, models +from django.db.models import QuerySet, Count +from django.db.models.functions import Substr, Reverse +from django.http import HttpResponse +from django.utils.translation import get_language +from django.utils.translation import gettext_lazy as _, gettext, to_locale from drf_yasg import openapi +from openpyxl.cell.cell import ILLEGAL_CHARACTERS_RE from rest_framework import serializers +from xlwt import Utils -from common.db.search import native_search, native_page_search +from common.db.search import native_search, native_page_search, get_dynamics_model +from common.event import ListenerManagement from common.event.common import work_thread_pool -from common.event.listener_manage import ListenerManagement, SyncWebDocumentArgs, 
UpdateEmbeddingDatasetIdArgs from common.exception.app_exception import AppApiException +from common.handle.impl.csv_split_handle import CsvSplitHandle from common.handle.impl.doc_split_handle import DocSplitHandle +from common.handle.impl.html_split_handle import HTMLSplitHandle from common.handle.impl.pdf_split_handle import PdfSplitHandle +from common.handle.impl.qa.csv_parse_qa_handle import CsvParseQAHandle +from common.handle.impl.qa.xls_parse_qa_handle import XlsParseQAHandle +from common.handle.impl.qa.xlsx_parse_qa_handle import XlsxParseQAHandle +from common.handle.impl.qa.zip_parse_qa_handle import ZipParseQAHandle +from common.handle.impl.table.csv_parse_table_handle import CsvSplitHandle as CsvSplitTableHandle +from common.handle.impl.table.xls_parse_table_handle import XlsSplitHandle as XlsSplitTableHandle +from common.handle.impl.table.xlsx_parse_table_handle import XlsxSplitHandle as XlsxSplitTableHandle from common.handle.impl.text_split_handle import TextSplitHandle +from common.handle.impl.xls_split_handle import XlsSplitHandle +from common.handle.impl.xlsx_split_handle import XlsxSplitHandle +from common.handle.impl.zip_split_handle import ZipSplitHandle from common.mixins.api_mixin import ApiMixin -from common.util.common import post +from common.util.common import post, flat_map, bulk_create_in_batches, parse_image from common.util.field_message import ErrMessage from common.util.file_util import get_file_content from common.util.fork import Fork from common.util.split_model import get_split_model -from dataset.models.data_set import DataSet, Document, Paragraph, Problem, Type, Status, ProblemParagraphMapping, Image -from dataset.serializers.common_serializers import BatchSerializer, MetaSerializer +from dataset.models.data_set import DataSet, Document, Paragraph, Problem, Type, ProblemParagraphMapping, Image, \ + TaskType, State +from dataset.serializers.common_serializers import BatchSerializer, MetaSerializer, ProblemParagraphManage, \ + 
get_embedding_model_id_by_dataset_id, write_image, zip_dir from dataset.serializers.paragraph_serializers import ParagraphSerializers, ParagraphInstanceSerializer +from dataset.task import sync_web_document, generate_related_by_document_id +from embedding.task.embedding import embedding_by_document, delete_embedding_by_document_list, \ + delete_embedding_by_document, update_embedding_dataset_id, delete_embedding_by_paragraph_ids, \ + embedding_by_document_list +from setting.models import Model from smartdoc.conf import PROJECT_DIR +parse_qa_handle_list = [XlsParseQAHandle(), CsvParseQAHandle(), XlsxParseQAHandle(), ZipParseQAHandle()] +parse_table_handle_list = [CsvSplitTableHandle(), XlsSplitTableHandle(), XlsxSplitTableHandle()] + + +class FileBufferHandle: + buffer = None + + def get_buffer(self, file): + if self.buffer is None: + self.buffer = file.read() + return self.buffer + + +class BatchCancelInstanceSerializer(serializers.Serializer): + id_list = serializers.ListField(required=True, child=serializers.UUIDField(required=True), + error_messages=ErrMessage.char(_('id list'))) + type = serializers.IntegerField(required=True, error_messages=ErrMessage.integer( + _('task type'))) + + def is_valid(self, *, raise_exception=False): + super().is_valid(raise_exception=True) + _type = self.data.get('type') + try: + TaskType(_type) + except Exception as e: + raise AppApiException(500, _('task type not support')) + + +class CancelInstanceSerializer(serializers.Serializer): + type = serializers.IntegerField(required=True, error_messages=ErrMessage.integer( + _('task type'))) + + def is_valid(self, *, raise_exception=False): + super().is_valid(raise_exception=True) + _type = self.data.get('type') + try: + TaskType(_type) + except Exception as e: + raise AppApiException(500, _('task type not support')) + class DocumentEditInstanceSerializer(ApiMixin, serializers.Serializer): meta = serializers.DictField(required=False) name = serializers.CharField(required=False, 
max_length=128, min_length=1, error_messages=ErrMessage.char( - "文档名称")) + _('document name'))) hit_handling_method = serializers.CharField(required=False, validators=[ validators.RegexValidator(regex=re.compile("^optimization|directly_return$"), - message="类型只支持optimization|directly_return", + message=_('The type only supports optimization|directly_return'), code=500) - ], error_messages=ErrMessage.char("命中处理方式")) + ], error_messages=ErrMessage.char(_('hit handling method'))) directly_return_similarity = serializers.FloatField(required=False, max_value=2, min_value=0, error_messages=ErrMessage.float( - "直接返回分数")) + _('directly return similarity'))) is_active = serializers.BooleanField(required=False, error_messages=ErrMessage.boolean( - "文档是否可用")) + _('document is active'))) @staticmethod def get_meta_valid_map(): @@ -78,12 +147,27 @@ def is_valid(self, *, document: Document = None): class DocumentWebInstanceSerializer(ApiMixin, serializers.Serializer): source_url_list = serializers.ListField(required=True, child=serializers.CharField(required=True, error_messages=ErrMessage.char( - "文档地址")), + _('document url list'))), error_messages=ErrMessage.char( - "文档地址列表")) + _('document url list'))) selector = serializers.CharField(required=False, allow_null=True, allow_blank=True, error_messages=ErrMessage.char( - "选择器")) + _('selector'))) + + @staticmethod + def get_request_params_api(): + return [openapi.Parameter(name='file', + in_=openapi.IN_FORM, + type=openapi.TYPE_ARRAY, + items=openapi.Items(type=openapi.TYPE_FILE), + required=True, + description=_('file')), + openapi.Parameter(name='dataset_id', + in_=openapi.IN_PATH, + type=openapi.TYPE_STRING, + required=True, + description=_('dataset id')), + ] @staticmethod def get_request_body_api(): @@ -91,16 +175,17 @@ def get_request_body_api(): type=openapi.TYPE_OBJECT, required=['source_url_list'], properties={ - 'source_url_list': openapi.Schema(type=openapi.TYPE_ARRAY, title="段落列表", description="段落列表", + 
'source_url_list': openapi.Schema(type=openapi.TYPE_ARRAY, title=_('source url list'), + description=_('source url list'), items=openapi.Schema(type=openapi.TYPE_STRING)), - 'selector': openapi.Schema(type=openapi.TYPE_STRING, title="文档名称", description="文档名称") + 'selector': openapi.Schema(type=openapi.TYPE_STRING, title=_('selector'), description=_('selector')) } ) class DocumentInstanceSerializer(ApiMixin, serializers.Serializer): name = serializers.CharField(required=True, - error_messages=ErrMessage.char("文档名称"), + error_messages=ErrMessage.char(_('document name')), max_length=128, min_length=1) @@ -112,24 +197,100 @@ def get_request_body_api(): type=openapi.TYPE_OBJECT, required=['name', 'paragraphs'], properties={ - 'name': openapi.Schema(type=openapi.TYPE_STRING, title="文档名称", description="文档名称"), - 'paragraphs': openapi.Schema(type=openapi.TYPE_ARRAY, title="段落列表", description="段落列表", + 'name': openapi.Schema(type=openapi.TYPE_STRING, title=_('document name'), + description=_('document name')), + 'paragraphs': openapi.Schema(type=openapi.TYPE_ARRAY, title=_('paragraphs'), + description=_('paragraphs'), items=ParagraphSerializers.Create.get_request_body_api()) } ) +class DocumentInstanceQASerializer(ApiMixin, serializers.Serializer): + file_list = serializers.ListSerializer(required=True, + error_messages=ErrMessage.list(_('file list')), + child=serializers.FileField(required=True, + error_messages=ErrMessage.file(_('file')))) + + +class DocumentInstanceTableSerializer(ApiMixin, serializers.Serializer): + file_list = serializers.ListSerializer(required=True, + error_messages=ErrMessage.list(_('file list')), + child=serializers.FileField(required=True, + error_messages=ErrMessage.file(_('file')))) + + class DocumentSerializers(ApiMixin, serializers.Serializer): + class Export(ApiMixin, serializers.Serializer): + type = serializers.CharField(required=True, validators=[ + validators.RegexValidator(regex=re.compile("^csv|excel$"), + message=_('The template type 
only supports excel|csv'), + code=500) + ], error_messages=ErrMessage.char(_('type'))) + + @staticmethod + def get_request_params_api(): + return [openapi.Parameter(name='type', + in_=openapi.IN_QUERY, + type=openapi.TYPE_STRING, + required=True, + description=_('Export template type csv|excel')), + + ] + + def export(self, with_valid=True): + if with_valid: + self.is_valid(raise_exception=True) + language = get_language() + if self.data.get('type') == 'csv': + file = open( + os.path.join(PROJECT_DIR, "apps", "dataset", 'template', f'csv_template_{to_locale(language)}.csv'), + "rb") + content = file.read() + file.close() + return HttpResponse(content, status=200, headers={'Content-Type': 'text/csv', + 'Content-Disposition': 'attachment; filename="csv_template.csv"'}) + elif self.data.get('type') == 'excel': + file = open(os.path.join(PROJECT_DIR, "apps", "dataset", 'template', + f'excel_template_{to_locale(language)}.xlsx'), "rb") + content = file.read() + file.close() + return HttpResponse(content, status=200, headers={'Content-Type': 'application/vnd.ms-excel', + 'Content-Disposition': 'attachment; filename="excel_template.xlsx"'}) + + def table_export(self, with_valid=True): + if with_valid: + self.is_valid(raise_exception=True) + language = get_language() + if self.data.get('type') == 'csv': + file = open( + os.path.join(PROJECT_DIR, "apps", "dataset", 'template', + f'table_template_{to_locale(language)}.csv'), + "rb") + content = file.read() + file.close() + return HttpResponse(content, status=200, headers={'Content-Type': 'text/cxv', + 'Content-Disposition': 'attachment; filename="csv_template.csv"'}) + elif self.data.get('type') == 'excel': + file = open(os.path.join(PROJECT_DIR, "apps", "dataset", 'template', + f'table_template_{to_locale(language)}.xlsx'), + "rb") + content = file.read() + file.close() + return HttpResponse(content, status=200, headers={'Content-Type': 'application/vnd.ms-excel', + 'Content-Disposition': 'attachment; 
filename="excel_template.xlsx"'}) + class Migrate(ApiMixin, serializers.Serializer): dataset_id = serializers.UUIDField(required=True, error_messages=ErrMessage.char( - "知识库id")) + _('dataset id'))) target_dataset_id = serializers.UUIDField(required=True, error_messages=ErrMessage.char( - "目标知识库id")) - document_id_list = serializers.ListField(required=True, error_messages=ErrMessage.char("文档列表"), + _('target dataset id'))) + document_id_list = serializers.ListField(required=True, error_messages=ErrMessage.char(_('document list')), child=serializers.UUIDField(required=True, - error_messages=ErrMessage.uuid("文档id"))) + error_messages=ErrMessage.uuid( + _('document id')))) @transaction.atomic def migrate(self, with_valid=True): @@ -172,12 +333,27 @@ def migrate(self, with_valid=True): meta={}) else: document_list.update(dataset_id=target_dataset_id) - # 修改向量信息 - ListenerManagement.update_embedding_dataset_id(UpdateEmbeddingDatasetIdArgs( - [paragraph.id for paragraph in paragraph_list], - target_dataset_id)) + model_id = None + if dataset.embedding_mode_id != target_dataset.embedding_mode_id: + model_id = get_embedding_model_id_by_dataset_id(target_dataset_id) + + pid_list = [paragraph.id for paragraph in paragraph_list] # 修改段落信息 paragraph_list.update(dataset_id=target_dataset_id) + # 修改向量信息 + if model_id: + delete_embedding_by_paragraph_ids(pid_list) + ListenerManagement.update_status(QuerySet(Document).filter(id__in=document_id_list), + TaskType.EMBEDDING, + State.PENDING) + ListenerManagement.update_status(QuerySet(Paragraph).filter(document_id__in=document_id_list), + TaskType.EMBEDDING, + State.PENDING) + ListenerManagement.get_aggregation_document_status_by_query_set( + QuerySet(Document).filter(id__in=document_id_list))() + embedding_by_document_list.delay(document_id_list, model_id) + else: + update_embedding_dataset_id(pid_list, target_dataset_id) @staticmethod def get_target_dataset_problem(target_dataset_id: str, @@ -207,12 +383,12 @@ def 
get_request_params_api(): in_=openapi.IN_PATH, type=openapi.TYPE_STRING, required=True, - description='知识库id'), + description=_('document id')), openapi.Parameter(name='target_dataset_id', in_=openapi.IN_PATH, type=openapi.TYPE_STRING, required=True, - description='目标知识库id') + description=_('target document id')) ] @staticmethod @@ -220,21 +396,26 @@ def get_request_body_api(): return openapi.Schema( type=openapi.TYPE_ARRAY, items=openapi.Schema(type=openapi.TYPE_STRING), - title='文档id列表', - description="文档id列表" + title=_('document id list'), + description=_('document id list') ) class Query(ApiMixin, serializers.Serializer): # 知识库id dataset_id = serializers.UUIDField(required=True, error_messages=ErrMessage.char( - "知识库id")) + _('dataset id'))) name = serializers.CharField(required=False, max_length=128, min_length=1, error_messages=ErrMessage.char( - "文档名称")) - hit_handling_method = serializers.CharField(required=False, error_messages=ErrMessage.char("命中处理方式")) + _('document name'))) + hit_handling_method = serializers.CharField(required=False, + error_messages=ErrMessage.char(_('hit handling method'))) + is_active = serializers.BooleanField(required=False, error_messages=ErrMessage.boolean(_('document is active'))) + task_type = serializers.IntegerField(required=False, error_messages=ErrMessage.integer(_('task type'))) + status = serializers.CharField(required=False, error_messages=ErrMessage.char(_('status'))) + order_by = serializers.CharField(required=False, error_messages=ErrMessage.char(_('order by'))) def get_query_set(self): query_set = QuerySet(model=Document) @@ -243,8 +424,36 @@ def get_query_set(self): query_set = query_set.filter(**{'name__icontains': self.data.get('name')}) if 'hit_handling_method' in self.data and self.data.get('hit_handling_method') is not None: query_set = query_set.filter(**{'hit_handling_method': self.data.get('hit_handling_method')}) - query_set = query_set.order_by('-create_time') - return query_set + if 'is_active' in 
self.data and self.data.get('is_active') is not None: + query_set = query_set.filter(**{'is_active': self.data.get('is_active')}) + if 'status' in self.data and self.data.get( + 'status') is not None: + task_type = self.data.get('task_type') + status = self.data.get( + 'status') + if task_type is not None: + query_set = query_set.annotate( + reversed_status=Reverse('status'), + task_type_status=Substr('reversed_status', TaskType(task_type).value, + 1), + ).filter(task_type_status=State(status).value).values('id') + else: + if status != State.SUCCESS.value: + query_set = query_set.filter(status__icontains=status) + else: + query_set = query_set.filter(status__iregex='^[2n]*$') + order_by = self.data.get('order_by', '') + order_by_query_set = QuerySet(model=get_dynamics_model( + {'char_length': models.CharField(), 'paragraph_count': models.IntegerField(), + "update_time": models.IntegerField(), 'create_time': models.DateTimeField()})) + if order_by: + order_by_query_set = order_by_query_set.order_by(order_by) + else: + order_by_query_set = order_by_query_set.order_by('-create_time', 'id') + return { + 'document_custom_sql': query_set, + 'order_by_query': order_by_query_set + } def list(self, with_valid=False): if with_valid: @@ -264,41 +473,44 @@ def get_request_params_api(): in_=openapi.IN_QUERY, type=openapi.TYPE_STRING, required=False, - description='文档名称'), + description=_('document name')), openapi.Parameter(name='hit_handling_method', in_=openapi.IN_QUERY, type=openapi.TYPE_STRING, required=False, - description='文档命中处理方式')] + description=_('hit handling method')), ] @staticmethod def get_response_body_api(): return openapi.Schema(type=openapi.TYPE_ARRAY, - title="文档列表", description="文档列表", + title=_('document list'), description=_('document list'), items=DocumentSerializers.Operate.get_response_body_api()) class Sync(ApiMixin, serializers.Serializer): document_id = serializers.UUIDField(required=True, error_messages=ErrMessage.char( - "文档id")) + _('document 
id'))) def is_valid(self, *, raise_exception=False): super().is_valid(raise_exception=True) document_id = self.data.get('document_id') first = QuerySet(Document).filter(id=document_id).first() if first is None: - raise AppApiException(500, "文档id不存在") + raise AppApiException(500, _('document id not exist')) if first.type != Type.web: - raise AppApiException(500, "只有web站点类型才支持同步") + raise AppApiException(500, _('Synchronization is only supported for web site types')) def sync(self, with_valid=True, with_embedding=True): if with_valid: self.is_valid(raise_exception=True) document_id = self.data.get('document_id') document = QuerySet(Document).filter(id=document_id).first() + state = State.SUCCESS if document.type != Type.web: return True try: - document.status = Status.embedding - document.save() + ListenerManagement.update_status(QuerySet(Document).filter(id=document_id), + TaskType.SYNC, + State.PENDING) + ListenerManagement.get_aggregation_document_status(document_id)() source_url = document.meta.get('source_url') selector_list = document.meta.get('selector').split( " ") if 'selector' in document.meta and document.meta.get('selector') is not None else [] @@ -308,18 +520,20 @@ def sync(self, with_valid=True, with_embedding=True): QuerySet(model=Paragraph).filter(document_id=document_id).delete() # 删除问题 QuerySet(model=ProblemParagraphMapping).filter(document_id=document_id).delete() + delete_problems_and_mappings([document_id]) # 删除向量库 - ListenerManagement.delete_embedding_by_document_signal.send(document_id) + delete_embedding_by_document(document_id) paragraphs = get_split_model('web.md').parse(result.content) - document.char_length = reduce(lambda x, y: x + y, - [len(p.get('content')) for p in paragraphs], - 0) - document.save() + char_length = reduce(lambda x, y: x + y, + [len(p.get('content')) for p in paragraphs], + 0) + QuerySet(Document).filter(id=document_id).update(char_length=char_length) document_paragraph_model = 
DocumentSerializers.Create.get_paragraph_model(document, paragraphs) paragraph_model_list = document_paragraph_model.get('paragraph_model_list') - problem_model_list = document_paragraph_model.get('problem_model_list') - problem_paragraph_mapping_list = document_paragraph_model.get('problem_paragraph_mapping_list') + problem_paragraph_object_list = document_paragraph_model.get('problem_paragraph_object_list') + problem_model_list, problem_paragraph_mapping_list = ProblemParagraphManage( + problem_paragraph_object_list, document.dataset_id).to_problem_model_list() # 批量插入段落 QuerySet(Paragraph).bulk_create(paragraph_model_list) if len(paragraph_model_list) > 0 else None # 批量插入问题 @@ -329,19 +543,34 @@ def sync(self, with_valid=True, with_embedding=True): problem_paragraph_mapping_list) > 0 else None # 向量化 if with_embedding: - ListenerManagement.embedding_by_document_signal.send(document_id) + embedding_model_id = get_embedding_model_id_by_dataset_id(document.dataset_id) + ListenerManagement.update_status(QuerySet(Document).filter(id=document_id), + TaskType.EMBEDDING, + State.PENDING) + ListenerManagement.update_status(QuerySet(Paragraph).filter(document_id=document_id), + TaskType.EMBEDDING, + State.PENDING) + ListenerManagement.get_aggregation_document_status(document_id)() + embedding_by_document.delay(document_id, embedding_model_id) + else: - document.status = Status.error - document.save() + state = State.FAILURE except Exception as e: logging.getLogger("max_kb_error").error(f'{str(e)}:{traceback.format_exc()}') - document.status = Status.error - document.save() + state = State.FAILURE + ListenerManagement.update_status(QuerySet(Document).filter(id=document_id), + TaskType.SYNC, + state) + ListenerManagement.update_status(QuerySet(Paragraph).filter(document_id=document_id), + TaskType.SYNC, + state) + ListenerManagement.get_aggregation_document_status(document_id)() return True class Operate(ApiMixin, serializers.Serializer): document_id = 
serializers.UUIDField(required=True, error_messages=ErrMessage.char( - "文档id")) + _('document id'))) + dataset_id = serializers.UUIDField(required=True, error_messages=ErrMessage.char(_('dataset id'))) @staticmethod def get_request_params_api(): @@ -349,26 +578,148 @@ def get_request_params_api(): in_=openapi.IN_PATH, type=openapi.TYPE_STRING, required=True, - description='知识库id'), + description=_('document id')), openapi.Parameter(name='document_id', in_=openapi.IN_PATH, type=openapi.TYPE_STRING, required=True, - description='文档id') + description=_('document id')) ] def is_valid(self, *, raise_exception=False): super().is_valid(raise_exception=True) document_id = self.data.get('document_id') if not QuerySet(Document).filter(id=document_id).exists(): - raise AppApiException(500, "文档id不存在") + raise AppApiException(500, _('document id not exist')) + + def export(self, with_valid=True): + if with_valid: + self.is_valid(raise_exception=True) + document = QuerySet(Document).filter(id=self.data.get("document_id")).first() + paragraph_list = native_search(QuerySet(Paragraph).filter(document_id=self.data.get("document_id")), + get_file_content( + os.path.join(PROJECT_DIR, "apps", "dataset", 'sql', + 'list_paragraph_document_name.sql'))) + problem_mapping_list = native_search( + QuerySet(ProblemParagraphMapping).filter(document_id=self.data.get("document_id")), get_file_content( + os.path.join(PROJECT_DIR, "apps", "dataset", 'sql', 'list_problem_mapping.sql')), + with_table_name=True) + data_dict, document_dict = self.merge_problem(paragraph_list, problem_mapping_list, [document]) + workbook = self.get_workbook(data_dict, document_dict) + response = HttpResponse(content_type='application/vnd.ms-excel') + response['Content-Disposition'] = f'attachment; filename="data.xlsx"' + workbook.save(response) + return response + + def export_zip(self, with_valid=True): + if with_valid: + self.is_valid(raise_exception=True) + document = 
QuerySet(Document).filter(id=self.data.get("document_id")).first() + paragraph_list = native_search(QuerySet(Paragraph).filter(document_id=self.data.get("document_id")), + get_file_content( + os.path.join(PROJECT_DIR, "apps", "dataset", 'sql', + 'list_paragraph_document_name.sql'))) + problem_mapping_list = native_search( + QuerySet(ProblemParagraphMapping).filter(document_id=self.data.get("document_id")), get_file_content( + os.path.join(PROJECT_DIR, "apps", "dataset", 'sql', 'list_problem_mapping.sql')), + with_table_name=True) + data_dict, document_dict = self.merge_problem(paragraph_list, problem_mapping_list, [document]) + res = [parse_image(paragraph.get('content')) for paragraph in paragraph_list] + + workbook = DocumentSerializers.Operate.get_workbook(data_dict, document_dict) + response = HttpResponse(content_type='application/zip') + response['Content-Disposition'] = 'attachment; filename="archive.zip"' + zip_buffer = io.BytesIO() + with TemporaryDirectory() as tempdir: + dataset_file = os.path.join(tempdir, 'dataset.xlsx') + workbook.save(dataset_file) + for r in res: + write_image(tempdir, r) + zip_dir(tempdir, zip_buffer) + response.write(zip_buffer.getvalue()) + return response + + @staticmethod + def get_workbook(data_dict, document_dict): + # 创建工作簿对象 + workbook = openpyxl.Workbook() + workbook.remove_sheet(workbook.active) + if len(data_dict.keys()) == 0: + data_dict['sheet'] = [] + for sheet_id in data_dict: + # 添加工作表 + worksheet = workbook.create_sheet(document_dict.get(sheet_id)) + data = [ + [gettext('Section title (optional)'), + gettext('Section content (required, question answer, no more than 4096 characters)'), + gettext('Question (optional, one per line in the cell)')], + *data_dict.get(sheet_id, []) + ] + # 写入数据到工作表 + for row_idx, row in enumerate(data): + for col_idx, col in enumerate(row): + cell = worksheet.cell(row=row_idx + 1, column=col_idx + 1) + if isinstance(col, str): + col = re.sub(ILLEGAL_CHARACTERS_RE, '', col) + if 
col.startswith(('=', '+', '-', '@')): + col = '\ufeff' + col + cell.value = col + # 创建HttpResponse对象返回Excel文件 + return workbook + + @staticmethod + def merge_problem(paragraph_list: List[Dict], problem_mapping_list: List[Dict], document_list): + result = {} + document_dict = {} + + for paragraph in paragraph_list: + problem_list = [problem_mapping.get('content') for problem_mapping in problem_mapping_list if + problem_mapping.get('paragraph_id') == paragraph.get('id')] + document_sheet = result.get(paragraph.get('document_id')) + document_name = DocumentSerializers.Operate.reset_document_name(paragraph.get('document_name')) + d = document_dict.get(document_name) + if d is None: + document_dict[document_name] = {paragraph.get('document_id')} + else: + d.add(paragraph.get('document_id')) + + if document_sheet is None: + result[paragraph.get('document_id')] = [[paragraph.get('title'), paragraph.get('content'), + '\n'.join(problem_list)]] + else: + document_sheet.append([paragraph.get('title'), paragraph.get('content'), '\n'.join(problem_list)]) + for document in document_list: + if document.id not in result: + document_name = DocumentSerializers.Operate.reset_document_name(document.name) + result[document.id] = [[]] + d = document_dict.get(document_name) + if d is None: + document_dict[document_name] = {document.id} + else: + d.add(document.id) + result_document_dict = {} + for d_name in document_dict: + for index, d_id in enumerate(document_dict.get(d_name)): + result_document_dict[d_id] = d_name if index == 0 else d_name + str(index) + return result, result_document_dict + + @staticmethod + def reset_document_name(document_name): + if document_name is not None: + document_name = document_name.strip()[0:29] + if document_name is None or not Utils.valid_sheet_name(document_name): + return "Sheet" + return document_name.strip() def one(self, with_valid=False): if with_valid: self.is_valid(raise_exception=True) query_set = QuerySet(model=Document) query_set = 
query_set.filter(**{'id': self.data.get("document_id")}) - return native_search(query_set, select_string=get_file_content( + return native_search({ + 'document_custom_sql': query_set, + 'order_by_query': QuerySet(Document).order_by('-create_time', 'id') + }, select_string=get_file_content( os.path.join(PROJECT_DIR, "apps", "dataset", 'sql', 'list_document.sql')), with_search_one=True) def edit(self, instance: Dict, with_valid=False): @@ -384,21 +735,61 @@ def edit(self, instance: Dict, with_valid=False): _document.save() return self.one() - def refresh(self, with_valid=True): + def refresh(self, state_list=None, with_valid=True): + if state_list is None: + state_list = [State.PENDING.value, State.STARTED.value, State.SUCCESS.value, State.FAILURE.value, + State.REVOKE.value, + State.REVOKED.value, State.IGNORED.value] if with_valid: self.is_valid(raise_exception=True) + dataset = QuerySet(DataSet).filter(id=self.data.get('dataset_id')).first() + embedding_model_id = dataset.embedding_mode_id + dataset_user_id = dataset.user_id + embedding_model = QuerySet(Model).filter(id=embedding_model_id).first() + if embedding_model is None: + raise AppApiException(500, _('Model does not exist')) + if embedding_model.permission_type == 'PRIVATE' and dataset_user_id != embedding_model.user_id: + raise AppApiException(500, _('No permission to use this model') + f"{embedding_model.name}") document_id = self.data.get("document_id") - document = QuerySet(Document).filter(id=document_id).first() - if document.type == Type.web: - # 异步同步 - work_thread_pool.submit(lambda x: DocumentSerializers.Sync(data={'document_id': document_id}).sync(), - {}) + ListenerManagement.update_status(QuerySet(Document).filter(id=document_id), TaskType.EMBEDDING, + State.PENDING) + ListenerManagement.update_status(QuerySet(Paragraph).annotate( + reversed_status=Reverse('status'), + task_type_status=Substr('reversed_status', TaskType.EMBEDDING.value, + 1), + ).filter(task_type_status__in=state_list, 
document_id=document_id) + .values('id'), + TaskType.EMBEDDING, + State.PENDING) + ListenerManagement.get_aggregation_document_status(document_id)() + + try: + embedding_by_document.delay(document_id, embedding_model_id, state_list) + except AlreadyQueued as e: + raise AppApiException(500, _('The task is being executed, please do not send it repeatedly.')) + + def cancel(self, instance, with_valid=True): + if with_valid: + self.is_valid(raise_exception=True) + CancelInstanceSerializer(data=instance).is_valid() + document_id = self.data.get("document_id") + ListenerManagement.update_status(QuerySet(Paragraph).annotate( + reversed_status=Reverse('status'), + task_type_status=Substr('reversed_status', TaskType(instance.get('type')).value, + 1), + ).filter(task_type_status__in=[State.PENDING.value, State.STARTED.value]).filter( + document_id=document_id).values('id'), + TaskType(instance.get('type')), + State.REVOKE) + ListenerManagement.update_status(QuerySet(Document).annotate( + reversed_status=Reverse('status'), + task_type_status=Substr('reversed_status', TaskType(instance.get('type')).value, + 1), + ).filter(task_type_status__in=[State.PENDING.value, State.STARTED.value]).filter( + id=document_id).values('id'), + TaskType(instance.get('type')), + State.REVOKE) - else: - if document.status != Status.embedding.value: - document.status = Status.embedding - document.save() - ListenerManagement.embedding_by_document_signal.send(document_id) return True @transaction.atomic @@ -408,9 +799,9 @@ def delete(self): # 删除段落 QuerySet(model=Paragraph).filter(document_id=document_id).delete() # 删除问题 - QuerySet(model=ProblemParagraphMapping).filter(document_id=document_id).delete() + delete_problems_and_mappings([document_id]) # 删除向量库 - ListenerManagement.delete_embedding_by_document_signal.send(document_id) + delete_embedding_by_document(document_id) return True @staticmethod @@ -422,20 +813,20 @@ def get_response_body_api(): properties={ 'id': 
openapi.Schema(type=openapi.TYPE_STRING, title="id", description="id", default="xx"), - 'name': openapi.Schema(type=openapi.TYPE_STRING, title="名称", - description="名称", default="测试知识库"), - 'char_length': openapi.Schema(type=openapi.TYPE_INTEGER, title="字符数", - description="字符数", default=10), - 'user_id': openapi.Schema(type=openapi.TYPE_STRING, title="用户id", description="用户id"), - 'paragraph_count': openapi.Schema(type=openapi.TYPE_INTEGER, title="文档数量", - description="文档数量", default=1), - 'is_active': openapi.Schema(type=openapi.TYPE_BOOLEAN, title="是否可用", - description="是否可用", default=True), - 'update_time': openapi.Schema(type=openapi.TYPE_STRING, title="修改时间", - description="修改时间", + 'name': openapi.Schema(type=openapi.TYPE_STRING, title=_('name'), + description=_('name'), default="xx"), + 'char_length': openapi.Schema(type=openapi.TYPE_INTEGER, title=_('char length'), + description=_('char length'), default=10), + 'user_id': openapi.Schema(type=openapi.TYPE_STRING, title=_('user id'), description=_('user id')), + 'paragraph_count': openapi.Schema(type=openapi.TYPE_INTEGER, title="_('document count')", + description="_('document count')", default=1), + 'is_active': openapi.Schema(type=openapi.TYPE_BOOLEAN, title=_('Is active'), + description=_('Is active'), default=True), + 'update_time': openapi.Schema(type=openapi.TYPE_STRING, title=_('update time'), + description=_('update time'), default="1970-01-01 00:00:00"), - 'create_time': openapi.Schema(type=openapi.TYPE_STRING, title="创建时间", - description="创建时间", + 'create_time': openapi.Schema(type=openapi.TYPE_STRING, title=_('create time'), + description=_('create time'), default="1970-01-01 00:00:00" ) } @@ -446,32 +837,70 @@ def get_request_body_api(): return openapi.Schema( type=openapi.TYPE_OBJECT, properties={ - 'name': openapi.Schema(type=openapi.TYPE_STRING, title="文档名称", description="文档名称"), - 'is_active': openapi.Schema(type=openapi.TYPE_BOOLEAN, title="是否可用", description="是否可用"), - 'hit_handling_method': 
openapi.Schema(type=openapi.TYPE_STRING, title="命中处理方式", - description="ai优化:optimization,直接返回:directly_return"), - 'directly_return_similarity': openapi.Schema(type=openapi.TYPE_NUMBER, title="直接返回分数", + 'name': openapi.Schema(type=openapi.TYPE_STRING, title=_('document name'), + description=_('document name')), + 'is_active': openapi.Schema(type=openapi.TYPE_BOOLEAN, title=_('Is active'), + description=_('Is active')), + 'hit_handling_method': openapi.Schema(type=openapi.TYPE_STRING, title=_('hit handling method'), + description=_( + 'ai optimization: optimization, direct return: directly_return')), + 'directly_return_similarity': openapi.Schema(type=openapi.TYPE_NUMBER, + title=_('directly return similarity'), default=0.9), - 'meta': openapi.Schema(type=openapi.TYPE_OBJECT, title="文档元数据", - description="文档元数据->web:{source_url:xxx,selector:'xxx'},base:{}"), + 'meta': openapi.Schema(type=openapi.TYPE_OBJECT, title=_('meta'), + description=_( + 'Document metadata->web:{source_url:xxx,selector:\'xxx\'},base:{}')), } ) class Create(ApiMixin, serializers.Serializer): dataset_id = serializers.UUIDField(required=True, error_messages=ErrMessage.char( - "文档id")) + _('document id'))) def is_valid(self, *, raise_exception=False): super().is_valid(raise_exception=True) if not QuerySet(DataSet).filter(id=self.data.get('dataset_id')).exists(): - raise AppApiException(10000, "知识库id不存在") + raise AppApiException(10000, _('dataset id not exist')) return True @staticmethod - def post_embedding(result, document_id): - ListenerManagement.embedding_by_document_signal.send(document_id) + def post_embedding(result, document_id, dataset_id): + DocumentSerializers.Operate( + data={'dataset_id': dataset_id, 'document_id': document_id}).refresh() return result + @staticmethod + def parse_qa_file(file): + get_buffer = FileBufferHandle().get_buffer + for parse_qa_handle in parse_qa_handle_list: + if parse_qa_handle.support(file, get_buffer): + return parse_qa_handle.handle(file, get_buffer, 
save_image) + raise AppApiException(500, _('Unsupported file format')) + + @staticmethod + def parse_table_file(file): + get_buffer = FileBufferHandle().get_buffer + for parse_table_handle in parse_table_handle_list: + if parse_table_handle.support(file, get_buffer): + return parse_table_handle.handle(file, get_buffer, save_image) + raise AppApiException(500, _('Unsupported file format')) + + def save_qa(self, instance: Dict, with_valid=True): + if with_valid: + DocumentInstanceQASerializer(data=instance).is_valid(raise_exception=True) + self.is_valid(raise_exception=True) + file_list = instance.get('file_list') + document_list = flat_map([self.parse_qa_file(file) for file in file_list]) + return DocumentSerializers.Batch(data={'dataset_id': self.data.get('dataset_id')}).batch_save(document_list) + + def save_table(self, instance: Dict, with_valid=True): + if with_valid: + DocumentInstanceTableSerializer(data=instance).is_valid(raise_exception=True) + self.is_valid(raise_exception=True) + file_list = instance.get('file_list') + document_list = flat_map([self.parse_table_file(file) for file in file_list]) + return DocumentSerializers.Batch(data={'dataset_id': self.data.get('dataset_id')}).batch_save(document_list) + @post(post_function=post_embedding) @transaction.atomic def save(self, instance: Dict, with_valid=False, **kwargs): @@ -480,11 +909,13 @@ def save(self, instance: Dict, with_valid=False, **kwargs): self.is_valid(raise_exception=True) dataset_id = self.data.get('dataset_id') document_paragraph_model = self.get_document_paragraph_model(dataset_id, instance) + document_model = document_paragraph_model.get('document') paragraph_model_list = document_paragraph_model.get('paragraph_model_list') - problem_model_list = document_paragraph_model.get('problem_model_list') - problem_paragraph_mapping_list = document_paragraph_model.get('problem_paragraph_mapping_list') - + problem_paragraph_object_list = document_paragraph_model.get('problem_paragraph_object_list') 
+ problem_model_list, problem_paragraph_mapping_list = (ProblemParagraphManage(problem_paragraph_object_list, + dataset_id) + .to_problem_model_list()) # 插入文档 document_model.save() # 批量插入段落 @@ -497,29 +928,7 @@ def save(self, instance: Dict, with_valid=False, **kwargs): document_id = str(document_model.id) return DocumentSerializers.Operate( data={'dataset_id': dataset_id, 'document_id': document_id}).one( - with_valid=True), document_id - - @staticmethod - def get_sync_handler(dataset_id): - def handler(source_url: str, selector, response: Fork.Response): - if response.status == 200: - try: - paragraphs = get_split_model('web.md').parse(response.content) - # 插入 - DocumentSerializers.Create(data={'dataset_id': dataset_id}).save( - {'name': source_url, 'paragraphs': paragraphs, - 'meta': {'source_url': source_url, 'selector': selector}, - 'type': Type.web}, with_valid=True) - except Exception as e: - logging.getLogger("max_kb_error").error(f'{str(e)}:{traceback.format_exc()}') - else: - Document(name=source_url, - meta={'source_url': source_url, 'selector': selector}, - type=Type.web, - char_length=0, - status=Status.error).save() - - return handler + with_valid=True), document_id, dataset_id def save_web(self, instance: Dict, with_valid=True): if with_valid: @@ -528,8 +937,7 @@ def save_web(self, instance: Dict, with_valid=True): dataset_id = self.data.get('dataset_id') source_url_list = instance.get('source_url_list') selector = instance.get('selector') - args = SyncWebDocumentArgs(source_url_list, selector, self.get_sync_handler(dataset_id)) - ListenerManagement.sync_web_document_signal.send(args) + sync_web_document.delay(dataset_id, source_url_list, selector) @staticmethod def get_paragraph_model(document_model, paragraph_list: List): @@ -539,35 +947,15 @@ def get_paragraph_model(document_model, paragraph_list: List): dataset_id, document_model.id, paragraph) for paragraph in paragraph_list] paragraph_model_list = [] - problem_model_list = [] - 
problem_paragraph_mapping_list = [] + problem_paragraph_object_list = [] for paragraphs in paragraph_model_dict_list: paragraph = paragraphs.get('paragraph') - for problem_model in paragraphs.get('problem_model_list'): - problem_model_list.append(problem_model) - for problem_paragraph_mapping in paragraphs.get('problem_paragraph_mapping_list'): - problem_paragraph_mapping_list.append(problem_paragraph_mapping) + for problem_model in paragraphs.get('problem_paragraph_object_list'): + problem_paragraph_object_list.append(problem_model) paragraph_model_list.append(paragraph) - problem_model_list, problem_paragraph_mapping_list = DocumentSerializers.Create.reset_problem_model( - problem_model_list, problem_paragraph_mapping_list) - return {'document': document_model, 'paragraph_model_list': paragraph_model_list, - 'problem_model_list': problem_model_list, - 'problem_paragraph_mapping_list': problem_paragraph_mapping_list} - - @staticmethod - def reset_problem_model(problem_model_list, problem_paragraph_mapping_list): - new_problem_model_list = [x for i, x in enumerate(problem_model_list) if - len([item for item in problem_model_list[:i] if item.content == x.content]) <= 0] - - for new_problem_model in new_problem_model_list: - old_model_list = [problem.id for problem in problem_model_list if - problem.content == new_problem_model.content] - for problem_paragraph_mapping in problem_paragraph_mapping_list: - if old_model_list.__contains__(problem_paragraph_mapping.problem_id): - problem_paragraph_mapping.problem_id = new_problem_model.id - return new_problem_model_list, problem_paragraph_mapping_list + 'problem_paragraph_object_list': problem_paragraph_object_list} @staticmethod def get_document_paragraph_model(dataset_id, instance: Dict): @@ -595,31 +983,31 @@ def get_request_params_api(): in_=openapi.IN_PATH, type=openapi.TYPE_STRING, required=True, - description='知识库id') + description=_('document id')) ] class Split(ApiMixin, serializers.Serializer): file = 
serializers.ListField(required=True, error_messages=ErrMessage.list( - "文件列表")) + _('file list'))) limit = serializers.IntegerField(required=False, error_messages=ErrMessage.integer( - "分段长度")) + _('limit'))) patterns = serializers.ListField(required=False, child=serializers.CharField(required=True, error_messages=ErrMessage.char( - "分段标识")), - error_messages=ErrMessage.uuid( - "分段标识列表")) + _('patterns'))), + error_messages=ErrMessage.list( + _('patterns'))) with_filter = serializers.BooleanField(required=False, error_messages=ErrMessage.boolean( - "自动清洗")) + _('Auto Clean'))) def is_valid(self, *, raise_exception=True): super().is_valid(raise_exception=True) files = self.data.get('file') for f in files: if f.size > 1024 * 1024 * 100: - raise AppApiException(500, "上传文件最大不能超过100MB") + raise AppApiException(500, _('The maximum size of the uploaded file cannot exceed 100MB')) @staticmethod def get_request_params_api(): @@ -629,27 +1017,28 @@ def get_request_params_api(): type=openapi.TYPE_ARRAY, items=openapi.Items(type=openapi.TYPE_FILE), required=True, - description='上传文件'), + description=_('file list')), openapi.Parameter(name='limit', in_=openapi.IN_FORM, required=False, - type=openapi.TYPE_INTEGER, title="分段长度", description="分段长度"), + type=openapi.TYPE_INTEGER, title=_('limit'), description=_('limit')), openapi.Parameter(name='patterns', in_=openapi.IN_FORM, required=False, type=openapi.TYPE_ARRAY, items=openapi.Items(type=openapi.TYPE_STRING), - title="分段正则列表", description="分段正则列表"), + title=_('Segmented regular list'), description=_('Segmented regular list')), openapi.Parameter(name='with_filter', in_=openapi.IN_FORM, required=False, - type=openapi.TYPE_BOOLEAN, title="是否清除特殊字符", description="是否清除特殊字符"), + type=openapi.TYPE_BOOLEAN, title=_('Whether to clear special characters'), + description=_('Whether to clear special characters')), ] def parse(self): file_list = self.data.get("file") - return list( - map(lambda f: file_to_paragraph(f, 
self.data.get("patterns", None), self.data.get("with_filter", None), - self.data.get("limit", None)), file_list)) + return reduce(lambda x, y: [*x, *y], + [file_to_paragraph(f, self.data.get("patterns", None), self.data.get("with_filter", None), + self.data.get("limit", 4096)) for f in file_list], []) class SplitPattern(ApiMixin, serializers.Serializer): @staticmethod @@ -661,22 +1050,23 @@ def list(): {'key': '#####', 'value': "(?<=\\n)(? 0 else None # 批量插入段落 - QuerySet(Paragraph).bulk_create(paragraph_model_list) if len(paragraph_model_list) > 0 else None + bulk_create_in_batches(Paragraph, paragraph_model_list, batch_size=1000) # 批量插入问题 - QuerySet(Problem).bulk_create(problem_model_list) if len(problem_model_list) > 0 else None + bulk_create_in_batches(Problem, problem_model_list, batch_size=1000) # 批量插入关联问题 - QuerySet(ProblemParagraphMapping).bulk_create(problem_paragraph_mapping_list) if len( - problem_paragraph_mapping_list) > 0 else None + bulk_create_in_batches(ProblemParagraphMapping, problem_paragraph_mapping_list, batch_size=1000) # 查询文档 query_set = QuerySet(model=Document) + if len(document_model_list) == 0: + return [], dataset_id query_set = query_set.filter(**{'id__in': [d.id for d in document_model_list]}) - return native_search(query_set, select_string=get_file_content( - os.path.join(PROJECT_DIR, "apps", "dataset", 'sql', 'list_document.sql')), with_search_one=False), + return native_search({ + 'document_custom_sql': query_set, + 'order_by_query': QuerySet(Document).order_by('-create_time', 'id') + }, select_string=get_file_content( + os.path.join(PROJECT_DIR, "apps", "dataset", 'sql', 'list_document.sql')), + with_search_one=False), dataset_id @staticmethod def _batch_sync(document_id_list: List[str]): @@ -739,19 +1134,41 @@ def batch_delete(self, instance: Dict, with_valid=True): document_id_list = instance.get("id_list") QuerySet(Document).filter(id__in=document_id_list).delete() 
QuerySet(Paragraph).filter(document_id__in=document_id_list).delete() - QuerySet(ProblemParagraphMapping).filter(document_id__in=document_id_list).delete() + delete_problems_and_mappings(document_id_list) # 删除向量库 - ListenerManagement.delete_embedding_by_document_list_signal.send(document_id_list) + delete_embedding_by_document_list(document_id_list) return True + def batch_cancel(self, instance: Dict, with_valid=True): + if with_valid: + self.is_valid(raise_exception=True) + BatchCancelInstanceSerializer(data=instance).is_valid(raise_exception=True) + document_id_list = instance.get("id_list") + ListenerManagement.update_status(QuerySet(Paragraph).annotate( + reversed_status=Reverse('status'), + task_type_status=Substr('reversed_status', TaskType(instance.get('type')).value, + 1), + ).filter(task_type_status__in=[State.PENDING.value, State.STARTED.value]).filter( + document_id__in=document_id_list).values('id'), + TaskType(instance.get('type')), + State.REVOKE) + ListenerManagement.update_status(QuerySet(Document).annotate( + reversed_status=Reverse('status'), + task_type_status=Substr('reversed_status', TaskType(instance.get('type')).value, + 1), + ).filter(task_type_status__in=[State.PENDING.value, State.STARTED.value]).filter( + id__in=document_id_list).values('id'), + TaskType(instance.get('type')), + State.REVOKE) + def batch_edit_hit_handling(self, instance: Dict, with_valid=True): if with_valid: BatchSerializer(data=instance).is_valid(model=Document, raise_exception=True) hit_handling_method = instance.get('hit_handling_method') if hit_handling_method is None: - raise AppApiException(500, '命中处理方式必填') + raise AppApiException(500, _('Hit handling method is required')) if hit_handling_method != 'optimization' and hit_handling_method != 'directly_return': - raise AppApiException(500, '命中处理方式必须为directly_return|optimization') + raise AppApiException(500, _('The hit processing method must be directly_return|optimization')) self.is_valid(raise_exception=True) 
document_id_list = instance.get("id_list") hit_handling_method = instance.get('hit_handling_method') @@ -761,6 +1178,73 @@ def batch_edit_hit_handling(self, instance: Dict, with_valid=True): update_dict['directly_return_similarity'] = directly_return_similarity QuerySet(Document).filter(id__in=document_id_list).update(**update_dict) + def batch_refresh(self, instance: Dict, with_valid=True): + if with_valid: + self.is_valid(raise_exception=True) + document_id_list = instance.get("id_list") + state_list = instance.get("state_list") + dataset_id = self.data.get('dataset_id') + for document_id in document_id_list: + try: + DocumentSerializers.Operate( + data={'dataset_id': dataset_id, 'document_id': document_id}).refresh(state_list) + except AlreadyQueued as e: + pass + + class GenerateRelated(ApiMixin, serializers.Serializer): + document_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid(_('document id'))) + + def is_valid(self, *, raise_exception=False): + super().is_valid(raise_exception=True) + document_id = self.data.get('document_id') + if not QuerySet(Document).filter(id=document_id).exists(): + raise AppApiException(500, _('document id not exist')) + + def generate_related(self, model_id, prompt, state_list=None, with_valid=True): + if with_valid: + self.is_valid(raise_exception=True) + document_id = self.data.get('document_id') + ListenerManagement.update_status(QuerySet(Document).filter(id=document_id), + TaskType.GENERATE_PROBLEM, + State.PENDING) + ListenerManagement.update_status(QuerySet(Paragraph).filter(document_id=document_id), + TaskType.GENERATE_PROBLEM, + State.PENDING) + ListenerManagement.get_aggregation_document_status(document_id)() + try: + generate_related_by_document_id.delay(document_id, model_id, prompt, state_list) + except AlreadyQueued as e: + raise AppApiException(500, _('The task is being executed, please do not send it again.')) + + class BatchGenerateRelated(ApiMixin, serializers.Serializer): + dataset_id = 
serializers.UUIDField(required=True, error_messages=ErrMessage.uuid(_('dataset id'))) + + def batch_generate_related(self, instance: Dict, with_valid=True): + if with_valid: + self.is_valid(raise_exception=True) + document_id_list = instance.get("document_id_list") + model_id = instance.get("model_id") + prompt = instance.get("prompt") + state_list = instance.get('state_list') + ListenerManagement.update_status(QuerySet(Document).filter(id__in=document_id_list), + TaskType.GENERATE_PROBLEM, + State.PENDING) + ListenerManagement.update_status(QuerySet(Paragraph).annotate( + reversed_status=Reverse('status'), + task_type_status=Substr('reversed_status', TaskType.GENERATE_PROBLEM.value, + 1), + ).filter(task_type_status__in=state_list, document_id__in=document_id_list) + .values('id'), + TaskType.GENERATE_PROBLEM, + State.PENDING) + ListenerManagement.get_aggregation_document_status_by_query_set( + QuerySet(Document).filter(id__in=document_id_list))() + try: + for document_id in document_id_list: + generate_related_by_document_id.delay(document_id, model_id, prompt, state_list) + except AlreadyQueued as e: + pass + class FileBufferHandle: buffer = None @@ -772,16 +1256,46 @@ def get_buffer(self, file): default_split_handle = TextSplitHandle() -split_handles = [DocSplitHandle(), PdfSplitHandle(), default_split_handle] +split_handles = [HTMLSplitHandle(), DocSplitHandle(), PdfSplitHandle(), XlsxSplitHandle(), XlsSplitHandle(), + CsvSplitHandle(), + ZipSplitHandle(), + default_split_handle] def save_image(image_list): - QuerySet(Image).bulk_create(image_list) + if image_list is not None and len(image_list) > 0: + exist_image_list = [str(i.get('id')) for i in + QuerySet(Image).filter(id__in=[i.id for i in image_list]).values('id')] + save_image_list = [image for image in image_list if not exist_image_list.__contains__(str(image.id))] + save_image_list = list({img.id: img for img in save_image_list}.values()) + if len(save_image_list) > 0: + 
QuerySet(Image).bulk_create(save_image_list) def file_to_paragraph(file, pattern_list: List, with_filter: bool, limit: int): get_buffer = FileBufferHandle().get_buffer for split_handle in split_handles: if split_handle.support(file, get_buffer): - return split_handle.handle(file, pattern_list, with_filter, limit, get_buffer, save_image) - return default_split_handle.handle(file, pattern_list, with_filter, limit, get_buffer, save_image) + result = split_handle.handle(file, pattern_list, with_filter, limit, get_buffer, save_image) + if isinstance(result, list): + return result + return [result] + result = default_split_handle.handle(file, pattern_list, with_filter, limit, get_buffer, save_image) + if isinstance(result, list): + return result + return [result] + + +def delete_problems_and_mappings(document_ids): + problem_paragraph_mappings = ProblemParagraphMapping.objects.filter(document_id__in=document_ids) + problem_ids = set(problem_paragraph_mappings.values_list('problem_id', flat=True)) + + if problem_ids: + problem_paragraph_mappings.delete() + remaining_problem_counts = ProblemParagraphMapping.objects.filter(problem_id__in=problem_ids).values( + 'problem_id').annotate(count=Count('problem_id')) + remaining_problem_ids = {pc['problem_id'] for pc in remaining_problem_counts} + problem_ids_to_delete = problem_ids - remaining_problem_ids + Problem.objects.filter(id__in=problem_ids_to_delete).delete() + else: + problem_paragraph_mappings.delete() diff --git a/apps/dataset/serializers/file_serializers.py b/apps/dataset/serializers/file_serializers.py new file mode 100644 index 00000000000..899c8a088de --- /dev/null +++ b/apps/dataset/serializers/file_serializers.py @@ -0,0 +1,93 @@ +# coding=utf-8 +""" + @project: maxkb + @Author:虎 + @file: image_serializers.py + @date:2024/4/22 16:36 + @desc: +""" +import uuid + +from django.db.models import QuerySet +from django.http import HttpResponse +from rest_framework import serializers + +from 
common.exception.app_exception import NotFound404 +from common.field.common import UploadedFileField +from common.util.field_message import ErrMessage +from dataset.models import File +from django.utils.translation import gettext_lazy as _ + +mime_types = {"html": "text/html", "htm": "text/html", "shtml": "text/html", "css": "text/css", "xml": "text/xml", + "gif": "image/gif", "jpeg": "image/jpeg", "jpg": "image/jpeg", "js": "application/javascript", + "atom": "application/atom+xml", "rss": "application/rss+xml", "mml": "text/mathml", "txt": "text/plain", + "jad": "text/vnd.sun.j2me.app-descriptor", "wml": "text/vnd.wap.wml", "htc": "text/x-component", + "avif": "image/avif", "png": "image/png", "svg": "image/svg+xml", "svgz": "image/svg+xml", + "tif": "image/tiff", "tiff": "image/tiff", "wbmp": "image/vnd.wap.wbmp", "webp": "image/webp", + "ico": "image/x-icon", "jng": "image/x-jng", "bmp": "image/x-ms-bmp", "woff": "font/woff", + "woff2": "font/woff2", "jar": "application/java-archive", "war": "application/java-archive", + "ear": "application/java-archive", "json": "application/json", "hqx": "application/mac-binhex40", + "doc": "application/msword", "pdf": "application/pdf", "ps": "application/postscript", + "docx": "application/vnd.openxmlformats-officedocument.wordprocessingml.document", + "xlsx": "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet", + "pptx": "application/vnd.openxmlformats-officedocument.presentationml.presentation", + "eps": "application/postscript", "ai": "application/postscript", "rtf": "application/rtf", + "m3u8": "application/vnd.apple.mpegurl", "kml": "application/vnd.google-earth.kml+xml", + "kmz": "application/vnd.google-earth.kmz", "xls": "application/vnd.ms-excel", + "eot": "application/vnd.ms-fontobject", "ppt": "application/vnd.ms-powerpoint", + "odg": "application/vnd.oasis.opendocument.graphics", + "odp": "application/vnd.oasis.opendocument.presentation", + "ods": "application/vnd.oasis.opendocument.spreadsheet", 
"odt": "application/vnd.oasis.opendocument.text", + "wmlc": "application/vnd.wap.wmlc", "wasm": "application/wasm", "7z": "application/x-7z-compressed", + "cco": "application/x-cocoa", "jardiff": "application/x-java-archive-diff", + "jnlp": "application/x-java-jnlp-file", "run": "application/x-makeself", "pl": "application/x-perl", + "pm": "application/x-perl", "prc": "application/x-pilot", "pdb": "application/x-pilot", + "rar": "application/x-rar-compressed", "rpm": "application/x-redhat-package-manager", + "sea": "application/x-sea", "swf": "application/x-shockwave-flash", "sit": "application/x-stuffit", + "tcl": "application/x-tcl", "tk": "application/x-tcl", "der": "application/x-x509-ca-cert", + "pem": "application/x-x509-ca-cert", "crt": "application/x-x509-ca-cert", + "xpi": "application/x-xpinstall", "xhtml": "application/xhtml+xml", "xspf": "application/xspf+xml", + "zip": "application/zip", "bin": "application/octet-stream", "exe": "application/octet-stream", + "dll": "application/octet-stream", "deb": "application/octet-stream", "dmg": "application/octet-stream", + "iso": "application/octet-stream", "img": "application/octet-stream", "msi": "application/octet-stream", + "msp": "application/octet-stream", "msm": "application/octet-stream", "mid": "audio/midi", + "midi": "audio/midi", "kar": "audio/midi", "mp3": "audio/mpeg", "ogg": "audio/ogg", "m4a": "audio/x-m4a", + "ra": "audio/x-realaudio", "3gpp": "video/3gpp", "3gp": "video/3gpp", "ts": "video/mp2t", + "mp4": "video/mp4", "mpeg": "video/mpeg", "mpg": "video/mpeg", "mov": "video/quicktime", + "webm": "video/webm", "flv": "video/x-flv", "m4v": "video/x-m4v", "mng": "video/x-mng", + "asx": "video/x-ms-asf", "asf": "video/x-ms-asf", "wmv": "video/x-ms-wmv", "avi": "video/x-msvideo"} + + +class FileSerializer(serializers.Serializer): + file = UploadedFileField(required=True, error_messages=ErrMessage.image(_('file'))) + meta = serializers.JSONField(required=False, allow_null=True) + + def upload(self, 
with_valid=True): + if with_valid: + self.is_valid(raise_exception=True) + meta = self.data.get('meta', None) + if not meta: + meta = {'debug': True} + file_id = meta.get('file_id', uuid.uuid1()) + file = File(id=file_id, file_name=self.data.get('file').name, meta=meta) + file.save(self.data.get('file').read()) + return f'/api/file/{file_id}' + + class Operate(serializers.Serializer): + id = serializers.UUIDField(required=True) + + def get(self, with_valid=True): + if with_valid: + self.is_valid(raise_exception=True) + file_id = self.data.get('id') + file = QuerySet(File).filter(id=file_id).first() + if file is None: + raise NotFound404(404, _('File not found')) + # 如果是音频文件,直接返回文件流 + file_type = file.file_name.split(".")[-1] + if file_type in ['mp3', 'wav', 'ogg', 'aac']: + return HttpResponse(file.get_byte(), status=200, headers={'Content-Type': f'audio/{file_type}', + 'Content-Disposition': 'attachment; filename="{}"'.format( + file.file_name)}) + return HttpResponse(file.get_byte(), status=200, + headers={'Content-Type': mime_types.get(file_type, 'text/plain')}) diff --git a/apps/dataset/serializers/image_serializers.py b/apps/dataset/serializers/image_serializers.py index 46a1d72bc7a..a5ac289b07f 100644 --- a/apps/dataset/serializers/image_serializers.py +++ b/apps/dataset/serializers/image_serializers.py @@ -16,10 +16,11 @@ from common.field.common import UploadedImageField from common.util.field_message import ErrMessage from dataset.models import Image +from django.utils.translation import gettext_lazy as _ class ImageSerializer(serializers.Serializer): - image = UploadedImageField(required=True, error_messages=ErrMessage.image("图片")) + image = UploadedImageField(required=True, error_messages=ErrMessage.image(_('image'))) def upload(self, with_valid=True): if with_valid: @@ -38,5 +39,10 @@ def get(self, with_valid=True): image_id = self.data.get('id') image = QuerySet(Image).filter(id=image_id).first() if image is None: - raise NotFound404(404, "不存在的图片") + 
raise NotFound404(404, _('Image not found')) + if image.image_name.endswith('.svg'): + return HttpResponse(image.image, status=200, headers={'Content-Type': 'image/svg+xml'}) + # gif + elif image.image_name.endswith('.gif'): + return HttpResponse(image.image, status=200, headers={'Content-Type': 'image/gif'}) return HttpResponse(image.image, status=200, headers={'Content-Type': 'image/png'}) diff --git a/apps/dataset/serializers/paragraph_serializers.py b/apps/dataset/serializers/paragraph_serializers.py index 3188766a782..3a63fd95cd0 100644 --- a/apps/dataset/serializers/paragraph_serializers.py +++ b/apps/dataset/serializers/paragraph_serializers.py @@ -9,21 +9,28 @@ import uuid from typing import Dict +from celery_once import AlreadyQueued from django.db import transaction -from django.db.models import QuerySet +from django.db.models import QuerySet, Count from drf_yasg import openapi from rest_framework import serializers from common.db.search import page_search -from common.event.listener_manage import ListenerManagement, UpdateEmbeddingDocumentIdArgs +from common.event import ListenerManagement from common.exception.app_exception import AppApiException from common.mixins.api_mixin import ApiMixin from common.util.common import post from common.util.field_message import ErrMessage -from dataset.models import Paragraph, Problem, Document, ProblemParagraphMapping -from dataset.serializers.common_serializers import update_document_char_length, BatchSerializer +from dataset.models import Paragraph, Problem, Document, ProblemParagraphMapping, DataSet, TaskType, State +from dataset.serializers.common_serializers import update_document_char_length, BatchSerializer, ProblemParagraphObject, \ + ProblemParagraphManage, get_embedding_model_id_by_dataset_id from dataset.serializers.problem_serializers import ProblemInstanceSerializer, ProblemSerializer, ProblemSerializers from embedding.models import SourceType +from embedding.task.embedding import embedding_by_problem as 
embedding_by_problem_task, embedding_by_problem, \ + delete_embedding_by_source, enable_embedding_by_paragraph, disable_embedding_by_paragraph, embedding_by_paragraph, \ + delete_embedding_by_paragraph, delete_embedding_by_paragraph_ids, update_embedding_document_id +from dataset.task import generate_related_by_paragraph_id_list +from django.utils.translation import gettext_lazy as _ class ParagraphSerializer(serializers.ModelSerializer): @@ -37,17 +44,17 @@ class ParagraphInstanceSerializer(ApiMixin, serializers.Serializer): """ 段落实例对象 """ - content = serializers.CharField(required=True, error_messages=ErrMessage.char("段落内容"), - max_length=4096, + content = serializers.CharField(required=True, error_messages=ErrMessage.char(_('content')), + max_length=102400, min_length=1, allow_null=True, allow_blank=True) - title = serializers.CharField(required=False, max_length=256, error_messages=ErrMessage.char("段落标题"), + title = serializers.CharField(required=False, max_length=256, error_messages=ErrMessage.char(_('section title')), allow_null=True, allow_blank=True) problem_list = ProblemInstanceSerializer(required=False, many=True) - is_active = serializers.BooleanField(required=False, error_messages=ErrMessage.char("段落是否可用")) + is_active = serializers.BooleanField(required=False, error_messages=ErrMessage.char(_('Is active'))) @staticmethod def get_request_body_api(): @@ -55,16 +62,16 @@ def get_request_body_api(): type=openapi.TYPE_OBJECT, required=['content'], properties={ - 'content': openapi.Schema(type=openapi.TYPE_STRING, max_length=4096, title="分段内容", - description="分段内容"), + 'content': openapi.Schema(type=openapi.TYPE_STRING, max_length=4096, title=_('section content'), + description=_('section content')), - 'title': openapi.Schema(type=openapi.TYPE_STRING, max_length=256, title="分段标题", - description="分段标题"), + 'title': openapi.Schema(type=openapi.TYPE_STRING, max_length=256, title=_('section title'), + description=_('section title')), - 'is_active': 
openapi.Schema(type=openapi.TYPE_BOOLEAN, title="是否可用", description="是否可用"), + 'is_active': openapi.Schema(type=openapi.TYPE_BOOLEAN, title=_('Is active'), description=_('Is active')), - 'problem_list': openapi.Schema(type=openapi.TYPE_ARRAY, title='问题列表', - description="问题列表", + 'problem_list': openapi.Schema(type=openapi.TYPE_ARRAY, title=_('problem list'), + description=_('problem list'), items=ProblemInstanceSerializer.get_request_body_api()) } ) @@ -72,30 +79,30 @@ def get_request_body_api(): class EditParagraphSerializers(serializers.Serializer): title = serializers.CharField(required=False, max_length=256, error_messages=ErrMessage.char( - "分段标题"), allow_null=True, allow_blank=True) - content = serializers.CharField(required=False, max_length=4096, allow_null=True, allow_blank=True, + _('section title')), allow_null=True, allow_blank=True) + content = serializers.CharField(required=False, max_length=102400, allow_null=True, allow_blank=True, error_messages=ErrMessage.char( - "分段内容")) + _('section title'))) problem_list = ProblemInstanceSerializer(required=False, many=True) class ParagraphSerializers(ApiMixin, serializers.Serializer): title = serializers.CharField(required=False, max_length=256, error_messages=ErrMessage.char( - "分段标题"), allow_null=True, allow_blank=True) - content = serializers.CharField(required=True, max_length=4096, error_messages=ErrMessage.char( - "分段内容")) + _('section title')), allow_null=True, allow_blank=True) + content = serializers.CharField(required=True, max_length=102400, error_messages=ErrMessage.char( + _('section title'))) class Problem(ApiMixin, serializers.Serializer): - dataset_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid("知识库id")) + dataset_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid(_('dataset id'))) - document_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid("文档id")) + document_id = serializers.UUIDField(required=True, 
error_messages=ErrMessage.uuid(_('document id'))) - paragraph_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid("段落id")) + paragraph_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid(_('paragraph id'))) def is_valid(self, *, raise_exception=False): super().is_valid(raise_exception=True) if not QuerySet(Paragraph).filter(id=self.data.get('paragraph_id')).exists(): - raise AppApiException(500, "段落id不存在") + raise AppApiException(500, _('Paragraph id does not exist')) def list(self, with_valid=False): """ @@ -112,7 +119,7 @@ def list(self, with_valid=False): QuerySet(Problem).filter(id__in=[row.problem_id for row in problem_paragraph_mapping])] @transaction.atomic - def save(self, instance: Dict, with_valid=True, with_embedding=True): + def save(self, instance: Dict, with_valid=True, with_embedding=True, embedding_by_problem=None): if with_valid: self.is_valid() ProblemInstanceSerializer(data=instance).is_valid(raise_exception=True) @@ -124,22 +131,23 @@ def save(self, instance: Dict, with_valid=True, with_embedding=True): problem.save() if QuerySet(ProblemParagraphMapping).filter(dataset_id=self.data.get('dataset_id'), problem_id=problem.id, paragraph_id=self.data.get('paragraph_id')).exists(): - raise AppApiException(500, "已经关联,请勿重复关联") + raise AppApiException(500, _('Already associated, please do not associate again')) problem_paragraph_mapping = ProblemParagraphMapping(id=uuid.uuid1(), problem_id=problem.id, document_id=self.data.get('document_id'), paragraph_id=self.data.get('paragraph_id'), dataset_id=self.data.get('dataset_id')) problem_paragraph_mapping.save() + model_id = get_embedding_model_id_by_dataset_id(self.data.get('dataset_id')) if with_embedding: - ListenerManagement.embedding_by_problem_signal.send({'text': problem.content, - 'is_active': True, - 'source_type': SourceType.PROBLEM, - 'source_id': problem_paragraph_mapping.id, - 'document_id': self.data.get('document_id'), - 'paragraph_id': 
self.data.get('paragraph_id'), - 'dataset_id': self.data.get('dataset_id'), - }) + embedding_by_problem_task({'text': problem.content, + 'is_active': True, + 'source_type': SourceType.PROBLEM, + 'source_id': problem_paragraph_mapping.id, + 'document_id': self.data.get('document_id'), + 'paragraph_id': self.data.get('paragraph_id'), + 'dataset_id': self.data.get('dataset_id'), + }, model_id) return ProblemSerializers.Operate( data={'dataset_id': self.data.get('dataset_id'), @@ -151,17 +159,17 @@ def get_request_params_api(): in_=openapi.IN_PATH, type=openapi.TYPE_STRING, required=True, - description='知识库id'), + description=_('dataset id')), openapi.Parameter(name='document_id', in_=openapi.IN_PATH, type=openapi.TYPE_STRING, required=True, - description='文档id'), + description=_('document id')), openapi.Parameter(name='paragraph_id', in_=openapi.IN_PATH, type=openapi.TYPE_STRING, required=True, - description='段落id')] + description=_('paragraph id'))] @staticmethod def get_request_body_api(): @@ -169,7 +177,7 @@ def get_request_body_api(): required=["content"], properties={ 'content': openapi.Schema( - type=openapi.TYPE_STRING, title="内容") + type=openapi.TYPE_STRING, title=_('content'),) }) @staticmethod @@ -180,30 +188,30 @@ def get_response_body_api(): properties={ 'id': openapi.Schema(type=openapi.TYPE_STRING, title="id", description="id", default="xx"), - 'content': openapi.Schema(type=openapi.TYPE_STRING, title="问题内容", - description="问题内容", default='问题内容'), - 'hit_num': openapi.Schema(type=openapi.TYPE_INTEGER, title="命中数量", description="命中数量", + 'content': openapi.Schema(type=openapi.TYPE_STRING, title=_('question content'), + description=_('question content'), default=_('question content')), + 'hit_num': openapi.Schema(type=openapi.TYPE_INTEGER, title=_('hit num'), description=_('hit num'), default=1), - 'dataset_id': openapi.Schema(type=openapi.TYPE_STRING, title="知识库id", - description="知识库id", default='xxx'), - 'update_time': 
openapi.Schema(type=openapi.TYPE_STRING, title="修改时间", - description="修改时间", + 'dataset_id': openapi.Schema(type=openapi.TYPE_STRING, title=_('dataset id'), + description=_('dataset id'), default='xxx'), + 'update_time': openapi.Schema(type=openapi.TYPE_STRING, title=_('update time'), + description=_('update time'), default="1970-01-01 00:00:00"), - 'create_time': openapi.Schema(type=openapi.TYPE_STRING, title="创建时间", - description="创建时间", + 'create_time': openapi.Schema(type=openapi.TYPE_STRING, title=_('create time'), + description=_('create time'), default="1970-01-01 00:00:00" ) } ) class Association(ApiMixin, serializers.Serializer): - dataset_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid("知识库id")) + dataset_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid(_('dataset id'))) - problem_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid("问题id")) + problem_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid(_('problem id'))) - document_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid("文档id")) + document_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid(_('document id'))) - paragraph_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid("段落id")) + paragraph_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid(_('paragraph id'))) def is_valid(self, *, raise_exception=True): super().is_valid(raise_exception=True) @@ -211,9 +219,9 @@ def is_valid(self, *, raise_exception=True): paragraph_id = self.data.get('paragraph_id') problem_id = self.data.get("problem_id") if not QuerySet(Paragraph).filter(dataset_id=dataset_id, id=paragraph_id).exists(): - raise AppApiException(500, "段落不存在") + raise AppApiException(500, _('Paragraph does not exist')) if not QuerySet(Problem).filter(dataset_id=dataset_id, id=problem_id).exists(): - raise AppApiException(500, "问题不存在") + raise AppApiException(500, 
_('Problem does not exist')) def association(self, with_valid=True, with_embedding=True): if with_valid: @@ -226,14 +234,15 @@ def association(self, with_valid=True, with_embedding=True): problem_id=problem.id) problem_paragraph_mapping.save() if with_embedding: - ListenerManagement.embedding_by_problem_signal.send({'text': problem.content, - 'is_active': True, - 'source_type': SourceType.PROBLEM, - 'source_id': problem_paragraph_mapping.id, - 'document_id': self.data.get('document_id'), - 'paragraph_id': self.data.get('paragraph_id'), - 'dataset_id': self.data.get('dataset_id'), - }) + model_id = get_embedding_model_id_by_dataset_id(self.data.get('dataset_id')) + embedding_by_problem({'text': problem.content, + 'is_active': True, + 'source_type': SourceType.PROBLEM, + 'source_id': problem_paragraph_mapping.id, + 'document_id': self.data.get('document_id'), + 'paragraph_id': self.data.get('paragraph_id'), + 'dataset_id': self.data.get('dataset_id'), + }, model_id) def un_association(self, with_valid=True): if with_valid: @@ -245,7 +254,7 @@ def un_association(self, with_valid=True): 'problem_id')).first() problem_paragraph_mapping_id = problem_paragraph_mapping.id problem_paragraph_mapping.delete() - ListenerManagement.delete_embedding_by_source_signal.send(problem_paragraph_mapping_id) + delete_embedding_by_source(problem_paragraph_mapping_id) return True @staticmethod @@ -254,27 +263,27 @@ def get_request_params_api(): in_=openapi.IN_PATH, type=openapi.TYPE_STRING, required=True, - description='知识库id'), + description=_('dataset id')), openapi.Parameter(name='document_id', in_=openapi.IN_PATH, type=openapi.TYPE_STRING, required=True, - description='文档id') + description=_('document id')) , openapi.Parameter(name='paragraph_id', in_=openapi.IN_PATH, type=openapi.TYPE_STRING, required=True, - description='段落id'), + description=_('paragraph id')), openapi.Parameter(name='problem_id', in_=openapi.IN_PATH, type=openapi.TYPE_STRING, required=True, - description='问题id') + 
description=_('problem id')) ] class Batch(serializers.Serializer): - dataset_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid("知识库id")) - document_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid("文档id")) + dataset_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid(_('dataset id'))) + document_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid(_('document id'))) @transaction.atomic def batch_delete(self, instance: Dict, with_valid=True): @@ -283,20 +292,20 @@ def batch_delete(self, instance: Dict, with_valid=True): self.is_valid(raise_exception=True) paragraph_id_list = instance.get("id_list") QuerySet(Paragraph).filter(id__in=paragraph_id_list).delete() - QuerySet(ProblemParagraphMapping).filter(paragraph_id__in=paragraph_id_list).delete() + delete_problems_and_mappings(paragraph_id_list) update_document_char_length(self.data.get('document_id')) # 删除向量库 - ListenerManagement.delete_embedding_by_paragraph_ids(paragraph_id_list) + delete_embedding_by_paragraph_ids(paragraph_id_list) return True class Migrate(ApiMixin, serializers.Serializer): - dataset_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid("知识库id")) - document_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid("文档id")) - target_dataset_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid("目标知识库id")) - target_document_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid("目标文档id")) - paragraph_id_list = serializers.ListField(required=True, error_messages=ErrMessage.char("段落列表"), + dataset_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid(_('dataset id'))) + document_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid(_('document id'))) + target_dataset_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid(_('target dataset id'))) + target_document_id = 
serializers.UUIDField(required=True, error_messages=ErrMessage.uuid(_('target document id'))) + paragraph_id_list = serializers.ListField(required=True, error_messages=ErrMessage.char(_('paragraph id list')), child=serializers.UUIDField(required=True, - error_messages=ErrMessage.uuid("段落id"))) + error_messages=ErrMessage.uuid(_('paragraph id')))) def is_valid(self, *, raise_exception=False): super().is_valid(raise_exception=True) @@ -305,12 +314,14 @@ def is_valid(self, *, raise_exception=False): document_id = self.data.get('document_id') target_document_id = self.data.get('target_document_id') if document_id == target_document_id: - raise AppApiException(5000, "需要迁移的文档和目标文档一致") + raise AppApiException(5000, _('The document to be migrated is consistent with the target document')) if len([document for document in document_list if str(document.id) == self.data.get('document_id')]) < 1: - raise AppApiException(5000, f"文档id不存在【{self.data.get('document_id')}】") + raise AppApiException(5000, _('The document id does not exist [{document_id}]').format( + document_id=self.data.get('document_id'))) if len([document for document in document_list if str(document.id) == self.data.get('target_document_id')]) < 1: - raise AppApiException(5000, f"目标文档id不存在【{self.data.get('target_document_id')}】") + raise AppApiException(5000, _('The target document id does not exist [{document_id}]').format( + document_id=self.data.get('target_document_id'))) @transaction.atomic def migrate(self, with_valid=True): @@ -335,10 +346,8 @@ def migrate(self, with_valid=True): # 修改mapping QuerySet(ProblemParagraphMapping).bulk_update(problem_paragraph_mapping_list, ['document_id']) - # 修改向量段落信息 - ListenerManagement.update_embedding_document_id(UpdateEmbeddingDocumentIdArgs( - [paragraph.id for paragraph in paragraph_list], - target_document_id, target_dataset_id)) + update_embedding_document_id([paragraph.id for paragraph in paragraph_list], + target_document_id, target_dataset_id, None) # 修改段落信息 
paragraph_list.update(document_id=target_document_id) # 不同数据集迁移 @@ -365,12 +374,17 @@ def migrate(self, with_valid=True): # 修改mapping QuerySet(ProblemParagraphMapping).bulk_update(problem_paragraph_mapping_list, ['problem_id', 'dataset_id', 'document_id']) - # 修改向量段落信息 - ListenerManagement.update_embedding_document_id(UpdateEmbeddingDocumentIdArgs( - [paragraph.id for paragraph in paragraph_list], - target_document_id, target_dataset_id)) + target_dataset = QuerySet(DataSet).filter(id=target_dataset_id).first() + dataset = QuerySet(DataSet).filter(id=dataset_id).first() + embedding_model_id = None + if target_dataset.embedding_mode_id != dataset.embedding_mode_id: + embedding_model_id = str(target_dataset.embedding_mode_id) + pid_list = [paragraph.id for paragraph in paragraph_list] # 修改段落信息 paragraph_list.update(dataset_id=target_dataset_id, document_id=target_document_id) + # 修改向量段落信息 + update_embedding_document_id(pid_list, target_document_id, target_dataset_id, embedding_model_id) + update_document_char_length(document_id) update_document_char_length(target_document_id) @@ -409,22 +423,22 @@ def get_request_params_api(): in_=openapi.IN_PATH, type=openapi.TYPE_STRING, required=True, - description='文档id'), + description=_('document id')), openapi.Parameter(name='document_id', in_=openapi.IN_PATH, type=openapi.TYPE_STRING, required=True, - description='文档id'), + description=_('document id')), openapi.Parameter(name='target_dataset_id', in_=openapi.IN_PATH, type=openapi.TYPE_STRING, required=True, - description='目标知识库id'), + description=_('target dataset id')), openapi.Parameter(name='target_document_id', in_=openapi.IN_PATH, type=openapi.TYPE_STRING, required=True, - description='目标知识库id') + description=_('target document id')), ] @staticmethod @@ -432,34 +446,35 @@ def get_request_body_api(): return openapi.Schema( type=openapi.TYPE_ARRAY, items=openapi.Schema(type=openapi.TYPE_STRING), - title='段落id列表', - description="段落id列表" + title=_('paragraph id list'), + 
description=_('paragraph id list') ) class Operate(ApiMixin, serializers.Serializer): # 段落id paragraph_id = serializers.UUIDField(required=True, error_messages=ErrMessage.char( - "段落id")) + _('paragraph id'))) # 知识库id dataset_id = serializers.UUIDField(required=True, error_messages=ErrMessage.char( - "知识库id")) + _('dataset id'))) # 文档id document_id = serializers.UUIDField(required=True, error_messages=ErrMessage.char( - "文档id")) + _('document id'))) def is_valid(self, *, raise_exception=True): super().is_valid(raise_exception=True) if not QuerySet(Paragraph).filter(id=self.data.get('paragraph_id')).exists(): - raise AppApiException(500, "段落id不存在") + raise AppApiException(500, _('Paragraph id does not exist')) @staticmethod - def post_embedding(paragraph, instance): + def post_embedding(paragraph, instance, dataset_id): if 'is_active' in instance and instance.get('is_active') is not None: - s = (ListenerManagement.enable_embedding_by_paragraph_signal if instance.get( - 'is_active') else ListenerManagement.disable_embedding_by_paragraph_signal) - s.send(paragraph.get('id')) + (enable_embedding_by_paragraph if instance.get( + 'is_active') else disable_embedding_by_paragraph)(paragraph.get('id')) + else: - ListenerManagement.embedding_by_paragraph_signal.send(paragraph.get('id')) + model_id = get_embedding_model_id_by_dataset_id(dataset_id) + embedding_by_paragraph(paragraph.get('id'), model_id) return paragraph @post(post_embedding) @@ -485,7 +500,7 @@ def edit(self, instance: Dict): # 校验前端 携带过来的id for update_problem in update_problem_list: if not set([str(row.id) for row in problem_list]).__contains__(update_problem.get('id')): - raise AppApiException(500, update_problem.get('id') + '问题id不存在') + raise AppApiException(500, _('Problem id does not exist')) # 对比需要删除的问题 delete_problem_list = list(filter( lambda row: not [str(update_row.get('id')) for update_row in update_problem_list].__contains__( @@ -507,7 +522,7 @@ def edit(self, instance: Dict): _paragraph.save() 
update_document_char_length(self.data.get('document_id')) - return self.one(), instance + return self.one(), instance, self.data.get('dataset_id') def get_problem_list(self): ProblemParagraphMapping(ProblemParagraphMapping) @@ -528,10 +543,11 @@ def delete(self, with_valid=False): if with_valid: self.is_valid(raise_exception=True) paragraph_id = self.data.get('paragraph_id') - QuerySet(Paragraph).filter(id=paragraph_id).delete() - QuerySet(ProblemParagraphMapping).filter(paragraph_id=paragraph_id).delete() + Paragraph.objects.filter(id=paragraph_id).delete() + delete_problems_and_mappings([paragraph_id]) + update_document_char_length(self.data.get('document_id')) - ListenerManagement.delete_embedding_by_paragraph_signal.send(paragraph_id) + delete_embedding_by_paragraph(paragraph_id) @staticmethod def get_request_body_api(): @@ -544,20 +560,20 @@ def get_response_body_api(): @staticmethod def get_request_params_api(): return [openapi.Parameter(type=openapi.TYPE_STRING, in_=openapi.IN_PATH, name='paragraph_id', - description="段落id")] + description=_('paragraph id'))] class Create(ApiMixin, serializers.Serializer): dataset_id = serializers.UUIDField(required=True, error_messages=ErrMessage.char( - "知识库id")) + _('dataset id'))) document_id = serializers.UUIDField(required=True, error_messages=ErrMessage.char( - "文档id")) + _('document id'))) def is_valid(self, *, raise_exception=False): super().is_valid(raise_exception=True) if not QuerySet(Document).filter(id=self.data.get('document_id'), dataset_id=self.data.get('dataset_id')).exists(): - raise AppApiException(500, "文档id不正确") + raise AppApiException(500, _('The document id is incorrect')) def save(self, instance: Dict, with_valid=True, with_embedding=True): if with_valid: @@ -567,8 +583,10 @@ def save(self, instance: Dict, with_valid=True, with_embedding=True): document_id = self.data.get('document_id') paragraph_problem_model = self.get_paragraph_problem_model(dataset_id, document_id, instance) paragraph = 
paragraph_problem_model.get('paragraph') - problem_model_list = paragraph_problem_model.get('problem_model_list') - problem_paragraph_mapping_list = paragraph_problem_model.get('problem_paragraph_mapping_list') + problem_paragraph_object_list = paragraph_problem_model.get('problem_paragraph_object_list') + problem_model_list, problem_paragraph_mapping_list = (ProblemParagraphManage(problem_paragraph_object_list, + dataset_id). + to_problem_model_list()) # 插入段落 paragraph_problem_model.get('paragraph').save() # 插入問題 @@ -579,7 +597,8 @@ def save(self, instance: Dict, with_valid=True, with_embedding=True): # 修改长度 update_document_char_length(document_id) if with_embedding: - ListenerManagement.embedding_by_paragraph_signal.send(str(paragraph.id)) + model_id = get_embedding_model_id_by_dataset_id(dataset_id) + embedding_by_paragraph(str(paragraph.id), model_id) return ParagraphSerializers.Operate( data={'paragraph_id': str(paragraph.id), 'dataset_id': dataset_id, 'document_id': document_id}).one( with_valid=True) @@ -591,30 +610,12 @@ def get_paragraph_problem_model(dataset_id: str, document_id: str, instance: Dic content=instance.get("content"), dataset_id=dataset_id, title=instance.get("title") if 'title' in instance else '') - problem_list = instance.get('problem_list') - exists_problem_list = [] - if 'problem_list' in instance and len(problem_list) > 0: - exists_problem_list = QuerySet(Problem).filter(dataset_id=dataset_id, - content__in=[p.get('content') for p in - problem_list]).all() - - problem_model_list = [ - ParagraphSerializers.Create.or_get(exists_problem_list, problem.get('content'), dataset_id) for - problem in ( - instance.get('problem_list') if 'problem_list' in instance else [])] - # 问题去重 - problem_model_list = [x for i, x in enumerate(problem_model_list) if - len([item for item in problem_model_list[:i] if item.content == x.content]) <= 0] - - problem_paragraph_mapping_list = [ - ProblemParagraphMapping(id=uuid.uuid1(), document_id=document_id, 
problem_id=problem_model.id, - paragraph_id=paragraph.id, - dataset_id=dataset_id) for - problem_model in problem_model_list] + problem_paragraph_object_list = [ + ProblemParagraphObject(dataset_id, document_id, paragraph.id, problem.get('content')) for problem in + (instance.get('problem_list') if 'problem_list' in instance else [])] + return {'paragraph': paragraph, - 'problem_model_list': [problem_model for problem_model in problem_model_list if - not list(exists_problem_list).__contains__(problem_model)], - 'problem_paragraph_mapping_list': problem_paragraph_mapping_list} + 'problem_paragraph_object_list': problem_paragraph_object_list} @staticmethod def or_get(exists_problem_list, content, dataset_id): @@ -634,22 +635,22 @@ def get_request_params_api(): in_=openapi.IN_PATH, type=openapi.TYPE_STRING, required=True, - description='知识库id'), + description=_('dataset id')), openapi.Parameter(name='document_id', in_=openapi.IN_PATH, type=openapi.TYPE_STRING, required=True, - description="文档id") + description=_('document id')) ] class Query(ApiMixin, serializers.Serializer): dataset_id = serializers.UUIDField(required=True, error_messages=ErrMessage.char( - "知识库id")) + _('dataset id'))) document_id = serializers.UUIDField(required=True, error_messages=ErrMessage.char( - "文档id")) + _('document id'))) title = serializers.CharField(required=False, error_messages=ErrMessage.char( - "段落标题")) + _('section title'))) content = serializers.CharField(required=False) @@ -662,6 +663,7 @@ def get_query_set(self): **{'title__icontains': self.data.get('title')}) if 'content' in self.data: query_set = query_set.filter(**{'content__icontains': self.data.get('content')}) + query_set.order_by('-create_time', 'id') return query_set def list(self): @@ -677,17 +679,17 @@ def get_request_params_api(): in_=openapi.IN_PATH, type=openapi.TYPE_STRING, required=True, - description='文档id'), + description=_('document id')), openapi.Parameter(name='title', in_=openapi.IN_QUERY, 
type=openapi.TYPE_STRING, required=False, - description='标题'), + description=_('title')), openapi.Parameter(name='content', in_=openapi.IN_QUERY, type=openapi.TYPE_STRING, required=False, - description='内容') + description=_('content')) ] @staticmethod @@ -700,28 +702,67 @@ def get_response_body_api(): properties={ 'id': openapi.Schema(type=openapi.TYPE_STRING, title="id", description="id", default="xx"), - 'content': openapi.Schema(type=openapi.TYPE_STRING, title="段落内容", - description="段落内容", default='段落内容'), - 'title': openapi.Schema(type=openapi.TYPE_STRING, title="标题", - description="标题", default="xxx的描述"), - 'hit_num': openapi.Schema(type=openapi.TYPE_INTEGER, title="命中数量", description="命中数量", + 'content': openapi.Schema(type=openapi.TYPE_STRING, title=_('content'), + description=_('content'), default=_('content')), + 'title': openapi.Schema(type=openapi.TYPE_STRING, title=_('title'), + description=_('title'), default="xxx"), + 'hit_num': openapi.Schema(type=openapi.TYPE_INTEGER, title=_('hit num'), description=_('hit num'), default=1), - 'star_num': openapi.Schema(type=openapi.TYPE_INTEGER, title="点赞数量", - description="点赞数量", default=1), - 'trample_num': openapi.Schema(type=openapi.TYPE_INTEGER, title="点踩数量", - description="点踩数", default=1), - 'dataset_id': openapi.Schema(type=openapi.TYPE_STRING, title="知识库id", - description="知识库id", default='xxx'), - 'document_id': openapi.Schema(type=openapi.TYPE_STRING, title="文档id", - description="文档id", default='xxx'), - 'is_active': openapi.Schema(type=openapi.TYPE_BOOLEAN, title="是否可用", - description="是否可用", default=True), - 'update_time': openapi.Schema(type=openapi.TYPE_STRING, title="修改时间", - description="修改时间", + 'star_num': openapi.Schema(type=openapi.TYPE_INTEGER, title=_('Number of likes'), + description=_('Number of likes'), default=1), + 'trample_num': openapi.Schema(type=openapi.TYPE_INTEGER, title=_('Number of dislikes'), + description=_('Number of dislikes'), default=1), + 'dataset_id': 
openapi.Schema(type=openapi.TYPE_STRING, title=_('dataset id'), + description=_('dataset id'), default='xxx'), + 'document_id': openapi.Schema(type=openapi.TYPE_STRING, title=_('document id'), + description=_('document id'), default='xxx'), + 'is_active': openapi.Schema(type=openapi.TYPE_BOOLEAN, title=_('Is active'), + description=_('Is active'), default=True), + 'update_time': openapi.Schema(type=openapi.TYPE_STRING, title=_('update time'), + description=_('update time'), default="1970-01-01 00:00:00"), - 'create_time': openapi.Schema(type=openapi.TYPE_STRING, title="创建时间", - description="创建时间", + 'create_time': openapi.Schema(type=openapi.TYPE_STRING, title=_('create time'), + description=_('create time'), default="1970-01-01 00:00:00" ) } ) + + class BatchGenerateRelated(ApiMixin, serializers.Serializer): + dataset_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid(_('dataset id'))) + document_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid(_('document id'))) + + def batch_generate_related(self, instance: Dict, with_valid=True): + if with_valid: + self.is_valid(raise_exception=True) + paragraph_id_list = instance.get("paragraph_id_list") + model_id = instance.get("model_id") + prompt = instance.get("prompt") + document_id = self.data.get('document_id') + ListenerManagement.update_status(QuerySet(Document).filter(id=document_id), + TaskType.GENERATE_PROBLEM, + State.PENDING) + ListenerManagement.update_status(QuerySet(Paragraph).filter(id__in=paragraph_id_list), + TaskType.GENERATE_PROBLEM, + State.PENDING) + ListenerManagement.get_aggregation_document_status(document_id)() + try: + generate_related_by_paragraph_id_list.delay(document_id, paragraph_id_list, model_id, + prompt) + except AlreadyQueued as e: + raise AppApiException(500, _('The task is being executed, please do not send it again.')) + + +def delete_problems_and_mappings(paragraph_ids): + problem_paragraph_mappings = 
ProblemParagraphMapping.objects.filter(paragraph_id__in=paragraph_ids) + problem_ids = set(problem_paragraph_mappings.values_list('problem_id', flat=True)) + + if problem_ids: + problem_paragraph_mappings.delete() + remaining_problem_counts = ProblemParagraphMapping.objects.filter(problem_id__in=problem_ids).values( + 'problem_id').annotate(count=Count('problem_id')) + remaining_problem_ids = {pc['problem_id'] for pc in remaining_problem_counts} + problem_ids_to_delete = problem_ids - remaining_problem_ids + Problem.objects.filter(id__in=problem_ids_to_delete).delete() + else: + problem_paragraph_mappings.delete() diff --git a/apps/dataset/serializers/problem_serializers.py b/apps/dataset/serializers/problem_serializers.py index 5d00d5be4cc..c4b7a3c14cd 100644 --- a/apps/dataset/serializers/problem_serializers.py +++ b/apps/dataset/serializers/problem_serializers.py @@ -8,6 +8,7 @@ """ import os import uuid +from functools import reduce from typing import Dict, List from django.db import transaction @@ -16,12 +17,15 @@ from rest_framework import serializers from common.db.search import native_search, native_page_search -from common.event import ListenerManagement, UpdateProblemArgs from common.mixins.api_mixin import ApiMixin from common.util.field_message import ErrMessage from common.util.file_util import get_file_content -from dataset.models import Problem, Paragraph, ProblemParagraphMapping +from dataset.models import Problem, Paragraph, ProblemParagraphMapping, DataSet +from dataset.serializers.common_serializers import get_embedding_model_id_by_dataset_id +from embedding.models import SourceType +from embedding.task import delete_embedding_by_source_ids, update_problem_embedding, embedding_by_data_list from smartdoc.conf import PROJECT_DIR +from django.utils.translation import gettext_lazy as _ class ProblemSerializer(serializers.ModelSerializer): @@ -32,9 +36,9 @@ class Meta: class ProblemInstanceSerializer(ApiMixin, serializers.Serializer): - id = 
serializers.CharField(required=False, error_messages=ErrMessage.char("问题id")) + id = serializers.CharField(required=False, error_messages=ErrMessage.char(_('problem id'))) - content = serializers.CharField(required=True, max_length=256, error_messages=ErrMessage.char("问题内容")) + content = serializers.CharField(required=True, max_length=256, error_messages=ErrMessage.char(_('content'))) @staticmethod def get_request_body_api(): @@ -43,18 +47,48 @@ def get_request_body_api(): properties={ 'id': openapi.Schema( type=openapi.TYPE_STRING, - title="问题id,修改的时候传递,创建的时候不传"), + title=_('Issue ID is passed when modifying, not when creating.')), 'content': openapi.Schema( - type=openapi.TYPE_STRING, title="内容") + type=openapi.TYPE_STRING, title=_('content'),) }) +class AssociationParagraph(serializers.Serializer): + paragraph_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid(_('paragraph id'))) + document_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid(_('document id'))) + + +class BatchAssociation(serializers.Serializer): + problem_id_list = serializers.ListField(required=True, error_messages=ErrMessage.list(_('problem id list')), + child=serializers.UUIDField(required=True, + error_messages=ErrMessage.uuid(_('problem id')))) + paragraph_list = AssociationParagraph(many=True) + + +def is_exits(exits_problem_paragraph_mapping_list, new_paragraph_mapping): + filter_list = [exits_problem_paragraph_mapping for exits_problem_paragraph_mapping in + exits_problem_paragraph_mapping_list if + str(exits_problem_paragraph_mapping.paragraph_id) == new_paragraph_mapping.paragraph_id + and str(exits_problem_paragraph_mapping.problem_id) == new_paragraph_mapping.problem_id + and str(exits_problem_paragraph_mapping.dataset_id) == new_paragraph_mapping.dataset_id] + return len(filter_list) > 0 + + +def to_problem_paragraph_mapping(problem, document_id: str, paragraph_id: str, dataset_id: str): + return ProblemParagraphMapping(id=uuid.uuid1(), + 
document_id=document_id, + paragraph_id=paragraph_id, + dataset_id=dataset_id, + problem_id=str(problem.id)), problem + + class ProblemSerializers(ApiMixin, serializers.Serializer): class Create(serializers.Serializer): - dataset_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid("知识库id")) - problem_list = serializers.ListField(required=True, error_messages=ErrMessage.list("问题列表"), + dataset_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid(_('dataset id'))) + problem_list = serializers.ListField(required=True, error_messages=ErrMessage.list(_('problem list')), child=serializers.CharField(required=True, - error_messages=ErrMessage.char("问题"))) + max_length=256, + error_messages=ErrMessage.char(_('problem')))) def batch(self, with_valid=True): if with_valid: @@ -75,8 +109,8 @@ def batch(self, with_valid=True): return [ProblemSerializer(problem_instance).data for problem_instance in problem_instance_list] class Query(serializers.Serializer): - dataset_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid("知识库id")) - content = serializers.CharField(required=False, error_messages=ErrMessage.char("问题")) + dataset_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid(_('dataset id'))) + content = serializers.CharField(required=False, error_messages=ErrMessage.char(_('content'))) def get_query_set(self): query_set = QuerySet(model=Problem) @@ -98,7 +132,7 @@ def page(self, current_page, page_size): os.path.join(PROJECT_DIR, "apps", "dataset", 'sql', 'list_problem.sql'))) class BatchOperate(serializers.Serializer): - dataset_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid("知识库id")) + dataset_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid(_('dataset id'))) def delete(self, problem_id_list: List, with_valid=True): if with_valid: @@ -110,13 +144,54 @@ def delete(self, problem_id_list: List, with_valid=True): source_ids = [row.id for row in 
problem_paragraph_mapping_list] problem_paragraph_mapping_list.delete() QuerySet(Problem).filter(id__in=problem_id_list).delete() - ListenerManagement.delete_embedding_by_source_ids_signal.send(source_ids) + delete_embedding_by_source_ids(source_ids) return True + def association(self, instance: Dict, with_valid=True): + if with_valid: + self.is_valid(raise_exception=True) + BatchAssociation(data=instance).is_valid(raise_exception=True) + dataset_id = self.data.get('dataset_id') + paragraph_list = instance.get('paragraph_list') + problem_id_list = instance.get('problem_id_list') + problem_list = QuerySet(Problem).filter(id__in=problem_id_list) + exits_problem_paragraph_mapping = QuerySet(ProblemParagraphMapping).filter(problem_id__in=problem_id_list, + paragraph_id__in=[ + p.get('paragraph_id') + for p in + paragraph_list]) + problem_paragraph_mapping_list = [(problem_paragraph_mapping, problem) for + problem_paragraph_mapping, problem in reduce(lambda x, y: [*x, *y], + [[ + to_problem_paragraph_mapping( + problem, + paragraph.get( + 'document_id'), + paragraph.get( + 'paragraph_id'), + dataset_id) for + paragraph in + paragraph_list] + for problem in + problem_list], []) if + not is_exits(exits_problem_paragraph_mapping, problem_paragraph_mapping)] + QuerySet(ProblemParagraphMapping).bulk_create( + [problem_paragraph_mapping for problem_paragraph_mapping, problem in problem_paragraph_mapping_list]) + data_list = [{'text': problem.content, + 'is_active': True, + 'source_type': SourceType.PROBLEM, + 'source_id': str(problem_paragraph_mapping.id), + 'document_id': str(problem_paragraph_mapping.document_id), + 'paragraph_id': str(problem_paragraph_mapping.paragraph_id), + 'dataset_id': dataset_id, + } for problem_paragraph_mapping, problem in problem_paragraph_mapping_list] + model_id = get_embedding_model_id_by_dataset_id(self.data.get('dataset_id')) + embedding_by_data_list(data_list, model_id=model_id) + class Operate(serializers.Serializer): - dataset_id = 
serializers.UUIDField(required=True, error_messages=ErrMessage.uuid("知识库id")) + dataset_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid(_('dataset id'))) - problem_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid("问题id")) + problem_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid(_('problem id'))) def list_paragraph(self, with_valid=True): if with_valid: @@ -145,7 +220,7 @@ def delete(self, with_valid=True): source_ids = [row.id for row in problem_paragraph_mapping_list] problem_paragraph_mapping_list.delete() QuerySet(Problem).filter(id=self.data.get('problem_id')).delete() - ListenerManagement.delete_embedding_by_source_ids_signal.send(source_ids) + delete_embedding_by_source_ids(source_ids) return True @transaction.atomic @@ -157,6 +232,8 @@ def edit(self, instance: Dict, with_valid=True): content = instance.get('content') problem = QuerySet(Problem).filter(id=problem_id, dataset_id=dataset_id).first() + QuerySet(DataSet).filter(id=dataset_id) problem.content = content problem.save() - ListenerManagement.update_problem_signal.send(UpdateProblemArgs(problem_id, content)) + model_id = get_embedding_model_id_by_dataset_id(dataset_id) + update_problem_embedding(problem_id, content, model_id) diff --git a/apps/dataset/sql/list_document.sql b/apps/dataset/sql/list_document.sql index 818d783c834..8b7891bf6e6 100644 --- a/apps/dataset/sql/list_document.sql +++ b/apps/dataset/sql/list_document.sql @@ -1,6 +1,11 @@ +SELECT * from ( SELECT "document".* , to_json("document"."meta") as meta, + to_json("document"."status_meta") as status_meta, (SELECT "count"("id") FROM "paragraph" WHERE document_id="document"."id") as "paragraph_count" FROM "document" "document" +${document_custom_sql} +) temp +${order_by_query} \ No newline at end of file diff --git a/apps/dataset/sql/list_paragraph_document_name.sql b/apps/dataset/sql/list_paragraph_document_name.sql new file mode 100644 index 00000000000..a95209bf5b8 
--- /dev/null +++ b/apps/dataset/sql/list_paragraph_document_name.sql @@ -0,0 +1,5 @@ +SELECT + (SELECT "name" FROM "document" WHERE "id"=document_id) as document_name, + * +FROM + "paragraph" diff --git a/apps/dataset/sql/list_problem_mapping.sql b/apps/dataset/sql/list_problem_mapping.sql new file mode 100644 index 00000000000..8c8ac3c3005 --- /dev/null +++ b/apps/dataset/sql/list_problem_mapping.sql @@ -0,0 +1,2 @@ +SELECT "problem"."content",problem_paragraph_mapping.paragraph_id FROM problem problem +LEFT JOIN problem_paragraph_mapping problem_paragraph_mapping ON problem_paragraph_mapping.problem_id=problem."id" \ No newline at end of file diff --git a/apps/dataset/sql/update_document_char_length.sql b/apps/dataset/sql/update_document_char_length.sql index a09c8cabba1..2781809b23d 100644 --- a/apps/dataset/sql/update_document_char_length.sql +++ b/apps/dataset/sql/update_document_char_length.sql @@ -1,4 +1,8 @@ UPDATE "document" -SET "char_length" = ( SELECT "sum" ( "char_length" ( "content" ) ) FROM paragraph WHERE "document_id" = %s ) +SET "char_length" = ( SELECT CASE WHEN + "sum" ( "char_length" ( "content" ) ) IS NULL THEN + 0 ELSE "sum" ( "char_length" ( "content" ) ) + END FROM paragraph WHERE "document_id" = %s ), + "update_time" = CURRENT_TIMESTAMP WHERE "id" = %s \ No newline at end of file diff --git a/apps/dataset/sql/update_document_status_meta.sql b/apps/dataset/sql/update_document_status_meta.sql new file mode 100644 index 00000000000..6065931ff75 --- /dev/null +++ b/apps/dataset/sql/update_document_status_meta.sql @@ -0,0 +1,25 @@ +UPDATE "document" "document" +SET status_meta = jsonb_set ( "document".status_meta, '{aggs}', tmp.status_meta ) +FROM + ( + SELECT COALESCE + ( jsonb_agg ( jsonb_delete ( ( row_to_json ( record ) :: JSONB ), 'document_id' ) ), '[]' :: JSONB ) AS status_meta, + document_id AS document_id + FROM + ( + SELECT + "paragraph".status, + "count" ( "paragraph"."id" ), + "document"."id" AS document_id + FROM + "document" 
"document" + LEFT JOIN "paragraph" "paragraph" ON "document"."id" = paragraph.document_id + ${document_custom_sql} + GROUP BY + "paragraph".status, + "document"."id" + ) record + GROUP BY + document_id + ) tmp +WHERE "document".id="tmp".document_id \ No newline at end of file diff --git a/apps/dataset/sql/update_paragraph_status.sql b/apps/dataset/sql/update_paragraph_status.sql new file mode 100644 index 00000000000..1e2fc6f0864 --- /dev/null +++ b/apps/dataset/sql/update_paragraph_status.sql @@ -0,0 +1,13 @@ +UPDATE "${table_name}" +SET status = reverse ( + SUBSTRING ( reverse ( LPAD( status, ${bit_number}, 'n' ) ) :: TEXT FROM 1 FOR ${up_index} ) || ${status_number} || SUBSTRING ( reverse ( LPAD( status, ${bit_number}, 'n' ) ) :: TEXT FROM ${next_index} ) +), +status_meta = jsonb_set ( + "${table_name}".status_meta, + '{state_time,${current_index}}', + jsonb_set ( + COALESCE ( "${table_name}".status_meta #> '{state_time,${current_index}}', jsonb_build_object ( '${status_number}', '${current_time}' ) ), + '{${status_number}}', + CONCAT ( '"', '${current_time}', '"' ) :: JSONB + ) + ) \ No newline at end of file diff --git a/apps/dataset/swagger_api/document_api.py b/apps/dataset/swagger_api/document_api.py index 637a7e5098a..eeac9cdc8c6 100644 --- a/apps/dataset/swagger_api/document_api.py +++ b/apps/dataset/swagger_api/document_api.py @@ -9,6 +9,7 @@ from drf_yasg import openapi from common.mixins.api_mixin import ApiMixin +from django.utils.translation import gettext_lazy as _ class DocumentApi(ApiMixin): @@ -19,10 +20,48 @@ def get_request_body_api(): type=openapi.TYPE_OBJECT, properties={ 'id_list': openapi.Schema(type=openapi.TYPE_ARRAY, items=openapi.Schema(type=openapi.TYPE_STRING), - title="主键id列表", - description="主键id列表"), - 'hit_handling_method': openapi.Schema(type=openapi.TYPE_STRING, title="命中处理方式", + title=_('id list'), + description=_('id list')), + 'hit_handling_method': openapi.Schema(type=openapi.TYPE_STRING, title=_('hit handling method'), 
description="directly_return|optimization"), - 'directly_return_similarity': openapi.Schema(type=openapi.TYPE_NUMBER, title="直接返回相似度") + 'directly_return_similarity': openapi.Schema(type=openapi.TYPE_NUMBER, title=_('directly return similarity')) + } + ) + + class Cancel(ApiMixin): + @staticmethod + def get_request_body_api(): + return openapi.Schema( + type=openapi.TYPE_OBJECT, + properties={ + 'type': openapi.Schema(type=openapi.TYPE_INTEGER, title=_('task type'), + description=_('1|2|3 1:Vectorization|2:Generate issues|3:Synchronize documents')) + } + ) + + class BatchCancel(ApiMixin): + @staticmethod + def get_request_body_api(): + return openapi.Schema( + type=openapi.TYPE_OBJECT, + properties={ + 'id_list': openapi.Schema(type=openapi.TYPE_ARRAY, items=openapi.Schema(type=openapi.TYPE_STRING), + title=_('id list'), + description=_('id list')), + 'type': openapi.Schema(type=openapi.TYPE_INTEGER, title=_('task type'), + description=_('1|2|3 1:Vectorization|2:Generate issues|3:Synchronize documents'), default=1) + } + ) + + class EmbeddingState(ApiMixin): + @staticmethod + def get_request_body_api(): + return openapi.Schema( + type=openapi.TYPE_OBJECT, + properties={ + 'state_list': openapi.Schema(type=openapi.TYPE_ARRAY, + items=openapi.Schema(type=openapi.TYPE_STRING), + title=_('state list'), + description=_('state list')) } ) diff --git a/apps/dataset/swagger_api/image_api.py b/apps/dataset/swagger_api/image_api.py index f69b94719f2..f2124cced88 100644 --- a/apps/dataset/swagger_api/image_api.py +++ b/apps/dataset/swagger_api/image_api.py @@ -9,6 +9,7 @@ from drf_yasg import openapi from common.mixins.api_mixin import ApiMixin +from django.utils.translation import gettext_lazy as _ class ImageApi(ApiMixin): @@ -18,5 +19,5 @@ def get_request_params_api(): in_=openapi.IN_FORM, type=openapi.TYPE_FILE, required=True, - description='上传图片文件') + description=_('image file')) ] diff --git a/apps/dataset/swagger_api/problem_api.py 
b/apps/dataset/swagger_api/problem_api.py index a7397aaaff5..da0256b35b6 100644 --- a/apps/dataset/swagger_api/problem_api.py +++ b/apps/dataset/swagger_api/problem_api.py @@ -9,6 +9,7 @@ from drf_yasg import openapi from common.mixins.api_mixin import ApiMixin +from django.utils.translation import gettext_lazy as _ class ProblemApi(ApiMixin): @@ -20,22 +21,52 @@ def get_response_body_api(): properties={ 'id': openapi.Schema(type=openapi.TYPE_STRING, title="id", description="id", default="xx"), - 'content': openapi.Schema(type=openapi.TYPE_STRING, title="问题内容", - description="问题内容", default='问题内容'), - 'hit_num': openapi.Schema(type=openapi.TYPE_INTEGER, title="命中数量", description="命中数量", + 'content': openapi.Schema(type=openapi.TYPE_STRING, title=_('content'), + description=_('content'), default=_('content')), + 'hit_num': openapi.Schema(type=openapi.TYPE_INTEGER, title=_('hit num'), description=_('hit num'), default=1), - 'dataset_id': openapi.Schema(type=openapi.TYPE_STRING, title="知识库id", - description="知识库id", default='xxx'), - 'update_time': openapi.Schema(type=openapi.TYPE_STRING, title="修改时间", - description="修改时间", + 'dataset_id': openapi.Schema(type=openapi.TYPE_STRING, title=_('dataset id'), + description=_('dataset id'), default='xxx'), + 'update_time': openapi.Schema(type=openapi.TYPE_STRING, title=_('update time'), + description=_('update time'), default="1970-01-01 00:00:00"), - 'create_time': openapi.Schema(type=openapi.TYPE_STRING, title="创建时间", - description="创建时间", + 'create_time': openapi.Schema(type=openapi.TYPE_STRING, title=_('create time'), + description=_('create time'), default="1970-01-01 00:00:00" ) } ) + class BatchAssociation(ApiMixin): + @staticmethod + def get_request_params_api(): + return ProblemApi.BatchOperate.get_request_params_api() + + @staticmethod + def get_request_body_api(): + return openapi.Schema( + type=openapi.TYPE_OBJECT, + required=['problem_id_list'], + properties={ + 'problem_id_list': 
openapi.Schema(type=openapi.TYPE_ARRAY, title=_('problem id list'), + description=_('problem id list'), + items=openapi.Schema(type=openapi.TYPE_STRING)), + 'paragraph_list': openapi.Schema(type=openapi.TYPE_ARRAY, title=_('Associated paragraph information list'), + description=_('Associated paragraph information list'), + items=openapi.Schema(type=openapi.TYPE_OBJECT, + required=['paragraph_id', 'document_id'], + properties={ + 'paragraph_id': openapi.Schema( + type=openapi.TYPE_STRING, + title=_('paragraph id')), + 'document_id': openapi.Schema( + type=openapi.TYPE_STRING, + title=_('document id')) + })) + + } + ) + class BatchOperate(ApiMixin): @staticmethod def get_request_params_api(): @@ -43,14 +74,14 @@ def get_request_params_api(): in_=openapi.IN_PATH, type=openapi.TYPE_STRING, required=True, - description='知识库id'), + description=_('dataset id')), ] @staticmethod def get_request_body_api(): return openapi.Schema( - title="问题id列表", - description="问题id列表", + title=_('problem id list'), + description=_('problem id list'), type=openapi.TYPE_ARRAY, items=openapi.Schema(type=openapi.TYPE_STRING) ) @@ -62,12 +93,12 @@ def get_request_params_api(): in_=openapi.IN_PATH, type=openapi.TYPE_STRING, required=True, - description='知识库id'), + description=_('dataset id')), openapi.Parameter(name='problem_id', in_=openapi.IN_PATH, type=openapi.TYPE_STRING, required=True, - description='问题id')] + description=_('problem id'))] @staticmethod def get_request_body_api(): @@ -75,8 +106,8 @@ def get_request_body_api(): type=openapi.TYPE_OBJECT, required=['content'], properties={ - 'content': openapi.Schema(type=openapi.TYPE_STRING, title="问题内容", - description="问题内容"), + 'content': openapi.Schema(type=openapi.TYPE_STRING, title=_('content'), + description=_('content')), } ) @@ -92,17 +123,17 @@ def get_response_body_api(): type=openapi.TYPE_OBJECT, required=['content'], properties={ - 'content': openapi.Schema(type=openapi.TYPE_STRING, max_length=4096, title="分段内容", - 
description="分段内容"), - 'title': openapi.Schema(type=openapi.TYPE_STRING, max_length=256, title="分段标题", - description="分段标题"), - 'is_active': openapi.Schema(type=openapi.TYPE_BOOLEAN, title="是否可用", description="是否可用"), - 'hit_num': openapi.Schema(type=openapi.TYPE_NUMBER, title="命中次数", description="命中次数"), - 'update_time': openapi.Schema(type=openapi.TYPE_STRING, title="修改时间", - description="修改时间", + 'content': openapi.Schema(type=openapi.TYPE_STRING, max_length=4096, title=_('content'), + description=_('content')), + 'title': openapi.Schema(type=openapi.TYPE_STRING, max_length=256, title=_('Section title'), + description=_('Section title')), + 'is_active': openapi.Schema(type=openapi.TYPE_BOOLEAN, title=_('Is active'), description=_('Is active')), + 'hit_num': openapi.Schema(type=openapi.TYPE_NUMBER, title=_('Hit num'), description=_('Hit num')), + 'update_time': openapi.Schema(type=openapi.TYPE_STRING, title=_('update time'), + description=_('update time'), default="1970-01-01 00:00:00"), - 'create_time': openapi.Schema(type=openapi.TYPE_STRING, title="创建时间", - description="创建时间", + 'create_time': openapi.Schema(type=openapi.TYPE_STRING, title=_('create time'), + description=_('create time'), default="1970-01-01 00:00:00" ), } @@ -115,12 +146,12 @@ def get_request_params_api(): in_=openapi.IN_PATH, type=openapi.TYPE_STRING, required=True, - description='知识库id'), + description=_('dataset id')), openapi.Parameter(name='content', in_=openapi.IN_QUERY, type=openapi.TYPE_STRING, required=False, - description='问题')] + description=_('content')),] class BatchCreate(ApiMixin): @staticmethod @@ -135,7 +166,7 @@ def get_request_params_api(): class Create(ApiMixin): @staticmethod def get_request_body_api(): - return openapi.Schema(type=openapi.TYPE_STRING, description="问题文本") + return openapi.Schema(type=openapi.TYPE_STRING, description=_('content'), title=_('content')) @staticmethod def get_request_params_api(): @@ -143,4 +174,4 @@ def get_request_params_api(): 
in_=openapi.IN_PATH, type=openapi.TYPE_STRING, required=True, - description='知识库id')] + description=_('dataset id'))] diff --git a/apps/dataset/task/__init__.py b/apps/dataset/task/__init__.py new file mode 100644 index 00000000000..7bb1839d3aa --- /dev/null +++ b/apps/dataset/task/__init__.py @@ -0,0 +1,10 @@ +# coding=utf-8 +""" + @project: MaxKB + @Author:虎 + @file: __init__.py + @date:2024/8/21 9:57 + @desc: +""" +from .sync import * +from .generate import * diff --git a/apps/dataset/task/generate.py b/apps/dataset/task/generate.py new file mode 100644 index 00000000000..53b0c71ff06 --- /dev/null +++ b/apps/dataset/task/generate.py @@ -0,0 +1,139 @@ +import logging +import traceback + +from celery_once import QueueOnce +from django.db.models import QuerySet +from django.db.models.functions import Reverse, Substr +from langchain_core.messages import HumanMessage + +from common.config.embedding_config import ModelManage +from common.event import ListenerManagement +from common.util.page_utils import page, page_desc +from dataset.models import Paragraph, Document, Status, TaskType, State +from dataset.task.tools import save_problem +from ops import celery_app +from setting.models import Model +from setting.models_provider import get_model +from django.utils.translation import gettext_lazy as _ + +max_kb_error = logging.getLogger("max_kb_error") +max_kb = logging.getLogger("max_kb") + + +def get_llm_model(model_id): + model = QuerySet(Model).filter(id=model_id).first() + return ModelManage.get_model(model_id, lambda _id: get_model(model)) + + +def generate_problem_by_paragraph(paragraph, llm_model, prompt): + try: + ListenerManagement.update_status(QuerySet(Paragraph).filter(id=paragraph.id), TaskType.GENERATE_PROBLEM, + State.STARTED) + res = llm_model.invoke( + [HumanMessage(content=prompt.replace('{data}', paragraph.content).replace('{title}', paragraph.title))]) + if (res.content is None) or (len(res.content) == 0): + return + problems = res.content.split('\n') 
+ for problem in problems: + save_problem(paragraph.dataset_id, paragraph.document_id, paragraph.id, problem) + ListenerManagement.update_status(QuerySet(Paragraph).filter(id=paragraph.id), TaskType.GENERATE_PROBLEM, + State.SUCCESS) + except Exception as e: + ListenerManagement.update_status(QuerySet(Paragraph).filter(id=paragraph.id), TaskType.GENERATE_PROBLEM, + State.FAILURE) + + +def get_generate_problem(llm_model, prompt, post_apply=lambda: None, is_the_task_interrupted=lambda: False): + def generate_problem(paragraph_list): + for paragraph in paragraph_list: + if is_the_task_interrupted(): + return + generate_problem_by_paragraph(paragraph, llm_model, prompt) + post_apply() + + return generate_problem + + +def get_is_the_task_interrupted(document_id): + def is_the_task_interrupted(): + document = QuerySet(Document).filter(id=document_id).first() + if document is None or Status(document.status)[TaskType.GENERATE_PROBLEM] == State.REVOKE: + return True + return False + + return is_the_task_interrupted + + +@celery_app.task(base=QueueOnce, once={'keys': ['dataset_id']}, + name='celery:generate_related_by_dataset') +def generate_related_by_dataset_id(dataset_id, model_id, prompt, state_list=None): + document_list = QuerySet(Document).filter(dataset_id=dataset_id) + for document in document_list: + try: + generate_related_by_document_id.delay(document.id, model_id, prompt, state_list) + except Exception as e: + pass + + +@celery_app.task(base=QueueOnce, once={'keys': ['document_id']}, + name='celery:generate_related_by_document') +def generate_related_by_document_id(document_id, model_id, prompt, state_list=None): + if state_list is None: + state_list = [State.PENDING.value, State.STARTED.value, State.SUCCESS.value, State.FAILURE.value, + State.REVOKE.value, + State.REVOKED.value, State.IGNORED.value] + try: + is_the_task_interrupted = get_is_the_task_interrupted(document_id) + if is_the_task_interrupted(): + return + 
ListenerManagement.update_status(QuerySet(Document).filter(id=document_id), + TaskType.GENERATE_PROBLEM, + State.STARTED) + llm_model = get_llm_model(model_id) + + # 生成问题函数 + generate_problem = get_generate_problem(llm_model, prompt, + ListenerManagement.get_aggregation_document_status( + document_id), is_the_task_interrupted) + query_set = QuerySet(Paragraph).annotate( + reversed_status=Reverse('status'), + task_type_status=Substr('reversed_status', TaskType.GENERATE_PROBLEM.value, + 1), + ).filter(task_type_status__in=state_list, document_id=document_id) + page_desc(query_set, 10, generate_problem, is_the_task_interrupted) + except Exception as e: + max_kb_error.error(f'根据文档生成问题:{document_id}出现错误{str(e)}{traceback.format_exc()}') + max_kb_error.error(_('Generate issue based on document: {document_id} error {error}{traceback}').format( + document_id=document_id, error=str(e), traceback=traceback.format_exc())) + finally: + ListenerManagement.post_update_document_status(document_id, TaskType.GENERATE_PROBLEM) + max_kb.info(_('End--->Generate problem: {document_id}').format(document_id=document_id)) + + +@celery_app.task(base=QueueOnce, once={'keys': ['paragraph_id_list']}, + name='celery:generate_related_by_paragraph_list') +def generate_related_by_paragraph_id_list(document_id, paragraph_id_list, model_id, prompt): + try: + is_the_task_interrupted = get_is_the_task_interrupted(document_id) + if is_the_task_interrupted(): + ListenerManagement.update_status(QuerySet(Document).filter(id=document_id), + TaskType.GENERATE_PROBLEM, + State.REVOKED) + return + ListenerManagement.update_status(QuerySet(Document).filter(id=document_id), + TaskType.GENERATE_PROBLEM, + State.STARTED) + llm_model = get_llm_model(model_id) + # 生成问题函数 + generate_problem = get_generate_problem(llm_model, prompt, ListenerManagement.get_aggregation_document_status( + document_id)) + + def is_the_task_interrupted(): + document = QuerySet(Document).filter(id=document_id).first() + if document is 
None or Status(document.status)[TaskType.GENERATE_PROBLEM] == State.REVOKE: + return True + return False + + page(QuerySet(Paragraph).filter(id__in=paragraph_id_list), 10, generate_problem, is_the_task_interrupted) + finally: + ListenerManagement.post_update_document_status(document_id, TaskType.GENERATE_PROBLEM) diff --git a/apps/dataset/task/sync.py b/apps/dataset/task/sync.py new file mode 100644 index 00000000000..16add2db1e4 --- /dev/null +++ b/apps/dataset/task/sync.py @@ -0,0 +1,61 @@ +# coding=utf-8 +""" + @project: MaxKB + @Author:虎 + @file: sync.py + @date:2024/8/20 21:37 + @desc: +""" + +import logging +import traceback +from typing import List + +from celery_once import QueueOnce + +from common.util.fork import ForkManage, Fork +from dataset.task.tools import get_save_handler, get_sync_web_document_handler, get_sync_handler + +from ops import celery_app +from django.utils.translation import gettext_lazy as _ + +max_kb_error = logging.getLogger("max_kb_error") +max_kb = logging.getLogger("max_kb") + + +@celery_app.task(base=QueueOnce, once={'keys': ['dataset_id']}, name='celery:sync_web_dataset') +def sync_web_dataset(dataset_id: str, url: str, selector: str): + try: + max_kb.info(_('Start--->Start synchronization web knowledge base:{dataset_id}').format(dataset_id=dataset_id)) + ForkManage(url, selector.split(" ") if selector is not None else []).fork(2, set(), + get_save_handler(dataset_id, + selector)) + + max_kb.info(_('End--->End synchronization web knowledge base:{dataset_id}').format(dataset_id=dataset_id)) + except Exception as e: + max_kb_error.error(_('Synchronize web knowledge base:{dataset_id} error{error}{traceback}').format( + dataset_id=dataset_id, error=str(e), traceback=traceback.format_exc())) + + +@celery_app.task(base=QueueOnce, once={'keys': ['dataset_id']}, name='celery:sync_replace_web_dataset') +def sync_replace_web_dataset(dataset_id: str, url: str, selector: str): + try: + max_kb.info(_('Start--->Start synchronization web 
knowledge base:{dataset_id}').format(dataset_id=dataset_id)) + ForkManage(url, selector.split(" ") if selector is not None else []).fork(2, set(), + get_sync_handler(dataset_id + )) + max_kb.info(_('End--->End synchronization web knowledge base:{dataset_id}').format(dataset_id=dataset_id)) + except Exception as e: + max_kb_error.error(_('Synchronize web knowledge base:{dataset_id} error{error}{traceback}').format( + dataset_id=dataset_id, error=str(e), traceback=traceback.format_exc())) + + +@celery_app.task(name='celery:sync_web_document') +def sync_web_document(dataset_id, source_url_list: List[str], selector: str): + handler = get_sync_web_document_handler(dataset_id) + for source_url in source_url_list: + try: + result = Fork(base_fork_url=source_url, selector_list=selector.split(' ')).fork() + handler(source_url, selector, result) + except Exception as e: + pass diff --git a/apps/dataset/task/tools.py b/apps/dataset/task/tools.py new file mode 100644 index 00000000000..84d3ac8d35f --- /dev/null +++ b/apps/dataset/task/tools.py @@ -0,0 +1,114 @@ +# coding=utf-8 +""" + @project: MaxKB + @Author:虎 + @file: tools.py + @date:2024/8/20 21:48 + @desc: +""" + +import logging +import re +import traceback + +from django.db.models import QuerySet + +from common.util.fork import ChildLink, Fork +from common.util.split_model import get_split_model +from dataset.models import Type, Document, DataSet, Status +from django.utils.translation import gettext_lazy as _ + +max_kb_error = logging.getLogger("max_kb_error") +max_kb = logging.getLogger("max_kb") + + +def get_save_handler(dataset_id, selector): + from dataset.serializers.document_serializers import DocumentSerializers + + def handler(child_link: ChildLink, response: Fork.Response): + if response.status == 200: + try: + document_name = child_link.tag.text if child_link.tag is not None and len( + child_link.tag.text.strip()) > 0 else child_link.url + paragraphs = get_split_model('web.md').parse(response.content) + 
DocumentSerializers.Create(data={'dataset_id': dataset_id}).save( + {'name': document_name, 'paragraphs': paragraphs, + 'meta': {'source_url': child_link.url, 'selector': selector}, + 'type': Type.web}, with_valid=True) + except Exception as e: + logging.getLogger("max_kb_error").error(f'{str(e)}:{traceback.format_exc()}') + + return handler + + +def get_sync_handler(dataset_id): + from dataset.serializers.document_serializers import DocumentSerializers + dataset = QuerySet(DataSet).filter(id=dataset_id).first() + + def handler(child_link: ChildLink, response: Fork.Response): + if response.status == 200: + try: + + document_name = child_link.tag.text if child_link.tag is not None and len( + child_link.tag.text.strip()) > 0 else child_link.url + paragraphs = get_split_model('web.md').parse(response.content) + first = QuerySet(Document).filter(meta__source_url=child_link.url.strip(), + dataset=dataset).first() + if first is not None: + # 如果存在,使用文档同步 + DocumentSerializers.Sync(data={'document_id': first.id}).sync() + else: + # 插入 + DocumentSerializers.Create(data={'dataset_id': dataset.id}).save( + {'name': document_name, 'paragraphs': paragraphs, + 'meta': {'source_url': child_link.url.strip(), 'selector': dataset.meta.get('selector')}, + 'type': Type.web}, with_valid=True) + except Exception as e: + logging.getLogger("max_kb_error").error(f'{str(e)}:{traceback.format_exc()}') + + return handler + + +def get_sync_web_document_handler(dataset_id): + from dataset.serializers.document_serializers import DocumentSerializers + + def handler(source_url: str, selector, response: Fork.Response): + if response.status == 200: + try: + paragraphs = get_split_model('web.md').parse(response.content) + # 插入 + DocumentSerializers.Create(data={'dataset_id': dataset_id}).save( + {'name': source_url[0:128], 'paragraphs': paragraphs, + 'meta': {'source_url': source_url, 'selector': selector}, + 'type': Type.web}, with_valid=True) + except Exception as e: + 
logging.getLogger("max_kb_error").error(f'{str(e)}:{traceback.format_exc()}') + else: + Document(name=source_url[0:128], + dataset_id=dataset_id, + meta={'source_url': source_url, 'selector': selector}, + type=Type.web, + char_length=0, + status=Status.error).save() + + return handler + + +def save_problem(dataset_id, document_id, paragraph_id, problem): + from dataset.serializers.paragraph_serializers import ParagraphSerializers + # print(f"dataset_id: {dataset_id}") + # print(f"document_id: {document_id}") + # print(f"paragraph_id: {paragraph_id}") + # print(f"problem: {problem}") + problem = re.sub(r"^\d+\.\s*", "", problem) + pattern = r"<question>(.*?)</question>" + match = re.search(pattern, problem) + problem = match.group(1) if match else None + if problem is None or len(problem) == 0: + return + try: + ParagraphSerializers.Problem( + data={"dataset_id": dataset_id, 'document_id': document_id, + 'paragraph_id': paragraph_id}).save(instance={"content": problem}, with_valid=True) + except Exception as e: + max_kb_error.error(_('Association problem failed {error}').format(error=str(e))) diff --git a/apps/dataset/template/csv_template_en.csv b/apps/dataset/template/csv_template_en.csv new file mode 100644 index 00000000000..7a036c37e6d --- /dev/null +++ b/apps/dataset/template/csv_template_en.csv @@ -0,0 +1,5 @@ +Section title (optional), Section content (required,question answer), Question (optional,one per line in the cell) +MaxKB product introduction,"MaxKB is a knowledge base question-answering system based on the LLM large language model. 
MaxKB = Max Knowledge Base,aims to become the most powerful brain of the enterprise。Out-of-the-box: supports direct document upload、automatic crawling of online documents、automatic text splitting and vectorization、and good intelligent question-answering interactive experience;Seamless embedding: supports zero-coding and rapid embedding into third-party business systems;Multi-model support: supports docking with mainstream large models,including Ollama local private large models (such as Llama 2、Llama 3、qwen)、Tongyi Qianwen、OpenAI、Azure OpenAI、Kimi、Zhipu AI、iFlytek Spark and Baidu Qianfan large models、etc.","What is MaxKB? +MaxKB product introduction +Large language model supported by MaxKB +MaxKB advantages" \ No newline at end of file diff --git a/apps/dataset/template/csv_template_zh.csv b/apps/dataset/template/csv_template_zh.csv new file mode 100644 index 00000000000..e9d9d8c3d32 --- /dev/null +++ b/apps/dataset/template/csv_template_zh.csv @@ -0,0 +1,8 @@ +分段标题(选填),分段内容(必填,问题答案)),问题(选填,单元格内一行一个) +MaxKB产品介绍,"MaxKB 是一款基于 LLM 大语言模型的知识库问答系统。MaxKB = Max Knowledge Base,旨在成为企业的最强大脑。 +开箱即用:支持直接上传文档、自动爬取在线文档,支持文本自动拆分、向量化,智能问答交互体验好; +无缝嵌入:支持零编码快速嵌入到第三方业务系统; +多模型支持:支持对接主流的大模型,包括 Ollama 本地私有大模型(如 Llama 2、Llama 3、qwen)、通义千问、OpenAI、Azure OpenAI、Kimi、智谱 AI、讯飞星火和百度千帆大模型等。","MaxKB是什么? +MaxKB产品介绍 +MaxKB支持的大语言模型 +MaxKB优势" diff --git a/apps/dataset/template/csv_template_zh_Hant.csv b/apps/dataset/template/csv_template_zh_Hant.csv new file mode 100644 index 00000000000..62eadb9947c --- /dev/null +++ b/apps/dataset/template/csv_template_zh_Hant.csv @@ -0,0 +1,8 @@ +分段標題(選填),分段內容(必填,問題答案)),問題(選填,單元格內一行一個) +MaxKB產品介紹,"MaxKB 是一款基於 LLM 大語言模型的知識庫問答系統。MaxKB = Max Knowledge Base,旨在成為企業的最強大大腦。 +開箱即用:支援直接上傳文檔、自動爬取線上文檔,支援文字自動分割、向量化,智慧問答互動體驗好; +無縫嵌入:支援零編碼快速嵌入到第三方業務系統; +多模型支援:支持對接主流的大模型,包括Ollama 本地私有大模型(如Llama 2、Llama 3、qwen)、通義千問、OpenAI、Azure OpenAI、Kimi、智譜AI、訊飛星火和百度千帆大模型等。 ","MaxKB是什麼? 
+MaxKB產品介紹 +MaxKB支援的大語言模型 +MaxKB優勢" \ No newline at end of file diff --git a/apps/dataset/template/excel_template_en.xlsx b/apps/dataset/template/excel_template_en.xlsx new file mode 100644 index 00000000000..26800ea0692 Binary files /dev/null and b/apps/dataset/template/excel_template_en.xlsx differ diff --git a/apps/dataset/template/excel_template_zh.xlsx b/apps/dataset/template/excel_template_zh.xlsx new file mode 100644 index 00000000000..fd896c18b67 Binary files /dev/null and b/apps/dataset/template/excel_template_zh.xlsx differ diff --git a/apps/dataset/template/excel_template_zh_Hant.xlsx b/apps/dataset/template/excel_template_zh_Hant.xlsx new file mode 100644 index 00000000000..5227b6963d0 Binary files /dev/null and b/apps/dataset/template/excel_template_zh_Hant.xlsx differ diff --git a/apps/dataset/template/table_template_en.csv b/apps/dataset/template/table_template_en.csv new file mode 100644 index 00000000000..bce374cd736 --- /dev/null +++ b/apps/dataset/template/table_template_en.csv @@ -0,0 +1,13 @@ +Position, Reimbursement type, First-tier city reimbursement standard (yuan), Second-tier city reimbursement standard (yuan), Third-tier city reimbursement standard (yuan) +Ordinary employees, Accommodation expenses, 500, 400, 300 +Department head, Accommodation fee, 600, 500, 400 +Department director, Accommodation fee, 700, 600, 500 +Regional general manager, Accommodation fee, 800, 700, 600 +Ordinary employees, Food expenses, 50, 40, 30 +Department head, Food expenses, 50, 40, 30 +Department director, Food expenses, 50, 40, 30 +Regional general manager, Food expenses, 50, 40, 30 +Ordinary employees, Transportation expenses, 50, 40, 30 +Department head, Transportation expenses, 50, 40, 30 +Department director, Transportation expenses, 50, 40, 30 +Regional general manager, Transportation expenses, 50, 40, 30 \ No newline at end of file diff --git a/apps/dataset/template/table_template_en.xlsx b/apps/dataset/template/table_template_en.xlsx new file mode 
100644 index 00000000000..1e445822664 Binary files /dev/null and b/apps/dataset/template/table_template_en.xlsx differ diff --git a/apps/dataset/template/table_template_zh.csv b/apps/dataset/template/table_template_zh.csv new file mode 100644 index 00000000000..7cf0f6306e0 --- /dev/null +++ b/apps/dataset/template/table_template_zh.csv @@ -0,0 +1,13 @@ +职务,报销类型,一线城市报销标准(元),二线城市报销标准(元),三线城市报销标准(元) +普通员工,住宿费,500,400,300 +部门主管,住宿费,600,500,400 +部门总监,住宿费,700,600,500 +区域总经理,住宿费,800,700,600 +普通员工,伙食费,50,40,30 +部门主管,伙食费,50,40,30 +部门总监,伙食费,50,40,30 +区域总经理,伙食费,50,40,30 +普通员工,交通费,50,40,30 +部门主管,交通费,50,40,30 +部门总监,交通费,50,40,30 +区域总经理,交通费,50,40,30 diff --git a/apps/dataset/template/table_template_zh.xlsx b/apps/dataset/template/table_template_zh.xlsx new file mode 100644 index 00000000000..2bc94a5b80d Binary files /dev/null and b/apps/dataset/template/table_template_zh.xlsx differ diff --git a/apps/dataset/template/table_template_zh_Hant.csv b/apps/dataset/template/table_template_zh_Hant.csv new file mode 100644 index 00000000000..2e30ab49c33 --- /dev/null +++ b/apps/dataset/template/table_template_zh_Hant.csv @@ -0,0 +1,13 @@ +職務,報銷類型,一線城市報銷標準(元),二線城市報銷標準(元),三線城市報銷標準(元) +普通員工,住宿費,500,400,300 +部門主管,住宿費,600,500,400 +部門總監,住宿費,700,600,500 +區域總經理,住宿費,800,700,600 +普通員工,伙食費,50,40,30 +部門主管,伙食費,50,40,30 +部門總監,伙食費,50,40,30 +區域總經理,伙食費,50,40,30 +普通員工,交通費,50,40,30 +部門主管,交通費,50,40,30 +部門總監,交通費,50,40,30 +區域總經理,交通費,50,40,30 \ No newline at end of file diff --git a/apps/dataset/template/table_template_zh_Hant.xlsx b/apps/dataset/template/table_template_zh_Hant.xlsx new file mode 100644 index 00000000000..53f34e4ed9d Binary files /dev/null and b/apps/dataset/template/table_template_zh_Hant.xlsx differ diff --git a/apps/dataset/urls.py b/apps/dataset/urls.py index 237a81f5976..302b953ec36 100644 --- a/apps/dataset/urls.py +++ b/apps/dataset/urls.py @@ -6,16 +6,29 @@ urlpatterns = [ path('dataset', views.Dataset.as_view(), name="dataset"), path('dataset/web', 
views.Dataset.CreateWebDataset.as_view()), + path('dataset/qa', views.Dataset.CreateQADataset.as_view()), path('dataset/', views.Dataset.Operate.as_view(), name="dataset_key"), + path('dataset//export', views.Dataset.Export.as_view(), name="export"), + path('dataset//export_zip', views.Dataset.ExportZip.as_view(), name="export_zip"), + path('dataset//re_embedding', views.Dataset.Embedding.as_view(), name="dataset_key"), + path('dataset//generate_related', views.Dataset.GenerateRelated.as_view(), + name="dataset_generate_related"), path('dataset//application', views.Dataset.Application.as_view()), path('dataset//', views.Dataset.Page.as_view(), name="dataset"), path('dataset//sync_web', views.Dataset.SyncWeb.as_view()), path('dataset//hit_test', views.Dataset.HitTest.as_view()), path('dataset//document', views.Document.as_view(), name='document'), + path('dataset//model', views.Dataset.Model.as_view()), + path('dataset/document/template/export', views.Template.as_view()), + path('dataset/document/table_template/export', views.TableTemplate.as_view()), path('dataset//document/web', views.WebDocument.as_view()), + path('dataset//document/qa', views.QaDocument.as_view()), + path('dataset//document/table', views.TableDocument.as_view()), path('dataset//document/_bach', views.Document.Batch.as_view()), path('dataset//document/batch_hit_handling', views.Document.BatchEditHitHandling.as_view()), path('dataset//document//', views.Document.Page.as_view()), + path('dataset//document/batch_refresh', views.Document.BatchRefresh.as_view()), + path('dataset//document/batch_generate_related', views.Document.BatchGenerateRelated.as_view()), path('dataset//document/', views.Document.Operate.as_view(), name="document_operate"), path('dataset/document/split', views.Document.Split.as_view(), @@ -23,14 +36,25 @@ path('dataset/document/split_pattern', views.Document.SplitPattern.as_view(), name="document_operate"), path('dataset//document/migrate/', views.Document.Migrate.as_view()), + 
path('dataset//document//export', views.Document.Export.as_view(), + name="document_export"), + path('dataset//document//export_zip', views.Document.ExportZip.as_view(), + name="document_export"), + path('dataset//document//sync', views.Document.SyncWeb.as_view()), path('dataset//document//refresh', views.Document.Refresh.as_view()), + path('dataset//document//cancel_task', views.Document.CancelTask.as_view()), + path('dataset//document/cancel_task/_batch', + views.Document.CancelTask.Batch.as_view()), path('dataset//document//paragraph', views.Paragraph.as_view()), + path('dataset//document/batch_generate_related', views.Document.BatchGenerateRelated.as_view()), path( 'dataset//document//paragraph/migrate/dataset//document/', views.Paragraph.BatchMigrate.as_view()), path('dataset//document//paragraph/_batch', views.Paragraph.Batch.as_view()), path('dataset//document//paragraph//', views.Paragraph.Page.as_view(), name='paragraph_page'), + path('dataset//document//paragraph/batch_generate_related', + views.Paragraph.BatchGenerateRelated.as_view()), path('dataset//document//paragraph/', views.Paragraph.Operate.as_view()), path('dataset//document//paragraph//problem', @@ -47,5 +71,7 @@ path('dataset//problem/', views.Problem.Operate.as_view()), path('dataset//problem//paragraph', views.Problem.Paragraph.as_view()), path('image/', views.Image.Operate.as_view()), - path('image', views.Image.as_view()) + path('image', views.Image.as_view()), + path('file/', views.FileView.Operate.as_view()), + path('file', views.FileView.as_view()) ] diff --git a/apps/dataset/views/__init__.py b/apps/dataset/views/__init__.py index 6b2abcfb16c..e434cec8622 100644 --- a/apps/dataset/views/__init__.py +++ b/apps/dataset/views/__init__.py @@ -11,3 +11,4 @@ from .paragraph import * from .problem import * from .image import * +from .file import * diff --git a/apps/dataset/views/common.py b/apps/dataset/views/common.py new file mode 100644 index 00000000000..6637426e349 --- /dev/null +++ 
b/apps/dataset/views/common.py @@ -0,0 +1,56 @@ +# coding=utf-8 +""" + @project: MaxKB + @Author:虎 + @file: common.py.py + @date:2025/3/25 15:43 + @desc: +""" +from django.db.models import QuerySet + +from dataset.models import DataSet, Document + + +def get_dataset_operation_object(dataset_id: str): + dataset_model = QuerySet(model=DataSet).filter(id=dataset_id).first() + if dataset_model is not None: + return { + "name": dataset_model.name, + "desc": dataset_model.desc, + "type": dataset_model.type, + "create_time": dataset_model.create_time, + "update_time": dataset_model.update_time + } + return {} + + +def get_document_operation_object(document_id: str): + document_model = QuerySet(model=Document).filter(id=document_id).first() + if document_model is not None: + return { + "name": document_model.name, + "type": document_model.type, + } + return {} + + +def get_document_operation_object_batch(document_id_list: str): + document_model_list = QuerySet(model=Document).filter(id__in=document_id_list) + if document_model_list is not None: + return { + "name": f'[{",".join([document_model.name for document_model in document_model_list])}]', + 'document_list': [{'name': document_model.name, 'type': document_model.type} for document_model in + document_model_list] + } + return {} + + +def get_dataset_document_operation_object(dataset_dict: dict, document_dict: dict): + return { + 'name': f'{dataset_dict.get("name", "")}/{document_dict.get("name", "")}', + 'dataset_name': dataset_dict.get("name", ""), + 'dataset_desc': dataset_dict.get("desc", ""), + 'dataset_type': dataset_dict.get("type", ""), + 'document_name': document_dict.get("name", ""), + 'document_type': document_dict.get("type", ""), + } diff --git a/apps/dataset/views/dataset.py b/apps/dataset/views/dataset.py index d3720977b2f..40d9a0c6514 100644 --- a/apps/dataset/views/dataset.py +++ b/apps/dataset/views/dataset.py @@ -9,16 +9,23 @@ from drf_yasg.utils import swagger_auto_schema from 
rest_framework.decorators import action +from rest_framework.parsers import MultiPartParser from rest_framework.views import APIView from rest_framework.views import Request +import dataset.models from common.auth import TokenAuth, has_permissions from common.constants.permission_constants import PermissionConstants, CompareConstants, Permission, Group, Operate, \ ViewPermission, RoleConstants +from common.log.log import log from common.response import result from common.response.result import get_page_request_params, get_page_api_response, get_api_response from common.swagger_api.common_api import CommonApi +from dataset.serializers.common_serializers import GenerateRelatedSerializer from dataset.serializers.dataset_serializers import DataSetSerializers +from dataset.views.common import get_dataset_operation_object +from setting.serializers.provider_serializers import ModelSerializer +from django.utils.translation import gettext_lazy as _ class Dataset(APIView): @@ -28,34 +35,66 @@ class SyncWeb(APIView): authentication_classes = [TokenAuth] @action(methods=['PUT'], detail=False) - @swagger_auto_schema(operation_summary="同步Web站点知识库", - operation_id="同步Web站点知识库", + @swagger_auto_schema(operation_summary=_("Synchronize the knowledge base of the website"), + operation_id=_("Synchronize the knowledge base of the website"), manual_parameters=DataSetSerializers.SyncWeb.get_request_params_api(), responses=result.get_default_response(), - tags=["知识库"]) + tags=[_('Knowledge Base')]) @has_permissions(ViewPermission( [RoleConstants.ADMIN, RoleConstants.USER], [lambda r, keywords: Permission(group=Group.DATASET, operate=Operate.MANAGE, dynamic_tag=keywords.get('dataset_id'))], compare=CompareConstants.AND), PermissionConstants.DATASET_EDIT, compare=CompareConstants.AND) + @log(menu='Knowledge Base', operate="Synchronize the knowledge base of the website", + get_operation_object=lambda r, keywords: get_dataset_operation_object(keywords.get('dataset_id'))) def put(self, 
request: Request, dataset_id: str): return result.success(DataSetSerializers.SyncWeb( data={'sync_type': request.query_params.get('sync_type'), 'id': dataset_id, 'user_id': str(request.user.id)}).sync()) + class CreateQADataset(APIView): + authentication_classes = [TokenAuth] + parser_classes = [MultiPartParser] + + @action(methods=['POST'], detail=False) + @swagger_auto_schema(operation_summary=_("Create QA knowledge base"), + operation_id=_("Create QA knowledge base"), + manual_parameters=DataSetSerializers.Create.CreateQASerializers.get_request_params_api(), + responses=get_api_response( + DataSetSerializers.Create.CreateQASerializers.get_response_body_api()), + tags=[_('Knowledge Base')] + ) + @has_permissions(PermissionConstants.DATASET_CREATE, compare=CompareConstants.AND) + @log(menu='Knowledge Base', operate="Create QA knowledge base", + get_operation_object=lambda r, keywords: {'name': r.data.get('name'), 'desc': r.data.get('desc'), + 'file_list': r.FILES.getlist('file')}) + def post(self, request: Request): + return result.success(DataSetSerializers.Create(data={'user_id': request.user.id}).save_qa({ + 'file_list': request.FILES.getlist('file'), + 'name': request.data.get('name'), + 'desc': request.data.get('desc') + })) + class CreateWebDataset(APIView): authentication_classes = [TokenAuth] @action(methods=['POST'], detail=False) - @swagger_auto_schema(operation_summary="创建web站点知识库", - operation_id="创建web站点知识库", + @swagger_auto_schema(operation_summary=_('Create a web site knowledge base'), + operation_id=_('Create a web site knowledge base'), request_body=DataSetSerializers.Create.CreateWebSerializers.get_request_body_api(), responses=get_api_response( DataSetSerializers.Create.CreateWebSerializers.get_response_body_api()), - tags=["知识库"] + tags=[_('Knowledge Base')] ) @has_permissions(PermissionConstants.DATASET_CREATE, compare=CompareConstants.AND) + @log(menu='Knowledge Base', operate="Create a web site knowledge base", + get_operation_object=lambda 
r, keywords: {'name': r.data.get('name'), 'desc': r.data.get('desc'), + 'file_list': r.FILES.getlist('file'), + 'meta': {'source_url': r.data.get('source_url'), + 'selector': r.data.get('selector'), + 'embedding_mode_id': r.data.get('embedding_mode_id')}} + ) def post(self, request: Request): return result.success(DataSetSerializers.Create(data={'user_id': request.user.id}).save_web(request.data)) @@ -63,36 +102,39 @@ class Application(APIView): authentication_classes = [TokenAuth] @action(methods=['GET'], detail=False) - @swagger_auto_schema(operation_summary="获取知识库可用应用列表", - operation_id="获取知识库可用应用列表", + @swagger_auto_schema(operation_summary=_('Get a list of applications available in the knowledge base'), + operation_id=_('Get a list of applications available in the knowledge base'), manual_parameters=DataSetSerializers.Application.get_request_params_api(), responses=result.get_api_array_response( DataSetSerializers.Application.get_response_body_api()), - tags=["知识库"]) + tags=[_('Knowledge Base')]) def get(self, request: Request, dataset_id: str): return result.success(DataSetSerializers.Operate( data={'id': dataset_id, 'user_id': str(request.user.id)}).list_application()) @action(methods=['GET'], detail=False) - @swagger_auto_schema(operation_summary="获取知识库列表", - operation_id="获取知识库列表", + @swagger_auto_schema(operation_summary=_('Get a list of knowledge bases'), + operation_id=_('Get a list of knowledge bases'), manual_parameters=DataSetSerializers.Query.get_request_params_api(), responses=result.get_api_array_response(DataSetSerializers.Query.get_response_body_api()), - tags=["知识库"]) + tags=[_('Knowledge Base')]) @has_permissions(PermissionConstants.DATASET_READ, compare=CompareConstants.AND) def get(self, request: Request): - d = DataSetSerializers.Query(data={**request.query_params, 'user_id': str(request.user.id)}) + data = {key: str(value) for key, value in request.query_params.items()} + d = DataSetSerializers.Query(data={**data, 'user_id': 
str(request.user.id)}) d.is_valid() return result.success(d.list()) @action(methods=['POST'], detail=False) - @swagger_auto_schema(operation_summary="创建知识库", - operation_id="创建知识库", + @swagger_auto_schema(operation_summary=_('Create a knowledge base'), + operation_id=_('Create a knowledge base'), request_body=DataSetSerializers.Create.get_request_body_api(), responses=get_api_response(DataSetSerializers.Create.get_response_body_api()), - tags=["知识库"] + tags=[_('Knowledge Base')] ) @has_permissions(PermissionConstants.DATASET_CREATE, compare=CompareConstants.AND) + @log(menu='Knowledge Base', operate="Create a knowledge base", + get_operation_object=lambda r, keywords: {'name': r.data.get('name'), 'desc': r.data.get('desc')}) def post(self, request: Request): return result.success(DataSetSerializers.Create(data={'user_id': request.user.id}).save(request.data)) @@ -100,10 +142,10 @@ class HitTest(APIView): authentication_classes = [TokenAuth] @action(methods="GET", detail=False) - @swagger_auto_schema(operation_summary="命中测试列表", operation_id="命中测试列表", + @swagger_auto_schema(operation_summary=_('Hit test list'), operation_id=_('Hit test list'), manual_parameters=CommonApi.HitTestApi.get_request_params_api(), responses=result.get_api_array_response(CommonApi.HitTestApi.get_response_body_api()), - tags=["知识库"]) + tags=[_('Knowledge Base')]) @has_permissions(lambda r, keywords: Permission(group=Group.DATASET, operate=Operate.USE, dynamic_tag=keywords.get('dataset_id'))) def get(self, request: Request, dataset_id: str): @@ -115,27 +157,96 @@ def get(self, request: Request, dataset_id: str): 'search_mode': request.query_params.get('search_mode')}).hit_test( )) + class Embedding(APIView): + authentication_classes = [TokenAuth] + + @action(methods="PUT", detail=False) + @swagger_auto_schema(operation_summary=_('Re-vectorize'), operation_id=_('Re-vectorize'), + manual_parameters=DataSetSerializers.Operate.get_request_params_api(), + responses=result.get_default_response(), + 
tags=[_('Knowledge Base')] + ) + @has_permissions(lambda r, keywords: Permission(group=Group.DATASET, operate=Operate.MANAGE, + dynamic_tag=keywords.get('dataset_id'))) + @log(menu='Knowledge Base', operate="Re-vectorize", + get_operation_object=lambda r, keywords: get_dataset_operation_object(keywords.get('dataset_id'))) + def put(self, request: Request, dataset_id: str): + return result.success( + DataSetSerializers.Operate(data={'id': dataset_id, 'user_id': request.user.id}).re_embedding()) + + class GenerateRelated(APIView): + authentication_classes = [TokenAuth] + + @action(methods=['PUT'], detail=False) + @swagger_auto_schema(operation_summary=_('Generate related'), operation_id=_('Generate related'), + manual_parameters=DataSetSerializers.Operate.get_request_params_api(), + request_body=GenerateRelatedSerializer.get_request_body_api(), + responses=result.get_default_response(), + tags=[_('Knowledge Base')] + ) + @log(menu='document', operate="Generate related documents", + get_operation_object=lambda r, keywords: get_dataset_operation_object(keywords.get('dataset_id')) + ) + def put(self, request: Request, dataset_id: str): + return result.success( + DataSetSerializers.Operate(data={'id': dataset_id, 'user_id': request.user.id}).generate_related( + request.data)) + + class Export(APIView): + authentication_classes = [TokenAuth] + + @action(methods="GET", detail=False) + @swagger_auto_schema(operation_summary=_('Export knowledge base'), operation_id=_('Export knowledge base'), + manual_parameters=DataSetSerializers.Operate.get_request_params_api(), + tags=[_('Knowledge Base')] + ) + @has_permissions(lambda r, keywords: Permission(group=Group.DATASET, operate=Operate.MANAGE, + dynamic_tag=keywords.get('dataset_id'))) + @log(menu='Knowledge Base', operate="Export knowledge base", + get_operation_object=lambda r, keywords: get_dataset_operation_object(keywords.get('dataset_id'))) + def get(self, request: Request, dataset_id: str): + return 
DataSetSerializers.Operate(data={'id': dataset_id, 'user_id': request.user.id}).export_excel() + + class ExportZip(APIView): + authentication_classes = [TokenAuth] + + @action(methods="GET", detail=False) + @swagger_auto_schema(operation_summary=_('Export knowledge base containing images'), + operation_id=_('Export knowledge base containing images'), + manual_parameters=DataSetSerializers.Operate.get_request_params_api(), + tags=[_('Knowledge Base')] + ) + @has_permissions(lambda r, keywords: Permission(group=Group.DATASET, operate=Operate.MANAGE, + dynamic_tag=keywords.get('dataset_id'))) + @log(menu='Knowledge Base', operate="Export knowledge base containing images", + get_operation_object=lambda r, keywords: get_dataset_operation_object(keywords.get('dataset_id'))) + def get(self, request: Request, dataset_id: str): + return DataSetSerializers.Operate(data={'id': dataset_id, 'user_id': request.user.id}).export_zip() + class Operate(APIView): authentication_classes = [TokenAuth] @action(methods="DELETE", detail=False) - @swagger_auto_schema(operation_summary="删除知识库", operation_id="删除知识库", + @swagger_auto_schema(operation_summary=_('Delete knowledge base'), operation_id=_('Delete knowledge base'), manual_parameters=DataSetSerializers.Operate.get_request_params_api(), responses=result.get_default_response(), - tags=["知识库"]) + tags=[_('Knowledge Base')]) @has_permissions(lambda r, keywords: Permission(group=Group.DATASET, operate=Operate.MANAGE, dynamic_tag=keywords.get('dataset_id')), lambda r, k: Permission(group=Group.DATASET, operate=Operate.DELETE, dynamic_tag=k.get('dataset_id')), compare=CompareConstants.AND) + @log(menu='Knowledge Base', operate="Delete knowledge base", + get_operation_object=lambda r, keywords: get_dataset_operation_object(keywords.get('dataset_id'))) def delete(self, request: Request, dataset_id: str): operate = DataSetSerializers.Operate(data={'id': dataset_id}) return result.success(operate.delete()) @action(methods="GET", detail=False) 
- @swagger_auto_schema(operation_summary="查询知识库详情根据知识库id", operation_id="查询知识库详情根据知识库id", + @swagger_auto_schema(operation_summary=_('Query knowledge base details based on knowledge base id'), + operation_id=_('Query knowledge base details based on knowledge base id'), manual_parameters=DataSetSerializers.Operate.get_request_params_api(), responses=get_api_response(DataSetSerializers.Operate.get_response_body_api()), - tags=["知识库"]) + tags=[_('Knowledge Base')]) @has_permissions(lambda r, keywords: Permission(group=Group.DATASET, operate=Operate.USE, dynamic_tag=keywords.get('dataset_id'))) def get(self, request: Request, dataset_id: str): @@ -143,14 +254,17 @@ def get(self, request: Request, dataset_id: str): user_id=request.user.id)) @action(methods="PUT", detail=False) - @swagger_auto_schema(operation_summary="修改知识库信息", operation_id="修改知识库信息", + @swagger_auto_schema(operation_summary=_('Modify knowledge base information'), + operation_id=_('Modify knowledge base information'), manual_parameters=DataSetSerializers.Operate.get_request_params_api(), request_body=DataSetSerializers.Operate.get_request_body_api(), responses=get_api_response(DataSetSerializers.Operate.get_response_body_api()), - tags=["知识库"] + tags=[_('Knowledge Base')] ) @has_permissions(lambda r, keywords: Permission(group=Group.DATASET, operate=Operate.MANAGE, dynamic_tag=keywords.get('dataset_id'))) + @log(menu='Knowledge Base', operate="Modify knowledge base information", + get_operation_object=lambda r, keywords: get_dataset_operation_object(keywords.get('dataset_id'))) def put(self, request: Request, dataset_id: str): return result.success( DataSetSerializers.Operate(data={'id': dataset_id, 'user_id': request.user.id}).edit(request.data, @@ -160,17 +274,34 @@ class Page(APIView): authentication_classes = [TokenAuth] @action(methods=['GET'], detail=False) - @swagger_auto_schema(operation_summary="获取知识库分页列表", - operation_id="获取知识库分页列表", + @swagger_auto_schema(operation_summary=_('Get the 
knowledge base paginated list'), + operation_id=_('Get the knowledge base paginated list'), manual_parameters=get_page_request_params( DataSetSerializers.Query.get_request_params_api()), responses=get_page_api_response(DataSetSerializers.Query.get_response_body_api()), - tags=["知识库"] + tags=[_('Knowledge Base')] ) @has_permissions(PermissionConstants.DATASET_READ, compare=CompareConstants.AND) def get(self, request: Request, current_page, page_size): d = DataSetSerializers.Query( data={'name': request.query_params.get('name', None), 'desc': request.query_params.get("desc", None), - 'user_id': str(request.user.id)}) + 'user_id': str(request.user.id), + 'select_user_id': request.query_params.get('select_user_id', None)}) d.is_valid() return result.success(d.page(current_page, page_size)) + + class Model(APIView): + authentication_classes = [TokenAuth] + + @action(methods=["GET"], detail=False) + @has_permissions(ViewPermission( + [RoleConstants.ADMIN, RoleConstants.USER], + [lambda r, keywords: Permission(group=Group.DATASET, operate=Operate.MANAGE, + dynamic_tag=keywords.get('dataset_id'))], + compare=CompareConstants.AND)) + def get(self, request: Request, dataset_id: str): + return result.success( + ModelSerializer.Query( + data={'user_id': request.user.id, 'model_type': 'LLM'}).list( + with_valid=True) + ) diff --git a/apps/dataset/views/document.py b/apps/dataset/views/document.py index a727a31fac7..05e741ad87a 100644 --- a/apps/dataset/views/document.py +++ b/apps/dataset/views/document.py @@ -7,6 +7,7 @@ @desc: """ +from django.utils.translation import gettext_lazy as _ from drf_yasg.utils import swagger_auto_schema from rest_framework.decorators import action from rest_framework.parsers import MultiPartParser @@ -15,54 +16,140 @@ from common.auth import TokenAuth, has_permissions from common.constants.permission_constants import Permission, Group, Operate, CompareConstants +from common.log.log import log from common.response import result from 
common.util.common import query_params_to_single_dict from dataset.serializers.common_serializers import BatchSerializer from dataset.serializers.document_serializers import DocumentSerializers, DocumentWebInstanceSerializer from dataset.swagger_api.document_api import DocumentApi +from dataset.views.common import get_dataset_document_operation_object, get_dataset_operation_object, \ + get_document_operation_object_batch, get_document_operation_object + + +class Template(APIView): + authentication_classes = [TokenAuth] + + @action(methods=['GET'], detail=False) + @swagger_auto_schema(operation_summary=_('Get QA template'), + operation_id=_('Get QA template'), + manual_parameters=DocumentSerializers.Export.get_request_params_api(), + tags=[_('Knowledge Base/Documentation')]) + def get(self, request: Request): + return DocumentSerializers.Export(data={'type': request.query_params.get('type')}).export(with_valid=True) + + +class TableTemplate(APIView): + authentication_classes = [TokenAuth] + + @action(methods=['GET'], detail=False) + @swagger_auto_schema(operation_summary=_('Get form template'), + operation_id=_('Get form template'), + manual_parameters=DocumentSerializers.Export.get_request_params_api(), + tags=[_('Knowledge Base/Documentation')]) + def get(self, request: Request): + return DocumentSerializers.Export(data={'type': request.query_params.get('type')}).table_export(with_valid=True) class WebDocument(APIView): authentication_classes = [TokenAuth] @action(methods=['POST'], detail=False) - @swagger_auto_schema(operation_summary="创建Web站点文档", - operation_id="创建Web站点文档", + @swagger_auto_schema(operation_summary=_('Create Web site documents'), + operation_id=_('Create Web site documents'), request_body=DocumentWebInstanceSerializer.get_request_body_api(), manual_parameters=DocumentSerializers.Create.get_request_params_api(), responses=result.get_api_response(DocumentSerializers.Operate.get_response_body_api()), - tags=["知识库/文档"]) + tags=[_('Knowledge 
Base/Documentation')]) @has_permissions( lambda r, k: Permission(group=Group.DATASET, operate=Operate.MANAGE, dynamic_tag=k.get('dataset_id'))) + @log(menu='document', operate="Create Web site documents", + get_operation_object=lambda r, keywords: get_dataset_document_operation_object( + get_dataset_operation_object(keywords.get('dataset_id')), + {'name': f'[{",".join([url for url in r.data.get("source_url_list", [])])}]', + 'document_list': [{'name': url} for url in r.data.get("source_url_list", [])]})) def post(self, request: Request, dataset_id: str): return result.success( DocumentSerializers.Create(data={'dataset_id': dataset_id}).save_web(request.data, with_valid=True)) +class QaDocument(APIView): + authentication_classes = [TokenAuth] + parser_classes = [MultiPartParser] + + @action(methods=['POST'], detail=False) + @swagger_auto_schema(operation_summary=_('Import QA and create documentation'), + operation_id=_('Import QA and create documentation'), + manual_parameters=DocumentWebInstanceSerializer.get_request_params_api(), + responses=result.get_api_response(DocumentSerializers.Create.get_response_body_api()), + tags=[_('Knowledge Base/Documentation')]) + @has_permissions( + lambda r, k: Permission(group=Group.DATASET, operate=Operate.MANAGE, + dynamic_tag=k.get('dataset_id'))) + @log(menu='document', operate="Import QA and create documentation", + get_operation_object=lambda r, keywords: get_dataset_document_operation_object( + get_dataset_operation_object(keywords.get('dataset_id')), + {'name': f'[{",".join([file.name for file in r.FILES.getlist("file")])}]', + 'document_list': [{'name': file.name} for file in r.FILES.getlist("file")]})) + def post(self, request: Request, dataset_id: str): + return result.success( + DocumentSerializers.Create(data={'dataset_id': dataset_id}).save_qa( + {'file_list': request.FILES.getlist('file')}, + with_valid=True)) + + +class TableDocument(APIView): + authentication_classes = [TokenAuth] + parser_classes = 
[MultiPartParser] + + @action(methods=['POST'], detail=False) + @swagger_auto_schema(operation_summary=_('Import tables and create documents'), + operation_id=_('Import tables and create documents'), + manual_parameters=DocumentWebInstanceSerializer.get_request_params_api(), + responses=result.get_api_response(DocumentSerializers.Create.get_response_body_api()), + tags=[_('Knowledge Base/Documentation')]) + @has_permissions( + lambda r, k: Permission(group=Group.DATASET, operate=Operate.MANAGE, + dynamic_tag=k.get('dataset_id'))) + @log(menu='document', operate="Import tables and create documents", + get_operation_object=lambda r, keywords: get_dataset_document_operation_object( + get_dataset_operation_object(keywords.get('dataset_id')), + {'name': f'[{",".join([file.name for file in r.FILES.getlist("file")])}]', + 'document_list': [{'name': file.name} for file in r.FILES.getlist("file")]})) + def post(self, request: Request, dataset_id: str): + return result.success( + DocumentSerializers.Create(data={'dataset_id': dataset_id}).save_table( + {'file_list': request.FILES.getlist('file')}, + with_valid=True)) + + class Document(APIView): authentication_classes = [TokenAuth] @action(methods=['POST'], detail=False) - @swagger_auto_schema(operation_summary="创建文档", - operation_id="创建文档", + @swagger_auto_schema(operation_summary=_('Create document'), + operation_id=_('Create document'), request_body=DocumentSerializers.Create.get_request_body_api(), manual_parameters=DocumentSerializers.Create.get_request_params_api(), responses=result.get_api_response(DocumentSerializers.Operate.get_response_body_api()), - tags=["知识库/文档"]) + tags=[_('Knowledge Base/Documentation')]) @has_permissions( lambda r, k: Permission(group=Group.DATASET, operate=Operate.MANAGE, dynamic_tag=k.get('dataset_id'))) + @log(menu='document', operate="Create document", + get_operation_object=lambda r, keywords: get_dataset_document_operation_object( + 
get_dataset_operation_object(keywords.get('dataset_id')), + {'name': r.data.get('name')})) def post(self, request: Request, dataset_id: str): return result.success( DocumentSerializers.Create(data={'dataset_id': dataset_id}).save(request.data, with_valid=True)) @action(methods=['GET'], detail=False) - @swagger_auto_schema(operation_summary="文档列表", - operation_id="文档列表", + @swagger_auto_schema(operation_summary=_('Document list'), + operation_id=_('Document list'), manual_parameters=DocumentSerializers.Query.get_request_params_api(), responses=result.get_api_response(DocumentSerializers.Query.get_response_body_api()), - tags=["知识库/文档"]) + tags=[_('Knowledge Base/Documentation')]) @has_permissions( lambda r, k: Permission(group=Group.DATASET, operate=Operate.USE, dynamic_tag=k.get('dataset_id'))) @@ -76,16 +163,20 @@ class BatchEditHitHandling(APIView): authentication_classes = [TokenAuth] @action(methods=['POST'], detail=False) - @swagger_auto_schema(operation_summary="批量修改文档命中处理方式", - operation_id="批量修改文档命中处理方式", + @swagger_auto_schema(operation_summary=_('Modify document hit processing methods in batches'), + operation_id=_('Modify document hit processing methods in batches'), request_body= DocumentApi.BatchEditHitHandlingApi.get_request_body_api(), manual_parameters=DocumentSerializers.Create.get_request_params_api(), responses=result.get_default_response(), - tags=["知识库/文档"]) + tags=[_('Knowledge Base/Documentation')]) @has_permissions( lambda r, k: Permission(group=Group.DATASET, operate=Operate.MANAGE, dynamic_tag=k.get('dataset_id'))) + @log(menu='document', operate="Modify document hit processing methods in batches", + get_operation_object=lambda r, keywords: get_dataset_document_operation_object( + get_dataset_operation_object(keywords.get('dataset_id')), + get_document_operation_object_batch(r.data.get('id_list')))) def put(self, request: Request, dataset_id: str): return result.success( DocumentSerializers.Batch(data={'dataset_id': 
dataset_id}).batch_edit_hit_handling(request.data)) @@ -94,76 +185,195 @@ class Batch(APIView): authentication_classes = [TokenAuth] @action(methods=['POST'], detail=False) - @swagger_auto_schema(operation_summary="批量创建文档", - operation_id="批量创建文档", + @swagger_auto_schema(operation_summary=_('Create documents in batches'), + operation_id=_('Create documents in batches'), request_body= DocumentSerializers.Batch.get_request_body_api(), manual_parameters=DocumentSerializers.Create.get_request_params_api(), responses=result.get_api_array_response( DocumentSerializers.Operate.get_response_body_api()), - tags=["知识库/文档"]) + tags=[_('Knowledge Base/Documentation')]) @has_permissions( lambda r, k: Permission(group=Group.DATASET, operate=Operate.MANAGE, dynamic_tag=k.get('dataset_id'))) + @log(menu='document', operate="Create documents in batches", + get_operation_object=lambda r, keywords: get_dataset_document_operation_object( + get_dataset_operation_object(keywords.get('dataset_id')), + {'name': f'[{",".join([document.get("name") for document in r.data])}]', + 'document_list': r.data}) + ) def post(self, request: Request, dataset_id: str): return result.success(DocumentSerializers.Batch(data={'dataset_id': dataset_id}).batch_save(request.data)) @action(methods=['POST'], detail=False) - @swagger_auto_schema(operation_summary="批量同步文档", - operation_id="批量同步文档", + @swagger_auto_schema(operation_summary=_('Batch sync documents'), + operation_id=_('Batch sync documents'), request_body= BatchSerializer.get_request_body_api(), manual_parameters=DocumentSerializers.Create.get_request_params_api(), responses=result.get_default_response(), - tags=["知识库/文档"]) + tags=[_('Knowledge Base/Documentation')]) @has_permissions( lambda r, k: Permission(group=Group.DATASET, operate=Operate.MANAGE, dynamic_tag=k.get('dataset_id'))) + @log(menu='document', operate="Batch sync documents", + get_operation_object=lambda r, keywords: get_dataset_document_operation_object( + 
get_dataset_operation_object(keywords.get('dataset_id')), + get_document_operation_object_batch(r.data.get('id_list'))) + ) def put(self, request: Request, dataset_id: str): return result.success(DocumentSerializers.Batch(data={'dataset_id': dataset_id}).batch_sync(request.data)) @action(methods=['DELETE'], detail=False) - @swagger_auto_schema(operation_summary="批量删除文档", - operation_id="批量删除文档", + @swagger_auto_schema(operation_summary=_('Delete documents in batches'), + operation_id=_('Delete documents in batches'), request_body= BatchSerializer.get_request_body_api(), manual_parameters=DocumentSerializers.Create.get_request_params_api(), responses=result.get_default_response(), - tags=["知识库/文档"]) + tags=[_('Knowledge Base/Documentation')]) @has_permissions( lambda r, k: Permission(group=Group.DATASET, operate=Operate.MANAGE, dynamic_tag=k.get('dataset_id'))) + @log(menu='document', operate="Delete documents in batches", + get_operation_object=lambda r, keywords: get_dataset_document_operation_object( + get_dataset_operation_object(keywords.get('dataset_id')), + get_document_operation_object_batch(r.data.get('id_list')))) def delete(self, request: Request, dataset_id: str): return result.success(DocumentSerializers.Batch(data={'dataset_id': dataset_id}).batch_delete(request.data)) + class SyncWeb(APIView): + authentication_classes = [TokenAuth] + + @action(methods=['PUT'], detail=False) + @swagger_auto_schema(operation_summary=_('Synchronize web site types'), + operation_id=_('Synchronize web site types'), + manual_parameters=DocumentSerializers.Operate.get_request_params_api(), + responses=result.get_default_response(), + tags=[_('Knowledge Base/Documentation')] + ) + @has_permissions( + lambda r, k: Permission(group=Group.DATASET, operate=Operate.MANAGE, + dynamic_tag=k.get('dataset_id'))) + @log(menu='document', operate="Synchronize web site types", + get_operation_object=lambda r, keywords: get_dataset_document_operation_object( + 
get_dataset_operation_object(keywords.get('dataset_id')), + get_document_operation_object(keywords.get('document_id')) + )) + def put(self, request: Request, dataset_id: str, document_id: str): + return result.success( + DocumentSerializers.Sync(data={'document_id': document_id, 'dataset_id': dataset_id}).sync( + )) + + class CancelTask(APIView): + authentication_classes = [TokenAuth] + + @action(methods=['PUT'], detail=False) + @swagger_auto_schema(operation_summary=_('Cancel task'), + operation_id=_('Cancel task'), + manual_parameters=DocumentSerializers.Operate.get_request_params_api(), + request_body=DocumentApi.Cancel.get_request_body_api(), + responses=result.get_default_response(), + tags=[_('Knowledge Base/Documentation')] + ) + @has_permissions( + lambda r, k: Permission(group=Group.DATASET, operate=Operate.MANAGE, + dynamic_tag=k.get('dataset_id'))) + @log(menu='document', operate="Cancel task", + get_operation_object=lambda r, keywords: get_dataset_document_operation_object( + get_dataset_operation_object(keywords.get('dataset_id')), + get_document_operation_object(keywords.get('document_id')) + )) + def put(self, request: Request, dataset_id: str, document_id: str): + return result.success( + DocumentSerializers.Operate(data={'document_id': document_id, 'dataset_id': dataset_id}).cancel( + request.data + )) + + class Batch(APIView): + authentication_classes = [TokenAuth] + + @action(methods=['PUT'], detail=False) + @swagger_auto_schema(operation_summary=_('Cancel tasks in batches'), + operation_id=_('Cancel tasks in batches'), + request_body=DocumentApi.BatchCancel.get_request_body_api(), + manual_parameters=DocumentSerializers.Create.get_request_params_api(), + responses=result.get_default_response(), + tags=[_('Knowledge Base/Documentation')] + ) + @has_permissions( + lambda r, k: Permission(group=Group.DATASET, operate=Operate.MANAGE, + dynamic_tag=k.get('dataset_id'))) + @log(menu='document', operate="Cancel tasks in batches", + 
get_operation_object=lambda r, keywords: get_dataset_document_operation_object( + get_dataset_operation_object(keywords.get('dataset_id')), + get_document_operation_object_batch(r.data.get('id_list')) + ) + ) + def put(self, request: Request, dataset_id: str): + return result.success( + DocumentSerializers.Batch(data={'dataset_id': dataset_id}).batch_cancel(request.data)) + class Refresh(APIView): authentication_classes = [TokenAuth] @action(methods=['PUT'], detail=False) - @swagger_auto_schema(operation_summary="刷新文档向量库", - operation_id="刷新文档向量库", + @swagger_auto_schema(operation_summary=_('Refresh document vector library'), + operation_id=_('Refresh document vector library'), + request_body=DocumentApi.EmbeddingState.get_request_body_api(), manual_parameters=DocumentSerializers.Operate.get_request_params_api(), responses=result.get_default_response(), - tags=["知识库/文档"] + tags=[_('Knowledge Base/Documentation')] ) @has_permissions( lambda r, k: Permission(group=Group.DATASET, operate=Operate.MANAGE, dynamic_tag=k.get('dataset_id'))) + @log(menu='document', operate="Refresh document vector library", + get_operation_object=lambda r, keywords: get_dataset_document_operation_object( + get_dataset_operation_object(keywords.get('dataset_id')), + get_document_operation_object(keywords.get('document_id')) + ) + ) def put(self, request: Request, dataset_id: str, document_id: str): return result.success( DocumentSerializers.Operate(data={'document_id': document_id, 'dataset_id': dataset_id}).refresh( + request.data.get('state_list') )) + class BatchRefresh(APIView): + authentication_classes = [TokenAuth] + + @action(methods=['PUT'], detail=False) + @swagger_auto_schema(operation_summary=_('Batch refresh document vector library'), + operation_id=_('Batch refresh document vector library'), + request_body= + DocumentApi.BatchEditHitHandlingApi.get_request_body_api(), + manual_parameters=DocumentSerializers.Create.get_request_params_api(), + 
responses=result.get_default_response(), + tags=[_('Knowledge Base/Documentation')]) + @has_permissions( + lambda r, k: Permission(group=Group.DATASET, operate=Operate.MANAGE, + dynamic_tag=k.get('dataset_id'))) + @log(menu='document', operate="Batch refresh document vector library", + get_operation_object=lambda r, keywords: get_dataset_document_operation_object( + get_dataset_operation_object(keywords.get('dataset_id')), + get_document_operation_object_batch(r.data.get('id_list')) + ) + ) + def put(self, request: Request, dataset_id: str): + return result.success( + DocumentSerializers.Batch(data={'dataset_id': dataset_id}).batch_refresh(request.data)) + class Migrate(APIView): authentication_classes = [TokenAuth] @action(methods=['PUT'], detail=False) - @swagger_auto_schema(operation_summary="批量迁移文档", - operation_id="批量迁移文档", + @swagger_auto_schema(operation_summary=_('Migrate documents in batches'), + operation_id=_('Migrate documents in batches'), manual_parameters=DocumentSerializers.Migrate.get_request_params_api(), request_body=DocumentSerializers.Migrate.get_request_body_api(), responses=result.get_api_response(DocumentSerializers.Operate.get_response_body_api()), - tags=["知识库/文档"] + tags=[_('Knowledge Base/Documentation')] ) @has_permissions( lambda r, k: Permission(group=Group.DATASET, operate=Operate.MANAGE, @@ -172,6 +382,12 @@ class Migrate(APIView): dynamic_tag=k.get('target_dataset_id')), compare=CompareConstants.AND ) + @log(menu='document', operate="Migrate documents in batches", + get_operation_object=lambda r, keywords: get_dataset_document_operation_object( + get_dataset_operation_object(keywords.get('dataset_id')), + get_document_operation_object_batch(r.data) + ) + ) def put(self, request: Request, dataset_id: str, target_dataset_id: str): return result.success( DocumentSerializers.Migrate( @@ -180,15 +396,55 @@ def put(self, request: Request, dataset_id: str, target_dataset_id: str): )) + class Export(APIView): + authentication_classes = 
[TokenAuth] + + @action(methods=['GET'], detail=False) + @swagger_auto_schema(operation_summary=_('Export document'), + operation_id=_('Export document'), + manual_parameters=DocumentSerializers.Operate.get_request_params_api(), + tags=[_('Knowledge Base/Documentation')]) + @has_permissions( + lambda r, k: Permission(group=Group.DATASET, operate=Operate.MANAGE, + dynamic_tag=k.get('dataset_id'))) + @log(menu='document', operate="Export document", + get_operation_object=lambda r, keywords: get_dataset_document_operation_object( + get_dataset_operation_object(keywords.get('dataset_id')), + get_document_operation_object(keywords.get('document_id')) + ) + ) + def get(self, request: Request, dataset_id: str, document_id: str): + return DocumentSerializers.Operate(data={'document_id': document_id, 'dataset_id': dataset_id}).export() + + class ExportZip(APIView): + authentication_classes = [TokenAuth] + + @action(methods=['GET'], detail=False) + @swagger_auto_schema(operation_summary=_('Export Zip document'), + operation_id=_('Export Zip document'), + manual_parameters=DocumentSerializers.Operate.get_request_params_api(), + tags=[_('Knowledge Base/Documentation')]) + @has_permissions( + lambda r, k: Permission(group=Group.DATASET, operate=Operate.MANAGE, + dynamic_tag=k.get('dataset_id'))) + @log(menu='document', operate="Export Zip document", + get_operation_object=lambda r, keywords: get_dataset_document_operation_object( + get_dataset_operation_object(keywords.get('dataset_id')), + get_document_operation_object(keywords.get('document_id')) + ) + ) + def get(self, request: Request, dataset_id: str, document_id: str): + return DocumentSerializers.Operate(data={'document_id': document_id, 'dataset_id': dataset_id}).export_zip() + class Operate(APIView): authentication_classes = [TokenAuth] @action(methods=['GET'], detail=False) - @swagger_auto_schema(operation_summary="获取文档详情", - operation_id="获取文档详情", + @swagger_auto_schema(operation_summary=_('Get document details'), + 
operation_id=_('Get document details'), manual_parameters=DocumentSerializers.Operate.get_request_params_api(), responses=result.get_api_response(DocumentSerializers.Operate.get_response_body_api()), - tags=["知识库/文档"]) + tags=[_('Knowledge Base/Documentation')]) @has_permissions( lambda r, k: Permission(group=Group.DATASET, operate=Operate.USE, dynamic_tag=k.get('dataset_id'))) @@ -198,16 +454,22 @@ def get(self, request: Request, dataset_id: str, document_id: str): return result.success(operate.one()) @action(methods=['PUT'], detail=False) - @swagger_auto_schema(operation_summary="修改文档", - operation_id="修改文档", + @swagger_auto_schema(operation_summary=_('Modify document'), + operation_id=_('Modify document'), manual_parameters=DocumentSerializers.Operate.get_request_params_api(), request_body=DocumentSerializers.Operate.get_request_body_api(), responses=result.get_api_response(DocumentSerializers.Operate.get_response_body_api()), - tags=["知识库/文档"] + tags=[_('Knowledge Base/Documentation')] ) @has_permissions( lambda r, k: Permission(group=Group.DATASET, operate=Operate.MANAGE, dynamic_tag=k.get('dataset_id'))) + @log(menu='document', operate="Modify document", + get_operation_object=lambda r, keywords: get_dataset_document_operation_object( + get_dataset_operation_object(keywords.get('dataset_id')), + get_document_operation_object(keywords.get('document_id')) + ) + ) def put(self, request: Request, dataset_id: str, document_id: str): return result.success( DocumentSerializers.Operate(data={'document_id': document_id, 'dataset_id': dataset_id}).edit( @@ -215,37 +477,44 @@ def put(self, request: Request, dataset_id: str, document_id: str): with_valid=True)) @action(methods=['DELETE'], detail=False) - @swagger_auto_schema(operation_summary="删除文档", - operation_id="删除文档", + @swagger_auto_schema(operation_summary=_('Delete document'), + operation_id=_('Delete document'), manual_parameters=DocumentSerializers.Operate.get_request_params_api(), 
responses=result.get_default_response(), - tags=["知识库/文档"]) + tags=[_('Knowledge Base/Documentation')]) @has_permissions( lambda r, k: Permission(group=Group.DATASET, operate=Operate.MANAGE, dynamic_tag=k.get('dataset_id'))) + @log(menu='document', operate="Delete document", + get_operation_object=lambda r, keywords: get_dataset_document_operation_object( + get_dataset_operation_object(keywords.get('dataset_id')), + get_document_operation_object(keywords.get('document_id')) + ) + ) def delete(self, request: Request, dataset_id: str, document_id: str): operate = DocumentSerializers.Operate(data={'document_id': document_id, 'dataset_id': dataset_id}) operate.is_valid(raise_exception=True) return result.success(operate.delete()) class SplitPattern(APIView): + authentication_classes = [TokenAuth] + @action(methods=['GET'], detail=False) - @swagger_auto_schema(operation_summary="获取分段标识列表", - operation_id="获取分段标识列表", - tags=["知识库/文档"], - security=[]) + @swagger_auto_schema(operation_summary=_('Get a list of segment IDs'), + operation_id=_('Get a list of segment IDs'), + tags=[_('Knowledge Base/Documentation')]) def get(self, request: Request): return result.success(DocumentSerializers.SplitPattern.list()) class Split(APIView): + authentication_classes = [TokenAuth] parser_classes = [MultiPartParser] @action(methods=['POST'], detail=False) - @swagger_auto_schema(operation_summary="分段文档", - operation_id="分段文档", + @swagger_auto_schema(operation_summary=_('Segmented document'), + operation_id=_('Segmented document'), manual_parameters=DocumentSerializers.Split.get_request_params_api(), - tags=["知识库/文档"], - security=[]) + tags=[_('Knowledge Base/Documentation')]) def post(self, request: Request): split_data = {'file': request.FILES.getlist('file')} request_data = request.data @@ -265,11 +534,11 @@ class Page(APIView): authentication_classes = [TokenAuth] @action(methods=['GET'], detail=False) - @swagger_auto_schema(operation_summary="获取知识库分页列表", - operation_id="获取知识库分页列表", + 
@swagger_auto_schema(operation_summary=_('Get the knowledge base paginated list'), + operation_id=_('Get the knowledge base paginated list'), manual_parameters=DocumentSerializers.Query.get_request_params_api(), responses=result.get_page_api_response(DocumentSerializers.Query.get_response_body_api()), - tags=["知识库/文档"]) + tags=[_('Knowledge Base/Documentation')]) @has_permissions( lambda r, k: Permission(group=Group.DATASET, operate=Operate.USE, dynamic_tag=k.get('dataset_id'))) @@ -278,3 +547,20 @@ def get(self, request: Request, dataset_id: str, current_page, page_size): data={**query_params_to_single_dict(request.query_params), 'dataset_id': dataset_id}) d.is_valid(raise_exception=True) return result.success(d.page(current_page, page_size)) + + class BatchGenerateRelated(APIView): + authentication_classes = [TokenAuth] + + @action(methods=['PUT'], detail=False) + @has_permissions( + lambda r, k: Permission(group=Group.DATASET, operate=Operate.MANAGE, + dynamic_tag=k.get('dataset_id'))) + @log(menu='document', operate="Batch generate related documents", + get_operation_object=lambda r, keywords: get_dataset_document_operation_object( + get_dataset_operation_object(keywords.get('dataset_id')), + get_document_operation_object_batch(r.data.get('document_id_list')) + ) + ) + def put(self, request: Request, dataset_id: str): + return result.success(DocumentSerializers.BatchGenerateRelated(data={'dataset_id': dataset_id}) + .batch_generate_related(request.data)) diff --git a/apps/dataset/views/file.py b/apps/dataset/views/file.py new file mode 100644 index 00000000000..f2e1454bb6b --- /dev/null +++ b/apps/dataset/views/file.py @@ -0,0 +1,46 @@ +# coding=utf-8 +""" + @project: maxkb + @Author:虎 + @file: image.py + @date:2024/4/22 16:23 + @desc: +""" +from drf_yasg import openapi +from drf_yasg.utils import swagger_auto_schema +from rest_framework.decorators import action +from rest_framework.parsers import MultiPartParser +from rest_framework.views import APIView +from 
rest_framework.views import Request + +from common.auth import TokenAuth +from common.log.log import log +from common.response import result +from dataset.serializers.file_serializers import FileSerializer +from django.utils.translation import gettext_lazy as _ + + +class FileView(APIView): + authentication_classes = [TokenAuth] + parser_classes = [MultiPartParser] + + @action(methods=['POST'], detail=False) + @swagger_auto_schema(operation_summary=_('Upload file'), + operation_id=_('Upload file'), + manual_parameters=[openapi.Parameter(name='file', + in_=openapi.IN_FORM, + type=openapi.TYPE_FILE, + required=True, + description=_('Upload file'))], + tags=[_('file')]) + @log(menu='file', operate='Upload file') + def post(self, request: Request): + return result.success(FileSerializer(data={'file': request.FILES.get('file')}).upload()) + + class Operate(APIView): + @action(methods=['GET'], detail=False) + @swagger_auto_schema(operation_summary=_('Get file'), + operation_id=_('Get file'), + tags=[_('file')]) + def get(self, request: Request, file_id: str): + return FileSerializer.Operate(data={'id': file_id}).get() diff --git a/apps/dataset/views/image.py b/apps/dataset/views/image.py index 124336f877b..a165dbc1bc5 100644 --- a/apps/dataset/views/image.py +++ b/apps/dataset/views/image.py @@ -14,8 +14,10 @@ from rest_framework.views import Request from common.auth import TokenAuth +from common.log.log import log from common.response import result from dataset.serializers.image_serializers import ImageSerializer +from django.utils.translation import gettext_lazy as _ class Image(APIView): @@ -23,21 +25,21 @@ class Image(APIView): parser_classes = [MultiPartParser] @action(methods=['POST'], detail=False) - @swagger_auto_schema(operation_summary="上传图片", - operation_id="上传图片", + @swagger_auto_schema(operation_summary=_('Upload image'), + operation_id=_('Upload image'), manual_parameters=[openapi.Parameter(name='file', in_=openapi.IN_FORM, type=openapi.TYPE_FILE, 
required=True, - description='上传文件')], - tags=["图片"]) + description=_('Upload image'))], + tags=[_('Image')]) def post(self, request: Request): return result.success(ImageSerializer(data={'image': request.FILES.get('file')}).upload()) class Operate(APIView): @action(methods=['GET'], detail=False) - @swagger_auto_schema(operation_summary="获取图片", - operation_id="获取图片", - tags=["图片"]) + @swagger_auto_schema(operation_summary=_('Get Image'), + operation_id=_('Get Image'), + tags=[_('Image')]) def get(self, request: Request, image_id: str): return ImageSerializer.Operate(data={'id': image_id}).get() diff --git a/apps/dataset/views/paragraph.py b/apps/dataset/views/paragraph.py index af968b8ab27..965f22f18bb 100644 --- a/apps/dataset/views/paragraph.py +++ b/apps/dataset/views/paragraph.py @@ -13,21 +13,26 @@ from common.auth import TokenAuth, has_permissions from common.constants.permission_constants import Permission, Group, Operate, CompareConstants +from common.log.log import log from common.response import result from common.util.common import query_params_to_single_dict from dataset.serializers.common_serializers import BatchSerializer from dataset.serializers.paragraph_serializers import ParagraphSerializers +from django.utils.translation import gettext_lazy as _ + +from dataset.views import get_dataset_document_operation_object, get_dataset_operation_object, \ + get_document_operation_object class Paragraph(APIView): authentication_classes = [TokenAuth] @action(methods=['GET'], detail=False) - @swagger_auto_schema(operation_summary="段落列表", - operation_id="段落列表", + @swagger_auto_schema(operation_summary=_('Paragraph list'), + operation_id=_('Paragraph list'), manual_parameters=ParagraphSerializers.Query.get_request_params_api(), responses=result.get_api_array_response(ParagraphSerializers.Query.get_response_body_api()), - tags=["知识库/文档/段落"] + tags=[_('Knowledge Base/Documentation/Paragraph')] ) @has_permissions( lambda r, k: Permission(group=Group.DATASET, 
operate=Operate.USE, @@ -40,15 +45,21 @@ def get(self, request: Request, dataset_id: str, document_id: str): return result.success(q.list()) @action(methods=['POST'], detail=False) - @swagger_auto_schema(operation_summary="创建段落", - operation_id="创建段落", + @swagger_auto_schema(operation_summary=_('Create Paragraph'), + operation_id=_('Create Paragraph'), manual_parameters=ParagraphSerializers.Create.get_request_params_api(), request_body=ParagraphSerializers.Create.get_request_body_api(), responses=result.get_api_response(ParagraphSerializers.Query.get_response_body_api()), - tags=["知识库/文档/段落"]) + tags=[_('Knowledge Base/Documentation/Paragraph')]) @has_permissions( lambda r, k: Permission(group=Group.DATASET, operate=Operate.MANAGE, dynamic_tag=k.get('dataset_id'))) + @log(menu='Paragraph', operate='Create Paragraph', + get_operation_object=lambda r, keywords: get_dataset_document_operation_object( + get_dataset_operation_object(keywords.get('dataset_id')), + get_document_operation_object(keywords.get('document_id')) + ) + ) def post(self, request: Request, dataset_id: str, document_id: str): return result.success( ParagraphSerializers.Create(data={'dataset_id': dataset_id, 'document_id': document_id}).save(request.data)) @@ -57,27 +68,33 @@ class Problem(APIView): authentication_classes = [TokenAuth] @action(methods=['POST'], detail=False) - @swagger_auto_schema(operation_summary="添加关联问题", - operation_id="添加段落关联问题", + @swagger_auto_schema(operation_summary=_('Add associated questions'), + operation_id=_('Add associated questions'), manual_parameters=ParagraphSerializers.Problem.get_request_params_api(), request_body=ParagraphSerializers.Problem.get_request_body_api(), responses=result.get_api_response(ParagraphSerializers.Problem.get_response_body_api()), - tags=["知识库/文档/段落"]) + tags=[_('Knowledge Base/Documentation/Paragraph')]) @has_permissions( lambda r, k: Permission(group=Group.DATASET, operate=Operate.MANAGE, dynamic_tag=k.get('dataset_id'))) + 
@log(menu='Paragraph', operate='Add associated questions', + get_operation_object=lambda r, keywords: get_dataset_document_operation_object( + get_dataset_operation_object(keywords.get('dataset_id')), + get_document_operation_object(keywords.get('document_id')) + ) + ) def post(self, request: Request, dataset_id: str, document_id: str, paragraph_id: str): return result.success(ParagraphSerializers.Problem( data={"dataset_id": dataset_id, 'document_id': document_id, 'paragraph_id': paragraph_id}).save( request.data, with_valid=True)) @action(methods=['GET'], detail=False) - @swagger_auto_schema(operation_summary="获取段落问题列表", - operation_id="获取段落问题列表", + @swagger_auto_schema(operation_summary=_('Get a list of paragraph questions'), + operation_id=_('Get a list of paragraph questions'), manual_parameters=ParagraphSerializers.Problem.get_request_params_api(), responses=result.get_api_array_response( ParagraphSerializers.Problem.get_response_body_api()), - tags=["知识库/文档/段落"]) + tags=[_('Knowledge Base/Documentation/Paragraph')]) @has_permissions( lambda r, k: Permission(group=Group.DATASET, operate=Operate.USE, dynamic_tag=k.get('dataset_id'))) @@ -90,14 +107,20 @@ class UnAssociation(APIView): authentication_classes = [TokenAuth] @action(methods=['PUT'], detail=False) - @swagger_auto_schema(operation_summary="解除关联问题", - operation_id="解除关联问题", + @swagger_auto_schema(operation_summary=_('Disassociation issue'), + operation_id=_('Disassociation issue'), manual_parameters=ParagraphSerializers.Association.get_request_params_api(), responses=result.get_default_response(), - tags=["知识库/文档/段落"]) + tags=[_('Knowledge Base/Documentation/Paragraph')]) @has_permissions( lambda r, k: Permission(group=Group.DATASET, operate=Operate.MANAGE, dynamic_tag=k.get('dataset_id'))) + @log(menu='Paragraph', operate='Disassociation issue', + get_operation_object=lambda r, keywords: get_dataset_document_operation_object( + get_dataset_operation_object(keywords.get('dataset_id')), + 
get_document_operation_object(keywords.get('document_id')) + ) + ) def put(self, request: Request, dataset_id: str, document_id: str, paragraph_id: str, problem_id: str): return result.success(ParagraphSerializers.Association( data={'dataset_id': dataset_id, 'document_id': document_id, 'paragraph_id': paragraph_id, @@ -107,14 +130,20 @@ class Association(APIView): authentication_classes = [TokenAuth] @action(methods=['PUT'], detail=False) - @swagger_auto_schema(operation_summary="关联问题", - operation_id="关联问题", + @swagger_auto_schema(operation_summary=_('Related questions'), + operation_id=_('Related questions'), manual_parameters=ParagraphSerializers.Association.get_request_params_api(), responses=result.get_default_response(), - tags=["知识库/文档/段落"]) + tags=[_('Knowledge Base/Documentation/Paragraph')]) @has_permissions( lambda r, k: Permission(group=Group.DATASET, operate=Operate.MANAGE, dynamic_tag=k.get('dataset_id'))) + @log(menu='Paragraph', operate='Related questions', + get_operation_object=lambda r, keywords: get_dataset_document_operation_object( + get_dataset_operation_object(keywords.get('dataset_id')), + get_document_operation_object(keywords.get('document_id')) + ) + ) def put(self, request: Request, dataset_id: str, document_id: str, paragraph_id: str, problem_id: str): return result.success(ParagraphSerializers.Association( data={'dataset_id': dataset_id, 'document_id': document_id, 'paragraph_id': paragraph_id, @@ -124,15 +153,21 @@ class Operate(APIView): authentication_classes = [TokenAuth] @action(methods=['UPDATE'], detail=False) - @swagger_auto_schema(operation_summary="修改段落数据", - operation_id="修改段落数据", + @swagger_auto_schema(operation_summary=_('Modify paragraph data'), + operation_id=_('Modify paragraph data'), manual_parameters=ParagraphSerializers.Operate.get_request_params_api(), request_body=ParagraphSerializers.Operate.get_request_body_api(), responses=result.get_api_response(ParagraphSerializers.Operate.get_response_body_api()) - , 
tags=["知识库/文档/段落"]) + , tags=[_('Knowledge Base/Documentation/Paragraph')]) @has_permissions( lambda r, k: Permission(group=Group.DATASET, operate=Operate.MANAGE, dynamic_tag=k.get('dataset_id'))) + @log(menu='Paragraph', operate='Modify paragraph data', + get_operation_object=lambda r, keywords: get_dataset_document_operation_object( + get_dataset_operation_object(keywords.get('dataset_id')), + get_document_operation_object(keywords.get('document_id')) + ) + ) def put(self, request: Request, dataset_id: str, document_id: str, paragraph_id: str): o = ParagraphSerializers.Operate( data={"paragraph_id": paragraph_id, 'dataset_id': dataset_id, 'document_id': document_id}) @@ -140,11 +175,11 @@ def put(self, request: Request, dataset_id: str, document_id: str, paragraph_id: return result.success(o.edit(request.data)) @action(methods=['UPDATE'], detail=False) - @swagger_auto_schema(operation_summary="获取段落详情", - operation_id="获取段落详情", + @swagger_auto_schema(operation_summary=_('Get paragraph details'), + operation_id=_('Get paragraph details'), manual_parameters=ParagraphSerializers.Operate.get_request_params_api(), responses=result.get_api_response(ParagraphSerializers.Operate.get_response_body_api()), - tags=["知识库/文档/段落"]) + tags=[_('Knowledge Base/Documentation/Paragraph')]) @has_permissions( lambda r, k: Permission(group=Group.DATASET, operate=Operate.USE, dynamic_tag=k.get('dataset_id'))) @@ -155,14 +190,20 @@ def get(self, request: Request, dataset_id: str, document_id: str, paragraph_id: return result.success(o.one()) @action(methods=['DELETE'], detail=False) - @swagger_auto_schema(operation_summary="删除段落", - operation_id="删除段落", + @swagger_auto_schema(operation_summary=_('Delete paragraph'), + operation_id=_('Delete paragraph'), manual_parameters=ParagraphSerializers.Operate.get_request_params_api(), responses=result.get_default_response(), - tags=["知识库/文档/段落"]) + tags=[_('Knowledge Base/Documentation/Paragraph')]) @has_permissions( lambda r, k: 
Permission(group=Group.DATASET, operate=Operate.MANAGE, dynamic_tag=k.get('dataset_id'))) + @log(menu='Paragraph', operate='Delete paragraph', + get_operation_object=lambda r, keywords: get_dataset_document_operation_object( + get_dataset_operation_object(keywords.get('dataset_id')), + get_document_operation_object(keywords.get('document_id')) + ) + ) def delete(self, request: Request, dataset_id: str, document_id: str, paragraph_id: str): o = ParagraphSerializers.Operate( data={"dataset_id": dataset_id, 'document_id': document_id, "paragraph_id": paragraph_id}) @@ -173,16 +214,22 @@ class Batch(APIView): authentication_classes = [TokenAuth] @action(methods=['DELETE'], detail=False) - @swagger_auto_schema(operation_summary="批量删除段落", - operation_id="批量删除段落", + @swagger_auto_schema(operation_summary=_('Delete paragraphs in batches'), + operation_id=_('Delete paragraphs in batches'), request_body= BatchSerializer.get_request_body_api(), manual_parameters=ParagraphSerializers.Create.get_request_params_api(), responses=result.get_default_response(), - tags=["知识库/文档/段落"]) + tags=[_('Knowledge Base/Documentation/Paragraph')]) @has_permissions( lambda r, k: Permission(group=Group.DATASET, operate=Operate.MANAGE, dynamic_tag=k.get('dataset_id'))) + @log(menu='Paragraph', operate='Delete paragraphs in batches', + get_operation_object=lambda r, keywords: get_dataset_document_operation_object( + get_dataset_operation_object(keywords.get('dataset_id')), + get_document_operation_object(keywords.get('document_id')) + ) + ) def delete(self, request: Request, dataset_id: str, document_id: str): return result.success(ParagraphSerializers.Batch( data={"dataset_id": dataset_id, 'document_id': document_id}).batch_delete(request.data)) @@ -191,12 +238,12 @@ class BatchMigrate(APIView): authentication_classes = [TokenAuth] @action(methods=['PUT'], detail=False) - @swagger_auto_schema(operation_summary="批量迁移段落", - operation_id="批量迁移段落", + @swagger_auto_schema(operation_summary=_('Migrate 
paragraphs in batches'), + operation_id=_('Migrate paragraphs in batches'), manual_parameters=ParagraphSerializers.Migrate.get_request_params_api(), request_body=ParagraphSerializers.Migrate.get_request_body_api(), responses=result.get_default_response(), - tags=["知识库/文档/段落"] + tags=[_('Knowledge Base/Documentation/Paragraph')] ) @has_permissions( lambda r, k: Permission(group=Group.DATASET, operate=Operate.MANAGE, @@ -205,6 +252,12 @@ class BatchMigrate(APIView): dynamic_tag=k.get('target_dataset_id')), compare=CompareConstants.AND ) + @log(menu='Paragraph', operate='Migrate paragraphs in batches', + get_operation_object=lambda r, keywords: get_dataset_document_operation_object( + get_dataset_operation_object(keywords.get('dataset_id')), + get_document_operation_object(keywords.get('document_id')) + ) + ) def put(self, request: Request, dataset_id: str, target_dataset_id: str, document_id: str, target_document_id): return result.success( ParagraphSerializers.Migrate( @@ -217,12 +270,12 @@ class Page(APIView): authentication_classes = [TokenAuth] @action(methods=['GET'], detail=False) - @swagger_auto_schema(operation_summary="分页获取段落列表", - operation_id="分页获取段落列表", + @swagger_auto_schema(operation_summary=_('Get paragraph list by pagination'), + operation_id=_('Get paragraph list by pagination'), manual_parameters=result.get_page_request_params( ParagraphSerializers.Query.get_request_params_api()), responses=result.get_page_api_response(ParagraphSerializers.Query.get_response_body_api()), - tags=["知识库/文档/段落"]) + tags=[_('Knowledge Base/Documentation/Paragraph')]) @has_permissions( lambda r, k: Permission(group=Group.DATASET, operate=Operate.USE, dynamic_tag=k.get('dataset_id'))) @@ -232,3 +285,21 @@ def get(self, request: Request, dataset_id: str, document_id: str, current_page, 'document_id': document_id}) d.is_valid(raise_exception=True) return result.success(d.page(current_page, page_size)) + + class BatchGenerateRelated(APIView): + authentication_classes = 
[TokenAuth] + + @action(methods=['PUT'], detail=False) + @has_permissions( + lambda r, k: Permission(group=Group.DATASET, operate=Operate.MANAGE, + dynamic_tag=k.get('dataset_id'))) + @log(menu='Paragraph', operate='Batch generate related', + get_operation_object=lambda r, keywords: get_dataset_document_operation_object( + get_dataset_operation_object(keywords.get('dataset_id')), + get_document_operation_object(keywords.get('document_id')) + ) + ) + def put(self, request: Request, dataset_id: str, document_id: str): + return result.success( + ParagraphSerializers.BatchGenerateRelated(data={'dataset_id': dataset_id, 'document_id': document_id}) + .batch_generate_related(request.data)) diff --git a/apps/dataset/views/problem.py b/apps/dataset/views/problem.py index beebcc67318..3619530f15f 100644 --- a/apps/dataset/views/problem.py +++ b/apps/dataset/views/problem.py @@ -13,21 +13,25 @@ from common.auth import TokenAuth, has_permissions from common.constants.permission_constants import Permission, Group, Operate +from common.log.log import log from common.response import result from common.util.common import query_params_to_single_dict from dataset.serializers.problem_serializers import ProblemSerializers from dataset.swagger_api.problem_api import ProblemApi +from django.utils.translation import gettext_lazy as _ + +from dataset.views import get_dataset_operation_object class Problem(APIView): authentication_classes = [TokenAuth] @action(methods=['GET'], detail=False) - @swagger_auto_schema(operation_summary="问题列表", - operation_id="问题列表", + @swagger_auto_schema(operation_summary=_('Question list'), + operation_id=_('Question list'), manual_parameters=ProblemApi.Query.get_request_params_api(), responses=result.get_api_array_response(ProblemApi.get_response_body_api()), - tags=["知识库/文档/段落/问题"] + tags=[_('Knowledge Base/Documentation/Paragraph/Question')] ) @has_permissions( lambda r, k: Permission(group=Group.DATASET, operate=Operate.USE, @@ -39,15 +43,18 @@ def 
get(self, request: Request, dataset_id: str): return result.success(q.list()) @action(methods=['POST'], detail=False) - @swagger_auto_schema(operation_summary="创建问题", - operation_id="创建问题", + @swagger_auto_schema(operation_summary=_('Create question'), + operation_id=_('Create question'), manual_parameters=ProblemApi.BatchCreate.get_request_params_api(), request_body=ProblemApi.BatchCreate.get_request_body_api(), responses=result.get_api_response(ProblemApi.Query.get_response_body_api()), - tags=["知识库/文档/段落/问题"]) + tags=[_('Knowledge Base/Documentation/Paragraph/Question')]) @has_permissions( lambda r, k: Permission(group=Group.DATASET, operate=Operate.MANAGE, dynamic_tag=k.get('dataset_id'))) + @log(menu='problem', operate='Create question', + get_operation_object=lambda r, keywords: get_dataset_operation_object(keywords.get('dataset_id')) + ) def post(self, request: Request, dataset_id: str): return result.success( ProblemSerializers.Create( @@ -57,11 +64,11 @@ class Paragraph(APIView): authentication_classes = [TokenAuth] @action(methods=['GET'], detail=False) - @swagger_auto_schema(operation_summary="获取关联段落列表", - operation_id="获取关联段落列表", + @swagger_auto_schema(operation_summary=_('Get a list of associated paragraphs'), + operation_id=_('Get a list of associated paragraphs'), manual_parameters=ProblemApi.Paragraph.get_request_params_api(), responses=result.get_api_array_response(ProblemApi.Paragraph.get_response_body_api()), - tags=["知识库/文档/段落/问题"]) + tags=[_('Knowledge Base/Documentation/Paragraph/Question')]) @has_permissions( lambda r, k: Permission(group=Group.DATASET, operate=Operate.USE, dynamic_tag=k.get('dataset_id'))) @@ -74,47 +81,69 @@ class OperateBatch(APIView): authentication_classes = [TokenAuth] @action(methods=['DELETE'], detail=False) - @swagger_auto_schema(operation_summary="批量删除问题", - operation_id="批量删除问题", + @swagger_auto_schema(operation_summary=_('Batch deletion issues'), + operation_id=_('Batch deletion issues'), request_body= 
ProblemApi.BatchOperate.get_request_body_api(), manual_parameters=ProblemApi.BatchOperate.get_request_params_api(), responses=result.get_default_response(), - tags=["知识库/文档/段落/问题"]) + tags=[_('Knowledge Base/Documentation/Paragraph/Question')]) @has_permissions( lambda r, k: Permission(group=Group.DATASET, operate=Operate.MANAGE, dynamic_tag=k.get('dataset_id'))) + @log(menu='problem', operate='Batch deletion issues', + get_operation_object=lambda r, keywords: get_dataset_operation_object(keywords.get('dataset_id'))) def delete(self, request: Request, dataset_id: str): return result.success( ProblemSerializers.BatchOperate(data={'dataset_id': dataset_id}).delete(request.data)) + @action(methods=['POST'], detail=False) + @swagger_auto_schema(operation_summary=_('Batch associated paragraphs'), + operation_id=_('Batch associated paragraphs'), + request_body=ProblemApi.BatchAssociation.get_request_body_api(), + manual_parameters=ProblemApi.BatchOperate.get_request_params_api(), + responses=result.get_default_response(), + tags=[_('Knowledge Base/Documentation/Paragraph/Question')]) + @has_permissions( + lambda r, k: Permission(group=Group.DATASET, operate=Operate.MANAGE, + dynamic_tag=k.get('dataset_id'))) + @log(menu='problem', operate='Batch associated paragraphs', + get_operation_object=lambda r, keywords: get_dataset_operation_object(keywords.get('dataset_id'))) + def post(self, request: Request, dataset_id: str): + return result.success( + ProblemSerializers.BatchOperate(data={'dataset_id': dataset_id}).association(request.data)) + class Operate(APIView): authentication_classes = [TokenAuth] @action(methods=['DELETE'], detail=False) - @swagger_auto_schema(operation_summary="删除问题", - operation_id="删除问题", + @swagger_auto_schema(operation_summary=_('Delete question'), + operation_id=_('Delete question'), manual_parameters=ProblemApi.Operate.get_request_params_api(), responses=result.get_default_response(), - tags=["知识库/文档/段落/问题"]) + tags=[_('Knowledge 
Base/Documentation/Paragraph/Question')]) @has_permissions( lambda r, k: Permission(group=Group.DATASET, operate=Operate.MANAGE, dynamic_tag=k.get('dataset_id'))) + @log(menu='problem', operate='Delete question', + get_operation_object=lambda r, keywords: get_dataset_operation_object(keywords.get('dataset_id'))) def delete(self, request: Request, dataset_id: str, problem_id: str): return result.success(ProblemSerializers.Operate( data={**query_params_to_single_dict(request.query_params), 'dataset_id': dataset_id, 'problem_id': problem_id}).delete()) @action(methods=['PUT'], detail=False) - @swagger_auto_schema(operation_summary="修改问题", - operation_id="修改问题", + @swagger_auto_schema(operation_summary=_('Modify question'), + operation_id=_('Modify question'), manual_parameters=ProblemApi.Operate.get_request_params_api(), request_body=ProblemApi.Operate.get_request_body_api(), responses=result.get_api_response(ProblemApi.get_response_body_api()), - tags=["知识库/文档/段落/问题"]) + tags=[_('Knowledge Base/Documentation/Paragraph/Question')]) @has_permissions( lambda r, k: Permission(group=Group.DATASET, operate=Operate.MANAGE, dynamic_tag=k.get('dataset_id'))) + @log(menu='problem', operate='Modify question', + get_operation_object=lambda r, keywords: get_dataset_operation_object(keywords.get('dataset_id'))) def put(self, request: Request, dataset_id: str, problem_id: str): return result.success(ProblemSerializers.Operate( data={**query_params_to_single_dict(request.query_params), 'dataset_id': dataset_id, @@ -124,12 +153,12 @@ class Page(APIView): authentication_classes = [TokenAuth] @action(methods=['GET'], detail=False) - @swagger_auto_schema(operation_summary="分页获取问题列表", - operation_id="分页获取问题列表", + @swagger_auto_schema(operation_summary=_('Get the list of questions by page'), + operation_id=_('Get the list of questions by page'), manual_parameters=result.get_page_request_params( ProblemApi.Query.get_request_params_api()), 
responses=result.get_page_api_response(ProblemApi.get_response_body_api()), - tags=["知识库/文档/段落/问题"]) + tags=[_('Knowledge Base/Documentation/Paragraph/Question')]) @has_permissions( lambda r, k: Permission(group=Group.DATASET, operate=Operate.USE, dynamic_tag=k.get('dataset_id'))) diff --git a/apps/embedding/migrations/0003_alter_embedding_unique_together.py b/apps/embedding/migrations/0003_alter_embedding_unique_together.py new file mode 100644 index 00000000000..9cb45061bfa --- /dev/null +++ b/apps/embedding/migrations/0003_alter_embedding_unique_together.py @@ -0,0 +1,17 @@ +# Generated by Django 4.2.14 on 2024-07-23 18:14 + +from django.db import migrations + + +class Migration(migrations.Migration): + + dependencies = [ + ('embedding', '0002_embedding_search_vector'), + ] + + operations = [ + migrations.AlterUniqueTogether( + name='embedding', + unique_together=set(), + ), + ] diff --git a/apps/embedding/models/embedding.py b/apps/embedding/models/embedding.py index 24c78f41f05..5f954e36b6e 100644 --- a/apps/embedding/models/embedding.py +++ b/apps/embedding/models/embedding.py @@ -50,4 +50,3 @@ class Embedding(models.Model): class Meta: db_table = "embedding" - unique_together = ['source_id', 'source_type'] diff --git a/apps/embedding/task/__init__.py b/apps/embedding/task/__init__.py new file mode 100644 index 00000000000..e5e7dd3b408 --- /dev/null +++ b/apps/embedding/task/__init__.py @@ -0,0 +1 @@ +from .embedding import * diff --git a/apps/embedding/task/embedding.py b/apps/embedding/task/embedding.py new file mode 100644 index 00000000000..3b26bd7a1db --- /dev/null +++ b/apps/embedding/task/embedding.py @@ -0,0 +1,261 @@ +# coding=utf-8 +""" + @project: MaxKB + @Author:虎 + @file: embedding.py + @date:2024/8/19 14:13 + @desc: +""" +import logging +import traceback +from typing import List + +from celery_once import QueueOnce +from django.db.models import QuerySet + +from common.config.embedding_config import ModelManage +from common.event import 
ListenerManagement, UpdateProblemArgs, UpdateEmbeddingDatasetIdArgs, \ + UpdateEmbeddingDocumentIdArgs +from dataset.models import Document, TaskType, State +from ops import celery_app +from setting.models import Model +from setting.models_provider import get_model +from django.utils.translation import gettext_lazy as _ + +max_kb_error = logging.getLogger("max_kb_error") +max_kb = logging.getLogger("max_kb") + + +def get_embedding_model(model_id, exception_handler=lambda e: max_kb_error.error( + _('Failed to obtain vector model: {error} {traceback}').format( + error=str(e), + traceback=traceback.format_exc() + ))): + try: + model = QuerySet(Model).filter(id=model_id).first() + embedding_model = ModelManage.get_model(model_id, + lambda _id: get_model(model)) + except Exception as e: + exception_handler(e) + raise e + return embedding_model + + +@celery_app.task(base=QueueOnce, once={'keys': ['paragraph_id']}, name='celery:embedding_by_paragraph') +def embedding_by_paragraph(paragraph_id, model_id): + embedding_model = get_embedding_model(model_id) + ListenerManagement.embedding_by_paragraph(paragraph_id, embedding_model) + + +@celery_app.task(base=QueueOnce, once={'keys': ['paragraph_id_list']}, name='celery:embedding_by_paragraph_data_list') +def embedding_by_paragraph_data_list(data_list, paragraph_id_list, model_id): + embedding_model = get_embedding_model(model_id) + ListenerManagement.embedding_by_paragraph_data_list(data_list, paragraph_id_list, embedding_model) + + +@celery_app.task(base=QueueOnce, once={'keys': ['paragraph_id_list']}, name='celery:embedding_by_paragraph_list') +def embedding_by_paragraph_list(paragraph_id_list, model_id): + embedding_model = get_embedding_model(model_id) + ListenerManagement.embedding_by_paragraph_list(paragraph_id_list, embedding_model) + + +@celery_app.task(base=QueueOnce, once={'keys': ['document_id']}, name='celery:embedding_by_document') +def embedding_by_document(document_id, model_id, state_list=None): + """ + 向量化文档 + 
@param state_list: + @param document_id: 文档id + @param model_id 向量模型 + :return: None + """ + + if state_list is None: + state_list = [State.PENDING.value, State.STARTED.value, State.SUCCESS.value, State.FAILURE.value, + State.REVOKE.value, + State.REVOKED.value, State.IGNORED.value] + + def exception_handler(e): + ListenerManagement.update_status(QuerySet(Document).filter(id=document_id), TaskType.EMBEDDING, + State.FAILURE) + max_kb_error.error( + _('Failed to obtain vector model: {error} {traceback}').format( + error=str(e), + traceback=traceback.format_exc() + )) + + embedding_model = get_embedding_model(model_id, exception_handler) + ListenerManagement.embedding_by_document(document_id, embedding_model, state_list) + + +@celery_app.task(name='celery:embedding_by_document_list') +def embedding_by_document_list(document_id_list, model_id): + """ + 向量化文档 + @param document_id_list: 文档id列表 + @param model_id 向量模型 + :return: None + """ + for document_id in document_id_list: + embedding_by_document.delay(document_id, model_id) + + +@celery_app.task(base=QueueOnce, once={'keys': ['dataset_id']}, name='celery:embedding_by_dataset') +def embedding_by_dataset(dataset_id, model_id): + """ + 向量化知识库 + @param dataset_id: 知识库id + @param model_id 向量模型 + :return: None + """ + max_kb.info(_('Start--->Vectorized dataset: {dataset_id}').format(dataset_id=dataset_id)) + try: + ListenerManagement.delete_embedding_by_dataset(dataset_id) + document_list = QuerySet(Document).filter(dataset_id=dataset_id) + max_kb.info(_('Dataset documentation: {document_names}').format( + document_names=", ".join([d.name for d in document_list]))) + for document in document_list: + try: + embedding_by_document.delay(document.id, model_id) + except Exception as e: + pass + except Exception as e: + max_kb_error.error( + _('Vectorized dataset: {dataset_id} error {error} {traceback}'.format(dataset_id=dataset_id, + error=str(e), + traceback=traceback.format_exc()))) + finally: + 
max_kb.info(_('End--->Vectorized dataset: {dataset_id}').format(dataset_id=dataset_id)) + + +def embedding_by_problem(args, model_id): + """ + 向量话问题 + @param args: 问题对象 + @param model_id: 模型id + @return: + """ + embedding_model = get_embedding_model(model_id) + ListenerManagement.embedding_by_problem(args, embedding_model) + + +def embedding_by_data_list(args: List, model_id): + embedding_model = get_embedding_model(model_id) + ListenerManagement.embedding_by_data_list(args, embedding_model) + + +def delete_embedding_by_document(document_id): + """ + 删除指定文档id的向量 + @param document_id: 文档id + @return: None + """ + + ListenerManagement.delete_embedding_by_document(document_id) + + +def delete_embedding_by_document_list(document_id_list: List[str]): + """ + 删除指定文档列表的向量数据 + @param document_id_list: 文档id列表 + @return: None + """ + ListenerManagement.delete_embedding_by_document_list(document_id_list) + + +def delete_embedding_by_dataset(dataset_id): + """ + 删除指定数据集向量数据 + @param dataset_id: 数据集id + @return: None + """ + ListenerManagement.delete_embedding_by_dataset(dataset_id) + + +def delete_embedding_by_paragraph(paragraph_id): + """ + 删除指定段落的向量数据 + @param paragraph_id: 段落id + @return: None + """ + ListenerManagement.delete_embedding_by_paragraph(paragraph_id) + + +def delete_embedding_by_source(source_id): + """ + 删除指定资源id的向量数据 + @param source_id: 资源id + @return: None + """ + ListenerManagement.delete_embedding_by_source(source_id) + + +def disable_embedding_by_paragraph(paragraph_id): + """ + 禁用某个段落id的向量 + @param paragraph_id: 段落id + @return: None + """ + ListenerManagement.disable_embedding_by_paragraph(paragraph_id) + + +def enable_embedding_by_paragraph(paragraph_id): + """ + 开启某个段落id的向量数据 + @param paragraph_id: 段落id + @return: None + """ + ListenerManagement.enable_embedding_by_paragraph(paragraph_id) + + +def delete_embedding_by_source_ids(source_ids: List[str]): + """ + 删除向量根据source_id_list + @param source_ids: + @return: + """ + 
ListenerManagement.delete_embedding_by_source_ids(source_ids) + + +def update_problem_embedding(problem_id: str, problem_content: str, model_id): + """ + 更新问题 + @param problem_id: + @param problem_content: + @param model_id: + @return: + """ + model = get_embedding_model(model_id) + ListenerManagement.update_problem(UpdateProblemArgs(problem_id, problem_content, model)) + + +def update_embedding_dataset_id(paragraph_id_list, target_dataset_id): + """ + 修改向量数据到指定知识库 + @param paragraph_id_list: 指定段落的向量数据 + @param target_dataset_id: 知识库id + @return: + """ + + ListenerManagement.update_embedding_dataset_id( + UpdateEmbeddingDatasetIdArgs(paragraph_id_list, target_dataset_id)) + + +def delete_embedding_by_paragraph_ids(paragraph_ids: List[str]): + """ + 删除指定段落列表的向量数据 + @param paragraph_ids: 段落列表 + @return: None + """ + ListenerManagement.delete_embedding_by_paragraph_ids(paragraph_ids) + + +def update_embedding_document_id(paragraph_id_list, target_document_id, target_dataset_id, + target_embedding_model_id=None): + target_embedding_model = get_embedding_model( + target_embedding_model_id) if target_embedding_model_id is not None else None + ListenerManagement.update_embedding_document_id( + UpdateEmbeddingDocumentIdArgs(paragraph_id_list, target_document_id, target_dataset_id, target_embedding_model)) + + +def delete_embedding_by_dataset_id_list(dataset_id_list): + ListenerManagement.delete_embedding_by_dataset_id_list(dataset_id_list) diff --git a/apps/embedding/vector/base_vector.py b/apps/embedding/vector/base_vector.py index 2bfd0e977d5..0aadef6c183 100644 --- a/apps/embedding/vector/base_vector.py +++ b/apps/embedding/vector/base_vector.py @@ -8,17 +8,31 @@ """ import threading from abc import ABC, abstractmethod +from functools import reduce from typing import List, Dict -from langchain_community.embeddings import HuggingFaceEmbeddings +from langchain_core.embeddings import Embeddings -from common.config.embedding_config import EmbeddingModel +from common.chunk 
import text_to_chunk from common.util.common import sub_array from embedding.models import SourceType, SearchMode lock = threading.Lock() +def chunk_data(data: Dict): + if str(data.get('source_type')) == SourceType.PARAGRAPH.value: + text = data.get('text') + chunk_list = text_to_chunk(text) + return [{**data, 'text': chunk} for chunk in chunk_list] + return [data] + + +def chunk_data_list(data_list: List[Dict]): + result = [chunk_data(data) for data in data_list] + return reduce(lambda x, y: [*x, *y], result, []) + + class BaseVectorStore(ABC): vector_exists = False @@ -51,7 +65,7 @@ def save_pre_handler(self): def save(self, text, source_type: SourceType, dataset_id: str, document_id: str, paragraph_id: str, source_id: str, is_active: bool, - embedding=None): + embedding: Embeddings): """ 插入向量数据 :param source_id: 资源id @@ -64,52 +78,50 @@ def save(self, text, source_type: SourceType, dataset_id: str, document_id: str, :param paragraph_id 段落id :return: bool """ - - if embedding is None: - embedding = EmbeddingModel.get_embedding_model() self.save_pre_handler() - self._save(text, source_type, dataset_id, document_id, paragraph_id, source_id, is_active, embedding) - - def batch_save(self, data_list: List[Dict], embedding=None): - # 获取锁 - lock.acquire() - try: - """ - 批量插入 - :param data_list: 数据列表 - :param embedding: 向量化处理器 - :return: bool - """ - if embedding is None: - embedding = EmbeddingModel.get_embedding_model() - self.save_pre_handler() - result = sub_array(data_list) - for child_array in result: - self._batch_save(child_array, embedding) - finally: - # 释放锁 - lock.release() - return True + data = {'document_id': document_id, 'paragraph_id': paragraph_id, 'dataset_id': dataset_id, + 'is_active': is_active, 'source_id': source_id, 'source_type': source_type, 'text': text} + chunk_list = chunk_data(data) + result = sub_array(chunk_list) + for child_array in result: + self._batch_save(child_array, embedding, lambda: False) + + def batch_save(self, data_list: 
List[Dict], embedding: Embeddings, is_the_task_interrupted): + """ + 批量插入 + @param data_list: 数据列表 + @param embedding: 向量化处理器 + @param is_the_task_interrupted: 判断是否中断任务 + :return: bool + """ + self.save_pre_handler() + chunk_list = chunk_data_list(data_list) + result = sub_array(chunk_list) + for child_array in result: + if not is_the_task_interrupted(): + self._batch_save(child_array, embedding, is_the_task_interrupted) + else: + break @abstractmethod def _save(self, text, source_type: SourceType, dataset_id: str, document_id: str, paragraph_id: str, source_id: str, is_active: bool, - embedding: HuggingFaceEmbeddings): + embedding: Embeddings): pass @abstractmethod - def _batch_save(self, text_list: List[Dict], embedding: HuggingFaceEmbeddings): + def _batch_save(self, text_list: List[Dict], embedding: Embeddings, is_the_task_interrupted): pass def search(self, query_text, dataset_id_list: list[str], exclude_document_id_list: list[str], exclude_paragraph_list: list[str], is_active: bool, - embedding: HuggingFaceEmbeddings): + embedding: Embeddings): if dataset_id_list is None or len(dataset_id_list) == 0: return [] embedding_query = embedding.embed_query(query_text) result = self.query(embedding_query, dataset_id_list, exclude_document_id_list, exclude_paragraph_list, - is_active, 1, 0.65) + is_active, 1, 3, 0.65) return result[0] @abstractmethod @@ -123,7 +135,7 @@ def query(self, query_text: str, query_embedding: List[float], dataset_id_list: def hit_test(self, query_text, dataset_id: list[str], exclude_document_id_list: list[str], top_number: int, similarity: float, search_mode: SearchMode, - embedding: HuggingFaceEmbeddings): + embedding: Embeddings): pass @abstractmethod @@ -142,14 +154,6 @@ def update_by_source_id(self, source_id: str, instance: Dict): def update_by_source_ids(self, source_ids: List[str], instance: Dict): pass - @abstractmethod - def embed_documents(self, text_list: List[str]): - pass - - @abstractmethod - def embed_query(self, text: str): - 
pass - @abstractmethod def delete_by_dataset_id(self, dataset_id: str): pass @@ -159,7 +163,7 @@ def delete_by_document_id(self, document_id: str): pass @abstractmethod - def delete_bu_document_id_list(self, document_id_list: List[str]): + def delete_by_document_id_list(self, document_id_list: List[str]): pass @abstractmethod diff --git a/apps/embedding/vector/pg_vector.py b/apps/embedding/vector/pg_vector.py index 5c0d045363b..7929685a37c 100644 --- a/apps/embedding/vector/pg_vector.py +++ b/apps/embedding/vector/pg_vector.py @@ -12,10 +12,11 @@ from abc import ABC, abstractmethod from typing import Dict, List -from django.db.models import QuerySet -from langchain_community.embeddings import HuggingFaceEmbeddings +import jieba +from django.contrib.postgres.search import SearchVector +from django.db.models import QuerySet, Value +from langchain_core.embeddings import Embeddings -from common.config.embedding_config import EmbeddingModel from common.db.search import generate_sql_by_query_dict from common.db.sql_execute import select_list from common.util.file_util import get_file_content @@ -28,19 +29,13 @@ class PGVector(BaseVectorStore): def delete_by_source_ids(self, source_ids: List[str], source_type: str): + if len(source_ids) == 0: + return QuerySet(Embedding).filter(source_id__in=source_ids, source_type=source_type).delete() def update_by_source_ids(self, source_ids: List[str], instance: Dict): QuerySet(Embedding).filter(source_id__in=source_ids).update(**instance) - def embed_documents(self, text_list: List[str]): - embedding = EmbeddingModel.get_embedding_model() - return embedding.embed_documents(text_list) - - def embed_query(self, text: str): - embedding = EmbeddingModel.get_embedding_model() - return embedding.embed_query(text) - def vector_is_create(self) -> bool: # 项目启动默认是创建好的 不需要再创建 return True @@ -50,7 +45,7 @@ def vector_create(self): def _save(self, text, source_type: SourceType, dataset_id: str, document_id: str, paragraph_id: str, source_id: str, 
is_active: bool, - embedding: HuggingFaceEmbeddings): + embedding: Embeddings): text_embedding = embedding.embed_query(text) embedding = Embedding(id=uuid.uuid1(), dataset_id=dataset_id, @@ -64,7 +59,7 @@ def _save(self, text, source_type: SourceType, dataset_id: str, document_id: str embedding.save() return True - def _batch_save(self, text_list: List[Dict], embedding: HuggingFaceEmbeddings): + def _batch_save(self, text_list: List[Dict], embedding: Embeddings, is_the_task_interrupted): texts = [row.get('text') for row in text_list] embeddings = embedding.embed_documents(texts) embedding_list = [Embedding(id=uuid.uuid1(), @@ -75,15 +70,17 @@ def _batch_save(self, text_list: List[Dict], embedding: HuggingFaceEmbeddings): source_id=text_list[index].get('source_id'), source_type=text_list[index].get('source_type'), embedding=embeddings[index], - search_vector=to_ts_vector(text_list[index]['text'])) for index in - range(0, len(text_list))] - QuerySet(Embedding).bulk_create(embedding_list) if len(embedding_list) > 0 else None + search_vector=SearchVector(Value(to_ts_vector(text_list[index]['text'])))) for + index in + range(0, len(texts))] + if not is_the_task_interrupted(): + QuerySet(Embedding).bulk_create(embedding_list) if len(embedding_list) > 0 else None return True def hit_test(self, query_text, dataset_id_list: list[str], exclude_document_id_list: list[str], top_number: int, similarity: float, search_mode: SearchMode, - embedding: HuggingFaceEmbeddings): + embedding: Embeddings): if dataset_id_list is None or len(dataset_id_list) == 0: return [] exclude_dict = {} @@ -105,9 +102,9 @@ def query(self, query_text: str, query_embedding: List[float], dataset_id_list: return [] query_set = QuerySet(Embedding).filter(dataset_id__in=dataset_id_list, is_active=is_active) if exclude_document_id_list is not None and len(exclude_document_id_list) > 0: - exclude_dict.__setitem__('document_id__in', exclude_document_id_list) + query_set = 
query_set.exclude(document_id__in=exclude_document_id_list) if exclude_paragraph_list is not None and len(exclude_paragraph_list) > 0: - exclude_dict.__setitem__('paragraph_id__in', exclude_paragraph_list) + query_set = query_set.exclude(paragraph_id__in=exclude_paragraph_list) query_set = query_set.exclude(**exclude_dict) for search_handle in search_handle_list: if search_handle.support(search_mode): @@ -132,7 +129,9 @@ def delete_by_document_id(self, document_id: str): QuerySet(Embedding).filter(document_id=document_id).delete() return True - def delete_bu_document_id_list(self, document_id_list: List[str]): + def delete_by_document_id_list(self, document_id_list: List[str]): + if len(document_id_list) == 0: + return True return QuerySet(Embedding).filter(document_id__in=document_id_list).delete() def delete_by_source_id(self, source_id: str, source_type: str): diff --git a/apps/function_lib/__init__.py b/apps/function_lib/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/apps/function_lib/admin.py b/apps/function_lib/admin.py new file mode 100644 index 00000000000..8c38f3f3dad --- /dev/null +++ b/apps/function_lib/admin.py @@ -0,0 +1,3 @@ +from django.contrib import admin + +# Register your models here. 
diff --git a/apps/function_lib/apps.py b/apps/function_lib/apps.py new file mode 100644 index 00000000000..11957d6cf2c --- /dev/null +++ b/apps/function_lib/apps.py @@ -0,0 +1,6 @@ +from django.apps import AppConfig + + +class FunctionLibConfig(AppConfig): + default_auto_field = 'django.db.models.BigAutoField' + name = 'function_lib' diff --git a/apps/function_lib/migrations/0001_initial.py b/apps/function_lib/migrations/0001_initial.py new file mode 100644 index 00000000000..bb2fd60e997 --- /dev/null +++ b/apps/function_lib/migrations/0001_initial.py @@ -0,0 +1,34 @@ +# Generated by Django 4.2.15 on 2024-08-13 10:04 + +import django.contrib.postgres.fields +from django.db import migrations, models +import django.db.models.deletion +import uuid + + +class Migration(migrations.Migration): + + initial = True + + dependencies = [ + ('users', '0004_alter_user_email'), + ] + + operations = [ + migrations.CreateModel( + name='FunctionLib', + fields=[ + ('create_time', models.DateTimeField(auto_now_add=True, verbose_name='创建时间')), + ('update_time', models.DateTimeField(auto_now=True, verbose_name='修改时间')), + ('id', models.UUIDField(default=uuid.uuid1, editable=False, primary_key=True, serialize=False, verbose_name='主键id')), + ('name', models.CharField(max_length=64, verbose_name='函数名称')), + ('desc', models.CharField(max_length=128, verbose_name='描述')), + ('code', models.CharField(max_length=102400, verbose_name='python代码')), + ('input_field_list', django.contrib.postgres.fields.ArrayField(base_field=models.JSONField(default=dict, verbose_name='输入字段'), default=list, size=None, verbose_name='输入字段列表')), + ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='users.user', verbose_name='用户id')), + ], + options={ + 'db_table': 'function_lib', + }, + ), + ] diff --git a/apps/function_lib/migrations/0002_functionlib_is_active_functionlib_permission_type.py b/apps/function_lib/migrations/0002_functionlib_is_active_functionlib_permission_type.py new file mode 
100644 index 00000000000..c665ef22a43 --- /dev/null +++ b/apps/function_lib/migrations/0002_functionlib_is_active_functionlib_permission_type.py @@ -0,0 +1,23 @@ +# Generated by Django 4.2.15 on 2024-09-14 11:23 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('function_lib', '0001_initial'), + ] + + operations = [ + migrations.AddField( + model_name='functionlib', + name='is_active', + field=models.BooleanField(default=True), + ), + migrations.AddField( + model_name='functionlib', + name='permission_type', + field=models.CharField(choices=[('PUBLIC', '公开'), ('PRIVATE', '私有')], default='PRIVATE', max_length=20, verbose_name='权限类型'), + ), + ] diff --git a/apps/function_lib/migrations/0003_functionlib_function_type_functionlib_icon_and_more.py b/apps/function_lib/migrations/0003_functionlib_function_type_functionlib_icon_and_more.py new file mode 100644 index 00000000000..8df64e28197 --- /dev/null +++ b/apps/function_lib/migrations/0003_functionlib_function_type_functionlib_icon_and_more.py @@ -0,0 +1,194 @@ +# Generated by Django 4.2.15 on 2025-03-13 07:21 + +from django.db import migrations, models + +function_template = ''' +INSERT INTO function_lib (create_time, update_time, id, name, "desc", code, input_field_list, user_id, is_active, permission_type, function_type, icon, init_field_list, init_params, template_id) VALUES ('2025-03-10 06:20:35.945414 +00:00', '2025-03-10 09:19:23.608026 +00:00', 'c75cb48e-fd77-11ef-84d2-5618c4394482', '博查搜索', '从博查搜索任何信息和网页URL', e'def bocha_search(query, apikey): + import requests + import json + url = "https://api.bochaai.com/v1/web-search" + payload = json.dumps({ + "query": query, + "Boolean": "true", + "count": 8 + }) + + headers = { + "Authorization": "Bearer " + apikey, #鉴权参数,示例:Bearer xxxxxx,API KEY请先前往博查AI开放平台(https://open.bochaai.com)> API KEY 管理中获取。 + "Content-Type": "application/json" + } + + response = requests.request("POST", url, headers=headers, 
data=payload) + if response.status_code == 200: + return response.json() + else: + raise Exception(f"API请求失败: {response.status_code}, 错误信息: {response.text}") + return (response.text)', '{"{\\"name\\": \\"query\\", \\"type\\": \\"string\\", \\"source\\": \\"reference\\", \\"is_required\\": true}"}', 'f0dd8f71-e4ee-11ee-8c84-a8a1595801ab', TRUE, 'PUBLIC', 'INTERNAL', '/ui/fx/bochaai/icon.png', '[{"attrs": {"type": "password", "maxlength": 200, "minlength": 1, "show-password": true, "show-word-limit": true}, "field": "apikey", "label": "API Key", "required": true, "input_type": "PasswordInput", "props_info": {"rules": [{"message": "API Key 为必填属性", "required": true}, {"max": 200, "min": 1, "message": "API Key 长度在 1 到 200 个字符", "trigger": "blur"}]}, "default_value": "x", "show_default_value": false}]', '', NULL); +INSERT INTO function_lib (create_time, update_time, id, name, "desc", code, input_field_list, user_id, is_active, permission_type, function_type, icon, init_field_list, init_params, template_id) VALUES ('2025-02-26 03:36:48.187286 +00:00', '2025-03-11 07:23:46.123972 +00:00', 'e89ad2ae-f3f2-11ef-ad09-0242ac110002', 'Google Search', 'Google Web Search', e'def google_search(query, apikey, cx): + import requests + import json + url = "https://customsearch.googleapis.com/customsearch/v1" + params = { + "q": query, + "key": apikey, + "cx": cx, + "num": 10, # 每次最多返回10条 + } + + response = requests.get(url, params=params) + if response.status_code == 200: + return response.json() + else: + raise Exception(f"API请求失败: {response.status_code}, 错误信息: {response.text}") + return (response.text)', '{"{\\"name\\": \\"query\\", \\"type\\": \\"string\\", \\"source\\": \\"reference\\", \\"is_required\\": true}"}', 'f0dd8f71-e4ee-11ee-8c84-a8a1595801ab', TRUE, 'PUBLIC', 'INTERNAL', '/ui/fx/google_search/icon.png', '[{"attrs": {"type": "password", "maxlength": 200, "minlength": 1, "show-password": true, "show-word-limit": true}, "field": "apikey", "label": "API Key", "required": 
true, "input_type": "PasswordInput", "props_info": {"rules": [{"message": "API Key 为必填属性", "required": true}, {"max": 200, "min": 1, "message": "API Key 长度在 1 到 200 个字符", "trigger": "blur"}]}, "default_value": "x", "show_default_value": false}, {"attrs": {"maxlength": 200, "minlength": 1, "show-word-limit": true}, "field": "cx", "label": "cx", "required": true, "input_type": "TextInput", "props_info": {"rules": [{"message": "cx 为必填属性", "required": true}, {"max": 200, "min": 1, "message": "cx长度在 1 到 200 个字符", "trigger": "blur"}]}, "default_value": "x", "show_default_value": false}]', '', NULL); +INSERT INTO function_lib (create_time, update_time, id, name, "desc", code, input_field_list, user_id, is_active, permission_type, function_type, icon, init_field_list, init_params, template_id) VALUES ('2025-02-25 07:44:40.141515 +00:00', '2025-03-11 06:33:53.248495 +00:00', '5e912f00-f34c-11ef-8a9c-5618c4394482', 'LangSearch', e'A Web Search tool supporting natural language search +', e' +def langsearch(query, apikey): + import json + import requests + + url = "https://api.langsearch.com/v1/web-search" + payload = json.dumps({ + "query": query, + "summary": True, + "freshness": "noLimit", + "livecrawl": True, + "count": 20 + }) + headers = { + "Authorization": apikey, + "Content-Type": "application/json" + } + # key从官网申请 https://langsearch.com/ + response = requests.request("POST", url, headers=headers, data=payload) + if response.status_code == 200: + return response.json() + else: + raise Exception(f"API请求失败: {response.status_code}, 错误信息: {response.text}") + return (response.text)', '{"{\\"name\\": \\"query\\", \\"type\\": \\"string\\", \\"source\\": \\"reference\\", \\"is_required\\": true}"}', 'f0dd8f71-e4ee-11ee-8c84-a8a1595801ab', TRUE, 'PUBLIC', 'INTERNAL', '/ui/fx/langsearch/icon.png', '[{"attrs": {"type": "password", "maxlength": 200, "minlength": 1, "show-password": true, "show-word-limit": true}, "field": "apikey", "label": "API Key", "required": true, 
"input_type": "PasswordInput", "props_info": {"rules": [{"message": "API Key 为必填属性", "required": true}, {"max": 200, "min": 1, "message": "API Key 长度在 1 到 200 个字符", "trigger": "blur"}]}, "default_value": "x", "show_default_value": false}]', '', NULL); +INSERT INTO function_lib (create_time, update_time, id, name, "desc", code, input_field_list, user_id, is_active, permission_type, function_type, icon, init_field_list, init_params, template_id) VALUES ('2025-03-17 08:16:32.626245 +00:00', '2025-03-17 08:16:32.626308 +00:00', '22c21b76-0308-11f0-9694-5618c4394482', 'MySQL 查询', '一个连接MySQL数据库执行SQL查询的工具', e' +def query_mysql(host,port, user, password, database, sql): + import pymysql + import json + from pymysql.cursors import DictCursor + + try: + # 创建连接 + db = pymysql.connect( + host=host, + port=int(port), + user=user, + password=password, + database=database, + cursorclass=DictCursor # 使用字典游标 + ) + + # 使用 cursor() 方法创建一个游标对象 cursor + cursor = db.cursor() + + # 使用 execute() 方法执行 SQL 查询 + cursor.execute(sql) + + # 使用 fetchall() 方法获取所有数据 + data = cursor.fetchall() + + # 处理 bytes 类型的数据 + for row in data: + for key, value in row.items(): + if isinstance(value, bytes): + row[key] = value.decode("utf-8") # 转换为字符串 + + # 将数据序列化为 JSON + json_data = json.dumps(data, ensure_ascii=False) + return json_data + + # 关闭数据库连接 + db.close() + + except Exception as e: + print(f"Error while connecting to MySQL: {e}") + raise e', '{"{\\"name\\": \\"sql\\", \\"type\\": \\"string\\", \\"source\\": \\"reference\\", \\"is_required\\": true}"}', 'f0dd8f71-e4ee-11ee-8c84-a8a1595801ab', true, 'PUBLIC', 'INTERNAL', '/ui/fx/mysql/icon.png', '[{"attrs": {"maxlength": 200, "minlength": 1, "show-word-limit": true}, "field": "host", "label": "host", "required": true, "input_type": "TextInput", "props_info": {"rules": [{"message": "host 为必填属性", "required": true}, {"max": 200, "min": 1, "message": "host长度在 1 到 200 个字符", "trigger": "blur"}]}, "default_value": "x", "show_default_value": false}, {"attrs": 
{"maxlength": 20, "minlength": 1, "show-word-limit": true}, "field": "port", "label": "port", "required": true, "input_type": "TextInput", "props_info": {"rules": [{"message": "port 为必填属性", "required": true}, {"max": 20, "min": 1, "message": "port长度在 1 到 20 个字符", "trigger": "blur"}]}, "default_value": "3306", "show_default_value": false}, {"attrs": {"maxlength": 200, "minlength": 1, "show-word-limit": true}, "field": "user", "label": "user", "required": true, "input_type": "TextInput", "props_info": {"rules": [{"message": "user 为必填属性", "required": true}, {"max": 200, "min": 1, "message": "user长度在 1 到 200 个字符", "trigger": "blur"}]}, "default_value": "root", "show_default_value": false}, {"attrs": {"type": "password", "maxlength": 200, "minlength": 1, "show-password": true, "show-word-limit": true}, "field": "password", "label": "password", "required": true, "input_type": "PasswordInput", "props_info": {"rules": [{"message": "password 为必填属性", "required": true}, {"max": 200, "min": 1, "message": "password长度在 1 到 200 个字符", "trigger": "blur"}]}, "default_value": "x", "show_default_value": false}, {"attrs": {"maxlength": 200, "minlength": 1, "show-word-limit": true}, "field": "database", "label": "database", "required": true, "input_type": "TextInput", "props_info": {"rules": [{"message": "database 为必填属性", "required": true}, {"max": 200, "min": 1, "message": "database长度在 1 到 200 个字符", "trigger": "blur"}]}, "default_value": "x", "show_default_value": false}]', null, null); +INSERT INTO function_lib (create_time, update_time, id, name, "desc", code, input_field_list, user_id, is_active, permission_type, function_type, icon, init_field_list, init_params, template_id) VALUES ('2025-03-17 07:37:54.620836 +00:00', '2025-03-17 07:37:54.620887 +00:00', 'bd1e8b88-0302-11f0-87bb-5618c4394482', 'PostgreSQL 查询', '一个连接PostgreSQL数据库执行SQL查询的工具', e' +def queryPgSQL(database, user, password, host, port, query): + import psycopg2 + import json + from datetime import datetime + + # 自定义 
JSON 序列化函数 + def default_serializer(obj): + if isinstance(obj, datetime): + return obj.isoformat() # 将 datetime 转换为 ISO 格式字符串 + raise TypeError(f"Type {type(obj)} not serializable") + + # 数据库连接信息 + conn_params = { + "dbname": database, + "user": user, + "password": password, + "host": host, + "port": port + } + try: + # 建立连接 + conn = psycopg2.connect(**conn_params) + print("连接成功!") + # 创建游标对象 + cursor = conn.cursor() + # 执行查询语句 + cursor.execute(query) + # 获取查询结果 + rows = cursor.fetchall() + # 处理 bytes 类型的数据 + columns = [desc[0] for desc in cursor.description] + result = [dict(zip(columns, row)) for row in rows] + # 转换为 JSON 格式 + json_result = json.dumps(result, default=default_serializer, ensure_ascii=False) + return json_result + except Exception as e: + print(f"发生错误:{e}") + raise e + finally: + # 关闭游标和连接 + if cursor: + cursor.close() + if conn: + conn.close()', '{"{\\"name\\": \\"query\\", \\"type\\": \\"string\\", \\"source\\": \\"reference\\", \\"is_required\\": true}"}', 'f0dd8f71-e4ee-11ee-8c84-a8a1595801ab', true, 'PUBLIC', 'INTERNAL', '/ui/fx/postgresql/icon.png', '[{"attrs":{"maxlength":200,"minlength":1,"show-word-limit":true},"field":"host","label":"host","required":true,"input_type":"TextInput","props_info":{"rules":[{"message":"host 为必填属性","required":true},{"max":200,"min":1,"message":"host长度在 1 到 200 个字符","trigger":"blur"}]},"default_value":"x","show_default_value":false},{"attrs":{"maxlength":20,"minlength":1,"show-word-limit":true},"field":"port","label":"port","required":true,"input_type":"TextInput","props_info":{"rules":[{"message":"port 为必填属性","required":true},{"max":20,"min":1,"message":"port长度在 1 到 20 个字符","trigger":"blur"}]},"default_value":"5432","show_default_value":false},{"attrs":{"maxlength":200,"minlength":1,"show-word-limit":true},"field":"user","label":"user","required":true,"input_type":"TextInput","props_info":{"rules":[{"message":"user 为必填属性","required":true},{"max":200,"min":1,"message":"user长度在 1 到 200 
个字符","trigger":"blur"}]},"default_value":"root","show_default_value":false},{"attrs":{"type":"password","maxlength":200,"minlength":1,"show-password":true,"show-word-limit":true},"field":"password","label":"password","required":true,"input_type":"PasswordInput","props_info":{"rules":[{"message":"password 为必填属性","required":true},{"max":200,"min":1,"message":"password长度在 1 到 200 个字符","trigger":"blur"}]},"default_value":"x","show_default_value":false},{"attrs":{"maxlength":200,"minlength":1,"show-word-limit":true},"field":"database","label":"database","required":true,"input_type":"TextInput","props_info":{"rules":[{"message":"database 为必填属性","required":true},{"max":200,"min":1,"message":"database长度在 1 到 200 个字符","trigger":"blur"}]},"default_value":"x","show_default_value":false}]', null, null); + +''' + + +class Migration(migrations.Migration): + dependencies = [ + ('function_lib', '0002_functionlib_is_active_functionlib_permission_type'), + ] + + operations = [ + migrations.AddField( + model_name='functionlib', + name='function_type', + field=models.CharField(choices=[('INTERNAL', '内置'), ('PUBLIC', '公开')], + default='PUBLIC', max_length=20, verbose_name='函数类型'), + ), + migrations.AddField( + model_name='functionlib', + name='icon', + field=models.CharField(default='/ui/favicon.ico', max_length=256, + verbose_name='函数库icon'), + ), + migrations.AddField( + model_name='functionlib', + name='init_field_list', + field=models.JSONField(default=list, verbose_name='启动字段列表'), + ), + migrations.AddField( + model_name='functionlib', + name='init_params', + field=models.CharField(max_length=102400, null=True, verbose_name='初始化参数'), + ), + migrations.AddField( + model_name='functionlib', + name='template_id', + field=models.UUIDField(default=None, null=True, verbose_name='模版id'), + ), + migrations.RunSQL(function_template) + ] diff --git a/apps/function_lib/migrations/__init__.py b/apps/function_lib/migrations/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff 
--git a/apps/function_lib/models/__init__.py b/apps/function_lib/models/__init__.py new file mode 100644 index 00000000000..a68550e90ef --- /dev/null +++ b/apps/function_lib/models/__init__.py @@ -0,0 +1,8 @@ +# coding=utf-8 +""" + @project: MaxKB + @Author:虎 + @file: __init__.py.py + @date:2024/8/2 14:55 + @desc: +""" diff --git a/apps/function_lib/models/function.py b/apps/function_lib/models/function.py new file mode 100644 index 00000000000..037f5099527 --- /dev/null +++ b/apps/function_lib/models/function.py @@ -0,0 +1,47 @@ +# coding=utf-8 +""" + @project: MaxKB + @Author:虎 + @file: function_lib.py + @date:2024/8/2 14:59 + @desc: +""" +import uuid + +from django.contrib.postgres.fields import ArrayField +from django.db import models + +from common.mixins.app_model_mixin import AppModelMixin +from users.models import User + + +class PermissionType(models.TextChoices): + PUBLIC = "PUBLIC", '公开' + PRIVATE = "PRIVATE", "私有" + +class FunctionType(models.TextChoices): + INTERNAL = "INTERNAL", '内置' + PUBLIC = "PUBLIC", "公开" + + +class FunctionLib(AppModelMixin): + id = models.UUIDField(primary_key=True, max_length=128, default=uuid.uuid1, editable=False, verbose_name="主键id") + user = models.ForeignKey(User, on_delete=models.CASCADE, verbose_name="用户id") + name = models.CharField(max_length=64, verbose_name="函数名称") + desc = models.CharField(max_length=128, verbose_name="描述") + code = models.CharField(max_length=102400, verbose_name="python代码") + input_field_list = ArrayField(verbose_name="输入字段列表", + base_field=models.JSONField(verbose_name="输入字段", default=dict) + , default=list) + init_field_list = models.JSONField(verbose_name="启动字段列表", default=list) + icon = models.CharField(max_length=256, verbose_name="函数库icon", default="/ui/favicon.ico") + is_active = models.BooleanField(default=True) + permission_type = models.CharField(max_length=20, verbose_name='权限类型', choices=PermissionType.choices, + default=PermissionType.PRIVATE) + function_type = 
models.CharField(max_length=20, verbose_name='函数类型', choices=FunctionType.choices, + default=FunctionType.PUBLIC) + template_id = models.UUIDField(max_length=128, verbose_name="模版id", null=True, default=None) + init_params = models.CharField(max_length=102400, verbose_name="初始化参数", null=True) + + class Meta: + db_table = "function_lib" diff --git a/apps/function_lib/serializers/__init__.py b/apps/function_lib/serializers/__init__.py new file mode 100644 index 00000000000..a68550e90ef --- /dev/null +++ b/apps/function_lib/serializers/__init__.py @@ -0,0 +1,8 @@ +# coding=utf-8 +""" + @project: MaxKB + @Author:虎 + @file: __init__.py.py + @date:2024/8/2 14:55 + @desc: +""" diff --git a/apps/function_lib/serializers/function_lib_serializer.py b/apps/function_lib/serializers/function_lib_serializer.py new file mode 100644 index 00000000000..440eb22c786 --- /dev/null +++ b/apps/function_lib/serializers/function_lib_serializer.py @@ -0,0 +1,426 @@ +# coding=utf-8 +""" + @project: MaxKB + @Author:虎 + @file: function_lib_serializer.py + @date:2024/8/2 17:35 + @desc: +""" +import io +import json +import pickle +import re +import uuid + +from django.core import validators +from django.db import transaction +from django.db.models import QuerySet, Q, OuterRef, Exists +from django.http import HttpResponse +from django.utils.translation import gettext_lazy as _ +from rest_framework import serializers, status + +from common.db.search import page_search +from common.exception.app_exception import AppApiException +from common.field.common import UploadedFileField, UploadedImageField +from common.response import result +from common.util.common import restricted_loads +from common.util.field_message import ErrMessage +from common.util.function_code import FunctionExecutor +from common.util.rsa_util import rsa_long_decrypt, rsa_long_encrypt +from dataset.models import File +from function_lib.models.function import FunctionLib, PermissionType, FunctionType +from smartdoc.const import 
CONFIG + +function_executor = FunctionExecutor(CONFIG.get('SANDBOX')) + +class FlibInstance: + def __init__(self, function_lib: dict, version: str): + self.function_lib = function_lib + self.version = version + +def encryption(message: str): + """ + 加密敏感字段数据 加密方式是 如果密码是 1234567890 那么给前端则是 123******890 + :param message: + :return: + """ + if type(message) != str: + return message + if message == "": + return "" + max_pre_len = 8 + max_post_len = 4 + message_len = len(message) + pre_len = int(message_len / 5 * 2) + post_len = int(message_len / 5 * 1) + pre_str = "".join([message[index] for index in + range(0, + max_pre_len if pre_len > max_pre_len else 1 if pre_len <= 0 else int( + pre_len))]) + end_str = "".join( + [message[index] for index in + range(message_len - (int(post_len) if pre_len < max_post_len else max_post_len), + message_len)]) + content = "***************" + return pre_str + content + end_str + + +class FunctionLibModelSerializer(serializers.ModelSerializer): + class Meta: + model = FunctionLib + fields = ['id', 'name', 'icon', 'desc', 'code', 'input_field_list','init_field_list', 'init_params', 'permission_type', 'is_active', 'user_id', 'template_id', + 'create_time', 'update_time'] + + +class FunctionLibInputField(serializers.Serializer): + name = serializers.CharField(required=True, error_messages=ErrMessage.char(_('variable name'))) + is_required = serializers.BooleanField(required=True, error_messages=ErrMessage.boolean(_('required'))) + type = serializers.CharField(required=True, error_messages=ErrMessage.char(_('type')), validators=[ + validators.RegexValidator(regex=re.compile("^string|int|dict|array|float$"), + message=_('fields only support string|int|dict|array|float'), code=500) + ]) + source = serializers.CharField(required=True, error_messages=ErrMessage.char(_('source')), validators=[ + validators.RegexValidator(regex=re.compile("^custom|reference$"), + message=_('The field only supports custom|reference'), code=500) + ]) + + +class 
DebugField(serializers.Serializer): + name = serializers.CharField(required=True, error_messages=ErrMessage.char(_('variable name'))) + value = serializers.CharField(required=False, allow_blank=True, allow_null=True, + error_messages=ErrMessage.char(_('variable value'))) + + +class DebugInstance(serializers.Serializer): + debug_field_list = DebugField(required=True, many=True) + input_field_list = FunctionLibInputField(required=True, many=True) + init_field_list = serializers.ListField(required=False, default=list) + init_params = serializers.JSONField(required=False, default=dict) + code = serializers.CharField(required=True, error_messages=ErrMessage.char(_('function content'))) + + +class EditFunctionLib(serializers.Serializer): + name = serializers.CharField(required=False, allow_null=True, allow_blank=True, + error_messages=ErrMessage.char(_('function name'))) + + desc = serializers.CharField(required=False, allow_null=True, allow_blank=True, + error_messages=ErrMessage.char(_('function description'))) + + code = serializers.CharField(required=False, allow_null=True, allow_blank=True, + error_messages=ErrMessage.char(_('function content'))) + + input_field_list = FunctionLibInputField(required=False, many=True) + + init_field_list = serializers.ListField(required=False, default=list) + + is_active = serializers.BooleanField(required=False, error_messages=ErrMessage.char(_('Is active'))) + + +class CreateFunctionLib(serializers.Serializer): + name = serializers.CharField(required=True, error_messages=ErrMessage.char(_('function name'))) + + desc = serializers.CharField(required=False, allow_null=True, allow_blank=True, + error_messages=ErrMessage.char(_('function description'))) + + code = serializers.CharField(required=True, error_messages=ErrMessage.char(_('function content'))) + + input_field_list = FunctionLibInputField(required=True, many=True) + + init_field_list = serializers.ListField(required=False, default=list) + + permission_type = 
serializers.CharField(required=True, error_messages=ErrMessage.char(_('permission')), validators=[ + validators.RegexValidator(regex=re.compile("^PUBLIC|PRIVATE$"), + message="权限只支持PUBLIC|PRIVATE", code=500) + ]) + is_active = serializers.BooleanField(required=False, error_messages=ErrMessage.char(_('Is active'))) + + +class FunctionLibSerializer(serializers.Serializer): + class Query(serializers.Serializer): + name = serializers.CharField(required=False, allow_null=True, allow_blank=True, + error_messages=ErrMessage.char(_('function name'))) + + desc = serializers.CharField(required=False, allow_null=True, allow_blank=True, + error_messages=ErrMessage.char(_('function description'))) + is_active = serializers.BooleanField(required=False, error_messages=ErrMessage.char(_('Is active'))) + + user_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid(_('user id'))) + select_user_id = serializers.CharField(required=False, allow_null=True, allow_blank=True) + function_type = serializers.CharField(required=False, allow_null=True, allow_blank=True) + + + def get_query_set(self): + query_set = QuerySet(FunctionLib).filter( + (Q(user_id=self.data.get('user_id')) | Q(permission_type='PUBLIC'))) + if self.data.get('name') is not None: + query_set = query_set.filter(name__icontains=self.data.get('name')) + if self.data.get('desc') is not None: + query_set = query_set.filter(desc__contains=self.data.get('desc')) + if self.data.get('is_active') is not None: + query_set = query_set.filter(is_active=self.data.get('is_active')) + if self.data.get('select_user_id') is not None: + query_set = query_set.filter(user_id=self.data.get('select_user_id')) + if self.data.get('function_type') is not None: + query_set = query_set.filter(function_type=self.data.get('function_type')) + query_set = query_set.order_by("-create_time") + + return query_set + + def list(self, with_valid=True): + if with_valid: + self.is_valid(raise_exception=True) + rs = [] + for item in 
self.get_query_set(): + data = {**FunctionLibModelSerializer(item).data, 'init_params': None} + rs.append(data) + return rs + + def page(self, current_page: int, page_size: int, with_valid=True): + if with_valid: + self.is_valid(raise_exception=True) + + def post_records_handler(row): + return { + **FunctionLibModelSerializer(row).data, + 'init_params': None + } + + return page_search(current_page, page_size, self.get_query_set(), + post_records_handler=post_records_handler) + + class Create(serializers.Serializer): + user_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid(_('user id'))) + + def insert(self, instance, with_valid=True): + if with_valid: + self.is_valid(raise_exception=True) + CreateFunctionLib(data=instance).is_valid(raise_exception=True) + function_lib = FunctionLib(id=uuid.uuid1(), name=instance.get('name'), desc=instance.get('desc'), + code=instance.get('code'), + user_id=self.data.get('user_id'), + input_field_list=instance.get('input_field_list'), + init_field_list=instance.get('init_field_list'), + permission_type=instance.get('permission_type'), + is_active=False) + function_lib.save() + return FunctionLibModelSerializer(function_lib).data + + class Debug(serializers.Serializer): + user_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid(_('user id'))) + + def debug(self, debug_instance, with_valid=True): + if with_valid: + self.is_valid(raise_exception=True) + DebugInstance(data=debug_instance).is_valid(raise_exception=True) + input_field_list = debug_instance.get('input_field_list') + code = debug_instance.get('code') + debug_field_list = debug_instance.get('debug_field_list') + init_params = debug_instance.get('init_params') + params = {field.get('name'): self.convert_value(field.get('name'), field.get('value'), field.get('type'), + field.get('is_required')) + for field in + [{'value': self.get_field_value(debug_field_list, field.get('name'), field.get('is_required')), + **field} for field in + 
input_field_list]} + # 合并初始化参数 + if init_params is not None: + all_params = init_params | params + else: + all_params = params + return function_executor.exec_code(code, all_params) + + @staticmethod + def get_field_value(debug_field_list, name, is_required): + result = [field for field in debug_field_list if field.get('name') == name] + if len(result) > 0: + return result[-1].get('value') + if is_required: + raise AppApiException(500, f"{name}" + _('field has no value set')) + return None + + @staticmethod + def convert_value(name: str, value: str, _type: str, is_required: bool): + if not is_required and value is None: + return None + try: + if _type == 'int': + return int(value) + if _type == 'float': + return float(value) + if _type == 'dict': + v = json.loads(value) + if isinstance(v, dict): + return v + raise Exception(_('type error')) + if _type == 'array': + v = json.loads(value) + if isinstance(v, list): + return v + raise Exception(_('type error')) + return value + except Exception as e: + raise AppApiException(500, _('Field: {name} Type: {_type} Value: {value} Type conversion error').format( + name=name, type=_type, value=value + )) + + class Operate(serializers.Serializer): + id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid(_('function id'))) + user_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid(_('user id'))) + + def is_valid(self, *, raise_exception=False): + super().is_valid(raise_exception=True) + if not QuerySet(FunctionLib).filter(id=self.data.get('id')).exists(): + raise AppApiException(500, _('Function does not exist')) + + def delete(self, with_valid=True): + if with_valid: + self.is_valid(raise_exception=True) + fun = QuerySet(FunctionLib).filter(id=self.data.get('id')).first() + if fun.template_id is None and fun.icon != '/ui/favicon.ico': + QuerySet(File).filter(id=fun.icon.split('/')[-1]).delete() + QuerySet(FunctionLib).filter(id=self.data.get('id')).delete() + return True + + def edit(self, 
instance, with_valid=True): + if with_valid: + self.is_valid(raise_exception=True) + EditFunctionLib(data=instance).is_valid(raise_exception=True) + edit_field_list = ['name', 'desc', 'code', 'icon', 'input_field_list', 'init_field_list', 'init_params', 'permission_type', 'is_active'] + edit_dict = {field: instance.get(field) for field in edit_field_list if ( + field in instance and instance.get(field) is not None)} + + function_lib = QuerySet(FunctionLib).filter(id=self.data.get('id')).first() + if 'init_params' in edit_dict: + if edit_dict['init_field_list'] is not None: + rm_key = [] + for key in edit_dict['init_params']: + if key not in [field['field'] for field in edit_dict['init_field_list']]: + rm_key.append(key) + for key in rm_key: + edit_dict['init_params'].pop(key) + if function_lib.init_params: + old_init_params = json.loads(rsa_long_decrypt(function_lib.init_params)) + for key in edit_dict['init_params']: + if key in old_init_params and edit_dict['init_params'][key] == encryption(old_init_params[key]): + edit_dict['init_params'][key] = old_init_params[key] + edit_dict['init_params'] = rsa_long_encrypt(json.dumps(edit_dict['init_params'])) + QuerySet(FunctionLib).filter(id=self.data.get('id')).update(**edit_dict) + return self.one(False) + + def one(self, with_valid=True): + if with_valid: + super().is_valid(raise_exception=True) + if not QuerySet(FunctionLib).filter(id=self.data.get('id')).filter( + Q(user_id=self.data.get('user_id')) | Q(permission_type='PUBLIC')).exists(): + raise AppApiException(500, _('Function does not exist')) + function_lib = QuerySet(FunctionLib).filter(id=self.data.get('id')).first() + if function_lib.init_params: + function_lib.init_params = json.loads(rsa_long_decrypt(function_lib.init_params)) + if function_lib.init_field_list: + password_fields = [i["field"] for i in function_lib.init_field_list if i.get("input_type") == "PasswordInput"] + if function_lib.init_params: + for k in function_lib.init_params: + if k in 
password_fields and function_lib.init_params[k]: + function_lib.init_params[k] = encryption(function_lib.init_params[k]) + return {**FunctionLibModelSerializer(function_lib).data, 'init_params': function_lib.init_params} + + def export(self, with_valid=True): + try: + if with_valid: + self.is_valid() + id = self.data.get('id') + function_lib = QuerySet(FunctionLib).filter(id=id).first() + application_dict = FunctionLibModelSerializer(function_lib).data + mk_instance = FlibInstance(application_dict, 'v1') + application_pickle = pickle.dumps(mk_instance) + response = HttpResponse(content_type='text/plain', content=application_pickle) + response['Content-Disposition'] = f'attachment; filename="{function_lib.name}.fx"' + return response + except Exception as e: + return result.error(str(e), response_status=status.HTTP_500_INTERNAL_SERVER_ERROR) + + class Import(serializers.Serializer): + file = UploadedFileField(required=True, error_messages=ErrMessage.image(_("file"))) + user_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid(_("User ID"))) + + @transaction.atomic + def import_(self, with_valid=True): + if with_valid: + self.is_valid() + user_id = self.data.get('user_id') + flib_instance_bytes = self.data.get('file').read() + try: + flib_instance = restricted_loads(flib_instance_bytes) + except Exception as e: + raise AppApiException(1001, _("Unsupported file format")) + function_lib = flib_instance.function_lib + function_lib_model = FunctionLib(id=uuid.uuid1(), name=function_lib.get('name'), + desc=function_lib.get('desc'), + code=function_lib.get('code'), + user_id=user_id, + input_field_list=function_lib.get('input_field_list'), + init_field_list=function_lib.get('init_field_list', []), + permission_type='PRIVATE', + is_active=False) + function_lib_model.save() + return True + + class IconOperate(serializers.Serializer): + id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid(_("function ID"))) + user_id = 
serializers.UUIDField(required=True, error_messages=ErrMessage.uuid(_("User ID"))) + image = UploadedImageField(required=True, error_messages=ErrMessage.image(_("picture"))) + + def edit(self, with_valid=True): + if with_valid: + self.is_valid(raise_exception=True) + functionLib = QuerySet(FunctionLib).filter(id=self.data.get('id')).first() + if functionLib is None: + raise AppApiException(500, _('Function does not exist')) + # 删除旧的图片 + if functionLib.icon != '/ui/favicon.ico': + QuerySet(File).filter(id=functionLib.icon.split('/')[-1]).delete() + if self.data.get('image') is None: + functionLib.icon = '/ui/favicon.ico' + else: + meta = { + 'debug': False + } + file_id = uuid.uuid1() + file = File(id=file_id, file_name=self.data.get('image').name, meta=meta) + file.save(self.data.get('image').read()) + + functionLib.icon = f'/api/file/{file_id}' + functionLib.save() + + return functionLib.icon + + class InternalFunction(serializers.Serializer): + id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid(_("function ID"))) + user_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid(_("User ID"))) + name = serializers.CharField(required=True, error_messages=ErrMessage.char(_("function name"))) + + def add(self, with_valid=True): + if with_valid: + self.is_valid(raise_exception=True) + + internal_function_lib = QuerySet(FunctionLib).filter(id=self.data.get('id')).first() + if internal_function_lib is None: + raise AppApiException(500, _('Function does not exist')) + + function_lib = FunctionLib( + id=uuid.uuid1(), + name=self.data.get('name'), + desc=internal_function_lib.desc, + code=internal_function_lib.code, + user_id=self.data.get('user_id'), + input_field_list=internal_function_lib.input_field_list, + init_field_list=internal_function_lib.init_field_list, + permission_type=PermissionType.PRIVATE, + template_id=internal_function_lib.id, + function_type=FunctionType.PUBLIC, + icon=internal_function_lib.icon, + is_active=False + ) 
+ function_lib.save() + + return FunctionLibModelSerializer(function_lib).data diff --git a/apps/function_lib/serializers/py_lint_serializer.py b/apps/function_lib/serializers/py_lint_serializer.py new file mode 100644 index 00000000000..6fa6d4c44a3 --- /dev/null +++ b/apps/function_lib/serializers/py_lint_serializer.py @@ -0,0 +1,59 @@ +# coding=utf-8 +""" + @project: MaxKB + @Author:虎 + @file: py_lint_serializer.py + @date:2024/9/30 15:38 + @desc: +""" +import os +import uuid + +from pylint.lint import Run +from pylint.reporters import JSON2Reporter +from rest_framework import serializers + +from common.util.field_message import ErrMessage +from smartdoc.const import PROJECT_DIR +from django.utils.translation import gettext_lazy as _ + + +class PyLintInstance(serializers.Serializer): + code = serializers.CharField(required=True, allow_null=True, allow_blank=True, + error_messages=ErrMessage.char(_('function content'))) + + +def to_dict(message, file_name): + return {'line': message.line, + 'column': message.column, + 'endLine': message.end_line, + 'endColumn': message.end_column, + 'message': (message.msg or "").replace(file_name, 'code'), + 'type': message.category} + + +def get_file_name(): + file_name = f"{uuid.uuid1()}" + py_lint_dir = os.path.join(PROJECT_DIR, 'data', 'py_lint') + if not os.path.exists(py_lint_dir): + os.makedirs(py_lint_dir) + return os.path.join(py_lint_dir, file_name) + + +class PyLintSerializer(serializers.Serializer): + + def pylint(self, instance, is_valid=True): + if is_valid: + self.is_valid(raise_exception=True) + PyLintInstance(data=instance).is_valid(raise_exception=True) + code = instance.get('code') + file_name = get_file_name() + with open(file_name, 'w') as file: + file.write(code) + reporter = JSON2Reporter() + Run([file_name, + "--disable=line-too-long", + '--module-rgx=[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}'], + reporter=reporter, exit=False) + os.remove(file_name) + return [to_dict(m, 
os.path.basename(file_name)) for m in reporter.messages] diff --git a/apps/function_lib/swagger_api/__init__.py b/apps/function_lib/swagger_api/__init__.py new file mode 100644 index 00000000000..a68550e90ef --- /dev/null +++ b/apps/function_lib/swagger_api/__init__.py @@ -0,0 +1,8 @@ +# coding=utf-8 +""" + @project: MaxKB + @Author:虎 + @file: __init__.py.py + @date:2024/8/2 14:55 + @desc: +""" diff --git a/apps/function_lib/swagger_api/function_lib_api.py b/apps/function_lib/swagger_api/function_lib_api.py new file mode 100644 index 00000000000..f0d409efaf8 --- /dev/null +++ b/apps/function_lib/swagger_api/function_lib_api.py @@ -0,0 +1,264 @@ +# coding=utf-8 +""" + @project: MaxKB + @Author:虎 + @file: function_lib_api.py + @date:2024/8/2 17:11 + @desc: +""" +from drf_yasg import openapi + +from common.mixins.api_mixin import ApiMixin +from django.utils.translation import gettext_lazy as _ + + +class FunctionLibApi(ApiMixin): + @staticmethod + def get_response_body_api(): + return openapi.Schema( + type=openapi.TYPE_OBJECT, + required=['id', 'name', 'desc', 'code', 'input_field_list', 'create_time', + 'update_time'], + properties={ + 'id': openapi.Schema(type=openapi.TYPE_STRING, title="", description=_('ID')), + 'name': openapi.Schema(type=openapi.TYPE_STRING, title=_('function name'), + description=_('function name')), + 'desc': openapi.Schema(type=openapi.TYPE_STRING, title=_('function description'), + description=_('function description')), + 'code': openapi.Schema(type=openapi.TYPE_STRING, title=_('function content'), + description=_('function content')), + 'input_field_list': openapi.Schema(type=openapi.TYPE_STRING, title=_('input field'), + description=_('input field')), + 'create_time': openapi.Schema(type=openapi.TYPE_STRING, title=_('create time'), + description=_('create time')), + 'update_time': openapi.Schema(type=openapi.TYPE_STRING, title=_('update time'), + description=_('update time')), + } + ) + + class Query(ApiMixin): + @staticmethod + def 
get_request_params_api(): + return [openapi.Parameter(name='name', + in_=openapi.IN_QUERY, + type=openapi.TYPE_STRING, + required=False, + description=_('function name')), + openapi.Parameter(name='desc', + in_=openapi.IN_QUERY, + type=openapi.TYPE_STRING, + required=False, + description=_('function description')), + ] + + class Debug(ApiMixin): + @staticmethod + def get_request_body_api(): + return openapi.Schema( + type=openapi.TYPE_OBJECT, + required=[], + properties={ + 'debug_field_list': openapi.Schema(type=openapi.TYPE_ARRAY, + description=_('Input variable list'), + items=openapi.Schema(type=openapi.TYPE_OBJECT, + required=[], + properties={ + 'name': openapi.Schema( + type=openapi.TYPE_STRING, + title=_('variable name'), + description=_('variable name')), + 'value': openapi.Schema( + type=openapi.TYPE_STRING, + title=_('variable value'), + description=_('variable value')), + })), + 'code': openapi.Schema(type=openapi.TYPE_STRING, title=_('function content'), + description=_('function content')), + 'input_field_list': openapi.Schema(type=openapi.TYPE_ARRAY, + description=_('Input variable list'), + items=openapi.Schema(type=openapi.TYPE_OBJECT, + required=['name', 'is_required', 'source'], + properties={ + 'name': openapi.Schema( + type=openapi.TYPE_STRING, + title=_('variable name'), + description=_('variable name')), + 'is_required': openapi.Schema( + type=openapi.TYPE_BOOLEAN, + title=_('required'), + description=_('required')), + 'type': openapi.Schema( + type=openapi.TYPE_STRING, + title=_('type'), + description=_( + 'Field type string|int|dict|array|float') + ), + 'source': openapi.Schema( + type=openapi.TYPE_STRING, + title=_('source'), + description=_( + 'The source only supports custom|reference')), + + })) + } + ) + + class Edit(ApiMixin): + @staticmethod + def get_request_body_api(): + return openapi.Schema( + type=openapi.TYPE_OBJECT, + required=[], + properties={ + 'name': openapi.Schema(type=openapi.TYPE_STRING, title=_('function name'), + 
description=_('function name')), + 'desc': openapi.Schema(type=openapi.TYPE_STRING, title=_('function description'), + description=_('function description')), + 'code': openapi.Schema(type=openapi.TYPE_STRING, title=_('function content'), + description=_('function content')), + 'permission_type': openapi.Schema(type=openapi.TYPE_STRING, title=_('permission'), + description=_('permission')), + 'is_active': openapi.Schema(type=openapi.TYPE_BOOLEAN, title=_('Is active'), + description=_('Is active')), + 'input_field_list': openapi.Schema(type=openapi.TYPE_ARRAY, + description=_('Input variable list'), + items=openapi.Schema(type=openapi.TYPE_OBJECT, + required=[], + properties={ + 'name': openapi.Schema( + type=openapi.TYPE_STRING, + title=_('variable name'), + description=_('variable name')), + 'is_required': openapi.Schema( + type=openapi.TYPE_BOOLEAN, + title=_('required'), + description=_('required')), + 'type': openapi.Schema( + type=openapi.TYPE_STRING, + title=_('type'), + description=_( + 'Field type string|int|dict|array|float') + ), + 'source': openapi.Schema( + type=openapi.TYPE_STRING, + title=_('source'), + description=_( + 'The source only supports custom|reference')), + + })) + } + ) + + class Create(ApiMixin): + @staticmethod + def get_request_body_api(): + return openapi.Schema( + type=openapi.TYPE_OBJECT, + required=['name', 'code', 'input_field_list', 'permission_type'], + properties={ + 'name': openapi.Schema(type=openapi.TYPE_STRING, title=_('function name'), + description=_('function name')), + 'desc': openapi.Schema(type=openapi.TYPE_STRING, title=_('function description'), + description=_('function description')), + 'code': openapi.Schema(type=openapi.TYPE_STRING, title=_('function content'), + description=_('function content')), + 'permission_type': openapi.Schema(type=openapi.TYPE_STRING, title=_('permission'), + description=_('permission')), + 'is_active': openapi.Schema(type=openapi.TYPE_BOOLEAN, title=_('Is active'), + description=_('Is 
active')), + 'input_field_list': openapi.Schema(type=openapi.TYPE_ARRAY, + description=_('Input variable list'), + items=openapi.Schema(type=openapi.TYPE_OBJECT, + required=['name', 'is_required', 'source'], + properties={ + 'name': openapi.Schema( + type=openapi.TYPE_STRING, + title=_('variable name'), + description=_('variable name')), + 'is_required': openapi.Schema( + type=openapi.TYPE_BOOLEAN, + title=_('required'), + description=_('required')), + 'type': openapi.Schema( + type=openapi.TYPE_STRING, + title=_('type'), + description=_( + 'Field type string|int|dict|array|float') + ), + 'source': openapi.Schema( + type=openapi.TYPE_STRING, + title=_('source'), + description=_( + 'The source only supports custom|reference')), + + })) + } + ) + + @staticmethod + def get_response_body_api(): + return openapi.Schema( + type=openapi.TYPE_OBJECT, + required=['id', 'name', 'code', 'input_field_list', 'permission_type'], + properties={ + 'id': openapi.Schema(type=openapi.TYPE_STRING, title="", description=_('ID')), + + 'name': openapi.Schema(type=openapi.TYPE_STRING, title=_('function name'), + description=_('function name')), + 'desc': openapi.Schema(type=openapi.TYPE_STRING, title=_('function description'), + description=_('function description')), + 'code': openapi.Schema(type=openapi.TYPE_STRING, title=_('function content'), + description=_('function content')), + 'permission_type': openapi.Schema(type=openapi.TYPE_STRING, title=_('permission'), + description=_('permission')), + 'is_active': openapi.Schema(type=openapi.TYPE_BOOLEAN, title=_('Is active'), + description=_('Is active')), + 'input_field_list': openapi.Schema(type=openapi.TYPE_ARRAY, + description=_('Input variable list'), + items=openapi.Schema(type=openapi.TYPE_OBJECT, + required=['name', 'is_required', 'source'], + properties={ + 'name': openapi.Schema( + type=openapi.TYPE_STRING, + title=_('variable name'), + description=_('variable name')), + 'is_required': openapi.Schema( + 
type=openapi.TYPE_BOOLEAN, + title=_('required'), + description=_('required')), + 'type': openapi.Schema( + type=openapi.TYPE_STRING, + title=_('type'), + description=_( + 'Field type string|int|dict|array|float') + ), + 'source': openapi.Schema( + type=openapi.TYPE_STRING, + title=_('source'), + description=_( + 'The source only supports custom|reference')), + + })) + } + ) + + class Export(ApiMixin): + @staticmethod + def get_request_params_api(): + return [openapi.Parameter(name='id', + in_=openapi.IN_PATH, + type=openapi.TYPE_STRING, + required=True, + description=_('ID')), + + ] + + class Import(ApiMixin): + @staticmethod + def get_request_params_api(): + return [openapi.Parameter(name='file', + in_=openapi.IN_FORM, + type=openapi.TYPE_FILE, + required=True, + description=_('Upload image files')) + ] diff --git a/apps/function_lib/swagger_api/py_lint_api.py b/apps/function_lib/swagger_api/py_lint_api.py new file mode 100644 index 00000000000..1577dfe60a1 --- /dev/null +++ b/apps/function_lib/swagger_api/py_lint_api.py @@ -0,0 +1,25 @@ +# coding=utf-8 +""" + @project: MaxKB + @Author:虎 + @file: py_lint_api.py + @date:2024/9/30 15:48 + @desc: +""" +from drf_yasg import openapi + +from common.mixins.api_mixin import ApiMixin +from django.utils.translation import gettext_lazy as _ + + +class PyLintApi(ApiMixin): + @staticmethod + def get_request_body_api(): + return openapi.Schema( + type=openapi.TYPE_OBJECT, + required=['code'], + properties={ + 'code': openapi.Schema(type=openapi.TYPE_STRING, title=_('function content'), + description=_('function content')) + } + ) diff --git a/apps/function_lib/task/__init__.py b/apps/function_lib/task/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/apps/function_lib/tests.py b/apps/function_lib/tests.py new file mode 100644 index 00000000000..7ce503c2dd9 --- /dev/null +++ b/apps/function_lib/tests.py @@ -0,0 +1,3 @@ +from django.test import TestCase + +# Create your tests here. 
diff --git a/apps/function_lib/urls.py b/apps/function_lib/urls.py new file mode 100644 index 00000000000..036d6892601 --- /dev/null +++ b/apps/function_lib/urls.py @@ -0,0 +1,17 @@ +from django.urls import path + +from . import views + +app_name = "function_lib" +urlpatterns = [ + path('function_lib', views.FunctionLibView.as_view()), + path('function_lib/debug', views.FunctionLibView.Debug.as_view()), + path('function_lib//export', views.FunctionLibView.Export.as_view()), + path('function_lib/import', views.FunctionLibView.Import.as_view()), + path('function_lib//edit_icon', views.FunctionLibView.EditIcon.as_view()), + path('function_lib//add_internal_fun', views.FunctionLibView.AddInternalFun.as_view()), + path('function_lib/pylint', views.PyLintView.as_view()), + path('function_lib/', views.FunctionLibView.Operate.as_view()), + path("function_lib//", views.FunctionLibView.Page.as_view(), + name="function_lib_page") +] diff --git a/apps/function_lib/views/__init__.py b/apps/function_lib/views/__init__.py new file mode 100644 index 00000000000..ad3240be184 --- /dev/null +++ b/apps/function_lib/views/__init__.py @@ -0,0 +1,10 @@ +# coding=utf-8 +""" + @project: MaxKB + @Author:虎 + @file: __init__.py + @date:2024/8/2 14:53 + @desc: +""" +from .function_lib_views import * +from .py_lint import * diff --git a/apps/function_lib/views/common.py b/apps/function_lib/views/common.py new file mode 100644 index 00000000000..5509964f57f --- /dev/null +++ b/apps/function_lib/views/common.py @@ -0,0 +1,20 @@ +# coding=utf-8 +""" + @project: MaxKB + @Author:虎 + @file: common.py + @date:2025/3/25 17:27 + @desc: +""" +from django.db.models import QuerySet + +from function_lib.models.function import FunctionLib + + +def get_function_lib_operation_object(function_lib_id): + function_lib_model = QuerySet(model=FunctionLib).filter(id=function_lib_id).first() + if function_lib_model is not None: + return { + "name": function_lib_model.name + } + return {} diff --git 
a/apps/function_lib/views/function_lib_views.py b/apps/function_lib/views/function_lib_views.py new file mode 100644 index 00000000000..e865566e3be --- /dev/null +++ b/apps/function_lib/views/function_lib_views.py @@ -0,0 +1,184 @@ +# coding=utf-8 +""" + @project: MaxKB + @Author:虎 + @file: function_lib_views.py + @date:2024/8/2 17:08 + @desc: +""" +from django.utils.translation import gettext_lazy as _ +from drf_yasg.utils import swagger_auto_schema +from rest_framework.decorators import action +from rest_framework.parsers import MultiPartParser +from rest_framework.request import Request +from rest_framework.views import APIView + +from common.auth import TokenAuth, has_permissions +from common.constants.permission_constants import RoleConstants +from common.log.log import log +from common.response import result +from function_lib.serializers.function_lib_serializer import FunctionLibSerializer +from function_lib.swagger_api.function_lib_api import FunctionLibApi +from function_lib.views.common import get_function_lib_operation_object + + +class FunctionLibView(APIView): + authentication_classes = [TokenAuth] + + @action(methods=["GET"], detail=False) + @swagger_auto_schema(operation_summary=_('Get function list'), + operation_id=_('Get function list'), + tags=[_('Function')], + manual_parameters=FunctionLibApi.Query.get_request_params_api()) + @has_permissions(RoleConstants.ADMIN, RoleConstants.USER) + @log(menu='Function', operate="Get function list") + def get(self, request: Request): + return result.success( + FunctionLibSerializer.Query( + data={'name': request.query_params.get('name'), + 'desc': request.query_params.get('desc'), + 'function_type': request.query_params.get('function_type'), + 'user_id': request.user.id}).list()) + + @action(methods=['POST'], detail=False) + @swagger_auto_schema(operation_summary=_('Create function'), + operation_id=_('Create function'), + request_body=FunctionLibApi.Create.get_request_body_api(), + 
responses=result.get_api_response(FunctionLibApi.Create.get_response_body_api()), + tags=[_('Function')]) + @has_permissions(RoleConstants.ADMIN, RoleConstants.USER) + @log(menu='Function', operate="Create function", + get_operation_object=lambda r, k: r.data.get('name')) + def post(self, request: Request): + return result.success(FunctionLibSerializer.Create(data={'user_id': request.user.id}).insert(request.data)) + + class Debug(APIView): + authentication_classes = [TokenAuth] + + @action(methods=['POST'], detail=False) + @swagger_auto_schema(operation_summary=_('Debug function'), + operation_id=_('Debug function'), + request_body=FunctionLibApi.Debug.get_request_body_api(), + responses=result.get_default_response(), + tags=[_('Function')]) + @has_permissions(RoleConstants.ADMIN, RoleConstants.USER) + def post(self, request: Request): + return result.success( + FunctionLibSerializer.Debug(data={'user_id': request.user.id}).debug( + request.data)) + + class Operate(APIView): + authentication_classes = [TokenAuth] + + @action(methods=['PUT'], detail=False) + @swagger_auto_schema(operation_summary=_('Update function'), + operation_id=_('Update function'), + request_body=FunctionLibApi.Edit.get_request_body_api(), + responses=result.get_api_response(FunctionLibApi.Edit.get_request_body_api()), + tags=[_('Function')]) + @has_permissions(RoleConstants.ADMIN, RoleConstants.USER) + @log(menu='Function', operate="Update function", + get_operation_object=lambda r, k: get_function_lib_operation_object(k.get('function_lib_id'))) + def put(self, request: Request, function_lib_id: str): + return result.success( + FunctionLibSerializer.Operate(data={'user_id': request.user.id, 'id': function_lib_id}).edit( + request.data)) + + @action(methods=['DELETE'], detail=False) + @swagger_auto_schema(operation_summary=_('Delete function'), + operation_id=_('Delete function'), + responses=result.get_default_response(), + tags=[_('Function')]) + @has_permissions(RoleConstants.ADMIN, 
RoleConstants.USER) + @log(menu='Function', operate="Delete function", + get_operation_object=lambda r, k: get_function_lib_operation_object(k.get('function_lib_id'))) + def delete(self, request: Request, function_lib_id: str): + return result.success( + FunctionLibSerializer.Operate(data={'user_id': request.user.id, 'id': function_lib_id}).delete()) + + @action(methods=['GET'], detail=False) + @swagger_auto_schema(operation_summary=_('Get function details'), + operation_id=_('Get function details'), + tags=[_('Function')]) + @has_permissions(RoleConstants.ADMIN, RoleConstants.USER) + def get(self, request: Request, function_lib_id: str): + return result.success( + FunctionLibSerializer.Operate(data={'user_id': request.user.id, 'id': function_lib_id}).one()) + + class Page(APIView): + authentication_classes = [TokenAuth] + + @action(methods=['GET'], detail=False) + @swagger_auto_schema(operation_summary=_('Get function list by pagination'), + operation_id=_('Get function list by pagination'), + manual_parameters=result.get_page_request_params( + FunctionLibApi.Query.get_request_params_api()), + responses=result.get_page_api_response(FunctionLibApi.get_response_body_api()), + tags=[_('Function')]) + @has_permissions(RoleConstants.ADMIN, RoleConstants.USER) + def get(self, request: Request, current_page: int, page_size: int): + return result.success( + FunctionLibSerializer.Query( + data={'name': request.query_params.get('name'), + 'desc': request.query_params.get('desc'), + 'function_type': request.query_params.get('function_type'), + 'user_id': request.user.id, + 'select_user_id': request.query_params.get('select_user_id')}).page( + current_page, page_size)) + + class Import(APIView): + authentication_classes = [TokenAuth] + parser_classes = [MultiPartParser] + + @action(methods="POST", detail=False) + @swagger_auto_schema(operation_summary=_("Import function"), operation_id=_("Import function"), + manual_parameters=FunctionLibApi.Import.get_request_params_api(), + 
tags=[_("function")] + ) + @has_permissions(RoleConstants.ADMIN, RoleConstants.USER) + @log(menu='Function', operate="Import function") + def post(self, request: Request): + return result.success(FunctionLibSerializer.Import( + data={'user_id': request.user.id, 'file': request.FILES.get('file')}).import_()) + + class Export(APIView): + authentication_classes = [TokenAuth] + + @action(methods="GET", detail=False) + @swagger_auto_schema(operation_summary=_("Export function"), operation_id=_("Export function"), + manual_parameters=FunctionLibApi.Export.get_request_params_api(), + tags=[_("function")] + ) + @has_permissions(RoleConstants.ADMIN, RoleConstants.USER) + @log(menu='Function', operate="Export function", + get_operation_object=lambda r, k: get_function_lib_operation_object(k.get('id'))) + def get(self, request: Request, id: str): + return FunctionLibSerializer.Operate( + data={'id': id, 'user_id': request.user.id}).export() + + class EditIcon(APIView): + authentication_classes = [TokenAuth] + parser_classes = [MultiPartParser] + + @action(methods=['PUT'], detail=False) + @has_permissions(RoleConstants.ADMIN, RoleConstants.USER) + @log(menu='Function', operate="Edit icon", + get_operation_object=lambda r, k: get_function_lib_operation_object(k.get('id'))) + def put(self, request: Request, id: str): + return result.success( + FunctionLibSerializer.IconOperate( + data={'id': id, 'user_id': request.user.id, + 'image': request.FILES.get('file')}).edit(request.data)) + + class AddInternalFun(APIView): + authentication_classes = [TokenAuth] + + @action(methods=['POST'], detail=False) + @has_permissions(RoleConstants.ADMIN, RoleConstants.USER) + @log(menu='Function', operate="Add internal function", + get_operation_object=lambda r, k: get_function_lib_operation_object(k.get('id'))) + def post(self, request: Request, id: str): + return result.success( + FunctionLibSerializer.InternalFunction( + data={'id': id, 'user_id': request.user.id, 'name': 
request.data.get('name')}) + .add()) diff --git a/apps/function_lib/views/py_lint.py b/apps/function_lib/views/py_lint.py new file mode 100644 index 00000000000..a0bee2a4c02 --- /dev/null +++ b/apps/function_lib/views/py_lint.py @@ -0,0 +1,33 @@ +# coding=utf-8 +""" + @project: MaxKB + @Author:虎 + @file: py_lint.py + @date:2024/9/30 15:35 + @desc: +""" +from drf_yasg.utils import swagger_auto_schema +from rest_framework.decorators import action +from rest_framework.request import Request +from rest_framework.views import APIView + +from common.auth import TokenAuth, has_permissions +from common.constants.permission_constants import RoleConstants +from common.response import result +from function_lib.serializers.py_lint_serializer import PyLintSerializer +from function_lib.swagger_api.py_lint_api import PyLintApi +from django.utils.translation import gettext_lazy as _ + + +class PyLintView(APIView): + authentication_classes = [TokenAuth] + + @action(methods=['POST'], detail=False) + @swagger_auto_schema(operation_summary=_('Check code'), + operation_id=_('Check code'), + request_body=PyLintApi.get_request_body_api(), + responses=result.get_api_response(PyLintApi.get_request_body_api()), + tags=[_('Function')]) + @has_permissions(RoleConstants.ADMIN, RoleConstants.USER) + def post(self, request: Request): + return result.success(PyLintSerializer(data={'user_id': request.user.id}).pylint(request.data)) diff --git a/apps/locales/en_US/LC_MESSAGES/django.po b/apps/locales/en_US/LC_MESSAGES/django.po new file mode 100644 index 00000000000..e068ff410a0 --- /dev/null +++ b/apps/locales/en_US/LC_MESSAGES/django.po @@ -0,0 +1,7502 @@ +# SOME DESCRIPTIVE TITLE. +# Copyright (C) YEAR THE PACKAGE'S COPYRIGHT HOLDER +# This file is distributed under the same license as the PACKAGE package. +# FIRST AUTHOR , YEAR. 
+# +msgid "" +msgstr "" +"Project-Id-Version: PACKAGE VERSION\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2025-03-20 14:22+0800\n" +"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" +"Last-Translator: FULL NAME \n" +"Language-Team: LANGUAGE \n" +"Language: \n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" + +#: apps/xpack/auth/user_key.py:26 +#: apps/xpack/serializers/license_serializers.py:96 +#: apps/xpack/serializers/license_tools.py:109 +msgid "The license is invalid" +msgstr "" + +#: apps/xpack/auth/user_key.py:32 apps/xpack/auth/user_key.py:34 +msgid "secret_key is invalid" +msgstr "Secret key is invalid" + +#: apps/xpack/middleware/swagger_middleware.py:19 +msgid "The license has not been uploaded or the license has expired" +msgstr "" + +#: apps/xpack/serializers/application_setting_serializer.py:20 +msgid "theme color" +msgstr "Theme color" + +#: apps/xpack/serializers/application_setting_serializer.py:22 +msgid "header font color" +msgstr "Header font color" + +#: apps/xpack/serializers/application_setting_serializer.py:26 +msgid "float location type" +msgstr "Float location type" + +#: apps/xpack/serializers/application_setting_serializer.py:27 +msgid "float location value" +msgstr "Float location value" + +#: apps/xpack/serializers/application_setting_serializer.py:31 +msgid "float location x" +msgstr "Float location x" + +#: apps/xpack/serializers/application_setting_serializer.py:32 +msgid "float location y" +msgstr "Float location y" + +#: apps/xpack/serializers/application_setting_serializer.py:36 +#: apps/xpack/swagger_api/application_setting_api.py:23 +msgid "show source" +msgstr "Show Source" + +#: apps/xpack/serializers/application_setting_serializer.py:37 +#: community/apps/application/serializers/application_serializers.py:354 +#: community/apps/application/swagger_api/application_api.py:169 +#: community/apps/application/swagger_api/application_api.py:170 +#: 
community/apps/users/serializers/user_serializers.py:273 +#: community/apps/users/views/user.py:85 community/apps/users/views/user.py:86 +msgid "language" +msgstr "Language" + +#: apps/xpack/serializers/application_setting_serializer.py:38 +#: apps/xpack/swagger_api/application_setting_api.py:30 +msgid "show history" +msgstr "Show History" + +#: apps/xpack/serializers/application_setting_serializer.py:39 +#: apps/xpack/swagger_api/application_setting_api.py:37 +msgid "draggable" +msgstr "Draggable" + +#: apps/xpack/serializers/application_setting_serializer.py:40 +#: apps/xpack/swagger_api/application_setting_api.py:44 +msgid "show guide" +msgstr "Show Guide" + +#: apps/xpack/serializers/application_setting_serializer.py:41 +#: apps/xpack/swagger_api/application_setting_api.py:51 +msgid "avatar" +msgstr "Avatar" + +#: apps/xpack/serializers/application_setting_serializer.py:42 +msgid "avatar url" +msgstr "Avatar URL" + +#: apps/xpack/serializers/application_setting_serializer.py:43 +#: apps/xpack/swagger_api/application_setting_api.py:86 +msgid "user avatar" +msgstr "User avatar" + +#: apps/xpack/serializers/application_setting_serializer.py:44 +msgid "user avatar url" +msgstr "User avatar URL" + +#: apps/xpack/serializers/application_setting_serializer.py:45 +#: apps/xpack/swagger_api/application_setting_api.py:58 +msgid "float icon" +msgstr "Float icon" + +#: apps/xpack/serializers/application_setting_serializer.py:46 +msgid "float icon url" +msgstr "Float icon URL" + +#: apps/xpack/serializers/application_setting_serializer.py:47 +#: apps/xpack/swagger_api/application_setting_api.py:65 +msgid "disclaimer" +msgstr "Disclaimer" + +#: apps/xpack/serializers/application_setting_serializer.py:48 +#: apps/xpack/swagger_api/application_setting_api.py:72 +msgid "disclaimer value" +msgstr "Disclaimer value" + +#: apps/xpack/serializers/application_setting_serializer.py:70 +#: apps/xpack/serializers/dataset_lark_serializer.py:373 +#: 
community/apps/dataset/serializers/dataset_serializers.py:548 +msgid "application id" +msgstr "Application ID" + +#: apps/xpack/serializers/application_setting_serializer.py:96 +#: apps/xpack/serializers/platform_serializer.py:83 +#: apps/xpack/serializers/platform_serializer.py:105 +#: apps/xpack/serializers/platform_serializer.py:174 +#: apps/xpack/serializers/platform_serializer.py:185 +#: community/apps/application/serializers/application_serializers.py:1237 +#: community/apps/application/serializers/chat_message_serializers.py:424 +#: community/apps/application/serializers/chat_serializers.py:294 +#: community/apps/application/serializers/chat_serializers.py:396 +msgid "Application does not exist" +msgstr "" + +#: apps/xpack/serializers/application_setting_serializer.py:116 +msgid "Float location field type error" +msgstr "" + +#: apps/xpack/serializers/application_setting_serializer.py:122 +msgid "Custom theme field type error" +msgstr "" + +#: apps/xpack/serializers/auth_config_serializer.py:19 +msgid "LDAP server cannot be empty" +msgstr "" + +#: apps/xpack/serializers/auth_config_serializer.py:20 +msgid "Base DN cannot be empty" +msgstr "" + +#: apps/xpack/serializers/auth_config_serializer.py:21 +msgid "Password cannot be empty" +msgstr "" + +#: apps/xpack/serializers/auth_config_serializer.py:22 +msgid "OU cannot be empty" +msgstr "" + +#: apps/xpack/serializers/auth_config_serializer.py:23 +msgid "LDAP filter cannot be empty" +msgstr "" + +#: apps/xpack/serializers/auth_config_serializer.py:24 +msgid "LDAP mapping cannot be empty" +msgstr "" + +#: apps/xpack/serializers/auth_config_serializer.py:29 +msgid "Authorization address cannot be empty" +msgstr "" + +#: apps/xpack/serializers/auth_config_serializer.py:31 +msgid "Token address cannot be empty" +msgstr "" + +#: apps/xpack/serializers/auth_config_serializer.py:33 +msgid "User information address cannot be empty" +msgstr "" + +#: apps/xpack/serializers/auth_config_serializer.py:34 +msgid "Scope 
cannot be empty" +msgstr "" + +#: apps/xpack/serializers/auth_config_serializer.py:35 +msgid "Client ID cannot be empty" +msgstr "" + +#: apps/xpack/serializers/auth_config_serializer.py:36 +msgid "Client secret cannot be empty" +msgstr "" + +#: apps/xpack/serializers/auth_config_serializer.py:38 +msgid "Redirect address cannot be empty" +msgstr "" + +#: apps/xpack/serializers/auth_config_serializer.py:39 +msgid "Field mapping cannot be empty" +msgstr "" + +#: apps/xpack/serializers/auth_config_serializer.py:166 +#: apps/xpack/serializers/qr_login/qr_login.py:33 +#: community/apps/users/serializers/user_serializers.py:89 +msgid "The user has been disabled, please contact the administrator!" +msgstr "" + +#: apps/xpack/serializers/cas.py:32 +msgid "HttpClient query failed: " +msgstr "" + +#: apps/xpack/serializers/cas.py:56 +msgid "CAS authentication failed" +msgstr "" + +#: apps/xpack/serializers/channel/chat_manage.py:76 +#: apps/xpack/serializers/channel/chat_manage.py:134 +msgid "" +"Sorry, no relevant content was found. Please re-describe your problem or " +"provide more information. " +msgstr "" + +#: apps/xpack/serializers/channel/chat_manage.py:82 +msgid "Think: " +msgstr "" + +#: apps/xpack/serializers/channel/chat_manage.py:85 +#: apps/xpack/serializers/channel/chat_manage.py:87 +msgid "AI reply: " +msgstr "" + +#: apps/xpack/serializers/channel/chat_manage.py:298 +msgid "Thinking, please wait a moment!" +msgstr "Thinking, please wait a moment......" 
+ +#: apps/xpack/serializers/channel/ding_talk.py:19 +#: apps/xpack/serializers/channel/wechat.py:89 +#: apps/xpack/serializers/channel/wechat.py:130 +#: apps/xpack/serializers/channel/wecom.py:76 +#: apps/xpack/serializers/channel/wecom.py:259 +msgid "The corresponding platform configuration was not found" +msgstr "" + +#: apps/xpack/serializers/channel/ding_talk.py:27 +#: apps/xpack/serializers/channel/feishu.py:112 +msgid "Currently only text messages are supported" +msgstr "" + +#: apps/xpack/serializers/channel/ding_talk.py:91 +#: apps/xpack/serializers/channel/wechat.py:161 +#: apps/xpack/serializers/channel/wecom.py:189 +msgid "Image download failed, check network" +msgstr "" + +#: apps/xpack/serializers/channel/ding_talk.py:92 +#: apps/xpack/serializers/channel/wechat.py:159 +#: apps/xpack/serializers/channel/wecom.py:185 +msgid "Please analyze the content of the image." +msgstr "" + +#: apps/xpack/serializers/channel/ding_talk.py:95 +#, python-brace-format +msgid "DingTalk application: {user}" +msgstr "" + +#: apps/xpack/serializers/channel/ding_talk.py:106 +#: apps/xpack/serializers/channel/ding_talk.py:151 +msgid "Content generated by AI" +msgstr "" + +#: apps/xpack/serializers/channel/feishu.py:87 +#: apps/xpack/serializers/channel/feishu.py:107 +msgid "Lark application: " +msgstr "" + +#: apps/xpack/serializers/channel/slack.py:116 +msgid "The corresponding platform configuration for Slack was not found" +msgstr "" + +#: apps/xpack/serializers/channel/slack.py:206 +msgid "Thinking..." +msgstr "" + +#: apps/xpack/serializers/channel/slack.py:321 +msgid "Invalid json format." 
+msgstr "" + +#: apps/xpack/serializers/channel/slack.py:327 +msgid "Invalid Slack request" +msgstr "" + +#: apps/xpack/serializers/channel/slack.py:335 +#| msgid "application name" +msgid "Slack application: {user}" +msgstr "" + +#: apps/xpack/serializers/channel/slack.py:471 +msgid "Stop" +msgstr "" + +#: apps/xpack/serializers/channel/wechat.py:141 +#, python-brace-format +msgid "WeChat Official Account: {account}" +msgstr "" + +#: apps/xpack/serializers/channel/wechat.py:148 +#: apps/xpack/serializers/channel/wecom.py:171 +#: apps/xpack/serializers/channel/wecom.py:175 +msgid "" +"The app does not enable the speech-to-text function or the speech-to-text " +"function fails." +msgstr "" + +#: apps/xpack/serializers/channel/wechat.py:187 +msgid "Message types not supported yet" +msgstr "" + +#: apps/xpack/serializers/channel/wechat.py:194 +msgid "Welcome to subscribe" +msgstr "" + +#: apps/xpack/serializers/channel/wecom.py:84 +msgid "Enterprise WeChat user: " +msgstr "" + +#: apps/xpack/serializers/channel/wecom.py:95 +msgid "Enterprise WeChat customer service: " +msgstr "" + +#: apps/xpack/serializers/channel/wecom.py:132 +#: apps/xpack/serializers/channel/wecom.py:148 +msgid "This type of message is not supported yet" +msgstr "" + +#: apps/xpack/serializers/channel/wecom.py:254 +msgid "Signature missing" +msgstr "" + +#: apps/xpack/serializers/channel/wecom.py:266 +#: apps/xpack/serializers/channel/wecom.py:273 +#, python-brace-format +msgid "An error occurred while processing the GET request {e}" +msgstr "" + +#: apps/xpack/serializers/dataset_lark_serializer.py:58 +#: community/apps/dataset/serializers/dataset_serializers.py:82 +#: community/apps/dataset/serializers/dataset_serializers.py:214 +#: community/apps/dataset/serializers/dataset_serializers.py:295 +#: community/apps/dataset/serializers/dataset_serializers.py:296 +#: community/apps/dataset/serializers/dataset_serializers.py:357 +#: community/apps/dataset/serializers/dataset_serializers.py:358 +#: 
community/apps/dataset/serializers/dataset_serializers.py:502 +#: community/apps/dataset/serializers/dataset_serializers.py:503 +#: community/apps/dataset/serializers/dataset_serializers.py:568 +#: community/apps/dataset/serializers/dataset_serializers.py:607 +#: community/apps/dataset/serializers/dataset_serializers.py:701 +#: community/apps/dataset/serializers/dataset_serializers.py:933 +#: community/apps/dataset/serializers/dataset_serializers.py:934 +#: community/apps/dataset/serializers/document_serializers.py:816 +#: community/apps/function_lib/serializers/function_lib_serializer.py:141 +#: community/apps/function_lib/serializers/function_lib_serializer.py:186 +#: community/apps/function_lib/serializers/function_lib_serializer.py:203 +#: community/apps/function_lib/serializers/function_lib_serializer.py:262 +#: community/apps/setting/serializers/provider_serializers.py:76 +#: community/apps/setting/serializers/provider_serializers.py:127 +#: community/apps/setting/serializers/provider_serializers.py:174 +#: community/apps/setting/serializers/provider_serializers.py:256 +#: community/apps/setting/serializers/provider_serializers.py:277 +#: community/apps/setting/serializers/provider_serializers.py:301 +#: community/apps/setting/serializers/team_serializers.py:42 +#: community/apps/users/serializers/user_serializers.py:272 +msgid "user id" +msgstr "User ID" + +#: apps/xpack/serializers/dataset_lark_serializer.py:61 +#: apps/xpack/serializers/dataset_lark_serializer.py:112 +#: apps/xpack/serializers/dataset_lark_serializer.py:113 +#: apps/xpack/serializers/dataset_lark_serializer.py:367 +#: community/apps/dataset/serializers/dataset_serializers.py:137 +#: community/apps/dataset/serializers/dataset_serializers.py:201 +#: community/apps/dataset/serializers/dataset_serializers.py:221 +#: community/apps/dataset/serializers/dataset_serializers.py:244 +#: community/apps/dataset/serializers/dataset_serializers.py:273 +#: 
community/apps/dataset/serializers/dataset_serializers.py:274 +#: community/apps/dataset/serializers/dataset_serializers.py:291 +#: community/apps/dataset/serializers/dataset_serializers.py:292 +#: community/apps/dataset/serializers/dataset_serializers.py:319 +#: community/apps/dataset/serializers/dataset_serializers.py:353 +#: community/apps/dataset/serializers/dataset_serializers.py:354 +#: community/apps/dataset/serializers/dataset_serializers.py:382 +#: community/apps/dataset/serializers/dataset_serializers.py:383 +#: community/apps/dataset/serializers/dataset_serializers.py:498 +#: community/apps/dataset/serializers/dataset_serializers.py:499 +#: community/apps/dataset/serializers/dataset_serializers.py:527 +#: community/apps/dataset/serializers/dataset_serializers.py:528 +#: community/apps/dataset/serializers/dataset_serializers.py:542 +#: community/apps/dataset/serializers/dataset_serializers.py:907 +#: community/apps/dataset/serializers/dataset_serializers.py:908 +#: community/apps/dataset/serializers/dataset_serializers.py:929 +#: community/apps/dataset/serializers/dataset_serializers.py:930 +msgid "dataset name" +msgstr "Knowledge Base Name" + +#: apps/xpack/serializers/dataset_lark_serializer.py:63 +#: apps/xpack/serializers/dataset_lark_serializer.py:114 +#: apps/xpack/serializers/dataset_lark_serializer.py:115 +#: apps/xpack/serializers/dataset_lark_serializer.py:369 +#: community/apps/dataset/serializers/dataset_serializers.py:142 +#: community/apps/dataset/serializers/dataset_serializers.py:206 +#: community/apps/dataset/serializers/dataset_serializers.py:226 +#: community/apps/dataset/serializers/dataset_serializers.py:249 +#: community/apps/dataset/serializers/dataset_serializers.py:278 +#: community/apps/dataset/serializers/dataset_serializers.py:279 +#: community/apps/dataset/serializers/dataset_serializers.py:293 +#: community/apps/dataset/serializers/dataset_serializers.py:294 +#: community/apps/dataset/serializers/dataset_serializers.py:324 
+#: community/apps/dataset/serializers/dataset_serializers.py:355 +#: community/apps/dataset/serializers/dataset_serializers.py:356 +#: community/apps/dataset/serializers/dataset_serializers.py:384 +#: community/apps/dataset/serializers/dataset_serializers.py:385 +#: community/apps/dataset/serializers/dataset_serializers.py:500 +#: community/apps/dataset/serializers/dataset_serializers.py:501 +#: community/apps/dataset/serializers/dataset_serializers.py:529 +#: community/apps/dataset/serializers/dataset_serializers.py:530 +#: community/apps/dataset/serializers/dataset_serializers.py:544 +#: community/apps/dataset/serializers/dataset_serializers.py:909 +#: community/apps/dataset/serializers/dataset_serializers.py:910 +#: community/apps/dataset/serializers/dataset_serializers.py:931 +#: community/apps/dataset/serializers/dataset_serializers.py:932 +msgid "dataset description" +msgstr "Knowledge Base Description" + +#: apps/xpack/serializers/dataset_lark_serializer.py:65 +#: apps/xpack/serializers/dataset_lark_serializer.py:118 +#: apps/xpack/serializers/dataset_lark_serializer.py:377 +msgid "app id" +msgstr "" + +#: apps/xpack/serializers/dataset_lark_serializer.py:66 +#: apps/xpack/serializers/dataset_lark_serializer.py:119 +#: apps/xpack/serializers/dataset_lark_serializer.py:120 +#: apps/xpack/serializers/dataset_lark_serializer.py:378 +msgid "app secret" +msgstr "" + +#: apps/xpack/serializers/dataset_lark_serializer.py:67 +#: apps/xpack/serializers/dataset_lark_serializer.py:121 +#: apps/xpack/serializers/dataset_lark_serializer.py:122 +#: apps/xpack/serializers/dataset_lark_serializer.py:132 +#: apps/xpack/serializers/dataset_lark_serializer.py:165 +#: apps/xpack/serializers/dataset_lark_serializer.py:379 +msgid "folder token" +msgstr "" + +#: apps/xpack/serializers/dataset_lark_serializer.py:69 +#: apps/xpack/serializers/dataset_lark_serializer.py:116 +#: apps/xpack/serializers/dataset_lark_serializer.py:117 +#: 
community/apps/dataset/serializers/dataset_serializers.py:231 +#: community/apps/dataset/serializers/dataset_serializers.py:254 +#: community/apps/dataset/serializers/dataset_serializers.py:330 +#: community/apps/dataset/serializers/dataset_serializers.py:386 +#: community/apps/dataset/serializers/dataset_serializers.py:387 +#: community/apps/dataset/serializers/dataset_serializers.py:531 +#: community/apps/dataset/serializers/dataset_serializers.py:532 +msgid "embedding mode" +msgstr "Embedding mode" + +#: apps/xpack/serializers/dataset_lark_serializer.py:79 +#: apps/xpack/serializers/dataset_lark_serializer.py:389 +msgid "Network error or folder token error!" +msgstr "" + +#: apps/xpack/serializers/dataset_lark_serializer.py:87 +#: apps/xpack/serializers/dataset_lark_serializer.py:444 +#: community/apps/dataset/serializers/dataset_serializers.py:424 +#: community/apps/dataset/serializers/dataset_serializers.py:476 +#: community/apps/dataset/serializers/dataset_serializers.py:865 +msgid "Knowledge base name duplicate!" 
+msgstr "" + +#: apps/xpack/serializers/dataset_lark_serializer.py:130 +#: apps/xpack/serializers/dataset_lark_serializer.py:164 +#: apps/xpack/serializers/dataset_lark_serializer.py:201 +#: apps/xpack/serializers/dataset_lark_serializer.py:221 +#: apps/xpack/serializers/dataset_lark_serializer.py:346 +#: apps/xpack/serializers/dataset_lark_serializer.py:363 +#: community/apps/common/swagger_api/common_api.py:68 +#: community/apps/common/swagger_api/common_api.py:69 +#: community/apps/dataset/serializers/dataset_serializers.py:84 +#: community/apps/dataset/serializers/dataset_serializers.py:93 +#: community/apps/dataset/serializers/dataset_serializers.py:605 +#: community/apps/dataset/serializers/dataset_serializers.py:688 +#: community/apps/dataset/serializers/dataset_serializers.py:699 +#: community/apps/dataset/serializers/dataset_serializers.py:955 +#: community/apps/dataset/serializers/document_serializers.py:169 +#: community/apps/dataset/serializers/document_serializers.py:286 +#: community/apps/dataset/serializers/document_serializers.py:407 +#: community/apps/dataset/serializers/document_serializers.py:573 +#: community/apps/dataset/serializers/document_serializers.py:1055 +#: community/apps/dataset/serializers/document_serializers.py:1216 +#: community/apps/dataset/serializers/paragraph_serializers.py:96 +#: community/apps/dataset/serializers/paragraph_serializers.py:162 +#: community/apps/dataset/serializers/paragraph_serializers.py:195 +#: community/apps/dataset/serializers/paragraph_serializers.py:196 +#: community/apps/dataset/serializers/paragraph_serializers.py:208 +#: community/apps/dataset/serializers/paragraph_serializers.py:266 +#: community/apps/dataset/serializers/paragraph_serializers.py:285 +#: community/apps/dataset/serializers/paragraph_serializers.py:302 +#: community/apps/dataset/serializers/paragraph_serializers.py:459 +#: community/apps/dataset/serializers/paragraph_serializers.py:567 +#: 
community/apps/dataset/serializers/paragraph_serializers.py:638 +#: community/apps/dataset/serializers/paragraph_serializers.py:647 +#: community/apps/dataset/serializers/paragraph_serializers.py:715 +#: community/apps/dataset/serializers/paragraph_serializers.py:716 +#: community/apps/dataset/serializers/paragraph_serializers.py:732 +#: community/apps/dataset/serializers/problem_serializers.py:87 +#: community/apps/dataset/serializers/problem_serializers.py:112 +#: community/apps/dataset/serializers/problem_serializers.py:135 +#: community/apps/dataset/serializers/problem_serializers.py:192 +#: community/apps/dataset/swagger_api/problem_api.py:28 +#: community/apps/dataset/swagger_api/problem_api.py:29 +#: community/apps/dataset/swagger_api/problem_api.py:77 +#: community/apps/dataset/swagger_api/problem_api.py:96 +#: community/apps/dataset/swagger_api/problem_api.py:149 +#: community/apps/dataset/swagger_api/problem_api.py:177 +msgid "dataset id" +msgstr "Dataset ID" + +#: apps/xpack/serializers/dataset_lark_serializer.py:145 +#: apps/xpack/serializers/dataset_lark_serializer.py:146 +#: apps/xpack/serializers/dataset_lark_serializer.py:212 +#: community/apps/dataset/serializers/document_serializers.py:812 +#: community/apps/dataset/serializers/document_serializers.py:813 +#: community/apps/setting/swagger_api/provide_api.py:22 +#: community/apps/setting/swagger_api/provide_api.py:48 +#: community/apps/setting/swagger_api/provide_api.py:49 +#: community/apps/setting/swagger_api/provide_api.py:76 +#: community/apps/setting/swagger_api/provide_api.py:77 +#: community/apps/setting/swagger_api/provide_api.py:143 +#: community/apps/setting/swagger_api/provide_api.py:144 +msgid "name" +msgstr "Name" + +#: apps/xpack/serializers/dataset_lark_serializer.py:147 +#: apps/xpack/serializers/dataset_lark_serializer.py:148 +#: apps/xpack/serializers/dataset_lark_serializer.py:211 +#: community/apps/application/serializers/application_serializers.py:257 +msgid "token" +msgstr 
"Token" + +#: apps/xpack/serializers/dataset_lark_serializer.py:149 +#: apps/xpack/serializers/dataset_lark_serializer.py:150 +#: apps/xpack/serializers/dataset_lark_serializer.py:210 +#: community/apps/application/flow/step_node/function_node/i_function_node.py:26 +#: community/apps/dataset/serializers/document_serializers.py:229 +#: community/apps/function_lib/serializers/function_lib_serializer.py:72 +#: community/apps/function_lib/swagger_api/function_lib_api.py:92 +#: community/apps/function_lib/swagger_api/function_lib_api.py:138 +#: community/apps/function_lib/swagger_api/function_lib_api.py:184 +#: community/apps/setting/serializers/team_serializers.py:59 +#: community/apps/setting/serializers/team_serializers.py:74 +#: community/apps/setting/serializers/team_serializers.py:85 +#: community/apps/setting/serializers/valid_serializers.py:37 +msgid "type" +msgstr "Type" + +#: apps/xpack/serializers/dataset_lark_serializer.py:151 +#: apps/xpack/serializers/dataset_lark_serializer.py:152 +#| msgid "id does not exist" +msgid "is exist" +msgstr "" + +#: apps/xpack/serializers/dataset_lark_serializer.py:173 +#: apps/xpack/serializers/dataset_lark_serializer.py:230 +#: apps/xpack/task/sync.py:120 +#| msgid "Knowledge base id" +msgid "Knowledge base not found!" +msgstr "" + +#: apps/xpack/serializers/dataset_lark_serializer.py:185 +#: apps/xpack/serializers/dataset_lark_serializer.py:252 +msgid "Failed to get lark document list!" 
+msgstr "" + +#: apps/xpack/serializers/dataset_lark_serializer.py:262 +#: community/apps/common/swagger_api/common_api.py:70 +#: community/apps/common/swagger_api/common_api.py:71 +#: community/apps/dataset/serializers/document_serializers.py:293 +#: community/apps/dataset/serializers/document_serializers.py:386 +#: community/apps/dataset/serializers/document_serializers.py:490 +#: community/apps/dataset/serializers/document_serializers.py:572 +#: community/apps/dataset/serializers/document_serializers.py:581 +#: community/apps/dataset/serializers/document_serializers.py:586 +#: community/apps/dataset/serializers/document_serializers.py:854 +#: community/apps/dataset/serializers/document_serializers.py:982 +#: community/apps/dataset/serializers/document_serializers.py:1191 +#: community/apps/dataset/serializers/paragraph_serializers.py:98 +#: community/apps/dataset/serializers/paragraph_serializers.py:167 +#: community/apps/dataset/serializers/paragraph_serializers.py:212 +#: community/apps/dataset/serializers/paragraph_serializers.py:271 +#: community/apps/dataset/serializers/paragraph_serializers.py:286 +#: community/apps/dataset/serializers/paragraph_serializers.py:303 +#: community/apps/dataset/serializers/paragraph_serializers.py:426 +#: community/apps/dataset/serializers/paragraph_serializers.py:431 +#: community/apps/dataset/serializers/paragraph_serializers.py:462 +#: community/apps/dataset/serializers/paragraph_serializers.py:570 +#: community/apps/dataset/serializers/paragraph_serializers.py:642 +#: community/apps/dataset/serializers/paragraph_serializers.py:650 +#: community/apps/dataset/serializers/paragraph_serializers.py:682 +#: community/apps/dataset/serializers/paragraph_serializers.py:717 +#: community/apps/dataset/serializers/paragraph_serializers.py:718 +#: community/apps/dataset/serializers/paragraph_serializers.py:733 +#: community/apps/dataset/serializers/problem_serializers.py:58 +#: community/apps/dataset/swagger_api/problem_api.py:64 
+msgid "document id" +msgstr "Document ID" + +#: apps/xpack/serializers/dataset_lark_serializer.py:269 +#: apps/xpack/serializers/dataset_lark_serializer.py:289 +#: community/apps/dataset/serializers/document_serializers.py:497 +#: community/apps/dataset/serializers/document_serializers.py:593 +#: community/apps/dataset/serializers/document_serializers.py:1197 +msgid "document id not exist" +msgstr "Document ID does not exist" + +#: apps/xpack/serializers/dataset_lark_serializer.py:271 +msgid "Synchronization is only supported for lark documents" +msgstr "" + +#: apps/xpack/serializers/dataset_lark_serializer.py:374 +#: community/apps/dataset/serializers/dataset_serializers.py:549 +#: community/apps/dataset/serializers/dataset_serializers.py:914 +#: community/apps/dataset/serializers/dataset_serializers.py:915 +msgid "application id list" +msgstr "Application ID list" + +#: apps/xpack/serializers/dataset_lark_serializer.py:416 +#: community/apps/dataset/serializers/dataset_serializers.py:175 +#: community/apps/dataset/serializers/dataset_serializers.py:837 +#: community/apps/function_lib/serializers/function_lib_serializer.py:125 +#: community/apps/function_lib/swagger_api/function_lib_api.py:119 +#: community/apps/function_lib/swagger_api/function_lib_api.py:120 +#: community/apps/function_lib/swagger_api/function_lib_api.py:165 +#: community/apps/function_lib/swagger_api/function_lib_api.py:166 +#: community/apps/setting/swagger_api/provide_api.py:81 +msgid "permission" +msgstr "Permission" + +#: apps/xpack/serializers/dataset_lark_serializer.py:463 +#: community/apps/dataset/serializers/dataset_serializers.py:884 +#, python-brace-format +msgid "Unknown application id {dataset_id}, cannot be associated" +msgstr "" + +#: apps/xpack/serializers/license_serializers.py:52 +msgid "license file" +msgstr "License file" + +#: apps/xpack/serializers/license_tools.py:134 +msgid "License usage limit exceeded." 
+msgstr "" + +#: apps/xpack/serializers/license_tools.py:158 +msgid "The network is busy, try again later." +msgstr "" + +#: apps/xpack/serializers/oauth2.py:79 apps/xpack/serializers/oauth2.py:82 +msgid "Failed to obtain user information" +msgstr "" + +#: apps/xpack/serializers/operate_log.py:36 +#: community/apps/application/serializers/application_statistics_serializers.py:27 +#: community/apps/application/serializers/chat_serializers.py:116 +#: community/apps/application/swagger_api/application_statistics_api.py:26 +msgid "Start time" +msgstr "Start Time" + +#: apps/xpack/serializers/operate_log.py:37 +#: community/apps/application/serializers/application_statistics_serializers.py:28 +#: community/apps/application/serializers/chat_serializers.py:117 +#: community/apps/application/swagger_api/application_statistics_api.py:31 +#: community/apps/application/swagger_api/chat_api.py:270 +msgid "End time" +msgstr "End Time" + +#: apps/xpack/serializers/operate_log.py:38 +#: apps/xpack/swagger_api/operate_log.py:17 +#: apps/xpack/swagger_api/operate_log.py:18 +#: apps/xpack/swagger_api/operate_log.py:45 +#: apps/xpack/swagger_api/operate_log.py:46 +msgid "menu" +msgstr "" + +#: apps/xpack/serializers/operate_log.py:39 +#: apps/xpack/swagger_api/operate_log.py:20 +#: apps/xpack/swagger_api/operate_log.py:21 +#: apps/xpack/swagger_api/operate_log.py:48 +#: apps/xpack/swagger_api/operate_log.py:49 +msgid "operate" +msgstr "" + +#: apps/xpack/serializers/operate_log.py:40 +#: apps/xpack/swagger_api/operate_log.py:51 +#: apps/xpack/swagger_api/operate_log.py:52 +#| msgid "user id" +msgid "user" +msgstr "User" + +#: apps/xpack/serializers/operate_log.py:41 +#: apps/xpack/swagger_api/operate_log.py:54 +#: apps/xpack/swagger_api/operate_log.py:55 +#: community/apps/dataset/serializers/document_serializers.py:417 +msgid "status" +msgstr "Status" + +#: apps/xpack/serializers/operate_log.py:42 +#: apps/xpack/swagger_api/operate_log.py:57 +#: 
apps/xpack/swagger_api/operate_log.py:58 +msgid "ip_address" +msgstr "" + +#: apps/xpack/serializers/platform_serializer.py:14 +msgid "app_id is required" +msgstr "App ID is required" + +#: apps/xpack/serializers/platform_serializer.py:15 +msgid "app_secret is required" +msgstr "App Secret is required" + +#: apps/xpack/serializers/platform_serializer.py:16 +msgid "token is required" +msgstr "Token is required" + +#: apps/xpack/serializers/platform_serializer.py:17 +msgid "callback_url is required" +msgstr "Callback URL is required" + +#: apps/xpack/serializers/platform_serializer.py:23 +#: apps/xpack/serializers/platform_serializer.py:32 +msgid "App ID is required" +msgstr "" + +#: apps/xpack/serializers/platform_serializer.py:24 +#: apps/xpack/serializers/platform_source_serializer.py:24 +msgid "Agent ID is required" +msgstr "" + +#: apps/xpack/serializers/platform_serializer.py:25 +msgid "Secret is required" +msgstr "" + +#: apps/xpack/serializers/platform_serializer.py:26 +msgid "Token is required" +msgstr "" + +#: apps/xpack/serializers/platform_serializer.py:28 +#: apps/xpack/serializers/platform_serializer.py:36 +#: apps/xpack/serializers/platform_serializer.py:42 +#: apps/xpack/serializers/platform_serializer.py:48 +#: apps/xpack/serializers/platform_source_serializer.py:19 +msgid "Callback URL is required" +msgstr "" + +#: apps/xpack/serializers/platform_serializer.py:33 +#: apps/xpack/serializers/platform_source_serializer.py:18 +msgid "App Secret is required" +msgstr "" + +#: apps/xpack/serializers/platform_serializer.py:35 +msgid "Verification Token is required" +msgstr "" + +#: apps/xpack/serializers/platform_serializer.py:40 +msgid "Client ID is required" +msgstr "" + +#: apps/xpack/serializers/platform_serializer.py:41 +msgid "Client Secret is required" +msgstr "" + +#: apps/xpack/serializers/platform_serializer.py:46 +#| msgid "app_secret is required" +msgid "Signing Secret is required" +msgstr "" + +#: 
apps/xpack/serializers/platform_serializer.py:47 +#| msgid "token is required" +msgid "Bot User Token is required" +msgstr "" + +#: apps/xpack/serializers/platform_serializer.py:68 +msgid "Check if the fields are correct" +msgstr "" + +#: apps/xpack/serializers/platform_serializer.py:114 +#: apps/xpack/views/platform.py:85 apps/xpack/views/platform.py:101 +#: community/apps/application/flow/step_node/application_node/i_application_node.py:13 +#: community/apps/application/serializers/application_serializers.py:335 +#: community/apps/application/serializers/application_serializers.py:581 +#: community/apps/application/serializers/application_serializers.py:696 +#: community/apps/application/serializers/application_serializers.py:791 +#: community/apps/application/serializers/application_serializers.py:1230 +#: community/apps/application/serializers/application_serializers.py:1272 +#: community/apps/application/serializers/application_statistics_serializers.py:26 +#: community/apps/application/serializers/application_version_serializers.py:35 +#: community/apps/application/serializers/application_version_serializers.py:59 +#: community/apps/application/serializers/chat_message_serializers.py:207 +#: community/apps/application/serializers/chat_message_serializers.py:270 +#: community/apps/application/serializers/chat_serializers.py:77 +#: community/apps/application/serializers/chat_serializers.py:102 +#: community/apps/application/serializers/chat_serializers.py:119 +#: community/apps/application/serializers/chat_serializers.py:287 +#: community/apps/application/serializers/chat_serializers.py:363 +#: community/apps/application/serializers/chat_serializers.py:440 +#: community/apps/application/swagger_api/application_api.py:87 +#: community/apps/application/swagger_api/application_api.py:101 +#: community/apps/application/swagger_api/application_api.py:112 +#: community/apps/application/swagger_api/application_api.py:143 +#: 
community/apps/application/swagger_api/application_api.py:392 +#: community/apps/application/swagger_api/application_api.py:413 +#: community/apps/application/swagger_api/application_api.py:424 +#: community/apps/application/swagger_api/application_statistics_api.py:21 +#: community/apps/application/swagger_api/application_version_api.py:42 +#: community/apps/application/swagger_api/application_version_api.py:56 +#: community/apps/application/swagger_api/chat_api.py:23 +#: community/apps/application/swagger_api/chat_api.py:33 +#: community/apps/application/swagger_api/chat_api.py:167 +#: community/apps/application/swagger_api/chat_api.py:168 +#: community/apps/application/swagger_api/chat_api.py:199 +#: community/apps/application/swagger_api/chat_api.py:222 +#: community/apps/application/swagger_api/chat_api.py:249 +#: community/apps/application/swagger_api/chat_api.py:281 +#: community/apps/application/swagger_api/chat_api.py:350 +#: community/apps/application/swagger_api/chat_api.py:410 +#: community/apps/application/swagger_api/chat_api.py:427 +#: community/apps/application/swagger_api/chat_api.py:460 +#: community/apps/application/views/chat_views.py:477 +msgid "Application ID" +msgstr "" + +#: apps/xpack/serializers/platform_serializer.py:116 +msgid "Platform type, for example: wechat" +msgstr "" + +#: apps/xpack/serializers/platform_serializer.py:125 +#: apps/xpack/serializers/platform_serializer.py:126 +msgid "Platform type" +msgstr "" + +#: apps/xpack/serializers/platform_serializer.py:128 +msgid "Status" +msgstr "" + +#: apps/xpack/serializers/platform_serializer.py:138 +#: apps/xpack/serializers/platform_serializer.py:139 +msgid "Configuration information" +msgstr "" + +#: apps/xpack/serializers/platform_serializer.py:191 +#, python-brace-format +msgid "The platform configuration corresponding to {type} was not found" +msgstr "" + +#: apps/xpack/serializers/platform_source_serializer.py:23 +#: apps/xpack/serializers/platform_source_serializer.py:32 +msgid 
"Corp ID is required" +msgstr "" + +#: apps/xpack/serializers/platform_source_serializer.py:28 +#: apps/xpack/serializers/platform_source_serializer.py:33 +msgid "App Key is required" +msgstr "" + +#: apps/xpack/serializers/platform_source_serializer.py:78 +msgid "Configuration information is wrong and failed to save" +msgstr "" + +#: apps/xpack/serializers/platform_source_serializer.py:104 +msgid "Connection failed" +msgstr "" + +#: apps/xpack/serializers/platform_source_serializer.py:123 +msgid "Platform does not exist" +msgstr "" + +#: apps/xpack/serializers/platform_source_serializer.py:134 +msgid "Unsupported platform type" +msgstr "" + +#: apps/xpack/serializers/qr_login/qr_login.py:28 +msgid "Team" +msgstr "Team Member" + +#: apps/xpack/serializers/system_params_serializers.py:63 +msgid "theme" +msgstr "" + +#: apps/xpack/serializers/system_params_serializers.py:70 +msgid "website icon" +msgstr "" + +#: apps/xpack/serializers/system_params_serializers.py:77 +msgid "login logo" +msgstr "" + +#: apps/xpack/serializers/system_params_serializers.py:84 +msgid "Login background image" +msgstr "" + +#: apps/xpack/serializers/system_params_serializers.py:91 +msgid "website title" +msgstr "Website title" + +#: apps/xpack/serializers/system_params_serializers.py:98 +msgid "website slogan" +msgstr "Website slogan" + +#: apps/xpack/serializers/system_params_serializers.py:105 +msgid "Show user manual" +msgstr "" + +#: apps/xpack/serializers/system_params_serializers.py:112 +msgid "User manual address" +msgstr "" + +#: apps/xpack/serializers/system_params_serializers.py:119 +msgid "Show forum" +msgstr "" + +#: apps/xpack/serializers/system_params_serializers.py:126 +msgid "Forum address" +msgstr "" + +#: apps/xpack/serializers/system_params_serializers.py:133 +msgid "Show project" +msgstr "" + +#: apps/xpack/serializers/system_params_serializers.py:140 +msgid "Project address" +msgstr "" + +#: apps/xpack/serializers/tools.py:58 +#, python-brace-format +msgid "" 
+"Thinking about 【{question}】...If you want me to continue answering, please " +"reply {trigger_message}" +msgstr "" + +#: apps/xpack/serializers/tools.py:158 +msgid "" +"\n" +" ------------\n" +"[To be continued, reply \"Continue to answer the question]" +msgstr "" + +#: apps/xpack/serializers/tools.py:238 +#, python-brace-format +msgid "" +"To be continued, reply \"{trigger_message}\" to continue answering the " +"question" +msgstr "" + +#: apps/xpack/swagger_api/application_setting_api.py:79 +msgid "Custom theme {theme_color: , header_font_color: }" +msgstr "" + +#: apps/xpack/swagger_api/application_setting_api.py:93 +msgid "Float location {top: 0, left: 0}" +msgstr "" + +#: apps/xpack/swagger_api/application_setting_api.py:101 +#: apps/xpack/swagger_api/application_setting_api.py:102 +#: apps/xpack/swagger_api/auth_api.py:10 apps/xpack/swagger_api/auth_api.py:11 +#: apps/xpack/swagger_api/auth_api.py:81 apps/xpack/swagger_api/auth_api.py:82 +msgid "Authentication configuration" +msgstr "" + +#: apps/xpack/swagger_api/application_setting_api.py:106 +#: apps/xpack/swagger_api/application_setting_api.py:107 +#: apps/xpack/swagger_api/auth_api.py:15 apps/xpack/swagger_api/auth_api.py:16 +#: apps/xpack/swagger_api/auth_api.py:30 apps/xpack/swagger_api/auth_api.py:87 +#: apps/xpack/swagger_api/auth_api.py:88 apps/xpack/views/auth.py:27 +#: apps/xpack/views/auth.py:28 +msgid "Authentication type" +msgstr "" + +#: apps/xpack/swagger_api/application_setting_api.py:109 +#: apps/xpack/swagger_api/application_setting_api.py:110 +#: apps/xpack/swagger_api/auth_api.py:18 apps/xpack/swagger_api/auth_api.py:19 +#: apps/xpack/swagger_api/auth_api.py:93 apps/xpack/swagger_api/auth_api.py:94 +msgid "Configuration" +msgstr "" + +#: apps/xpack/swagger_api/application_setting_api.py:112 +#: apps/xpack/swagger_api/application_setting_api.py:113 +#: apps/xpack/swagger_api/auth_api.py:21 apps/xpack/swagger_api/auth_api.py:22 +#: community/apps/common/swagger_api/common_api.py:72 +#: 
community/apps/common/swagger_api/common_api.py:73 +#: community/apps/dataset/serializers/document_serializers.py:819 +#: community/apps/dataset/serializers/document_serializers.py:820 +#: community/apps/dataset/serializers/document_serializers.py:838 +#: community/apps/dataset/serializers/document_serializers.py:839 +#: community/apps/dataset/serializers/paragraph_serializers.py:57 +#: community/apps/dataset/serializers/paragraph_serializers.py:71 +#: community/apps/dataset/serializers/paragraph_serializers.py:719 +#: community/apps/dataset/serializers/paragraph_serializers.py:720 +#: community/apps/dataset/swagger_api/problem_api.py:130 +#: community/apps/function_lib/serializers/function_lib_serializer.py:110 +#: community/apps/function_lib/serializers/function_lib_serializer.py:129 +#: community/apps/function_lib/serializers/function_lib_serializer.py:139 +#: community/apps/function_lib/swagger_api/function_lib_api.py:121 +#: community/apps/function_lib/swagger_api/function_lib_api.py:122 +#: community/apps/function_lib/swagger_api/function_lib_api.py:167 +#: community/apps/function_lib/swagger_api/function_lib_api.py:168 +#: community/apps/setting/serializers/team_serializers.py:46 +#: community/apps/users/serializers/user_serializers.py:473 +#: community/apps/users/serializers/user_serializers.py:496 +#: community/apps/users/serializers/user_serializers.py:584 +#: community/apps/users/serializers/user_serializers.py:585 +#: community/apps/users/serializers/user_serializers.py:721 +#: community/apps/users/serializers/user_serializers.py:737 +#: community/apps/users/serializers/user_serializers.py:738 +msgid "Is active" +msgstr "" + +#: apps/xpack/swagger_api/auth_api.py:37 +#| msgid "parameter configuration" +msgid "Wecom configuration" +msgstr "" + +#: apps/xpack/swagger_api/auth_api.py:38 +#| msgid "parameter configuration" +msgid "Wecom configuration details" +msgstr "" + +#: apps/xpack/swagger_api/auth_api.py:40 apps/xpack/swagger_api/auth_api.py:53 +msgid 
"Corp ID" +msgstr "" + +#: apps/xpack/swagger_api/auth_api.py:41 +msgid "Agent ID" +msgstr "" + +#: apps/xpack/swagger_api/auth_api.py:42 apps/xpack/swagger_api/auth_api.py:55 +#: apps/xpack/swagger_api/auth_api.py:67 +msgid "App Secret" +msgstr "" + +#: apps/xpack/swagger_api/auth_api.py:43 apps/xpack/swagger_api/auth_api.py:56 +#: apps/xpack/swagger_api/auth_api.py:68 +msgid "Callback URL" +msgstr "" + +#: apps/xpack/swagger_api/auth_api.py:50 +#| msgid "parameter configuration" +msgid "Dingtalk configuration" +msgstr "" + +#: apps/xpack/swagger_api/auth_api.py:51 +msgid "Dingtalk configuration details" +msgstr "" + +#: apps/xpack/swagger_api/auth_api.py:54 apps/xpack/swagger_api/auth_api.py:66 +msgid "App Key" +msgstr "" + +#: apps/xpack/swagger_api/auth_api.py:63 +#| msgid "parameter configuration" +msgid "Feishu configuration" +msgstr "" + +#: apps/xpack/swagger_api/auth_api.py:64 +msgid "Feishu configuration details" +msgstr "" + +#: apps/xpack/swagger_api/license_api.py:22 +msgid "license status" +msgstr "" + +#: apps/xpack/swagger_api/license_api.py:24 +msgid "" +"License status, possible values are: valid, invalid, expired, which " +"respectively represent: valid, invalid, expired" +msgstr "" + +#: apps/xpack/swagger_api/license_api.py:26 +msgid "license details" +msgstr "License details" + +#: apps/xpack/swagger_api/license_api.py:30 +msgid "customer name" +msgstr "Customer name" + +#: apps/xpack/swagger_api/license_api.py:31 +msgid "customer name. For example: *** company." +msgstr "Customer name. For example: *** company." + +#: apps/xpack/swagger_api/license_api.py:33 +msgid "independent software vendor" +msgstr "Independent software vendor" + +#: apps/xpack/swagger_api/license_api.py:35 +msgid "" +"Independent Software Vendor. For example: *** Company, suitable for the " +"embedded version of the product." +msgstr "" + +#: apps/xpack/swagger_api/license_api.py:37 +msgid "Authorization deadline." 
+msgstr "" + +#: apps/xpack/swagger_api/license_api.py:39 +msgid "" +"Authorization deadline. For example: 2020-12-31, this license will expire on " +"2021-01-01." +msgstr "" + +#: apps/xpack/swagger_api/license_api.py:41 +msgid "product name." +msgstr "Product name" + +#: apps/xpack/swagger_api/license_api.py:43 +msgid "Product name. For example: JumpServer, CMP, etc." +msgstr "" + +#: apps/xpack/swagger_api/license_api.py:45 +msgid "product version." +msgstr "Product version" + +#: apps/xpack/swagger_api/license_api.py:47 +msgid "Product version. For example: JumpServer 2.0, CMP 1.0, etc." +msgstr "" + +#: apps/xpack/swagger_api/license_api.py:49 +msgid "license version." +msgstr "License version" + +#: apps/xpack/swagger_api/license_api.py:51 +msgid "License version. For example: 1.0, 2.0, etc." +msgstr "" + +#: apps/xpack/swagger_api/license_api.py:53 +msgid "authorization quantity." +msgstr "Authorization quantity" + +#: apps/xpack/swagger_api/license_api.py:55 +msgid "" +"Authorization quantity. For example: 100, this license can be used by 100 " +"users." +msgstr "" + +#: apps/xpack/swagger_api/license_api.py:57 +msgid "Serial number, the unique identifier of the License." +msgstr "" + +#: apps/xpack/swagger_api/license_api.py:59 +msgid "" +"Serial number, the unique identifier of the license. The customer support " +"portal will save the serial number after generating the license. If the " +"serial number is not recorded in the customer support portal, the license " +"will be regarded as an unknown source." +msgstr "" + +#: apps/xpack/swagger_api/license_api.py:61 +msgid "remarks" +msgstr "Remarks" + +#: apps/xpack/swagger_api/license_api.py:63 +msgid "" +"Remarks, record additional information, length limit is 50. For example, a " +"customer purchases two identical JumpServer subscriptions and uses them in " +"different computer rooms respectively. You can use this field to note the A " +"computer room and B computer room to help distinguish the licenses." 
+msgstr "" + +#: apps/xpack/swagger_api/operate_log.py:12 +#: apps/xpack/swagger_api/operate_log.py:13 +#: apps/xpack/swagger_api/operate_log.py:38 +#: apps/xpack/swagger_api/operate_log.py:39 apps/xpack/views/operate_log.py:24 +#: apps/xpack/views/operate_log.py:36 +msgid "Operate log" +msgstr "" + +#: apps/xpack/swagger_api/operate_log.py:23 +#: apps/xpack/swagger_api/operate_log.py:24 +msgid "menu_label" +msgstr "" + +#: apps/xpack/swagger_api/operate_log.py:26 +#: apps/xpack/swagger_api/operate_log.py:27 +msgid "operate_label" +msgstr "" + +#: apps/xpack/swagger_api/operate_log.py:42 +#: apps/xpack/swagger_api/operate_log.py:43 +#: community/apps/dataset/serializers/dataset_serializers.py:104 +msgid "id" +msgstr "ID" + +#: apps/xpack/swagger_api/operate_log.py:60 +#: apps/xpack/swagger_api/operate_log.py:61 +#| msgid "license details" +msgid "details" +msgstr "Details" + +#: apps/xpack/views/application_setting_views.py:22 +#: apps/xpack/views/application_setting_views.py:23 +#| msgid "Modification time" +msgid "Modify Application Settings" +msgstr "Modify Application Display Settings" + +#: apps/xpack/views/application_setting_views.py:24 +#: apps/xpack/views/application_setting_views.py:40 +msgid "Pro/Application/Public Access" +msgstr "" + +#: apps/xpack/views/application_setting_views.py:37 +#: apps/xpack/views/application_setting_views.py:38 +#| msgid "Application version id" +msgid "Get Application Settings" +msgstr "" + +#: apps/xpack/views/auth.py:29 +msgid "Authentication" +msgstr "" + +#: apps/xpack/views/auth.py:40 apps/xpack/views/auth.py:41 +msgid "Add or modify authentication configuration" +msgstr "" + +#: apps/xpack/views/auth.py:44 apps/xpack/views/auth.py:58 +#: apps/xpack/views/auth.py:72 +msgid "System settings/login authentication" +msgstr "" + +#: apps/xpack/views/auth.py:55 apps/xpack/views/auth.py:56 +msgid "Get authentication configuration" +msgstr "" + +#: apps/xpack/views/auth.py:69 apps/xpack/views/auth.py:70 +msgid "test connection" 
+msgstr "Test connection" + +#: apps/xpack/views/auth.py:96 apps/xpack/views/auth.py:97 +#: community/apps/users/views/user.py:173 +#: community/apps/users/views/user.py:174 +msgid "Log in" +msgstr "" + +#: apps/xpack/views/auth.py:101 apps/xpack/views/auth.py:114 +#: apps/xpack/views/auth.py:130 apps/xpack/views/auth.py:146 +#: apps/xpack/views/auth.py:207 apps/xpack/views/auth.py:224 +#: apps/xpack/views/auth.py:242 apps/xpack/views/auth.py:260 +#: apps/xpack/views/auth.py:278 apps/xpack/views/auth.py:296 +msgid "Three-party login" +msgstr "" + +#: apps/xpack/views/auth.py:111 apps/xpack/views/auth.py:112 +msgid "CAS login" +msgstr "" + +#: apps/xpack/views/auth.py:127 apps/xpack/views/auth.py:128 +msgid "OIDC login" +msgstr "" + +#: apps/xpack/views/auth.py:143 apps/xpack/views/auth.py:144 +msgid "OAuth2 login" +msgstr "" + +#: apps/xpack/views/auth.py:160 apps/xpack/views/auth.py:161 +#: apps/xpack/views/auth.py:162 apps/xpack/views/auth.py:170 +#: apps/xpack/views/auth.py:194 apps/xpack/views/auth.py:195 +#: apps/xpack/views/auth.py:196 +msgid "Get platform information" +msgstr "" + +#: apps/xpack/views/auth.py:167 apps/xpack/views/auth.py:168 +msgid "Modify platform information" +msgstr "" + +#: apps/xpack/views/auth.py:175 apps/xpack/views/auth.py:176 +#: apps/xpack/views/auth.py:178 +msgid "Test platform connection" +msgstr "" + +#: apps/xpack/views/auth.py:185 apps/xpack/views/auth.py:186 +msgid "Scan code login type" +msgstr "" + +#: apps/xpack/views/auth.py:187 +msgid "Scan code to log in" +msgstr "" + +#: apps/xpack/views/auth.py:204 apps/xpack/views/auth.py:205 +msgid "DingTalk callback" +msgstr "" + +#: apps/xpack/views/auth.py:221 apps/xpack/views/auth.py:222 +msgid "DingTalk OAuth2 callback" +msgstr "" + +#: apps/xpack/views/auth.py:239 apps/xpack/views/auth.py:240 +msgid "Lark callback" +msgstr "" + +#: apps/xpack/views/auth.py:257 apps/xpack/views/auth.py:258 +msgid "Lark OAuth2 callback" +msgstr "" + +#: apps/xpack/views/auth.py:275 
apps/xpack/views/auth.py:276 +msgid "Wecom callback" +msgstr "" + +#: apps/xpack/views/auth.py:293 apps/xpack/views/auth.py:294 +msgid "Wecom OAuth2 callback" +msgstr "" + +#: apps/xpack/views/dataset_lark_views.py:22 +#: apps/xpack/views/dataset_lark_views.py:23 +msgid "Create a lark knowledge base" +msgstr "" + +#: apps/xpack/views/dataset_lark_views.py:26 +#: apps/xpack/views/dataset_lark_views.py:40 +#: community/apps/dataset/views/dataset.py:39 +#: community/apps/dataset/views/dataset.py:62 +#: community/apps/dataset/views/dataset.py:82 +#: community/apps/dataset/views/dataset.py:98 +#: community/apps/dataset/views/dataset.py:109 +#: community/apps/dataset/views/dataset.py:123 +#: community/apps/dataset/views/dataset.py:137 +#: community/apps/dataset/views/dataset.py:157 +#: community/apps/dataset/views/dataset.py:172 +#: community/apps/dataset/views/dataset.py:187 +#: community/apps/dataset/views/dataset.py:202 +#: community/apps/dataset/views/dataset.py:217 +#: community/apps/dataset/views/dataset.py:231 +#: community/apps/dataset/views/dataset.py:250 +msgid "Knowledge Base" +msgstr "" + +#: apps/xpack/views/dataset_lark_views.py:36 +#: apps/xpack/views/dataset_lark_views.py:37 +msgid "Update the lark knowledge base" +msgstr "" + +#: apps/xpack/views/dataset_lark_views.py:53 +#: apps/xpack/views/dataset_lark_views.py:54 +msgid "Get the list of documents in the lark knowledge base" +msgstr "" + +#: apps/xpack/views/dataset_lark_views.py:57 +#: apps/xpack/views/dataset_lark_views.py:74 +#: apps/xpack/views/dataset_lark_views.py:90 +#: apps/xpack/views/dataset_lark_views.py:110 +#: community/apps/dataset/views/document.py:34 +#: community/apps/dataset/views/document.py:47 +#: community/apps/dataset/views/document.py:62 +#: community/apps/dataset/views/document.py:81 +#: community/apps/dataset/views/document.py:102 +#: community/apps/dataset/views/document.py:123 +#: community/apps/dataset/views/document.py:137 +#: community/apps/dataset/views/document.py:158 
+#: community/apps/dataset/views/document.py:178 +#: community/apps/dataset/views/document.py:193 +#: community/apps/dataset/views/document.py:208 +#: community/apps/dataset/views/document.py:224 +#: community/apps/dataset/views/document.py:244 +#: community/apps/dataset/views/document.py:265 +#: community/apps/dataset/views/document.py:284 +#: community/apps/dataset/views/document.py:306 +#: community/apps/dataset/views/document.py:324 +#: community/apps/dataset/views/document.py:349 +#: community/apps/dataset/views/document.py:364 +#: community/apps/dataset/views/document.py:380 +#: community/apps/dataset/views/document.py:396 +#: community/apps/dataset/views/document.py:413 +#: community/apps/dataset/views/document.py:429 +#: community/apps/dataset/views/document.py:442 +#: community/apps/dataset/views/document.py:467 +msgid "Knowledge Base/Documentation" +msgstr "" + +#: apps/xpack/views/dataset_lark_views.py:70 +#: apps/xpack/views/dataset_lark_views.py:71 +msgid "Import documents to the lark knowledge base" +msgstr "" + +#: apps/xpack/views/dataset_lark_views.py:86 +#: apps/xpack/views/dataset_lark_views.py:87 +msgid "Synchronize lark document" +msgstr "" + +#: apps/xpack/views/dataset_lark_views.py:104 +#: apps/xpack/views/dataset_lark_views.py:105 +msgid "Batch sync lark documents" +msgstr "" + +#: apps/xpack/views/display.py:17 apps/xpack/views/display.py:18 +msgid "View appearance settings" +msgstr "" + +#: apps/xpack/views/display.py:19 apps/xpack/views/display.py:33 +msgid "System Settings/Appearance Settings" +msgstr "" + +#: apps/xpack/views/display.py:30 apps/xpack/views/display.py:31 +msgid "Update appearance settings" +msgstr "" + +#: apps/xpack/views/license.py:29 apps/xpack/views/license.py:30 +msgid "Get license information" +msgstr "" + +#: apps/xpack/views/license.py:38 apps/xpack/views/license.py:39 +msgid "Update license information" +msgstr "" + +#: apps/xpack/views/license.py:44 +msgid "upload file" +msgstr "Upload file" + +#: 
apps/xpack/views/operate_log.py:21 apps/xpack/views/operate_log.py:22 +msgid "Get menu operate log" +msgstr "" + +#: apps/xpack/views/operate_log.py:33 apps/xpack/views/operate_log.py:34 +msgid "Get operate log" +msgstr "" + +#: apps/xpack/views/platform.py:56 apps/xpack/views/platform.py:57 +msgid "Get platform configuration" +msgstr "" + +#: apps/xpack/views/platform.py:59 apps/xpack/views/platform.py:67 +msgid "Application/application access" +msgstr "" + +#: apps/xpack/views/platform.py:63 apps/xpack/views/platform.py:64 +msgid "Update platform configuration" +msgstr "" + +#: apps/xpack/views/platform.py:80 apps/xpack/views/platform.py:81 +msgid "Get platform status" +msgstr "" + +#: apps/xpack/views/platform.py:86 +msgid "Application/Get platform status" +msgstr "" + +#: apps/xpack/views/platform.py:96 apps/xpack/views/platform.py:97 +msgid "Update platform status" +msgstr "" + +#: apps/xpack/views/platform.py:103 +msgid "Application/Update platform status" +msgstr "" + +#: apps/xpack/views/system_api_key_views.py:28 +#: apps/xpack/views/system_api_key_views.py:29 +msgid "Get personal system API_KEY list" +msgstr "" + +#: apps/xpack/views/system_api_key_views.py:30 +#: apps/xpack/views/system_api_key_views.py:39 +#: apps/xpack/views/system_api_key_views.py:53 +#: apps/xpack/views/system_api_key_views.py:62 +msgid "Personal system/API_KEY" +msgstr "" + +#: apps/xpack/views/system_api_key_views.py:37 +#: apps/xpack/views/system_api_key_views.py:38 +msgid "Update personal system API_KEY" +msgstr "" + +#: apps/xpack/views/system_api_key_views.py:51 +#: apps/xpack/views/system_api_key_views.py:52 +msgid "Delete personal system API_KEY" +msgstr "" + +#: apps/xpack/views/system_api_key_views.py:60 +#: apps/xpack/views/system_api_key_views.py:61 +msgid "Add personal system API_KEY" +msgstr "" + +#: community/apps/application/chat_pipeline/step/chat_step/i_chat_step.py:27 +msgid "Model type error" +msgstr "" + +#: 
community/apps/application/chat_pipeline/step/chat_step/i_chat_step.py:37 +#: community/apps/common/field/common.py:21 +#: community/apps/common/field/common.py:34 +msgid "Message type error" +msgstr "" + +#: community/apps/application/chat_pipeline/step/chat_step/i_chat_step.py:56 +msgid "Conversation list" +msgstr "" + +#: community/apps/application/chat_pipeline/step/chat_step/i_chat_step.py:57 +#: community/apps/application/chat_pipeline/step/reset_problem_step/i_reset_problem_step.py:30 +#: community/apps/application/flow/step_node/ai_chat_step_node/i_chat_node.py:19 +#: community/apps/application/flow/step_node/image_generate_step_node/i_image_generate_node.py:13 +#: community/apps/application/flow/step_node/image_understand_step_node/i_image_understand_node.py:13 +#: community/apps/application/flow/step_node/question_node/i_question_node.py:19 +#: community/apps/application/flow/step_node/speech_to_text_step_node/i_speech_to_text_node.py:13 +#: community/apps/application/flow/step_node/text_to_speech_step_node/i_text_to_speech_node.py:13 +#: community/apps/application/serializers/application_serializers.py:72 +#: community/apps/application/serializers/chat_serializers.py:365 +#: community/apps/application/swagger_api/application_api.py:53 +#: community/apps/application/swagger_api/application_api.py:185 +#: community/apps/application/swagger_api/application_api.py:186 +#: community/apps/application/swagger_api/application_api.py:334 +#: community/apps/application/swagger_api/application_api.py:335 +msgid "Model id" +msgstr "Model ID" + +#: community/apps/application/chat_pipeline/step/chat_step/i_chat_step.py:59 +#: community/apps/application/chat_pipeline/step/generate_human_message_step/i_generate_human_message_step.py:30 +msgid "Paragraph List" +msgstr "" + +#: community/apps/application/chat_pipeline/step/chat_step/i_chat_step.py:61 +#: community/apps/application/serializers/chat_message_serializers.py:201 +#: 
community/apps/application/serializers/chat_message_serializers.py:253 +#: community/apps/application/serializers/chat_serializers.py:76 +#: community/apps/application/serializers/chat_serializers.py:240 +#: community/apps/application/serializers/chat_serializers.py:439 +#: community/apps/application/serializers/chat_serializers.py:531 +#: community/apps/application/serializers/chat_serializers.py:587 +#: community/apps/application/serializers/chat_serializers.py:613 +#: community/apps/application/serializers/chat_serializers.py:672 +#: community/apps/application/serializers/chat_serializers.py:712 +#: community/apps/application/swagger_api/chat_api.py:38 +#: community/apps/application/swagger_api/chat_api.py:76 +#: community/apps/application/swagger_api/chat_api.py:171 +#: community/apps/application/swagger_api/chat_api.py:172 +#: community/apps/application/swagger_api/chat_api.py:286 +#: community/apps/application/swagger_api/chat_api.py:355 +#: community/apps/application/swagger_api/chat_api.py:432 +#: community/apps/application/swagger_api/chat_api.py:465 +#: community/apps/application/views/chat_views.py:482 +msgid "Conversation ID" +msgstr "" + +#: community/apps/application/chat_pipeline/step/chat_step/i_chat_step.py:63 +#: community/apps/application/flow/step_node/application_node/i_application_node.py:15 +#: community/apps/application/serializers/chat_message_serializers.py:254 +#: community/apps/application/serializers/chat_serializers.py:240 +msgid "User Questions" +msgstr "" + +#: community/apps/application/chat_pipeline/step/chat_step/i_chat_step.py:66 +msgid "Post-processor" +msgstr "" + +#: community/apps/application/chat_pipeline/step/chat_step/i_chat_step.py:69 +msgid "Completion Question" +msgstr "" + +#: community/apps/application/chat_pipeline/step/chat_step/i_chat_step.py:71 +#: community/apps/application/serializers/chat_message_serializers.py:203 +msgid "Streaming Output" +msgstr "" + +#: 
community/apps/application/chat_pipeline/step/chat_step/i_chat_step.py:72 +#: community/apps/application/serializers/chat_message_serializers.py:208 +#: community/apps/application/serializers/chat_message_serializers.py:271 +#: community/apps/application/serializers/chat_serializers.py:103 +msgid "Client id" +msgstr "Client ID" + +#: community/apps/application/chat_pipeline/step/chat_step/i_chat_step.py:73 +#: community/apps/application/serializers/chat_message_serializers.py:209 +#: community/apps/application/serializers/chat_message_serializers.py:272 +msgid "Client Type" +msgstr "" + +#: community/apps/application/chat_pipeline/step/chat_step/i_chat_step.py:76 +#: community/apps/application/chat_pipeline/step/generate_human_message_step/i_generate_human_message_step.py:46 +#: community/apps/application/swagger_api/application_api.py:262 +msgid "No reference segment settings" +msgstr "" + +#: community/apps/application/chat_pipeline/step/chat_step/i_chat_step.py:78 +#: community/apps/application/chat_pipeline/step/reset_problem_step/i_reset_problem_step.py:31 +#: community/apps/application/chat_pipeline/step/search_dataset_step/i_search_dataset_step.py:48 +#: community/apps/application/serializers/application_serializers.py:70 +#: community/apps/application/serializers/application_serializers.py:511 +#: community/apps/application/serializers/application_serializers.py:582 +#: community/apps/application/serializers/application_serializers.py:627 +#: community/apps/application/serializers/application_serializers.py:697 +#: community/apps/application/serializers/application_serializers.py:718 +#: community/apps/application/serializers/application_serializers.py:792 +#: community/apps/application/serializers/application_serializers.py:1228 +#: community/apps/application/serializers/chat_serializers.py:118 +#: community/apps/application/serializers/chat_serializers.py:285 +#: community/apps/application/serializers/chat_serializers.py:338 +#: 
community/apps/application/serializers/chat_serializers.py:360 +#: community/apps/function_lib/serializers/function_lib_serializer.py:332 +#: community/apps/function_lib/serializers/function_lib_serializer.py:358 +#: community/apps/function_lib/serializers/function_lib_serializer.py:387 +msgid "User ID" +msgstr "" + +#: community/apps/application/chat_pipeline/step/chat_step/i_chat_step.py:81 +#| msgid "Model id" +msgid "Model settings" +msgstr "" + +#: community/apps/application/chat_pipeline/step/chat_step/i_chat_step.py:84 +#: community/apps/application/flow/step_node/ai_chat_step_node/i_chat_node.py:31 +#: community/apps/application/flow/step_node/image_generate_step_node/i_image_generate_node.py:29 +#: community/apps/application/flow/step_node/image_understand_step_node/i_image_understand_node.py:27 +#: community/apps/application/flow/step_node/question_node/i_question_node.py:27 +#: community/apps/application/flow/step_node/text_to_speech_step_node/i_text_to_speech_node.py:19 +#: community/apps/application/serializers/chat_serializers.py:382 +msgid "Model parameter settings" +msgstr "" + +#: community/apps/application/chat_pipeline/step/chat_step/i_chat_step.py:91 +msgid "message type error" +msgstr "Message type error" + +#: community/apps/application/chat_pipeline/step/chat_step/impl/base_chat_step.py:226 +#: community/apps/application/chat_pipeline/step/chat_step/impl/base_chat_step.py:271 +msgid "" +"Sorry, the AI model is not configured. Please go to the application to set " +"up the AI model first." 
+msgstr "" + +#: community/apps/application/chat_pipeline/step/generate_human_message_step/i_generate_human_message_step.py:27 +#: community/apps/application/chat_pipeline/step/reset_problem_step/i_reset_problem_step.py:25 +#: community/apps/application/chat_pipeline/step/search_dataset_step/i_search_dataset_step.py:25 +#: community/apps/application/serializers/chat_serializers.py:579 +msgid "question" +msgstr "Question" + +#: community/apps/application/chat_pipeline/step/generate_human_message_step/i_generate_human_message_step.py:33 +#: community/apps/application/chat_pipeline/step/reset_problem_step/i_reset_problem_step.py:28 +msgid "History Questions" +msgstr "" + +#: community/apps/application/chat_pipeline/step/generate_human_message_step/i_generate_human_message_step.py:35 +#: community/apps/application/flow/step_node/ai_chat_step_node/i_chat_node.py:25 +#: community/apps/application/flow/step_node/image_generate_step_node/i_image_generate_node.py:21 +#: community/apps/application/flow/step_node/image_understand_step_node/i_image_understand_node.py:18 +#: community/apps/application/flow/step_node/question_node/i_question_node.py:24 +#: community/apps/application/swagger_api/application_api.py:55 +#: community/apps/application/swagger_api/application_api.py:56 +#: community/apps/application/swagger_api/application_api.py:188 +#: community/apps/application/swagger_api/application_api.py:189 +#: community/apps/application/swagger_api/application_api.py:337 +#: community/apps/application/swagger_api/application_api.py:338 +msgid "Number of multi-round conversations" +msgstr "" + +#: community/apps/application/chat_pipeline/step/generate_human_message_step/i_generate_human_message_step.py:38 +msgid "Maximum length of the knowledge base paragraph" +msgstr "" + +#: community/apps/application/chat_pipeline/step/generate_human_message_step/i_generate_human_message_step.py:40 +#: community/apps/application/flow/step_node/ai_chat_step_node/i_chat_node.py:22 +#: 
community/apps/application/flow/step_node/image_understand_step_node/i_image_understand_node.py:16 +#: community/apps/application/flow/step_node/question_node/i_question_node.py:22 +#: community/apps/application/serializers/application_serializers.py:108 +#: community/apps/application/serializers/application_serializers.py:138 +#: community/apps/application/swagger_api/application_api.py:286 +#: community/apps/application/swagger_api/application_api.py:287 +msgid "Prompt word" +msgstr "" + +#: community/apps/application/chat_pipeline/step/generate_human_message_step/i_generate_human_message_step.py:42 +#: community/apps/application/swagger_api/application_api.py:300 +#: community/apps/application/swagger_api/application_api.py:301 +msgid "System prompt words (role)" +msgstr "" + +#: community/apps/application/chat_pipeline/step/generate_human_message_step/i_generate_human_message_step.py:44 +msgid "Completion problem" +msgstr "" + +#: community/apps/application/chat_pipeline/step/reset_problem_step/i_reset_problem_step.py:34 +#: community/apps/application/serializers/application_serializers.py:237 +msgid "Question completion prompt" +msgstr "" + +#: community/apps/application/chat_pipeline/step/reset_problem_step/impl/base_reset_problem_step.py:20 +#: community/apps/application/serializers/chat_message_serializers.py:99 +#: community/apps/application/swagger_api/application_api.py:210 +#: community/apps/application/swagger_api/application_api.py:355 +#, python-brace-format +msgid "" +"() contains the user's question. 
Answer the guessed user's question based on " +"the context ({question}) Requirement: Output a complete question and put it " +"in the tag" +msgstr "" + +#: community/apps/application/chat_pipeline/step/search_dataset_step/i_search_dataset_step.py:28 +msgid "System completes question text" +msgstr "" + +#: community/apps/application/chat_pipeline/step/search_dataset_step/i_search_dataset_step.py:31 +#: community/apps/application/flow/step_node/search_dataset_node/i_search_dataset_node.py:39 +msgid "Dataset id list" +msgstr "" + +#: community/apps/application/chat_pipeline/step/search_dataset_step/i_search_dataset_step.py:34 +msgid "List of document ids to exclude" +msgstr "" + +#: community/apps/application/chat_pipeline/step/search_dataset_step/i_search_dataset_step.py:37 +msgid "List of exclusion vector ids" +msgstr "" + +#: community/apps/application/chat_pipeline/step/search_dataset_step/i_search_dataset_step.py:40 +#: community/apps/application/flow/step_node/reranker_node/i_reranker_node.py:21 +#: community/apps/application/flow/step_node/reranker_node/i_reranker_node.py:24 +#: community/apps/application/flow/step_node/search_dataset_node/i_search_dataset_node.py:24 +#: community/apps/application/serializers/application_serializers.py:121 +#: community/apps/application/serializers/chat_serializers.py:243 +#: community/apps/application/swagger_api/application_api.py:249 +#: community/apps/application/swagger_api/application_api.py:250 +msgid "Reference segment number" +msgstr "" + +#: community/apps/application/chat_pipeline/step/search_dataset_step/i_search_dataset_step.py:43 +#: community/apps/application/swagger_api/application_api.py:252 +#: community/apps/application/swagger_api/application_api.py:253 +msgid "Similarity" +msgstr "" + +#: community/apps/application/chat_pipeline/step/search_dataset_step/i_search_dataset_step.py:46 +#: community/apps/application/flow/step_node/search_dataset_node/i_search_dataset_node.py:30 +#: 
community/apps/application/serializers/application_serializers.py:129 +#: community/apps/application/serializers/application_serializers.py:590 +#: community/apps/dataset/serializers/dataset_serializers.py:576 +msgid "The type only supports embedding|keywords|blend" +msgstr "" + +#: community/apps/application/chat_pipeline/step/search_dataset_step/i_search_dataset_step.py:47 +#: community/apps/application/flow/step_node/search_dataset_node/i_search_dataset_node.py:31 +#: community/apps/application/serializers/application_serializers.py:130 +#: community/apps/application/serializers/application_serializers.py:591 +#: community/apps/application/swagger_api/application_api.py:259 +msgid "Retrieval Mode" +msgstr "" + +#: community/apps/application/chat_pipeline/step/search_dataset_step/impl/base_search_dataset_step.py:31 +#: community/apps/application/serializers/application_serializers.py:84 +#: community/apps/application/serializers/application_serializers.py:1026 +#: community/apps/application/serializers/application_serializers.py:1036 +#: community/apps/application/serializers/application_serializers.py:1046 +#: community/apps/dataset/serializers/dataset_serializers.py:801 +#: community/apps/dataset/serializers/document_serializers.py:746 +#: community/apps/setting/models_provider/tools.py:23 +msgid "Model does not exist" +msgstr "" + +#: community/apps/application/chat_pipeline/step/search_dataset_step/impl/base_search_dataset_step.py:33 +#, python-brace-format +msgid "No permission to use this model {model_name}" +msgstr "" + +#: community/apps/application/chat_pipeline/step/search_dataset_step/impl/base_search_dataset_step.py:41 +msgid "" +"The vector model of the associated knowledge base is inconsistent and the " +"segmentation cannot be recalled." 
+msgstr "" + +#: community/apps/application/chat_pipeline/step/search_dataset_step/impl/base_search_dataset_step.py:43 +msgid "The knowledge base setting is wrong, please reset the knowledge base" +msgstr "" + +#: community/apps/application/flow/step_node/ai_chat_step_node/i_chat_node.py:21 +#: community/apps/application/flow/step_node/image_understand_step_node/i_image_understand_node.py:15 +#: community/apps/application/flow/step_node/question_node/i_question_node.py:21 +msgid "Role Setting" +msgstr "" + +#: community/apps/application/flow/step_node/ai_chat_step_node/i_chat_node.py:28 +#: community/apps/application/flow/step_node/direct_reply_node/i_reply_node.py:24 +#: community/apps/application/flow/step_node/function_lib_node/i_function_lib_node.py:29 +#: community/apps/application/flow/step_node/function_node/i_function_node.py:47 +#: community/apps/application/flow/step_node/image_generate_step_node/i_image_generate_node.py:26 +#: community/apps/application/flow/step_node/image_understand_step_node/i_image_understand_node.py:22 +#: community/apps/application/flow/step_node/question_node/i_question_node.py:26 +#: community/apps/application/flow/step_node/speech_to_text_step_node/i_speech_to_text_node.py:15 +#: community/apps/application/flow/step_node/text_to_speech_step_node/i_text_to_speech_node.py:15 +msgid "Whether to return content" +msgstr "" + +#: community/apps/application/flow/step_node/ai_chat_step_node/i_chat_node.py:35 +msgid "Context Type" +msgstr "" + +#: community/apps/application/flow/step_node/application_node/i_application_node.py:16 +msgid "API Input Fields" +msgstr "" + +#: community/apps/application/flow/step_node/application_node/i_application_node.py:18 +msgid "User Input Fields" +msgstr "" + +#: community/apps/application/flow/step_node/application_node/i_application_node.py:19 +#: community/apps/application/flow/step_node/image_understand_step_node/i_image_understand_node.py:24 +#: 
community/apps/application/serializers/application_serializers.py:698 +#: community/apps/application/serializers/chat_message_serializers.py:274 +#: community/apps/function_lib/serializers/function_lib_serializer.py:359 +msgid "picture" +msgstr "Picture" + +#: community/apps/application/flow/step_node/application_node/i_application_node.py:20 +#: community/apps/application/flow/step_node/document_extract_node/i_document_extract_node.py:13 +#: community/apps/application/serializers/chat_message_serializers.py:275 +msgid "document" +msgstr "Document" + +#: community/apps/application/flow/step_node/application_node/i_application_node.py:21 +#: community/apps/application/serializers/chat_message_serializers.py:276 +msgid "Audio" +msgstr "" + +#: community/apps/application/flow/step_node/application_node/i_application_node.py:23 +#: community/apps/application/serializers/chat_message_serializers.py:278 +msgid "Child Nodes" +msgstr "" + +#: community/apps/application/flow/step_node/application_node/i_application_node.py:24 +#: community/apps/application/flow/step_node/form_node/i_form_node.py:21 +msgid "Form Data" +msgstr "" + +#: community/apps/application/flow/step_node/application_node/i_application_node.py:58 +msgid "" +"Parameter value error: The uploaded document lacks file_id, and the document " +"upload fails" +msgstr "" + +#: community/apps/application/flow/step_node/application_node/i_application_node.py:67 +msgid "" +"Parameter value error: The uploaded image lacks file_id, and the image " +"upload fails" +msgstr "" + +#: community/apps/application/flow/step_node/application_node/i_application_node.py:77 +msgid "" +"Parameter value error: The uploaded audio lacks file_id, and the audio " +"upload fails." 
+msgstr "" + +#: community/apps/application/flow/step_node/condition_node/i_condition_node.py:19 +#: community/apps/application/serializers/chat_serializers.py:124 +msgid "Comparator" +msgstr "" + +#: community/apps/application/flow/step_node/condition_node/i_condition_node.py:20 +#: community/apps/application/swagger_api/application_api.py:271 +msgid "value" +msgstr "Value" + +#: community/apps/application/flow/step_node/condition_node/i_condition_node.py:21 +msgid "Fields" +msgstr "" + +#: community/apps/application/flow/step_node/condition_node/i_condition_node.py:25 +msgid "Branch id" +msgstr "Branch ID" + +#: community/apps/application/flow/step_node/condition_node/i_condition_node.py:26 +msgid "Branch Type" +msgstr "" + +#: community/apps/application/flow/step_node/condition_node/i_condition_node.py:27 +msgid "Condition or|and" +msgstr "" + +#: community/apps/application/flow/step_node/direct_reply_node/i_reply_node.py:20 +msgid "Response Type" +msgstr "" + +#: community/apps/application/flow/step_node/direct_reply_node/i_reply_node.py:21 +#: community/apps/application/flow/step_node/variable_assign_node/i_variable_assign_node.py:14 +msgid "Reference Field" +msgstr "" + +#: community/apps/application/flow/step_node/direct_reply_node/i_reply_node.py:23 +msgid "Direct answer content" +msgstr "" + +#: community/apps/application/flow/step_node/direct_reply_node/i_reply_node.py:30 +msgid "Reference field cannot be empty" +msgstr "" + +#: community/apps/application/flow/step_node/direct_reply_node/i_reply_node.py:32 +msgid "Reference field error" +msgstr "" + +#: community/apps/application/flow/step_node/direct_reply_node/i_reply_node.py:35 +msgid "Content cannot be empty" +msgstr "" + +#: community/apps/application/flow/step_node/form_node/i_form_node.py:19 +msgid "Form Configuration" +msgstr "" + +#: community/apps/application/flow/step_node/form_node/i_form_node.py:20 +msgid "Form output content" +msgstr "" + +#: 
community/apps/application/flow/step_node/function_lib_node/i_function_lib_node.py:22 +#: community/apps/application/flow/step_node/function_node/i_function_node.py:24 +msgid "Variable Name" +msgstr "" + +#: community/apps/application/flow/step_node/function_lib_node/i_function_lib_node.py:23 +#: community/apps/application/flow/step_node/function_node/i_function_node.py:34 +msgid "Variable Value" +msgstr "" + +#: community/apps/application/flow/step_node/function_lib_node/i_function_lib_node.py:27 +msgid "Library ID" +msgstr "" + +#: community/apps/application/flow/step_node/function_lib_node/i_function_lib_node.py:35 +msgid "The function has been deleted" +msgstr "" + +#: community/apps/application/flow/step_node/function_node/i_function_node.py:25 +msgid "Is this field required" +msgstr "" + +#: community/apps/application/flow/step_node/function_node/i_function_node.py:28 +msgid "The field only supports string|int|dict|array|float" +msgstr "" + +#: community/apps/application/flow/step_node/function_node/i_function_node.py:30 +#: community/apps/function_lib/serializers/function_lib_serializer.py:76 +#: community/apps/function_lib/swagger_api/function_lib_api.py:98 +#: community/apps/function_lib/swagger_api/function_lib_api.py:144 +#: community/apps/function_lib/swagger_api/function_lib_api.py:190 +msgid "source" +msgstr "Source" + +#: community/apps/application/flow/step_node/function_node/i_function_node.py:32 +#: community/apps/function_lib/serializers/function_lib_serializer.py:78 +msgid "The field only supports custom|reference" +msgstr "" + +#: community/apps/application/flow/step_node/function_node/i_function_node.py:40 +#, python-brace-format +msgid "{field}, this field is required." 
+msgstr "" + +#: community/apps/application/flow/step_node/function_node/i_function_node.py:46 +#: community/apps/function_lib/views/function_lib_views.py:131 +#: community/apps/function_lib/views/function_lib_views.py:145 +msgid "function" +msgstr "Function" + +#: community/apps/application/flow/step_node/image_generate_step_node/i_image_generate_node.py:15 +msgid "Prompt word (positive)" +msgstr "" + +#: community/apps/application/flow/step_node/image_generate_step_node/i_image_generate_node.py:17 +msgid "Prompt word (negative)" +msgstr "" + +#: community/apps/application/flow/step_node/image_generate_step_node/i_image_generate_node.py:24 +#: community/apps/application/flow/step_node/image_understand_step_node/i_image_understand_node.py:20 +msgid "Conversation storage type" +msgstr "" + +#: community/apps/application/flow/step_node/reranker_node/i_reranker_node.py:26 +#: community/apps/application/flow/step_node/search_dataset_node/i_search_dataset_node.py:33 +msgid "Maximum number of words in a quoted segment" +msgstr "" + +#: community/apps/application/flow/step_node/search_dataset_node/i_search_dataset_node.py:27 +#: community/apps/common/swagger_api/common_api.py:36 +#: community/apps/dataset/serializers/dataset_serializers.py:573 +msgid "similarity" +msgstr "Similarity" + +#: community/apps/application/flow/step_node/speech_to_text_step_node/i_speech_to_text_node.py:17 +msgid "The audio file cannot be empty" +msgstr "" + +#: community/apps/application/flow/step_node/speech_to_text_step_node/i_speech_to_text_node.py:31 +msgid "" +"Parameter value error: The uploaded audio lacks file_id, and the audio " +"upload fails" +msgstr "" + +#: community/apps/application/flow/step_node/text_to_speech_step_node/i_text_to_speech_node.py:17 +msgid "Text content" +msgstr "" + +#: community/apps/application/flow/workflow_manage.py:107 +#, python-brace-format +msgid "The branch {branch} of the {node} node needs to be connected" +msgstr "" + +#: 
community/apps/application/flow/workflow_manage.py:113 +#, python-brace-format +msgid "{node} Nodes cannot be considered as end nodes" +msgstr "" + +#: community/apps/application/flow/workflow_manage.py:123 +msgid "The next node that does not exist" +msgstr "" + +#: community/apps/application/flow/workflow_manage.py:137 +msgid "The starting node is required" +msgstr "" + +#: community/apps/application/flow/workflow_manage.py:139 +msgid "There can only be one starting node" +msgstr "" + +#: community/apps/application/flow/workflow_manage.py:147 +#, python-brace-format +msgid "The node {node} model does not exist" +msgstr "" + +#: community/apps/application/flow/workflow_manage.py:157 +#, python-brace-format +msgid "Node {node} is unavailable" +msgstr "" + +#: community/apps/application/flow/workflow_manage.py:163 +#, python-brace-format +msgid "The library ID of node {node} cannot be empty" +msgstr "" + +#: community/apps/application/flow/workflow_manage.py:166 +#, python-brace-format +msgid "The function library for node {node} is not available" +msgstr "" + +#: community/apps/application/flow/workflow_manage.py:172 +msgid "Basic information node is required" +msgstr "" + +#: community/apps/application/flow/workflow_manage.py:174 +msgid "There can only be one basic information node" +msgstr "" + +#: community/apps/application/serializers/application_serializers.py:75 +#: community/apps/application/serializers/chat_serializers.py:618 +#: community/apps/application/serializers/chat_serializers.py:677 +#: community/apps/application/serializers/chat_serializers.py:709 +#: community/apps/application/swagger_api/chat_api.py:365 +#: community/apps/application/swagger_api/chat_api.py:393 +#: community/apps/application/swagger_api/chat_api.py:394 +#: community/apps/application/swagger_api/chat_api.py:415 +#: community/apps/application/swagger_api/chat_api.py:494 +#: community/apps/application/swagger_api/chat_api.py:495 +msgid "Knowledge base id" +msgstr "Knowledge base ID" 
+ +#: community/apps/application/serializers/application_serializers.py:76 +msgid "Knowledge Base List" +msgstr "" + +#: community/apps/application/serializers/application_serializers.py:90 +msgid "The knowledge base id does not exist" +msgstr "" + +#: community/apps/application/serializers/application_serializers.py:107 +msgid "No reference status" +msgstr "" + +#: community/apps/application/serializers/application_serializers.py:123 +msgid "Acquaintance" +msgstr "" + +#: community/apps/application/serializers/application_serializers.py:126 +#: community/apps/application/swagger_api/application_api.py:256 +#: community/apps/application/swagger_api/application_api.py:257 +msgid "Maximum number of quoted characters" +msgstr "" + +#: community/apps/application/serializers/application_serializers.py:133 +msgid "Segment settings not referenced" +msgstr "" + +#: community/apps/application/serializers/application_serializers.py:140 +msgid "Role prompts" +msgstr "" + +#: community/apps/application/serializers/application_serializers.py:142 +#: community/apps/application/swagger_api/application_api.py:303 +#: community/apps/application/swagger_api/application_api.py:305 +msgid "No citation segmentation prompt" +msgstr "" + +#: community/apps/application/serializers/application_serializers.py:144 +msgid "Thinking process switch" +msgstr "" + +#: community/apps/application/serializers/application_serializers.py:148 +msgid "The thinking process begins to mark" +msgstr "" + +#: community/apps/application/serializers/application_serializers.py:151 +msgid "End of thinking process marker" +msgstr "" + +#: community/apps/application/serializers/application_serializers.py:156 +#: community/apps/application/serializers/application_serializers.py:482 +#: community/apps/application/serializers/application_serializers.py:623 +#: community/apps/application/swagger_api/application_api.py:49 +#: community/apps/application/swagger_api/application_api.py:50 +#: 
community/apps/application/swagger_api/application_api.py:181 +#: community/apps/application/swagger_api/application_api.py:182 +#: community/apps/application/swagger_api/application_api.py:330 +#: community/apps/application/swagger_api/application_api.py:331 +#: community/apps/application/swagger_api/application_api.py:377 +msgid "Application Name" +msgstr "" + +#: community/apps/application/serializers/application_serializers.py:159 +#: community/apps/application/serializers/application_serializers.py:484 +#: community/apps/application/serializers/application_serializers.py:625 +#: community/apps/application/swagger_api/application_api.py:51 +#: community/apps/application/swagger_api/application_api.py:52 +#: community/apps/application/swagger_api/application_api.py:183 +#: community/apps/application/swagger_api/application_api.py:184 +#: community/apps/application/swagger_api/application_api.py:332 +#: community/apps/application/swagger_api/application_api.py:333 +#: community/apps/application/swagger_api/application_api.py:382 +msgid "Application Description" +msgstr "" + +#: community/apps/application/serializers/application_serializers.py:160 +msgid "Workflow Objects" +msgstr "" + +#: community/apps/application/serializers/application_serializers.py:162 +#: community/apps/application/serializers/application_serializers.py:225 +#: community/apps/application/serializers/application_serializers.py:492 +#: community/apps/application/swagger_api/application_api.py:57 +#: community/apps/application/swagger_api/application_api.py:58 +#: community/apps/application/swagger_api/application_api.py:190 +#: community/apps/application/swagger_api/application_api.py:191 +#: community/apps/application/swagger_api/application_api.py:339 +#: community/apps/application/swagger_api/application_api.py:340 +msgid "Opening remarks" +msgstr "Opening Remarks" + +#: community/apps/application/serializers/application_serializers.py:214 +#: 
community/apps/dataset/serializers/dataset_serializers.py:105 +#: community/apps/dataset/serializers/dataset_serializers.py:106 +msgid "application name" +msgstr "Application Name" + +#: community/apps/application/serializers/application_serializers.py:217 +msgid "application describe" +msgstr "Application Description" + +#: community/apps/application/serializers/application_serializers.py:219 +#: community/apps/application/serializers/application_serializers.py:486 +msgid "Model" +msgstr "" + +#: community/apps/application/serializers/application_serializers.py:223 +#: community/apps/application/serializers/application_serializers.py:490 +msgid "Historical chat records" +msgstr "" + +#: community/apps/application/serializers/application_serializers.py:228 +#: community/apps/application/serializers/application_serializers.py:494 +msgid "Related Knowledge Base" +msgstr "" + +#: community/apps/application/serializers/application_serializers.py:235 +#: community/apps/application/serializers/application_serializers.py:504 +#: community/apps/application/serializers/chat_serializers.py:379 +msgid "Question completion" +msgstr "" + +#: community/apps/application/serializers/application_serializers.py:239 +#: community/apps/application/swagger_api/application_api.py:203 +#: community/apps/application/swagger_api/application_api.py:349 +msgid "Application Type" +msgstr "" + +#: community/apps/application/serializers/application_serializers.py:243 +msgid "Application type only supports SIMPLE|WORK_FLOW" +msgstr "" + +#: community/apps/application/serializers/application_serializers.py:247 +#: community/apps/application/serializers/application_serializers.py:508 +msgid "Model parameters" +msgstr "Model Parameters" + +#: community/apps/application/serializers/application_serializers.py:255 +msgid "Host" +msgstr "" + +#: community/apps/application/serializers/application_serializers.py:256 +msgid "protocol" +msgstr "Protocol" + +#: 
community/apps/application/serializers/application_serializers.py:339 +#: community/apps/application/swagger_api/application_api.py:153 +#: community/apps/application/swagger_api/application_api.py:154 +msgid "Reset Token" +msgstr "" + +#: community/apps/application/serializers/application_serializers.py:340 +msgid "Is it enabled" +msgstr "" + +#: community/apps/application/serializers/application_serializers.py:343 +#: community/apps/application/swagger_api/application_api.py:158 +#: community/apps/application/swagger_api/application_api.py:159 +msgid "Number of visits" +msgstr "" + +#: community/apps/application/serializers/application_serializers.py:345 +#: community/apps/application/swagger_api/application_api.py:160 +#: community/apps/application/swagger_api/application_api.py:161 +msgid "Whether to enable whitelist" +msgstr "" + +#: community/apps/application/serializers/application_serializers.py:348 +#: community/apps/application/serializers/application_serializers.py:349 +#: community/apps/application/swagger_api/application_api.py:163 +#: community/apps/application/swagger_api/application_api.py:164 +msgid "Whitelist" +msgstr "" + +#: community/apps/application/serializers/application_serializers.py:352 +#: community/apps/application/swagger_api/application_api.py:166 +#: community/apps/application/swagger_api/application_api.py:167 +msgid "Whether to display knowledge sources" +msgstr "" + +#: community/apps/application/serializers/application_serializers.py:423 +msgid "access_token" +msgstr "Access Token" + +#: community/apps/application/serializers/application_serializers.py:425 +msgid "Certification Information" +msgstr "" + +#: community/apps/application/serializers/application_serializers.py:462 +msgid "Invalid access_token" +msgstr "" + +#: community/apps/application/serializers/application_serializers.py:473 +msgid "Wrong password" +msgstr "" + +#: community/apps/application/serializers/application_serializers.py:498 +msgid "Dataset settings" 
+msgstr "" + +#: community/apps/application/serializers/application_serializers.py:501 +msgid "Model setup" +msgstr "" + +#: community/apps/application/serializers/application_serializers.py:505 +msgid "Icon" +msgstr "" + +#: community/apps/application/serializers/application_serializers.py:515 +#: community/apps/application/serializers/application_serializers.py:722 +#: community/apps/setting/serializers/valid_serializers.py:29 +msgid "" +"The community version supports up to 5 applications. If you need more " +"applications, please contact us (https://fit2cloud.com/)." +msgstr "" + +#: community/apps/application/serializers/application_serializers.py:583 +msgid "Query text" +msgstr "" + +#: community/apps/application/serializers/application_serializers.py:585 +msgid "topN" +msgstr "TopN" + +#: community/apps/application/serializers/application_serializers.py:587 +msgid "Relevance" +msgstr "" + +#: community/apps/application/serializers/application_serializers.py:596 +#: community/apps/application/serializers/application_serializers.py:705 +#: community/apps/application/serializers/application_serializers.py:797 +msgid "Application id does not exist" +msgstr "" + +#: community/apps/application/serializers/application_serializers.py:628 +msgid "Select User ID" +msgstr "" + +#: community/apps/application/serializers/application_serializers.py:717 +#: community/apps/dataset/serializers/document_serializers.py:164 +#: community/apps/dataset/serializers/document_serializers.py:213 +#: community/apps/dataset/serializers/document_serializers.py:220 +#: community/apps/dataset/serializers/file_serializers.py:59 +#: community/apps/dataset/views/file.py:35 +#: community/apps/dataset/views/file.py:44 +#: community/apps/function_lib/serializers/function_lib_serializer.py:331 +msgid "file" +msgstr "File" + +#: community/apps/application/serializers/application_serializers.py:732 +#: community/apps/common/handle/impl/qa/zip_parse_qa_handle.py:62 +#: 
community/apps/common/handle/impl/zip_split_handle.py:56 +#: community/apps/dataset/serializers/document_serializers.py:874 +#: community/apps/dataset/serializers/document_serializers.py:882 +#: community/apps/function_lib/serializers/function_lib_serializer.py:343 +msgid "Unsupported file format" +msgstr "" + +#: community/apps/application/serializers/application_serializers.py:872 +msgid "work_flow is a required field" +msgstr "work_flow is a required field" + +#: community/apps/application/serializers/application_serializers.py:934 +#: community/apps/application/serializers/application_serializers.py:1076 +#, python-brace-format +msgid "Unknown knowledge base id {dataset_id}, unable to associate" +msgstr "" + +#: community/apps/application/serializers/application_serializers.py:954 +msgid "Illegal User" +msgstr "" + +#: community/apps/application/serializers/application_serializers.py:1028 +#: community/apps/application/serializers/application_serializers.py:1038 +#: community/apps/application/serializers/application_serializers.py:1048 +#, python-brace-format +msgid "No permission to use this model:{model_name}" +msgstr "" + +#: community/apps/application/serializers/application_serializers.py:1259 +#: community/apps/application/swagger_api/chat_api.py:498 +#: community/apps/application/swagger_api/chat_api.py:499 +msgid "Availability" +msgstr "" + +#: community/apps/application/serializers/application_serializers.py:1263 +#: community/apps/application/swagger_api/application_api.py:129 +#: community/apps/application/swagger_api/application_api.py:130 +msgid "Is cross-domain allowed" +msgstr "" + +#: community/apps/application/serializers/application_serializers.py:1268 +msgid "Cross-domain address" +msgstr "" + +#: community/apps/application/serializers/application_serializers.py:1269 +#: community/apps/application/swagger_api/application_api.py:131 +msgid "Cross-domain list" +msgstr "" + +#: 
community/apps/application/serializers/application_serializers.py:1274 +msgid "ApiKeyid" +msgstr "" + +#: community/apps/application/serializers/application_serializers.py:1295 +msgid "APIKey does not exist" +msgstr "" + +#: community/apps/application/serializers/application_version_serializers.py:30 +#: community/apps/application/swagger_api/application_version_api.py:24 +#: community/apps/application/swagger_api/application_version_api.py:25 +#: community/apps/application/swagger_api/application_version_api.py:47 +#: community/apps/application/swagger_api/application_version_api.py:70 +#: community/apps/application/swagger_api/application_version_api.py:71 +msgid "Version Name" +msgstr "" + +#: community/apps/application/serializers/application_version_serializers.py:37 +#: community/apps/application/serializers/chat_serializers.py:115 +#: community/apps/application/serializers/chat_serializers.py:240 +msgid "summary" +msgstr "Summary" + +#: community/apps/application/serializers/application_version_serializers.py:61 +msgid "Workflow version id" +msgstr "" + +#: community/apps/application/serializers/application_version_serializers.py:71 +#: community/apps/application/serializers/application_version_serializers.py:86 +msgid "Workflow version does not exist" +msgstr "" + +#: community/apps/application/serializers/chat_message_serializers.py:195 +#: community/apps/dataset/serializers/paragraph_serializers.py:47 +#: community/apps/dataset/serializers/paragraph_serializers.py:180 +#: community/apps/dataset/serializers/paragraph_serializers.py:692 +#: community/apps/dataset/serializers/paragraph_serializers.py:705 +#: community/apps/dataset/serializers/paragraph_serializers.py:706 +#: community/apps/dataset/serializers/problem_serializers.py:41 +#: community/apps/dataset/serializers/problem_serializers.py:52 +#: community/apps/dataset/serializers/problem_serializers.py:113 +#: community/apps/dataset/swagger_api/problem_api.py:24 +#: 
community/apps/dataset/swagger_api/problem_api.py:25 +#: community/apps/dataset/swagger_api/problem_api.py:109 +#: community/apps/dataset/swagger_api/problem_api.py:110 +#: community/apps/dataset/swagger_api/problem_api.py:126 +#: community/apps/dataset/swagger_api/problem_api.py:127 +#: community/apps/dataset/swagger_api/problem_api.py:154 +#: community/apps/dataset/swagger_api/problem_api.py:169 +msgid "content" +msgstr "Content" + +#: community/apps/application/serializers/chat_message_serializers.py:196 +#: community/apps/setting/serializers/team_serializers.py:45 +#: community/apps/users/serializers/user_serializers.py:472 +#: community/apps/users/serializers/user_serializers.py:495 +#: community/apps/users/serializers/user_serializers.py:586 +msgid "Role" +msgstr "" + +#: community/apps/application/serializers/chat_message_serializers.py:202 +msgid "Regenerate" +msgstr "" + +#: community/apps/application/serializers/chat_message_serializers.py:256 +msgid "Is the answer in streaming mode" +msgstr "" + +#: community/apps/application/serializers/chat_message_serializers.py:257 +msgid "Do you want to reply again" +msgstr "" + +#: community/apps/application/serializers/chat_message_serializers.py:259 +#: community/apps/application/serializers/chat_serializers.py:442 +#: community/apps/application/serializers/chat_serializers.py:534 +#: community/apps/application/serializers/chat_serializers.py:590 +#: community/apps/application/serializers/chat_serializers.py:616 +#: community/apps/application/serializers/chat_serializers.py:675 +#: community/apps/application/swagger_api/chat_api.py:148 +#: community/apps/application/swagger_api/chat_api.py:149 +#: community/apps/application/swagger_api/chat_api.py:360 +#: community/apps/application/swagger_api/chat_api.py:437 +#: community/apps/application/swagger_api/chat_api.py:470 +msgid "Conversation record id" +msgstr "" + +#: community/apps/application/serializers/chat_message_serializers.py:262 +msgid "Node id" +msgstr 
"Node ID" + +#: community/apps/application/serializers/chat_message_serializers.py:265 +#: community/apps/application/swagger_api/chat_api.py:142 +#: community/apps/application/swagger_api/chat_api.py:143 +msgid "Runtime node id" +msgstr "" + +#: community/apps/application/serializers/chat_message_serializers.py:268 +msgid "Node parameters" +msgstr "Node Parameters" + +#: community/apps/application/serializers/chat_message_serializers.py:273 +msgid "Global variables" +msgstr "Global Variables" + +#: community/apps/application/serializers/chat_message_serializers.py:286 +#: community/apps/application/serializers/chat_message_serializers.py:421 +#: community/apps/application/serializers/chat_serializers.py:469 +msgid "Conversation does not exist" +msgstr "" + +#: community/apps/application/serializers/chat_message_serializers.py:303 +msgid "The number of visits exceeds today's visits" +msgstr "" + +#: community/apps/application/serializers/chat_message_serializers.py:314 +msgid "The current model is not available" +msgstr "" + +#: community/apps/application/serializers/chat_message_serializers.py:316 +msgid "The model is downloading, please try again later" +msgstr "" + +#: community/apps/application/serializers/chat_message_serializers.py:361 +#: community/apps/application/serializers/chat_serializers.py:599 +#: community/apps/application/serializers/chat_serializers.py:645 +#: community/apps/application/serializers/chat_serializers.py:694 +msgid "Conversation record does not exist" +msgstr "" + +#: community/apps/application/serializers/chat_message_serializers.py:454 +#: community/apps/application/serializers/chat_serializers.py:314 +msgid "The application has not been published. Please use it after publishing." 
+msgstr "" + +#: community/apps/application/serializers/chat_serializers.py:55 +msgid "node" +msgstr "Node" + +#: community/apps/application/serializers/chat_serializers.py:56 +msgid "Connection" +msgstr "" + +#: community/apps/application/serializers/chat_serializers.py:71 +#: community/apps/application/swagger_api/chat_api.py:48 +#: community/apps/application/swagger_api/chat_api.py:49 +#: community/apps/application/swagger_api/chat_api.py:169 +#: community/apps/application/swagger_api/chat_api.py:170 +#: community/apps/application/swagger_api/chat_api.py:256 +msgid "abstract" +msgstr "Abstract" + +#: community/apps/application/serializers/chat_serializers.py:121 +#: community/apps/application/swagger_api/chat_api.py:258 +msgid "Minimum number of likes" +msgstr "" + +#: community/apps/application/serializers/chat_serializers.py:123 +#: community/apps/application/swagger_api/chat_api.py:260 +msgid "Minimum number of clicks" +msgstr "" + +#: community/apps/application/serializers/chat_serializers.py:126 +msgid "Only supports and|or" +msgstr "" + +#: community/apps/application/serializers/chat_serializers.py:241 +msgid "Problem after optimization" +msgstr "" + +#: community/apps/application/serializers/chat_serializers.py:242 +msgid "answer" +msgstr "Answer" + +#: community/apps/application/serializers/chat_serializers.py:242 +msgid "User feedback" +msgstr "" + +#: community/apps/application/serializers/chat_serializers.py:244 +msgid "Section title + content" +msgstr "" + +#: community/apps/application/serializers/chat_serializers.py:245 +#: community/apps/application/views/chat_views.py:385 +#: community/apps/application/views/chat_views.py:386 +msgid "Annotation" +msgstr "" + +#: community/apps/application/serializers/chat_serializers.py:245 +msgid "Consuming tokens" +msgstr "" + +#: community/apps/application/serializers/chat_serializers.py:245 +msgid "Time consumed (s)" +msgstr "" + +#: community/apps/application/serializers/chat_serializers.py:246 +msgid 
"Question Time" +msgstr "" + +#: community/apps/application/serializers/chat_serializers.py:337 +msgid "Workflow" +msgstr "" + +#: community/apps/application/serializers/chat_serializers.py:369 +msgid "Multi-round conversation" +msgstr "" + +#: community/apps/application/serializers/chat_serializers.py:372 +msgid "Related Datasets" +msgstr "" + +#: community/apps/application/serializers/chat_serializers.py:449 +msgid "Application authentication information does not exist" +msgstr "" + +#: community/apps/application/serializers/chat_serializers.py:451 +msgid "Displaying knowledge sources is not enabled" +msgstr "" + +#: community/apps/application/serializers/chat_serializers.py:537 +msgid "Bidding Status" +msgstr "" + +#: community/apps/application/serializers/chat_serializers.py:546 +msgid "" +"Voting on the current session minutes, please do not send repeated requests" +msgstr "" + +#: community/apps/application/serializers/chat_serializers.py:551 +msgid "Non-existent conversation chat_record_id" +msgstr "" + +#: community/apps/application/serializers/chat_serializers.py:568 +msgid "Already voted, please cancel first and then vote again" +msgstr "" + +#: community/apps/application/serializers/chat_serializers.py:575 +#: community/apps/application/swagger_api/chat_api.py:379 +#: community/apps/application/swagger_api/chat_api.py:380 +#: community/apps/dataset/swagger_api/problem_api.py:128 +#: community/apps/dataset/swagger_api/problem_api.py:129 +msgid "Section title" +msgstr "Section Title" + +#: community/apps/application/serializers/chat_serializers.py:576 +#: community/apps/application/swagger_api/chat_api.py:381 +#: community/apps/application/swagger_api/chat_api.py:382 +#: community/apps/application/swagger_api/chat_api.py:483 +#: community/apps/application/swagger_api/chat_api.py:484 +#: community/apps/common/swagger_api/common_api.py:57 +#: community/apps/common/swagger_api/common_api.py:58 +msgid "Paragraph content" +msgstr "Paragraph Content" + +#: 
community/apps/application/serializers/chat_serializers.py:620 +#: community/apps/application/serializers/chat_serializers.py:679 +#: community/apps/application/serializers/chat_serializers.py:710 +#: community/apps/application/swagger_api/chat_api.py:370 +#: community/apps/application/swagger_api/chat_api.py:395 +#: community/apps/application/swagger_api/chat_api.py:396 +#: community/apps/application/swagger_api/chat_api.py:496 +#: community/apps/application/swagger_api/chat_api.py:497 +msgid "Document id" +msgstr "Document ID" + +#: community/apps/application/serializers/chat_serializers.py:626 +#: community/apps/application/serializers/chat_serializers.py:717 +#: community/apps/dataset/serializers/paragraph_serializers.py:576 +msgid "The document id is incorrect" +msgstr "" + +#: community/apps/application/serializers/chat_serializers.py:681 +#: community/apps/application/swagger_api/chat_api.py:310 +#: community/apps/application/swagger_api/chat_api.py:311 +msgid "Paragraph id" +msgstr "Paragraph ID" + +#: community/apps/application/serializers/chat_serializers.py:697 +#, python-brace-format +msgid "" +"The paragraph id is wrong. The current conversation record does not exist. 
" +"[{paragraph_id}] paragraph id" +msgstr "" + +#: community/apps/application/serializers/chat_serializers.py:736 +msgid "Conversation records that do not exist" +msgstr "" + +#: community/apps/application/swagger_api/application_api.py:24 +#: community/apps/application/views/chat_views.py:470 +#: community/apps/application/views/chat_views.py:471 +msgid "Upload files" +msgstr "" + +#: community/apps/application/swagger_api/application_api.py:35 +#: community/apps/application/swagger_api/application_api.py:36 +msgid "Application authentication token" +msgstr "" + +#: community/apps/application/swagger_api/application_api.py:48 +#: community/apps/application/swagger_api/application_version_api.py:22 +#: community/apps/application/swagger_api/application_version_api.py:23 +msgid "Primary key id" +msgstr "" + +#: community/apps/application/swagger_api/application_api.py:60 +msgid "Example List" +msgstr "" + +#: community/apps/application/swagger_api/application_api.py:61 +#: community/apps/application/swagger_api/application_api.py:62 +msgid "Affiliation user" +msgstr "" + +#: community/apps/application/swagger_api/application_api.py:64 +msgid "Is publish" +msgstr "" + +#: community/apps/application/swagger_api/application_api.py:66 +#: community/apps/application/swagger_api/application_api.py:67 +#: community/apps/application/swagger_api/application_version_api.py:28 +#: community/apps/application/swagger_api/application_version_api.py:29 +#: community/apps/application/swagger_api/chat_api.py:185 +#: community/apps/application/swagger_api/chat_api.py:186 +#: community/apps/application/swagger_api/chat_api.py:335 +#: community/apps/application/swagger_api/chat_api.py:336 +#: community/apps/application/swagger_api/chat_api.py:503 +#: community/apps/application/swagger_api/chat_api.py:504 +msgid "Creation time" +msgstr "Create Time" + +#: community/apps/application/swagger_api/application_api.py:69 +#: community/apps/application/swagger_api/application_api.py:70 +#: 
community/apps/application/swagger_api/application_version_api.py:30 +#: community/apps/application/swagger_api/application_version_api.py:31 +#: community/apps/application/swagger_api/chat_api.py:332 +#: community/apps/application/swagger_api/chat_api.py:333 +#: community/apps/application/swagger_api/chat_api.py:500 +#: community/apps/application/swagger_api/chat_api.py:501 +msgid "Modification time" +msgstr "Update Time" + +#: community/apps/application/swagger_api/application_api.py:74 +#: community/apps/application/swagger_api/application_api.py:194 +#: community/apps/application/swagger_api/application_api.py:195 +#: community/apps/application/swagger_api/application_api.py:343 +#: community/apps/application/swagger_api/application_api.py:344 +#: community/apps/application/swagger_api/chat_api.py:229 +#: community/apps/application/swagger_api/chat_api.py:230 +msgid "List of associated knowledge base IDs" +msgstr "" + +#: community/apps/application/swagger_api/application_api.py:76 +msgid "List of associated knowledge base IDs (returned when querying details)" +msgstr "" + +#: community/apps/application/swagger_api/application_api.py:91 +msgid "Model Type" +msgstr "" + +#: community/apps/application/swagger_api/application_api.py:117 +msgid "Application api_key id" +msgstr "" + +#: community/apps/application/swagger_api/application_api.py:126 +#: community/apps/application/swagger_api/application_api.py:127 +#: community/apps/application/swagger_api/application_api.py:156 +#: community/apps/application/swagger_api/application_api.py:157 +msgid "Is activation" +msgstr "" + +#: community/apps/application/swagger_api/application_api.py:198 +#: community/apps/application/swagger_api/application_api.py:347 +#: community/apps/application/swagger_api/application_api.py:348 +msgid "Problem Optimization" +msgstr "" + +#: community/apps/application/swagger_api/application_api.py:199 +msgid "Whether to enable problem optimization" +msgstr "" + +#: 
community/apps/application/swagger_api/application_api.py:204 +#: community/apps/application/swagger_api/application_api.py:350 +msgid "Application Type SIMPLE | WORK_FLOW" +msgstr "" + +#: community/apps/application/swagger_api/application_api.py:207 +#: community/apps/application/swagger_api/application_api.py:208 +#: community/apps/application/swagger_api/application_api.py:352 +#: community/apps/application/swagger_api/application_api.py:353 +msgid "Question optimization tips" +msgstr "" + +#: community/apps/application/swagger_api/application_api.py:211 +#: community/apps/application/swagger_api/application_api.py:212 +#: community/apps/application/swagger_api/application_api.py:356 +#: community/apps/application/swagger_api/application_api.py:357 +msgid "Text-to-speech model ID" +msgstr "" + +#: community/apps/application/swagger_api/application_api.py:213 +#: community/apps/application/swagger_api/application_api.py:214 +#: community/apps/application/swagger_api/application_api.py:358 +#: community/apps/application/swagger_api/application_api.py:359 +msgid "Speech-to-text model id" +msgstr "" + +#: community/apps/application/swagger_api/application_api.py:215 +#: community/apps/application/swagger_api/application_api.py:216 +#: community/apps/application/swagger_api/application_api.py:360 +#: community/apps/application/swagger_api/application_api.py:361 +msgid "Is speech-to-text enabled" +msgstr "" + +#: community/apps/application/swagger_api/application_api.py:217 +#: community/apps/application/swagger_api/application_api.py:218 +#: community/apps/application/swagger_api/application_api.py:362 +#: community/apps/application/swagger_api/application_api.py:363 +msgid "Is text-to-speech enabled" +msgstr "" + +#: community/apps/application/swagger_api/application_api.py:219 +#: community/apps/application/swagger_api/application_api.py:220 +#: community/apps/application/swagger_api/application_api.py:364 +#: 
community/apps/application/swagger_api/application_api.py:365 +msgid "Text-to-speech type" +msgstr "" + +#: community/apps/application/swagger_api/application_api.py:233 +msgid "Node List" +msgstr "" + +#: community/apps/application/swagger_api/application_api.py:236 +msgid "Connection List" +msgstr "" + +#: community/apps/application/swagger_api/application_api.py:266 +msgid "state" +msgstr "State" + +#: community/apps/application/swagger_api/application_api.py:268 +msgid "ai_questioning|designated_answer" +msgstr "" + +#: community/apps/application/swagger_api/application_api.py:273 +msgid "" +"ai_questioning: is the title, designated_answer: is the designated answer " +"content" +msgstr "" + +#: community/apps/application/swagger_api/application_api.py:403 +#: community/apps/function_lib/swagger_api/function_lib_api.py:216 +msgid "Upload image files" +msgstr "" + +#: community/apps/application/swagger_api/application_api.py:434 +#: community/apps/application/swagger_api/application_api.py:435 +msgid "Text" +msgstr "" + +#: community/apps/application/swagger_api/application_statistics_api.py:41 +#: community/apps/application/swagger_api/application_statistics_api.py:42 +#: community/apps/application/swagger_api/chat_api.py:490 +#: community/apps/application/swagger_api/chat_api.py:491 +msgid "Number of Likes" +msgstr "" + +#: community/apps/application/swagger_api/application_statistics_api.py:44 +#: community/apps/application/swagger_api/chat_api.py:492 +#: community/apps/application/swagger_api/chat_api.py:493 +msgid "Number of thumbs-downs" +msgstr "" + +#: community/apps/application/swagger_api/application_statistics_api.py:45 +#: community/apps/application/swagger_api/application_statistics_api.py:46 +msgid "Number of tokens used" +msgstr "" + +#: community/apps/application/swagger_api/application_statistics_api.py:47 +#: community/apps/application/swagger_api/application_statistics_api.py:48 +msgid "Number of conversations" +msgstr "" + +#: 
community/apps/application/swagger_api/application_statistics_api.py:49 +#: community/apps/application/swagger_api/application_statistics_api.py:50 +msgid "Number of customers" +msgstr "" + +#: community/apps/application/swagger_api/application_statistics_api.py:51 +#: community/apps/application/swagger_api/application_statistics_api.py:52 +msgid "Number of new customers" +msgstr "" + +#: community/apps/application/swagger_api/application_statistics_api.py:54 +#: community/apps/application/swagger_api/application_statistics_api.py:69 +#: community/apps/application/swagger_api/application_statistics_api.py:70 +msgid "time" +msgstr "Time" + +#: community/apps/application/swagger_api/application_statistics_api.py:55 +msgid "Time, this field is only available when querying trends" +msgstr "" + +#: community/apps/application/swagger_api/application_statistics_api.py:66 +#: community/apps/application/swagger_api/application_statistics_api.py:83 +msgid "New quantity" +msgstr "New Quantity" + +#: community/apps/application/swagger_api/application_statistics_api.py:81 +#: community/apps/application/swagger_api/application_statistics_api.py:82 +msgid "Today's new quantity" +msgstr "" + +#: community/apps/application/swagger_api/application_version_api.py:26 +#: community/apps/application/swagger_api/application_version_api.py:27 +msgid "Workflow data" +msgstr "Workflow Data" + +#: community/apps/application/swagger_api/application_version_api.py:61 +msgid "Application version id" +msgstr "Application Version ID" + +#: community/apps/application/swagger_api/chat_api.py:61 +#: community/apps/application/swagger_api/chat_api.py:62 +#: community/apps/application/swagger_api/chat_api.py:92 +#: community/apps/dataset/serializers/problem_serializers.py:91 +msgid "problem" +msgstr "Problem" + +#: community/apps/application/swagger_api/chat_api.py:68 +msgid "Question content" +msgstr "Question Content" + +#: community/apps/application/swagger_api/chat_api.py:72 +msgid "role" +msgstr 
"Role" + +#: community/apps/application/swagger_api/chat_api.py:77 +#: community/apps/application/swagger_api/chat_api.py:93 +msgid "regenerate" +msgstr "Regenerate" + +#: community/apps/application/swagger_api/chat_api.py:79 +msgid "Stream Output" +msgstr "" + +#: community/apps/application/swagger_api/chat_api.py:94 +msgid "Is it streaming output" +msgstr "" + +#: community/apps/application/swagger_api/chat_api.py:96 +#: community/apps/application/swagger_api/chat_api.py:97 +#| msgid "Workflow data" +msgid "Form data" +msgstr "" + +#: community/apps/application/swagger_api/chat_api.py:101 +#: community/apps/application/swagger_api/chat_api.py:102 +#| msgid "state list" +msgid "Image list" +msgstr "" + +#: community/apps/application/swagger_api/chat_api.py:107 +msgid "Image name" +msgstr "" + +#: community/apps/application/swagger_api/chat_api.py:109 +msgid "Image URL" +msgstr "" + +#: community/apps/application/swagger_api/chat_api.py:115 +#: community/apps/application/swagger_api/chat_api.py:116 +#: community/apps/dataset/views/document.py:133 +#: community/apps/dataset/views/document.py:134 +msgid "Document list" +msgstr "Document List" + +#: community/apps/application/swagger_api/chat_api.py:122 +msgid "Document name" +msgstr "" + +#: community/apps/application/swagger_api/chat_api.py:124 +msgid "Document URL" +msgstr "" + +#: community/apps/application/swagger_api/chat_api.py:129 +#: community/apps/application/swagger_api/chat_api.py:130 +#| msgid "id list" +msgid "Audio list" +msgstr "" + +#: community/apps/application/swagger_api/chat_api.py:135 +msgid "Audio name" +msgstr "" + +#: community/apps/application/swagger_api/chat_api.py:137 +msgid "Audio URL" +msgstr "" + +#: community/apps/application/swagger_api/chat_api.py:145 +#: community/apps/application/swagger_api/chat_api.py:146 +msgid "Node data" +msgstr "" + +#: community/apps/application/swagger_api/chat_api.py:151 +#: community/apps/application/swagger_api/chat_api.py:152 +msgid "Child node" +msgstr 
"" + +#: community/apps/application/swagger_api/chat_api.py:173 +#: community/apps/application/swagger_api/chat_api.py:174 +msgid "Number of dialogue questions" +msgstr "" + +#: community/apps/application/swagger_api/chat_api.py:176 +#: community/apps/application/swagger_api/chat_api.py:177 +msgid "Number of tags" +msgstr "" + +#: community/apps/application/swagger_api/chat_api.py:178 +#: community/apps/application/swagger_api/chat_api.py:179 +#: community/apps/common/swagger_api/common_api.py:64 +#: community/apps/common/swagger_api/common_api.py:65 +#: community/apps/dataset/serializers/paragraph_serializers.py:711 +#: community/apps/dataset/serializers/paragraph_serializers.py:712 +msgid "Number of likes" +msgstr "" + +#: community/apps/application/swagger_api/chat_api.py:180 +#: community/apps/application/swagger_api/chat_api.py:181 +msgid "Number of clicks" +msgstr "" + +#: community/apps/application/swagger_api/chat_api.py:182 +#: community/apps/application/swagger_api/chat_api.py:183 +msgid "Change time" +msgstr "Update time" + +#: community/apps/application/swagger_api/chat_api.py:224 +msgid "Application ID, pass when modifying, do not pass when creating" +msgstr "" + +#: community/apps/application/swagger_api/chat_api.py:225 +#: community/apps/application/swagger_api/chat_api.py:226 +msgid "Model ID" +msgstr "" + +#: community/apps/application/swagger_api/chat_api.py:232 +#: community/apps/application/swagger_api/chat_api.py:234 +msgid "Do you want to initiate multiple sessions" +msgstr "" + +#: community/apps/application/swagger_api/chat_api.py:237 +msgid "Problem optimization" +msgstr "" + +#: community/apps/application/swagger_api/chat_api.py:238 +msgid "Do you want to enable problem optimization" +msgstr "" + +#: community/apps/application/swagger_api/chat_api.py:254 +msgid "Historical days" +msgstr "" + +#: community/apps/application/swagger_api/chat_api.py:262 +msgid "or|and comparator" +msgstr "" + +#: 
community/apps/application/swagger_api/chat_api.py:266 +#| msgid "Start time" +msgid "start time" +msgstr "Start Time" + +#: community/apps/application/swagger_api/chat_api.py:291 +msgid "Is it ascending order" +msgstr "" + +#: community/apps/application/swagger_api/chat_api.py:304 +msgid "Session log id" +msgstr "" + +#: community/apps/application/swagger_api/chat_api.py:305 +msgid "Conversation log id" +msgstr "" + +#: community/apps/application/swagger_api/chat_api.py:306 +#: community/apps/application/swagger_api/chat_api.py:307 +#: community/apps/application/swagger_api/chat_api.py:446 +msgid "Voting Status" +msgstr "" + +#: community/apps/application/swagger_api/chat_api.py:308 +#: community/apps/application/swagger_api/chat_api.py:309 +msgid "Dataset id" +msgstr "Knowledge Base ID" + +#: community/apps/application/swagger_api/chat_api.py:312 +#: community/apps/application/swagger_api/chat_api.py:313 +msgid "Resource ID" +msgstr "" + +#: community/apps/application/swagger_api/chat_api.py:314 +#: community/apps/application/swagger_api/chat_api.py:315 +msgid "Resource Type" +msgstr "" + +#: community/apps/application/swagger_api/chat_api.py:317 +#: community/apps/application/swagger_api/chat_api.py:318 +msgid "Number of tokens consumed by the question" +msgstr "" + +#: community/apps/application/swagger_api/chat_api.py:320 +#: community/apps/application/swagger_api/chat_api.py:321 +msgid "The number of tokens consumed by the answer" +msgstr "" + +#: community/apps/application/swagger_api/chat_api.py:324 +#: community/apps/application/swagger_api/chat_api.py:325 +msgid "Improved annotation list" +msgstr "" + +#: community/apps/application/swagger_api/chat_api.py:328 +msgid "Corresponding session Corresponding subscript" +msgstr "" + +#: community/apps/application/swagger_api/chat_api.py:329 +msgid "Corresponding session id corresponding subscript" +msgstr "" + +#: community/apps/application/swagger_api/chat_api.py:397 +#: 
community/apps/application/swagger_api/chat_api.py:398 +msgid "Conversation id list" +msgstr "" + +#: community/apps/application/swagger_api/chat_api.py:447 +msgid "-1: Cancel vote | 0: Agree | 1: Oppose" +msgstr "" + +#: community/apps/application/swagger_api/chat_api.py:485 +#: community/apps/application/swagger_api/chat_api.py:486 +#: community/apps/common/swagger_api/common_api.py:59 +#: community/apps/common/swagger_api/common_api.py:60 +#: community/apps/dataset/serializers/paragraph_serializers.py:687 +#: community/apps/dataset/serializers/paragraph_serializers.py:707 +#: community/apps/dataset/serializers/paragraph_serializers.py:708 +msgid "title" +msgstr "Title" + +#: community/apps/application/swagger_api/chat_api.py:486 +#: community/apps/common/swagger_api/common_api.py:60 +msgid "Description of xxx" +msgstr "" + +#: community/apps/application/swagger_api/chat_api.py:487 +#: community/apps/application/swagger_api/chat_api.py:488 +#: community/apps/common/swagger_api/common_api.py:61 +#: community/apps/common/swagger_api/common_api.py:62 +msgid "Number of hits" +msgstr "" + +#: community/apps/application/views/application_version_views.py:28 +#: community/apps/application/views/application_version_views.py:29 +#: community/apps/application/views/application_views.py:489 +#: community/apps/application/views/application_views.py:490 +msgid "Get the application list" +msgstr "" + +#: community/apps/application/views/application_version_views.py:32 +#: community/apps/application/views/application_version_views.py:50 +#: community/apps/application/views/application_version_views.py:68 +#: community/apps/application/views/application_version_views.py:83 +msgid "Application/Version" +msgstr "" + +#: community/apps/application/views/application_version_views.py:45 +#: community/apps/application/views/application_version_views.py:46 +msgid "Get the list of application versions by page" +msgstr "" + +#: 
community/apps/application/views/application_version_views.py:64 +#: community/apps/application/views/application_version_views.py:65 +msgid "Get application version details" +msgstr "" + +#: community/apps/application/views/application_version_views.py:78 +#: community/apps/application/views/application_version_views.py:79 +msgid "Modify application version information" +msgstr "" + +#: community/apps/application/views/application_views.py:42 +#: community/apps/application/views/application_views.py:43 +msgid "User Statistics" +msgstr "" + +#: community/apps/application/views/application_views.py:44 +#: community/apps/application/views/application_views.py:70 +#: community/apps/application/views/application_views.py:95 +#: community/apps/application/views/application_views.py:121 +msgid "Application/Statistics" +msgstr "" + +#: community/apps/application/views/application_views.py:68 +#: community/apps/application/views/application_views.py:69 +msgid "User demographic trends" +msgstr "" + +#: community/apps/application/views/application_views.py:93 +#: community/apps/application/views/application_views.py:94 +msgid "Conversation statistics" +msgstr "" + +#: community/apps/application/views/application_views.py:119 +#: community/apps/application/views/application_views.py:120 +msgid "Dialogue-related statistical trends" +msgstr "" + +#: community/apps/application/views/application_views.py:150 +#: community/apps/application/views/application_views.py:151 +msgid "Modify application icon" +msgstr "" + +#: community/apps/application/views/application_views.py:152 +#: community/apps/application/views/application_views.py:175 +#: community/apps/application/views/application_views.py:189 +#: community/apps/application/views/application_views.py:202 +#: community/apps/application/views/application_views.py:216 +#: community/apps/application/views/application_views.py:236 +#: community/apps/application/views/application_views.py:255 +#: 
community/apps/application/views/application_views.py:274 +#: community/apps/application/views/application_views.py:313 +#: community/apps/application/views/application_views.py:482 +#: community/apps/application/views/application_views.py:493 +#: community/apps/application/views/application_views.py:508 +#: community/apps/application/views/application_views.py:535 +#: community/apps/application/views/application_views.py:555 +#: community/apps/application/views/application_views.py:575 +#: community/apps/application/views/application_views.py:593 +#: community/apps/application/views/application_views.py:614 +#: community/apps/application/views/application_views.py:635 +#: community/apps/application/views/application_views.py:670 +msgid "Application" +msgstr "" + +#: community/apps/application/views/application_views.py:173 +msgid "Import Application" +msgstr "" + +#: community/apps/application/views/application_views.py:187 +msgid "Export Application" +msgstr "" + +#: community/apps/application/views/application_views.py:200 +#: community/apps/application/views/application_views.py:201 +msgid "Get embedded js" +msgstr "" + +#: community/apps/application/views/application_views.py:214 +#: community/apps/application/views/application_views.py:215 +msgid "Get a list of models" +msgstr "" + +#: community/apps/application/views/application_views.py:234 +#: community/apps/application/views/application_views.py:235 +#: community/apps/setting/views/model.py:100 +#: community/apps/setting/views/model.py:101 +msgid "Get model parameter form" +msgstr "" + +#: community/apps/application/views/application_views.py:253 +#: community/apps/application/views/application_views.py:254 +msgid "Get a list of function libraries" +msgstr "" + +#: community/apps/application/views/application_views.py:272 +#: community/apps/application/views/application_views.py:273 +msgid "Get library details" +msgstr "" + +#: community/apps/application/views/application_views.py:292 +#: 
community/apps/application/views/application_views.py:293 +msgid "Get the list of apps created by the current user" +msgstr "" + +#: community/apps/application/views/application_views.py:294 +#: community/apps/application/views/application_views.py:333 +#: community/apps/application/views/chat_views.py:74 +#: community/apps/application/views/chat_views.py:93 +#: community/apps/application/views/chat_views.py:105 +#: community/apps/application/views/chat_views.py:118 +#: community/apps/application/views/chat_views.py:347 +msgid "Application/Chat" +msgstr "" + +#: community/apps/application/views/application_views.py:311 +#: community/apps/application/views/application_views.py:312 +msgid "Get application data" +msgstr "" + +#: community/apps/application/views/application_views.py:331 +#: community/apps/application/views/application_views.py:332 +msgid "Get application related information" +msgstr "" + +#: community/apps/application/views/application_views.py:346 +#: community/apps/application/views/application_views.py:347 +msgid "Add ApiKey" +msgstr "" + +#: community/apps/application/views/application_views.py:348 +#: community/apps/application/views/application_views.py:364 +#: community/apps/application/views/application_views.py:383 +#: community/apps/application/views/application_views.py:402 +msgid "Application/API_KEY" +msgstr "" + +#: community/apps/application/views/application_views.py:362 +#: community/apps/application/views/application_views.py:363 +msgid "Get the application API_KEY list" +msgstr "" + +#: community/apps/application/views/application_views.py:381 +#: community/apps/application/views/application_views.py:382 +msgid "Modify application API_KEY" +msgstr "" + +#: community/apps/application/views/application_views.py:400 +#: community/apps/application/views/application_views.py:401 +msgid "Delete Application API_KEY" +msgstr "" + +#: community/apps/application/views/application_views.py:421 +#: 
community/apps/application/views/application_views.py:422 +msgid "Modify Application AccessToken" +msgstr "" + +#: community/apps/application/views/application_views.py:423 +#: community/apps/application/views/application_views.py:441 +msgid "Application/Public Access" +msgstr "" + +#: community/apps/application/views/application_views.py:438 +#: community/apps/application/views/application_views.py:439 +msgid "Get the application AccessToken information" +msgstr "" + +#: community/apps/application/views/application_views.py:462 +#: community/apps/application/views/application_views.py:463 +msgid "Application Certification" +msgstr "" + +#: community/apps/application/views/application_views.py:465 +msgid "Application/Certification" +msgstr "" + +#: community/apps/application/views/application_views.py:479 +#: community/apps/application/views/application_views.py:480 +msgid "Create an application" +msgstr "" + +#: community/apps/application/views/application_views.py:505 +msgid "Hit Test List" +msgstr "" + +#: community/apps/application/views/application_views.py:530 +#: community/apps/application/views/application_views.py:531 +msgid "Publishing an application" +msgstr "" + +#: community/apps/application/views/application_views.py:551 +#: community/apps/application/views/application_views.py:552 +msgid "Deleting application" +msgstr "" + +#: community/apps/application/views/application_views.py:570 +#: community/apps/application/views/application_views.py:571 +msgid "Modify the application" +msgstr "" + +#: community/apps/application/views/application_views.py:589 +#: community/apps/application/views/application_views.py:590 +msgid "Get application details" +msgstr "" + +#: community/apps/application/views/application_views.py:609 +#: community/apps/application/views/application_views.py:610 +msgid "Get the knowledge base available to the current application" +msgstr "" + +#: community/apps/application/views/application_views.py:630 +#: 
community/apps/application/views/application_views.py:631 +msgid "Get the application list by page" +msgstr "" + +#: community/apps/application/views/application_views.py:665 +#: community/apps/application/views/application_views.py:666 +msgid "text to speech" +msgstr "Text to speech" + +#: community/apps/application/views/chat_views.py:36 +#: community/apps/application/views/chat_views.py:37 +msgid "OpenAI Interface Dialogue" +msgstr "" + +#: community/apps/application/views/chat_views.py:39 +msgid "OpenAI Dialogue" +msgstr "" + +#: community/apps/application/views/chat_views.py:52 +#: community/apps/application/views/chat_views.py:53 +msgid "Export conversation" +msgstr "Export Conversation" + +#: community/apps/application/views/chat_views.py:55 +#: community/apps/application/views/chat_views.py:156 +#: community/apps/application/views/chat_views.py:174 +#: community/apps/application/views/chat_views.py:197 +#: community/apps/application/views/chat_views.py:217 +#: community/apps/application/views/chat_views.py:235 +#: community/apps/application/views/chat_views.py:257 +#: community/apps/application/views/chat_views.py:282 +#: community/apps/application/views/chat_views.py:302 +#: community/apps/application/views/chat_views.py:324 +#: community/apps/application/views/chat_views.py:489 +msgid "Application/Conversation Log" +msgstr "" + +#: community/apps/application/views/chat_views.py:71 +#: community/apps/application/views/chat_views.py:72 +msgid "Get the session id according to the application id" +msgstr "" + +#: community/apps/application/views/chat_views.py:90 +#: community/apps/application/views/chat_views.py:91 +msgid "Get the workflow temporary session id" +msgstr "" + +#: community/apps/application/views/chat_views.py:102 +#: community/apps/application/views/chat_views.py:103 +msgid "Get a temporary session id" +msgstr "" + +#: community/apps/application/views/chat_views.py:115 +#: community/apps/application/views/chat_views.py:116 +msgid "dialogue" 
+msgstr "Dialogue" + +#: community/apps/application/views/chat_views.py:152 +#: community/apps/application/views/chat_views.py:153 +msgid "Get the conversation list" +msgstr "" + +#: community/apps/application/views/chat_views.py:172 +#: community/apps/application/views/chat_views.py:173 +msgid "Delete a conversation" +msgstr "" + +#: community/apps/application/views/chat_views.py:192 +#: community/apps/application/views/chat_views.py:193 +msgid "Get client conversation list by paging" +msgstr "" + +#: community/apps/application/views/chat_views.py:215 +#: community/apps/application/views/chat_views.py:216 +msgid "Client deletes conversation" +msgstr "" + +#: community/apps/application/views/chat_views.py:232 +#: community/apps/application/views/chat_views.py:233 +msgid "Client modifies dialogue summary" +msgstr "" + +#: community/apps/application/views/chat_views.py:253 +#: community/apps/application/views/chat_views.py:254 +msgid "Get the conversation list by page" +msgstr "" + +#: community/apps/application/views/chat_views.py:278 +#: community/apps/application/views/chat_views.py:279 +msgid "Get conversation record details" +msgstr "" + +#: community/apps/application/views/chat_views.py:298 +#: community/apps/application/views/chat_views.py:299 +msgid "Get a list of conversation records" +msgstr "" + +#: community/apps/application/views/chat_views.py:319 +#: community/apps/application/views/chat_views.py:320 +msgid "Get the conversation history list by page" +msgstr "" + +#: community/apps/application/views/chat_views.py:342 +#: community/apps/application/views/chat_views.py:343 +msgid "Like, Dislike" +msgstr "" + +#: community/apps/application/views/chat_views.py:365 +#: community/apps/application/views/chat_views.py:366 +msgid "Get the list of marked paragraphs" +msgstr "" + +#: community/apps/application/views/chat_views.py:369 +#: community/apps/application/views/chat_views.py:390 +#: community/apps/application/views/chat_views.py:442 +msgid 
"Application/Conversation Log/Annotation" +msgstr "" + +#: community/apps/application/views/chat_views.py:412 +#: community/apps/application/views/chat_views.py:413 +msgid "Add to Knowledge Base" +msgstr "" + +#: community/apps/application/views/chat_views.py:416 +msgid "Application/Conversation Log/Add to Knowledge Base" +msgstr "" + +#: community/apps/application/views/chat_views.py:438 +#: community/apps/application/views/chat_views.py:439 +msgid "Delete a Annotation" +msgstr "" + +#: community/apps/application/views/chat_views.py:487 +#: community/apps/dataset/views/file.py:28 +#: community/apps/dataset/views/file.py:29 +#: community/apps/dataset/views/file.py:34 +msgid "Upload file" +msgstr "" + +#: community/apps/common/auth/authenticate.py:62 +#: community/apps/common/auth/authenticate.py:83 +msgid "Not logged in, please log in first" +msgstr "" + +#: community/apps/common/auth/authenticate.py:68 +#: community/apps/common/auth/authenticate.py:74 +#: community/apps/common/auth/authenticate.py:89 +#: community/apps/common/auth/authenticate.py:95 +msgid "Authentication information is incorrect! 
illegal user" +msgstr "" + +#: community/apps/common/auth/authentication.py:94 +msgid "No permission to access" +msgstr "" + +#: community/apps/common/auth/handle/impl/application_key.py:23 +#: community/apps/common/auth/handle/impl/application_key.py:25 +msgid "Secret key is invalid" +msgstr "" + +#: community/apps/common/auth/handle/impl/public_access_token.py:48 +#: community/apps/common/auth/handle/impl/public_access_token.py:50 +#: community/apps/common/auth/handle/impl/public_access_token.py:52 +#: community/apps/common/auth/handle/impl/public_access_token.py:54 +msgid "Authentication information is incorrect" +msgstr "" + +#: community/apps/common/auth/handle/impl/user_token.py:34 +msgid "Login expired" +msgstr "" + +#: community/apps/common/constants/exception_code_constants.py:31 +msgid "The username or password is incorrect" +msgstr "" + +#: community/apps/common/constants/exception_code_constants.py:32 +msgid "Please log in first and bring the user Token" +msgstr "" + +#: community/apps/common/constants/exception_code_constants.py:33 +#: community/apps/users/serializers/user_serializers.py:429 +msgid "Email sending failed" +msgstr "" + +#: community/apps/common/constants/exception_code_constants.py:34 +msgid "Email format error" +msgstr "" + +#: community/apps/common/constants/exception_code_constants.py:35 +msgid "The email has been registered, please log in directly" +msgstr "" + +#: community/apps/common/constants/exception_code_constants.py:36 +msgid "The email is not registered, please register first" +msgstr "" + +#: community/apps/common/constants/exception_code_constants.py:38 +msgid "The verification code is incorrect or the verification code has expired" +msgstr "" + +#: community/apps/common/constants/exception_code_constants.py:39 +msgid "The username has been registered, please log in directly" +msgstr "" + +#: community/apps/common/constants/exception_code_constants.py:41 +msgid "" +"The username cannot be empty and must be between 6 and 20 
characters long." +msgstr "" + +#: community/apps/common/constants/exception_code_constants.py:43 +msgid "Password and confirmation password are inconsistent" +msgstr "" + +#: community/apps/common/constants/permission_constants.py:61 +msgid "ADMIN" +msgstr "" + +#: community/apps/common/constants/permission_constants.py:61 +msgid "Admin, prefabs are not currently used" +msgstr "" + +#: community/apps/common/constants/permission_constants.py:62 +msgid "USER" +msgstr "" + +#: community/apps/common/constants/permission_constants.py:62 +msgid "All user permissions" +msgstr "" + +#: community/apps/common/constants/permission_constants.py:63 +msgid "chat" +msgstr "Chat" + +#: community/apps/common/constants/permission_constants.py:63 +msgid "Only has application dialog interface permissions" +msgstr "" + +#: community/apps/common/constants/permission_constants.py:64 +msgid "Apply private key" +msgstr "" + +#: community/apps/common/event/__init__.py:30 +msgid "The download process was interrupted, please try again" +msgstr "" + +#: community/apps/common/event/listener_manage.py:91 +#, python-brace-format +msgid "Query vector data: {paragraph_id_list} error {error} {traceback}" +msgstr "" + +#: community/apps/common/event/listener_manage.py:96 +#, python-brace-format +msgid "Start--->Embedding paragraph: {paragraph_id_list}" +msgstr "" + +#: community/apps/common/event/listener_manage.py:108 +#, python-brace-format +msgid "Vectorized paragraph: {paragraph_id_list} error {error} {traceback}" +msgstr "" + +#: community/apps/common/event/listener_manage.py:114 +#, python-brace-format +msgid "End--->Embedding paragraph: {paragraph_id_list}" +msgstr "" + +#: community/apps/common/event/listener_manage.py:123 +#, python-brace-format +msgid "Start--->Embedding paragraph: {paragraph_id}" +msgstr "" + +#: community/apps/common/event/listener_manage.py:148 +#, python-brace-format +msgid "Vectorized paragraph: {paragraph_id} error {error} {traceback}" +msgstr "" + +#: 
community/apps/common/event/listener_manage.py:153 +#, python-brace-format +msgid "End--->Embedding paragraph: {paragraph_id}" +msgstr "" + +#: community/apps/common/event/listener_manage.py:269 +#, python-brace-format +msgid "Start--->Embedding document: {document_id}" +msgstr "" + +#: community/apps/common/event/listener_manage.py:291 +#, python-brace-format +msgid "Vectorized document: {document_id} error {error} {traceback}" +msgstr "" + +#: community/apps/common/event/listener_manage.py:296 +#, python-brace-format +msgid "End--->Embedding document: {document_id}" +msgstr "" + +#: community/apps/common/event/listener_manage.py:307 +#, python-brace-format +msgid "Start--->Embedding dataset: {dataset_id}" +msgstr "" + +#: community/apps/common/event/listener_manage.py:311 +#, python-brace-format +msgid "Start--->Embedding document: {document_list}" +msgstr "" + +#: community/apps/common/event/listener_manage.py:315 +#: community/apps/embedding/task/embedding.py:123 +#, python-brace-format +msgid "Vectorized dataset: {dataset_id} error {error} {traceback}" +msgstr "" + +#: community/apps/common/event/listener_manage.py:318 +#, python-brace-format +msgid "End--->Embedding dataset: {dataset_id}" +msgstr "" + +#: community/apps/common/field/common.py:45 +msgid "not a function" +msgstr "" + +#: community/apps/common/forms/base_field.py:64 +#, python-brace-format +msgid "The field {field_label} is required" +msgstr "" + +#: community/apps/common/forms/slider_field.py:56 +#, python-brace-format +msgid "The {field_label} cannot be less than {min}" +msgstr "" + +#: community/apps/common/forms/slider_field.py:62 +#, python-brace-format +msgid "The {field_label} cannot be greater than {max}" +msgstr "" + +#: community/apps/common/handle/handle_exception.py:30 +msgid "Unknown exception" +msgstr "" + +#: community/apps/common/handle/impl/pdf_split_handle.py:278 +#, python-brace-format +msgid "This document has no preface and is treated as ordinary text: {e}" +msgstr "" + +#: 
community/apps/common/init/init_doc.py:26 +#: community/apps/common/init/init_doc.py:45 +msgid "Intelligent customer service platform" +msgstr "" + +#: community/apps/common/job/clean_chat_job.py:25 +msgid "start clean chat log" +msgstr "Start cleaning chat logs" + +#: community/apps/common/job/clean_chat_job.py:71 +msgid "end clean chat log" +msgstr "End of chat log cleaning" + +#: community/apps/common/job/clean_debug_file_job.py:21 +msgid "start clean debug file" +msgstr "Start cleaning debug files" + +#: community/apps/common/job/clean_debug_file_job.py:25 +msgid "end clean debug file" +msgstr "End of debug file cleaning" + +#: community/apps/common/job/client_access_num_job.py:25 +msgid "start reset access_num" +msgstr "Start resetting access_num" + +#: community/apps/common/job/client_access_num_job.py:27 +msgid "end reset access_num" +msgstr "End of access_num reset" + +#: community/apps/common/log/log.py:37 +msgid "unknown" +msgstr "" + +#: community/apps/common/response/result.py:24 +msgid "Success" +msgstr "" + +#: community/apps/common/response/result.py:36 +#: community/apps/common/response/result.py:80 +#: community/apps/common/response/result.py:82 +msgid "current page" +msgstr "Current page" + +#: community/apps/common/response/result.py:42 +#: community/apps/common/response/result.py:85 +#: community/apps/common/response/result.py:87 +msgid "page size" +msgstr "Page size" + +#: community/apps/common/response/result.py:53 +#: community/apps/common/response/result.py:101 +#: community/apps/common/response/result.py:130 +msgid "response parameters" +msgstr "Response Parameters" + +#: community/apps/common/response/result.py:59 +#: community/apps/common/response/result.py:107 +#: community/apps/common/response/result.py:136 +msgid "response code" +msgstr "Response Code" + +#: community/apps/common/response/result.py:61 +#: community/apps/common/response/result.py:109 +#: community/apps/common/response/result.py:138 +msgid "success:200 fail:other" 
+msgstr "" + +#: community/apps/common/response/result.py:64 +#: community/apps/common/response/result.py:112 +#: community/apps/common/response/result.py:141 +msgid "prompt" +msgstr "Prompt" + +#: community/apps/common/response/result.py:65 +#: community/apps/common/response/result.py:113 +#: community/apps/common/response/result.py:142 +msgid "success" +msgstr "Success" + +#: community/apps/common/response/result.py:66 +#: community/apps/common/response/result.py:114 +#: community/apps/common/response/result.py:143 +msgid "error prompt" +msgstr "Error Prompt" + +#: community/apps/common/response/result.py:72 +#: community/apps/common/response/result.py:74 +msgid "total number of data" +msgstr "Total number of data" + +#: community/apps/common/swagger_api/common_api.py:24 +#: community/apps/dataset/serializers/dataset_serializers.py:569 +msgid "query text" +msgstr "Query Text" + +#: community/apps/common/swagger_api/common_api.py:42 +msgid "Retrieval pattern embedding|keywords|blend" +msgstr "" + +#: community/apps/common/swagger_api/common_api.py:66 +#: community/apps/common/swagger_api/common_api.py:67 +msgid "Number of clicks and dislikes" +msgstr "" + +#: community/apps/common/swagger_api/common_api.py:74 +#: community/apps/common/swagger_api/common_api.py:75 +msgid "relevance score" +msgstr "Relevance score" + +#: community/apps/common/swagger_api/common_api.py:76 +#: community/apps/common/swagger_api/common_api.py:77 +msgid "Comprehensive score, used for ranking" +msgstr "" + +#: community/apps/common/swagger_api/common_api.py:78 +#: community/apps/common/swagger_api/common_api.py:79 +#: community/apps/users/serializers/user_serializers.py:591 +#: community/apps/users/serializers/user_serializers.py:592 +msgid "Update time" +msgstr "Update Time" + +#: community/apps/common/swagger_api/common_api.py:81 +#: community/apps/common/swagger_api/common_api.py:82 +#: community/apps/users/serializers/user_serializers.py:589 +#: 
community/apps/users/serializers/user_serializers.py:590 +msgid "Create time" +msgstr "" + +#: community/apps/common/util/common.py:239 +msgid "Text-to-speech node, the text content must be of string type" +msgstr "" + +#: community/apps/common/util/common.py:241 +msgid "Text-to-speech node, the text content cannot be empty" +msgstr "" + +#: community/apps/dataset/serializers/common_serializers.py:87 +msgid "source url" +msgstr "Source URL" + +#: community/apps/dataset/serializers/common_serializers.py:89 +#: community/apps/dataset/serializers/dataset_serializers.py:333 +#: community/apps/dataset/serializers/dataset_serializers.py:390 +#: community/apps/dataset/serializers/dataset_serializers.py:391 +#: community/apps/dataset/serializers/document_serializers.py:155 +#: community/apps/dataset/serializers/document_serializers.py:181 +msgid "selector" +msgstr "Selector" + +#: community/apps/dataset/serializers/common_serializers.py:96 +#: community/apps/dataset/serializers/dataset_serializers.py:341 +#, python-brace-format +msgid "URL error, cannot parse [{source_url}]" +msgstr "" + +#: community/apps/dataset/serializers/common_serializers.py:105 +#: community/apps/dataset/serializers/common_serializers.py:124 +#: community/apps/dataset/serializers/common_serializers.py:125 +#: community/apps/dataset/serializers/document_serializers.py:85 +#: community/apps/dataset/swagger_api/document_api.py:23 +#: community/apps/dataset/swagger_api/document_api.py:24 +#: community/apps/dataset/swagger_api/document_api.py:49 +#: community/apps/dataset/swagger_api/document_api.py:50 +msgid "id list" +msgstr "ID list" + +#: community/apps/dataset/serializers/common_serializers.py:115 +#, python-brace-format +msgid "The following id does not exist: {error_id_list}" +msgstr "" + +#: community/apps/dataset/serializers/common_serializers.py:183 +#: community/apps/dataset/serializers/common_serializers.py:207 +msgid "The knowledge base is inconsistent with the vector model" +msgstr "" + +#: 
community/apps/dataset/serializers/common_serializers.py:185 +#: community/apps/dataset/serializers/common_serializers.py:209 +msgid "Knowledge base setting error, please reset the knowledge base" +msgstr "" + +#: community/apps/dataset/serializers/dataset_serializers.py:109 +#: community/apps/dataset/serializers/dataset_serializers.py:110 +#: community/apps/setting/serializers/model_apply_serializers.py:51 +msgid "model id" +msgstr "Model ID" + +#: community/apps/dataset/serializers/dataset_serializers.py:112 +#: community/apps/dataset/serializers/dataset_serializers.py:114 +msgid "Whether to start multiple rounds of dialogue" +msgstr "" + +#: community/apps/dataset/serializers/dataset_serializers.py:115 +#: community/apps/dataset/serializers/dataset_serializers.py:116 +msgid "opening remarks" +msgstr "Opening Remarks" + +#: community/apps/dataset/serializers/dataset_serializers.py:118 +msgid "example" +msgstr "Example" + +#: community/apps/dataset/serializers/dataset_serializers.py:119 +msgid "User id" +msgstr "User ID" + +#: community/apps/dataset/serializers/dataset_serializers.py:121 +#: community/apps/dataset/serializers/dataset_serializers.py:122 +msgid "Whether to publish" +msgstr "" + +#: community/apps/dataset/serializers/dataset_serializers.py:124 +#: community/apps/dataset/serializers/dataset_serializers.py:125 +#: community/apps/dataset/serializers/dataset_serializers.py:304 +#: community/apps/dataset/serializers/dataset_serializers.py:305 +#: community/apps/dataset/serializers/dataset_serializers.py:366 +#: community/apps/dataset/serializers/dataset_serializers.py:367 +#: community/apps/dataset/serializers/dataset_serializers.py:511 +#: community/apps/dataset/serializers/dataset_serializers.py:512 +#: community/apps/dataset/serializers/dataset_serializers.py:942 +#: community/apps/dataset/serializers/dataset_serializers.py:943 +#: community/apps/dataset/serializers/document_serializers.py:824 +#: 
community/apps/dataset/serializers/document_serializers.py:825 +#: community/apps/dataset/serializers/paragraph_serializers.py:200 +#: community/apps/dataset/serializers/paragraph_serializers.py:201 +#: community/apps/dataset/serializers/paragraph_serializers.py:724 +#: community/apps/dataset/serializers/paragraph_serializers.py:725 +#: community/apps/dataset/swagger_api/problem_api.py:33 +#: community/apps/dataset/swagger_api/problem_api.py:34 +#: community/apps/dataset/swagger_api/problem_api.py:135 +#: community/apps/dataset/swagger_api/problem_api.py:136 +#: community/apps/function_lib/swagger_api/function_lib_api.py:32 +#: community/apps/function_lib/swagger_api/function_lib_api.py:33 +msgid "create time" +msgstr "Create Time" + +#: community/apps/dataset/serializers/dataset_serializers.py:127 +#: community/apps/dataset/serializers/dataset_serializers.py:128 +#: community/apps/dataset/serializers/dataset_serializers.py:301 +#: community/apps/dataset/serializers/dataset_serializers.py:302 +#: community/apps/dataset/serializers/dataset_serializers.py:363 +#: community/apps/dataset/serializers/dataset_serializers.py:364 +#: community/apps/dataset/serializers/dataset_serializers.py:508 +#: community/apps/dataset/serializers/dataset_serializers.py:509 +#: community/apps/dataset/serializers/dataset_serializers.py:939 +#: community/apps/dataset/serializers/dataset_serializers.py:940 +#: community/apps/dataset/serializers/document_serializers.py:821 +#: community/apps/dataset/serializers/document_serializers.py:822 +#: community/apps/dataset/serializers/paragraph_serializers.py:197 +#: community/apps/dataset/serializers/paragraph_serializers.py:198 +#: community/apps/dataset/serializers/paragraph_serializers.py:721 +#: community/apps/dataset/serializers/paragraph_serializers.py:722 +#: community/apps/dataset/swagger_api/problem_api.py:30 +#: community/apps/dataset/swagger_api/problem_api.py:31 +#: community/apps/dataset/swagger_api/problem_api.py:132 +#: 
community/apps/dataset/swagger_api/problem_api.py:133 +#: community/apps/function_lib/swagger_api/function_lib_api.py:34 +#: community/apps/function_lib/swagger_api/function_lib_api.py:35 +msgid "update time" +msgstr "Update Time" + +#: community/apps/dataset/serializers/dataset_serializers.py:257 +#: community/apps/dataset/serializers/dataset_serializers.py:260 +#: community/apps/dataset/serializers/document_serializers.py:211 +#: community/apps/dataset/serializers/document_serializers.py:218 +#: community/apps/dataset/serializers/document_serializers.py:987 +#: community/apps/dataset/serializers/document_serializers.py:1016 +msgid "file list" +msgstr "File list" + +#: community/apps/dataset/serializers/dataset_serializers.py:269 +msgid "upload files " +msgstr "Upload files" + +#: community/apps/dataset/serializers/dataset_serializers.py:297 +#: community/apps/dataset/serializers/dataset_serializers.py:298 +#: community/apps/dataset/serializers/dataset_serializers.py:359 +#: community/apps/dataset/serializers/dataset_serializers.py:360 +#: community/apps/dataset/serializers/dataset_serializers.py:504 +#: community/apps/dataset/serializers/dataset_serializers.py:505 +#: community/apps/dataset/serializers/dataset_serializers.py:935 +#: community/apps/dataset/serializers/dataset_serializers.py:936 +#: community/apps/dataset/serializers/document_serializers.py:814 +#: community/apps/dataset/serializers/document_serializers.py:815 +msgid "char length" +msgstr "Character length" + +#: community/apps/dataset/serializers/dataset_serializers.py:299 +#: community/apps/dataset/serializers/dataset_serializers.py:300 +#: community/apps/dataset/serializers/dataset_serializers.py:361 +#: community/apps/dataset/serializers/dataset_serializers.py:362 +#: community/apps/dataset/serializers/dataset_serializers.py:506 +#: community/apps/dataset/serializers/dataset_serializers.py:507 +#: community/apps/dataset/serializers/dataset_serializers.py:937 +#: 
community/apps/dataset/serializers/dataset_serializers.py:938 +msgid "document count" +msgstr "Document count" + +#: community/apps/dataset/serializers/dataset_serializers.py:308 +#: community/apps/dataset/serializers/dataset_serializers.py:309 +#: community/apps/dataset/serializers/dataset_serializers.py:370 +#: community/apps/dataset/serializers/dataset_serializers.py:371 +#: community/apps/dataset/serializers/dataset_serializers.py:515 +#: community/apps/dataset/serializers/dataset_serializers.py:516 +#: community/apps/dataset/serializers/document_serializers.py:290 +#: community/apps/dataset/serializers/document_serializers.py:485 +msgid "document list" +msgstr "Document list" + +#: community/apps/dataset/serializers/dataset_serializers.py:327 +#: community/apps/dataset/serializers/dataset_serializers.py:388 +#: community/apps/dataset/serializers/dataset_serializers.py:389 +msgid "web source url" +msgstr "Web source URL" + +#: community/apps/dataset/serializers/dataset_serializers.py:414 +#: community/apps/setting/serializers/valid_serializers.py:26 +msgid "" +"The community version supports up to 50 knowledge bases. If you need more " +"knowledge bases, please contact us (https://fit2cloud.com/)." 
+msgstr "" + +#: community/apps/dataset/serializers/dataset_serializers.py:533 +#: community/apps/dataset/serializers/dataset_serializers.py:534 +msgid "documents" +msgstr "Documents" + +#: community/apps/dataset/serializers/dataset_serializers.py:577 +msgid "search mode" +msgstr "Search mode" + +#: community/apps/dataset/serializers/dataset_serializers.py:582 +#: community/apps/dataset/serializers/dataset_serializers.py:618 +#: community/apps/dataset/serializers/dataset_serializers.py:706 +msgid "id does not exist" +msgstr "ID does not exist" + +#: community/apps/dataset/serializers/dataset_serializers.py:609 +msgid "sync type" +msgstr "Synchronization type" + +#: community/apps/dataset/serializers/dataset_serializers.py:611 +msgid "The synchronization type only supports:replace|complete" +msgstr "" + +#: community/apps/dataset/serializers/dataset_serializers.py:620 +#: community/apps/dataset/serializers/document_serializers.py:499 +msgid "Synchronization is only supported for web site types" +msgstr "" + +#: community/apps/dataset/serializers/dataset_serializers.py:694 +msgid "" +"Synchronization type->replace: replacement synchronization, complete: " +"complete synchronization" +msgstr "" + +#: community/apps/dataset/serializers/dataset_serializers.py:803 +#: community/apps/dataset/serializers/document_serializers.py:748 +#: community/apps/setting/models_provider/tools.py:25 +msgid "No permission to use this model" +msgstr "" + +#: community/apps/dataset/serializers/dataset_serializers.py:815 +msgid "Failed to send the vectorization task, please try again later!" 
+msgstr "" + +#: community/apps/dataset/serializers/dataset_serializers.py:911 +#: community/apps/dataset/serializers/document_serializers.py:846 +msgid "meta" +msgstr "Metadata" + +#: community/apps/dataset/serializers/dataset_serializers.py:913 +msgid "Knowledge base metadata->web:{source_url:xxx,selector:'xxx'},base:{}" +msgstr "" + +#: community/apps/dataset/serializers/document_serializers.py:87 +#: community/apps/dataset/serializers/document_serializers.py:100 +#: community/apps/dataset/serializers/document_serializers.py:416 +#: community/apps/dataset/swagger_api/document_api.py:37 +#: community/apps/dataset/swagger_api/document_api.py:51 +msgid "task type" +msgstr "Task type" + +#: community/apps/dataset/serializers/document_serializers.py:95 +#: community/apps/dataset/serializers/document_serializers.py:108 +msgid "task type not support" +msgstr "Task type not supported" + +#: community/apps/dataset/serializers/document_serializers.py:115 +#: community/apps/dataset/serializers/document_serializers.py:188 +#: community/apps/dataset/serializers/document_serializers.py:200 +#: community/apps/dataset/serializers/document_serializers.py:201 +#: community/apps/dataset/serializers/document_serializers.py:412 +#: community/apps/dataset/serializers/document_serializers.py:476 +#: community/apps/dataset/serializers/document_serializers.py:836 +#: community/apps/dataset/serializers/document_serializers.py:837 +msgid "document name" +msgstr "Document Name" + +#: community/apps/dataset/serializers/document_serializers.py:118 +msgid "The type only supports optimization|directly_return" +msgstr "" + +#: community/apps/dataset/serializers/document_serializers.py:120 +#: community/apps/dataset/serializers/document_serializers.py:414 +#: community/apps/dataset/serializers/document_serializers.py:480 +#: community/apps/dataset/serializers/document_serializers.py:840 +#: community/apps/dataset/swagger_api/document_api.py:25 +msgid "hit handling method" +msgstr "Hit Handling 
Method" + +#: community/apps/dataset/serializers/document_serializers.py:126 +#: community/apps/dataset/serializers/document_serializers.py:844 +#: community/apps/dataset/swagger_api/document_api.py:27 +msgid "directly return similarity" +msgstr "Directly return similarity" + +#: community/apps/dataset/serializers/document_serializers.py:129 +#: community/apps/dataset/serializers/document_serializers.py:415 +msgid "document is active" +msgstr "Document is active" + +#: community/apps/dataset/serializers/document_serializers.py:150 +#: community/apps/dataset/serializers/document_serializers.py:152 +msgid "document url list" +msgstr "Document URL list" + +#: community/apps/dataset/serializers/document_serializers.py:178 +#: community/apps/dataset/serializers/document_serializers.py:179 +msgid "source url list" +msgstr "Source URL list" + +#: community/apps/dataset/serializers/document_serializers.py:202 +#: community/apps/dataset/serializers/document_serializers.py:203 +msgid "paragraphs" +msgstr "Paragraphs" + +#: community/apps/dataset/serializers/document_serializers.py:227 +msgid "The template type only supports excel|csv" +msgstr "" + +#: community/apps/dataset/serializers/document_serializers.py:237 +msgid "Export template type csv|excel" +msgstr "" + +#: community/apps/dataset/serializers/document_serializers.py:289 +#: community/apps/dataset/serializers/paragraph_serializers.py:304 +#: community/apps/dataset/serializers/paragraph_serializers.py:436 +msgid "target dataset id" +msgstr "Target knowledge base ID" + +#: community/apps/dataset/serializers/document_serializers.py:391 +#: community/apps/dataset/serializers/paragraph_serializers.py:305 +#: community/apps/dataset/serializers/paragraph_serializers.py:441 +msgid "target document id" +msgstr "Target document ID" + +#: community/apps/dataset/serializers/document_serializers.py:399 +#: community/apps/dataset/serializers/document_serializers.py:400 +msgid "document id list" +msgstr "Document ID list" + +#: 
community/apps/dataset/serializers/document_serializers.py:418 +msgid "order by" +msgstr "" + +#: community/apps/dataset/serializers/document_serializers.py:653 +msgid "Section title (optional)" +msgstr "" + +#: community/apps/dataset/serializers/document_serializers.py:654 +msgid "" +"Section content (required, question answer, no more than 4096 characters)" +msgstr "" + +#: community/apps/dataset/serializers/document_serializers.py:655 +msgid "Question (optional, one per line in the cell)" +msgstr "" + +#: community/apps/dataset/serializers/document_serializers.py:765 +msgid "The task is being executed, please do not send it repeatedly." +msgstr "" + +#: community/apps/dataset/serializers/document_serializers.py:842 +msgid "ai optimization: optimization, direct return: directly_return" +msgstr "AI optimization: optimization, direct return: directly_return" + +#: community/apps/dataset/serializers/document_serializers.py:848 +msgid "Document metadata->web:{source_url:xxx,selector:'xxx'},base:{}" +msgstr "" + +#: community/apps/dataset/serializers/document_serializers.py:859 +msgid "dataset id not exist" +msgstr "Dataset ID does not exist" + +#: community/apps/dataset/serializers/document_serializers.py:990 +#: community/apps/dataset/serializers/document_serializers.py:1020 +msgid "limit" +msgstr "Limit" + +#: community/apps/dataset/serializers/document_serializers.py:994 +#: community/apps/dataset/serializers/document_serializers.py:996 +msgid "patterns" +msgstr "Patterns" + +#: community/apps/dataset/serializers/document_serializers.py:999 +msgid "Auto Clean" +msgstr "" + +#: community/apps/dataset/serializers/document_serializers.py:1006 +msgid "The maximum size of the uploaded file cannot exceed 100MB" +msgstr "" + +#: community/apps/dataset/serializers/document_serializers.py:1025 +msgid "Segmented regular list" +msgstr "" + +#: community/apps/dataset/serializers/document_serializers.py:1029 +#: community/apps/dataset/serializers/document_serializers.py:1030 
+msgid "Whether to clear special characters" +msgstr "" + +#: community/apps/dataset/serializers/document_serializers.py:1049 +msgid "space" +msgstr "Space" + +#: community/apps/dataset/serializers/document_serializers.py:1050 +msgid "semicolon" +msgstr "Semicolon" + +#: community/apps/dataset/serializers/document_serializers.py:1050 +msgid "comma" +msgstr "Comma" + +#: community/apps/dataset/serializers/document_serializers.py:1051 +msgid "period" +msgstr "Period" + +#: community/apps/dataset/serializers/document_serializers.py:1051 +msgid "enter" +msgstr "Enter" + +#: community/apps/dataset/serializers/document_serializers.py:1052 +msgid "blank line" +msgstr "Blank line" + +#: community/apps/dataset/serializers/document_serializers.py:1165 +msgid "Hit handling method is required" +msgstr "" + +#: community/apps/dataset/serializers/document_serializers.py:1167 +msgid "The hit processing method must be directly_return|optimization" +msgstr "" + +#: community/apps/dataset/serializers/document_serializers.py:1213 +#: community/apps/dataset/serializers/paragraph_serializers.py:753 +msgid "The task is being executed, please do not send it again." 
+msgstr "" + +#: community/apps/dataset/serializers/file_serializers.py:82 +msgid "File not found" +msgstr "" + +#: community/apps/dataset/serializers/image_serializers.py:23 +msgid "image" +msgstr "Image" + +#: community/apps/dataset/serializers/image_serializers.py:42 +msgid "Image not found" +msgstr "" + +#: community/apps/dataset/serializers/paragraph_serializers.py:52 +#: community/apps/dataset/serializers/paragraph_serializers.py:68 +#: community/apps/dataset/serializers/paragraph_serializers.py:69 +#: community/apps/dataset/serializers/paragraph_serializers.py:82 +#: community/apps/dataset/serializers/paragraph_serializers.py:85 +#: community/apps/dataset/serializers/paragraph_serializers.py:91 +#: community/apps/dataset/serializers/paragraph_serializers.py:93 +#: community/apps/dataset/serializers/paragraph_serializers.py:653 +msgid "section title" +msgstr "Section Title" + +#: community/apps/dataset/serializers/paragraph_serializers.py:65 +#: community/apps/dataset/serializers/paragraph_serializers.py:66 +msgid "section content" +msgstr "Section Content" + +#: community/apps/dataset/serializers/paragraph_serializers.py:73 +#: community/apps/dataset/serializers/paragraph_serializers.py:74 +#: community/apps/dataset/serializers/problem_serializers.py:88 +msgid "problem list" +msgstr "Problem List" + +#: community/apps/dataset/serializers/paragraph_serializers.py:100 +#: community/apps/dataset/serializers/paragraph_serializers.py:172 +#: community/apps/dataset/serializers/paragraph_serializers.py:214 +#: community/apps/dataset/serializers/paragraph_serializers.py:276 +#: community/apps/dataset/serializers/paragraph_serializers.py:308 +#: community/apps/dataset/serializers/paragraph_serializers.py:456 +#: community/apps/dataset/serializers/paragraph_serializers.py:563 +#: community/apps/dataset/serializers/problem_serializers.py:57 +#: community/apps/dataset/swagger_api/problem_api.py:61 +msgid "paragraph id" +msgstr "Paragraph ID" + +#: 
community/apps/dataset/serializers/paragraph_serializers.py:105 +#: community/apps/dataset/serializers/paragraph_serializers.py:467 +msgid "Paragraph id does not exist" +msgstr "" + +#: community/apps/dataset/serializers/paragraph_serializers.py:134 +msgid "Already associated, please do not associate again" +msgstr "" + +#: community/apps/dataset/serializers/paragraph_serializers.py:191 +#: community/apps/dataset/serializers/paragraph_serializers.py:192 +msgid "question content" +msgstr "Question Content" + +#: community/apps/dataset/serializers/paragraph_serializers.py:193 +#: community/apps/dataset/serializers/paragraph_serializers.py:709 +#: community/apps/dataset/swagger_api/problem_api.py:26 +msgid "hit num" +msgstr "Hit Num" + +#: community/apps/dataset/serializers/paragraph_serializers.py:210 +#: community/apps/dataset/serializers/paragraph_serializers.py:281 +#: community/apps/dataset/serializers/problem_serializers.py:39 +#: community/apps/dataset/serializers/problem_serializers.py:64 +#: community/apps/dataset/serializers/problem_serializers.py:194 +#: community/apps/dataset/swagger_api/problem_api.py:101 +msgid "problem id" +msgstr "Problem ID" + +#: community/apps/dataset/serializers/paragraph_serializers.py:222 +msgid "Paragraph does not exist" +msgstr "" + +#: community/apps/dataset/serializers/paragraph_serializers.py:224 +msgid "Problem does not exist" +msgstr "" + +#: community/apps/dataset/serializers/paragraph_serializers.py:306 +#: community/apps/dataset/serializers/paragraph_serializers.py:449 +#: community/apps/dataset/serializers/paragraph_serializers.py:450 +msgid "paragraph id list" +msgstr "Paragraph ID list" + +#: community/apps/dataset/serializers/paragraph_serializers.py:317 +msgid "The document to be migrated is consistent with the target document" +msgstr "" + +#: community/apps/dataset/serializers/paragraph_serializers.py:319 +#, python-brace-format +msgid "The document id does not exist [{document_id}]" +msgstr "" + +#: 
community/apps/dataset/serializers/paragraph_serializers.py:323 +#, python-brace-format +msgid "The target document id does not exist [{document_id}]" +msgstr "" + +#: community/apps/dataset/serializers/paragraph_serializers.py:503 +msgid "Problem id does not exist" +msgstr "" + +#: community/apps/dataset/serializers/paragraph_serializers.py:713 +#: community/apps/dataset/serializers/paragraph_serializers.py:714 +msgid "Number of dislikes" +msgstr "" + +#: community/apps/dataset/serializers/problem_serializers.py:50 +msgid "Issue ID is passed when modifying, not when creating." +msgstr "" + +#: community/apps/dataset/serializers/problem_serializers.py:62 +#: community/apps/dataset/swagger_api/problem_api.py:51 +#: community/apps/dataset/swagger_api/problem_api.py:52 +#: community/apps/dataset/swagger_api/problem_api.py:83 +#: community/apps/dataset/swagger_api/problem_api.py:84 +msgid "problem id list" +msgstr "Problem ID list" + +#: community/apps/dataset/swagger_api/document_api.py:38 +#: community/apps/dataset/swagger_api/document_api.py:52 +msgid "1|2|3 1:Vectorization|2:Generate issues|3:Synchronize documents" +msgstr "" + +#: community/apps/dataset/swagger_api/document_api.py:64 +#: community/apps/dataset/swagger_api/document_api.py:65 +msgid "state list" +msgstr "State list" + +#: community/apps/dataset/swagger_api/image_api.py:22 +msgid "image file" +msgstr "Image file" + +#: community/apps/dataset/swagger_api/problem_api.py:54 +#: community/apps/dataset/swagger_api/problem_api.py:55 +msgid "Associated paragraph information list" +msgstr "" + +#: community/apps/dataset/swagger_api/problem_api.py:131 +msgid "Hit num" +msgstr "Hit Num" + +#: community/apps/dataset/task/generate.py:95 +#, python-brace-format +msgid "" +"Generate issue based on document: {document_id} error {error}{traceback}" +msgstr "" + +#: community/apps/dataset/task/generate.py:99 +#, python-brace-format +msgid "End--->Generate problem: {document_id}" +msgstr "" + +#: 
community/apps/dataset/task/sync.py:29 +#: community/apps/dataset/task/sync.py:43 +#, python-brace-format +msgid "Start--->Start synchronization web knowledge base:{dataset_id}" +msgstr "" + +#: community/apps/dataset/task/sync.py:34 +#: community/apps/dataset/task/sync.py:47 +#, python-brace-format +msgid "End--->End synchronization web knowledge base:{dataset_id}" +msgstr "" + +#: community/apps/dataset/task/sync.py:36 +#: community/apps/dataset/task/sync.py:49 +#, python-brace-format +msgid "Synchronize web knowledge base:{dataset_id} error{error}{traceback}" +msgstr "" + +#: community/apps/dataset/task/tools.py:114 +#, python-brace-format +msgid "Association problem failed {error}" +msgstr "" + +#: community/apps/dataset/views/dataset.py:35 +#: community/apps/dataset/views/dataset.py:36 +msgid "Synchronize the knowledge base of the website" +msgstr "" + +#: community/apps/dataset/views/dataset.py:57 +#: community/apps/dataset/views/dataset.py:58 +msgid "Create QA knowledge base" +msgstr "" + +#: community/apps/dataset/views/dataset.py:77 +#: community/apps/dataset/views/dataset.py:78 +msgid "Create a web site knowledge base" +msgstr "" + +#: community/apps/dataset/views/dataset.py:93 +#: community/apps/dataset/views/dataset.py:94 +msgid "Get a list of applications available in the knowledge base" +msgstr "" + +#: community/apps/dataset/views/dataset.py:105 +#: community/apps/dataset/views/dataset.py:106 +msgid "Get a list of knowledge bases" +msgstr "" + +#: community/apps/dataset/views/dataset.py:119 +#: community/apps/dataset/views/dataset.py:120 +msgid "Create a knowledge base" +msgstr "" + +#: community/apps/dataset/views/dataset.py:134 +msgid "Hit test list" +msgstr "" + +#: community/apps/dataset/views/dataset.py:154 +msgid "Re-vectorize" +msgstr "" + +#: community/apps/dataset/views/dataset.py:170 +msgid "Export knowledge base" +msgstr "" + +#: community/apps/dataset/views/dataset.py:184 +#: community/apps/dataset/views/dataset.py:185 +msgid "Export 
knowledge base containing images" +msgstr "Export ZIP Knowledge Base" + +#: community/apps/dataset/views/dataset.py:199 +msgid "Delete knowledge base" +msgstr "" + +#: community/apps/dataset/views/dataset.py:213 +#: community/apps/dataset/views/dataset.py:214 +msgid "Query knowledge base details based on knowledge base id" +msgstr "" + +#: community/apps/dataset/views/dataset.py:226 +#: community/apps/dataset/views/dataset.py:227 +msgid "Modify knowledge base information" +msgstr "" + +#: community/apps/dataset/views/dataset.py:245 +#: community/apps/dataset/views/dataset.py:246 +#: community/apps/dataset/views/document.py:463 +#: community/apps/dataset/views/document.py:464 +msgid "Get the knowledge base paginated list" +msgstr "" + +#: community/apps/dataset/views/document.py:31 +#: community/apps/dataset/views/document.py:32 +msgid "Get QA template" +msgstr "" + +#: community/apps/dataset/views/document.py:44 +#: community/apps/dataset/views/document.py:45 +msgid "Get form template" +msgstr "" + +#: community/apps/dataset/views/document.py:57 +#: community/apps/dataset/views/document.py:58 +msgid "Create Web site documents" +msgstr "" + +#: community/apps/dataset/views/document.py:77 +#: community/apps/dataset/views/document.py:78 +msgid "Import QA and create documentation" +msgstr "" + +#: community/apps/dataset/views/document.py:98 +#: community/apps/dataset/views/document.py:99 +msgid "Import tables and create documents" +msgstr "" + +#: community/apps/dataset/views/document.py:118 +#: community/apps/dataset/views/document.py:119 +msgid "Create document" +msgstr "" + +#: community/apps/dataset/views/document.py:152 +#: community/apps/dataset/views/document.py:153 +msgid "Modify document hit processing methods in batches" +msgstr "" + +#: community/apps/dataset/views/document.py:171 +#: community/apps/dataset/views/document.py:172 +msgid "Create documents in batches" +msgstr "" + +#: community/apps/dataset/views/document.py:187 +#: 
community/apps/dataset/views/document.py:188 +msgid "Batch sync documents" +msgstr "" + +#: community/apps/dataset/views/document.py:202 +#: community/apps/dataset/views/document.py:203 +msgid "Delete documents in batches" +msgstr "" + +#: community/apps/dataset/views/document.py:220 +#: community/apps/dataset/views/document.py:221 +msgid "Synchronize web site types" +msgstr "" + +#: community/apps/dataset/views/document.py:239 +#: community/apps/dataset/views/document.py:240 +msgid "Cancel task" +msgstr "" + +#: community/apps/dataset/views/document.py:260 +#: community/apps/dataset/views/document.py:261 +msgid "Cancel tasks in batches" +msgstr "" + +#: community/apps/dataset/views/document.py:279 +#: community/apps/dataset/views/document.py:280 +msgid "Refresh document vector library" +msgstr "" + +#: community/apps/dataset/views/document.py:300 +#: community/apps/dataset/views/document.py:301 +msgid "Batch refresh document vector library" +msgstr "" + +#: community/apps/dataset/views/document.py:319 +#: community/apps/dataset/views/document.py:320 +msgid "Migrate documents in batches" +msgstr "" + +#: community/apps/dataset/views/document.py:346 +#: community/apps/dataset/views/document.py:347 +msgid "Export document" +msgstr "Export Document" + +#: community/apps/dataset/views/document.py:361 +#: community/apps/dataset/views/document.py:362 +msgid "Export Zip document" +msgstr "" + +#: community/apps/dataset/views/document.py:376 +#: community/apps/dataset/views/document.py:377 +msgid "Get document details" +msgstr "" + +#: community/apps/dataset/views/document.py:391 +#: community/apps/dataset/views/document.py:392 +msgid "Modify document" +msgstr "Update Document" + +#: community/apps/dataset/views/document.py:409 +#: community/apps/dataset/views/document.py:410 +msgid "Delete document" +msgstr "Delete Document" + +#: community/apps/dataset/views/document.py:427 +#: community/apps/dataset/views/document.py:428 +msgid "Get a list of segment IDs" +msgstr "" + 
+#: community/apps/dataset/views/document.py:439 +#: community/apps/dataset/views/document.py:440 +msgid "Segmented document" +msgstr "" + +#: community/apps/dataset/views/file.py:42 +#: community/apps/dataset/views/file.py:43 +msgid "Get file" +msgstr "" + +#: community/apps/dataset/views/image.py:28 +#: community/apps/dataset/views/image.py:29 +#: community/apps/dataset/views/image.py:34 +msgid "Upload image" +msgstr "" + +#: community/apps/dataset/views/image.py:35 +#: community/apps/dataset/views/image.py:44 +msgid "Image" +msgstr "" + +#: community/apps/dataset/views/image.py:42 +#: community/apps/dataset/views/image.py:43 +msgid "Get Image" +msgstr "" + +#: community/apps/dataset/views/paragraph.py:28 +#: community/apps/dataset/views/paragraph.py:29 +msgid "Paragraph list" +msgstr "" + +#: community/apps/dataset/views/paragraph.py:32 +#: community/apps/dataset/views/paragraph.py:51 +#: community/apps/dataset/views/paragraph.py:69 +#: community/apps/dataset/views/paragraph.py:85 +#: community/apps/dataset/views/paragraph.py:103 +#: community/apps/dataset/views/paragraph.py:121 +#: community/apps/dataset/views/paragraph.py:140 +#: community/apps/dataset/views/paragraph.py:156 +#: community/apps/dataset/views/paragraph.py:172 +#: community/apps/dataset/views/paragraph.py:193 +#: community/apps/dataset/views/paragraph.py:211 +#: community/apps/dataset/views/paragraph.py:238 +msgid "Knowledge Base/Documentation/Paragraph" +msgstr "" + +#: community/apps/dataset/views/paragraph.py:46 +#: community/apps/dataset/views/paragraph.py:47 +msgid "Create Paragraph" +msgstr "" + +#: community/apps/dataset/views/paragraph.py:64 +#: community/apps/dataset/views/paragraph.py:65 +msgid "Add associated questions" +msgstr "" + +#: community/apps/dataset/views/paragraph.py:80 +#: community/apps/dataset/views/paragraph.py:81 +msgid "Get a list of paragraph questions" +msgstr "" + +#: community/apps/dataset/views/paragraph.py:99 +#: community/apps/dataset/views/paragraph.py:100 
+msgid "Disassociation issue" +msgstr "" + +#: community/apps/dataset/views/paragraph.py:117 +#: community/apps/dataset/views/paragraph.py:118 +msgid "Related questions" +msgstr "" + +#: community/apps/dataset/views/paragraph.py:135 +#: community/apps/dataset/views/paragraph.py:136 +msgid "Modify paragraph data" +msgstr "Update Paragraph Data" + +#: community/apps/dataset/views/paragraph.py:152 +#: community/apps/dataset/views/paragraph.py:153 +msgid "Get paragraph details" +msgstr "" + +#: community/apps/dataset/views/paragraph.py:168 +#: community/apps/dataset/views/paragraph.py:169 +msgid "Delete paragraph" +msgstr "" + +#: community/apps/dataset/views/paragraph.py:187 +#: community/apps/dataset/views/paragraph.py:188 +msgid "Delete paragraphs in batches" +msgstr "" + +#: community/apps/dataset/views/paragraph.py:206 +#: community/apps/dataset/views/paragraph.py:207 +msgid "Migrate paragraphs in batches" +msgstr "" + +#: community/apps/dataset/views/paragraph.py:233 +#: community/apps/dataset/views/paragraph.py:234 +msgid "Get paragraph list by pagination" +msgstr "" + +#: community/apps/dataset/views/problem.py:28 +#: community/apps/dataset/views/problem.py:29 +msgid "Question list" +msgstr "Question List" + +#: community/apps/dataset/views/problem.py:32 +#: community/apps/dataset/views/problem.py:50 +#: community/apps/dataset/views/problem.py:68 +#: community/apps/dataset/views/problem.py:88 +#: community/apps/dataset/views/problem.py:103 +#: community/apps/dataset/views/problem.py:120 +#: community/apps/dataset/views/problem.py:136 +#: community/apps/dataset/views/problem.py:155 +msgid "Knowledge Base/Documentation/Paragraph/Question" +msgstr "" + +#: community/apps/dataset/views/problem.py:45 +#: community/apps/dataset/views/problem.py:46 +msgid "Create question" +msgstr "Create Question" + +#: community/apps/dataset/views/problem.py:64 +#: community/apps/dataset/views/problem.py:65 +msgid "Get a list of associated paragraphs" +msgstr "" + +#: 
community/apps/dataset/views/problem.py:82 +#: community/apps/dataset/views/problem.py:83 +msgid "Batch deletion issues" +msgstr "" + +#: community/apps/dataset/views/problem.py:98 +#: community/apps/dataset/views/problem.py:99 +msgid "Batch associated paragraphs" +msgstr "" + +#: community/apps/dataset/views/problem.py:116 +#: community/apps/dataset/views/problem.py:117 +msgid "Delete question" +msgstr "" + +#: community/apps/dataset/views/problem.py:131 +#: community/apps/dataset/views/problem.py:132 +msgid "Modify question" +msgstr "Update Question" + +#: community/apps/dataset/views/problem.py:150 +#: community/apps/dataset/views/problem.py:151 +msgid "Get the list of questions by page" +msgstr "" + +#: community/apps/embedding/task/embedding.py:30 +#: community/apps/embedding/task/embedding.py:81 +#, python-brace-format +msgid "Failed to obtain vector model: {error} {traceback}" +msgstr "" + +#: community/apps/embedding/task/embedding.py:110 +#, python-brace-format +msgid "Start--->Vectorized dataset: {dataset_id}" +msgstr "" + +#: community/apps/embedding/task/embedding.py:114 +#, python-brace-format +msgid "Dataset documentation: {document_names}" +msgstr "" + +#: community/apps/embedding/task/embedding.py:127 +#, python-brace-format +msgid "End--->Vectorized dataset: {dataset_id}" +msgstr "" + +#: community/apps/function_lib/serializers/function_lib_serializer.py:70 +#: community/apps/function_lib/serializers/function_lib_serializer.py:83 +#: community/apps/function_lib/swagger_api/function_lib_api.py:68 +#: community/apps/function_lib/swagger_api/function_lib_api.py:69 +#: community/apps/function_lib/swagger_api/function_lib_api.py:84 +#: community/apps/function_lib/swagger_api/function_lib_api.py:85 +#: community/apps/function_lib/swagger_api/function_lib_api.py:130 +#: community/apps/function_lib/swagger_api/function_lib_api.py:131 +#: community/apps/function_lib/swagger_api/function_lib_api.py:176 +#: 
community/apps/function_lib/swagger_api/function_lib_api.py:177 +msgid "variable name" +msgstr "Variable Name" + +#: community/apps/function_lib/serializers/function_lib_serializer.py:71 +#: community/apps/function_lib/swagger_api/function_lib_api.py:88 +#: community/apps/function_lib/swagger_api/function_lib_api.py:89 +#: community/apps/function_lib/swagger_api/function_lib_api.py:134 +#: community/apps/function_lib/swagger_api/function_lib_api.py:135 +#: community/apps/function_lib/swagger_api/function_lib_api.py:180 +#: community/apps/function_lib/swagger_api/function_lib_api.py:181 +msgid "required" +msgstr "Required" + +#: community/apps/function_lib/serializers/function_lib_serializer.py:74 +msgid "fields only support string|int|dict|array|float" +msgstr "Fields only support string|int|dict|array|float" + +#: community/apps/function_lib/serializers/function_lib_serializer.py:85 +#: community/apps/function_lib/swagger_api/function_lib_api.py:72 +#: community/apps/function_lib/swagger_api/function_lib_api.py:73 +msgid "variable value" +msgstr "Variable Value" + +#: community/apps/function_lib/serializers/function_lib_serializer.py:93 +#: community/apps/function_lib/serializers/function_lib_serializer.py:104 +#: community/apps/function_lib/serializers/function_lib_serializer.py:119 +#: community/apps/function_lib/serializers/py_lint_serializer.py:23 +#: community/apps/function_lib/swagger_api/function_lib_api.py:28 +#: community/apps/function_lib/swagger_api/function_lib_api.py:29 +#: community/apps/function_lib/swagger_api/function_lib_api.py:75 +#: community/apps/function_lib/swagger_api/function_lib_api.py:76 +#: community/apps/function_lib/swagger_api/function_lib_api.py:117 +#: community/apps/function_lib/swagger_api/function_lib_api.py:118 +#: community/apps/function_lib/swagger_api/function_lib_api.py:163 +#: community/apps/function_lib/swagger_api/function_lib_api.py:164 +#: community/apps/function_lib/swagger_api/py_lint_api.py:22 +#: 
community/apps/function_lib/swagger_api/py_lint_api.py:23 +msgid "function content" +msgstr "Function Content" + +#: community/apps/function_lib/serializers/function_lib_serializer.py:98 +#: community/apps/function_lib/serializers/function_lib_serializer.py:114 +#: community/apps/function_lib/serializers/function_lib_serializer.py:135 +#: community/apps/function_lib/serializers/function_lib_serializer.py:388 +#: community/apps/function_lib/swagger_api/function_lib_api.py:24 +#: community/apps/function_lib/swagger_api/function_lib_api.py:25 +#: community/apps/function_lib/swagger_api/function_lib_api.py:46 +#: community/apps/function_lib/swagger_api/function_lib_api.py:113 +#: community/apps/function_lib/swagger_api/function_lib_api.py:114 +#: community/apps/function_lib/swagger_api/function_lib_api.py:159 +#: community/apps/function_lib/swagger_api/function_lib_api.py:160 +msgid "function name" +msgstr "Function Name" + +#: community/apps/function_lib/serializers/function_lib_serializer.py:101 +#: community/apps/function_lib/serializers/function_lib_serializer.py:117 +#: community/apps/function_lib/serializers/function_lib_serializer.py:138 +#: community/apps/function_lib/swagger_api/function_lib_api.py:26 +#: community/apps/function_lib/swagger_api/function_lib_api.py:27 +#: community/apps/function_lib/swagger_api/function_lib_api.py:51 +#: community/apps/function_lib/swagger_api/function_lib_api.py:115 +#: community/apps/function_lib/swagger_api/function_lib_api.py:116 +#: community/apps/function_lib/swagger_api/function_lib_api.py:161 +#: community/apps/function_lib/swagger_api/function_lib_api.py:162 +msgid "function description" +msgstr "Function Description" + +#: community/apps/function_lib/serializers/function_lib_serializer.py:232 +msgid "field has no value set" +msgstr "Field has no value set" + +#: community/apps/function_lib/serializers/function_lib_serializer.py:248 +#: community/apps/function_lib/serializers/function_lib_serializer.py:253 +msgid "type 
error" +msgstr "Type error" + +#: community/apps/function_lib/serializers/function_lib_serializer.py:256 +#, python-brace-format +msgid "Field: {name} Type: {_type} Value: {value} Type conversion error" +msgstr "" + +#: community/apps/function_lib/serializers/function_lib_serializer.py:261 +msgid "function id" +msgstr "Function ID" + +#: community/apps/function_lib/serializers/function_lib_serializer.py:267 +#: community/apps/function_lib/serializers/function_lib_serializer.py:303 +#: community/apps/function_lib/serializers/function_lib_serializer.py:366 +#: community/apps/function_lib/serializers/function_lib_serializer.py:396 +msgid "Function does not exist" +msgstr "" + +#: community/apps/function_lib/serializers/function_lib_serializer.py:357 +#: community/apps/function_lib/serializers/function_lib_serializer.py:386 +#| msgid "function" +msgid "function ID" +msgstr "Function ID" + +#: community/apps/function_lib/swagger_api/function_lib_api.py:23 +#: community/apps/function_lib/swagger_api/function_lib_api.py:205 +msgid "ID" +msgstr "" + +#: community/apps/function_lib/swagger_api/function_lib_api.py:30 +#: community/apps/function_lib/swagger_api/function_lib_api.py:31 +msgid "input field" +msgstr "Input field" + +#: community/apps/function_lib/swagger_api/function_lib_api.py:62 +#: community/apps/function_lib/swagger_api/function_lib_api.py:78 +#: community/apps/function_lib/swagger_api/function_lib_api.py:124 +#: community/apps/function_lib/swagger_api/function_lib_api.py:170 +msgid "Input variable list" +msgstr "" + +#: community/apps/function_lib/swagger_api/function_lib_api.py:94 +#: community/apps/function_lib/swagger_api/function_lib_api.py:140 +#: community/apps/function_lib/swagger_api/function_lib_api.py:186 +msgid "Field type string|int|dict|array|float" +msgstr "" + +#: community/apps/function_lib/swagger_api/function_lib_api.py:100 +#: community/apps/function_lib/swagger_api/function_lib_api.py:146 +#: 
community/apps/function_lib/swagger_api/function_lib_api.py:192 +msgid "The source only supports custom|reference" +msgstr "" + +#: community/apps/function_lib/views/function_lib_views.py:28 +#: community/apps/function_lib/views/function_lib_views.py:29 +msgid "Get function list" +msgstr "" + +#: community/apps/function_lib/views/function_lib_views.py:30 +#: community/apps/function_lib/views/function_lib_views.py:46 +#: community/apps/function_lib/views/function_lib_views.py:59 +#: community/apps/function_lib/views/function_lib_views.py:74 +#: community/apps/function_lib/views/function_lib_views.py:85 +#: community/apps/function_lib/views/function_lib_views.py:95 +#: community/apps/function_lib/views/function_lib_views.py:111 +#: community/apps/function_lib/views/py_lint.py:29 +msgid "Function" +msgstr "Function Library" + +#: community/apps/function_lib/views/function_lib_views.py:43 +#: community/apps/function_lib/views/function_lib_views.py:44 +msgid "Create function" +msgstr "" + +#: community/apps/function_lib/views/function_lib_views.py:56 +#: community/apps/function_lib/views/function_lib_views.py:57 +msgid "Debug function" +msgstr "" + +#: community/apps/function_lib/views/function_lib_views.py:71 +#: community/apps/function_lib/views/function_lib_views.py:72 +msgid "Update function" +msgstr "" + +#: community/apps/function_lib/views/function_lib_views.py:83 +#: community/apps/function_lib/views/function_lib_views.py:84 +msgid "Delete function" +msgstr "" + +#: community/apps/function_lib/views/function_lib_views.py:93 +#: community/apps/function_lib/views/function_lib_views.py:94 +msgid "Get function details" +msgstr "" + +#: community/apps/function_lib/views/function_lib_views.py:106 +#: community/apps/function_lib/views/function_lib_views.py:107 +msgid "Get function list by pagination" +msgstr "" + +#: community/apps/function_lib/views/function_lib_views.py:129 +#| msgid "function" +msgid "Import function" +msgstr "Import Function" + +#: 
community/apps/function_lib/views/function_lib_views.py:143 +#| msgid "Export conversation" +msgid "Export function" +msgstr "Export function" + +#: community/apps/function_lib/views/py_lint.py:26 +#: community/apps/function_lib/views/py_lint.py:27 +msgid "Check code" +msgstr "" + +#: community/apps/setting/models_provider/base_model_provider.py:66 +msgid "Model type cannot be empty" +msgstr "" + +#: community/apps/setting/models_provider/base_model_provider.py:91 +msgid "The current platform does not support downloading models" +msgstr "" + +#: community/apps/setting/models_provider/base_model_provider.py:146 +msgid "LLM" +msgstr "" + +#: community/apps/setting/models_provider/base_model_provider.py:147 +msgid "Embedding Model" +msgstr "" + +#: community/apps/setting/models_provider/base_model_provider.py:148 +msgid "Speech2Text" +msgstr "" + +#: community/apps/setting/models_provider/base_model_provider.py:149 +msgid "TTS" +msgstr "" + +#: community/apps/setting/models_provider/base_model_provider.py:150 +msgid "Vision Model" +msgstr "" + +#: community/apps/setting/models_provider/base_model_provider.py:151 +msgid "Image Generation" +msgstr "" + +#: community/apps/setting/models_provider/base_model_provider.py:152 +msgid "Rerank" +msgstr "" + +#: community/apps/setting/models_provider/base_model_provider.py:226 +msgid "The model does not support" +msgstr "" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/aliyun_bai_lian_model_provider.py:42 +msgid "" +"With the GTE-Rerank text sorting series model developed by Alibaba Tongyi " +"Lab, developers can integrate high-quality text retrieval and sorting " +"through the LlamaIndex framework." +msgstr "" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/aliyun_bai_lian_model_provider.py:45 +msgid "" +"Chinese (including various dialects such as Cantonese), English, Japanese, " +"and Korean support free switching between multiple languages." 
+msgstr "" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/aliyun_bai_lian_model_provider.py:48 +msgid "" +"CosyVoice is based on a new generation of large generative speech models, " +"which can predict emotions, intonation, rhythm, etc. based on context, and " +"has better anthropomorphic effects." +msgstr "" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/aliyun_bai_lian_model_provider.py:51 +msgid "" +"Universal text vector is Tongyi Lab's multi-language text unified vector " +"model based on the LLM base. It provides high-level vector services for " +"multiple mainstream languages around the world and helps developers quickly " +"convert text data into high-quality vector data." +msgstr "" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/aliyun_bai_lian_model_provider.py:69 +#: community/apps/setting/models_provider/impl/qwen_model_provider/qwen_model_provider.py:40 +msgid "" +"Tongyi Wanxiang - a large image model for text generation, supports " +"bilingual input in Chinese and English, and supports the input of reference " +"pictures for reference content or reference style migration. Key styles " +"include but are not limited to watercolor, oil painting, Chinese painting, " +"sketch, flat illustration, two-dimensional, and 3D. Cartoon." 
+msgstr "" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/aliyun_bai_lian_model_provider.py:95 +msgid "Alibaba Cloud Bailian" +msgstr "" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/embedding.py:28 +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/image.py:47 +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/llm.py:40 +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/reranker.py:28 +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/stt.py:21 +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:68 +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:55 +#: community/apps/setting/models_provider/impl/anthropic_model_provider/credential/image.py:42 +#: community/apps/setting/models_provider/impl/anthropic_model_provider/credential/llm.py:47 +#: community/apps/setting/models_provider/impl/aws_bedrock_model_provider/credential/embedding.py:21 +#: community/apps/setting/models_provider/impl/aws_bedrock_model_provider/credential/llm.py:40 +#: community/apps/setting/models_provider/impl/azure_model_provider/credential/embedding.py:27 +#: community/apps/setting/models_provider/impl/azure_model_provider/credential/image.py:45 +#: community/apps/setting/models_provider/impl/azure_model_provider/credential/llm.py:47 +#: community/apps/setting/models_provider/impl/azure_model_provider/credential/stt.py:23 +#: community/apps/setting/models_provider/impl/azure_model_provider/credential/tti.py:58 +#: community/apps/setting/models_provider/impl/azure_model_provider/credential/tts.py:41 +#: community/apps/setting/models_provider/impl/deepseek_model_provider/credential/llm.py:47 +#: 
community/apps/setting/models_provider/impl/gemini_model_provider/credential/embedding.py:26 +#: community/apps/setting/models_provider/impl/gemini_model_provider/credential/image.py:41 +#: community/apps/setting/models_provider/impl/gemini_model_provider/credential/llm.py:47 +#: community/apps/setting/models_provider/impl/gemini_model_provider/credential/stt.py:21 +#: community/apps/setting/models_provider/impl/kimi_model_provider/credential/llm.py:47 +#: community/apps/setting/models_provider/impl/local_model_provider/credential/embedding.py:27 +#: community/apps/setting/models_provider/impl/local_model_provider/credential/reranker.py:28 +#: community/apps/setting/models_provider/impl/ollama_model_provider/credential/embedding.py:26 +#: community/apps/setting/models_provider/impl/ollama_model_provider/credential/image.py:39 +#: community/apps/setting/models_provider/impl/ollama_model_provider/credential/llm.py:44 +#: community/apps/setting/models_provider/impl/ollama_model_provider/credential/reranker.py:27 +#: community/apps/setting/models_provider/impl/ollama_model_provider/credential/reranker.py:31 +#: community/apps/setting/models_provider/impl/openai_model_provider/credential/embedding.py:26 +#: community/apps/setting/models_provider/impl/openai_model_provider/credential/image.py:44 +#: community/apps/setting/models_provider/impl/openai_model_provider/credential/llm.py:47 +#: community/apps/setting/models_provider/impl/openai_model_provider/credential/stt.py:22 +#: community/apps/setting/models_provider/impl/openai_model_provider/credential/tti.py:61 +#: community/apps/setting/models_provider/impl/openai_model_provider/credential/tts.py:40 +#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/image.py:47 +#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/llm.py:47 +#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/tti.py:68 +#: 
community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/embedding.py:26 +#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/image.py:44 +#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/llm.py:47 +#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/reranker.py:28 +#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/stt.py:22 +#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/tti.py:61 +#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/tts.py:40 +#: community/apps/setting/models_provider/impl/tencent_cloud_model_provider/credential/llm.py:47 +#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/embedding.py:19 +#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/image.py:47 +#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/llm.py:31 +#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:78 +#: community/apps/setting/models_provider/impl/vllm_model_provider/credential/embedding.py:26 +#: community/apps/setting/models_provider/impl/vllm_model_provider/credential/image.py:42 +#: community/apps/setting/models_provider/impl/vllm_model_provider/credential/llm.py:39 +#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/embedding.py:26 +#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/image.py:42 +#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/llm.py:47 +#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/stt.py:25 +#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/tti.py:41 +#: 
community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/tts.py:53 +#: community/apps/setting/models_provider/impl/wenxin_model_provider/credential/embedding.py:27 +#: community/apps/setting/models_provider/impl/wenxin_model_provider/credential/llm.py:46 +#: community/apps/setting/models_provider/impl/xf_model_provider/credential/embedding.py:27 +#: community/apps/setting/models_provider/impl/xf_model_provider/credential/image.py:29 +#: community/apps/setting/models_provider/impl/xf_model_provider/credential/llm.py:66 +#: community/apps/setting/models_provider/impl/xf_model_provider/credential/stt.py:24 +#: community/apps/setting/models_provider/impl/xf_model_provider/credential/tts.py:47 +#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/embedding.py:19 +#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/image.py:41 +#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/llm.py:39 +#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/reranker.py:25 +#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/stt.py:21 +#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/tti.py:59 +#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/tts.py:39 +#: community/apps/setting/models_provider/impl/zhipu_model_provider/credential/image.py:41 +#: community/apps/setting/models_provider/impl/zhipu_model_provider/credential/llm.py:47 +#: community/apps/setting/models_provider/impl/zhipu_model_provider/credential/tti.py:40 +#, python-brace-format +msgid "{model_type} Model type is not supported" +msgstr "" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/embedding.py:32 +#, python-brace-format +msgid "{key} is required" +msgstr "" + +#: 
community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/embedding.py:37 +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/image.py:56 +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/llm.py:50 +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/reranker.py:37 +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/model/tti.py:43 +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/model/tts.py:37 +#: community/apps/setting/models_provider/impl/anthropic_model_provider/credential/image.py:52 +#: community/apps/setting/models_provider/impl/anthropic_model_provider/credential/llm.py:57 +#: community/apps/setting/models_provider/impl/aws_bedrock_model_provider/credential/embedding.py:34 +#: community/apps/setting/models_provider/impl/aws_bedrock_model_provider/credential/llm.py:53 +#: community/apps/setting/models_provider/impl/azure_model_provider/credential/embedding.py:37 +#: community/apps/setting/models_provider/impl/azure_model_provider/credential/image.py:55 +#: community/apps/setting/models_provider/impl/azure_model_provider/credential/llm.py:57 +#: community/apps/setting/models_provider/impl/deepseek_model_provider/credential/llm.py:57 +#: community/apps/setting/models_provider/impl/gemini_model_provider/credential/embedding.py:36 +#: community/apps/setting/models_provider/impl/gemini_model_provider/credential/image.py:51 +#: community/apps/setting/models_provider/impl/gemini_model_provider/credential/llm.py:57 +#: community/apps/setting/models_provider/impl/gemini_model_provider/model/stt.py:43 +#: community/apps/setting/models_provider/impl/kimi_model_provider/credential/llm.py:57 +#: community/apps/setting/models_provider/impl/local_model_provider/credential/embedding.py:36 +#: 
community/apps/setting/models_provider/impl/local_model_provider/credential/reranker.py:37 +#: community/apps/setting/models_provider/impl/ollama_model_provider/credential/embedding.py:37 +#: community/apps/setting/models_provider/impl/ollama_model_provider/credential/reranker.py:44 +#: community/apps/setting/models_provider/impl/openai_model_provider/credential/embedding.py:36 +#: community/apps/setting/models_provider/impl/openai_model_provider/credential/image.py:54 +#: community/apps/setting/models_provider/impl/openai_model_provider/credential/llm.py:58 +#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/image.py:56 +#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/llm.py:56 +#: community/apps/setting/models_provider/impl/qwen_model_provider/model/tti.py:43 +#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/embedding.py:36 +#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/image.py:54 +#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/llm.py:58 +#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/reranker.py:37 +#: community/apps/setting/models_provider/impl/tencent_cloud_model_provider/credential/llm.py:58 +#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/embedding.py:23 +#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/image.py:56 +#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/llm.py:51 +#: community/apps/setting/models_provider/impl/tencent_model_provider/model/tti.py:54 +#: community/apps/setting/models_provider/impl/vllm_model_provider/credential/embedding.py:36 +#: community/apps/setting/models_provider/impl/vllm_model_provider/credential/llm.py:50 +#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/embedding.py:36 +#: 
community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/image.py:52 +#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/llm.py:57 +#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/model/tts.py:77 +#: community/apps/setting/models_provider/impl/wenxin_model_provider/credential/embedding.py:31 +#: community/apps/setting/models_provider/impl/wenxin_model_provider/credential/llm.py:60 +#: community/apps/setting/models_provider/impl/xf_model_provider/credential/embedding.py:31 +#: community/apps/setting/models_provider/impl/xf_model_provider/credential/llm.py:76 +#: community/apps/setting/models_provider/impl/xf_model_provider/model/tts.py:101 +#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/embedding.py:31 +#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/image.py:51 +#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/llm.py:50 +#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/reranker.py:34 +#: community/apps/setting/models_provider/impl/xinference_model_provider/model/tts.py:44 +#: community/apps/setting/models_provider/impl/zhipu_model_provider/credential/image.py:51 +#: community/apps/setting/models_provider/impl/zhipu_model_provider/credential/llm.py:56 +#: community/apps/setting/models_provider/impl/zhipu_model_provider/model/tti.py:49 +msgid "Hello" +msgstr "" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/embedding.py:44 +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/image.py:66 +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/llm.py:58 +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/reranker.py:44 +#: 
community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/stt.py:38 +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:86 +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:73 +#: community/apps/setting/models_provider/impl/anthropic_model_provider/credential/image.py:62 +#: community/apps/setting/models_provider/impl/anthropic_model_provider/credential/llm.py:65 +#: community/apps/setting/models_provider/impl/aws_bedrock_model_provider/credential/embedding.py:42 +#: community/apps/setting/models_provider/impl/aws_bedrock_model_provider/credential/llm.py:61 +#: community/apps/setting/models_provider/impl/azure_model_provider/credential/image.py:65 +#: community/apps/setting/models_provider/impl/azure_model_provider/credential/stt.py:40 +#: community/apps/setting/models_provider/impl/azure_model_provider/credential/tti.py:77 +#: community/apps/setting/models_provider/impl/azure_model_provider/credential/tts.py:58 +#: community/apps/setting/models_provider/impl/deepseek_model_provider/credential/llm.py:65 +#: community/apps/setting/models_provider/impl/gemini_model_provider/credential/embedding.py:43 +#: community/apps/setting/models_provider/impl/gemini_model_provider/credential/image.py:61 +#: community/apps/setting/models_provider/impl/gemini_model_provider/credential/llm.py:66 +#: community/apps/setting/models_provider/impl/gemini_model_provider/credential/stt.py:38 +#: community/apps/setting/models_provider/impl/kimi_model_provider/credential/llm.py:64 +#: community/apps/setting/models_provider/impl/local_model_provider/credential/embedding.py:44 +#: community/apps/setting/models_provider/impl/local_model_provider/credential/reranker.py:45 +#: community/apps/setting/models_provider/impl/ollama_model_provider/credential/reranker.py:51 +#: community/apps/setting/models_provider/impl/openai_model_provider/credential/embedding.py:43 +#: 
community/apps/setting/models_provider/impl/openai_model_provider/credential/image.py:64 +#: community/apps/setting/models_provider/impl/openai_model_provider/credential/llm.py:66 +#: community/apps/setting/models_provider/impl/openai_model_provider/credential/stt.py:39 +#: community/apps/setting/models_provider/impl/openai_model_provider/credential/tti.py:80 +#: community/apps/setting/models_provider/impl/openai_model_provider/credential/tts.py:58 +#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/image.py:66 +#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/llm.py:64 +#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/tti.py:86 +#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/embedding.py:43 +#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/image.py:64 +#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/llm.py:66 +#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/reranker.py:44 +#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/stt.py:39 +#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/tti.py:80 +#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/tts.py:58 +#: community/apps/setting/models_provider/impl/tencent_cloud_model_provider/credential/llm.py:66 +#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/embedding.py:30 +#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/image.py:66 +#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/llm.py:57 +#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:104 +#: community/apps/setting/models_provider/impl/vllm_model_provider/credential/embedding.py:43 +#: 
community/apps/setting/models_provider/impl/vllm_model_provider/credential/image.py:62 +#: community/apps/setting/models_provider/impl/vllm_model_provider/credential/llm.py:55 +#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/embedding.py:43 +#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/image.py:62 +#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/llm.py:66 +#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/stt.py:42 +#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/tti.py:58 +#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/tts.py:70 +#: community/apps/setting/models_provider/impl/wenxin_model_provider/credential/embedding.py:38 +#: community/apps/setting/models_provider/impl/xf_model_provider/credential/embedding.py:38 +#: community/apps/setting/models_provider/impl/xf_model_provider/credential/image.py:50 +#: community/apps/setting/models_provider/impl/xf_model_provider/credential/llm.py:84 +#: community/apps/setting/models_provider/impl/xf_model_provider/credential/stt.py:41 +#: community/apps/setting/models_provider/impl/xf_model_provider/credential/tts.py:65 +#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/image.py:60 +#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/reranker.py:40 +#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/stt.py:37 +#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/tti.py:77 +#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/tts.py:56 +#: community/apps/setting/models_provider/impl/zhipu_model_provider/credential/image.py:61 +#: community/apps/setting/models_provider/impl/zhipu_model_provider/credential/llm.py:64 +#: 
community/apps/setting/models_provider/impl/zhipu_model_provider/credential/tti.py:59 +#, python-brace-format +msgid "" +"Verification failed, please check whether the parameters are correct: {error}" +msgstr "" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/image.py:22 +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/llm.py:15 +#: community/apps/setting/models_provider/impl/anthropic_model_provider/credential/image.py:15 +#: community/apps/setting/models_provider/impl/anthropic_model_provider/credential/llm.py:22 +#: community/apps/setting/models_provider/impl/aws_bedrock_model_provider/credential/llm.py:14 +#: community/apps/setting/models_provider/impl/azure_model_provider/credential/image.py:17 +#: community/apps/setting/models_provider/impl/azure_model_provider/credential/llm.py:22 +#: community/apps/setting/models_provider/impl/deepseek_model_provider/credential/llm.py:22 +#: community/apps/setting/models_provider/impl/gemini_model_provider/credential/image.py:15 +#: community/apps/setting/models_provider/impl/gemini_model_provider/credential/llm.py:22 +#: community/apps/setting/models_provider/impl/kimi_model_provider/credential/llm.py:22 +#: community/apps/setting/models_provider/impl/ollama_model_provider/credential/image.py:12 +#: community/apps/setting/models_provider/impl/ollama_model_provider/credential/llm.py:20 +#: community/apps/setting/models_provider/impl/openai_model_provider/credential/image.py:17 +#: community/apps/setting/models_provider/impl/openai_model_provider/credential/llm.py:22 +#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/image.py:22 +#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/llm.py:22 +#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/image.py:17 +#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/llm.py:22 +#: 
community/apps/setting/models_provider/impl/tencent_cloud_model_provider/credential/llm.py:22 +#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/image.py:22 +#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/llm.py:14 +#: community/apps/setting/models_provider/impl/vllm_model_provider/credential/image.py:15 +#: community/apps/setting/models_provider/impl/vllm_model_provider/credential/llm.py:15 +#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/image.py:15 +#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/llm.py:22 +#: community/apps/setting/models_provider/impl/wenxin_model_provider/credential/llm.py:22 +#: community/apps/setting/models_provider/impl/xf_model_provider/credential/llm.py:22 +#: community/apps/setting/models_provider/impl/xf_model_provider/credential/llm.py:41 +#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/image.py:14 +#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/llm.py:15 +#: community/apps/setting/models_provider/impl/zhipu_model_provider/credential/image.py:15 +#: community/apps/setting/models_provider/impl/zhipu_model_provider/credential/llm.py:22 +msgid "Temperature" +msgstr "" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/image.py:23 +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/llm.py:16 +#: community/apps/setting/models_provider/impl/anthropic_model_provider/credential/image.py:16 +#: community/apps/setting/models_provider/impl/anthropic_model_provider/credential/llm.py:23 +#: community/apps/setting/models_provider/impl/aws_bedrock_model_provider/credential/llm.py:15 +#: community/apps/setting/models_provider/impl/azure_model_provider/credential/image.py:18 +#: 
community/apps/setting/models_provider/impl/azure_model_provider/credential/llm.py:23 +#: community/apps/setting/models_provider/impl/deepseek_model_provider/credential/llm.py:23 +#: community/apps/setting/models_provider/impl/gemini_model_provider/credential/image.py:16 +#: community/apps/setting/models_provider/impl/gemini_model_provider/credential/llm.py:23 +#: community/apps/setting/models_provider/impl/kimi_model_provider/credential/llm.py:23 +#: community/apps/setting/models_provider/impl/ollama_model_provider/credential/image.py:13 +#: community/apps/setting/models_provider/impl/ollama_model_provider/credential/llm.py:21 +#: community/apps/setting/models_provider/impl/openai_model_provider/credential/image.py:18 +#: community/apps/setting/models_provider/impl/openai_model_provider/credential/llm.py:23 +#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/image.py:23 +#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/llm.py:23 +#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/image.py:18 +#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/llm.py:23 +#: community/apps/setting/models_provider/impl/tencent_cloud_model_provider/credential/llm.py:23 +#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/image.py:23 +#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/llm.py:15 +#: community/apps/setting/models_provider/impl/vllm_model_provider/credential/image.py:16 +#: community/apps/setting/models_provider/impl/vllm_model_provider/credential/llm.py:16 +#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/image.py:16 +#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/llm.py:23 +#: community/apps/setting/models_provider/impl/wenxin_model_provider/credential/llm.py:23 +#: 
community/apps/setting/models_provider/impl/xf_model_provider/credential/llm.py:23 +#: community/apps/setting/models_provider/impl/xf_model_provider/credential/llm.py:42 +#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/image.py:15 +#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/llm.py:16 +#: community/apps/setting/models_provider/impl/zhipu_model_provider/credential/image.py:16 +#: community/apps/setting/models_provider/impl/zhipu_model_provider/credential/llm.py:23 +msgid "" +"Higher values make the output more random, while lower values make it more " +"focused and deterministic" +msgstr "" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/image.py:31 +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/llm.py:24 +#: community/apps/setting/models_provider/impl/anthropic_model_provider/credential/image.py:24 +#: community/apps/setting/models_provider/impl/anthropic_model_provider/credential/llm.py:31 +#: community/apps/setting/models_provider/impl/aws_bedrock_model_provider/credential/llm.py:23 +#: community/apps/setting/models_provider/impl/azure_model_provider/credential/image.py:26 +#: community/apps/setting/models_provider/impl/azure_model_provider/credential/llm.py:31 +#: community/apps/setting/models_provider/impl/deepseek_model_provider/credential/llm.py:31 +#: community/apps/setting/models_provider/impl/gemini_model_provider/credential/image.py:24 +#: community/apps/setting/models_provider/impl/gemini_model_provider/credential/llm.py:31 +#: community/apps/setting/models_provider/impl/kimi_model_provider/credential/llm.py:31 +#: community/apps/setting/models_provider/impl/ollama_model_provider/credential/image.py:21 +#: community/apps/setting/models_provider/impl/ollama_model_provider/credential/llm.py:29 +#: community/apps/setting/models_provider/impl/openai_model_provider/credential/image.py:26 +#: 
community/apps/setting/models_provider/impl/openai_model_provider/credential/llm.py:31 +#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/image.py:31 +#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/llm.py:31 +#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/image.py:26 +#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/llm.py:31 +#: community/apps/setting/models_provider/impl/tencent_cloud_model_provider/credential/llm.py:31 +#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/image.py:31 +#: community/apps/setting/models_provider/impl/vllm_model_provider/credential/image.py:24 +#: community/apps/setting/models_provider/impl/vllm_model_provider/credential/llm.py:24 +#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/image.py:24 +#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/llm.py:31 +#: community/apps/setting/models_provider/impl/wenxin_model_provider/credential/llm.py:31 +#: community/apps/setting/models_provider/impl/xf_model_provider/credential/llm.py:31 +#: community/apps/setting/models_provider/impl/xf_model_provider/credential/llm.py:50 +#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/image.py:23 +#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/llm.py:24 +#: community/apps/setting/models_provider/impl/zhipu_model_provider/credential/image.py:24 +#: community/apps/setting/models_provider/impl/zhipu_model_provider/credential/llm.py:31 +msgid "Output the maximum Tokens" +msgstr "" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/image.py:32 +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/llm.py:25 +#: 
community/apps/setting/models_provider/impl/anthropic_model_provider/credential/image.py:25 +#: community/apps/setting/models_provider/impl/anthropic_model_provider/credential/llm.py:32 +#: community/apps/setting/models_provider/impl/aws_bedrock_model_provider/credential/llm.py:24 +#: community/apps/setting/models_provider/impl/azure_model_provider/credential/image.py:27 +#: community/apps/setting/models_provider/impl/azure_model_provider/credential/llm.py:32 +#: community/apps/setting/models_provider/impl/deepseek_model_provider/credential/llm.py:32 +#: community/apps/setting/models_provider/impl/gemini_model_provider/credential/image.py:25 +#: community/apps/setting/models_provider/impl/gemini_model_provider/credential/llm.py:32 +#: community/apps/setting/models_provider/impl/kimi_model_provider/credential/llm.py:32 +#: community/apps/setting/models_provider/impl/ollama_model_provider/credential/image.py:22 +#: community/apps/setting/models_provider/impl/ollama_model_provider/credential/llm.py:30 +#: community/apps/setting/models_provider/impl/openai_model_provider/credential/image.py:27 +#: community/apps/setting/models_provider/impl/openai_model_provider/credential/llm.py:32 +#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/image.py:32 +#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/llm.py:32 +#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/image.py:27 +#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/llm.py:32 +#: community/apps/setting/models_provider/impl/tencent_cloud_model_provider/credential/llm.py:32 +#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/image.py:32 +#: community/apps/setting/models_provider/impl/vllm_model_provider/credential/image.py:25 +#: community/apps/setting/models_provider/impl/vllm_model_provider/credential/llm.py:25 +#: 
community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/image.py:25 +#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/llm.py:32 +#: community/apps/setting/models_provider/impl/wenxin_model_provider/credential/llm.py:32 +#: community/apps/setting/models_provider/impl/xf_model_provider/credential/llm.py:32 +#: community/apps/setting/models_provider/impl/xf_model_provider/credential/llm.py:51 +#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/image.py:24 +#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/llm.py:25 +#: community/apps/setting/models_provider/impl/zhipu_model_provider/credential/image.py:25 +#: community/apps/setting/models_provider/impl/zhipu_model_provider/credential/llm.py:32 +msgid "Specify the maximum number of tokens that the model can generate" +msgstr "" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/image.py:51 +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/llm.py:45 +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/reranker.py:32 +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/stt.py:26 +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:72 +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:60 +#: community/apps/setting/models_provider/impl/anthropic_model_provider/credential/image.py:47 +#: community/apps/setting/models_provider/impl/anthropic_model_provider/credential/llm.py:52 +#: community/apps/setting/models_provider/impl/azure_model_provider/credential/embedding.py:32 +#: community/apps/setting/models_provider/impl/azure_model_provider/credential/image.py:50 +#: community/apps/setting/models_provider/impl/azure_model_provider/credential/llm.py:52 
+#: community/apps/setting/models_provider/impl/azure_model_provider/credential/stt.py:28 +#: community/apps/setting/models_provider/impl/azure_model_provider/credential/tti.py:63 +#: community/apps/setting/models_provider/impl/azure_model_provider/credential/tts.py:46 +#: community/apps/setting/models_provider/impl/deepseek_model_provider/credential/llm.py:52 +#: community/apps/setting/models_provider/impl/gemini_model_provider/credential/embedding.py:31 +#: community/apps/setting/models_provider/impl/gemini_model_provider/credential/image.py:46 +#: community/apps/setting/models_provider/impl/gemini_model_provider/credential/llm.py:52 +#: community/apps/setting/models_provider/impl/gemini_model_provider/credential/stt.py:26 +#: community/apps/setting/models_provider/impl/kimi_model_provider/credential/llm.py:52 +#: community/apps/setting/models_provider/impl/local_model_provider/credential/embedding.py:31 +#: community/apps/setting/models_provider/impl/local_model_provider/credential/reranker.py:32 +#: community/apps/setting/models_provider/impl/ollama_model_provider/credential/embedding.py:46 +#: community/apps/setting/models_provider/impl/ollama_model_provider/credential/llm.py:62 +#: community/apps/setting/models_provider/impl/ollama_model_provider/credential/reranker.py:63 +#: community/apps/setting/models_provider/impl/openai_model_provider/credential/embedding.py:31 +#: community/apps/setting/models_provider/impl/openai_model_provider/credential/image.py:49 +#: community/apps/setting/models_provider/impl/openai_model_provider/credential/llm.py:52 +#: community/apps/setting/models_provider/impl/openai_model_provider/credential/stt.py:27 +#: community/apps/setting/models_provider/impl/openai_model_provider/credential/tti.py:66 +#: community/apps/setting/models_provider/impl/openai_model_provider/credential/tts.py:45 +#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/image.py:51 +#: 
community/apps/setting/models_provider/impl/qwen_model_provider/credential/llm.py:51 +#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/tti.py:72 +#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/embedding.py:31 +#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/image.py:49 +#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/llm.py:52 +#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/reranker.py:32 +#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/stt.py:27 +#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/tti.py:66 +#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/tts.py:45 +#: community/apps/setting/models_provider/impl/tencent_cloud_model_provider/credential/llm.py:52 +#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/image.py:51 +#: community/apps/setting/models_provider/impl/vllm_model_provider/credential/embedding.py:31 +#: community/apps/setting/models_provider/impl/vllm_model_provider/credential/image.py:47 +#: community/apps/setting/models_provider/impl/vllm_model_provider/credential/llm.py:65 +#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/embedding.py:31 +#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/image.py:47 +#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/llm.py:52 +#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/stt.py:30 +#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/tti.py:46 +#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/tts.py:58 +#: 
community/apps/setting/models_provider/impl/wenxin_model_provider/credential/llm.py:55 +#: community/apps/setting/models_provider/impl/wenxin_model_provider/credential/llm.py:72 +#: community/apps/setting/models_provider/impl/xf_model_provider/credential/image.py:34 +#: community/apps/setting/models_provider/impl/xf_model_provider/credential/llm.py:71 +#: community/apps/setting/models_provider/impl/xf_model_provider/credential/stt.py:29 +#: community/apps/setting/models_provider/impl/xf_model_provider/credential/tts.py:52 +#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/embedding.py:40 +#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/image.py:46 +#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/llm.py:59 +#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/reranker.py:29 +#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/stt.py:26 +#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/tti.py:64 +#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/tts.py:44 +#: community/apps/setting/models_provider/impl/zhipu_model_provider/credential/image.py:46 +#: community/apps/setting/models_provider/impl/zhipu_model_provider/credential/llm.py:51 +#: community/apps/setting/models_provider/impl/zhipu_model_provider/credential/tti.py:45 +#, python-brace-format +msgid "{key} is required" +msgstr "" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:22 +#: community/apps/setting/models_provider/impl/azure_model_provider/credential/tti.py:15 +#: community/apps/setting/models_provider/impl/openai_model_provider/credential/tti.py:15 +#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/tti.py:22 +#: 
community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/tti.py:15 +#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/tti.py:15 +#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/tti.py:14 +#: community/apps/setting/models_provider/impl/zhipu_model_provider/credential/tti.py:15 +msgid "Image size" +msgstr "" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:22 +#: community/apps/setting/models_provider/impl/azure_model_provider/credential/tti.py:15 +#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/tti.py:22 +msgid "Specify the size of the generated image, such as: 1024x1024" +msgstr "" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:34 +#: community/apps/setting/models_provider/impl/azure_model_provider/credential/tti.py:40 +#: community/apps/setting/models_provider/impl/openai_model_provider/credential/tti.py:43 +#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/tti.py:34 +#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/tti.py:43 +#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/tti.py:41 +msgid "Number of pictures" +msgstr "" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:34 +#: community/apps/setting/models_provider/impl/azure_model_provider/credential/tti.py:40 +#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/tti.py:34 +msgid "Specify the number of generated images" +msgstr "" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:41 +#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/tti.py:41 +msgid "Style" +msgstr "" + +#: 
community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:41 +#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/tti.py:41 +msgid "Specify the style of generated images" +msgstr "" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:45 +#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/tti.py:45 +msgid "Default value, the image style is randomly output by the model" +msgstr "" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:46 +#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/tti.py:46 +msgid "photography" +msgstr "Photography" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:47 +#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/tti.py:47 +msgid "Portraits" +msgstr "Portraits" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:48 +#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/tti.py:48 +msgid "3D cartoon" +msgstr "" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:49 +#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/tti.py:49 +msgid "animation" +msgstr "Animation" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:50 +#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/tti.py:50 +msgid "painting" +msgstr "Painting" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:51 +#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/tti.py:51 +msgid "watercolor" +msgstr "Watercolor" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:52 +#: 
community/apps/setting/models_provider/impl/qwen_model_provider/credential/tti.py:52 +msgid "sketch" +msgstr "Sketch" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:53 +#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/tti.py:53 +msgid "Chinese painting" +msgstr "" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:54 +#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/tti.py:54 +msgid "flat illustration" +msgstr "Flat illustration" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:15 +#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/tts.py:15 +#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/tts.py:15 +msgid "timbre" +msgstr "Timbre" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:15 +#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/tts.py:15 +msgid "Chinese sounds can support mixed scenes of Chinese and English" +msgstr "" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:20 +msgid "Long Xiaochun" +msgstr "" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:21 +msgid "Long Xiaoxia" +msgstr "" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:22 +msgid "Long Xiaochen" +msgstr "" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:23 +msgid "Long Xiaobai" +msgstr "" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:24 +msgid "Long laotie" +msgstr "" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:25 +msgid "Long 
Shu" +msgstr "" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:26 +msgid "Long Shuo" +msgstr "" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:27 +msgid "Long Jing" +msgstr "" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:28 +msgid "Long Miao" +msgstr "" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:29 +msgid "Long Yue" +msgstr "" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:30 +msgid "Long Yuan" +msgstr "" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:31 +msgid "Long Fei" +msgstr "" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:32 +msgid "Long Jielidou" +msgstr "" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:33 +msgid "Long Tong" +msgstr "" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:34 +msgid "Long Xiang" +msgstr "" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:39 +#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/tts.py:33 +#: community/apps/setting/models_provider/impl/xf_model_provider/credential/tts.py:28 +msgid "speaking speed" +msgstr "Speaking speed" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:39 +msgid "[0.5,2], the default is 1, usually one decimal place is enough" +msgstr "" + +#: community/apps/setting/models_provider/impl/anthropic_model_provider/credential/image.py:34 +#: community/apps/setting/models_provider/impl/anthropic_model_provider/credential/llm.py:74 +msgid "API URL" +msgstr "" + +#: 
community/apps/setting/models_provider/impl/anthropic_model_provider/credential/image.py:35 +#: community/apps/setting/models_provider/impl/anthropic_model_provider/credential/llm.py:75 +msgid "API Key" +msgstr "" + +#: community/apps/setting/models_provider/impl/aws_bedrock_model_provider/aws_bedrock_model_provider.py:36 +msgid "" +"An update to Claude 2 that doubles the context window and improves " +"reliability, hallucination rates, and evidence-based accuracy in long " +"documents and RAG contexts." +msgstr "" + +#: community/apps/setting/models_provider/impl/aws_bedrock_model_provider/aws_bedrock_model_provider.py:43 +msgid "" +"Anthropic is a powerful model that can handle a variety of tasks, from " +"complex dialogue and creative content generation to detailed command " +"obedience." +msgstr "" + +#: community/apps/setting/models_provider/impl/aws_bedrock_model_provider/aws_bedrock_model_provider.py:50 +msgid "" +"The Claude 3 Haiku is Anthropic's fastest and most compact model, with near-" +"instant responsiveness. The model can answer simple queries and requests " +"quickly. Customers will be able to build seamless AI experiences that mimic " +"human interactions. Claude 3 Haiku can process images and return text " +"output, and provides 200K context windows." +msgstr "" + +#: community/apps/setting/models_provider/impl/aws_bedrock_model_provider/aws_bedrock_model_provider.py:57 +msgid "" +"The Claude 3 Sonnet model from Anthropic strikes the ideal balance between " +"intelligence and speed, especially when it comes to handling enterprise " +"workloads. This model offers maximum utility while being priced lower than " +"competing products, and it's been engineered to be a solid choice for " +"deploying AI at scale." 
+msgstr "" + +#: community/apps/setting/models_provider/impl/aws_bedrock_model_provider/aws_bedrock_model_provider.py:64 +msgid "" +"The Claude 3.5 Sonnet raises the industry standard for intelligence, " +"outperforming competing models and the Claude 3 Opus in extensive " +"evaluations, with the speed and cost-effectiveness of our mid-range models." +msgstr "" + +#: community/apps/setting/models_provider/impl/aws_bedrock_model_provider/aws_bedrock_model_provider.py:71 +msgid "" +"A faster, more affordable but still very powerful model that can handle a " +"range of tasks including casual conversation, text analysis, summarization " +"and document question answering." +msgstr "" + +#: community/apps/setting/models_provider/impl/aws_bedrock_model_provider/aws_bedrock_model_provider.py:78 +msgid "" +"Titan Text Premier is the most powerful and advanced model in the Titan Text " +"series, designed to deliver exceptional performance for a variety of " +"enterprise applications. With its cutting-edge features, it delivers greater " +"accuracy and outstanding results, making it an excellent choice for " +"organizations looking for a top-notch text processing solution." +msgstr "" + +#: community/apps/setting/models_provider/impl/aws_bedrock_model_provider/aws_bedrock_model_provider.py:85 +msgid "" +"Amazon Titan Text Lite is a lightweight, efficient model ideal for fine-" +"tuning English-language tasks, including summarization and copywriting, " +"where customers require smaller, more cost-effective, and highly " +"customizable models." +msgstr "" + +#: community/apps/setting/models_provider/impl/aws_bedrock_model_provider/aws_bedrock_model_provider.py:91 +msgid "" +"Amazon Titan Text Express has context lengths of up to 8,000 tokens, making " +"it ideal for a variety of high-level general language tasks, such as open-" +"ended text generation and conversational chat, as well as support in " +"retrieval-augmented generation (RAG). 
At launch, the model is optimized for " +"English, but other languages are supported." +msgstr "" + +#: community/apps/setting/models_provider/impl/aws_bedrock_model_provider/aws_bedrock_model_provider.py:97 +msgid "" +"7B dense converter for rapid deployment and easy customization. Small in " +"size yet powerful in a variety of use cases. Supports English and code, as " +"well as 32k context windows." +msgstr "" + +#: community/apps/setting/models_provider/impl/aws_bedrock_model_provider/aws_bedrock_model_provider.py:103 +msgid "" +"Advanced Mistral AI large-scale language model capable of handling any " +"language task, including complex multilingual reasoning, text understanding, " +"transformation, and code generation." +msgstr "" + +#: community/apps/setting/models_provider/impl/aws_bedrock_model_provider/aws_bedrock_model_provider.py:109 +msgid "" +"Ideal for content creation, conversational AI, language understanding, R&D, " +"and enterprise applications" +msgstr "" + +#: community/apps/setting/models_provider/impl/aws_bedrock_model_provider/aws_bedrock_model_provider.py:115 +msgid "" +"Ideal for limited computing power and resources, edge devices, and faster " +"training times." +msgstr "" + +#: community/apps/setting/models_provider/impl/aws_bedrock_model_provider/aws_bedrock_model_provider.py:123 +msgid "" +"Titan Embed Text is the largest embedding model in the Amazon Titan Embed " +"series and can handle various text embedding tasks, such as text " +"classification, text similarity calculation, etc." 
+msgstr "" + +#: community/apps/setting/models_provider/impl/aws_bedrock_model_provider/credential/embedding.py:28 +#: community/apps/setting/models_provider/impl/aws_bedrock_model_provider/credential/llm.py:47 +#, python-brace-format +msgid "The following fields are required: {keys}" +msgstr "" + +#: community/apps/setting/models_provider/impl/azure_model_provider/credential/embedding.py:44 +#: community/apps/setting/models_provider/impl/azure_model_provider/credential/llm.py:64 +msgid "Verification failed, please check whether the parameters are correct" +msgstr "" + +#: community/apps/setting/models_provider/impl/azure_model_provider/credential/tti.py:28 +#: community/apps/setting/models_provider/impl/openai_model_provider/credential/tti.py:29 +#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/tti.py:29 +#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/tti.py:28 +msgid "Picture quality" +msgstr "" + +#: community/apps/setting/models_provider/impl/azure_model_provider/credential/tts.py:17 +#: community/apps/setting/models_provider/impl/openai_model_provider/credential/tts.py:17 +#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/tts.py:17 +msgid "" +"Try out the different sounds (Alloy, Echo, Fable, Onyx, Nova, and Sparkle) " +"to find one that suits your desired tone and audience. The current voiceover " +"is optimized for English." 
+msgstr "" + +#: community/apps/setting/models_provider/impl/deepseek_model_provider/deepseek_model_provider.py:24 +msgid "Good at common conversational tasks, supports 32K contexts" +msgstr "" + +#: community/apps/setting/models_provider/impl/deepseek_model_provider/deepseek_model_provider.py:29 +msgid "Good at handling programming tasks, supports 16K contexts" +msgstr "" + +#: community/apps/setting/models_provider/impl/gemini_model_provider/gemini_model_provider.py:32 +msgid "Latest Gemini 1.0 Pro model, updated with Google update" +msgstr "" + +#: community/apps/setting/models_provider/impl/gemini_model_provider/gemini_model_provider.py:36 +msgid "Latest Gemini 1.0 Pro Vision model, updated with Google update" +msgstr "" + +#: community/apps/setting/models_provider/impl/gemini_model_provider/gemini_model_provider.py:43 +#: community/apps/setting/models_provider/impl/gemini_model_provider/gemini_model_provider.py:47 +#: community/apps/setting/models_provider/impl/gemini_model_provider/gemini_model_provider.py:54 +#: community/apps/setting/models_provider/impl/gemini_model_provider/gemini_model_provider.py:58 +msgid "Latest Gemini 1.5 Flash model, updated with Google updates" +msgstr "" + +#: community/apps/setting/models_provider/impl/gemini_model_provider/model/stt.py:53 +msgid "convert audio to text" +msgstr "Convert audio to text" + +#: community/apps/setting/models_provider/impl/local_model_provider/credential/embedding.py:53 +#: community/apps/setting/models_provider/impl/local_model_provider/credential/reranker.py:54 +msgid "Model catalog" +msgstr "" + +#: community/apps/setting/models_provider/impl/local_model_provider/local_model_provider.py:39 +msgid "local model" +msgstr "Local model" + +#: community/apps/setting/models_provider/impl/ollama_model_provider/credential/embedding.py:30 +#: community/apps/setting/models_provider/impl/ollama_model_provider/credential/image.py:43 +#: 
community/apps/setting/models_provider/impl/ollama_model_provider/credential/llm.py:48 +#: community/apps/setting/models_provider/impl/ollama_model_provider/credential/reranker.py:35 +#: community/apps/setting/models_provider/impl/vllm_model_provider/credential/llm.py:43 +#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/embedding.py:24 +#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/llm.py:44 +msgid "API domain name is invalid" +msgstr "" + +#: community/apps/setting/models_provider/impl/ollama_model_provider/credential/embedding.py:35 +#: community/apps/setting/models_provider/impl/ollama_model_provider/credential/image.py:48 +#: community/apps/setting/models_provider/impl/ollama_model_provider/credential/llm.py:53 +#: community/apps/setting/models_provider/impl/ollama_model_provider/credential/reranker.py:40 +#: community/apps/setting/models_provider/impl/vllm_model_provider/credential/llm.py:47 +#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/embedding.py:30 +#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/llm.py:48 +msgid "The model does not exist, please download the model first" +msgstr "" + +#: community/apps/setting/models_provider/impl/ollama_model_provider/ollama_model_provider.py:56 +msgid "" +"Llama 2 is a set of pretrained and fine-tuned generative text models ranging " +"in size from 7 billion to 70 billion. This is a repository of 7B pretrained " +"models. Links to other models can be found in the index at the bottom." +msgstr "" + +#: community/apps/setting/models_provider/impl/ollama_model_provider/ollama_model_provider.py:60 +msgid "" +"Llama 2 is a set of pretrained and fine-tuned generative text models ranging " +"in size from 7 billion to 70 billion. This is a repository of 13B pretrained " +"models. Links to other models can be found in the index at the bottom." 
+msgstr "" + +#: community/apps/setting/models_provider/impl/ollama_model_provider/ollama_model_provider.py:64 +msgid "" +"Llama 2 is a set of pretrained and fine-tuned generative text models ranging " +"in size from 7 billion to 70 billion. This is a repository of 70B pretrained " +"models. Links to other models can be found in the index at the bottom." +msgstr "" + +#: community/apps/setting/models_provider/impl/ollama_model_provider/ollama_model_provider.py:68 +msgid "" +"Since the Chinese alignment of Llama2 itself is weak, we use the Chinese " +"instruction set to fine-tune meta-llama/Llama-2-13b-chat-hf with LoRA so " +"that it has strong Chinese conversation capabilities." +msgstr "" + +#: community/apps/setting/models_provider/impl/ollama_model_provider/ollama_model_provider.py:72 +msgid "" +"Meta Llama 3: The most capable public product LLM to date. 8 billion " +"parameters." +msgstr "" + +#: community/apps/setting/models_provider/impl/ollama_model_provider/ollama_model_provider.py:76 +msgid "" +"Meta Llama 3: The most capable public product LLM to date. 70 billion " +"parameters." +msgstr "" + +#: community/apps/setting/models_provider/impl/ollama_model_provider/ollama_model_provider.py:80 +msgid "" +"Compared with previous versions, qwen 1.5 0.5b has significantly enhanced " +"the model's alignment with human preferences and its multi-language " +"processing capabilities. Models of all sizes support a context length of " +"32768 tokens. 500 million parameters." +msgstr "" + +#: community/apps/setting/models_provider/impl/ollama_model_provider/ollama_model_provider.py:84 +msgid "" +"Compared with previous versions, qwen 1.5 1.8b has significantly enhanced " +"the model's alignment with human preferences and its multi-language " +"processing capabilities. Models of all sizes support a context length of " +"32768 tokens. 1.8 billion parameters." 
+msgstr "" + +#: community/apps/setting/models_provider/impl/ollama_model_provider/ollama_model_provider.py:88 +msgid "" +"Compared with previous versions, qwen 1.5 4b has significantly enhanced the " +"model's alignment with human preferences and its multi-language processing " +"capabilities. Models of all sizes support a context length of 32768 tokens. " +"4 billion parameters." +msgstr "" + +#: community/apps/setting/models_provider/impl/ollama_model_provider/ollama_model_provider.py:93 +msgid "" +"Compared with previous versions, qwen 1.5 7b has significantly enhanced the " +"model's alignment with human preferences and its multi-language processing " +"capabilities. Models of all sizes support a context length of 32768 tokens. " +"7 billion parameters." +msgstr "" + +#: community/apps/setting/models_provider/impl/ollama_model_provider/ollama_model_provider.py:97 +msgid "" +"Compared with previous versions, qwen 1.5 14b has significantly enhanced the " +"model's alignment with human preferences and its multi-language processing " +"capabilities. Models of all sizes support a context length of 32768 tokens. " +"14 billion parameters." +msgstr "" + +#: community/apps/setting/models_provider/impl/ollama_model_provider/ollama_model_provider.py:101 +msgid "" +"Compared with previous versions, qwen 1.5 32b has significantly enhanced the " +"model's alignment with human preferences and its multi-language processing " +"capabilities. Models of all sizes support a context length of 32768 tokens. " +"32 billion parameters." +msgstr "" + +#: community/apps/setting/models_provider/impl/ollama_model_provider/ollama_model_provider.py:105 +msgid "" +"Compared with previous versions, qwen 1.5 72b has significantly enhanced the " +"model's alignment with human preferences and its multi-language processing " +"capabilities. Models of all sizes support a context length of 32768 tokens. " +"72 billion parameters." 
+msgstr "" + +#: community/apps/setting/models_provider/impl/ollama_model_provider/ollama_model_provider.py:109 +msgid "" +"Compared with previous versions, qwen 1.5 110b has significantly enhanced " +"the model's alignment with human preferences and its multi-language " +"processing capabilities. Models of all sizes support a context length of " +"32768 tokens. 110 billion parameters." +msgstr "" + +#: community/apps/setting/models_provider/impl/ollama_model_provider/ollama_model_provider.py:153 +#: community/apps/setting/models_provider/impl/ollama_model_provider/ollama_model_provider.py:193 +msgid "" +"Phi-3 Mini is Microsoft's 3.8B parameter, lightweight, state-of-the-art open " +"model." +msgstr "" + +#: community/apps/setting/models_provider/impl/ollama_model_provider/ollama_model_provider.py:162 +#: community/apps/setting/models_provider/impl/ollama_model_provider/ollama_model_provider.py:197 +msgid "" +"A high-performance open embedding model with a large token context window." +msgstr "" + +#: community/apps/setting/models_provider/impl/openai_model_provider/credential/tti.py:16 +#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/tti.py:16 +msgid "" +"The image generation endpoint allows you to create raw images based on text " +"prompts. When using the DALL·E 3, the image size can be 1024x1024, 1024x1792 " +"or 1792x1024 pixels." +msgstr "" + +#: community/apps/setting/models_provider/impl/openai_model_provider/credential/tti.py:29 +#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/tti.py:29 +msgid "" +" \n" +"By default, images are produced in standard quality, but with DALL·E 3 you " +"can set quality: \"hd\" to enhance detail. 
Square, standard quality images " +"are generated fastest.\n" +" " +msgstr "" + +#: community/apps/setting/models_provider/impl/openai_model_provider/credential/tti.py:44 +#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/tti.py:44 +msgid "" +"You can use DALL·E 3 to request 1 image at a time (requesting more images by " +"issuing parallel requests), or use DALL·E 2 with the n parameter to request " +"up to 10 images at a time." +msgstr "" + +#: community/apps/setting/models_provider/impl/openai_model_provider/openai_model_provider.py:35 +#: community/apps/setting/models_provider/impl/openai_model_provider/openai_model_provider.py:119 +#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/siliconCloud_model_provider.py:111 +msgid "The latest gpt-3.5-turbo, updated with OpenAI adjustments" +msgstr "" + +#: community/apps/setting/models_provider/impl/openai_model_provider/openai_model_provider.py:38 +msgid "Latest gpt-4, updated with OpenAI adjustments" +msgstr "" + +#: community/apps/setting/models_provider/impl/openai_model_provider/openai_model_provider.py:40 +#: community/apps/setting/models_provider/impl/openai_model_provider/openai_model_provider.py:99 +msgid "" +"The latest GPT-4o, cheaper and faster than gpt-4-turbo, updated with OpenAI " +"adjustments" +msgstr "" + +#: community/apps/setting/models_provider/impl/openai_model_provider/openai_model_provider.py:43 +#: community/apps/setting/models_provider/impl/openai_model_provider/openai_model_provider.py:102 +msgid "" +"The latest gpt-4o-mini, cheaper and faster than gpt-4o, updated with OpenAI " +"adjustments" +msgstr "" + +#: community/apps/setting/models_provider/impl/openai_model_provider/openai_model_provider.py:46 +msgid "The latest gpt-4-turbo, updated with OpenAI adjustments" +msgstr "" + +#: community/apps/setting/models_provider/impl/openai_model_provider/openai_model_provider.py:49 +msgid "The latest gpt-4-turbo-preview, updated with OpenAI 
adjustments" +msgstr "" + +#: community/apps/setting/models_provider/impl/openai_model_provider/openai_model_provider.py:53 +msgid "" +"gpt-3.5-turbo snapshot on January 25, 2024, supporting context length 16,385 " +"tokens" +msgstr "" + +#: community/apps/setting/models_provider/impl/openai_model_provider/openai_model_provider.py:57 +msgid "" +"gpt-3.5-turbo snapshot on November 6, 2023, supporting context length 16,385 " +"tokens" +msgstr "" + +#: community/apps/setting/models_provider/impl/openai_model_provider/openai_model_provider.py:61 +msgid "" +"[Legacy] gpt-3.5-turbo snapshot on June 13, 2023, will be deprecated on June " +"13, 2024" +msgstr "" + +#: community/apps/setting/models_provider/impl/openai_model_provider/openai_model_provider.py:65 +msgid "" +"gpt-4o snapshot on May 13, 2024, supporting context length 128,000 tokens" +msgstr "" + +#: community/apps/setting/models_provider/impl/openai_model_provider/openai_model_provider.py:69 +msgid "" +"gpt-4-turbo snapshot on April 9, 2024, supporting context length 128,000 " +"tokens" +msgstr "" + +#: community/apps/setting/models_provider/impl/openai_model_provider/openai_model_provider.py:72 +msgid "" +"gpt-4-turbo snapshot on January 25, 2024, supporting context length 128,000 " +"tokens" +msgstr "" + +#: community/apps/setting/models_provider/impl/openai_model_provider/openai_model_provider.py:75 +msgid "" +"gpt-4-turbo snapshot on November 6, 2023, supporting context length 128,000 " +"tokens" +msgstr "" + +#: community/apps/setting/models_provider/impl/qwen_model_provider/qwen_model_provider.py:63 +msgid "Tongyi Qianwen" +msgstr "" + +#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/model/reranker.py:46 +msgid "Please provide server URL" +msgstr "" + +#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/model/reranker.py:49 +msgid "Please provide the model" +msgstr "" + +#: 
community/apps/setting/models_provider/impl/siliconCloud_model_provider/model/reranker.py:52 +msgid "Please provide the API Key" +msgstr "" + +#: community/apps/setting/models_provider/impl/tencent_cloud_model_provider/tencent_cloud_model_provider.py:58 +msgid "Tencent Cloud" +msgstr "" + +#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/llm.py:41 +#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:88 +#, python-brace-format +msgid "{keys} is required" +msgstr "" + +#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:14 +msgid "painting style" +msgstr "Painting style" + +#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:14 +msgid "If not passed, the default value is 201 (Japanese anime style)" +msgstr "" + +#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:18 +msgid "Not limited to style" +msgstr "" + +#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:19 +msgid "ink painting" +msgstr "Ink painting" + +#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:20 +msgid "concept art" +msgstr "Concept art" + +#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:21 +msgid "Oil painting 1" +msgstr "" + +#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:22 +msgid "Oil Painting 2 (Van Gogh)" +msgstr "" + +#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:23 +msgid "watercolor painting" +msgstr "Watercolor painting" + +#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:24 +msgid "pixel art" +msgstr "Pixel art" + +#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:25 +msgid "impasto style" +msgstr "Impasto style" + +#: 
community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:26 +msgid "illustration" +msgstr "Illustration" + +#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:27 +msgid "paper cut style" +msgstr "Paper cut style" + +#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:28 +msgid "Impressionism 1 (Monet)" +msgstr "" + +#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:29 +msgid "Impressionism 2" +msgstr "" + +#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:31 +msgid "classical portraiture" +msgstr "Classical portraiture" + +#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:32 +msgid "black and white sketch" +msgstr "Black and white sketch" + +#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:33 +msgid "cyberpunk" +msgstr "Cyberpunk" + +#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:34 +msgid "science fiction style" +msgstr "Science fiction style" + +#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:35 +msgid "dark style" +msgstr "Dark style" + +#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:37 +msgid "vaporwave" +msgstr "Vaporwave" + +#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:38 +msgid "Japanese animation" +msgstr "" + +#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:39 +msgid "monster style" +msgstr "Monster style" + +#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:40 +msgid "Beautiful ancient style" +msgstr "" + +#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:41 +msgid "retro anime" +msgstr "Retro anime" + +#: 
community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:42 +msgid "Game cartoon hand drawing" +msgstr "" + +#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:43 +msgid "Universal realistic style" +msgstr "" + +#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:50 +msgid "Generate image resolution" +msgstr "" + +#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:50 +msgid "If not transmitted, the default value is 768:768." +msgstr "" + +#: community/apps/setting/models_provider/impl/tencent_model_provider/tencent_model_provider.py:38 +msgid "" +"The most effective version of the current hybrid model, the trillion-level " +"parameter scale MOE-32K long article model. Reaching the absolute leading " +"level on various benchmarks, with complex instructions and reasoning, " +"complex mathematical capabilities, support for function call, and " +"application focus optimization in fields such as multi-language translation, " +"finance, law, and medical care" +msgstr "" + +#: community/apps/setting/models_provider/impl/tencent_model_provider/tencent_model_provider.py:45 +msgid "" +"A better routing strategy is adopted to simultaneously alleviate the " +"problems of load balancing and expert convergence. For long articles, the " +"needle-in-a-haystack index reaches 99.9%" +msgstr "" + +#: community/apps/setting/models_provider/impl/tencent_model_provider/tencent_model_provider.py:51 +msgid "" +"Upgraded to MOE structure, the context window is 256k, leading many open " +"source models in multiple evaluation sets such as NLP, code, mathematics, " +"industry, etc." 
+msgstr "" + +#: community/apps/setting/models_provider/impl/tencent_model_provider/tencent_model_provider.py:57 +msgid "" +"Hunyuan's latest version of the role-playing model, a role-playing model " +"launched by Hunyuan's official fine-tuning training, is based on the Hunyuan " +"model combined with the role-playing scene data set for additional training, " +"and has better basic effects in role-playing scenes." +msgstr "" + +#: community/apps/setting/models_provider/impl/tencent_model_provider/tencent_model_provider.py:63 +msgid "" +"Hunyuan's latest MOE architecture FunctionCall model has been trained with " +"high-quality FunctionCall data and has a context window of 32K, leading in " +"multiple dimensions of evaluation indicators." +msgstr "" + +#: community/apps/setting/models_provider/impl/tencent_model_provider/tencent_model_provider.py:69 +msgid "" +"Hunyuan's latest code generation model, after training the base model with " +"200B high-quality code data, and iterating on high-quality SFT data for half " +"a year, the context long window length has been increased to 8K, and it " +"ranks among the top in the automatic evaluation indicators of code " +"generation in the five major languages; the five major languages In the " +"manual high-quality evaluation of 10 comprehensive code tasks that consider " +"all aspects, the performance is in the first echelon." +msgstr "" + +#: community/apps/setting/models_provider/impl/tencent_model_provider/tencent_model_provider.py:77 +msgid "" +"Tencent's Hunyuan Embedding interface can convert text into high-quality " +"vector data. The vector dimension is 1024 dimensions." 
+msgstr "" + +#: community/apps/setting/models_provider/impl/tencent_model_provider/tencent_model_provider.py:87 +msgid "Mixed element visual model" +msgstr "" + +#: community/apps/setting/models_provider/impl/tencent_model_provider/tencent_model_provider.py:94 +msgid "Hunyuan graph model" +msgstr "" + +#: community/apps/setting/models_provider/impl/tencent_model_provider/tencent_model_provider.py:125 +msgid "Tencent Hunyuan" +msgstr "" + +#: community/apps/setting/models_provider/impl/vllm_model_provider/vllm_model_provider.py:24 +#: community/apps/setting/models_provider/impl/vllm_model_provider/vllm_model_provider.py:42 +msgid "Facebook’s 125M parameter model" +msgstr "" + +#: community/apps/setting/models_provider/impl/vllm_model_provider/vllm_model_provider.py:25 +msgid "BAAI’s 7B parameter model" +msgstr "" + +#: community/apps/setting/models_provider/impl/vllm_model_provider/vllm_model_provider.py:26 +msgid "BAAI’s 13B parameter mode" +msgstr "" + +#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/tti.py:16 +msgid "" +"If the gap between width, height and 512 is too large, the picture rendering " +"effect will be poor and the probability of excessive delay will increase " +"significantly. 
Recommended ratio and corresponding width and height before " +"super score: width*height" +msgstr "" + +#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/tts.py:23 +#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/tts.py:29 +msgid "Universal female voice" +msgstr "" + +#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/tts.py:25 +msgid "Supernatural timbre-ZiZi 2.0" +msgstr "" + +#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/tts.py:26 +msgid "Supernatural timbre-ZiZi" +msgstr "" + +#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/tts.py:27 +msgid "Supernatural sound-Ranran 2.0" +msgstr "" + +#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/tts.py:28 +msgid "Supernatural sound-Ranran" +msgstr "" + +#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/tts.py:30 +msgid "Universal male voice" +msgstr "" + +#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/tts.py:33 +msgid "[0.2,3], the default is 1, usually one decimal place is enough" +msgstr "" + +#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/volcanic_engine_model_provider.py:39 +#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/volcanic_engine_model_provider.py:44 +#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/volcanic_engine_model_provider.py:88 +msgid "" +"The user goes to the model inference page of Volcano Ark to create an " +"inference access point. Here, you need to enter ep-xxxxxxxxxx-yyyy to call " +"it." 
+msgstr "" + +#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/volcanic_engine_model_provider.py:59 +msgid "Universal 2.0-Vincent Diagram" +msgstr "" + +#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/volcanic_engine_model_provider.py:64 +msgid "Universal 2.0Pro-Vincent Chart" +msgstr "" + +#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/volcanic_engine_model_provider.py:69 +msgid "Universal 1.4-Vincent Chart" +msgstr "" + +#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/volcanic_engine_model_provider.py:74 +msgid "Animation 1.3.0-Vincent Picture" +msgstr "" + +#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/volcanic_engine_model_provider.py:79 +msgid "Animation 1.3.1-Vincent Picture" +msgstr "" + +#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/volcanic_engine_model_provider.py:113 +msgid "volcano engine" +msgstr "Volcengine" + +#: community/apps/setting/models_provider/impl/wenxin_model_provider/credential/llm.py:51 +#, python-brace-format +msgid "{model_name} The model does not support" +msgstr "" + +#: community/apps/setting/models_provider/impl/wenxin_model_provider/wenxin_model_provider.py:24 +#: community/apps/setting/models_provider/impl/wenxin_model_provider/wenxin_model_provider.py:53 +msgid "" +"ERNIE-Bot-4 is a large language model independently developed by Baidu. It " +"covers massive Chinese data and has stronger capabilities in dialogue Q&A, " +"content creation and generation." +msgstr "" + +#: community/apps/setting/models_provider/impl/wenxin_model_provider/wenxin_model_provider.py:27 +msgid "" +"ERNIE-Bot is a large language model independently developed by Baidu. It " +"covers massive Chinese data and has stronger capabilities in dialogue Q&A, " +"content creation and generation." 
+msgstr "" + +#: community/apps/setting/models_provider/impl/wenxin_model_provider/wenxin_model_provider.py:30 +msgid "" +"ERNIE-Bot-turbo is a large language model independently developed by Baidu. " +"It covers massive Chinese data, has stronger capabilities in dialogue Q&A, " +"content creation and generation, and has a faster response speed." +msgstr "" + +#: community/apps/setting/models_provider/impl/wenxin_model_provider/wenxin_model_provider.py:33 +msgid "" +"BLOOMZ-7B is a well-known large language model in the industry. It was " +"developed and open sourced by BigScience and can output text in 46 languages " +"and 13 programming languages." +msgstr "" + +#: community/apps/setting/models_provider/impl/wenxin_model_provider/wenxin_model_provider.py:39 +msgid "" +"Llama-2-13b-chat was developed by Meta AI and is open source. It performs " +"well in scenarios such as coding, reasoning and knowledge application. " +"Llama-2-13b-chat is a native open source version with balanced performance " +"and effect, suitable for conversation scenarios." +msgstr "" + +#: community/apps/setting/models_provider/impl/wenxin_model_provider/wenxin_model_provider.py:42 +msgid "" +"Llama-2-70b-chat was developed by Meta AI and is open source. It performs " +"well in scenarios such as coding, reasoning, and knowledge application. " +"Llama-2-70b-chat is a native open source version with high-precision effects." +msgstr "" + +#: community/apps/setting/models_provider/impl/wenxin_model_provider/wenxin_model_provider.py:45 +msgid "" +"The Chinese enhanced version developed by the Qianfan team based on " +"Llama-2-7b has performed well on Chinese knowledge bases such as CMMLU and C-" +"EVAL." +msgstr "" + +#: community/apps/setting/models_provider/impl/wenxin_model_provider/wenxin_model_provider.py:49 +msgid "" +"Embedding-V1 is a text representation model based on Baidu Wenxin large " +"model technology. 
It can convert text into a vector form represented by " +"numerical values and can be used in text retrieval, information " +"recommendation, knowledge mining and other scenarios. Embedding-V1 provides " +"the Embeddings interface, which can generate corresponding vector " +"representations based on input content. You can call this interface to input " +"text into the model and obtain the corresponding vector representation for " +"subsequent text processing and analysis." +msgstr "" + +#: community/apps/setting/models_provider/impl/wenxin_model_provider/wenxin_model_provider.py:66 +msgid "Thousand sails large model" +msgstr "Qianfan Large Model" + +#: community/apps/setting/models_provider/impl/xf_model_provider/credential/image.py:42 +msgid "Please outline this picture" +msgstr "" + +#: community/apps/setting/models_provider/impl/xf_model_provider/credential/tts.py:15 +msgid "Speaker" +msgstr "" + +#: community/apps/setting/models_provider/impl/xf_model_provider/credential/tts.py:16 +msgid "" +"Speaker, optional value: Please go to the console to add a trial or purchase " +"speaker. After adding, the speaker parameter value will be displayed." 
+msgstr "" + +#: community/apps/setting/models_provider/impl/xf_model_provider/credential/tts.py:21 +msgid "iFlytek Xiaoyan" +msgstr "" + +#: community/apps/setting/models_provider/impl/xf_model_provider/credential/tts.py:22 +msgid "iFlytek Xujiu" +msgstr "" + +#: community/apps/setting/models_provider/impl/xf_model_provider/credential/tts.py:23 +msgid "iFlytek Xiaoping" +msgstr "" + +#: community/apps/setting/models_provider/impl/xf_model_provider/credential/tts.py:24 +msgid "iFlytek Xiaojing" +msgstr "" + +#: community/apps/setting/models_provider/impl/xf_model_provider/credential/tts.py:25 +msgid "iFlytek Xuxiaobao" +msgstr "" + +#: community/apps/setting/models_provider/impl/xf_model_provider/credential/tts.py:28 +msgid "Speech speed, optional value: [0-100], default is 50" +msgstr "" + +#: community/apps/setting/models_provider/impl/xf_model_provider/xf_model_provider.py:39 +#: community/apps/setting/models_provider/impl/xf_model_provider/xf_model_provider.py:50 +msgid "Chinese and English recognition" +msgstr "" + +#: community/apps/setting/models_provider/impl/xf_model_provider/xf_model_provider.py:66 +msgid "iFlytek Spark" +msgstr "" + +#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/tti.py:15 +msgid "" +"The image generation endpoint allows you to create raw images based on text " +"prompts. The dimensions of the image can be 1024x1024, 1024x1792, or " +"1792x1024 pixels." +msgstr "" + +#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/tti.py:29 +msgid "" +"By default, images are generated in standard quality, you can set quality: " +"\"hd\" to enhance detail. Square, standard quality images are generated " +"fastest." +msgstr "" + +#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/tti.py:42 +msgid "" +"You can request 1 image at a time (requesting more images by making parallel " +"requests), or up to 10 images at a time using the n parameter." 
+msgstr "" + +#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/tts.py:20 +msgid "Chinese female" +msgstr "" + +#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/tts.py:21 +msgid "Chinese male" +msgstr "" + +#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/tts.py:22 +msgid "Japanese male" +msgstr "" + +#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/tts.py:23 +msgid "Cantonese female" +msgstr "" + +#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/tts.py:24 +msgid "English female" +msgstr "" + +#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/tts.py:25 +msgid "English male" +msgstr "" + +#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/tts.py:26 +msgid "Korean female" +msgstr "" + +#: community/apps/setting/models_provider/impl/xinference_model_provider/xinference_model_provider.py:37 +msgid "" +"Code Llama is a language model specifically designed for code generation." +msgstr "" + +#: community/apps/setting/models_provider/impl/xinference_model_provider/xinference_model_provider.py:44 +msgid "" +" \n" +"Code Llama Instruct is a fine-tuned version of Code Llama's instructions, " +"designed to perform specific tasks.\n" +" " +msgstr "" + +#: community/apps/setting/models_provider/impl/xinference_model_provider/xinference_model_provider.py:53 +msgid "" +"Code Llama Python is a language model specifically designed for Python code " +"generation." +msgstr "" + +#: community/apps/setting/models_provider/impl/xinference_model_provider/xinference_model_provider.py:60 +msgid "" +"CodeQwen 1.5 is a language model for code generation with high performance." 
+msgstr "" + +#: community/apps/setting/models_provider/impl/xinference_model_provider/xinference_model_provider.py:67 +msgid "CodeQwen 1.5 Chat is a chat model version of CodeQwen 1.5." +msgstr "" + +#: community/apps/setting/models_provider/impl/xinference_model_provider/xinference_model_provider.py:74 +msgid "Deepseek is a large-scale language model with 13 billion parameters." +msgstr "" + +#: community/apps/setting/models_provider/impl/zhipu_model_provider/credential/tti.py:16 +msgid "" +"Image size, only cogview-3-plus supports this parameter. Optional range: " +"[1024x1024,768x1344,864x1152,1344x768,1152x864,1440x720,720x1440], the " +"default is 1024x1024." +msgstr "" + +#: community/apps/setting/models_provider/impl/zhipu_model_provider/zhipu_model_provider.py:34 +msgid "" +"Have strong multi-modal understanding capabilities. Able to understand up to " +"five images simultaneously and supports video content understanding" +msgstr "" + +#: community/apps/setting/models_provider/impl/zhipu_model_provider/zhipu_model_provider.py:37 +msgid "" +"Focus on single picture understanding. Suitable for scenarios requiring " +"efficient image analysis" +msgstr "" + +#: community/apps/setting/models_provider/impl/zhipu_model_provider/zhipu_model_provider.py:40 +msgid "" +"Focus on single picture understanding. Suitable for scenarios requiring " +"efficient image analysis (free)" +msgstr "" + +#: community/apps/setting/models_provider/impl/zhipu_model_provider/zhipu_model_provider.py:46 +msgid "" +"Quickly and accurately generate images based on user text descriptions. 
" +"Resolution supports 1024x1024" +msgstr "" + +#: community/apps/setting/models_provider/impl/zhipu_model_provider/zhipu_model_provider.py:49 +msgid "" +"Generate high-quality images based on user text descriptions, supporting " +"multiple image sizes" +msgstr "" + +#: community/apps/setting/models_provider/impl/zhipu_model_provider/zhipu_model_provider.py:52 +msgid "" +"Generate high-quality images based on user text descriptions, supporting " +"multiple image sizes (free)" +msgstr "" + +#: community/apps/setting/models_provider/impl/zhipu_model_provider/zhipu_model_provider.py:75 +msgid "zhipu AI" +msgstr "Zhipu AI" + +#: community/apps/setting/serializers/model_apply_serializers.py:32 +#: community/apps/setting/serializers/model_apply_serializers.py:37 +msgid "vector text" +msgstr "Vector text" + +#: community/apps/setting/serializers/model_apply_serializers.py:33 +msgid "vector text list" +msgstr "Vector text list" + +#: community/apps/setting/serializers/model_apply_serializers.py:41 +msgid "text" +msgstr "Text" + +#: community/apps/setting/serializers/model_apply_serializers.py:42 +msgid "metadata" +msgstr "Metadata" + +#: community/apps/setting/serializers/model_apply_serializers.py:47 +msgid "query" +msgstr "Query" + +#: community/apps/setting/serializers/provider_serializers.py:79 +#: community/apps/setting/serializers/provider_serializers.py:83 +#: community/apps/setting/serializers/provider_serializers.py:130 +#: community/apps/setting/serializers/provider_serializers.py:176 +#: community/apps/setting/serializers/provider_serializers.py:190 +#: community/apps/setting/swagger_api/provide_api.py:30 +#: community/apps/setting/swagger_api/provide_api.py:54 +#: community/apps/setting/swagger_api/provide_api.py:55 +#: community/apps/setting/swagger_api/provide_api.py:87 +#: community/apps/setting/swagger_api/provide_api.py:88 +#: community/apps/setting/swagger_api/provide_api.py:170 +msgid "model name" +msgstr "Model Name" + +#: 
community/apps/setting/serializers/provider_serializers.py:81 +#: community/apps/setting/serializers/provider_serializers.py:132 +#: community/apps/setting/serializers/provider_serializers.py:142 +#: community/apps/setting/serializers/provider_serializers.py:180 +#: community/apps/setting/swagger_api/provide_api.py:26 +#: community/apps/setting/swagger_api/provide_api.py:51 +#: community/apps/setting/swagger_api/provide_api.py:52 +#: community/apps/setting/swagger_api/provide_api.py:84 +#: community/apps/setting/swagger_api/provide_api.py:85 +#: community/apps/setting/swagger_api/provide_api.py:134 +#: community/apps/setting/swagger_api/provide_api.py:165 +msgid "model type" +msgstr "Model Type" + +#: community/apps/setting/serializers/provider_serializers.py:85 +#: community/apps/setting/serializers/provider_serializers.py:178 +#: community/apps/setting/serializers/provider_serializers.py:402 +#: community/apps/setting/swagger_api/provide_api.py:35 +#: community/apps/setting/swagger_api/provide_api.py:57 +#: community/apps/setting/swagger_api/provide_api.py:58 +#: community/apps/setting/swagger_api/provide_api.py:79 +#: community/apps/setting/swagger_api/provide_api.py:80 +#: community/apps/setting/swagger_api/provide_api.py:105 +#: community/apps/setting/swagger_api/provide_api.py:129 +#: community/apps/setting/swagger_api/provide_api.py:160 +#: community/apps/setting/swagger_api/provide_api.py:179 +msgid "provider" +msgstr "Provider" + +#: community/apps/setting/serializers/provider_serializers.py:87 +#: community/apps/setting/serializers/provider_serializers.py:134 +#: community/apps/setting/serializers/provider_serializers.py:182 +msgid "permission type" +msgstr "Permission Type" + +#: community/apps/setting/serializers/provider_serializers.py:89 +msgid "create user" +msgstr "Create User" + +#: community/apps/setting/serializers/provider_serializers.py:138 +#: community/apps/setting/serializers/provider_serializers.py:186 +msgid "permissions only 
supportPUBLIC|PRIVATE" +msgstr "Permissions only support PUBLIC|PRIVATE" + +#: community/apps/setting/serializers/provider_serializers.py:145 +#: community/apps/setting/serializers/provider_serializers.py:196 +msgid "certification information" +msgstr "Certification information" + +#: community/apps/setting/serializers/provider_serializers.py:193 +msgid "parameter configuration" +msgstr "Parameter configuration" + +#: community/apps/setting/serializers/provider_serializers.py:202 +#, python-brace-format +msgid "Model name【{model_name}】already exists" +msgstr "" + +#: community/apps/setting/serializers/system_setting.py:29 +#: community/apps/setting/swagger_api/system_setting.py:25 +#: community/apps/setting/swagger_api/system_setting.py:26 +#: community/apps/setting/swagger_api/system_setting.py:57 +#: community/apps/setting/swagger_api/system_setting.py:58 +msgid "SMTP host" +msgstr "" + +#: community/apps/setting/serializers/system_setting.py:30 +#: community/apps/setting/swagger_api/system_setting.py:28 +#: community/apps/setting/swagger_api/system_setting.py:29 +#: community/apps/setting/swagger_api/system_setting.py:60 +#: community/apps/setting/swagger_api/system_setting.py:61 +msgid "SMTP port" +msgstr "" + +#: community/apps/setting/serializers/system_setting.py:31 +#: community/apps/setting/serializers/system_setting.py:35 +#: community/apps/setting/swagger_api/system_setting.py:31 +#: community/apps/setting/swagger_api/system_setting.py:32 +#: community/apps/setting/swagger_api/system_setting.py:43 +#: community/apps/setting/swagger_api/system_setting.py:44 +#: community/apps/setting/swagger_api/system_setting.py:63 +#: community/apps/setting/swagger_api/system_setting.py:64 +#: community/apps/setting/swagger_api/system_setting.py:75 +#: community/apps/setting/swagger_api/system_setting.py:76 +msgid "Sender's email" +msgstr "" + +#: community/apps/setting/serializers/system_setting.py:32 +#: community/apps/setting/swagger_api/system_setting.py:34 +#: 
community/apps/setting/swagger_api/system_setting.py:35 +#: community/apps/setting/swagger_api/system_setting.py:66 +#: community/apps/setting/swagger_api/system_setting.py:67 +#: community/apps/users/serializers/user_serializers.py:72 +#: community/apps/users/serializers/user_serializers.py:112 +#: community/apps/users/serializers/user_serializers.py:143 +#: community/apps/users/serializers/user_serializers.py:211 +#: community/apps/users/serializers/user_serializers.py:293 +#: community/apps/users/serializers/user_serializers.py:346 +#: community/apps/users/serializers/user_serializers.py:671 +#: community/apps/users/serializers/user_serializers.py:703 +#: community/apps/users/serializers/user_serializers.py:704 +#: community/apps/users/serializers/user_serializers.py:743 +#: community/apps/users/serializers/user_serializers.py:763 +#: community/apps/users/serializers/user_serializers.py:764 +#: community/apps/users/views/user.py:109 +#: community/apps/users/views/user.py:110 +#: community/apps/users/views/user.py:111 +#: community/apps/users/views/user.py:112 +msgid "Password" +msgstr "" + +#: community/apps/setting/serializers/system_setting.py:33 +#: community/apps/setting/swagger_api/system_setting.py:37 +#: community/apps/setting/swagger_api/system_setting.py:38 +#: community/apps/setting/swagger_api/system_setting.py:69 +#: community/apps/setting/swagger_api/system_setting.py:70 +msgid "Whether to enable TLS" +msgstr "" + +#: community/apps/setting/serializers/system_setting.py:34 +#: community/apps/setting/swagger_api/system_setting.py:40 +#: community/apps/setting/swagger_api/system_setting.py:41 +#: community/apps/setting/swagger_api/system_setting.py:72 +#: community/apps/setting/swagger_api/system_setting.py:73 +msgid "Whether to enable SSL" +msgstr "" + +#: community/apps/setting/serializers/system_setting.py:49 +msgid "Email verification failed" +msgstr "" + +#: community/apps/setting/serializers/team_serializers.py:43 +#: 
community/apps/users/serializers/user_serializers.py:70 +#: community/apps/users/serializers/user_serializers.py:111 +#: community/apps/users/serializers/user_serializers.py:136 +#: community/apps/users/serializers/user_serializers.py:209 +#: community/apps/users/serializers/user_serializers.py:470 +#: community/apps/users/serializers/user_serializers.py:493 +#: community/apps/users/serializers/user_serializers.py:518 +#: community/apps/users/serializers/user_serializers.py:519 +#: community/apps/users/serializers/user_serializers.py:581 +#: community/apps/users/serializers/user_serializers.py:627 +#: community/apps/users/serializers/user_serializers.py:628 +#: community/apps/users/serializers/user_serializers.py:663 +#: community/apps/users/serializers/user_serializers.py:700 +#: community/apps/users/serializers/user_serializers.py:701 +msgid "Username" +msgstr "" + +#: community/apps/setting/serializers/team_serializers.py:44 +#: community/apps/users/serializers/user_serializers.py:131 +#: community/apps/users/serializers/user_serializers.py:210 +#: community/apps/users/serializers/user_serializers.py:226 +#: community/apps/users/serializers/user_serializers.py:256 +#: community/apps/users/serializers/user_serializers.py:287 +#: community/apps/users/serializers/user_serializers.py:343 +#: community/apps/users/serializers/user_serializers.py:356 +#: community/apps/users/serializers/user_serializers.py:438 +#: community/apps/users/serializers/user_serializers.py:471 +#: community/apps/users/serializers/user_serializers.py:494 +#: community/apps/users/serializers/user_serializers.py:520 +#: community/apps/users/serializers/user_serializers.py:582 +#: community/apps/users/serializers/user_serializers.py:629 +#: community/apps/users/serializers/user_serializers.py:658 +#: community/apps/users/serializers/user_serializers.py:702 +#: community/apps/users/serializers/user_serializers.py:713 +#: community/apps/users/serializers/user_serializers.py:734 +msgid "Email" 
+msgstr "" + +#: community/apps/setting/serializers/team_serializers.py:47 +#: community/apps/setting/serializers/team_serializers.py:148 +#: community/apps/setting/serializers/team_serializers.py:256 +msgid "team id" +msgstr "Team ID" + +#: community/apps/setting/serializers/team_serializers.py:48 +#: community/apps/setting/serializers/team_serializers.py:254 +#: community/apps/setting/serializers/team_serializers.py:324 +msgid "member id" +msgstr "Member ID" + +#: community/apps/setting/serializers/team_serializers.py:54 +msgid "use" +msgstr "Use" + +#: community/apps/setting/serializers/team_serializers.py:55 +msgid "manage" +msgstr "Manage" + +#: community/apps/setting/serializers/team_serializers.py:60 +msgid "Operation permissions USE, MANAGE permissions" +msgstr "" + +#: community/apps/setting/serializers/team_serializers.py:63 +msgid "use permission" +msgstr "Use permission" + +#: community/apps/setting/serializers/team_serializers.py:64 +msgid "use permission True|False" +msgstr "Use permission True|False" + +#: community/apps/setting/serializers/team_serializers.py:66 +msgid "manage permission" +msgstr "Manage permission" + +#: community/apps/setting/serializers/team_serializers.py:67 +msgid "manage permission True|False" +msgstr "Manage permission True|False" + +#: community/apps/setting/serializers/team_serializers.py:73 +msgid "target id" +msgstr "Target ID" + +#: community/apps/setting/serializers/team_serializers.py:82 +#: community/apps/setting/serializers/team_serializers.py:83 +msgid "dataset id/application id" +msgstr "Dataset ID/Application ID" + +#: community/apps/setting/serializers/team_serializers.py:105 +msgid "Non-existent application|knowledge base id[" +msgstr "" + +#: community/apps/setting/serializers/team_serializers.py:139 +#: community/apps/setting/serializers/team_serializers.py:140 +msgid "Permission data" +msgstr "" + +#: community/apps/setting/serializers/team_serializers.py:157 +#: 
community/apps/setting/serializers/team_serializers.py:158 +msgid "user id list" +msgstr "User ID list" + +#: community/apps/setting/serializers/team_serializers.py:168 +#: community/apps/setting/serializers/team_serializers.py:169 +msgid "Username or email" +msgstr "" + +#: community/apps/setting/serializers/team_serializers.py:217 +msgid "Username or email is required" +msgstr "" + +#: community/apps/setting/serializers/team_serializers.py:221 +#: community/apps/users/serializers/user_serializers.py:800 +msgid "User does not exist" +msgstr "" + +#: community/apps/setting/serializers/team_serializers.py:224 +msgid "The current members already exist in the team, do not add them again." +msgstr "" + +#: community/apps/setting/serializers/team_serializers.py:248 +msgid "member list" +msgstr "Member List" + +#: community/apps/setting/serializers/team_serializers.py:263 +msgid "The member does not exist, please add a member first" +msgstr "" + +#: community/apps/setting/serializers/team_serializers.py:297 +msgid "Administrator rights do not allow modification" +msgstr "" + +#: community/apps/setting/serializers/team_serializers.py:311 +msgid "Unable to remove team admin" +msgstr "" + +#: community/apps/setting/serializers/valid_serializers.py:32 +#: community/apps/users/serializers/user_serializers.py:190 +#: community/apps/users/serializers/user_serializers.py:777 +msgid "" +"The community version supports up to 2 users. If you need more users, please " +"contact us (https://fit2cloud.com/)." 
+msgstr "" + +#: community/apps/setting/serializers/valid_serializers.py:41 +#: community/apps/setting/swagger_api/valid_api.py:27 +msgid "check quantity" +msgstr "Check quantity" + +#: community/apps/setting/swagger_api/provide_api.py:43 +#: community/apps/setting/swagger_api/provide_api.py:44 +#: community/apps/setting/swagger_api/provide_api.py:71 +#: community/apps/setting/swagger_api/provide_api.py:72 +#: community/apps/setting/swagger_api/provide_api.py:190 +#: community/apps/setting/swagger_api/provide_api.py:191 +msgid "parameters required to call the function" +msgstr "Parameters required to call the function" + +#: community/apps/setting/swagger_api/provide_api.py:60 +#: community/apps/setting/swagger_api/provide_api.py:61 +#: community/apps/setting/swagger_api/provide_api.py:90 +#: community/apps/setting/swagger_api/provide_api.py:91 +msgid "model certificate information" +msgstr "Model certificate information" + +#: community/apps/setting/swagger_api/provide_api.py:114 +#: community/apps/setting/swagger_api/provide_api.py:115 +msgid "model type description" +msgstr "Model type description" + +#: community/apps/setting/swagger_api/provide_api.py:115 +msgid "large language model" +msgstr "LLM" + +#: community/apps/setting/swagger_api/provide_api.py:116 +#: community/apps/setting/swagger_api/provide_api.py:117 +#: community/apps/setting/swagger_api/provide_api.py:147 +#: community/apps/setting/swagger_api/provide_api.py:148 +msgid "model type value" +msgstr "Model type value" + +#: community/apps/setting/swagger_api/provide_api.py:145 +#: community/apps/setting/swagger_api/provide_api.py:146 +msgid "model description" +msgstr "Model description" + +#: community/apps/setting/swagger_api/provide_api.py:184 +msgid "function that needs to be executed" +msgstr "Function that needs to be executed" + +#: community/apps/setting/swagger_api/system_setting.py:19 +#: community/apps/setting/swagger_api/system_setting.py:20 +#: 
community/apps/setting/swagger_api/system_setting.py:51 +#: community/apps/setting/swagger_api/system_setting.py:52 +msgid "Email related parameters" +msgstr "" + +#: community/apps/setting/swagger_api/valid_api.py:22 +msgid "Verification type: application|dataset|user" +msgstr "" + +#: community/apps/setting/views/Team.py:27 +#: community/apps/setting/views/Team.py:28 +msgid "Get a list of team members" +msgstr "" + +#: community/apps/setting/views/Team.py:30 +#: community/apps/setting/views/Team.py:40 +#: community/apps/setting/views/Team.py:54 +#: community/apps/setting/views/Team.py:68 +#: community/apps/setting/views/Team.py:80 +#: community/apps/setting/views/Team.py:92 +#: community/apps/users/serializers/user_serializers.py:198 +#: community/apps/users/serializers/user_serializers.py:791 +msgid "team" +msgstr "Team" + +#: community/apps/setting/views/Team.py:37 +#: community/apps/setting/views/Team.py:38 +msgid "Add member" +msgstr "" + +#: community/apps/setting/views/Team.py:51 +#: community/apps/setting/views/Team.py:52 +msgid "Add members in batches" +msgstr "" + +#: community/apps/setting/views/Team.py:65 +#: community/apps/setting/views/Team.py:66 +msgid "Get team member permissions" +msgstr "" + +#: community/apps/setting/views/Team.py:76 +#: community/apps/setting/views/Team.py:77 +msgid "Update team member permissions" +msgstr "" + +#: community/apps/setting/views/Team.py:89 +#: community/apps/setting/views/Team.py:90 +msgid "Remove member" +msgstr "" + +#: community/apps/setting/views/model.py:30 +#: community/apps/setting/views/model.py:31 +msgid "Create model" +msgstr "" + +#: community/apps/setting/views/model.py:33 +#: community/apps/setting/views/model.py:45 +#: community/apps/setting/views/model.py:57 +#: community/apps/setting/views/model.py:74 +#: community/apps/setting/views/model.py:88 +#: community/apps/setting/views/model.py:103 +#: community/apps/setting/views/model.py:114 +#: community/apps/setting/views/model.py:129 +#: 
community/apps/setting/views/model.py:141 +#: community/apps/setting/views/model.py:151 +#: community/apps/setting/views/model.py:170 +#: community/apps/setting/views/model.py:180 +#: community/apps/setting/views/model.py:204 +#: community/apps/setting/views/model.py:219 +#: community/apps/setting/views/model.py:239 +#: community/apps/setting/views/model.py:257 +#: community/apps/setting/views/model_apply.py:26 +#: community/apps/setting/views/model_apply.py:36 +#: community/apps/setting/views/model_apply.py:46 +msgid "model" +msgstr "Model Settings" + +#: community/apps/setting/views/model.py:42 +#: community/apps/setting/views/model.py:43 +msgid "Download model, trial only with Ollama platform" +msgstr "" + +#: community/apps/setting/views/model.py:54 +#: community/apps/setting/views/model.py:55 +msgid "Get model list" +msgstr "" + +#: community/apps/setting/views/model.py:71 +#: community/apps/setting/views/model.py:73 +msgid "" +"Query model meta information, this interface does not carry authentication " +"information" +msgstr "" + +#: community/apps/setting/views/model.py:86 +#: community/apps/setting/views/model.py:87 +msgid "Pause model download" +msgstr "" + +#: community/apps/setting/views/model.py:111 +#: community/apps/setting/views/model.py:112 +msgid "Save model parameter form" +msgstr "" + +#: community/apps/setting/views/model.py:126 +#: community/apps/setting/views/model.py:127 +msgid "Update model" +msgstr "" + +#: community/apps/setting/views/model.py:138 +#: community/apps/setting/views/model.py:139 +msgid "Delete model" +msgstr "" + +#: community/apps/setting/views/model.py:149 +#: community/apps/setting/views/model.py:150 +msgid "Query model details" +msgstr "" + +#: community/apps/setting/views/model.py:166 +#: community/apps/setting/views/model.py:167 +msgid "Call the supplier function to obtain form data" +msgstr "" + +#: community/apps/setting/views/model.py:178 +#: community/apps/setting/views/model.py:179 +msgid "Get a list of model 
suppliers" +msgstr "" + +#: community/apps/setting/views/model.py:200 +#: community/apps/setting/views/model.py:201 +msgid "Get a list of model types" +msgstr "" + +#: community/apps/setting/views/model.py:215 +#: community/apps/setting/views/model.py:216 +#: community/apps/setting/views/model.py:236 +#: community/apps/setting/views/model.py:254 +#: community/apps/setting/views/model.py:255 +msgid "Get the model creation form" +msgstr "" + +#: community/apps/setting/views/model.py:235 +msgid "Get model default parameters" +msgstr "" + +#: community/apps/setting/views/model_apply.py:23 +#: community/apps/setting/views/model_apply.py:24 +#: community/apps/setting/views/model_apply.py:33 +#: community/apps/setting/views/model_apply.py:34 +msgid "Vectorization documentation" +msgstr "" + +#: community/apps/setting/views/model_apply.py:43 +#: community/apps/setting/views/model_apply.py:44 +msgid "Reorder documents" +msgstr "" + +#: community/apps/setting/views/system_setting.py:29 +#: community/apps/setting/views/system_setting.py:30 +msgid "Create or update email settings" +msgstr "" + +#: community/apps/setting/views/system_setting.py:31 +#: community/apps/setting/views/system_setting.py:45 +#: community/apps/setting/views/system_setting.py:57 +msgid "Email settings" +msgstr "" + +#: community/apps/setting/views/system_setting.py:41 +#: community/apps/setting/views/system_setting.py:42 +msgid "Test email settings" +msgstr "" + +#: community/apps/setting/views/system_setting.py:54 +#: community/apps/setting/views/system_setting.py:55 +msgid "Get email settings" +msgstr "" + +#: community/apps/setting/views/valid.py:26 +#: community/apps/setting/views/valid.py:27 +msgid "Get verification results" +msgstr "" + +#: community/apps/users/serializers/user_serializers.py:62 +#: community/apps/users/serializers/user_serializers.py:63 +msgid "System version number" +msgstr "" + +#: community/apps/users/serializers/user_serializers.py:141 +#: 
community/apps/users/serializers/user_serializers.py:669 +msgid "Username must be 6-20 characters long" +msgstr "" + +#: community/apps/users/serializers/user_serializers.py:148 +#: community/apps/users/serializers/user_serializers.py:156 +#: community/apps/users/serializers/user_serializers.py:676 +#: community/apps/users/serializers/user_serializers.py:748 +msgid "" +"The password must be 6-20 characters long and must be a combination of " +"letters, numbers, and special characters." +msgstr "" + +#: community/apps/users/serializers/user_serializers.py:151 +#: community/apps/users/serializers/user_serializers.py:212 +#: community/apps/users/serializers/user_serializers.py:213 +#: community/apps/users/serializers/user_serializers.py:300 +#: community/apps/users/serializers/user_serializers.py:347 +#: community/apps/users/serializers/user_serializers.py:348 +#: community/apps/users/serializers/user_serializers.py:749 +#: community/apps/users/serializers/user_serializers.py:765 +#: community/apps/users/serializers/user_serializers.py:766 +msgid "Confirm Password" +msgstr "" + +#: community/apps/users/serializers/user_serializers.py:158 +#: community/apps/users/serializers/user_serializers.py:214 +#: community/apps/users/serializers/user_serializers.py:215 +#: community/apps/users/serializers/user_serializers.py:229 +#: community/apps/users/serializers/user_serializers.py:257 +#: community/apps/users/serializers/user_serializers.py:258 +#: community/apps/users/serializers/user_serializers.py:291 +#: community/apps/users/serializers/user_serializers.py:344 +#: community/apps/users/serializers/user_serializers.py:345 +#: community/apps/users/views/user.py:107 +#: community/apps/users/views/user.py:108 +msgid "Verification code" +msgstr "" + +#: community/apps/users/serializers/user_serializers.py:232 +#: community/apps/users/serializers/user_serializers.py:259 +#: community/apps/users/serializers/user_serializers.py:360 +#: 
community/apps/users/serializers/user_serializers.py:439 +msgid "Type" +msgstr "" + +#: community/apps/users/serializers/user_serializers.py:236 +#: community/apps/users/serializers/user_serializers.py:362 +msgid "The type only supports register|reset_password" +msgstr "" + +#: community/apps/users/serializers/user_serializers.py:266 +msgid "Is it successful" +msgstr "" + +#: community/apps/users/serializers/user_serializers.py:268 +msgid "Error message" +msgstr "" + +#: community/apps/users/serializers/user_serializers.py:280 +msgid "language only support:" +msgstr "" + +#: community/apps/users/serializers/user_serializers.py:298 +#: community/apps/users/serializers/user_serializers.py:305 +#: community/apps/users/serializers/user_serializers.py:754 +msgid "" +"The confirmation password must be 6-20 characters long and must be a " +"combination of letters, numbers, and special characters." +msgstr "" + +#: community/apps/users/serializers/user_serializers.py:380 +#, python-brace-format +msgid "Do not send emails again within {seconds} seconds" +msgstr "" + +#: community/apps/users/serializers/user_serializers.py:410 +msgid "" +"The email service has not been set up. Please contact the administrator to " +"set up the email service in [Email Settings]." 
+msgstr "" + +#: community/apps/users/serializers/user_serializers.py:421 +#, python-brace-format +msgid "【Intelligent knowledge base question and answer system-{action}】" +msgstr "" + +#: community/apps/users/serializers/user_serializers.py:422 +#: community/apps/users/views/user.py:194 +#: community/apps/users/views/user.py:195 +msgid "User registration" +msgstr "" + +#: community/apps/users/serializers/user_serializers.py:422 +#: community/apps/users/views/user.py:212 +#: community/apps/users/views/user.py:213 +#: community/apps/users/views/user.py:301 +#: community/apps/users/views/user.py:302 +msgid "Change password" +msgstr "" + +#: community/apps/users/serializers/user_serializers.py:474 +#: community/apps/users/serializers/user_serializers.py:475 +msgid "Permissions" +msgstr "" + +#: community/apps/users/serializers/user_serializers.py:509 +#: community/apps/users/serializers/user_serializers.py:610 +#: community/apps/users/serializers/user_serializers.py:618 +msgid "Email or username" +msgstr "" + +#: community/apps/users/serializers/user_serializers.py:560 +msgid "All" +msgstr "" + +#: community/apps/users/serializers/user_serializers.py:561 +msgid "Me" +msgstr "Mine" + +#: community/apps/users/serializers/user_serializers.py:583 +#: community/apps/users/serializers/user_serializers.py:680 +#: community/apps/users/serializers/user_serializers.py:705 +#: community/apps/users/serializers/user_serializers.py:719 +#: community/apps/users/serializers/user_serializers.py:736 +msgid "Phone" +msgstr "" + +#: community/apps/users/serializers/user_serializers.py:587 +msgid "Source" +msgstr "" + +#: community/apps/users/serializers/user_serializers.py:588 +#: community/apps/users/serializers/user_serializers.py:678 +#: community/apps/users/serializers/user_serializers.py:706 +#: community/apps/users/serializers/user_serializers.py:717 +#: community/apps/users/serializers/user_serializers.py:735 +msgid "Name" +msgstr "" + +#: 
community/apps/users/serializers/user_serializers.py:727 +msgid "Email is already in use" +msgstr "" + +#: community/apps/users/serializers/user_serializers.py:808 +msgid "Unable to delete administrator" +msgstr "" + +#: community/apps/users/serializers/user_serializers.py:845 +msgid "Cannot modify administrator status" +msgstr "" + +#: community/apps/users/views/user.py:37 community/apps/users/views/user.py:38 +msgid "Get MaxKB related information" +msgstr "" + +#: community/apps/users/views/user.py:40 +msgid "System parameters" +msgstr "" + +#: community/apps/users/views/user.py:50 community/apps/users/views/user.py:51 +msgid "Get current user information" +msgstr "" + +#: community/apps/users/views/user.py:63 community/apps/users/views/user.py:64 +msgid "Get user list" +msgstr "" + +#: community/apps/users/views/user.py:67 community/apps/users/views/user.py:90 +#: community/apps/users/views/user.py:116 +#: community/apps/users/views/user.py:136 +#: community/apps/users/views/user.py:152 +#: community/apps/users/views/user.py:178 +#: community/apps/users/views/user.py:199 +#: community/apps/users/views/user.py:217 +#: community/apps/users/views/user.py:234 +#: community/apps/users/views/user.py:249 +#: community/apps/users/views/user.py:373 +msgid "User" +msgstr "" + +#: community/apps/users/views/user.py:79 community/apps/users/views/user.py:80 +msgid "Switch Language" +msgstr "" + +#: community/apps/users/views/user.py:101 +#: community/apps/users/views/user.py:102 +msgid "Modify current user password" +msgstr "" + +#: community/apps/users/views/user.py:125 +msgid "Failed to change password" +msgstr "" + +#: community/apps/users/views/user.py:133 +#: community/apps/users/views/user.py:134 +msgid "Send email to current user" +msgstr "" + +#: community/apps/users/views/user.py:149 +#: community/apps/users/views/user.py:150 +msgid "Sign out" +msgstr "" + +#: community/apps/users/views/user.py:205 +msgid "Registration successful" +msgstr "" + +#: 
community/apps/users/views/user.py:229 +#: community/apps/users/views/user.py:230 +msgid "Check whether the verification code is correct" +msgstr "" + +#: community/apps/users/views/user.py:244 +#: community/apps/users/views/user.py:245 +msgid "Send email" +msgstr "" + +#: community/apps/users/views/user.py:262 +#: community/apps/users/views/user.py:263 +msgid "Add user" +msgstr "" + +#: community/apps/users/views/user.py:266 +#: community/apps/users/views/user.py:282 +#: community/apps/users/views/user.py:306 +#: community/apps/users/views/user.py:324 +#: community/apps/users/views/user.py:338 +#: community/apps/users/views/user.py:354 +msgid "User management" +msgstr "" + +#: community/apps/users/views/user.py:280 +#: community/apps/users/views/user.py:281 +msgid "Get user paginated list" +msgstr "" + +#: community/apps/users/views/user.py:320 +#: community/apps/users/views/user.py:321 +msgid "Delete user" +msgstr "" + +#: community/apps/users/views/user.py:334 +#: community/apps/users/views/user.py:335 +msgid "Get user information" +msgstr "" + +#: community/apps/users/views/user.py:349 +#: community/apps/users/views/user.py:350 +msgid "Update user information" +msgstr "" + +#: community/apps/users/views/user.py:369 +#: community/apps/users/views/user.py:370 +msgid "Get user list by type" +msgstr "" + +msgid "Fail" +msgstr "Fail" + +msgid "Menu" +msgstr "Operate menu" + +msgid "Operate" +msgstr "Operate" + +msgid "Operate user" +msgstr "Operate user" + +msgid "Ip Address" +msgstr "IP Address" + +msgid "API Details" +msgstr "API Details" + +msgid "Operate Time" +msgstr "Operate Time" + +msgid "System Settings/API Key" +msgstr "System API Key" + +msgid "Appearance Settings" +msgstr "Appearance Settings" + +msgid "Conversation Log" +msgstr "" + +msgid "login authentication" +msgstr "Login Authentication" + +msgid "Paragraph" +msgstr "" + +msgid "Batch generate related" +msgstr "Batch generate related problems" + +msgid "Application access" +msgstr "" + +msgid 
"Add internal function" +msgstr "" + +msgid "Batch generate related documents" +msgstr "Batch generate related problems" + +msgid "No permission to use this function {name}" +msgstr "" + +msgid "Function {name} is unavailable" +msgstr "" + +msgid "Field: {name} Type: {_type} Value: {value} Type error" +msgstr "" + +msgid "Field: {name} Type: {_type} Value: {value} Unsupported types" +msgstr "" + +msgid "Field: {name} No value set" +msgstr "" + +msgid "Generate related" +msgstr "" + +msgid "Obtain graphical captcha" +msgstr "" + +msgid "Captcha code error or expiration" +msgstr "" + +msgid "captcha" +msgstr "" \ No newline at end of file diff --git a/apps/locales/zh_CN/LC_MESSAGES/django.po b/apps/locales/zh_CN/LC_MESSAGES/django.po new file mode 100644 index 00000000000..346fd5e47ad --- /dev/null +++ b/apps/locales/zh_CN/LC_MESSAGES/django.po @@ -0,0 +1,7665 @@ +# SOME DESCRIPTIVE TITLE. +# Copyright (C) YEAR THE PACKAGE'S COPYRIGHT HOLDER +# This file is distributed under the same license as the PACKAGE package. +# FIRST AUTHOR , YEAR. 
+# +msgid "" +msgstr "" +"Project-Id-Version: PACKAGE VERSION\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2025-03-20 14:18+0800\n" +"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" +"Last-Translator: FULL NAME \n" +"Language-Team: LANGUAGE \n" +"Language: \n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" + +#: apps/xpack/auth/user_key.py:26 +#: apps/xpack/serializers/license_serializers.py:96 +#: apps/xpack/serializers/license_tools.py:109 +msgid "The license is invalid" +msgstr "License 无效" + +#: apps/xpack/auth/user_key.py:32 apps/xpack/auth/user_key.py:34 +msgid "secret_key is invalid" +msgstr "secret key无效" + +#: apps/xpack/middleware/swagger_middleware.py:19 +msgid "The license has not been uploaded or the license has expired" +msgstr "License 未上传或 License 已过期" + +#: apps/xpack/serializers/application_setting_serializer.py:20 +msgid "theme color" +msgstr "主题颜色" + +#: apps/xpack/serializers/application_setting_serializer.py:22 +msgid "header font color" +msgstr "头部字体颜色" + +#: apps/xpack/serializers/application_setting_serializer.py:26 +msgid "float location type" +msgstr "浮窗位置类型" + +#: apps/xpack/serializers/application_setting_serializer.py:27 +msgid "float location value" +msgstr "浮窗位置值" + +#: apps/xpack/serializers/application_setting_serializer.py:31 +msgid "float location x" +msgstr "浮窗位置 x" + +#: apps/xpack/serializers/application_setting_serializer.py:32 +msgid "float location y" +msgstr "浮窗位置 y" + +#: apps/xpack/serializers/application_setting_serializer.py:36 +#: apps/xpack/swagger_api/application_setting_api.py:23 +msgid "show source" +msgstr "是否显示来源" + +#: apps/xpack/serializers/application_setting_serializer.py:37 +#: community/apps/application/serializers/application_serializers.py:354 +#: community/apps/application/swagger_api/application_api.py:169 +#: community/apps/application/swagger_api/application_api.py:170 +#: community/apps/users/serializers/user_serializers.py:273 +#: 
community/apps/users/views/user.py:85 community/apps/users/views/user.py:86 +msgid "language" +msgstr "语言" + +#: apps/xpack/serializers/application_setting_serializer.py:38 +#: apps/xpack/swagger_api/application_setting_api.py:30 +msgid "show history" +msgstr "是否显示历史记录" + +#: apps/xpack/serializers/application_setting_serializer.py:39 +#: apps/xpack/swagger_api/application_setting_api.py:37 +msgid "draggable" +msgstr "是否可拖动" + +#: apps/xpack/serializers/application_setting_serializer.py:40 +#: apps/xpack/swagger_api/application_setting_api.py:44 +msgid "show guide" +msgstr "是否显示引导图" + +#: apps/xpack/serializers/application_setting_serializer.py:41 +#: apps/xpack/swagger_api/application_setting_api.py:51 +msgid "avatar" +msgstr "头像" + +#: apps/xpack/serializers/application_setting_serializer.py:42 +msgid "avatar url" +msgstr "头像地址" + +#: apps/xpack/serializers/application_setting_serializer.py:43 +#: apps/xpack/swagger_api/application_setting_api.py:86 +msgid "user avatar" +msgstr "用户头像" + +#: apps/xpack/serializers/application_setting_serializer.py:44 +msgid "user avatar url" +msgstr "用户头像地址" + +#: apps/xpack/serializers/application_setting_serializer.py:45 +#: apps/xpack/swagger_api/application_setting_api.py:58 +msgid "float icon" +msgstr "浮窗图标" + +#: apps/xpack/serializers/application_setting_serializer.py:46 +msgid "float icon url" +msgstr "浮窗图标地址" + +#: apps/xpack/serializers/application_setting_serializer.py:47 +#: apps/xpack/swagger_api/application_setting_api.py:65 +msgid "disclaimer" +msgstr "免责声明" + +#: apps/xpack/serializers/application_setting_serializer.py:48 +#: apps/xpack/swagger_api/application_setting_api.py:72 +msgid "disclaimer value" +msgstr "免责声明的值" + +#: apps/xpack/serializers/application_setting_serializer.py:70 +#: apps/xpack/serializers/dataset_lark_serializer.py:373 +#: community/apps/dataset/serializers/dataset_serializers.py:548 +msgid "application id" +msgstr "应用 id" + +#: apps/xpack/serializers/application_setting_serializer.py:96 +#: 
apps/xpack/serializers/platform_serializer.py:83 +#: apps/xpack/serializers/platform_serializer.py:105 +#: apps/xpack/serializers/platform_serializer.py:174 +#: apps/xpack/serializers/platform_serializer.py:185 +#: community/apps/application/serializers/application_serializers.py:1237 +#: community/apps/application/serializers/chat_message_serializers.py:424 +#: community/apps/application/serializers/chat_serializers.py:294 +#: community/apps/application/serializers/chat_serializers.py:396 +msgid "Application does not exist" +msgstr "应用不存在" + +#: apps/xpack/serializers/application_setting_serializer.py:116 +msgid "Float location field type error" +msgstr "浮窗位置字段类型错误" + +#: apps/xpack/serializers/application_setting_serializer.py:122 +msgid "Custom theme field type error" +msgstr "自定义主题字段类型错误" + +#: apps/xpack/serializers/auth_config_serializer.py:19 +msgid "LDAP server cannot be empty" +msgstr "LDAP 服务器不能为空" + +#: apps/xpack/serializers/auth_config_serializer.py:20 +msgid "Base DN cannot be empty" +msgstr "Base DN 不能为空" + +#: apps/xpack/serializers/auth_config_serializer.py:21 +msgid "Password cannot be empty" +msgstr "密码不能为空" + +#: apps/xpack/serializers/auth_config_serializer.py:22 +msgid "OU cannot be empty" +msgstr "OU 不能为空" + +#: apps/xpack/serializers/auth_config_serializer.py:23 +msgid "LDAP filter cannot be empty" +msgstr "LDAP 过滤器不能为空" + +#: apps/xpack/serializers/auth_config_serializer.py:24 +msgid "LDAP mapping cannot be empty" +msgstr "LDAP 映射不能为空" + +#: apps/xpack/serializers/auth_config_serializer.py:29 +msgid "Authorization address cannot be empty" +msgstr "授权地址不能为空" + +#: apps/xpack/serializers/auth_config_serializer.py:31 +msgid "Token address cannot be empty" +msgstr "令牌地址不能为空" + +#: apps/xpack/serializers/auth_config_serializer.py:33 +msgid "User information address cannot be empty" +msgstr "用户信息地址不能为空" + +#: apps/xpack/serializers/auth_config_serializer.py:34 +msgid "Scope cannot be empty" +msgstr "Scope 不能为空" + +#: 
apps/xpack/serializers/auth_config_serializer.py:35 +msgid "Client ID cannot be empty" +msgstr "Client ID 不能为空" + +#: apps/xpack/serializers/auth_config_serializer.py:36 +msgid "Client secret cannot be empty" +msgstr "Client secret 不能为空" + +#: apps/xpack/serializers/auth_config_serializer.py:38 +msgid "Redirect address cannot be empty" +msgstr "重定向地址不能为空" + +#: apps/xpack/serializers/auth_config_serializer.py:39 +msgid "Field mapping cannot be empty" +msgstr "字段映射不能为空" + +#: apps/xpack/serializers/auth_config_serializer.py:166 +#: apps/xpack/serializers/qr_login/qr_login.py:33 +#: community/apps/users/serializers/user_serializers.py:89 +msgid "The user has been disabled, please contact the administrator!" +msgstr "用户已被禁用,请联系管理员!" + +#: apps/xpack/serializers/cas.py:32 +msgid "HttpClient query failed: " +msgstr "HttpClient 查询失败:" + +#: apps/xpack/serializers/cas.py:56 +msgid "CAS authentication failed" +msgstr "CAS 认证失败" + +#: apps/xpack/serializers/channel/chat_manage.py:76 +#: apps/xpack/serializers/channel/chat_manage.py:134 +msgid "" +"Sorry, no relevant content was found. Please re-describe your problem or " +"provide more information. " +msgstr "抱歉,没有找到相关内容。请重新描述您的问题或提供更多信息。" + +#: apps/xpack/serializers/channel/chat_manage.py:82 +msgid "Think: " +msgstr "思考过程: " + +#: apps/xpack/serializers/channel/chat_manage.py:85 +#: apps/xpack/serializers/channel/chat_manage.py:87 +msgid "AI reply: " +msgstr "AI 回复: " + +#: apps/xpack/serializers/channel/chat_manage.py:298 +msgid "Thinking, please wait a moment!" +msgstr "正在思考中,请稍候......"
+ +#: apps/xpack/serializers/channel/ding_talk.py:19 +#: apps/xpack/serializers/channel/wechat.py:89 +#: apps/xpack/serializers/channel/wechat.py:130 +#: apps/xpack/serializers/channel/wecom.py:76 +#: apps/xpack/serializers/channel/wecom.py:259 +msgid "The corresponding platform configuration was not found" +msgstr "对应的平台配置未找到" + +#: apps/xpack/serializers/channel/ding_talk.py:27 +#: apps/xpack/serializers/channel/feishu.py:112 +msgid "Currently only text messages are supported" +msgstr "目前只支持文本消息" + +#: apps/xpack/serializers/channel/ding_talk.py:91 +#: apps/xpack/serializers/channel/wechat.py:161 +#: apps/xpack/serializers/channel/wecom.py:189 +msgid "Image download failed, check network" +msgstr "图片下载失败,请检查网络" + +#: apps/xpack/serializers/channel/ding_talk.py:92 +#: apps/xpack/serializers/channel/wechat.py:159 +#: apps/xpack/serializers/channel/wecom.py:185 +msgid "Please analyze the content of the image." +msgstr "请分析图片内容。" + +#: apps/xpack/serializers/channel/ding_talk.py:95 +#, python-brace-format +msgid "DingTalk application: {user}" +msgstr "钉钉应用:{user}" + +#: apps/xpack/serializers/channel/ding_talk.py:106 +#: apps/xpack/serializers/channel/ding_talk.py:151 +msgid "Content generated by AI" +msgstr "内容由 AI 生成" + +#: apps/xpack/serializers/channel/feishu.py:87 +#: apps/xpack/serializers/channel/feishu.py:107 +msgid "Lark application: " +msgstr "飞书应用:" + +#: apps/xpack/serializers/channel/slack.py:116 +#| msgid "The corresponding platform configuration was not found" +msgid "The corresponding platform configuration for Slack was not found" +msgstr "对应的平台配置未找到" + +#: apps/xpack/serializers/channel/slack.py:206 +msgid "Thinking..." +msgstr "思考中..." + +#: apps/xpack/serializers/channel/slack.py:321 +msgid "Invalid json format." 
+msgstr "json 格式无效。" + +#: apps/xpack/serializers/channel/slack.py:327 +#| msgid "Invalid access_token" +msgid "Invalid Slack request" +msgstr "无效的 Slack 请求" + +#: apps/xpack/serializers/channel/slack.py:335 +#| msgid "DingTalk application: {user}" +msgid "Slack application: {user}" +msgstr "Slack 应用:{user}" + +#: apps/xpack/serializers/channel/slack.py:471 +msgid "Stop" +msgstr "停止" + +#: apps/xpack/serializers/channel/wechat.py:141 +#, python-brace-format +msgid "WeChat Official Account: {account}" +msgstr "微信公众号:{account}" + +#: apps/xpack/serializers/channel/wechat.py:148 +#: apps/xpack/serializers/channel/wecom.py:171 +#: apps/xpack/serializers/channel/wecom.py:175 +msgid "" +"The app does not enable the speech-to-text function or the speech-to-text " +"function fails." +msgstr "应用未开启语音转文字功能或语音转文字功能失败。" + +#: apps/xpack/serializers/channel/wechat.py:187 +msgid "Message types not supported yet" +msgstr "暂时不支持该类型的消息" + +#: apps/xpack/serializers/channel/wechat.py:194 +msgid "Welcome to subscribe" +msgstr "欢迎订阅" + +#: apps/xpack/serializers/channel/wecom.py:84 +msgid "Enterprise WeChat user: " +msgstr "企业微信用户:" + +#: apps/xpack/serializers/channel/wecom.py:95 +msgid "Enterprise WeChat customer service: " +msgstr "企业微信客服:" + +#: apps/xpack/serializers/channel/wecom.py:132 +#: apps/xpack/serializers/channel/wecom.py:148 +msgid "This type of message is not supported yet" +msgstr "暂时不支持该类型的消息" + +#: apps/xpack/serializers/channel/wecom.py:254 +msgid "Signature missing" +msgstr "签名缺失" + +#: apps/xpack/serializers/channel/wecom.py:266 +#: apps/xpack/serializers/channel/wecom.py:273 +#, python-brace-format +msgid "An error occurred while processing the GET request {e}" +msgstr "GET 请求处理时发生错误 {e}" + +#: apps/xpack/serializers/dataset_lark_serializer.py:58 +#: community/apps/dataset/serializers/dataset_serializers.py:82 +#: community/apps/dataset/serializers/dataset_serializers.py:214 +#: community/apps/dataset/serializers/dataset_serializers.py:295 +#: 
community/apps/dataset/serializers/dataset_serializers.py:296 +#: community/apps/dataset/serializers/dataset_serializers.py:357 +#: community/apps/dataset/serializers/dataset_serializers.py:358 +#: community/apps/dataset/serializers/dataset_serializers.py:502 +#: community/apps/dataset/serializers/dataset_serializers.py:503 +#: community/apps/dataset/serializers/dataset_serializers.py:568 +#: community/apps/dataset/serializers/dataset_serializers.py:607 +#: community/apps/dataset/serializers/dataset_serializers.py:701 +#: community/apps/dataset/serializers/dataset_serializers.py:933 +#: community/apps/dataset/serializers/dataset_serializers.py:934 +#: community/apps/dataset/serializers/document_serializers.py:816 +#: community/apps/function_lib/serializers/function_lib_serializer.py:141 +#: community/apps/function_lib/serializers/function_lib_serializer.py:186 +#: community/apps/function_lib/serializers/function_lib_serializer.py:203 +#: community/apps/function_lib/serializers/function_lib_serializer.py:262 +#: community/apps/setting/serializers/provider_serializers.py:76 +#: community/apps/setting/serializers/provider_serializers.py:127 +#: community/apps/setting/serializers/provider_serializers.py:174 +#: community/apps/setting/serializers/provider_serializers.py:256 +#: community/apps/setting/serializers/provider_serializers.py:277 +#: community/apps/setting/serializers/provider_serializers.py:301 +#: community/apps/setting/serializers/team_serializers.py:42 +#: community/apps/users/serializers/user_serializers.py:272 +msgid "user id" +msgstr "用户 id" + +#: apps/xpack/serializers/dataset_lark_serializer.py:61 +#: apps/xpack/serializers/dataset_lark_serializer.py:112 +#: apps/xpack/serializers/dataset_lark_serializer.py:113 +#: apps/xpack/serializers/dataset_lark_serializer.py:367 +#: community/apps/dataset/serializers/dataset_serializers.py:137 +#: community/apps/dataset/serializers/dataset_serializers.py:201 +#: 
community/apps/dataset/serializers/dataset_serializers.py:221 +#: community/apps/dataset/serializers/dataset_serializers.py:244 +#: community/apps/dataset/serializers/dataset_serializers.py:273 +#: community/apps/dataset/serializers/dataset_serializers.py:274 +#: community/apps/dataset/serializers/dataset_serializers.py:291 +#: community/apps/dataset/serializers/dataset_serializers.py:292 +#: community/apps/dataset/serializers/dataset_serializers.py:319 +#: community/apps/dataset/serializers/dataset_serializers.py:353 +#: community/apps/dataset/serializers/dataset_serializers.py:354 +#: community/apps/dataset/serializers/dataset_serializers.py:382 +#: community/apps/dataset/serializers/dataset_serializers.py:383 +#: community/apps/dataset/serializers/dataset_serializers.py:498 +#: community/apps/dataset/serializers/dataset_serializers.py:499 +#: community/apps/dataset/serializers/dataset_serializers.py:527 +#: community/apps/dataset/serializers/dataset_serializers.py:528 +#: community/apps/dataset/serializers/dataset_serializers.py:542 +#: community/apps/dataset/serializers/dataset_serializers.py:907 +#: community/apps/dataset/serializers/dataset_serializers.py:908 +#: community/apps/dataset/serializers/dataset_serializers.py:929 +#: community/apps/dataset/serializers/dataset_serializers.py:930 +msgid "dataset name" +msgstr "知识库名称" + +#: apps/xpack/serializers/dataset_lark_serializer.py:63 +#: apps/xpack/serializers/dataset_lark_serializer.py:114 +#: apps/xpack/serializers/dataset_lark_serializer.py:115 +#: apps/xpack/serializers/dataset_lark_serializer.py:369 +#: community/apps/dataset/serializers/dataset_serializers.py:142 +#: community/apps/dataset/serializers/dataset_serializers.py:206 +#: community/apps/dataset/serializers/dataset_serializers.py:226 +#: community/apps/dataset/serializers/dataset_serializers.py:249 +#: community/apps/dataset/serializers/dataset_serializers.py:278 +#: community/apps/dataset/serializers/dataset_serializers.py:279 +#: 
community/apps/dataset/serializers/dataset_serializers.py:293 +#: community/apps/dataset/serializers/dataset_serializers.py:294 +#: community/apps/dataset/serializers/dataset_serializers.py:324 +#: community/apps/dataset/serializers/dataset_serializers.py:355 +#: community/apps/dataset/serializers/dataset_serializers.py:356 +#: community/apps/dataset/serializers/dataset_serializers.py:384 +#: community/apps/dataset/serializers/dataset_serializers.py:385 +#: community/apps/dataset/serializers/dataset_serializers.py:500 +#: community/apps/dataset/serializers/dataset_serializers.py:501 +#: community/apps/dataset/serializers/dataset_serializers.py:529 +#: community/apps/dataset/serializers/dataset_serializers.py:530 +#: community/apps/dataset/serializers/dataset_serializers.py:544 +#: community/apps/dataset/serializers/dataset_serializers.py:909 +#: community/apps/dataset/serializers/dataset_serializers.py:910 +#: community/apps/dataset/serializers/dataset_serializers.py:931 +#: community/apps/dataset/serializers/dataset_serializers.py:932 +msgid "dataset description" +msgstr "知识库描述" + +#: apps/xpack/serializers/dataset_lark_serializer.py:65 +#: apps/xpack/serializers/dataset_lark_serializer.py:118 +#: apps/xpack/serializers/dataset_lark_serializer.py:377 +msgid "app id" +msgstr "" + +#: apps/xpack/serializers/dataset_lark_serializer.py:66 +#: apps/xpack/serializers/dataset_lark_serializer.py:119 +#: apps/xpack/serializers/dataset_lark_serializer.py:120 +#: apps/xpack/serializers/dataset_lark_serializer.py:378 +msgid "app secret" +msgstr "" + +#: apps/xpack/serializers/dataset_lark_serializer.py:67 +#: apps/xpack/serializers/dataset_lark_serializer.py:121 +#: apps/xpack/serializers/dataset_lark_serializer.py:122 +#: apps/xpack/serializers/dataset_lark_serializer.py:132 +#: apps/xpack/serializers/dataset_lark_serializer.py:165 +#: apps/xpack/serializers/dataset_lark_serializer.py:379 +msgid "folder token" +msgstr "" + +#: 
apps/xpack/serializers/dataset_lark_serializer.py:69 +#: apps/xpack/serializers/dataset_lark_serializer.py:116 +#: apps/xpack/serializers/dataset_lark_serializer.py:117 +#: community/apps/dataset/serializers/dataset_serializers.py:231 +#: community/apps/dataset/serializers/dataset_serializers.py:254 +#: community/apps/dataset/serializers/dataset_serializers.py:330 +#: community/apps/dataset/serializers/dataset_serializers.py:386 +#: community/apps/dataset/serializers/dataset_serializers.py:387 +#: community/apps/dataset/serializers/dataset_serializers.py:531 +#: community/apps/dataset/serializers/dataset_serializers.py:532 +msgid "embedding mode" +msgstr "向量模型" + +#: apps/xpack/serializers/dataset_lark_serializer.py:79 +#: apps/xpack/serializers/dataset_lark_serializer.py:389 +msgid "Network error or folder token error!" +msgstr "网络错误或文件夹 token 错误!" + +#: apps/xpack/serializers/dataset_lark_serializer.py:87 +#: apps/xpack/serializers/dataset_lark_serializer.py:444 +#: community/apps/dataset/serializers/dataset_serializers.py:424 +#: community/apps/dataset/serializers/dataset_serializers.py:476 +#: community/apps/dataset/serializers/dataset_serializers.py:865 +msgid "Knowledge base name duplicate!" +msgstr "知识库名称重复!" 
+ +#: apps/xpack/serializers/dataset_lark_serializer.py:130 +#: apps/xpack/serializers/dataset_lark_serializer.py:164 +#: apps/xpack/serializers/dataset_lark_serializer.py:201 +#: apps/xpack/serializers/dataset_lark_serializer.py:221 +#: apps/xpack/serializers/dataset_lark_serializer.py:346 +#: apps/xpack/serializers/dataset_lark_serializer.py:363 +#: community/apps/common/swagger_api/common_api.py:68 +#: community/apps/common/swagger_api/common_api.py:69 +#: community/apps/dataset/serializers/dataset_serializers.py:84 +#: community/apps/dataset/serializers/dataset_serializers.py:93 +#: community/apps/dataset/serializers/dataset_serializers.py:605 +#: community/apps/dataset/serializers/dataset_serializers.py:688 +#: community/apps/dataset/serializers/dataset_serializers.py:699 +#: community/apps/dataset/serializers/dataset_serializers.py:955 +#: community/apps/dataset/serializers/document_serializers.py:169 +#: community/apps/dataset/serializers/document_serializers.py:286 +#: community/apps/dataset/serializers/document_serializers.py:407 +#: community/apps/dataset/serializers/document_serializers.py:573 +#: community/apps/dataset/serializers/document_serializers.py:1055 +#: community/apps/dataset/serializers/document_serializers.py:1216 +#: community/apps/dataset/serializers/paragraph_serializers.py:96 +#: community/apps/dataset/serializers/paragraph_serializers.py:162 +#: community/apps/dataset/serializers/paragraph_serializers.py:195 +#: community/apps/dataset/serializers/paragraph_serializers.py:196 +#: community/apps/dataset/serializers/paragraph_serializers.py:208 +#: community/apps/dataset/serializers/paragraph_serializers.py:266 +#: community/apps/dataset/serializers/paragraph_serializers.py:285 +#: community/apps/dataset/serializers/paragraph_serializers.py:302 +#: community/apps/dataset/serializers/paragraph_serializers.py:459 +#: community/apps/dataset/serializers/paragraph_serializers.py:567 +#: 
community/apps/dataset/serializers/paragraph_serializers.py:638 +#: community/apps/dataset/serializers/paragraph_serializers.py:647 +#: community/apps/dataset/serializers/paragraph_serializers.py:715 +#: community/apps/dataset/serializers/paragraph_serializers.py:716 +#: community/apps/dataset/serializers/paragraph_serializers.py:732 +#: community/apps/dataset/serializers/problem_serializers.py:87 +#: community/apps/dataset/serializers/problem_serializers.py:112 +#: community/apps/dataset/serializers/problem_serializers.py:135 +#: community/apps/dataset/serializers/problem_serializers.py:192 +#: community/apps/dataset/swagger_api/problem_api.py:28 +#: community/apps/dataset/swagger_api/problem_api.py:29 +#: community/apps/dataset/swagger_api/problem_api.py:77 +#: community/apps/dataset/swagger_api/problem_api.py:96 +#: community/apps/dataset/swagger_api/problem_api.py:149 +#: community/apps/dataset/swagger_api/problem_api.py:177 +msgid "dataset id" +msgstr "知识库 id" + +#: apps/xpack/serializers/dataset_lark_serializer.py:145 +#: apps/xpack/serializers/dataset_lark_serializer.py:146 +#: apps/xpack/serializers/dataset_lark_serializer.py:212 +#: community/apps/dataset/serializers/document_serializers.py:812 +#: community/apps/dataset/serializers/document_serializers.py:813 +#: community/apps/setting/swagger_api/provide_api.py:22 +#: community/apps/setting/swagger_api/provide_api.py:48 +#: community/apps/setting/swagger_api/provide_api.py:49 +#: community/apps/setting/swagger_api/provide_api.py:76 +#: community/apps/setting/swagger_api/provide_api.py:77 +#: community/apps/setting/swagger_api/provide_api.py:143 +#: community/apps/setting/swagger_api/provide_api.py:144 +msgid "name" +msgstr "名称" + +#: apps/xpack/serializers/dataset_lark_serializer.py:147 +#: apps/xpack/serializers/dataset_lark_serializer.py:148 +#: apps/xpack/serializers/dataset_lark_serializer.py:211 +#: community/apps/application/serializers/application_serializers.py:257 +msgid "token" +msgstr "token" 
+ +#: apps/xpack/serializers/dataset_lark_serializer.py:149 +#: apps/xpack/serializers/dataset_lark_serializer.py:150 +#: apps/xpack/serializers/dataset_lark_serializer.py:210 +#: community/apps/application/flow/step_node/function_node/i_function_node.py:26 +#: community/apps/dataset/serializers/document_serializers.py:229 +#: community/apps/function_lib/serializers/function_lib_serializer.py:72 +#: community/apps/function_lib/swagger_api/function_lib_api.py:92 +#: community/apps/function_lib/swagger_api/function_lib_api.py:138 +#: community/apps/function_lib/swagger_api/function_lib_api.py:184 +#: community/apps/setting/serializers/team_serializers.py:59 +#: community/apps/setting/serializers/team_serializers.py:74 +#: community/apps/setting/serializers/team_serializers.py:85 +#: community/apps/setting/serializers/valid_serializers.py:37 +msgid "type" +msgstr "类型" + +#: apps/xpack/serializers/dataset_lark_serializer.py:151 +#: apps/xpack/serializers/dataset_lark_serializer.py:152 +#| msgid "id does not exist" +msgid "is exist" +msgstr "是否存在" + +#: apps/xpack/serializers/dataset_lark_serializer.py:173 +#: apps/xpack/serializers/dataset_lark_serializer.py:230 +#: apps/xpack/task/sync.py:120 +#| msgid "Knowledge base id" +msgid "Knowledge base not found!" +msgstr "知识库未找到!" + +#: apps/xpack/serializers/dataset_lark_serializer.py:185 +#: apps/xpack/serializers/dataset_lark_serializer.py:252 +msgid "Failed to get lark document list!" +msgstr "获取飞书文档列表失败!"
+ +#: apps/xpack/serializers/dataset_lark_serializer.py:262 +#: community/apps/common/swagger_api/common_api.py:70 +#: community/apps/common/swagger_api/common_api.py:71 +#: community/apps/dataset/serializers/document_serializers.py:293 +#: community/apps/dataset/serializers/document_serializers.py:386 +#: community/apps/dataset/serializers/document_serializers.py:490 +#: community/apps/dataset/serializers/document_serializers.py:572 +#: community/apps/dataset/serializers/document_serializers.py:581 +#: community/apps/dataset/serializers/document_serializers.py:586 +#: community/apps/dataset/serializers/document_serializers.py:854 +#: community/apps/dataset/serializers/document_serializers.py:982 +#: community/apps/dataset/serializers/document_serializers.py:1191 +#: community/apps/dataset/serializers/paragraph_serializers.py:98 +#: community/apps/dataset/serializers/paragraph_serializers.py:167 +#: community/apps/dataset/serializers/paragraph_serializers.py:212 +#: community/apps/dataset/serializers/paragraph_serializers.py:271 +#: community/apps/dataset/serializers/paragraph_serializers.py:286 +#: community/apps/dataset/serializers/paragraph_serializers.py:303 +#: community/apps/dataset/serializers/paragraph_serializers.py:426 +#: community/apps/dataset/serializers/paragraph_serializers.py:431 +#: community/apps/dataset/serializers/paragraph_serializers.py:462 +#: community/apps/dataset/serializers/paragraph_serializers.py:570 +#: community/apps/dataset/serializers/paragraph_serializers.py:642 +#: community/apps/dataset/serializers/paragraph_serializers.py:650 +#: community/apps/dataset/serializers/paragraph_serializers.py:682 +#: community/apps/dataset/serializers/paragraph_serializers.py:717 +#: community/apps/dataset/serializers/paragraph_serializers.py:718 +#: community/apps/dataset/serializers/paragraph_serializers.py:733 +#: community/apps/dataset/serializers/problem_serializers.py:58 +#: community/apps/dataset/swagger_api/problem_api.py:64 +msgid "document 
id" +msgstr "文档 id" + +#: apps/xpack/serializers/dataset_lark_serializer.py:269 +#: apps/xpack/serializers/dataset_lark_serializer.py:289 +#: community/apps/dataset/serializers/document_serializers.py:497 +#: community/apps/dataset/serializers/document_serializers.py:593 +#: community/apps/dataset/serializers/document_serializers.py:1197 +msgid "document id not exist" +msgstr "文档 id 不存在" + +#: apps/xpack/serializers/dataset_lark_serializer.py:271 +#| msgid "Synchronization is only supported for web site types" +msgid "Synchronization is only supported for lark documents" +msgstr "只有飞书知识库类型才支持同步" + +#: apps/xpack/serializers/dataset_lark_serializer.py:374 +#: community/apps/dataset/serializers/dataset_serializers.py:549 +#: community/apps/dataset/serializers/dataset_serializers.py:914 +#: community/apps/dataset/serializers/dataset_serializers.py:915 +msgid "application id list" +msgstr "应用 id 列表" + +#: apps/xpack/serializers/dataset_lark_serializer.py:416 +#: community/apps/dataset/serializers/dataset_serializers.py:175 +#: community/apps/dataset/serializers/dataset_serializers.py:837 +#: community/apps/function_lib/serializers/function_lib_serializer.py:125 +#: community/apps/function_lib/swagger_api/function_lib_api.py:119 +#: community/apps/function_lib/swagger_api/function_lib_api.py:120 +#: community/apps/function_lib/swagger_api/function_lib_api.py:165 +#: community/apps/function_lib/swagger_api/function_lib_api.py:166 +#: community/apps/setting/swagger_api/provide_api.py:81 +msgid "permission" +msgstr "权限" + +#: apps/xpack/serializers/dataset_lark_serializer.py:463 +#: community/apps/dataset/serializers/dataset_serializers.py:884 +#, python-brace-format +msgid "Unknown application id {dataset_id}, cannot be associated" +msgstr "未知的应用id {dataset_id},无法关联" + +#: apps/xpack/serializers/license_serializers.py:52 +msgid "license file" +msgstr "License 文件" + +#: apps/xpack/serializers/license_tools.py:134 +msgid "License usage limit exceeded." 
+msgstr "超出许可证使用限制。" + +#: apps/xpack/serializers/license_tools.py:158 +msgid "The network is busy, try again later." +msgstr "网络繁忙,请稍后再试。" + +#: apps/xpack/serializers/oauth2.py:79 apps/xpack/serializers/oauth2.py:82 +msgid "Failed to obtain user information" +msgstr "获取用户信息失败" + +#: apps/xpack/serializers/operate_log.py:36 +#: community/apps/application/serializers/application_statistics_serializers.py:27 +#: community/apps/application/serializers/chat_serializers.py:116 +#: community/apps/application/swagger_api/application_statistics_api.py:26 +msgid "Start time" +msgstr "开始时间" + +#: apps/xpack/serializers/operate_log.py:37 +#: community/apps/application/serializers/application_statistics_serializers.py:28 +#: community/apps/application/serializers/chat_serializers.py:117 +#: community/apps/application/swagger_api/application_statistics_api.py:31 +#: community/apps/application/swagger_api/chat_api.py:270 +msgid "End time" +msgstr "结束时间" + +#: apps/xpack/serializers/operate_log.py:38 +#: apps/xpack/swagger_api/operate_log.py:17 +#: apps/xpack/swagger_api/operate_log.py:18 +#: apps/xpack/swagger_api/operate_log.py:45 +#: apps/xpack/swagger_api/operate_log.py:46 +msgid "menu" +msgstr "菜单" + +#: apps/xpack/serializers/operate_log.py:39 +#: apps/xpack/swagger_api/operate_log.py:20 +#: apps/xpack/swagger_api/operate_log.py:21 +#: apps/xpack/swagger_api/operate_log.py:48 +#: apps/xpack/swagger_api/operate_log.py:49 +#| msgid "Temperature" +msgid "operate" +msgstr "操作" + +#: apps/xpack/serializers/operate_log.py:40 +#: apps/xpack/swagger_api/operate_log.py:51 +#: apps/xpack/swagger_api/operate_log.py:52 +#| msgid "user id" +msgid "user" +msgstr "用户" + +#: apps/xpack/serializers/operate_log.py:41 +#: apps/xpack/swagger_api/operate_log.py:54 +#: apps/xpack/swagger_api/operate_log.py:55 +#: community/apps/dataset/serializers/document_serializers.py:417 +msgid "status" +msgstr "状态" + +#: apps/xpack/serializers/operate_log.py:42 +#: apps/xpack/swagger_api/operate_log.py:57 
+#: apps/xpack/swagger_api/operate_log.py:58 +#| msgid "Forum address" +msgid "ip_address" +msgstr "IP 地址" + +#: apps/xpack/serializers/platform_serializer.py:14 +msgid "app_id is required" +msgstr "app_id 是必填项" + +#: apps/xpack/serializers/platform_serializer.py:15 +msgid "app_secret is required" +msgstr "app_secret 是必填项" + +#: apps/xpack/serializers/platform_serializer.py:16 +msgid "token is required" +msgstr "token 是必填项" + +#: apps/xpack/serializers/platform_serializer.py:17 +msgid "callback_url is required" +msgstr "回调地址是必填项" + +#: apps/xpack/serializers/platform_serializer.py:23 +#: apps/xpack/serializers/platform_serializer.py:32 +msgid "App ID is required" +msgstr "App ID 是必填项" + +#: apps/xpack/serializers/platform_serializer.py:24 +#: apps/xpack/serializers/platform_source_serializer.py:24 +msgid "Agent ID is required" +msgstr "Agent ID 是必填项" + +#: apps/xpack/serializers/platform_serializer.py:25 +msgid "Secret is required" +msgstr "Secret 是必填项" + +#: apps/xpack/serializers/platform_serializer.py:26 +msgid "Token is required" +msgstr "Token 是必填项" + +#: apps/xpack/serializers/platform_serializer.py:28 +#: apps/xpack/serializers/platform_serializer.py:36 +#: apps/xpack/serializers/platform_serializer.py:42 +#: apps/xpack/serializers/platform_serializer.py:48 +#: apps/xpack/serializers/platform_source_serializer.py:19 +msgid "Callback URL is required" +msgstr "Callback URL 是必填项" + +#: apps/xpack/serializers/platform_serializer.py:33 +#: apps/xpack/serializers/platform_source_serializer.py:18 +msgid "App Secret is required" +msgstr "App Secret 是必填项" + +#: apps/xpack/serializers/platform_serializer.py:35 +msgid "Verification Token is required" +msgstr "Verification Token 是必填项" + +#: apps/xpack/serializers/platform_serializer.py:40 +msgid "Client ID is required" +msgstr "Client ID 是必填项" + +#: apps/xpack/serializers/platform_serializer.py:41 +msgid "Client Secret is required" +msgstr "Client Secret 是必填项" + +#: apps/xpack/serializers/platform_serializer.py:46 +#| 
msgid "Client Secret is required" +msgid "Signing Secret is required" +msgstr "Signing Secret 是必填项" + +#: apps/xpack/serializers/platform_serializer.py:47 +#| msgid "Token is required" +msgid "Bot User Token is required" +msgstr "Bot User Token 是必填项" + +#: apps/xpack/serializers/platform_serializer.py:68 +msgid "Check if the fields are correct" +msgstr "检查字段是否正确" + +#: apps/xpack/serializers/platform_serializer.py:114 +#: apps/xpack/views/platform.py:85 apps/xpack/views/platform.py:101 +#: community/apps/application/flow/step_node/application_node/i_application_node.py:13 +#: community/apps/application/serializers/application_serializers.py:335 +#: community/apps/application/serializers/application_serializers.py:581 +#: community/apps/application/serializers/application_serializers.py:696 +#: community/apps/application/serializers/application_serializers.py:791 +#: community/apps/application/serializers/application_serializers.py:1230 +#: community/apps/application/serializers/application_serializers.py:1272 +#: community/apps/application/serializers/application_statistics_serializers.py:26 +#: community/apps/application/serializers/application_version_serializers.py:35 +#: community/apps/application/serializers/application_version_serializers.py:59 +#: community/apps/application/serializers/chat_message_serializers.py:207 +#: community/apps/application/serializers/chat_message_serializers.py:270 +#: community/apps/application/serializers/chat_serializers.py:77 +#: community/apps/application/serializers/chat_serializers.py:102 +#: community/apps/application/serializers/chat_serializers.py:119 +#: community/apps/application/serializers/chat_serializers.py:287 +#: community/apps/application/serializers/chat_serializers.py:363 +#: community/apps/application/serializers/chat_serializers.py:440 +#: community/apps/application/swagger_api/application_api.py:87 +#: community/apps/application/swagger_api/application_api.py:101 +#: 
community/apps/application/swagger_api/application_api.py:112 +#: community/apps/application/swagger_api/application_api.py:143 +#: community/apps/application/swagger_api/application_api.py:392 +#: community/apps/application/swagger_api/application_api.py:413 +#: community/apps/application/swagger_api/application_api.py:424 +#: community/apps/application/swagger_api/application_statistics_api.py:21 +#: community/apps/application/swagger_api/application_version_api.py:42 +#: community/apps/application/swagger_api/application_version_api.py:56 +#: community/apps/application/swagger_api/chat_api.py:23 +#: community/apps/application/swagger_api/chat_api.py:33 +#: community/apps/application/swagger_api/chat_api.py:167 +#: community/apps/application/swagger_api/chat_api.py:168 +#: community/apps/application/swagger_api/chat_api.py:199 +#: community/apps/application/swagger_api/chat_api.py:222 +#: community/apps/application/swagger_api/chat_api.py:249 +#: community/apps/application/swagger_api/chat_api.py:281 +#: community/apps/application/swagger_api/chat_api.py:350 +#: community/apps/application/swagger_api/chat_api.py:410 +#: community/apps/application/swagger_api/chat_api.py:427 +#: community/apps/application/swagger_api/chat_api.py:460 +#: community/apps/application/views/chat_views.py:477 +msgid "Application ID" +msgstr "应用 ID" + +#: apps/xpack/serializers/platform_serializer.py:116 +msgid "Platform type, for example: wechat" +msgstr "平台类型,例如:wechat" + +#: apps/xpack/serializers/platform_serializer.py:125 +#: apps/xpack/serializers/platform_serializer.py:126 +msgid "Platform type" +msgstr "平台类型" + +#: apps/xpack/serializers/platform_serializer.py:128 +msgid "Status" +msgstr "状态" + +#: apps/xpack/serializers/platform_serializer.py:138 +#: apps/xpack/serializers/platform_serializer.py:139 +msgid "Configuration information" +msgstr "配置信息" + +#: apps/xpack/serializers/platform_serializer.py:191 +#, python-brace-format +msgid "The platform configuration corresponding to 
{type} was not found" +msgstr "平台配置 {type} 未找到" + +#: apps/xpack/serializers/platform_source_serializer.py:23 +#: apps/xpack/serializers/platform_source_serializer.py:32 +msgid "Corp ID is required" +msgstr "Corp ID 是必填项" + +#: apps/xpack/serializers/platform_source_serializer.py:28 +#: apps/xpack/serializers/platform_source_serializer.py:33 +msgid "App Key is required" +msgstr "App Key 是必填项" + +#: apps/xpack/serializers/platform_source_serializer.py:78 +msgid "Configuration information is wrong and failed to save" +msgstr "配置信息错误,保存失败" + +#: apps/xpack/serializers/platform_source_serializer.py:104 +msgid "Connection failed" +msgstr "连接失败" + +#: apps/xpack/serializers/platform_source_serializer.py:123 +msgid "Platform does not exist" +msgstr "平台不存在" + +#: apps/xpack/serializers/platform_source_serializer.py:134 +#| msgid "Unsupported file format" +msgid "Unsupported platform type" +msgstr "三方平台类型不支持" + +#: apps/xpack/serializers/qr_login/qr_login.py:28 +msgid "Team" +msgstr "团队成员" + +#: apps/xpack/serializers/system_params_serializers.py:63 +msgid "theme" +msgstr "主题" + +#: apps/xpack/serializers/system_params_serializers.py:70 +msgid "website icon" +msgstr "网站图标" + +#: apps/xpack/serializers/system_params_serializers.py:77 +msgid "login logo" +msgstr "登录logo" + +#: apps/xpack/serializers/system_params_serializers.py:84 +msgid "Login background image" +msgstr "登录背景图" + +#: apps/xpack/serializers/system_params_serializers.py:91 +msgid "website title" +msgstr "网站标题" + +#: apps/xpack/serializers/system_params_serializers.py:98 +msgid "website slogan" +msgstr "网站标语" + +#: apps/xpack/serializers/system_params_serializers.py:105 +msgid "Show user manual" +msgstr "是否显示用户手册" + +#: apps/xpack/serializers/system_params_serializers.py:112 +msgid "User manual address" +msgstr "用户手册地址" + +#: apps/xpack/serializers/system_params_serializers.py:119 +msgid "Show forum" +msgstr "是否显示论坛" + +#: apps/xpack/serializers/system_params_serializers.py:126 +msgid "Forum address" +msgstr 
"论坛地址" + +#: apps/xpack/serializers/system_params_serializers.py:133 +msgid "Show project" +msgstr "是否显示项目" + +#: apps/xpack/serializers/system_params_serializers.py:140 +msgid "Project address" +msgstr "项目地址" + +#: apps/xpack/serializers/tools.py:58 +#, python-brace-format +msgid "" +"Thinking about 【{question}】...If you want me to continue answering, please " +"reply {trigger_message}" +msgstr "" +"思考中【{question}】...如果您希望我继续回答,请回复“{trigger_message}”。" + +#: apps/xpack/serializers/tools.py:158 +msgid "" +"\n" +" ------------\n" +"[To be continued, reply \"Continue to answer the question]" +msgstr "" +"\n" +" ------------\n" +"【未完待续,回复“问题继续回答】" + +#: apps/xpack/serializers/tools.py:238 +#, python-brace-format +msgid "" +"To be continued, reply \"{trigger_message}\" to continue answering the " +"question" +msgstr "【未完待续,回复“{trigger_message}” 或 问题继续回答】" + +#: apps/xpack/swagger_api/application_setting_api.py:79 +msgid "Custom theme {theme_color: , header_font_color: }" +msgstr "自定义主题 {theme_color:, header_font_color: }" + +#: apps/xpack/swagger_api/application_setting_api.py:93 +msgid "Float location {top: 0, left: 0}" +msgstr "浮窗位置 {top: 0, left: 0}" + +#: apps/xpack/swagger_api/application_setting_api.py:101 +#: apps/xpack/swagger_api/application_setting_api.py:102 +#: apps/xpack/swagger_api/auth_api.py:10 apps/xpack/swagger_api/auth_api.py:11 +#: apps/xpack/swagger_api/auth_api.py:81 apps/xpack/swagger_api/auth_api.py:82 +msgid "Authentication configuration" +msgstr "认证配置" + +#: apps/xpack/swagger_api/application_setting_api.py:106 +#: apps/xpack/swagger_api/application_setting_api.py:107 +#: apps/xpack/swagger_api/auth_api.py:15 apps/xpack/swagger_api/auth_api.py:16 +#: apps/xpack/swagger_api/auth_api.py:30 apps/xpack/swagger_api/auth_api.py:87 +#: apps/xpack/swagger_api/auth_api.py:88 apps/xpack/views/auth.py:27 +#: apps/xpack/views/auth.py:28 +msgid "Authentication type" +msgstr "认证类型" + +#: apps/xpack/swagger_api/application_setting_api.py:109 +#: 
apps/xpack/swagger_api/application_setting_api.py:110 +#: apps/xpack/swagger_api/auth_api.py:18 apps/xpack/swagger_api/auth_api.py:19 +#: apps/xpack/swagger_api/auth_api.py:93 apps/xpack/swagger_api/auth_api.py:94 +msgid "Configuration" +msgstr "配置" + +#: apps/xpack/swagger_api/application_setting_api.py:112 +#: apps/xpack/swagger_api/application_setting_api.py:113 +#: apps/xpack/swagger_api/auth_api.py:21 apps/xpack/swagger_api/auth_api.py:22 +#: community/apps/common/swagger_api/common_api.py:72 +#: community/apps/common/swagger_api/common_api.py:73 +#: community/apps/dataset/serializers/document_serializers.py:819 +#: community/apps/dataset/serializers/document_serializers.py:820 +#: community/apps/dataset/serializers/document_serializers.py:838 +#: community/apps/dataset/serializers/document_serializers.py:839 +#: community/apps/dataset/serializers/paragraph_serializers.py:57 +#: community/apps/dataset/serializers/paragraph_serializers.py:71 +#: community/apps/dataset/serializers/paragraph_serializers.py:719 +#: community/apps/dataset/serializers/paragraph_serializers.py:720 +#: community/apps/dataset/swagger_api/problem_api.py:130 +#: community/apps/function_lib/serializers/function_lib_serializer.py:110 +#: community/apps/function_lib/serializers/function_lib_serializer.py:129 +#: community/apps/function_lib/serializers/function_lib_serializer.py:139 +#: community/apps/function_lib/swagger_api/function_lib_api.py:121 +#: community/apps/function_lib/swagger_api/function_lib_api.py:122 +#: community/apps/function_lib/swagger_api/function_lib_api.py:167 +#: community/apps/function_lib/swagger_api/function_lib_api.py:168 +#: community/apps/setting/serializers/team_serializers.py:46 +#: community/apps/users/serializers/user_serializers.py:473 +#: community/apps/users/serializers/user_serializers.py:496 +#: community/apps/users/serializers/user_serializers.py:584 +#: community/apps/users/serializers/user_serializers.py:585 +#: 
community/apps/users/serializers/user_serializers.py:721 +#: community/apps/users/serializers/user_serializers.py:737 +#: community/apps/users/serializers/user_serializers.py:738 +msgid "Is active" +msgstr "是否可用" + +#: apps/xpack/swagger_api/auth_api.py:37 +#| msgid "Form Configuration" +msgid "Wecom configuration" +msgstr "企业微信配置" + +#: apps/xpack/swagger_api/auth_api.py:38 +#| msgid "Get function details" +msgid "Wecom configuration details" +msgstr "企业微信配置详情" + +#: apps/xpack/swagger_api/auth_api.py:40 apps/xpack/swagger_api/auth_api.py:53 +msgid "Corp ID" +msgstr "" + +#: apps/xpack/swagger_api/auth_api.py:41 +msgid "Agent ID" +msgstr "" + +#: apps/xpack/swagger_api/auth_api.py:42 apps/xpack/swagger_api/auth_api.py:55 +#: apps/xpack/swagger_api/auth_api.py:67 +#| msgid "App Secret is required" +msgid "App Secret" +msgstr "App Secret" + +#: apps/xpack/swagger_api/auth_api.py:43 apps/xpack/swagger_api/auth_api.py:56 +#: apps/xpack/swagger_api/auth_api.py:68 +#| msgid "Callback URL is required" +msgid "Callback URL" +msgstr "回调地址" + +#: apps/xpack/swagger_api/auth_api.py:50 +#| msgid "Configuration" +msgid "Dingtalk configuration" +msgstr "钉钉配置" + +#: apps/xpack/swagger_api/auth_api.py:51 +#| msgid "Get application details" +msgid "Dingtalk configuration details" +msgstr "钉钉配置详情" + +#: apps/xpack/swagger_api/auth_api.py:54 apps/xpack/swagger_api/auth_api.py:66 +msgid "App Key" +msgstr "" + +#: apps/xpack/swagger_api/auth_api.py:63 +#| msgid "Form Configuration" +msgid "Feishu configuration" +msgstr "飞书配置" + +#: apps/xpack/swagger_api/auth_api.py:64 +#| msgid "Get function details" +msgid "Feishu configuration details" +msgstr "飞书配置详情" + +#: apps/xpack/swagger_api/license_api.py:22 +msgid "license status" +msgstr "License 状态" + +#: apps/xpack/swagger_api/license_api.py:24 +msgid "" +"License status, possible values are: valid, invalid, expired, which " +"respectively represent: valid, invalid, expired" +msgstr ""
+"license状态,可能值为:valid、invalid、expired,分别代表:有效、无效、已过期" + +#: apps/xpack/swagger_api/license_api.py:26 +msgid "license details" +msgstr "License 详情" + +#: apps/xpack/swagger_api/license_api.py:30 +msgid "customer name" +msgstr "客户名称" + +#: apps/xpack/swagger_api/license_api.py:31 +msgid "customer name. For example: *** company." +msgstr "客户名称。例如:***公司。" + +#: apps/xpack/swagger_api/license_api.py:33 +msgid "independent software vendor" +msgstr "独立软件供应商" + +#: apps/xpack/swagger_api/license_api.py:35 +msgid "" +"Independent Software Vendor. For example: *** Company, suitable for the " +"embedded version of the product." +msgstr "独立软件供应商。例如:***公司,适用于产品的嵌入式版本。" + +#: apps/xpack/swagger_api/license_api.py:37 +msgid "Authorization deadline." +msgstr "授权截止时间" + +#: apps/xpack/swagger_api/license_api.py:39 +msgid "" +"Authorization deadline. For example: 2020-12-31, this license will expire on " +"2021-01-01." +msgstr "授权截止时间。例如:2020-12-31,此license将在2021-01-01到期。" + +#: apps/xpack/swagger_api/license_api.py:41 +msgid "product name." +msgstr "产品名称" + +#: apps/xpack/swagger_api/license_api.py:43 +msgid "Product name. For example: JumpServer, CMP, etc." +msgstr "产品名称。例如:CMP、KO、JS、MS。" + +#: apps/xpack/swagger_api/license_api.py:45 +msgid "product version." +msgstr "产品版本" + +#: apps/xpack/swagger_api/license_api.py:47 +msgid "Product version. For example: JumpServer 2.0, CMP 1.0, etc." +msgstr "产品版本。例如:Standard、Enterprise,代表标准版、企业版。" + +#: apps/xpack/swagger_api/license_api.py:49 +msgid "license version." +msgstr "License 版本" + +#: apps/xpack/swagger_api/license_api.py:51 +msgid "License version. For example: 1.0, 2.0, etc." +msgstr "License版本。例如:1.0、2.0、3.0等。" + +#: apps/xpack/swagger_api/license_api.py:53 +msgid "authorization quantity." +msgstr "认证数量" + +#: apps/xpack/swagger_api/license_api.py:55 +msgid "" +"Authorization quantity. For example: 100, this license can be used by 100 " +"users." 
+msgstr "授权数量。例如:cmp授权的cpu数量,或JS授权的资产数量。" + +#: apps/xpack/swagger_api/license_api.py:57 +msgid "Serial number, the unique identifier of the License." +msgstr "序列号,License唯一标识。" + +#: apps/xpack/swagger_api/license_api.py:59 +msgid "" +"Serial number, the unique identifier of the license. The customer support " +"portal will save the serial number after generating the license. If the " +"serial number is not recorded in the customer support portal, the license " +"will be regarded as an unknown source." +msgstr "" +"序列号,License唯一标识。客户支持门户生成License后会保存序列号,如果序列号在" +"客户支持门户中没有记录,则此License将被视为未知来源。" + +#: apps/xpack/swagger_api/license_api.py:61 +msgid "remarks" +msgstr "备注" + +#: apps/xpack/swagger_api/license_api.py:63 +msgid "" +"Remarks, record additional information, length limit is 50. For example, a " +"customer purchases two identical JumpServer subscriptions and uses them in " +"different computer rooms respectively. You can use this field to note the A " +"computer room and B computer room to help distinguish the licenses." 
+msgstr "" +"备注,记录额外的信息,长度限制50。例如某个客户买了两个同样的JumpServer订阅分" +"别在不同机房使用,可以用这个字段备注A机房B机房,帮助区别License。" + +#: apps/xpack/swagger_api/operate_log.py:12 +#: apps/xpack/swagger_api/operate_log.py:13 +#: apps/xpack/swagger_api/operate_log.py:38 +#: apps/xpack/swagger_api/operate_log.py:39 apps/xpack/views/operate_log.py:24 +#: apps/xpack/views/operate_log.py:36 +msgid "Operate log" +msgstr "操作日志" + +#: apps/xpack/swagger_api/operate_log.py:23 +#: apps/xpack/swagger_api/operate_log.py:24 +msgid "menu_label" +msgstr "操作菜单" + +#: apps/xpack/swagger_api/operate_log.py:26 +#: apps/xpack/swagger_api/operate_log.py:27 +msgid "operate_label" +msgstr "操作" + +#: apps/xpack/swagger_api/operate_log.py:42 +#: apps/xpack/swagger_api/operate_log.py:43 +#: community/apps/dataset/serializers/dataset_serializers.py:104 +msgid "id" +msgstr "" + +#: apps/xpack/swagger_api/operate_log.py:60 +#: apps/xpack/swagger_api/operate_log.py:61 +#| msgid "license details" +msgid "details" +msgstr "详情" + +#: apps/xpack/views/application_setting_views.py:22 +#: apps/xpack/views/application_setting_views.py:23 +#| msgid "Pro/Modify Application Settings" +msgid "Modify Application Settings" +msgstr "修改应用显示设置" + +#: apps/xpack/views/application_setting_views.py:24 +#: apps/xpack/views/application_setting_views.py:40 +msgid "Pro/Application/Public Access" +msgstr "专业版/应用/公共访问" + +#: apps/xpack/views/application_setting_views.py:37 +#: apps/xpack/views/application_setting_views.py:38 +#| msgid "Pro/Get Application Settings" +msgid "Get Application Settings" +msgstr "获取应用设置" + +#: apps/xpack/views/auth.py:29 +msgid "Authentication" +msgstr "认证" + +#: apps/xpack/views/auth.py:40 apps/xpack/views/auth.py:41 +msgid "Add or modify authentication configuration" +msgstr "添加或修改认证信息" + +#: apps/xpack/views/auth.py:44 apps/xpack/views/auth.py:58 +#: apps/xpack/views/auth.py:72 +msgid "System settings/login authentication" +msgstr "系统设置/登录认证" + +#: apps/xpack/views/auth.py:55 apps/xpack/views/auth.py:56 +msgid "Get 
authentication configuration" +msgstr "获取认证配置" + +#: apps/xpack/views/auth.py:69 apps/xpack/views/auth.py:70 +msgid "test connection" +msgstr "测试连接" + +#: apps/xpack/views/auth.py:96 apps/xpack/views/auth.py:97 +#: community/apps/users/views/user.py:173 +#: community/apps/users/views/user.py:174 +msgid "Log in" +msgstr "登录" + +#: apps/xpack/views/auth.py:101 apps/xpack/views/auth.py:114 +#: apps/xpack/views/auth.py:130 apps/xpack/views/auth.py:146 +#: apps/xpack/views/auth.py:207 apps/xpack/views/auth.py:224 +#: apps/xpack/views/auth.py:242 apps/xpack/views/auth.py:260 +#: apps/xpack/views/auth.py:278 apps/xpack/views/auth.py:296 +msgid "Three-party login" +msgstr "三方登录" + +#: apps/xpack/views/auth.py:111 apps/xpack/views/auth.py:112 +msgid "CAS login" +msgstr "CAS 登录" + +#: apps/xpack/views/auth.py:127 apps/xpack/views/auth.py:128 +msgid "OIDC login" +msgstr "OIDC 登录" + +#: apps/xpack/views/auth.py:143 apps/xpack/views/auth.py:144 +msgid "OAuth2 login" +msgstr "OAuth2 登录" + +#: apps/xpack/views/auth.py:160 apps/xpack/views/auth.py:161 +#: apps/xpack/views/auth.py:162 apps/xpack/views/auth.py:170 +#: apps/xpack/views/auth.py:194 apps/xpack/views/auth.py:195 +#: apps/xpack/views/auth.py:196 +msgid "Get platform information" +msgstr "获取平台信息" + +#: apps/xpack/views/auth.py:167 apps/xpack/views/auth.py:168 +msgid "Modify platform information" +msgstr "修改平台信息" + +#: apps/xpack/views/auth.py:175 apps/xpack/views/auth.py:176 +#: apps/xpack/views/auth.py:178 +msgid "Test platform connection" +msgstr "测试平台连接" + +#: apps/xpack/views/auth.py:185 apps/xpack/views/auth.py:186 +msgid "Scan code login type" +msgstr "扫码登录类型" + +#: apps/xpack/views/auth.py:187 +msgid "Scan code to log in" +msgstr "扫码登录" + +#: apps/xpack/views/auth.py:204 apps/xpack/views/auth.py:205 +msgid "DingTalk callback" +msgstr "钉钉回调" + +#: apps/xpack/views/auth.py:221 apps/xpack/views/auth.py:222 +#| msgid "DingTalk callback" +msgid "DingTalk OAuth2 callback" +msgstr "钉钉回调" + +#: apps/xpack/views/auth.py:239 
apps/xpack/views/auth.py:240 +msgid "Lark callback" +msgstr "飞书回调" + +#: apps/xpack/views/auth.py:257 apps/xpack/views/auth.py:258 +#| msgid "Lark callback" +msgid "Lark OAuth2 callback" +msgstr "飞书 OAuth2 回调" + +#: apps/xpack/views/auth.py:275 apps/xpack/views/auth.py:276 +msgid "Wecom callback" +msgstr "企业微信回调" + +#: apps/xpack/views/auth.py:293 apps/xpack/views/auth.py:294 +#| msgid "Wecom callback" +msgid "Wecom OAuth2 callback" +msgstr "企业微信 OAuth2 回调" + +#: apps/xpack/views/dataset_lark_views.py:22 +#: apps/xpack/views/dataset_lark_views.py:23 +#| msgid "Create a knowledge base" +msgid "Create a lark knowledge base" +msgstr "创建飞书知识库" + +#: apps/xpack/views/dataset_lark_views.py:26 +#: apps/xpack/views/dataset_lark_views.py:40 +#: community/apps/dataset/views/dataset.py:39 +#: community/apps/dataset/views/dataset.py:62 +#: community/apps/dataset/views/dataset.py:82 +#: community/apps/dataset/views/dataset.py:98 +#: community/apps/dataset/views/dataset.py:109 +#: community/apps/dataset/views/dataset.py:123 +#: community/apps/dataset/views/dataset.py:137 +#: community/apps/dataset/views/dataset.py:157 +#: community/apps/dataset/views/dataset.py:172 +#: community/apps/dataset/views/dataset.py:187 +#: community/apps/dataset/views/dataset.py:202 +#: community/apps/dataset/views/dataset.py:217 +#: community/apps/dataset/views/dataset.py:231 +#: community/apps/dataset/views/dataset.py:250 +msgid "Knowledge Base" +msgstr "知识库" + +#: apps/xpack/views/dataset_lark_views.py:36 +#: apps/xpack/views/dataset_lark_views.py:37 +#| msgid "Create a knowledge base" +msgid "Update the lark knowledge base" +msgstr "更新飞书知识库" + +#: apps/xpack/views/dataset_lark_views.py:53 +#: apps/xpack/views/dataset_lark_views.py:54 +#| msgid "Get a list of applications available in the knowledge base" +msgid "Get the list of documents in the lark knowledge base" +msgstr "获取飞书知识库文档列表" + +#: apps/xpack/views/dataset_lark_views.py:57 +#: apps/xpack/views/dataset_lark_views.py:74 +#: 
apps/xpack/views/dataset_lark_views.py:90 +#: apps/xpack/views/dataset_lark_views.py:110 +#: community/apps/dataset/views/document.py:34 +#: community/apps/dataset/views/document.py:47 +#: community/apps/dataset/views/document.py:62 +#: community/apps/dataset/views/document.py:81 +#: community/apps/dataset/views/document.py:102 +#: community/apps/dataset/views/document.py:123 +#: community/apps/dataset/views/document.py:137 +#: community/apps/dataset/views/document.py:158 +#: community/apps/dataset/views/document.py:178 +#: community/apps/dataset/views/document.py:193 +#: community/apps/dataset/views/document.py:208 +#: community/apps/dataset/views/document.py:224 +#: community/apps/dataset/views/document.py:244 +#: community/apps/dataset/views/document.py:265 +#: community/apps/dataset/views/document.py:284 +#: community/apps/dataset/views/document.py:306 +#: community/apps/dataset/views/document.py:324 +#: community/apps/dataset/views/document.py:349 +#: community/apps/dataset/views/document.py:364 +#: community/apps/dataset/views/document.py:380 +#: community/apps/dataset/views/document.py:396 +#: community/apps/dataset/views/document.py:413 +#: community/apps/dataset/views/document.py:429 +#: community/apps/dataset/views/document.py:442 +#: community/apps/dataset/views/document.py:467 +msgid "Knowledge Base/Documentation" +msgstr "知识库/文档" + +#: apps/xpack/views/dataset_lark_views.py:70 +#: apps/xpack/views/dataset_lark_views.py:71 +#| msgid "Create a knowledge base" +msgid "Import documents to the lark knowledge base" +msgstr "导入文档到飞书知识库" + +#: apps/xpack/views/dataset_lark_views.py:86 +#: apps/xpack/views/dataset_lark_views.py:87 +#| msgid "Create document" +msgid "Synchronize lark document" +msgstr "同步飞书文档" + +#: apps/xpack/views/dataset_lark_views.py:104 +#: apps/xpack/views/dataset_lark_views.py:105 +#| msgid "Batch sync documents" +msgid "Batch sync lark documents" +msgstr "批量同步飞书文档" + +#: apps/xpack/views/display.py:17 apps/xpack/views/display.py:18 +msgid 
"View appearance settings" +msgstr "查看外观设置" + +#: apps/xpack/views/display.py:19 apps/xpack/views/display.py:33 +msgid "System Settings/Appearance Settings" +msgstr "系统设置/外观设置" + +#: apps/xpack/views/display.py:30 apps/xpack/views/display.py:31 +msgid "Update appearance settings" +msgstr "更新外观设置" + +#: apps/xpack/views/license.py:29 apps/xpack/views/license.py:30 +msgid "Get license information" +msgstr "获取 License 信息" + +#: apps/xpack/views/license.py:38 apps/xpack/views/license.py:39 +msgid "Update license information" +msgstr "更新 License 信息" + +#: apps/xpack/views/license.py:44 +msgid "upload file" +msgstr "上传文件" + +#: apps/xpack/views/operate_log.py:21 apps/xpack/views/operate_log.py:22 +#| msgid "Get model parameter form" +msgid "Get menu operate log" +msgstr "获取菜单操作日志" + +#: apps/xpack/views/operate_log.py:33 apps/xpack/views/operate_log.py:34 +#| msgid "Get model parameter form" +msgid "Get operate log" +msgstr "获取操作日志" + +#: apps/xpack/views/platform.py:56 apps/xpack/views/platform.py:57 +msgid "Get platform configuration" +msgstr "获取平台配置" + +#: apps/xpack/views/platform.py:59 apps/xpack/views/platform.py:67 +msgid "Application/application access" +msgstr "应用/应用访问" + +#: apps/xpack/views/platform.py:63 apps/xpack/views/platform.py:64 +msgid "Update platform configuration" +msgstr "更新平台配置" + +#: apps/xpack/views/platform.py:80 apps/xpack/views/platform.py:81 +msgid "Get platform status" +msgstr "获取平台状态" + +#: apps/xpack/views/platform.py:86 +msgid "Application/Get platform status" +msgstr "应用/获取平台状态" + +#: apps/xpack/views/platform.py:96 apps/xpack/views/platform.py:97 +msgid "Update platform status" +msgstr "更新平台状态" + +#: apps/xpack/views/platform.py:103 +msgid "Application/Update platform status" +msgstr "应用/更新平台状态" + +#: apps/xpack/views/system_api_key_views.py:28 +#: apps/xpack/views/system_api_key_views.py:29 +msgid "Get personal system API_KEY list" +msgstr "获取个人系统 API_KEY 列表" + +#: apps/xpack/views/system_api_key_views.py:30 +#: 
apps/xpack/views/system_api_key_views.py:39 +#: apps/xpack/views/system_api_key_views.py:53 +#: apps/xpack/views/system_api_key_views.py:62 +msgid "Personal system/API_KEY" +msgstr "个人系统/API_KEY" + +#: apps/xpack/views/system_api_key_views.py:37 +#: apps/xpack/views/system_api_key_views.py:38 +msgid "Update personal system API_KEY" +msgstr "更新个人系统 API_KEY" + +#: apps/xpack/views/system_api_key_views.py:51 +#: apps/xpack/views/system_api_key_views.py:52 +msgid "Delete personal system API_KEY" +msgstr "删除个人系统 API_KEY" + +#: apps/xpack/views/system_api_key_views.py:60 +#: apps/xpack/views/system_api_key_views.py:61 +msgid "Add personal system API_KEY" +msgstr "添加个人系统 API_KEY" + +#: community/apps/application/chat_pipeline/step/chat_step/i_chat_step.py:27 +msgid "Model type error" +msgstr "模型类型错误" + +#: community/apps/application/chat_pipeline/step/chat_step/i_chat_step.py:37 +#: community/apps/common/field/common.py:21 +#: community/apps/common/field/common.py:34 +msgid "Message type error" +msgstr "消息类型错误" + +#: community/apps/application/chat_pipeline/step/chat_step/i_chat_step.py:56 +msgid "Conversation list" +msgstr "对话列表" + +#: community/apps/application/chat_pipeline/step/chat_step/i_chat_step.py:57 +#: community/apps/application/chat_pipeline/step/reset_problem_step/i_reset_problem_step.py:30 +#: community/apps/application/flow/step_node/ai_chat_step_node/i_chat_node.py:19 +#: community/apps/application/flow/step_node/image_generate_step_node/i_image_generate_node.py:13 +#: community/apps/application/flow/step_node/image_understand_step_node/i_image_understand_node.py:13 +#: community/apps/application/flow/step_node/question_node/i_question_node.py:19 +#: community/apps/application/flow/step_node/speech_to_text_step_node/i_speech_to_text_node.py:13 +#: community/apps/application/flow/step_node/text_to_speech_step_node/i_text_to_speech_node.py:13 +#: community/apps/application/serializers/application_serializers.py:72 +#: 
community/apps/application/serializers/chat_serializers.py:365 +#: community/apps/application/swagger_api/application_api.py:53 +#: community/apps/application/swagger_api/application_api.py:185 +#: community/apps/application/swagger_api/application_api.py:186 +#: community/apps/application/swagger_api/application_api.py:334 +#: community/apps/application/swagger_api/application_api.py:335 +msgid "Model id" +msgstr "模型 id" + +#: community/apps/application/chat_pipeline/step/chat_step/i_chat_step.py:59 +#: community/apps/application/chat_pipeline/step/generate_human_message_step/i_generate_human_message_step.py:30 +msgid "Paragraph List" +msgstr "段落列表" + +#: community/apps/application/chat_pipeline/step/chat_step/i_chat_step.py:61 +#: community/apps/application/serializers/chat_message_serializers.py:201 +#: community/apps/application/serializers/chat_message_serializers.py:253 +#: community/apps/application/serializers/chat_serializers.py:76 +#: community/apps/application/serializers/chat_serializers.py:240 +#: community/apps/application/serializers/chat_serializers.py:439 +#: community/apps/application/serializers/chat_serializers.py:531 +#: community/apps/application/serializers/chat_serializers.py:587 +#: community/apps/application/serializers/chat_serializers.py:613 +#: community/apps/application/serializers/chat_serializers.py:672 +#: community/apps/application/serializers/chat_serializers.py:712 +#: community/apps/application/swagger_api/chat_api.py:38 +#: community/apps/application/swagger_api/chat_api.py:76 +#: community/apps/application/swagger_api/chat_api.py:171 +#: community/apps/application/swagger_api/chat_api.py:172 +#: community/apps/application/swagger_api/chat_api.py:286 +#: community/apps/application/swagger_api/chat_api.py:355 +#: community/apps/application/swagger_api/chat_api.py:432 +#: community/apps/application/swagger_api/chat_api.py:465 +#: community/apps/application/views/chat_views.py:482 +msgid "Conversation ID" +msgstr "对话 ID" + +#: 
community/apps/application/chat_pipeline/step/chat_step/i_chat_step.py:63 +#: community/apps/application/flow/step_node/application_node/i_application_node.py:15 +#: community/apps/application/serializers/chat_message_serializers.py:254 +#: community/apps/application/serializers/chat_serializers.py:240 +msgid "User Questions" +msgstr "用户问题" + +#: community/apps/application/chat_pipeline/step/chat_step/i_chat_step.py:66 +msgid "Post-processor" +msgstr "后置处理器" + +#: community/apps/application/chat_pipeline/step/chat_step/i_chat_step.py:69 +msgid "Completion Question" +msgstr "补全问题" + +#: community/apps/application/chat_pipeline/step/chat_step/i_chat_step.py:71 +#: community/apps/application/serializers/chat_message_serializers.py:203 +msgid "Streaming Output" +msgstr "流式输出" + +#: community/apps/application/chat_pipeline/step/chat_step/i_chat_step.py:72 +#: community/apps/application/serializers/chat_message_serializers.py:208 +#: community/apps/application/serializers/chat_message_serializers.py:271 +#: community/apps/application/serializers/chat_serializers.py:103 +msgid "Client id" +msgstr "客户端 id" + +#: community/apps/application/chat_pipeline/step/chat_step/i_chat_step.py:73 +#: community/apps/application/serializers/chat_message_serializers.py:209 +#: community/apps/application/serializers/chat_message_serializers.py:272 +msgid "Client Type" +msgstr "客户端类型" + +#: community/apps/application/chat_pipeline/step/chat_step/i_chat_step.py:76 +#: community/apps/application/chat_pipeline/step/generate_human_message_step/i_generate_human_message_step.py:46 +#: community/apps/application/swagger_api/application_api.py:262 +msgid "No reference segment settings" +msgstr "未查询到引用分段" + +#: community/apps/application/chat_pipeline/step/chat_step/i_chat_step.py:78 +#: community/apps/application/chat_pipeline/step/reset_problem_step/i_reset_problem_step.py:31 +#: community/apps/application/chat_pipeline/step/search_dataset_step/i_search_dataset_step.py:48 +#: 
community/apps/application/serializers/application_serializers.py:70 +#: community/apps/application/serializers/application_serializers.py:511 +#: community/apps/application/serializers/application_serializers.py:582 +#: community/apps/application/serializers/application_serializers.py:627 +#: community/apps/application/serializers/application_serializers.py:697 +#: community/apps/application/serializers/application_serializers.py:718 +#: community/apps/application/serializers/application_serializers.py:792 +#: community/apps/application/serializers/application_serializers.py:1228 +#: community/apps/application/serializers/chat_serializers.py:118 +#: community/apps/application/serializers/chat_serializers.py:285 +#: community/apps/application/serializers/chat_serializers.py:338 +#: community/apps/application/serializers/chat_serializers.py:360 +#: community/apps/function_lib/serializers/function_lib_serializer.py:332 +#: community/apps/function_lib/serializers/function_lib_serializer.py:358 +#: community/apps/function_lib/serializers/function_lib_serializer.py:387 +msgid "User ID" +msgstr "用户 ID" + +#: community/apps/application/chat_pipeline/step/chat_step/i_chat_step.py:81 +#| msgid "Model parameter settings" +msgid "Model settings" +msgstr "模型参数设置" + +#: community/apps/application/chat_pipeline/step/chat_step/i_chat_step.py:84 +#: community/apps/application/flow/step_node/ai_chat_step_node/i_chat_node.py:31 +#: community/apps/application/flow/step_node/image_generate_step_node/i_image_generate_node.py:29 +#: community/apps/application/flow/step_node/image_understand_step_node/i_image_understand_node.py:27 +#: community/apps/application/flow/step_node/question_node/i_question_node.py:27 +#: community/apps/application/flow/step_node/text_to_speech_step_node/i_text_to_speech_node.py:19 +#: community/apps/application/serializers/chat_serializers.py:382 +msgid "Model parameter settings" +msgstr "模型参数设置" + +#: 
community/apps/application/chat_pipeline/step/chat_step/i_chat_step.py:91 +msgid "message type error" +msgstr "消息类型错误" + +#: community/apps/application/chat_pipeline/step/chat_step/impl/base_chat_step.py:226 +#: community/apps/application/chat_pipeline/step/chat_step/impl/base_chat_step.py:271 +msgid "" +"Sorry, the AI model is not configured. Please go to the application to set " +"up the AI model first." +msgstr "抱歉,没有配置 AI 模型,请先去应用中设置 AI 模型。" + +#: community/apps/application/chat_pipeline/step/generate_human_message_step/i_generate_human_message_step.py:27 +#: community/apps/application/chat_pipeline/step/reset_problem_step/i_reset_problem_step.py:25 +#: community/apps/application/chat_pipeline/step/search_dataset_step/i_search_dataset_step.py:25 +#: community/apps/application/serializers/chat_serializers.py:579 +msgid "question" +msgstr "问题" + +#: community/apps/application/chat_pipeline/step/generate_human_message_step/i_generate_human_message_step.py:33 +#: community/apps/application/chat_pipeline/step/reset_problem_step/i_reset_problem_step.py:28 +msgid "History Questions" +msgstr "历史对答" + +#: community/apps/application/chat_pipeline/step/generate_human_message_step/i_generate_human_message_step.py:35 +#: community/apps/application/flow/step_node/ai_chat_step_node/i_chat_node.py:25 +#: community/apps/application/flow/step_node/image_generate_step_node/i_image_generate_node.py:21 +#: community/apps/application/flow/step_node/image_understand_step_node/i_image_understand_node.py:18 +#: community/apps/application/flow/step_node/question_node/i_question_node.py:24 +#: community/apps/application/swagger_api/application_api.py:55 +#: community/apps/application/swagger_api/application_api.py:56 +#: community/apps/application/swagger_api/application_api.py:188 +#: community/apps/application/swagger_api/application_api.py:189 +#: community/apps/application/swagger_api/application_api.py:337 +#: community/apps/application/swagger_api/application_api.py:338 +msgid 
"Number of multi-round conversations" +msgstr "多轮对话数量" + +#: community/apps/application/chat_pipeline/step/generate_human_message_step/i_generate_human_message_step.py:38 +msgid "Maximum length of the knowledge base paragraph" +msgstr "最大携带知识库段落长度" + +#: community/apps/application/chat_pipeline/step/generate_human_message_step/i_generate_human_message_step.py:40 +#: community/apps/application/flow/step_node/ai_chat_step_node/i_chat_node.py:22 +#: community/apps/application/flow/step_node/image_understand_step_node/i_image_understand_node.py:16 +#: community/apps/application/flow/step_node/question_node/i_question_node.py:22 +#: community/apps/application/serializers/application_serializers.py:108 +#: community/apps/application/serializers/application_serializers.py:138 +#: community/apps/application/swagger_api/application_api.py:286 +#: community/apps/application/swagger_api/application_api.py:287 +msgid "Prompt word" +msgstr "提示词" + +#: community/apps/application/chat_pipeline/step/generate_human_message_step/i_generate_human_message_step.py:42 +#: community/apps/application/swagger_api/application_api.py:300 +#: community/apps/application/swagger_api/application_api.py:301 +msgid "System prompt words (role)" +msgstr "系统提示词(角色)" + +#: community/apps/application/chat_pipeline/step/generate_human_message_step/i_generate_human_message_step.py:44 +msgid "Completion problem" +msgstr "补齐问题" + +#: community/apps/application/chat_pipeline/step/reset_problem_step/i_reset_problem_step.py:34 +#: community/apps/application/serializers/application_serializers.py:237 +msgid "Question completion prompt" +msgstr "问题补全提示词" + +#: community/apps/application/chat_pipeline/step/reset_problem_step/impl/base_reset_problem_step.py:20 +#: community/apps/application/serializers/chat_message_serializers.py:99 +#: community/apps/application/swagger_api/application_api.py:210 +#: community/apps/application/swagger_api/application_api.py:355 +#, python-brace-format +msgid "" +"() contains the 
user's question. Answer the guessed user's question based on " +"the context ({question}) Requirement: Output a complete question and put it " +"in the tag" +msgstr "" +"()里面是用户问题,根据上下文回答揣测用户问题({question}) 要求: 输出一个补全问" +"题,并且放在标签中" + +#: community/apps/application/chat_pipeline/step/search_dataset_step/i_search_dataset_step.py:28 +msgid "System completes question text" +msgstr "系统补全问题文本" + +#: community/apps/application/chat_pipeline/step/search_dataset_step/i_search_dataset_step.py:31 +#: community/apps/application/flow/step_node/search_dataset_node/i_search_dataset_node.py:39 +msgid "Dataset id list" +msgstr "知识库 ID 列表" + +#: community/apps/application/chat_pipeline/step/search_dataset_step/i_search_dataset_step.py:34 +msgid "List of document ids to exclude" +msgstr "要排除的文档 ID 列表" + +#: community/apps/application/chat_pipeline/step/search_dataset_step/i_search_dataset_step.py:37 +msgid "List of exclusion vector ids" +msgstr "排除向量 ID 列表" + +#: community/apps/application/chat_pipeline/step/search_dataset_step/i_search_dataset_step.py:40 +#: community/apps/application/flow/step_node/reranker_node/i_reranker_node.py:21 +#: community/apps/application/flow/step_node/reranker_node/i_reranker_node.py:24 +#: community/apps/application/flow/step_node/search_dataset_node/i_search_dataset_node.py:24 +#: community/apps/application/serializers/application_serializers.py:121 +#: community/apps/application/serializers/chat_serializers.py:243 +#: community/apps/application/swagger_api/application_api.py:249 +#: community/apps/application/swagger_api/application_api.py:250 +msgid "Reference segment number" +msgstr "引用分段数" + +#: community/apps/application/chat_pipeline/step/search_dataset_step/i_search_dataset_step.py:43 +#: community/apps/application/swagger_api/application_api.py:252 +#: community/apps/application/swagger_api/application_api.py:253 +msgid "Similarity" +msgstr "相似度" + +#: community/apps/application/chat_pipeline/step/search_dataset_step/i_search_dataset_step.py:46 
+#: community/apps/application/flow/step_node/search_dataset_node/i_search_dataset_node.py:30 +#: community/apps/application/serializers/application_serializers.py:129 +#: community/apps/application/serializers/application_serializers.py:590 +#: community/apps/dataset/serializers/dataset_serializers.py:576 +#| msgid "Retrieval pattern embedding|keywords|blend" +msgid "The type only supports embedding|keywords|blend" +msgstr "类型只支持 embedding|keywords|blend" + +#: community/apps/application/chat_pipeline/step/search_dataset_step/i_search_dataset_step.py:47 +#: community/apps/application/flow/step_node/search_dataset_node/i_search_dataset_node.py:31 +#: community/apps/application/serializers/application_serializers.py:130 +#: community/apps/application/serializers/application_serializers.py:591 +#: community/apps/application/swagger_api/application_api.py:259 +msgid "Retrieval Mode" +msgstr "检索方式" + +#: community/apps/application/chat_pipeline/step/search_dataset_step/impl/base_search_dataset_step.py:31 +#: community/apps/application/serializers/application_serializers.py:84 +#: community/apps/application/serializers/application_serializers.py:1026 +#: community/apps/application/serializers/application_serializers.py:1036 +#: community/apps/application/serializers/application_serializers.py:1046 +#: community/apps/dataset/serializers/dataset_serializers.py:801 +#: community/apps/dataset/serializers/document_serializers.py:746 +#: community/apps/setting/models_provider/tools.py:23 +msgid "Model does not exist" +msgstr "模型不存在" + +#: community/apps/application/chat_pipeline/step/search_dataset_step/impl/base_search_dataset_step.py:33 +#, python-brace-format +msgid "No permission to use this model {model_name}" +msgstr "无权使用此模型 {model_name}" + +#: community/apps/application/chat_pipeline/step/search_dataset_step/impl/base_search_dataset_step.py:41 +msgid "" +"The vector model of the associated knowledge base is inconsistent and the " +"segmentation cannot be recalled." 
+msgstr "关联知识库的向量模型不一致,无法召回分段。" + +#: community/apps/application/chat_pipeline/step/search_dataset_step/impl/base_search_dataset_step.py:43 +msgid "The knowledge base setting is wrong, please reset the knowledge base" +msgstr "知识库设置错误,请重新设置知识库!" + +#: community/apps/application/flow/step_node/ai_chat_step_node/i_chat_node.py:21 +#: community/apps/application/flow/step_node/image_understand_step_node/i_image_understand_node.py:15 +#: community/apps/application/flow/step_node/question_node/i_question_node.py:21 +msgid "Role Setting" +msgstr "角色设置" + +#: community/apps/application/flow/step_node/ai_chat_step_node/i_chat_node.py:28 +#: community/apps/application/flow/step_node/direct_reply_node/i_reply_node.py:24 +#: community/apps/application/flow/step_node/function_lib_node/i_function_lib_node.py:29 +#: community/apps/application/flow/step_node/function_node/i_function_node.py:47 +#: community/apps/application/flow/step_node/image_generate_step_node/i_image_generate_node.py:26 +#: community/apps/application/flow/step_node/image_understand_step_node/i_image_understand_node.py:22 +#: community/apps/application/flow/step_node/question_node/i_question_node.py:26 +#: community/apps/application/flow/step_node/speech_to_text_step_node/i_speech_to_text_node.py:15 +#: community/apps/application/flow/step_node/text_to_speech_step_node/i_text_to_speech_node.py:15 +msgid "Whether to return content" +msgstr "是否返回内容" + +#: community/apps/application/flow/step_node/ai_chat_step_node/i_chat_node.py:35 +msgid "Context Type" +msgstr "内容类型" + +#: community/apps/application/flow/step_node/application_node/i_application_node.py:16 +msgid "API Input Fields" +msgstr "api 输入字段" + +#: community/apps/application/flow/step_node/application_node/i_application_node.py:18 +msgid "User Input Fields" +msgstr "用户输入字段" + +#: community/apps/application/flow/step_node/application_node/i_application_node.py:19 +#: 
community/apps/application/flow/step_node/image_understand_step_node/i_image_understand_node.py:24 +#: community/apps/application/serializers/application_serializers.py:698 +#: community/apps/application/serializers/chat_message_serializers.py:274 +#: community/apps/function_lib/serializers/function_lib_serializer.py:359 +msgid "picture" +msgstr "图片" + +#: community/apps/application/flow/step_node/application_node/i_application_node.py:20 +#: community/apps/application/flow/step_node/document_extract_node/i_document_extract_node.py:13 +#: community/apps/application/serializers/chat_message_serializers.py:275 +msgid "document" +msgstr "文档" + +#: community/apps/application/flow/step_node/application_node/i_application_node.py:21 +#: community/apps/application/serializers/chat_message_serializers.py:276 +msgid "Audio" +msgstr "音频" + +#: community/apps/application/flow/step_node/application_node/i_application_node.py:23 +#: community/apps/application/serializers/chat_message_serializers.py:278 +msgid "Child Nodes" +msgstr "子节点" + +#: community/apps/application/flow/step_node/application_node/i_application_node.py:24 +#: community/apps/application/flow/step_node/form_node/i_form_node.py:21 +msgid "Form Data" +msgstr "表单数据" + +#: community/apps/application/flow/step_node/application_node/i_application_node.py:58 +msgid "" +"Parameter value error: The uploaded document lacks file_id, and the document " +"upload fails" +msgstr "参数值错误: 上传的文档中缺少 file_id,文档上传失败" + +#: community/apps/application/flow/step_node/application_node/i_application_node.py:67 +msgid "" +"Parameter value error: The uploaded image lacks file_id, and the image " +"upload fails" +msgstr "参数值错误: 上传的图片中缺少 file_id,图片上传失败" + +#: community/apps/application/flow/step_node/application_node/i_application_node.py:77 +msgid "" +"Parameter value error: The uploaded audio lacks file_id, and the audio " +"upload fails." 
+msgstr "参数值错误: 上传的音频中缺少file_id,音频上传失败" + +#: community/apps/application/flow/step_node/condition_node/i_condition_node.py:19 +#: community/apps/application/serializers/chat_serializers.py:124 +msgid "Comparator" +msgstr "比较器" + +#: community/apps/application/flow/step_node/condition_node/i_condition_node.py:20 +#: community/apps/application/swagger_api/application_api.py:271 +msgid "value" +msgstr "值" + +#: community/apps/application/flow/step_node/condition_node/i_condition_node.py:21 +msgid "Fields" +msgstr "字段" + +#: community/apps/application/flow/step_node/condition_node/i_condition_node.py:25 +msgid "Branch id" +msgstr "分支 id" + +#: community/apps/application/flow/step_node/condition_node/i_condition_node.py:26 +msgid "Branch Type" +msgstr "分支类型" + +#: community/apps/application/flow/step_node/condition_node/i_condition_node.py:27 +msgid "Condition or|and" +msgstr "条件 or|and" + +#: community/apps/application/flow/step_node/direct_reply_node/i_reply_node.py:20 +msgid "Response Type" +msgstr "响应类型" + +#: community/apps/application/flow/step_node/direct_reply_node/i_reply_node.py:21 +#: community/apps/application/flow/step_node/variable_assign_node/i_variable_assign_node.py:14 +msgid "Reference Field" +msgstr "引用字段" + +#: community/apps/application/flow/step_node/direct_reply_node/i_reply_node.py:23 +msgid "Direct answer content" +msgstr "直接回答内容" + +#: community/apps/application/flow/step_node/direct_reply_node/i_reply_node.py:30 +msgid "Reference field cannot be empty" +msgstr "引用字段不能为空" + +#: community/apps/application/flow/step_node/direct_reply_node/i_reply_node.py:32 +msgid "Reference field error" +msgstr "引用字段错误" + +#: community/apps/application/flow/step_node/direct_reply_node/i_reply_node.py:35 +msgid "Content cannot be empty" +msgstr "内容不能为空" + +#: community/apps/application/flow/step_node/form_node/i_form_node.py:19 +msgid "Form Configuration" +msgstr "表单配置" + +#: community/apps/application/flow/step_node/form_node/i_form_node.py:20 +msgid "Form 
output content" +msgstr "表单输出内容" + +#: community/apps/application/flow/step_node/function_lib_node/i_function_lib_node.py:22 +#: community/apps/application/flow/step_node/function_node/i_function_node.py:24 +msgid "Variable Name" +msgstr "变量名" + +#: community/apps/application/flow/step_node/function_lib_node/i_function_lib_node.py:23 +#: community/apps/application/flow/step_node/function_node/i_function_node.py:34 +msgid "Variable Value" +msgstr "变量值" + +#: community/apps/application/flow/step_node/function_lib_node/i_function_lib_node.py:27 +msgid "Library ID" +msgstr "函数库id" + +#: community/apps/application/flow/step_node/function_lib_node/i_function_lib_node.py:35 +msgid "The function has been deleted" +msgstr "函数已被删除" + +#: community/apps/application/flow/step_node/function_node/i_function_node.py:25 +msgid "Is this field required" +msgstr "字段是否必填" + +#: community/apps/application/flow/step_node/function_node/i_function_node.py:28 +msgid "The field only supports string|int|dict|array|float" +msgstr "字段只支持 string|int|dict|array|float" + +#: community/apps/application/flow/step_node/function_node/i_function_node.py:30 +#: community/apps/function_lib/serializers/function_lib_serializer.py:76 +#: community/apps/function_lib/swagger_api/function_lib_api.py:98 +#: community/apps/function_lib/swagger_api/function_lib_api.py:144 +#: community/apps/function_lib/swagger_api/function_lib_api.py:190 +msgid "source" +msgstr "来源" + +#: community/apps/application/flow/step_node/function_node/i_function_node.py:32 +#: community/apps/function_lib/serializers/function_lib_serializer.py:78 +msgid "The field only supports custom|reference" +msgstr "字段只支持 custom|reference" + +#: community/apps/application/flow/step_node/function_node/i_function_node.py:40 +#, python-brace-format +msgid "{field}, this field is required." 
+msgstr "{field}, 此字段为必填项。" + +#: community/apps/application/flow/step_node/function_node/i_function_node.py:46 +#: community/apps/function_lib/views/function_lib_views.py:131 +#: community/apps/function_lib/views/function_lib_views.py:145 +msgid "function" +msgstr "函数" + +#: community/apps/application/flow/step_node/image_generate_step_node/i_image_generate_node.py:15 +msgid "Prompt word (positive)" +msgstr "提示词(正向)" + +#: community/apps/application/flow/step_node/image_generate_step_node/i_image_generate_node.py:17 +msgid "Prompt word (negative)" +msgstr "提示词(负向)" + +#: community/apps/application/flow/step_node/image_generate_step_node/i_image_generate_node.py:24 +#: community/apps/application/flow/step_node/image_understand_step_node/i_image_understand_node.py:20 +msgid "Conversation storage type" +msgstr "对话存储类型" + +#: community/apps/application/flow/step_node/reranker_node/i_reranker_node.py:26 +#: community/apps/application/flow/step_node/search_dataset_node/i_search_dataset_node.py:33 +msgid "Maximum number of words in a quoted segment" +msgstr "最大引用分段字数" + +#: community/apps/application/flow/step_node/search_dataset_node/i_search_dataset_node.py:27 +#: community/apps/common/swagger_api/common_api.py:36 +#: community/apps/dataset/serializers/dataset_serializers.py:573 +msgid "similarity" +msgstr "相似度" + +#: community/apps/application/flow/step_node/speech_to_text_step_node/i_speech_to_text_node.py:17 +msgid "The audio file cannot be empty" +msgstr "音频文件不能为空" + +#: community/apps/application/flow/step_node/speech_to_text_step_node/i_speech_to_text_node.py:31 +msgid "" +"Parameter value error: The uploaded audio lacks file_id, and the audio " +"upload fails" +msgstr "参数值错误:上传的音频缺少file_id,音频上传失败" + +#: community/apps/application/flow/step_node/text_to_speech_step_node/i_text_to_speech_node.py:17 +msgid "Text content" +msgstr "文本内容" + +#: community/apps/application/flow/workflow_manage.py:107 +#, python-brace-format +msgid "The branch {branch} of the {node} node 
needs to be connected" +msgstr "{node} 节点的{branch}分支需要连接" + +#: community/apps/application/flow/workflow_manage.py:113 +#, python-brace-format +msgid "{node} Nodes cannot be considered as end nodes" +msgstr "{node} 节点不能当做结束节点" + +#: community/apps/application/flow/workflow_manage.py:123 +msgid "The next node that does not exist" +msgstr "不存在的下一个节点" + +#: community/apps/application/flow/workflow_manage.py:137 +msgid "The starting node is required" +msgstr "开始节点必填" + +#: community/apps/application/flow/workflow_manage.py:139 +msgid "There can only be one starting node" +msgstr "开始节点只能有一个" + +#: community/apps/application/flow/workflow_manage.py:147 +#, python-brace-format +msgid "The node {node} model does not exist" +msgstr "节点{node} 模型不存在 " + +#: community/apps/application/flow/workflow_manage.py:157 +#, python-brace-format +msgid "Node {node} is unavailable" +msgstr "节点{node} 不可用" + +#: community/apps/application/flow/workflow_manage.py:163 +#, python-brace-format +msgid "The library ID of node {node} cannot be empty" +msgstr "节点{node} 函数库id不能为空" + +#: community/apps/application/flow/workflow_manage.py:166 +#, python-brace-format +msgid "The function library for node {node} is not available" +msgstr "节点{node} 函数库不可用" + +#: community/apps/application/flow/workflow_manage.py:172 +msgid "Basic information node is required" +msgstr "基本信息节点必填" + +#: community/apps/application/flow/workflow_manage.py:174 +msgid "There can only be one basic information node" +msgstr "基本信息节点只能有一个" + +#: community/apps/application/serializers/application_serializers.py:75 +#: community/apps/application/serializers/chat_serializers.py:618 +#: community/apps/application/serializers/chat_serializers.py:677 +#: community/apps/application/serializers/chat_serializers.py:709 +#: community/apps/application/swagger_api/chat_api.py:365 +#: community/apps/application/swagger_api/chat_api.py:393 +#: community/apps/application/swagger_api/chat_api.py:394 +#: 
community/apps/application/swagger_api/chat_api.py:415 +#: community/apps/application/swagger_api/chat_api.py:494 +#: community/apps/application/swagger_api/chat_api.py:495 +msgid "Knowledge base id" +msgstr "知识库 id" + +#: community/apps/application/serializers/application_serializers.py:76 +msgid "Knowledge Base List" +msgstr "知识库列表" + +#: community/apps/application/serializers/application_serializers.py:90 +msgid "The knowledge base id does not exist" +msgstr "知识库 id 不存在" + +#: community/apps/application/serializers/application_serializers.py:107 +msgid "No reference status" +msgstr "无引用状态" + +#: community/apps/application/serializers/application_serializers.py:123 +msgid "Acquaintance" +msgstr "相似度" + +#: community/apps/application/serializers/application_serializers.py:126 +#: community/apps/application/swagger_api/application_api.py:256 +#: community/apps/application/swagger_api/application_api.py:257 +msgid "Maximum number of quoted characters" +msgstr "最多引用字符数" + +#: community/apps/application/serializers/application_serializers.py:133 +msgid "Segment settings not referenced" +msgstr "未引用分段设置" + +#: community/apps/application/serializers/application_serializers.py:140 +msgid "Role prompts" +msgstr "角色提示词" + +#: community/apps/application/serializers/application_serializers.py:142 +#: community/apps/application/swagger_api/application_api.py:303 +#: community/apps/application/swagger_api/application_api.py:305 +msgid "No citation segmentation prompt" +msgstr "无引用分段提示词" + +#: community/apps/application/serializers/application_serializers.py:144 +msgid "Thinking process switch" +msgstr "思考过程开关" + +#: community/apps/application/serializers/application_serializers.py:148 +msgid "The thinking process begins to mark" +msgstr "思考过程开始标记" + +#: community/apps/application/serializers/application_serializers.py:151 +msgid "End of thinking process marker" +msgstr "思考过程结束标记" + +#: community/apps/application/serializers/application_serializers.py:156 +#: 
community/apps/application/serializers/application_serializers.py:482 +#: community/apps/application/serializers/application_serializers.py:623 +#: community/apps/application/swagger_api/application_api.py:49 +#: community/apps/application/swagger_api/application_api.py:50 +#: community/apps/application/swagger_api/application_api.py:181 +#: community/apps/application/swagger_api/application_api.py:182 +#: community/apps/application/swagger_api/application_api.py:330 +#: community/apps/application/swagger_api/application_api.py:331 +#: community/apps/application/swagger_api/application_api.py:377 +msgid "Application Name" +msgstr "应用名称" + +#: community/apps/application/serializers/application_serializers.py:159 +#: community/apps/application/serializers/application_serializers.py:484 +#: community/apps/application/serializers/application_serializers.py:625 +#: community/apps/application/swagger_api/application_api.py:51 +#: community/apps/application/swagger_api/application_api.py:52 +#: community/apps/application/swagger_api/application_api.py:183 +#: community/apps/application/swagger_api/application_api.py:184 +#: community/apps/application/swagger_api/application_api.py:332 +#: community/apps/application/swagger_api/application_api.py:333 +#: community/apps/application/swagger_api/application_api.py:382 +msgid "Application Description" +msgstr "应用描述" + +#: community/apps/application/serializers/application_serializers.py:160 +msgid "Workflow Objects" +msgstr "工作流对象" + +#: community/apps/application/serializers/application_serializers.py:162 +#: community/apps/application/serializers/application_serializers.py:225 +#: community/apps/application/serializers/application_serializers.py:492 +#: community/apps/application/swagger_api/application_api.py:57 +#: community/apps/application/swagger_api/application_api.py:58 +#: community/apps/application/swagger_api/application_api.py:190 +#: community/apps/application/swagger_api/application_api.py:191 +#: 
community/apps/application/swagger_api/application_api.py:339 +#: community/apps/application/swagger_api/application_api.py:340 +msgid "Opening remarks" +msgstr "开场白" + +#: community/apps/application/serializers/application_serializers.py:214 +#: community/apps/dataset/serializers/dataset_serializers.py:105 +#: community/apps/dataset/serializers/dataset_serializers.py:106 +msgid "application name" +msgstr "应用名称" + +#: community/apps/application/serializers/application_serializers.py:217 +msgid "application describe" +msgstr "应用描述" + +#: community/apps/application/serializers/application_serializers.py:219 +#: community/apps/application/serializers/application_serializers.py:486 +msgid "Model" +msgstr "模型" + +#: community/apps/application/serializers/application_serializers.py:223 +#: community/apps/application/serializers/application_serializers.py:490 +msgid "Historical chat records" +msgstr "历史聊天记录" + +#: community/apps/application/serializers/application_serializers.py:228 +#: community/apps/application/serializers/application_serializers.py:494 +msgid "Related Knowledge Base" +msgstr "关联知识库" + +#: community/apps/application/serializers/application_serializers.py:235 +#: community/apps/application/serializers/application_serializers.py:504 +#: community/apps/application/serializers/chat_serializers.py:379 +msgid "Question completion" +msgstr "问题补全" + +#: community/apps/application/serializers/application_serializers.py:239 +#: community/apps/application/swagger_api/application_api.py:203 +#: community/apps/application/swagger_api/application_api.py:349 +msgid "Application Type" +msgstr "应用类型" + +#: community/apps/application/serializers/application_serializers.py:243 +msgid "Application type only supports SIMPLE|WORK_FLOW" +msgstr "应用类型只支持 SIMPLE|WORK_FLOW" + +#: community/apps/application/serializers/application_serializers.py:247 +#: community/apps/application/serializers/application_serializers.py:508 +msgid "Model parameters" +msgstr "模型参数" + +#: 
community/apps/application/serializers/application_serializers.py:255 +msgid "Host" +msgstr "主机" + +#: community/apps/application/serializers/application_serializers.py:256 +msgid "protocol" +msgstr "协议" + +#: community/apps/application/serializers/application_serializers.py:339 +#: community/apps/application/swagger_api/application_api.py:153 +#: community/apps/application/swagger_api/application_api.py:154 +msgid "Reset Token" +msgstr "重置 Token" + +#: community/apps/application/serializers/application_serializers.py:340 +msgid "Is it enabled" +msgstr "是否开启" + +#: community/apps/application/serializers/application_serializers.py:343 +#: community/apps/application/swagger_api/application_api.py:158 +#: community/apps/application/swagger_api/application_api.py:159 +msgid "Number of visits" +msgstr "访问次数" + +#: community/apps/application/serializers/application_serializers.py:345 +#: community/apps/application/swagger_api/application_api.py:160 +#: community/apps/application/swagger_api/application_api.py:161 +msgid "Whether to enable whitelist" +msgstr "是否开启白名单" + +#: community/apps/application/serializers/application_serializers.py:348 +#: community/apps/application/serializers/application_serializers.py:349 +#: community/apps/application/swagger_api/application_api.py:163 +#: community/apps/application/swagger_api/application_api.py:164 +msgid "Whitelist" +msgstr "白名单" + +#: community/apps/application/serializers/application_serializers.py:352 +#: community/apps/application/swagger_api/application_api.py:166 +#: community/apps/application/swagger_api/application_api.py:167 +msgid "Whether to display knowledge sources" +msgstr "是否显示知识来源" + +#: community/apps/application/serializers/application_serializers.py:423 +msgid "access_token" +msgstr "access_token" + +#: community/apps/application/serializers/application_serializers.py:425 +msgid "Certification Information" +msgstr "认证信息" + +#: community/apps/application/serializers/application_serializers.py:462 +msgid 
"Invalid access_token" +msgstr "无效的access_token" + +#: community/apps/application/serializers/application_serializers.py:473 +msgid "Wrong password" +msgstr "密码错误" + +#: community/apps/application/serializers/application_serializers.py:498 +msgid "Dataset settings" +msgstr "知识库设置" + +#: community/apps/application/serializers/application_serializers.py:501 +msgid "Model setup" +msgstr "模型设置" + +#: community/apps/application/serializers/application_serializers.py:505 +msgid "Icon" +msgstr "icon 图标" + +#: community/apps/application/serializers/application_serializers.py:515 +#: community/apps/application/serializers/application_serializers.py:722 +#: community/apps/setting/serializers/valid_serializers.py:29 +msgid "" +"The community version supports up to 5 applications. If you need more " +"applications, please contact us (https://fit2cloud.com/)." +msgstr "" +"社区版最多支持 5 个应用,如需拥有更多应用,请联系我们(https://" +"fit2cloud.com/)" + +#: community/apps/application/serializers/application_serializers.py:583 +msgid "Query text" +msgstr "查询文本" + +#: community/apps/application/serializers/application_serializers.py:585 +msgid "topN" +msgstr "topN" + +#: community/apps/application/serializers/application_serializers.py:587 +msgid "Relevance" +msgstr "相似度" + +#: community/apps/application/serializers/application_serializers.py:596 +#: community/apps/application/serializers/application_serializers.py:705 +#: community/apps/application/serializers/application_serializers.py:797 +msgid "Application id does not exist" +msgstr "应用 ID 不存在" + +#: community/apps/application/serializers/application_serializers.py:628 +msgid "Select User ID" +msgstr "选择用户 ID" + +#: community/apps/application/serializers/application_serializers.py:717 +#: community/apps/dataset/serializers/document_serializers.py:164 +#: community/apps/dataset/serializers/document_serializers.py:213 +#: community/apps/dataset/serializers/document_serializers.py:220 +#: community/apps/dataset/serializers/file_serializers.py:59 +#: 
community/apps/dataset/views/file.py:35 +#: community/apps/dataset/views/file.py:44 +#: community/apps/function_lib/serializers/function_lib_serializer.py:331 +msgid "file" +msgstr "文件" + +#: community/apps/application/serializers/application_serializers.py:732 +#: community/apps/common/handle/impl/qa/zip_parse_qa_handle.py:62 +#: community/apps/common/handle/impl/zip_split_handle.py:56 +#: community/apps/dataset/serializers/document_serializers.py:874 +#: community/apps/dataset/serializers/document_serializers.py:882 +#: community/apps/function_lib/serializers/function_lib_serializer.py:343 +msgid "Unsupported file format" +msgstr "文件格式不支持" + +#: community/apps/application/serializers/application_serializers.py:872 +msgid "work_flow is a required field" +msgstr "work_flow是必填字段" + +#: community/apps/application/serializers/application_serializers.py:934 +#: community/apps/application/serializers/application_serializers.py:1076 +#, python-brace-format +msgid "Unknown knowledge base id {dataset_id}, unable to associate" +msgstr "未知的知识库 id {dataset_id},无法关联" + +#: community/apps/application/serializers/application_serializers.py:954 +msgid "Illegal User" +msgstr "非法用户" + +#: community/apps/application/serializers/application_serializers.py:1028 +#: community/apps/application/serializers/application_serializers.py:1038 +#: community/apps/application/serializers/application_serializers.py:1048 +#, python-brace-format +msgid "No permission to use this model:{model_name}" +msgstr "用户没有使用该模型:{model_name}的权限" + +#: community/apps/application/serializers/application_serializers.py:1259 +#: community/apps/application/swagger_api/chat_api.py:498 +#: community/apps/application/swagger_api/chat_api.py:499 +msgid "Availability" +msgstr "是否可用" + +#: community/apps/application/serializers/application_serializers.py:1263 +#: community/apps/application/swagger_api/application_api.py:129 +#: community/apps/application/swagger_api/application_api.py:130 +msgid "Is cross-domain allowed" 
+msgstr "是否允许跨域" + +#: community/apps/application/serializers/application_serializers.py:1268 +msgid "Cross-domain address" +msgstr "跨域地址" + +#: community/apps/application/serializers/application_serializers.py:1269 +#: community/apps/application/swagger_api/application_api.py:131 +msgid "Cross-domain list" +msgstr "跨域列表" + +#: community/apps/application/serializers/application_serializers.py:1274 +msgid "ApiKeyid" +msgstr "" + +#: community/apps/application/serializers/application_serializers.py:1295 +msgid "APIKey does not exist" +msgstr "APIKey 不存在" + +#: community/apps/application/serializers/application_version_serializers.py:30 +#: community/apps/application/swagger_api/application_version_api.py:24 +#: community/apps/application/swagger_api/application_version_api.py:25 +#: community/apps/application/swagger_api/application_version_api.py:47 +#: community/apps/application/swagger_api/application_version_api.py:70 +#: community/apps/application/swagger_api/application_version_api.py:71 +msgid "Version Name" +msgstr "版本名称" + +#: community/apps/application/serializers/application_version_serializers.py:37 +#: community/apps/application/serializers/chat_serializers.py:115 +#: community/apps/application/serializers/chat_serializers.py:240 +msgid "summary" +msgstr "摘要" + +#: community/apps/application/serializers/application_version_serializers.py:61 +msgid "Workflow version id" +msgstr "工作流版本 id" + +#: community/apps/application/serializers/application_version_serializers.py:71 +#: community/apps/application/serializers/application_version_serializers.py:86 +msgid "Workflow version does not exist" +msgstr "工作流版本不存在" + +#: community/apps/application/serializers/chat_message_serializers.py:195 +#: community/apps/dataset/serializers/paragraph_serializers.py:47 +#: community/apps/dataset/serializers/paragraph_serializers.py:180 +#: community/apps/dataset/serializers/paragraph_serializers.py:692 +#: community/apps/dataset/serializers/paragraph_serializers.py:705 +#: 
community/apps/dataset/serializers/paragraph_serializers.py:706 +#: community/apps/dataset/serializers/problem_serializers.py:41 +#: community/apps/dataset/serializers/problem_serializers.py:52 +#: community/apps/dataset/serializers/problem_serializers.py:113 +#: community/apps/dataset/swagger_api/problem_api.py:24 +#: community/apps/dataset/swagger_api/problem_api.py:25 +#: community/apps/dataset/swagger_api/problem_api.py:109 +#: community/apps/dataset/swagger_api/problem_api.py:110 +#: community/apps/dataset/swagger_api/problem_api.py:126 +#: community/apps/dataset/swagger_api/problem_api.py:127 +#: community/apps/dataset/swagger_api/problem_api.py:154 +#: community/apps/dataset/swagger_api/problem_api.py:169 +msgid "content" +msgstr "内容" + +#: community/apps/application/serializers/chat_message_serializers.py:196 +#: community/apps/setting/serializers/team_serializers.py:45 +#: community/apps/users/serializers/user_serializers.py:472 +#: community/apps/users/serializers/user_serializers.py:495 +#: community/apps/users/serializers/user_serializers.py:586 +msgid "Role" +msgstr "角色" + +#: community/apps/application/serializers/chat_message_serializers.py:202 +msgid "Regenerate" +msgstr "重新生成" + +#: community/apps/application/serializers/chat_message_serializers.py:256 +msgid "Is the answer in streaming mode" +msgstr "是否流式回答" + +#: community/apps/application/serializers/chat_message_serializers.py:257 +msgid "Do you want to reply again" +msgstr "是否重新回答" + +#: community/apps/application/serializers/chat_message_serializers.py:259 +#: community/apps/application/serializers/chat_serializers.py:442 +#: community/apps/application/serializers/chat_serializers.py:534 +#: community/apps/application/serializers/chat_serializers.py:590 +#: community/apps/application/serializers/chat_serializers.py:616 +#: community/apps/application/serializers/chat_serializers.py:675 +#: community/apps/application/swagger_api/chat_api.py:148 +#: 
community/apps/application/swagger_api/chat_api.py:149 +#: community/apps/application/swagger_api/chat_api.py:360 +#: community/apps/application/swagger_api/chat_api.py:437 +#: community/apps/application/swagger_api/chat_api.py:470 +msgid "Conversation record id" +msgstr "对话记录 ID" + +#: community/apps/application/serializers/chat_message_serializers.py:262 +msgid "Node id" +msgstr "节点 ID" + +#: community/apps/application/serializers/chat_message_serializers.py:265 +#: community/apps/application/swagger_api/chat_api.py:142 +#: community/apps/application/swagger_api/chat_api.py:143 +msgid "Runtime node id" +msgstr "运行时节点 ID" + +#: community/apps/application/serializers/chat_message_serializers.py:268 +msgid "Node parameters" +msgstr "节点参数" + +#: community/apps/application/serializers/chat_message_serializers.py:273 +msgid "Global variables" +msgstr "全局变量" + +#: community/apps/application/serializers/chat_message_serializers.py:286 +#: community/apps/application/serializers/chat_message_serializers.py:421 +#: community/apps/application/serializers/chat_serializers.py:469 +msgid "Conversation does not exist" +msgstr "对话不存在" + +#: community/apps/application/serializers/chat_message_serializers.py:303 +msgid "The number of visits exceeds today's visits" +msgstr "访问次数超过今日访问量" + +#: community/apps/application/serializers/chat_message_serializers.py:314 +msgid "The current model is not available" +msgstr "当前模型不可用" + +#: community/apps/application/serializers/chat_message_serializers.py:316 +msgid "The model is downloading, please try again later" +msgstr "模型正在下载中,请稍后再试" + +#: community/apps/application/serializers/chat_message_serializers.py:361 +#: community/apps/application/serializers/chat_serializers.py:599 +#: community/apps/application/serializers/chat_serializers.py:645 +#: community/apps/application/serializers/chat_serializers.py:694 +msgid "Conversation record does not exist" +msgstr "对话记录不存在" + +#: 
community/apps/application/serializers/chat_message_serializers.py:454 +#: community/apps/application/serializers/chat_serializers.py:314 +msgid "The application has not been published. Please use it after publishing." +msgstr "应用未发布,请发布后使用" + +#: community/apps/application/serializers/chat_serializers.py:55 +msgid "node" +msgstr "节点" + +#: community/apps/application/serializers/chat_serializers.py:56 +msgid "Connection" +msgstr "连线" + +#: community/apps/application/serializers/chat_serializers.py:71 +#: community/apps/application/swagger_api/chat_api.py:48 +#: community/apps/application/swagger_api/chat_api.py:49 +#: community/apps/application/swagger_api/chat_api.py:169 +#: community/apps/application/swagger_api/chat_api.py:170 +#: community/apps/application/swagger_api/chat_api.py:256 +msgid "abstract" +msgstr "摘要" + +#: community/apps/application/serializers/chat_serializers.py:121 +#: community/apps/application/swagger_api/chat_api.py:258 +msgid "Minimum number of likes" +msgstr "最小点赞数" + +#: community/apps/application/serializers/chat_serializers.py:123 +#: community/apps/application/swagger_api/chat_api.py:260 +msgid "Minimum number of clicks" +msgstr "最小点踩数" + +#: community/apps/application/serializers/chat_serializers.py:126 +msgid "Only supports and|or" +msgstr "只支持 and|or" + +#: community/apps/application/serializers/chat_serializers.py:241 +msgid "Problem after optimization" +msgstr "优化后的问题" + +#: community/apps/application/serializers/chat_serializers.py:242 +msgid "answer" +msgstr "回答" + +#: community/apps/application/serializers/chat_serializers.py:242 +msgid "User feedback" +msgstr "用户反馈" + +#: community/apps/application/serializers/chat_serializers.py:244 +msgid "Section title + content" +msgstr "分段标题+内容" + +#: community/apps/application/serializers/chat_serializers.py:245 +#: community/apps/application/views/chat_views.py:385 +#: community/apps/application/views/chat_views.py:386 +msgid "Annotation" +msgstr "标注" + +#: 
community/apps/application/serializers/chat_serializers.py:245 +msgid "Consuming tokens" +msgstr "消耗tokens" + +#: community/apps/application/serializers/chat_serializers.py:245 +msgid "Time consumed (s)" +msgstr "耗时(s)" + +#: community/apps/application/serializers/chat_serializers.py:246 +msgid "Question Time" +msgstr "提问时间" + +#: community/apps/application/serializers/chat_serializers.py:337 +msgid "Workflow" +msgstr "工作流" + +#: community/apps/application/serializers/chat_serializers.py:369 +msgid "Multi-round conversation" +msgstr "多轮对话" + +#: community/apps/application/serializers/chat_serializers.py:372 +msgid "Related Datasets" +msgstr "关联数据集" + +#: community/apps/application/serializers/chat_serializers.py:449 +msgid "Application authentication information does not exist" +msgstr "不存在的应用认证信息" + +#: community/apps/application/serializers/chat_serializers.py:451 +msgid "Displaying knowledge sources is not enabled" +msgstr "未开启显示知识来源" + +#: community/apps/application/serializers/chat_serializers.py:537 +msgid "Bidding Status" +msgstr "投票状态" + +#: community/apps/application/serializers/chat_serializers.py:546 +msgid "" +"Voting on the current session minutes, please do not send repeated requests" +msgstr "正在对当前会话纪要进行投票中,请勿重复发送请求" + +#: community/apps/application/serializers/chat_serializers.py:551 +msgid "Non-existent conversation chat_record_id" +msgstr "不存在的对话 chat_record_id" + +#: community/apps/application/serializers/chat_serializers.py:568 +msgid "Already voted, please cancel first and then vote again" +msgstr "已经投票过,请先取消后再进行投票" + +#: community/apps/application/serializers/chat_serializers.py:575 +#: community/apps/application/swagger_api/chat_api.py:379 +#: community/apps/application/swagger_api/chat_api.py:380 +#: community/apps/dataset/swagger_api/problem_api.py:128 +#: community/apps/dataset/swagger_api/problem_api.py:129 +msgid "Section title" +msgstr "段落标题" + +#: community/apps/application/serializers/chat_serializers.py:576 +#: 
community/apps/application/swagger_api/chat_api.py:381 +#: community/apps/application/swagger_api/chat_api.py:382 +#: community/apps/application/swagger_api/chat_api.py:483 +#: community/apps/application/swagger_api/chat_api.py:484 +#: community/apps/common/swagger_api/common_api.py:57 +#: community/apps/common/swagger_api/common_api.py:58 +msgid "Paragraph content" +msgstr "段落内容" + +#: community/apps/application/serializers/chat_serializers.py:620 +#: community/apps/application/serializers/chat_serializers.py:679 +#: community/apps/application/serializers/chat_serializers.py:710 +#: community/apps/application/swagger_api/chat_api.py:370 +#: community/apps/application/swagger_api/chat_api.py:395 +#: community/apps/application/swagger_api/chat_api.py:396 +#: community/apps/application/swagger_api/chat_api.py:496 +#: community/apps/application/swagger_api/chat_api.py:497 +msgid "Document id" +msgstr "文档 ID" + +#: community/apps/application/serializers/chat_serializers.py:626 +#: community/apps/application/serializers/chat_serializers.py:717 +#: community/apps/dataset/serializers/paragraph_serializers.py:576 +msgid "The document id is incorrect" +msgstr "文档 id 不正确" + +#: community/apps/application/serializers/chat_serializers.py:681 +#: community/apps/application/swagger_api/chat_api.py:310 +#: community/apps/application/swagger_api/chat_api.py:311 +msgid "Paragraph id" +msgstr "段落 ID" + +#: community/apps/application/serializers/chat_serializers.py:697 +#, python-brace-format +msgid "" +"The paragraph id is wrong. The current conversation record does not exist. 
" +"[{paragraph_id}] paragraph id" +msgstr "段落id错误。当前对话记录不存在。[{paragraph_id}] 段落id" + +#: community/apps/application/serializers/chat_serializers.py:736 +msgid "Conversation records that do not exist" +msgstr "存在不存在的对话记录" + +#: community/apps/application/swagger_api/application_api.py:24 +#: community/apps/application/views/chat_views.py:470 +#: community/apps/application/views/chat_views.py:471 +msgid "Upload files" +msgstr "上传文件" + +#: community/apps/application/swagger_api/application_api.py:35 +#: community/apps/application/swagger_api/application_api.py:36 +msgid "Application authentication token" +msgstr "应用认证 token" + +#: community/apps/application/swagger_api/application_api.py:48 +#: community/apps/application/swagger_api/application_version_api.py:22 +#: community/apps/application/swagger_api/application_version_api.py:23 +msgid "Primary key id" +msgstr "主键 id" + +#: community/apps/application/swagger_api/application_api.py:60 +msgid "Example List" +msgstr "示例列表" + +#: community/apps/application/swagger_api/application_api.py:61 +#: community/apps/application/swagger_api/application_api.py:62 +msgid "Affiliation user" +msgstr "所属用户" + +#: community/apps/application/swagger_api/application_api.py:64 +msgid "Is publish" +msgstr "是否发布" + +#: community/apps/application/swagger_api/application_api.py:66 +#: community/apps/application/swagger_api/application_api.py:67 +#: community/apps/application/swagger_api/application_version_api.py:28 +#: community/apps/application/swagger_api/application_version_api.py:29 +#: community/apps/application/swagger_api/chat_api.py:185 +#: community/apps/application/swagger_api/chat_api.py:186 +#: community/apps/application/swagger_api/chat_api.py:335 +#: community/apps/application/swagger_api/chat_api.py:336 +#: community/apps/application/swagger_api/chat_api.py:503 +#: community/apps/application/swagger_api/chat_api.py:504 +msgid "Creation time" +msgstr "创建时间" + +#: community/apps/application/swagger_api/application_api.py:69 
+#: community/apps/application/swagger_api/application_api.py:70 +#: community/apps/application/swagger_api/application_version_api.py:30 +#: community/apps/application/swagger_api/application_version_api.py:31 +#: community/apps/application/swagger_api/chat_api.py:332 +#: community/apps/application/swagger_api/chat_api.py:333 +#: community/apps/application/swagger_api/chat_api.py:500 +#: community/apps/application/swagger_api/chat_api.py:501 +msgid "Modification time" +msgstr "修改时间" + +#: community/apps/application/swagger_api/application_api.py:74 +#: community/apps/application/swagger_api/application_api.py:194 +#: community/apps/application/swagger_api/application_api.py:195 +#: community/apps/application/swagger_api/application_api.py:343 +#: community/apps/application/swagger_api/application_api.py:344 +#: community/apps/application/swagger_api/chat_api.py:229 +#: community/apps/application/swagger_api/chat_api.py:230 +msgid "List of associated knowledge base IDs" +msgstr "关联知识库 ID 列表" + +#: community/apps/application/swagger_api/application_api.py:76 +msgid "List of associated knowledge base IDs (returned when querying details)" +msgstr "关联知识库ID列表(查询详情时返回)" + +#: community/apps/application/swagger_api/application_api.py:91 +msgid "Model Type" +msgstr "模型类型" + +#: community/apps/application/swagger_api/application_api.py:117 +msgid "Application api_key id" +msgstr "应用 api_key id" + +#: community/apps/application/swagger_api/application_api.py:126 +#: community/apps/application/swagger_api/application_api.py:127 +#: community/apps/application/swagger_api/application_api.py:156 +#: community/apps/application/swagger_api/application_api.py:157 +msgid "Is activation" +msgstr "是否可用" + +#: community/apps/application/swagger_api/application_api.py:198 +#: community/apps/application/swagger_api/application_api.py:347 +#: community/apps/application/swagger_api/application_api.py:348 +msgid "Problem Optimization" +msgstr "问题优化" + +#: 
community/apps/application/swagger_api/application_api.py:199 +msgid "Whether to enable problem optimization" +msgstr "是否开启问题优化" + +#: community/apps/application/swagger_api/application_api.py:204 +#: community/apps/application/swagger_api/application_api.py:350 +msgid "Application Type SIMPLE | WORK_FLOW" +msgstr "应用类型 SIMPLE | WORK_FLOW" + +#: community/apps/application/swagger_api/application_api.py:207 +#: community/apps/application/swagger_api/application_api.py:208 +#: community/apps/application/swagger_api/application_api.py:352 +#: community/apps/application/swagger_api/application_api.py:353 +msgid "Question optimization tips" +msgstr "问题优化提示词" + +#: community/apps/application/swagger_api/application_api.py:211 +#: community/apps/application/swagger_api/application_api.py:212 +#: community/apps/application/swagger_api/application_api.py:356 +#: community/apps/application/swagger_api/application_api.py:357 +msgid "Text-to-speech model ID" +msgstr "文本转语音模型 ID" + +#: community/apps/application/swagger_api/application_api.py:213 +#: community/apps/application/swagger_api/application_api.py:214 +#: community/apps/application/swagger_api/application_api.py:358 +#: community/apps/application/swagger_api/application_api.py:359 +msgid "Speech-to-text model id" +msgstr "语音转文本模型 ID" + +#: community/apps/application/swagger_api/application_api.py:215 +#: community/apps/application/swagger_api/application_api.py:216 +#: community/apps/application/swagger_api/application_api.py:360 +#: community/apps/application/swagger_api/application_api.py:361 +msgid "Is speech-to-text enabled" +msgstr "是否开启语音转文本" + +#: community/apps/application/swagger_api/application_api.py:217 +#: community/apps/application/swagger_api/application_api.py:218 +#: community/apps/application/swagger_api/application_api.py:362 +#: community/apps/application/swagger_api/application_api.py:363 +msgid "Is text-to-speech enabled" +msgstr "是否开启文本转语音" + +#: 
community/apps/application/swagger_api/application_api.py:219 +#: community/apps/application/swagger_api/application_api.py:220 +#: community/apps/application/swagger_api/application_api.py:364 +#: community/apps/application/swagger_api/application_api.py:365 +msgid "Text-to-speech type" +msgstr "文本转语音类型" + +#: community/apps/application/swagger_api/application_api.py:233 +msgid "Node List" +msgstr "节点列表" + +#: community/apps/application/swagger_api/application_api.py:236 +msgid "Connection List" +msgstr "连线列表" + +#: community/apps/application/swagger_api/application_api.py:266 +msgid "state" +msgstr "状态" + +#: community/apps/application/swagger_api/application_api.py:268 +msgid "ai_questioning|designated_answer" +msgstr "ai作答|指定答案" + +#: community/apps/application/swagger_api/application_api.py:273 +msgid "" +"ai_questioning: is the title, designated_answer: is the designated answer " +"content" +msgstr "ai作答:就是题词,指定回答:就是指定回答内容" + +#: community/apps/application/swagger_api/application_api.py:403 +#: community/apps/function_lib/swagger_api/function_lib_api.py:216 +msgid "Upload image files" +msgstr "上传图片文件" + +#: community/apps/application/swagger_api/application_api.py:434 +#: community/apps/application/swagger_api/application_api.py:435 +msgid "Text" +msgstr "文本" + +#: community/apps/application/swagger_api/application_statistics_api.py:41 +#: community/apps/application/swagger_api/application_statistics_api.py:42 +#: community/apps/application/swagger_api/chat_api.py:490 +#: community/apps/application/swagger_api/chat_api.py:491 +msgid "Number of Likes" +msgstr "点赞数" + +#: community/apps/application/swagger_api/application_statistics_api.py:44 +#: community/apps/application/swagger_api/chat_api.py:492 +#: community/apps/application/swagger_api/chat_api.py:493 +msgid "Number of thumbs-downs" +msgstr "点踩数" + +#: community/apps/application/swagger_api/application_statistics_api.py:45 +#: community/apps/application/swagger_api/application_statistics_api.py:46 +msgid 
"Number of tokens used" +msgstr "token使用数量" + +#: community/apps/application/swagger_api/application_statistics_api.py:47 +#: community/apps/application/swagger_api/application_statistics_api.py:48 +msgid "Number of conversations" +msgstr "对话次数" + +#: community/apps/application/swagger_api/application_statistics_api.py:49 +#: community/apps/application/swagger_api/application_statistics_api.py:50 +msgid "Number of customers" +msgstr "客户数量" + +#: community/apps/application/swagger_api/application_statistics_api.py:51 +#: community/apps/application/swagger_api/application_statistics_api.py:52 +msgid "Number of new customers" +msgstr "客户新增数量" + +#: community/apps/application/swagger_api/application_statistics_api.py:54 +#: community/apps/application/swagger_api/application_statistics_api.py:69 +#: community/apps/application/swagger_api/application_statistics_api.py:70 +msgid "time" +msgstr "日期" + +#: community/apps/application/swagger_api/application_statistics_api.py:55 +msgid "Time, this field is only available when querying trends" +msgstr "日期,只有查询趋势的时候才有该字段" + +#: community/apps/application/swagger_api/application_statistics_api.py:66 +#: community/apps/application/swagger_api/application_statistics_api.py:83 +msgid "New quantity" +msgstr "新增数量" + +#: community/apps/application/swagger_api/application_statistics_api.py:81 +#: community/apps/application/swagger_api/application_statistics_api.py:82 +msgid "Today's new quantity" +msgstr "今日新增数量" + +#: community/apps/application/swagger_api/application_version_api.py:26 +#: community/apps/application/swagger_api/application_version_api.py:27 +msgid "Workflow data" +msgstr "工作流数据" + +#: community/apps/application/swagger_api/application_version_api.py:61 +msgid "Application version id" +msgstr "应用版本 id" + +#: community/apps/application/swagger_api/chat_api.py:61 +#: community/apps/application/swagger_api/chat_api.py:62 +#: community/apps/application/swagger_api/chat_api.py:92 +#: 
community/apps/dataset/serializers/problem_serializers.py:91 +msgid "problem" +msgstr "问题" + +#: community/apps/application/swagger_api/chat_api.py:68 +msgid "Question content" +msgstr "问题内容" + +#: community/apps/application/swagger_api/chat_api.py:72 +msgid "role" +msgstr "角色" + +#: community/apps/application/swagger_api/chat_api.py:77 +#: community/apps/application/swagger_api/chat_api.py:93 +msgid "regenerate" +msgstr "重新生成" + +#: community/apps/application/swagger_api/chat_api.py:79 +msgid "Stream Output" +msgstr "流式输出" + +#: community/apps/application/swagger_api/chat_api.py:94 +msgid "Is it streaming output" +msgstr "是否流式输出" + +#: community/apps/application/swagger_api/chat_api.py:96 +#: community/apps/application/swagger_api/chat_api.py:97 +#| msgid "Form Data" +msgid "Form data" +msgstr "表单数据" + +#: community/apps/application/swagger_api/chat_api.py:101 +#: community/apps/application/swagger_api/chat_api.py:102 +#| msgid "state list" +msgid "Image list" +msgstr "图片列表" + +#: community/apps/application/swagger_api/chat_api.py:107 +msgid "Image name" +msgstr "图片名称" + +#: community/apps/application/swagger_api/chat_api.py:109 +msgid "Image URL" +msgstr "图片地址" + +#: community/apps/application/swagger_api/chat_api.py:115 +#: community/apps/application/swagger_api/chat_api.py:116 +#: community/apps/dataset/views/document.py:133 +#: community/apps/dataset/views/document.py:134 +msgid "Document list" +msgstr "文档列表" + +#: community/apps/application/swagger_api/chat_api.py:122 +msgid "Document name" +msgstr "文档名称" + +#: community/apps/application/swagger_api/chat_api.py:124 +msgid "Document URL" +msgstr "文档地址" + +#: community/apps/application/swagger_api/chat_api.py:129 +#: community/apps/application/swagger_api/chat_api.py:130 +#| msgid "id list" +msgid "Audio list" +msgstr "音频列表" + +#: community/apps/application/swagger_api/chat_api.py:135 +msgid "Audio name" +msgstr "音频名称" + +#: community/apps/application/swagger_api/chat_api.py:137 +msgid "Audio URL" +msgstr 
"音频地址" + +#: community/apps/application/swagger_api/chat_api.py:145 +#: community/apps/application/swagger_api/chat_api.py:146 +msgid "Node data" +msgstr "节点数据" + +#: community/apps/application/swagger_api/chat_api.py:151 +#: community/apps/application/swagger_api/chat_api.py:152 +msgid "Child node" +msgstr "子节点数据" + +#: community/apps/application/swagger_api/chat_api.py:173 +#: community/apps/application/swagger_api/chat_api.py:174 +msgid "Number of dialogue questions" +msgstr "对话提问数量" + +#: community/apps/application/swagger_api/chat_api.py:176 +#: community/apps/application/swagger_api/chat_api.py:177 +msgid "Number of tags" +msgstr "标记数量" + +#: community/apps/application/swagger_api/chat_api.py:178 +#: community/apps/application/swagger_api/chat_api.py:179 +#: community/apps/common/swagger_api/common_api.py:64 +#: community/apps/common/swagger_api/common_api.py:65 +#: community/apps/dataset/serializers/paragraph_serializers.py:711 +#: community/apps/dataset/serializers/paragraph_serializers.py:712 +msgid "Number of likes" +msgstr "点赞数量" + +#: community/apps/application/swagger_api/chat_api.py:180 +#: community/apps/application/swagger_api/chat_api.py:181 +msgid "Number of clicks" +msgstr "点踩数量" + +#: community/apps/application/swagger_api/chat_api.py:182 +#: community/apps/application/swagger_api/chat_api.py:183 +msgid "Change time" +msgstr "修改时间" + +#: community/apps/application/swagger_api/chat_api.py:224 +msgid "Application ID, pass when modifying, do not pass when creating" +msgstr "应用id,修改的时候传,创建的时候不传" + +#: community/apps/application/swagger_api/chat_api.py:225 +#: community/apps/application/swagger_api/chat_api.py:226 +msgid "Model ID" +msgstr "模型 ID" + +#: community/apps/application/swagger_api/chat_api.py:232 +#: community/apps/application/swagger_api/chat_api.py:234 +msgid "Do you want to initiate multiple sessions" +msgstr "是否开启多轮会话" + +#: community/apps/application/swagger_api/chat_api.py:237 +msgid "Problem optimization" +msgstr "问题优化" + +#: 
community/apps/application/swagger_api/chat_api.py:238 +msgid "Do you want to enable problem optimization" +msgstr "是否开启问题优化" + +#: community/apps/application/swagger_api/chat_api.py:254 +msgid "Historical days" +msgstr "历史天数" + +#: community/apps/application/swagger_api/chat_api.py:262 +msgid "or|and comparator" +msgstr "or|and 比较器" + +#: community/apps/application/swagger_api/chat_api.py:266 +#| msgid "Start time" +msgid "start time" +msgstr "开始时间" + +#: community/apps/application/swagger_api/chat_api.py:291 +msgid "Is it ascending order" +msgstr "是否升序" + +#: community/apps/application/swagger_api/chat_api.py:304 +msgid "Session log id" +msgstr "会话日志 id" + +#: community/apps/application/swagger_api/chat_api.py:305 +msgid "Conversation log id" +msgstr "对话日志 ID" + +#: community/apps/application/swagger_api/chat_api.py:306 +#: community/apps/application/swagger_api/chat_api.py:307 +#: community/apps/application/swagger_api/chat_api.py:446 +msgid "Voting Status" +msgstr "投票状态" + +#: community/apps/application/swagger_api/chat_api.py:308 +#: community/apps/application/swagger_api/chat_api.py:309 +msgid "Dataset id" +msgstr "数据集 id" + +#: community/apps/application/swagger_api/chat_api.py:312 +#: community/apps/application/swagger_api/chat_api.py:313 +msgid "Resource ID" +msgstr "资源 ID" + +#: community/apps/application/swagger_api/chat_api.py:314 +#: community/apps/application/swagger_api/chat_api.py:315 +msgid "Resource Type" +msgstr "资源类型" + +#: community/apps/application/swagger_api/chat_api.py:317 +#: community/apps/application/swagger_api/chat_api.py:318 +msgid "Number of tokens consumed by the question" +msgstr "问题消耗 token 数量" + +#: community/apps/application/swagger_api/chat_api.py:320 +#: community/apps/application/swagger_api/chat_api.py:321 +msgid "The number of tokens consumed by the answer" +msgstr "答案消耗 token 数量" + +#: community/apps/application/swagger_api/chat_api.py:324 +#: community/apps/application/swagger_api/chat_api.py:325 +msgid "Improved 
annotation list" +msgstr "改进标注列表" + +#: community/apps/application/swagger_api/chat_api.py:328 +msgid "Corresponding session Corresponding subscript" +msgstr "对应会话对应下标" + +#: community/apps/application/swagger_api/chat_api.py:329 +msgid "Corresponding session id corresponding subscript" +msgstr "对应会话id对应下标" + +#: community/apps/application/swagger_api/chat_api.py:397 +#: community/apps/application/swagger_api/chat_api.py:398 +msgid "Conversation id list" +msgstr "会话 id 列表" + +#: community/apps/application/swagger_api/chat_api.py:447 +msgid "-1: Cancel vote | 0: Agree | 1: Oppose" +msgstr "-1:取消投票|0:赞同|1:反对" + +#: community/apps/application/swagger_api/chat_api.py:485 +#: community/apps/application/swagger_api/chat_api.py:486 +#: community/apps/common/swagger_api/common_api.py:59 +#: community/apps/common/swagger_api/common_api.py:60 +#: community/apps/dataset/serializers/paragraph_serializers.py:687 +#: community/apps/dataset/serializers/paragraph_serializers.py:707 +#: community/apps/dataset/serializers/paragraph_serializers.py:708 +msgid "title" +msgstr "标题" + +#: community/apps/application/swagger_api/chat_api.py:486 +#: community/apps/common/swagger_api/common_api.py:60 +msgid "Description of xxx" +msgstr "xxx 描述" + +#: community/apps/application/swagger_api/chat_api.py:487 +#: community/apps/application/swagger_api/chat_api.py:488 +#: community/apps/common/swagger_api/common_api.py:61 +#: community/apps/common/swagger_api/common_api.py:62 +msgid "Number of hits" +msgstr "命中数量" + +#: community/apps/application/views/application_version_views.py:28 +#: community/apps/application/views/application_version_views.py:29 +#: community/apps/application/views/application_views.py:489 +#: community/apps/application/views/application_views.py:490 +msgid "Get the application list" +msgstr "获取应用列表" + +#: community/apps/application/views/application_version_views.py:32 +#: community/apps/application/views/application_version_views.py:50 +#: 
community/apps/application/views/application_version_views.py:68 +#: community/apps/application/views/application_version_views.py:83 +msgid "Application/Version" +msgstr "应用/版本" + +#: community/apps/application/views/application_version_views.py:45 +#: community/apps/application/views/application_version_views.py:46 +msgid "Get the list of application versions by page" +msgstr "获取应用版本列表分页" + +#: community/apps/application/views/application_version_views.py:64 +#: community/apps/application/views/application_version_views.py:65 +msgid "Get application version details" +msgstr "获取应用版本详情" + +#: community/apps/application/views/application_version_views.py:78 +#: community/apps/application/views/application_version_views.py:79 +msgid "Modify application version information" +msgstr "修改应用版本信息" + +#: community/apps/application/views/application_views.py:42 +#: community/apps/application/views/application_views.py:43 +msgid "User Statistics" +msgstr "用户统计" + +#: community/apps/application/views/application_views.py:44 +#: community/apps/application/views/application_views.py:70 +#: community/apps/application/views/application_views.py:95 +#: community/apps/application/views/application_views.py:121 +msgid "Application/Statistics" +msgstr "应用/统计" + +#: community/apps/application/views/application_views.py:68 +#: community/apps/application/views/application_views.py:69 +msgid "User demographic trends" +msgstr "用户统计趋势" + +#: community/apps/application/views/application_views.py:93 +#: community/apps/application/views/application_views.py:94 +msgid "Conversation statistics" +msgstr "对话相关统计" + +#: community/apps/application/views/application_views.py:119 +#: community/apps/application/views/application_views.py:120 +msgid "Dialogue-related statistical trends" +msgstr "对话相关统计趋势" + +#: community/apps/application/views/application_views.py:150 +#: community/apps/application/views/application_views.py:151 +msgid "Modify application icon" +msgstr "修改应用图标" + +#: 
community/apps/application/views/application_views.py:152 +#: community/apps/application/views/application_views.py:175 +#: community/apps/application/views/application_views.py:189 +#: community/apps/application/views/application_views.py:202 +#: community/apps/application/views/application_views.py:216 +#: community/apps/application/views/application_views.py:236 +#: community/apps/application/views/application_views.py:255 +#: community/apps/application/views/application_views.py:274 +#: community/apps/application/views/application_views.py:313 +#: community/apps/application/views/application_views.py:482 +#: community/apps/application/views/application_views.py:493 +#: community/apps/application/views/application_views.py:508 +#: community/apps/application/views/application_views.py:535 +#: community/apps/application/views/application_views.py:555 +#: community/apps/application/views/application_views.py:575 +#: community/apps/application/views/application_views.py:593 +#: community/apps/application/views/application_views.py:614 +#: community/apps/application/views/application_views.py:635 +#: community/apps/application/views/application_views.py:670 +msgid "Application" +msgstr "应用" + +#: community/apps/application/views/application_views.py:173 +msgid "Import Application" +msgstr "导入应用" + +#: community/apps/application/views/application_views.py:187 +msgid "Export Application" +msgstr "导出应用" + +#: community/apps/application/views/application_views.py:200 +#: community/apps/application/views/application_views.py:201 +msgid "Get embedded js" +msgstr "获取嵌入 js" + +#: community/apps/application/views/application_views.py:214 +#: community/apps/application/views/application_views.py:215 +msgid "Get a list of models" +msgstr "获取模型列表" + +#: community/apps/application/views/application_views.py:234 +#: community/apps/application/views/application_views.py:235 +#: community/apps/setting/views/model.py:100 +#: community/apps/setting/views/model.py:101 +msgid "Get model 
parameter form" +msgstr "获取模型参数表单" + +#: community/apps/application/views/application_views.py:253 +#: community/apps/application/views/application_views.py:254 +msgid "Get a list of function libraries" +msgstr "获取函数库列表" + +#: community/apps/application/views/application_views.py:272 +#: community/apps/application/views/application_views.py:273 +msgid "Get library details" +msgstr "获取函数库详情" + +#: community/apps/application/views/application_views.py:292 +#: community/apps/application/views/application_views.py:293 +msgid "Get the list of apps created by the current user" +msgstr "获取当前用户创建的应用列表" + +#: community/apps/application/views/application_views.py:294 +#: community/apps/application/views/application_views.py:333 +#: community/apps/application/views/chat_views.py:74 +#: community/apps/application/views/chat_views.py:93 +#: community/apps/application/views/chat_views.py:105 +#: community/apps/application/views/chat_views.py:118 +#: community/apps/application/views/chat_views.py:347 +msgid "Application/Chat" +msgstr "应用/对话" + +#: community/apps/application/views/application_views.py:311 +#: community/apps/application/views/application_views.py:312 +msgid "Get application data" +msgstr "获取应用数据" + +#: community/apps/application/views/application_views.py:331 +#: community/apps/application/views/application_views.py:332 +msgid "Get application related information" +msgstr "获取应用相关信息" + +#: community/apps/application/views/application_views.py:346 +#: community/apps/application/views/application_views.py:347 +msgid "Add ApiKey" +msgstr "添加 ApiKey" + +#: community/apps/application/views/application_views.py:348 +#: community/apps/application/views/application_views.py:364 +#: community/apps/application/views/application_views.py:383 +#: community/apps/application/views/application_views.py:402 +msgid "Application/API_KEY" +msgstr "应用/API_KEY" + +#: community/apps/application/views/application_views.py:362 +#: community/apps/application/views/application_views.py:363 
+msgid "Get the application API_KEY list" +msgstr "获取应用 API_KEY 列表" + +#: community/apps/application/views/application_views.py:381 +#: community/apps/application/views/application_views.py:382 +msgid "Modify application API_KEY" +msgstr "修改应用 API_KEY" + +#: community/apps/application/views/application_views.py:400 +#: community/apps/application/views/application_views.py:401 +msgid "Delete Application API_KEY" +msgstr "删除应用 API_KEY" + +#: community/apps/application/views/application_views.py:421 +#: community/apps/application/views/application_views.py:422 +msgid "Modify Application AccessToken" +msgstr "修改应用访问限制" + +#: community/apps/application/views/application_views.py:423 +#: community/apps/application/views/application_views.py:441 +msgid "Application/Public Access" +msgstr "应用/公共访问" + +#: community/apps/application/views/application_views.py:438 +#: community/apps/application/views/application_views.py:439 +msgid "Get the application AccessToken information" +msgstr "获取应用 AccessToken 信息" + +#: community/apps/application/views/application_views.py:462 +#: community/apps/application/views/application_views.py:463 +msgid "Application Certification" +msgstr "应用认证" + +#: community/apps/application/views/application_views.py:465 +msgid "Application/Certification" +msgstr "应用/认证" + +#: community/apps/application/views/application_views.py:479 +#: community/apps/application/views/application_views.py:480 +msgid "Create an application" +msgstr "创建应用" + +#: community/apps/application/views/application_views.py:505 +msgid "Hit Test List" +msgstr "命中测试列表" + +#: community/apps/application/views/application_views.py:530 +#: community/apps/application/views/application_views.py:531 +msgid "Publishing an application" +msgstr "发布应用" + +#: community/apps/application/views/application_views.py:551 +#: community/apps/application/views/application_views.py:552 +msgid "Deleting application" +msgstr "删除应用" + +#: community/apps/application/views/application_views.py:570 +#: 
community/apps/application/views/application_views.py:571 +msgid "Modify the application" +msgstr "修改应用" + +#: community/apps/application/views/application_views.py:589 +#: community/apps/application/views/application_views.py:590 +msgid "Get application details" +msgstr "获取应用详情" + +#: community/apps/application/views/application_views.py:609 +#: community/apps/application/views/application_views.py:610 +msgid "Get the knowledge base available to the current application" +msgstr "获取当前应用可用的知识库" + +#: community/apps/application/views/application_views.py:630 +#: community/apps/application/views/application_views.py:631 +msgid "Get the application list by page" +msgstr "获取应用列表分页" + +#: community/apps/application/views/application_views.py:665 +#: community/apps/application/views/application_views.py:666 +msgid "text to speech" +msgstr "文本转语音" + +#: community/apps/application/views/chat_views.py:36 +#: community/apps/application/views/chat_views.py:37 +msgid "OpenAI Interface Dialogue" +msgstr "openai接口对话" + +#: community/apps/application/views/chat_views.py:39 +msgid "OpenAI Dialogue" +msgstr "openai对话" + +#: community/apps/application/views/chat_views.py:52 +#: community/apps/application/views/chat_views.py:53 +msgid "Export conversation" +msgstr "导出对话" + +#: community/apps/application/views/chat_views.py:55 +#: community/apps/application/views/chat_views.py:156 +#: community/apps/application/views/chat_views.py:174 +#: community/apps/application/views/chat_views.py:197 +#: community/apps/application/views/chat_views.py:217 +#: community/apps/application/views/chat_views.py:235 +#: community/apps/application/views/chat_views.py:257 +#: community/apps/application/views/chat_views.py:282 +#: community/apps/application/views/chat_views.py:302 +#: community/apps/application/views/chat_views.py:324 +#: community/apps/application/views/chat_views.py:489 +msgid "Application/Conversation Log" +msgstr "应用/对话日志" + +#: community/apps/application/views/chat_views.py:71 +#: 
community/apps/application/views/chat_views.py:72 +msgid "Get the session id according to the application id" +msgstr "获取应用id对应的会话id" + +#: community/apps/application/views/chat_views.py:90 +#: community/apps/application/views/chat_views.py:91 +msgid "Get the workflow temporary session id" +msgstr "获取工作流临时会话id" + +#: community/apps/application/views/chat_views.py:102 +#: community/apps/application/views/chat_views.py:103 +msgid "Get a temporary session id" +msgstr "获取临时会话id" + +#: community/apps/application/views/chat_views.py:115 +#: community/apps/application/views/chat_views.py:116 +msgid "dialogue" +msgstr "对话" + +#: community/apps/application/views/chat_views.py:152 +#: community/apps/application/views/chat_views.py:153 +msgid "Get the conversation list" +msgstr "获取对话列表" + +#: community/apps/application/views/chat_views.py:172 +#: community/apps/application/views/chat_views.py:173 +msgid "Delete a conversation" +msgstr "删除对话" + +#: community/apps/application/views/chat_views.py:192 +#: community/apps/application/views/chat_views.py:193 +msgid "Get client conversation list by paging" +msgstr "获取客户对话列表分页" + +#: community/apps/application/views/chat_views.py:215 +#: community/apps/application/views/chat_views.py:216 +msgid "Client deletes conversation" +msgstr "客户端删除对话" + +#: community/apps/application/views/chat_views.py:232 +#: community/apps/application/views/chat_views.py:233 +msgid "Client modifies dialogue summary" +msgstr "客户端修改对话摘要" + +#: community/apps/application/views/chat_views.py:253 +#: community/apps/application/views/chat_views.py:254 +msgid "Get the conversation list by page" +msgstr "获取对话列表分页" + +#: community/apps/application/views/chat_views.py:278 +#: community/apps/application/views/chat_views.py:279 +msgid "Get conversation record details" +msgstr "获取对话记录详情" + +#: community/apps/application/views/chat_views.py:298 +#: community/apps/application/views/chat_views.py:299 +msgid "Get a list of conversation records" +msgstr "获取对话记录列表" + +#: 
community/apps/application/views/chat_views.py:319 +#: community/apps/application/views/chat_views.py:320 +msgid "Get the conversation history list by page" +msgstr "获取对话历史列表分页" + +#: community/apps/application/views/chat_views.py:342 +#: community/apps/application/views/chat_views.py:343 +msgid "Like, Dislike" +msgstr "点赞,点踩" + +#: community/apps/application/views/chat_views.py:365 +#: community/apps/application/views/chat_views.py:366 +msgid "Get the list of marked paragraphs" +msgstr "获取标记段落列表" + +#: community/apps/application/views/chat_views.py:369 +#: community/apps/application/views/chat_views.py:390 +#: community/apps/application/views/chat_views.py:442 +msgid "Application/Conversation Log/Annotation" +msgstr "应用/对话日志/标注" + +#: community/apps/application/views/chat_views.py:412 +#: community/apps/application/views/chat_views.py:413 +msgid "Add to Knowledge Base" +msgstr "添加到知识库" + +#: community/apps/application/views/chat_views.py:416 +msgid "Application/Conversation Log/Add to Knowledge Base" +msgstr "应用/对话日志/添加到知识库" + +#: community/apps/application/views/chat_views.py:438 +#: community/apps/application/views/chat_views.py:439 +msgid "Delete a Annotation" +msgstr "删除标注" + +#: community/apps/application/views/chat_views.py:487 +#: community/apps/dataset/views/file.py:28 +#: community/apps/dataset/views/file.py:29 +#: community/apps/dataset/views/file.py:34 +msgid "Upload file" +msgstr "上传文件" + +#: community/apps/common/auth/authenticate.py:62 +#: community/apps/common/auth/authenticate.py:83 +msgid "Not logged in, please log in first" +msgstr "未登录,请先登录" + +#: community/apps/common/auth/authenticate.py:68 +#: community/apps/common/auth/authenticate.py:74 +#: community/apps/common/auth/authenticate.py:89 +#: community/apps/common/auth/authenticate.py:95 +msgid "Authentication information is incorrect! 
illegal user" +msgstr "非法用户!认证信息不正确" + +#: community/apps/common/auth/authentication.py:94 +msgid "No permission to access" +msgstr "没有权限访问" + +#: community/apps/common/auth/handle/impl/application_key.py:23 +#: community/apps/common/auth/handle/impl/application_key.py:25 +msgid "Secret key is invalid" +msgstr "secret key无效" + +#: community/apps/common/auth/handle/impl/public_access_token.py:48 +#: community/apps/common/auth/handle/impl/public_access_token.py:50 +#: community/apps/common/auth/handle/impl/public_access_token.py:52 +#: community/apps/common/auth/handle/impl/public_access_token.py:54 +msgid "Authentication information is incorrect" +msgstr "认证信息不正确" + +#: community/apps/common/auth/handle/impl/user_token.py:34 +msgid "Login expired" +msgstr "登录过期" + +#: community/apps/common/constants/exception_code_constants.py:31 +msgid "The username or password is incorrect" +msgstr "用户名或密码错误" + +#: community/apps/common/constants/exception_code_constants.py:32 +msgid "Please log in first and bring the user Token" +msgstr "请先登录并携带用户Token" + +#: community/apps/common/constants/exception_code_constants.py:33 +#: community/apps/users/serializers/user_serializers.py:429 +msgid "Email sending failed" +msgstr "邮箱发送失败" + +#: community/apps/common/constants/exception_code_constants.py:34 +msgid "Email format error" +msgstr "邮箱格式错误" + +#: community/apps/common/constants/exception_code_constants.py:35 +msgid "The email has been registered, please log in directly" +msgstr "邮箱已注册,请直接登录" + +#: community/apps/common/constants/exception_code_constants.py:36 +msgid "The email is not registered, please register first" +msgstr "邮箱未注册,请先注册" + +#: community/apps/common/constants/exception_code_constants.py:38 +msgid "The verification code is incorrect or the verification code has expired" +msgstr "验证码错误或验证码已过期" + +#: community/apps/common/constants/exception_code_constants.py:39 +msgid "The username has been registered, please log in directly" +msgstr "用户名已注册,请直接登录" + +#: 
community/apps/common/constants/exception_code_constants.py:41 +msgid "" +"The username cannot be empty and must be between 6 and 20 characters long." +msgstr "用户名不能为空,且长度必须在6-20个字符之间。" + +#: community/apps/common/constants/exception_code_constants.py:43 +msgid "Password and confirmation password are inconsistent" +msgstr "密码和确认密码不一致" + +#: community/apps/common/constants/permission_constants.py:61 +msgid "ADMIN" +msgstr "管理员" + +#: community/apps/common/constants/permission_constants.py:61 +msgid "Admin, prefabs are not currently used" +msgstr "管理员,预制目前不会使用" + +#: community/apps/common/constants/permission_constants.py:62 +msgid "USER" +msgstr "用户" + +#: community/apps/common/constants/permission_constants.py:62 +msgid "All user permissions" +msgstr "所有用户权限" + +#: community/apps/common/constants/permission_constants.py:63 +msgid "chat" +msgstr "对话" + +#: community/apps/common/constants/permission_constants.py:63 +msgid "Only has application dialog interface permissions" +msgstr "只拥有应用对话接口权限" + +#: community/apps/common/constants/permission_constants.py:64 +msgid "Apply private key" +msgstr "应用私钥" + +#: community/apps/common/event/__init__.py:30 +msgid "The download process was interrupted, please try again" +msgstr "下载过程中断,请重试" + +#: community/apps/common/event/listener_manage.py:91 +#, python-brace-format +msgid "Query vector data: {paragraph_id_list} error {error} {traceback}" +msgstr "向量数据查询: {paragraph_id_list} 错误 {error} {traceback}" + +#: community/apps/common/event/listener_manage.py:96 +#, python-brace-format +msgid "Start--->Embedding paragraph: {paragraph_id_list}" +msgstr "开始--->嵌入段落: {paragraph_id_list}" + +#: community/apps/common/event/listener_manage.py:108 +#, python-brace-format +msgid "Vectorized paragraph: {paragraph_id_list} error {error} {traceback}" +msgstr "向量化段落: {paragraph_id_list} 错误 {error} {traceback}" + +#: community/apps/common/event/listener_manage.py:114 +#, python-brace-format +msgid "End--->Embedding paragraph: 
{paragraph_id_list}" +msgstr "结束--->嵌入段落: {paragraph_id_list}" + +#: community/apps/common/event/listener_manage.py:123 +#, python-brace-format +msgid "Start--->Embedding paragraph: {paragraph_id}" +msgstr "开始--->嵌入段落: {paragraph_id}" + +#: community/apps/common/event/listener_manage.py:148 +#, python-brace-format +msgid "Vectorized paragraph: {paragraph_id} error {error} {traceback}" +msgstr "向量化段落: {paragraph_id} 错误 {error} {traceback}" + +#: community/apps/common/event/listener_manage.py:153 +#, python-brace-format +msgid "End--->Embedding paragraph: {paragraph_id}" +msgstr "结束--->嵌入段落: {paragraph_id}" + +#: community/apps/common/event/listener_manage.py:269 +#, python-brace-format +msgid "Start--->Embedding document: {document_id}" +msgstr "开始--->嵌入文档: {document_id}" + +#: community/apps/common/event/listener_manage.py:291 +#, python-brace-format +msgid "Vectorized document: {document_id} error {error} {traceback}" +msgstr "向量化文档: {document_id} 错误 {error} {traceback}" + +#: community/apps/common/event/listener_manage.py:296 +#, python-brace-format +msgid "End--->Embedding document: {document_id}" +msgstr "结束--->嵌入文档: {document_id}" + +#: community/apps/common/event/listener_manage.py:307 +#, python-brace-format +msgid "Start--->Embedding dataset: {dataset_id}" +msgstr "开始--->嵌入知识库: {dataset_id}" + +#: community/apps/common/event/listener_manage.py:311 +#, python-brace-format +msgid "Start--->Embedding document: {document_list}" +msgstr "开始--->嵌入文档: {document_list}" + +#: community/apps/common/event/listener_manage.py:315 +#: community/apps/embedding/task/embedding.py:123 +#, python-brace-format +msgid "Vectorized dataset: {dataset_id} error {error} {traceback}" +msgstr "向量化知识库: {dataset_id} 错误 {error} {traceback}" + +#: community/apps/common/event/listener_manage.py:318 +#, python-brace-format +msgid "End--->Embedding dataset: {dataset_id}" +msgstr "结束--->嵌入知识库: {dataset_id}" + +#: community/apps/common/field/common.py:45 +msgid "not a function" +msgstr "不是函数" 
+ +#: community/apps/common/forms/base_field.py:64 +#, python-brace-format +msgid "The field {field_label} is required" +msgstr "字段 {field_label} 是必填的" + +#: community/apps/common/forms/slider_field.py:56 +#, python-brace-format +msgid "The {field_label} cannot be less than {min}" +msgstr "字段 {field_label} 不能小于 {min}" + +#: community/apps/common/forms/slider_field.py:62 +#, python-brace-format +msgid "The {field_label} cannot be greater than {max}" +msgstr "{field_label} 不能大于 {max}" + +#: community/apps/common/handle/handle_exception.py:30 +msgid "Unknown exception" +msgstr "未知异常" + +#: community/apps/common/handle/impl/pdf_split_handle.py:278 +#, python-brace-format +msgid "This document has no preface and is treated as ordinary text: {e}" +msgstr "文档没有前言,视为普通文本: {e}" + +#: community/apps/common/init/init_doc.py:26 +#: community/apps/common/init/init_doc.py:45 +msgid "Intelligent customer service platform" +msgstr "智能客服平台" + +#: community/apps/common/job/clean_chat_job.py:25 +msgid "start clean chat log" +msgstr "开始清理对话日志" + +#: community/apps/common/job/clean_chat_job.py:71 +msgid "end clean chat log" +msgstr "结束清理对话日志" + +#: community/apps/common/job/clean_debug_file_job.py:21 +msgid "start clean debug file" +msgstr "开始清理调试文件" + +#: community/apps/common/job/clean_debug_file_job.py:25 +msgid "end clean debug file" +msgstr "结束清理调试文件" + +#: community/apps/common/job/client_access_num_job.py:25 +msgid "start reset access_num" +msgstr "开始重置访问次数" + +#: community/apps/common/job/client_access_num_job.py:27 +msgid "end reset access_num" +msgstr "结束重置访问次数" + +#: community/apps/common/log/log.py:37 +msgid "unknown" +msgstr "未知的" + +#: community/apps/common/response/result.py:24 +msgid "Success" +msgstr "成功" + +#: community/apps/common/response/result.py:36 +#: community/apps/common/response/result.py:80 +#: community/apps/common/response/result.py:82 +msgid "current page" +msgstr "当前页" + +#: community/apps/common/response/result.py:42 +#: 
community/apps/common/response/result.py:85 +#: community/apps/common/response/result.py:87 +msgid "page size" +msgstr "每页数量" + +#: community/apps/common/response/result.py:53 +#: community/apps/common/response/result.py:101 +#: community/apps/common/response/result.py:130 +msgid "response parameters" +msgstr "响应参数" + +#: community/apps/common/response/result.py:59 +#: community/apps/common/response/result.py:107 +#: community/apps/common/response/result.py:136 +msgid "response code" +msgstr "响应码" + +#: community/apps/common/response/result.py:61 +#: community/apps/common/response/result.py:109 +#: community/apps/common/response/result.py:138 +msgid "success:200 fail:other" +msgstr "成功:200 失败:其他" + +#: community/apps/common/response/result.py:64 +#: community/apps/common/response/result.py:112 +#: community/apps/common/response/result.py:141 +msgid "prompt" +msgstr "提示" + +#: community/apps/common/response/result.py:65 +#: community/apps/common/response/result.py:113 +#: community/apps/common/response/result.py:142 +msgid "success" +msgstr "成功" + +#: community/apps/common/response/result.py:66 +#: community/apps/common/response/result.py:114 +#: community/apps/common/response/result.py:143 +msgid "error prompt" +msgstr "错误提示" + +#: community/apps/common/response/result.py:72 +#: community/apps/common/response/result.py:74 +msgid "total number of data" +msgstr "总条数" + +#: community/apps/common/swagger_api/common_api.py:24 +#: community/apps/dataset/serializers/dataset_serializers.py:569 +msgid "query text" +msgstr "查询文本" + +#: community/apps/common/swagger_api/common_api.py:42 +msgid "Retrieval pattern embedding|keywords|blend" +msgstr "检索模式 embedding|keywords|blend" + +#: community/apps/common/swagger_api/common_api.py:66 +#: community/apps/common/swagger_api/common_api.py:67 +msgid "Number of clicks and dislikes" +msgstr "点踩数" + +#: community/apps/common/swagger_api/common_api.py:74 +#: community/apps/common/swagger_api/common_api.py:75 +msgid "relevance score" 
+msgstr "相关性得分" + +#: community/apps/common/swagger_api/common_api.py:76 +#: community/apps/common/swagger_api/common_api.py:77 +msgid "Comprehensive score, used for ranking" +msgstr "综合得分,用于排序" + +#: community/apps/common/swagger_api/common_api.py:78 +#: community/apps/common/swagger_api/common_api.py:79 +#: community/apps/users/serializers/user_serializers.py:591 +#: community/apps/users/serializers/user_serializers.py:592 +msgid "Update time" +msgstr "更新时间" + +#: community/apps/common/swagger_api/common_api.py:81 +#: community/apps/common/swagger_api/common_api.py:82 +#: community/apps/users/serializers/user_serializers.py:589 +#: community/apps/users/serializers/user_serializers.py:590 +msgid "Create time" +msgstr "创建时间" + +#: community/apps/common/util/common.py:239 +msgid "Text-to-speech node, the text content must be of string type" +msgstr "文本转语音节点,文本内容必须是字符串类型" + +#: community/apps/common/util/common.py:241 +msgid "Text-to-speech node, the text content cannot be empty" +msgstr "文本转语音节点,文本内容不能为空" + +#: community/apps/dataset/serializers/common_serializers.py:87 +msgid "source url" +msgstr "文档地址" + +#: community/apps/dataset/serializers/common_serializers.py:89 +#: community/apps/dataset/serializers/dataset_serializers.py:333 +#: community/apps/dataset/serializers/dataset_serializers.py:390 +#: community/apps/dataset/serializers/dataset_serializers.py:391 +#: community/apps/dataset/serializers/document_serializers.py:155 +#: community/apps/dataset/serializers/document_serializers.py:181 +msgid "selector" +msgstr "选择器" + +#: community/apps/dataset/serializers/common_serializers.py:96 +#: community/apps/dataset/serializers/dataset_serializers.py:341 +#, python-brace-format +msgid "URL error, cannot parse [{source_url}]" +msgstr "URL错误,无法解析 [{source_url}]" + +#: community/apps/dataset/serializers/common_serializers.py:105 +#: community/apps/dataset/serializers/common_serializers.py:124 +#: community/apps/dataset/serializers/common_serializers.py:125 +#: 
community/apps/dataset/serializers/document_serializers.py:85 +#: community/apps/dataset/swagger_api/document_api.py:23 +#: community/apps/dataset/swagger_api/document_api.py:24 +#: community/apps/dataset/swagger_api/document_api.py:49 +#: community/apps/dataset/swagger_api/document_api.py:50 +msgid "id list" +msgstr "id 列表" + +#: community/apps/dataset/serializers/common_serializers.py:115 +#, python-brace-format +msgid "The following id does not exist: {error_id_list}" +msgstr "id不存在: {error_id_list}" + +#: community/apps/dataset/serializers/common_serializers.py:183 +#: community/apps/dataset/serializers/common_serializers.py:207 +msgid "The knowledge base is inconsistent with the vector model" +msgstr "知识库与向量模型不一致" + +#: community/apps/dataset/serializers/common_serializers.py:185 +#: community/apps/dataset/serializers/common_serializers.py:209 +msgid "Knowledge base setting error, please reset the knowledge base" +msgstr "知识库设置错误,请重新设置知识库" + +#: community/apps/dataset/serializers/dataset_serializers.py:109 +#: community/apps/dataset/serializers/dataset_serializers.py:110 +#: community/apps/setting/serializers/model_apply_serializers.py:51 +msgid "model id" +msgstr "模型 id" + +#: community/apps/dataset/serializers/dataset_serializers.py:112 +#: community/apps/dataset/serializers/dataset_serializers.py:114 +msgid "Whether to start multiple rounds of dialogue" +msgstr "是否开启多轮对话" + +#: community/apps/dataset/serializers/dataset_serializers.py:115 +#: community/apps/dataset/serializers/dataset_serializers.py:116 +msgid "opening remarks" +msgstr "开场白" + +#: community/apps/dataset/serializers/dataset_serializers.py:118 +msgid "example" +msgstr "示例" + +#: community/apps/dataset/serializers/dataset_serializers.py:119 +msgid "User id" +msgstr "用户 id" + +#: community/apps/dataset/serializers/dataset_serializers.py:121 +#: community/apps/dataset/serializers/dataset_serializers.py:122 +msgid "Whether to publish" +msgstr "是否发布" + +#: 
community/apps/dataset/serializers/dataset_serializers.py:124 +#: community/apps/dataset/serializers/dataset_serializers.py:125 +#: community/apps/dataset/serializers/dataset_serializers.py:304 +#: community/apps/dataset/serializers/dataset_serializers.py:305 +#: community/apps/dataset/serializers/dataset_serializers.py:366 +#: community/apps/dataset/serializers/dataset_serializers.py:367 +#: community/apps/dataset/serializers/dataset_serializers.py:511 +#: community/apps/dataset/serializers/dataset_serializers.py:512 +#: community/apps/dataset/serializers/dataset_serializers.py:942 +#: community/apps/dataset/serializers/dataset_serializers.py:943 +#: community/apps/dataset/serializers/document_serializers.py:824 +#: community/apps/dataset/serializers/document_serializers.py:825 +#: community/apps/dataset/serializers/paragraph_serializers.py:200 +#: community/apps/dataset/serializers/paragraph_serializers.py:201 +#: community/apps/dataset/serializers/paragraph_serializers.py:724 +#: community/apps/dataset/serializers/paragraph_serializers.py:725 +#: community/apps/dataset/swagger_api/problem_api.py:33 +#: community/apps/dataset/swagger_api/problem_api.py:34 +#: community/apps/dataset/swagger_api/problem_api.py:135 +#: community/apps/dataset/swagger_api/problem_api.py:136 +#: community/apps/function_lib/swagger_api/function_lib_api.py:32 +#: community/apps/function_lib/swagger_api/function_lib_api.py:33 +msgid "create time" +msgstr "创建时间" + +#: community/apps/dataset/serializers/dataset_serializers.py:127 +#: community/apps/dataset/serializers/dataset_serializers.py:128 +#: community/apps/dataset/serializers/dataset_serializers.py:301 +#: community/apps/dataset/serializers/dataset_serializers.py:302 +#: community/apps/dataset/serializers/dataset_serializers.py:363 +#: community/apps/dataset/serializers/dataset_serializers.py:364 +#: community/apps/dataset/serializers/dataset_serializers.py:508 +#: community/apps/dataset/serializers/dataset_serializers.py:509 +#: 
community/apps/dataset/serializers/dataset_serializers.py:939 +#: community/apps/dataset/serializers/dataset_serializers.py:940 +#: community/apps/dataset/serializers/document_serializers.py:821 +#: community/apps/dataset/serializers/document_serializers.py:822 +#: community/apps/dataset/serializers/paragraph_serializers.py:197 +#: community/apps/dataset/serializers/paragraph_serializers.py:198 +#: community/apps/dataset/serializers/paragraph_serializers.py:721 +#: community/apps/dataset/serializers/paragraph_serializers.py:722 +#: community/apps/dataset/swagger_api/problem_api.py:30 +#: community/apps/dataset/swagger_api/problem_api.py:31 +#: community/apps/dataset/swagger_api/problem_api.py:132 +#: community/apps/dataset/swagger_api/problem_api.py:133 +#: community/apps/function_lib/swagger_api/function_lib_api.py:34 +#: community/apps/function_lib/swagger_api/function_lib_api.py:35 +msgid "update time" +msgstr "更新时间" + +#: community/apps/dataset/serializers/dataset_serializers.py:257 +#: community/apps/dataset/serializers/dataset_serializers.py:260 +#: community/apps/dataset/serializers/document_serializers.py:211 +#: community/apps/dataset/serializers/document_serializers.py:218 +#: community/apps/dataset/serializers/document_serializers.py:987 +#: community/apps/dataset/serializers/document_serializers.py:1016 +msgid "file list" +msgstr "文件列表" + +#: community/apps/dataset/serializers/dataset_serializers.py:269 +msgid "upload files " +msgstr "上传文件" + +#: community/apps/dataset/serializers/dataset_serializers.py:297 +#: community/apps/dataset/serializers/dataset_serializers.py:298 +#: community/apps/dataset/serializers/dataset_serializers.py:359 +#: community/apps/dataset/serializers/dataset_serializers.py:360 +#: community/apps/dataset/serializers/dataset_serializers.py:504 +#: community/apps/dataset/serializers/dataset_serializers.py:505 +#: community/apps/dataset/serializers/dataset_serializers.py:935 +#: 
community/apps/dataset/serializers/dataset_serializers.py:936 +#: community/apps/dataset/serializers/document_serializers.py:814 +#: community/apps/dataset/serializers/document_serializers.py:815 +msgid "char length" +msgstr "字符长度" + +#: community/apps/dataset/serializers/dataset_serializers.py:299 +#: community/apps/dataset/serializers/dataset_serializers.py:300 +#: community/apps/dataset/serializers/dataset_serializers.py:361 +#: community/apps/dataset/serializers/dataset_serializers.py:362 +#: community/apps/dataset/serializers/dataset_serializers.py:506 +#: community/apps/dataset/serializers/dataset_serializers.py:507 +#: community/apps/dataset/serializers/dataset_serializers.py:937 +#: community/apps/dataset/serializers/dataset_serializers.py:938 +msgid "document count" +msgstr "文档数量" + +#: community/apps/dataset/serializers/dataset_serializers.py:308 +#: community/apps/dataset/serializers/dataset_serializers.py:309 +#: community/apps/dataset/serializers/dataset_serializers.py:370 +#: community/apps/dataset/serializers/dataset_serializers.py:371 +#: community/apps/dataset/serializers/dataset_serializers.py:515 +#: community/apps/dataset/serializers/dataset_serializers.py:516 +#: community/apps/dataset/serializers/document_serializers.py:290 +#: community/apps/dataset/serializers/document_serializers.py:485 +msgid "document list" +msgstr "文档列表" + +#: community/apps/dataset/serializers/dataset_serializers.py:327 +#: community/apps/dataset/serializers/dataset_serializers.py:388 +#: community/apps/dataset/serializers/dataset_serializers.py:389 +msgid "web source url" +msgstr "web站点url" + +#: community/apps/dataset/serializers/dataset_serializers.py:414 +#: community/apps/setting/serializers/valid_serializers.py:26 +msgid "" +"The community version supports up to 50 knowledge bases. If you need more " +"knowledge bases, please contact us (https://fit2cloud.com/)." 
+msgstr "" +"社区版最多支持 50 个知识库,如需拥有更多知识库,请联系我们(https://" +"fit2cloud.com/)。" + +#: community/apps/dataset/serializers/dataset_serializers.py:533 +#: community/apps/dataset/serializers/dataset_serializers.py:534 +msgid "documents" +msgstr "文档" + +#: community/apps/dataset/serializers/dataset_serializers.py:577 +msgid "search mode" +msgstr "搜索模式" + +#: community/apps/dataset/serializers/dataset_serializers.py:582 +#: community/apps/dataset/serializers/dataset_serializers.py:618 +#: community/apps/dataset/serializers/dataset_serializers.py:706 +msgid "id does not exist" +msgstr "ID 不存在" + +#: community/apps/dataset/serializers/dataset_serializers.py:609 +msgid "sync type" +msgstr "同步类型" + +#: community/apps/dataset/serializers/dataset_serializers.py:611 +msgid "The synchronization type only supports:replace|complete" +msgstr "同步类型只支持:replace|complete" + +#: community/apps/dataset/serializers/dataset_serializers.py:620 +#: community/apps/dataset/serializers/document_serializers.py:499 +msgid "Synchronization is only supported for web site types" +msgstr "只有web站点类型才支持同步" + +#: community/apps/dataset/serializers/dataset_serializers.py:694 +msgid "" +"Synchronization type->replace: replacement synchronization, complete: " +"complete synchronization" +msgstr "同步类型->replace:替换同步,complete:完整同步" + +#: community/apps/dataset/serializers/dataset_serializers.py:803 +#: community/apps/dataset/serializers/document_serializers.py:748 +#: community/apps/setting/models_provider/tools.py:25 +msgid "No permission to use this model" +msgstr "无权限使用该模型" + +#: community/apps/dataset/serializers/dataset_serializers.py:815 +msgid "Failed to send the vectorization task, please try again later!" +msgstr "向量化任务发送失败,请稍后再试!" 
+ +#: community/apps/dataset/serializers/dataset_serializers.py:911 +#: community/apps/dataset/serializers/document_serializers.py:846 +msgid "meta" +msgstr "知识库元数据" + +#: community/apps/dataset/serializers/dataset_serializers.py:913 +msgid "Knowledge base metadata->web:{source_url:xxx,selector:'xxx'},base:{}" +msgstr "知识库元数据->web:{source_url:xxx,selector:'xxx'},base:{}" + +#: community/apps/dataset/serializers/document_serializers.py:87 +#: community/apps/dataset/serializers/document_serializers.py:100 +#: community/apps/dataset/serializers/document_serializers.py:416 +#: community/apps/dataset/swagger_api/document_api.py:37 +#: community/apps/dataset/swagger_api/document_api.py:51 +msgid "task type" +msgstr "任务类型" + +#: community/apps/dataset/serializers/document_serializers.py:95 +#: community/apps/dataset/serializers/document_serializers.py:108 +msgid "task type not support" +msgstr "任务类型不支持" + +#: community/apps/dataset/serializers/document_serializers.py:115 +#: community/apps/dataset/serializers/document_serializers.py:188 +#: community/apps/dataset/serializers/document_serializers.py:200 +#: community/apps/dataset/serializers/document_serializers.py:201 +#: community/apps/dataset/serializers/document_serializers.py:412 +#: community/apps/dataset/serializers/document_serializers.py:476 +#: community/apps/dataset/serializers/document_serializers.py:836 +#: community/apps/dataset/serializers/document_serializers.py:837 +msgid "document name" +msgstr "文档名称" + +#: community/apps/dataset/serializers/document_serializers.py:118 +msgid "The type only supports optimization|directly_return" +msgstr "类型只支持 optimization|directly_return" + +#: community/apps/dataset/serializers/document_serializers.py:120 +#: community/apps/dataset/serializers/document_serializers.py:414 +#: community/apps/dataset/serializers/document_serializers.py:480 +#: community/apps/dataset/serializers/document_serializers.py:840 +#: community/apps/dataset/swagger_api/document_api.py:25 +msgid 
"hit handling method" +msgstr "命中处理方法" + +#: community/apps/dataset/serializers/document_serializers.py:126 +#: community/apps/dataset/serializers/document_serializers.py:844 +#: community/apps/dataset/swagger_api/document_api.py:27 +msgid "directly return similarity" +msgstr "直接返回相似度" + +#: community/apps/dataset/serializers/document_serializers.py:129 +#: community/apps/dataset/serializers/document_serializers.py:415 +msgid "document is active" +msgstr "文档是否可用" + +#: community/apps/dataset/serializers/document_serializers.py:150 +#: community/apps/dataset/serializers/document_serializers.py:152 +msgid "document url list" +msgstr "文档 url 列表" + +#: community/apps/dataset/serializers/document_serializers.py:178 +#: community/apps/dataset/serializers/document_serializers.py:179 +msgid "source url list" +msgstr "文档地址列表" + +#: community/apps/dataset/serializers/document_serializers.py:202 +#: community/apps/dataset/serializers/document_serializers.py:203 +msgid "paragraphs" +msgstr "段落" + +#: community/apps/dataset/serializers/document_serializers.py:227 +msgid "The template type only supports excel|csv" +msgstr "模版类型只支持 excel|csv" + +#: community/apps/dataset/serializers/document_serializers.py:237 +msgid "Export template type csv|excel" +msgstr "导出模版类型 csv|excel" + +#: community/apps/dataset/serializers/document_serializers.py:289 +#: community/apps/dataset/serializers/paragraph_serializers.py:304 +#: community/apps/dataset/serializers/paragraph_serializers.py:436 +msgid "target dataset id" +msgstr "目标知识库 id" + +#: community/apps/dataset/serializers/document_serializers.py:391 +#: community/apps/dataset/serializers/paragraph_serializers.py:305 +#: community/apps/dataset/serializers/paragraph_serializers.py:441 +msgid "target document id" +msgstr "目标文档 id" + +#: community/apps/dataset/serializers/document_serializers.py:399 +#: community/apps/dataset/serializers/document_serializers.py:400 +msgid "document id list" +msgstr "文档 id 列表" + +#: 
community/apps/dataset/serializers/document_serializers.py:418 +msgid "order by" +msgstr "排序" + +#: community/apps/dataset/serializers/document_serializers.py:653 +msgid "Section title (optional)" +msgstr "分段标题(选填)" + +#: community/apps/dataset/serializers/document_serializers.py:654 +msgid "" +"Section content (required, question answer, no more than 4096 characters)" +msgstr "分段内容(必填,问题答案,最长不超过4096个字符)" + +#: community/apps/dataset/serializers/document_serializers.py:655 +msgid "Question (optional, one per line in the cell)" +msgstr "问题(选填,单元格内一行一个)" + +#: community/apps/dataset/serializers/document_serializers.py:765 +msgid "The task is being executed, please do not send it repeatedly." +msgstr "任务正在执行中,请勿重复发送" + +#: community/apps/dataset/serializers/document_serializers.py:842 +msgid "ai optimization: optimization, direct return: directly_return" +msgstr "ai优化: optimization, 直接返回: directly_return" + +#: community/apps/dataset/serializers/document_serializers.py:848 +msgid "Document metadata->web:{source_url:xxx,selector:'xxx'},base:{}" +msgstr "文档元数据->web:{source_url:xxx,selector:'xxx'},base:{}" + +#: community/apps/dataset/serializers/document_serializers.py:859 +msgid "dataset id not exist" +msgstr "知识库 id 不存在" + +#: community/apps/dataset/serializers/document_serializers.py:990 +#: community/apps/dataset/serializers/document_serializers.py:1020 +msgid "limit" +msgstr "分段长度" + +#: community/apps/dataset/serializers/document_serializers.py:994 +#: community/apps/dataset/serializers/document_serializers.py:996 +msgid "patterns" +msgstr "分段标识列表" + +#: community/apps/dataset/serializers/document_serializers.py:999 +msgid "Auto Clean" +msgstr "自动清洗" + +#: community/apps/dataset/serializers/document_serializers.py:1006 +msgid "The maximum size of the uploaded file cannot exceed 100MB" +msgstr "文件上传最大大小不能超过100MB" + +#: community/apps/dataset/serializers/document_serializers.py:1025 +msgid "Segmented regular list" +msgstr "分段正则列表" + +#: 
community/apps/dataset/serializers/document_serializers.py:1029 +#: community/apps/dataset/serializers/document_serializers.py:1030 +msgid "Whether to clear special characters" +msgstr "是否清除特殊字符" + +#: community/apps/dataset/serializers/document_serializers.py:1049 +msgid "space" +msgstr "空格" + +#: community/apps/dataset/serializers/document_serializers.py:1050 +msgid "semicolon" +msgstr "分号" + +#: community/apps/dataset/serializers/document_serializers.py:1050 +msgid "comma" +msgstr "逗号" + +#: community/apps/dataset/serializers/document_serializers.py:1051 +msgid "period" +msgstr "句号" + +#: community/apps/dataset/serializers/document_serializers.py:1051 +msgid "enter" +msgstr "回车" + +#: community/apps/dataset/serializers/document_serializers.py:1052 +msgid "blank line" +msgstr "空行" + +#: community/apps/dataset/serializers/document_serializers.py:1165 +msgid "Hit handling method is required" +msgstr "命中处理方式必填" + +#: community/apps/dataset/serializers/document_serializers.py:1167 +msgid "The hit processing method must be directly_return|optimization" +msgstr "命中处理方式必须是 directly_return|optimization" + +#: community/apps/dataset/serializers/document_serializers.py:1213 +#: community/apps/dataset/serializers/paragraph_serializers.py:753 +msgid "The task is being executed, please do not send it again." 
+msgstr "任务正在执行中,请勿重复发送" + +#: community/apps/dataset/serializers/file_serializers.py:82 +msgid "File not found" +msgstr "文件不存在" + +#: community/apps/dataset/serializers/image_serializers.py:23 +msgid "image" +msgstr "图片" + +#: community/apps/dataset/serializers/image_serializers.py:42 +msgid "Image not found" +msgstr "图片不存在" + +#: community/apps/dataset/serializers/paragraph_serializers.py:52 +#: community/apps/dataset/serializers/paragraph_serializers.py:68 +#: community/apps/dataset/serializers/paragraph_serializers.py:69 +#: community/apps/dataset/serializers/paragraph_serializers.py:82 +#: community/apps/dataset/serializers/paragraph_serializers.py:85 +#: community/apps/dataset/serializers/paragraph_serializers.py:91 +#: community/apps/dataset/serializers/paragraph_serializers.py:93 +#: community/apps/dataset/serializers/paragraph_serializers.py:653 +msgid "section title" +msgstr "段落标题" + +#: community/apps/dataset/serializers/paragraph_serializers.py:65 +#: community/apps/dataset/serializers/paragraph_serializers.py:66 +msgid "section content" +msgstr "段落内容" + +#: community/apps/dataset/serializers/paragraph_serializers.py:73 +#: community/apps/dataset/serializers/paragraph_serializers.py:74 +#: community/apps/dataset/serializers/problem_serializers.py:88 +msgid "problem list" +msgstr "问题列表" + +#: community/apps/dataset/serializers/paragraph_serializers.py:100 +#: community/apps/dataset/serializers/paragraph_serializers.py:172 +#: community/apps/dataset/serializers/paragraph_serializers.py:214 +#: community/apps/dataset/serializers/paragraph_serializers.py:276 +#: community/apps/dataset/serializers/paragraph_serializers.py:308 +#: community/apps/dataset/serializers/paragraph_serializers.py:456 +#: community/apps/dataset/serializers/paragraph_serializers.py:563 +#: community/apps/dataset/serializers/problem_serializers.py:57 +#: community/apps/dataset/swagger_api/problem_api.py:61 +msgid "paragraph id" +msgstr "段落 id" + +#: 
community/apps/dataset/serializers/paragraph_serializers.py:105 +#: community/apps/dataset/serializers/paragraph_serializers.py:467 +msgid "Paragraph id does not exist" +msgstr "段落 id 不存在" + +#: community/apps/dataset/serializers/paragraph_serializers.py:134 +msgid "Already associated, please do not associate again" +msgstr "已经关联,请勿重复关联" + +#: community/apps/dataset/serializers/paragraph_serializers.py:191 +#: community/apps/dataset/serializers/paragraph_serializers.py:192 +msgid "question content" +msgstr "问题内容" + +#: community/apps/dataset/serializers/paragraph_serializers.py:193 +#: community/apps/dataset/serializers/paragraph_serializers.py:709 +#: community/apps/dataset/swagger_api/problem_api.py:26 +msgid "hit num" +msgstr "命中数量" + +#: community/apps/dataset/serializers/paragraph_serializers.py:210 +#: community/apps/dataset/serializers/paragraph_serializers.py:281 +#: community/apps/dataset/serializers/problem_serializers.py:39 +#: community/apps/dataset/serializers/problem_serializers.py:64 +#: community/apps/dataset/serializers/problem_serializers.py:194 +#: community/apps/dataset/swagger_api/problem_api.py:101 +msgid "problem id" +msgstr "问题 id" + +#: community/apps/dataset/serializers/paragraph_serializers.py:222 +msgid "Paragraph does not exist" +msgstr "段落不存在" + +#: community/apps/dataset/serializers/paragraph_serializers.py:224 +msgid "Problem does not exist" +msgstr "问题不存在" + +#: community/apps/dataset/serializers/paragraph_serializers.py:306 +#: community/apps/dataset/serializers/paragraph_serializers.py:449 +#: community/apps/dataset/serializers/paragraph_serializers.py:450 +msgid "paragraph id list" +msgstr "段落 id 列表" + +#: community/apps/dataset/serializers/paragraph_serializers.py:317 +msgid "The document to be migrated is consistent with the target document" +msgstr "文档迁移的文档与目标文档一致" + +#: community/apps/dataset/serializers/paragraph_serializers.py:319 +#, python-brace-format +msgid "The document id does not exist [{document_id}]" +msgstr "文档 id 
不存在 [{document_id}]" + +#: community/apps/dataset/serializers/paragraph_serializers.py:323 +#, python-brace-format +msgid "The target document id does not exist [{document_id}]" +msgstr "目标文档 id 不存在 [{document_id}]" + +#: community/apps/dataset/serializers/paragraph_serializers.py:503 +msgid "Problem id does not exist" +msgstr "问题 id 不存在" + +#: community/apps/dataset/serializers/paragraph_serializers.py:713 +#: community/apps/dataset/serializers/paragraph_serializers.py:714 +msgid "Number of dislikes" +msgstr "点踩数量" + +#: community/apps/dataset/serializers/problem_serializers.py:50 +msgid "Issue ID is passed when modifying, not when creating." +msgstr "问题 ID 在修改时传递,创建时不传递" + +#: community/apps/dataset/serializers/problem_serializers.py:62 +#: community/apps/dataset/swagger_api/problem_api.py:51 +#: community/apps/dataset/swagger_api/problem_api.py:52 +#: community/apps/dataset/swagger_api/problem_api.py:83 +#: community/apps/dataset/swagger_api/problem_api.py:84 +msgid "problem id list" +msgstr "问题 id 列表" + +#: community/apps/dataset/swagger_api/document_api.py:38 +#: community/apps/dataset/swagger_api/document_api.py:52 +msgid "1|2|3 1:Vectorization|2:Generate issues|3:Synchronize documents" +msgstr "1|2|3 1:向量化|2:生成问题|3:同步文档" + +#: community/apps/dataset/swagger_api/document_api.py:64 +#: community/apps/dataset/swagger_api/document_api.py:65 +msgid "state list" +msgstr "状态列表" + +#: community/apps/dataset/swagger_api/image_api.py:22 +msgid "image file" +msgstr "图片文件" + +#: community/apps/dataset/swagger_api/problem_api.py:54 +#: community/apps/dataset/swagger_api/problem_api.py:55 +msgid "Associated paragraph information list" +msgstr "关联段落信息列表" + +#: community/apps/dataset/swagger_api/problem_api.py:131 +msgid "Hit num" +msgstr "命中数量" + +#: community/apps/dataset/task/generate.py:95 +#, python-brace-format +msgid "" +"Generate issue based on document: {document_id} error {error}{traceback}" +msgstr "生成问题基于文档: {document_id} 错误 {error}{traceback}" + +#: 
community/apps/dataset/task/generate.py:99 +#, python-brace-format +msgid "End--->Generate problem: {document_id}" +msgstr "结束--->生成问题: {document_id}" + +#: community/apps/dataset/task/sync.py:29 +#: community/apps/dataset/task/sync.py:43 +#, python-brace-format +msgid "Start--->Start synchronization web knowledge base:{dataset_id}" +msgstr "开始--->开始同步web知识库:{dataset_id}" + +#: community/apps/dataset/task/sync.py:34 +#: community/apps/dataset/task/sync.py:47 +#, python-brace-format +msgid "End--->End synchronization web knowledge base:{dataset_id}" +msgstr "结束--->结束同步web知识库:{dataset_id}" + +#: community/apps/dataset/task/sync.py:36 +#: community/apps/dataset/task/sync.py:49 +#, python-brace-format +msgid "Synchronize web knowledge base:{dataset_id} error{error}{traceback}" +msgstr "同步web知识库:{dataset_id} 错误{error}{traceback}" + +#: community/apps/dataset/task/tools.py:114 +#, python-brace-format +msgid "Association problem failed {error}" +msgstr "关联问题失败 {error}" + +#: community/apps/dataset/views/dataset.py:35 +#: community/apps/dataset/views/dataset.py:36 +msgid "Synchronize the knowledge base of the website" +msgstr "同步Web站点知识库" + +#: community/apps/dataset/views/dataset.py:57 +#: community/apps/dataset/views/dataset.py:58 +msgid "Create QA knowledge base" +msgstr "创建QA知识库" + +#: community/apps/dataset/views/dataset.py:77 +#: community/apps/dataset/views/dataset.py:78 +msgid "Create a web site knowledge base" +msgstr "创建web站点知识库" + +#: community/apps/dataset/views/dataset.py:93 +#: community/apps/dataset/views/dataset.py:94 +msgid "Get a list of applications available in the knowledge base" +msgstr "获取知识库中可用的应用列表" + +#: community/apps/dataset/views/dataset.py:105 +#: community/apps/dataset/views/dataset.py:106 +msgid "Get a list of knowledge bases" +msgstr "获取知识库列表" + +#: community/apps/dataset/views/dataset.py:119 +#: community/apps/dataset/views/dataset.py:120 +msgid "Create a knowledge base" +msgstr "创建知识库" + +#: community/apps/dataset/views/dataset.py:134 
+msgid "Hit test list" +msgstr "命中测试列表" + +#: community/apps/dataset/views/dataset.py:154 +msgid "Re-vectorize" +msgstr "重新向量化" + +#: community/apps/dataset/views/dataset.py:170 +msgid "Export knowledge base" +msgstr "导出知识库" + +#: community/apps/dataset/views/dataset.py:184 +#: community/apps/dataset/views/dataset.py:185 +msgid "Export knowledge base containing images" +msgstr "导出ZIP知识库" + +#: community/apps/dataset/views/dataset.py:199 +msgid "Delete knowledge base" +msgstr "删除知识库" + +#: community/apps/dataset/views/dataset.py:213 +#: community/apps/dataset/views/dataset.py:214 +msgid "Query knowledge base details based on knowledge base id" +msgstr "根据知识库id查询知识库详情" + +#: community/apps/dataset/views/dataset.py:226 +#: community/apps/dataset/views/dataset.py:227 +msgid "Modify knowledge base information" +msgstr "修改知识库信息" + +#: community/apps/dataset/views/dataset.py:245 +#: community/apps/dataset/views/dataset.py:246 +#: community/apps/dataset/views/document.py:463 +#: community/apps/dataset/views/document.py:464 +msgid "Get the knowledge base paginated list" +msgstr "获取知识库文档分页列表" + +#: community/apps/dataset/views/document.py:31 +#: community/apps/dataset/views/document.py:32 +msgid "Get QA template" +msgstr "获取问答模版" + +#: community/apps/dataset/views/document.py:44 +#: community/apps/dataset/views/document.py:45 +msgid "Get form template" +msgstr "获取表单模版" + +#: community/apps/dataset/views/document.py:57 +#: community/apps/dataset/views/document.py:58 +msgid "Create Web site documents" +msgstr "创建web站点文档" + +#: community/apps/dataset/views/document.py:77 +#: community/apps/dataset/views/document.py:78 +msgid "Import QA and create documentation" +msgstr "导入问答并创建文档" + +#: community/apps/dataset/views/document.py:98 +#: community/apps/dataset/views/document.py:99 +msgid "Import tables and create documents" +msgstr "导入表格并创建文档" + +#: community/apps/dataset/views/document.py:118 +#: community/apps/dataset/views/document.py:119 +msgid "Create document" +msgstr "创建文档" 
+ +#: community/apps/dataset/views/document.py:152 +#: community/apps/dataset/views/document.py:153 +msgid "Modify document hit processing methods in batches" +msgstr "修改文档命中处理方式批量" + +#: community/apps/dataset/views/document.py:171 +#: community/apps/dataset/views/document.py:172 +msgid "Create documents in batches" +msgstr "批量创建文档" + +#: community/apps/dataset/views/document.py:187 +#: community/apps/dataset/views/document.py:188 +msgid "Batch sync documents" +msgstr "批量同步文档" + +#: community/apps/dataset/views/document.py:202 +#: community/apps/dataset/views/document.py:203 +msgid "Delete documents in batches" +msgstr "批量删除文档" + +#: community/apps/dataset/views/document.py:220 +#: community/apps/dataset/views/document.py:221 +msgid "Synchronize web site types" +msgstr "同步web站点类型" + +#: community/apps/dataset/views/document.py:239 +#: community/apps/dataset/views/document.py:240 +msgid "Cancel task" +msgstr "取消任务" + +#: community/apps/dataset/views/document.py:260 +#: community/apps/dataset/views/document.py:261 +msgid "Cancel tasks in batches" +msgstr "批量取消任务" + +#: community/apps/dataset/views/document.py:279 +#: community/apps/dataset/views/document.py:280 +msgid "Refresh document vector library" +msgstr "文档向量化" + +#: community/apps/dataset/views/document.py:300 +#: community/apps/dataset/views/document.py:301 +msgid "Batch refresh document vector library" +msgstr "批量文档向量化" + +#: community/apps/dataset/views/document.py:319 +#: community/apps/dataset/views/document.py:320 +msgid "Migrate documents in batches" +msgstr "批量迁移文档" + +#: community/apps/dataset/views/document.py:346 +#: community/apps/dataset/views/document.py:347 +msgid "Export document" +msgstr "导出文档" + +#: community/apps/dataset/views/document.py:361 +#: community/apps/dataset/views/document.py:362 +msgid "Export Zip document" +msgstr "导出Zip文档" + +#: community/apps/dataset/views/document.py:376 +#: community/apps/dataset/views/document.py:377 +msgid "Get document details" +msgstr "获取文档详情" + +#: 
community/apps/dataset/views/document.py:391 +#: community/apps/dataset/views/document.py:392 +msgid "Modify document" +msgstr "修改文档" + +#: community/apps/dataset/views/document.py:409 +#: community/apps/dataset/views/document.py:410 +msgid "Delete document" +msgstr "删除文档" + +#: community/apps/dataset/views/document.py:427 +#: community/apps/dataset/views/document.py:428 +msgid "Get a list of segment IDs" +msgstr "获取分段id列表" + +#: community/apps/dataset/views/document.py:439 +#: community/apps/dataset/views/document.py:440 +msgid "Segmented document" +msgstr "分段文档" + +#: community/apps/dataset/views/file.py:42 +#: community/apps/dataset/views/file.py:43 +msgid "Get file" +msgstr "获取文件" + +#: community/apps/dataset/views/image.py:28 +#: community/apps/dataset/views/image.py:29 +#: community/apps/dataset/views/image.py:34 +msgid "Upload image" +msgstr "上传图片" + +#: community/apps/dataset/views/image.py:35 +#: community/apps/dataset/views/image.py:44 +msgid "Image" +msgstr "图片" + +#: community/apps/dataset/views/image.py:42 +#: community/apps/dataset/views/image.py:43 +msgid "Get Image" +msgstr "获取图片" + +#: community/apps/dataset/views/paragraph.py:28 +#: community/apps/dataset/views/paragraph.py:29 +msgid "Paragraph list" +msgstr "段落列表" + +#: community/apps/dataset/views/paragraph.py:32 +#: community/apps/dataset/views/paragraph.py:51 +#: community/apps/dataset/views/paragraph.py:69 +#: community/apps/dataset/views/paragraph.py:85 +#: community/apps/dataset/views/paragraph.py:103 +#: community/apps/dataset/views/paragraph.py:121 +#: community/apps/dataset/views/paragraph.py:140 +#: community/apps/dataset/views/paragraph.py:156 +#: community/apps/dataset/views/paragraph.py:172 +#: community/apps/dataset/views/paragraph.py:193 +#: community/apps/dataset/views/paragraph.py:211 +#: community/apps/dataset/views/paragraph.py:238 +msgid "Knowledge Base/Documentation/Paragraph" +msgstr "知识库/文档/段落" + +#: community/apps/dataset/views/paragraph.py:46 +#: 
community/apps/dataset/views/paragraph.py:47 +msgid "Create Paragraph" +msgstr "创建段落" + +#: community/apps/dataset/views/paragraph.py:64 +#: community/apps/dataset/views/paragraph.py:65 +msgid "Add associated questions" +msgstr "添加关联问题" + +#: community/apps/dataset/views/paragraph.py:80 +#: community/apps/dataset/views/paragraph.py:81 +msgid "Get a list of paragraph questions" +msgstr "获取段落问题列表" + +#: community/apps/dataset/views/paragraph.py:99 +#: community/apps/dataset/views/paragraph.py:100 +msgid "Disassociation issue" +msgstr "取消关联问题" + +#: community/apps/dataset/views/paragraph.py:117 +#: community/apps/dataset/views/paragraph.py:118 +msgid "Related questions" +msgstr "关联问题" + +#: community/apps/dataset/views/paragraph.py:135 +#: community/apps/dataset/views/paragraph.py:136 +msgid "Modify paragraph data" +msgstr "修改段落数据" + +#: community/apps/dataset/views/paragraph.py:152 +#: community/apps/dataset/views/paragraph.py:153 +msgid "Get paragraph details" +msgstr "获取段落详情" + +#: community/apps/dataset/views/paragraph.py:168 +#: community/apps/dataset/views/paragraph.py:169 +msgid "Delete paragraph" +msgstr "删除段落" + +#: community/apps/dataset/views/paragraph.py:187 +#: community/apps/dataset/views/paragraph.py:188 +msgid "Delete paragraphs in batches" +msgstr "批量删除段落" + +#: community/apps/dataset/views/paragraph.py:206 +#: community/apps/dataset/views/paragraph.py:207 +msgid "Migrate paragraphs in batches" +msgstr "批量迁移段落" + +#: community/apps/dataset/views/paragraph.py:233 +#: community/apps/dataset/views/paragraph.py:234 +msgid "Get paragraph list by pagination" +msgstr "获取分页段落列表" + +#: community/apps/dataset/views/problem.py:28 +#: community/apps/dataset/views/problem.py:29 +msgid "Question list" +msgstr "问题列表" + +#: community/apps/dataset/views/problem.py:32 +#: community/apps/dataset/views/problem.py:50 +#: community/apps/dataset/views/problem.py:68 +#: community/apps/dataset/views/problem.py:88 +#: community/apps/dataset/views/problem.py:103 +#: 
community/apps/dataset/views/problem.py:120 +#: community/apps/dataset/views/problem.py:136 +#: community/apps/dataset/views/problem.py:155 +msgid "Knowledge Base/Documentation/Paragraph/Question" +msgstr "知识库/文档/段落/问题" + +#: community/apps/dataset/views/problem.py:45 +#: community/apps/dataset/views/problem.py:46 +msgid "Create question" +msgstr "创建问题" + +#: community/apps/dataset/views/problem.py:64 +#: community/apps/dataset/views/problem.py:65 +msgid "Get a list of associated paragraphs" +msgstr "获取关联段落列表" + +#: community/apps/dataset/views/problem.py:82 +#: community/apps/dataset/views/problem.py:83 +msgid "Batch deletion issues" +msgstr "批量删除问题" + +#: community/apps/dataset/views/problem.py:98 +#: community/apps/dataset/views/problem.py:99 +msgid "Batch associated paragraphs" +msgstr "批量关联段落" + +#: community/apps/dataset/views/problem.py:116 +#: community/apps/dataset/views/problem.py:117 +msgid "Delete question" +msgstr "删除问题" + +#: community/apps/dataset/views/problem.py:131 +#: community/apps/dataset/views/problem.py:132 +msgid "Modify question" +msgstr "修改问题" + +#: community/apps/dataset/views/problem.py:150 +#: community/apps/dataset/views/problem.py:151 +msgid "Get the list of questions by page" +msgstr "获取分页问题列表" + +#: community/apps/embedding/task/embedding.py:30 +#: community/apps/embedding/task/embedding.py:81 +#, python-brace-format +msgid "Failed to obtain vector model: {error} {traceback}" +msgstr "获取向量模型失败: {error} {traceback}" + +#: community/apps/embedding/task/embedding.py:110 +#, python-brace-format +msgid "Start--->Vectorized dataset: {dataset_id}" +msgstr "开始--->向量化知识库: {dataset_id}" + +#: community/apps/embedding/task/embedding.py:114 +#, python-brace-format +msgid "Dataset documentation: {document_names}" +msgstr "知识库文档: {document_names}" + +#: community/apps/embedding/task/embedding.py:127 +#, python-brace-format +msgid "End--->Vectorized dataset: {dataset_id}" +msgstr "结束--->向量化知识库: {dataset_id}" + +#: 
community/apps/function_lib/serializers/function_lib_serializer.py:70 +#: community/apps/function_lib/serializers/function_lib_serializer.py:83 +#: community/apps/function_lib/swagger_api/function_lib_api.py:68 +#: community/apps/function_lib/swagger_api/function_lib_api.py:69 +#: community/apps/function_lib/swagger_api/function_lib_api.py:84 +#: community/apps/function_lib/swagger_api/function_lib_api.py:85 +#: community/apps/function_lib/swagger_api/function_lib_api.py:130 +#: community/apps/function_lib/swagger_api/function_lib_api.py:131 +#: community/apps/function_lib/swagger_api/function_lib_api.py:176 +#: community/apps/function_lib/swagger_api/function_lib_api.py:177 +msgid "variable name" +msgstr "变量名" + +#: community/apps/function_lib/serializers/function_lib_serializer.py:71 +#: community/apps/function_lib/swagger_api/function_lib_api.py:88 +#: community/apps/function_lib/swagger_api/function_lib_api.py:89 +#: community/apps/function_lib/swagger_api/function_lib_api.py:134 +#: community/apps/function_lib/swagger_api/function_lib_api.py:135 +#: community/apps/function_lib/swagger_api/function_lib_api.py:180 +#: community/apps/function_lib/swagger_api/function_lib_api.py:181 +msgid "required" +msgstr "必填" + +#: community/apps/function_lib/serializers/function_lib_serializer.py:74 +msgid "fields only support string|int|dict|array|float" +msgstr "字段只支持string|int|dict|array|float" + +#: community/apps/function_lib/serializers/function_lib_serializer.py:85 +#: community/apps/function_lib/swagger_api/function_lib_api.py:72 +#: community/apps/function_lib/swagger_api/function_lib_api.py:73 +msgid "variable value" +msgstr "变量值" + +#: community/apps/function_lib/serializers/function_lib_serializer.py:93 +#: community/apps/function_lib/serializers/function_lib_serializer.py:104 +#: community/apps/function_lib/serializers/function_lib_serializer.py:119 +#: community/apps/function_lib/serializers/py_lint_serializer.py:23 +#: 
community/apps/function_lib/swagger_api/function_lib_api.py:28 +#: community/apps/function_lib/swagger_api/function_lib_api.py:29 +#: community/apps/function_lib/swagger_api/function_lib_api.py:75 +#: community/apps/function_lib/swagger_api/function_lib_api.py:76 +#: community/apps/function_lib/swagger_api/function_lib_api.py:117 +#: community/apps/function_lib/swagger_api/function_lib_api.py:118 +#: community/apps/function_lib/swagger_api/function_lib_api.py:163 +#: community/apps/function_lib/swagger_api/function_lib_api.py:164 +#: community/apps/function_lib/swagger_api/py_lint_api.py:22 +#: community/apps/function_lib/swagger_api/py_lint_api.py:23 +msgid "function content" +msgstr "函数内容" + +#: community/apps/function_lib/serializers/function_lib_serializer.py:98 +#: community/apps/function_lib/serializers/function_lib_serializer.py:114 +#: community/apps/function_lib/serializers/function_lib_serializer.py:135 +#: community/apps/function_lib/serializers/function_lib_serializer.py:388 +#: community/apps/function_lib/swagger_api/function_lib_api.py:24 +#: community/apps/function_lib/swagger_api/function_lib_api.py:25 +#: community/apps/function_lib/swagger_api/function_lib_api.py:46 +#: community/apps/function_lib/swagger_api/function_lib_api.py:113 +#: community/apps/function_lib/swagger_api/function_lib_api.py:114 +#: community/apps/function_lib/swagger_api/function_lib_api.py:159 +#: community/apps/function_lib/swagger_api/function_lib_api.py:160 +msgid "function name" +msgstr "函数名" + +#: community/apps/function_lib/serializers/function_lib_serializer.py:101 +#: community/apps/function_lib/serializers/function_lib_serializer.py:117 +#: community/apps/function_lib/serializers/function_lib_serializer.py:138 +#: community/apps/function_lib/swagger_api/function_lib_api.py:26 +#: community/apps/function_lib/swagger_api/function_lib_api.py:27 +#: community/apps/function_lib/swagger_api/function_lib_api.py:51 +#: 
community/apps/function_lib/swagger_api/function_lib_api.py:115 +#: community/apps/function_lib/swagger_api/function_lib_api.py:116 +#: community/apps/function_lib/swagger_api/function_lib_api.py:161 +#: community/apps/function_lib/swagger_api/function_lib_api.py:162 +msgid "function description" +msgstr "函数描述" + +#: community/apps/function_lib/serializers/function_lib_serializer.py:232 +msgid "field has no value set" +msgstr "字段没有设置值" + +#: community/apps/function_lib/serializers/function_lib_serializer.py:248 +#: community/apps/function_lib/serializers/function_lib_serializer.py:253 +msgid "type error" +msgstr "类型错误" + +#: community/apps/function_lib/serializers/function_lib_serializer.py:256 +#, python-brace-format +msgid "Field: {name} Type: {_type} Value: {value} Type conversion error" +msgstr "字段: {name} 类型: {_type} 值: {value} 类型转换错误" + +#: community/apps/function_lib/serializers/function_lib_serializer.py:261 +msgid "function id" +msgstr "函数 id" + +#: community/apps/function_lib/serializers/function_lib_serializer.py:267 +#: community/apps/function_lib/serializers/function_lib_serializer.py:303 +#: community/apps/function_lib/serializers/function_lib_serializer.py:366 +#: community/apps/function_lib/serializers/function_lib_serializer.py:396 +msgid "Function does not exist" +msgstr "函数不存在" + +#: community/apps/function_lib/serializers/function_lib_serializer.py:357 +#: community/apps/function_lib/serializers/function_lib_serializer.py:386 +#| msgid "function" +msgid "function ID" +msgstr "函数 ID" + +#: community/apps/function_lib/swagger_api/function_lib_api.py:23 +#: community/apps/function_lib/swagger_api/function_lib_api.py:205 +msgid "ID" +msgstr "" + +#: community/apps/function_lib/swagger_api/function_lib_api.py:30 +#: community/apps/function_lib/swagger_api/function_lib_api.py:31 +msgid "input field" +msgstr "输入字段" + +#: community/apps/function_lib/swagger_api/function_lib_api.py:62 +#: community/apps/function_lib/swagger_api/function_lib_api.py:78 +#: 
community/apps/function_lib/swagger_api/function_lib_api.py:124 +#: community/apps/function_lib/swagger_api/function_lib_api.py:170 +msgid "Input variable list" +msgstr "输入变量列表" + +#: community/apps/function_lib/swagger_api/function_lib_api.py:94 +#: community/apps/function_lib/swagger_api/function_lib_api.py:140 +#: community/apps/function_lib/swagger_api/function_lib_api.py:186 +msgid "Field type string|int|dict|array|float" +msgstr "字段类型 string|int|dict|array|float" + +#: community/apps/function_lib/swagger_api/function_lib_api.py:100 +#: community/apps/function_lib/swagger_api/function_lib_api.py:146 +#: community/apps/function_lib/swagger_api/function_lib_api.py:192 +msgid "The source only supports custom|reference" +msgstr "来源只支持custom|reference" + +#: community/apps/function_lib/views/function_lib_views.py:28 +#: community/apps/function_lib/views/function_lib_views.py:29 +msgid "Get function list" +msgstr "获取函数列表" + +#: community/apps/function_lib/views/function_lib_views.py:30 +#: community/apps/function_lib/views/function_lib_views.py:46 +#: community/apps/function_lib/views/function_lib_views.py:59 +#: community/apps/function_lib/views/function_lib_views.py:74 +#: community/apps/function_lib/views/function_lib_views.py:85 +#: community/apps/function_lib/views/function_lib_views.py:95 +#: community/apps/function_lib/views/function_lib_views.py:111 +#: community/apps/function_lib/views/py_lint.py:29 +msgid "Function" +msgstr "函数库" + +#: community/apps/function_lib/views/function_lib_views.py:43 +#: community/apps/function_lib/views/function_lib_views.py:44 +msgid "Create function" +msgstr "创建函数" + +#: community/apps/function_lib/views/function_lib_views.py:56 +#: community/apps/function_lib/views/function_lib_views.py:57 +msgid "Debug function" +msgstr "调试函数" + +#: community/apps/function_lib/views/function_lib_views.py:71 +#: community/apps/function_lib/views/function_lib_views.py:72 +msgid "Update function" +msgstr "更新函数" + +#: 
community/apps/function_lib/views/function_lib_views.py:83 +#: community/apps/function_lib/views/function_lib_views.py:84 +msgid "Delete function" +msgstr "删除函数" + +#: community/apps/function_lib/views/function_lib_views.py:93 +#: community/apps/function_lib/views/function_lib_views.py:94 +msgid "Get function details" +msgstr "获取函数详情" + +#: community/apps/function_lib/views/function_lib_views.py:106 +#: community/apps/function_lib/views/function_lib_views.py:107 +msgid "Get function list by pagination" +msgstr "获取分页函数列表" + +#: community/apps/function_lib/views/function_lib_views.py:129 +#| msgid "not a function" +msgid "Import function" +msgstr "导入函数" + +#: community/apps/function_lib/views/function_lib_views.py:143 +#| msgid "not a function" +msgid "Export function" +msgstr "导出函数" + +#: community/apps/function_lib/views/py_lint.py:26 +#: community/apps/function_lib/views/py_lint.py:27 +msgid "Check code" +msgstr "检查代码" + +#: community/apps/setting/models_provider/base_model_provider.py:66 +msgid "Model type cannot be empty" +msgstr "模型类型不能为空" + +#: community/apps/setting/models_provider/base_model_provider.py:91 +msgid "The current platform does not support downloading models" +msgstr "当前平台不支持下载模型" + +#: community/apps/setting/models_provider/base_model_provider.py:146 +msgid "LLM" +msgstr "大语言模型" + +#: community/apps/setting/models_provider/base_model_provider.py:147 +msgid "Embedding Model" +msgstr "向量模型" + +#: community/apps/setting/models_provider/base_model_provider.py:148 +msgid "Speech2Text" +msgstr "语音识别" + +#: community/apps/setting/models_provider/base_model_provider.py:149 +msgid "TTS" +msgstr "语音合成" + +#: community/apps/setting/models_provider/base_model_provider.py:150 +msgid "Vision Model" +msgstr "视觉模型" + +#: community/apps/setting/models_provider/base_model_provider.py:151 +msgid "Image Generation" +msgstr "图片生成" + +#: community/apps/setting/models_provider/base_model_provider.py:152 +msgid "Rerank" +msgstr "重排模型" + +#: 
community/apps/setting/models_provider/base_model_provider.py:226 +msgid "The model does not support" +msgstr "模型不支持" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/aliyun_bai_lian_model_provider.py:42 +msgid "" +"With the GTE-Rerank text sorting series model developed by Alibaba Tongyi " +"Lab, developers can integrate high-quality text retrieval and sorting " +"through the LlamaIndex framework." +msgstr "" +"阿里巴巴通义实验室开发的GTE-Rerank文本排序系列模型,开发者可以通过LlamaIndex" +"框架进行集成高质量文本检索、排序。" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/aliyun_bai_lian_model_provider.py:45 +msgid "" +"Chinese (including various dialects such as Cantonese), English, Japanese, " +"and Korean support free switching between multiple languages." +msgstr "中文(含粤语等各种方言)、英文、日语、韩语支持多个语种自由切换" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/aliyun_bai_lian_model_provider.py:48 +msgid "" +"CosyVoice is based on a new generation of large generative speech models, " +"which can predict emotions, intonation, rhythm, etc. based on context, and " +"has better anthropomorphic effects." +msgstr "" +"CosyVoice基于新一代生成式语音大模型,能根据上下文预测情绪、语调、韵律等,具有" +"更好的拟人效果" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/aliyun_bai_lian_model_provider.py:51 +msgid "" +"Universal text vector is Tongyi Lab's multi-language text unified vector " +"model based on the LLM base. It provides high-level vector services for " +"multiple mainstream languages around the world and helps developers quickly " +"convert text data into high-quality vector data." 
+msgstr "" +"通用文本向量,是通义实验室基于LLM底座的多语言文本统一向量模型,面向全球多个主" +"流语种,提供高水准的向量服务,帮助开发者将文本数据快速转换为高质量的向量数" +"据。" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/aliyun_bai_lian_model_provider.py:69 +#: community/apps/setting/models_provider/impl/qwen_model_provider/qwen_model_provider.py:40 +msgid "" +"Tongyi Wanxiang - a large image model for text generation, supports " +"bilingual input in Chinese and English, and supports the input of reference " +"pictures for reference content or reference style migration. Key styles " +"include but are not limited to watercolor, oil painting, Chinese painting, " +"sketch, flat illustration, two-dimensional, and 3D. Cartoon." +msgstr "" +"通义万相-文本生成图像大模型,支持中英文双语输入,支持输入参考图片进行参考内容" +"或者参考风格迁移,重点风格包括但不限于水彩、油画、中国画、素描、扁平插画、二" +"次元、3D卡通。" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/aliyun_bai_lian_model_provider.py:95 +msgid "Alibaba Cloud Bailian" +msgstr "阿里云百炼" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/embedding.py:28 +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/image.py:47 +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/llm.py:40 +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/reranker.py:28 +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/stt.py:21 +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:68 +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:55 +#: community/apps/setting/models_provider/impl/anthropic_model_provider/credential/image.py:42 +#: community/apps/setting/models_provider/impl/anthropic_model_provider/credential/llm.py:47 +#: community/apps/setting/models_provider/impl/aws_bedrock_model_provider/credential/embedding.py:21 +#: 
community/apps/setting/models_provider/impl/aws_bedrock_model_provider/credential/llm.py:40 +#: community/apps/setting/models_provider/impl/azure_model_provider/credential/embedding.py:27 +#: community/apps/setting/models_provider/impl/azure_model_provider/credential/image.py:45 +#: community/apps/setting/models_provider/impl/azure_model_provider/credential/llm.py:47 +#: community/apps/setting/models_provider/impl/azure_model_provider/credential/stt.py:23 +#: community/apps/setting/models_provider/impl/azure_model_provider/credential/tti.py:58 +#: community/apps/setting/models_provider/impl/azure_model_provider/credential/tts.py:41 +#: community/apps/setting/models_provider/impl/deepseek_model_provider/credential/llm.py:47 +#: community/apps/setting/models_provider/impl/gemini_model_provider/credential/embedding.py:26 +#: community/apps/setting/models_provider/impl/gemini_model_provider/credential/image.py:41 +#: community/apps/setting/models_provider/impl/gemini_model_provider/credential/llm.py:47 +#: community/apps/setting/models_provider/impl/gemini_model_provider/credential/stt.py:21 +#: community/apps/setting/models_provider/impl/kimi_model_provider/credential/llm.py:47 +#: community/apps/setting/models_provider/impl/local_model_provider/credential/embedding.py:27 +#: community/apps/setting/models_provider/impl/local_model_provider/credential/reranker.py:28 +#: community/apps/setting/models_provider/impl/ollama_model_provider/credential/embedding.py:26 +#: community/apps/setting/models_provider/impl/ollama_model_provider/credential/image.py:39 +#: community/apps/setting/models_provider/impl/ollama_model_provider/credential/llm.py:44 +#: community/apps/setting/models_provider/impl/ollama_model_provider/credential/reranker.py:27 +#: community/apps/setting/models_provider/impl/ollama_model_provider/credential/reranker.py:31 +#: community/apps/setting/models_provider/impl/openai_model_provider/credential/embedding.py:26 +#: 
community/apps/setting/models_provider/impl/openai_model_provider/credential/image.py:44 +#: community/apps/setting/models_provider/impl/openai_model_provider/credential/llm.py:47 +#: community/apps/setting/models_provider/impl/openai_model_provider/credential/stt.py:22 +#: community/apps/setting/models_provider/impl/openai_model_provider/credential/tti.py:61 +#: community/apps/setting/models_provider/impl/openai_model_provider/credential/tts.py:40 +#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/image.py:47 +#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/llm.py:47 +#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/tti.py:68 +#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/embedding.py:26 +#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/image.py:44 +#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/llm.py:47 +#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/reranker.py:28 +#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/stt.py:22 +#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/tti.py:61 +#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/tts.py:40 +#: community/apps/setting/models_provider/impl/tencent_cloud_model_provider/credential/llm.py:47 +#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/embedding.py:19 +#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/image.py:47 +#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/llm.py:31 +#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:78 +#: community/apps/setting/models_provider/impl/vllm_model_provider/credential/embedding.py:26 +#: 
community/apps/setting/models_provider/impl/vllm_model_provider/credential/image.py:42 +#: community/apps/setting/models_provider/impl/vllm_model_provider/credential/llm.py:39 +#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/embedding.py:26 +#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/image.py:42 +#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/llm.py:47 +#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/stt.py:25 +#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/tti.py:41 +#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/tts.py:53 +#: community/apps/setting/models_provider/impl/wenxin_model_provider/credential/embedding.py:27 +#: community/apps/setting/models_provider/impl/wenxin_model_provider/credential/llm.py:46 +#: community/apps/setting/models_provider/impl/xf_model_provider/credential/embedding.py:27 +#: community/apps/setting/models_provider/impl/xf_model_provider/credential/image.py:29 +#: community/apps/setting/models_provider/impl/xf_model_provider/credential/llm.py:66 +#: community/apps/setting/models_provider/impl/xf_model_provider/credential/stt.py:24 +#: community/apps/setting/models_provider/impl/xf_model_provider/credential/tts.py:47 +#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/embedding.py:19 +#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/image.py:41 +#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/llm.py:39 +#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/reranker.py:25 +#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/stt.py:21 +#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/tti.py:59 
+#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/tts.py:39 +#: community/apps/setting/models_provider/impl/zhipu_model_provider/credential/image.py:41 +#: community/apps/setting/models_provider/impl/zhipu_model_provider/credential/llm.py:47 +#: community/apps/setting/models_provider/impl/zhipu_model_provider/credential/tti.py:40 +#, python-brace-format +msgid "{model_type} Model type is not supported" +msgstr "模型类型 {model_type} 不支持" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/embedding.py:32 +#, python-brace-format +msgid "{key} is required" +msgstr "{key} 是必填项" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/embedding.py:37 +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/image.py:56 +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/llm.py:50 +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/reranker.py:37 +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/model/tti.py:43 +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/model/tts.py:37 +#: community/apps/setting/models_provider/impl/anthropic_model_provider/credential/image.py:52 +#: community/apps/setting/models_provider/impl/anthropic_model_provider/credential/llm.py:57 +#: community/apps/setting/models_provider/impl/aws_bedrock_model_provider/credential/embedding.py:34 +#: community/apps/setting/models_provider/impl/aws_bedrock_model_provider/credential/llm.py:53 +#: community/apps/setting/models_provider/impl/azure_model_provider/credential/embedding.py:37 +#: community/apps/setting/models_provider/impl/azure_model_provider/credential/image.py:55 +#: community/apps/setting/models_provider/impl/azure_model_provider/credential/llm.py:57 +#: 
community/apps/setting/models_provider/impl/deepseek_model_provider/credential/llm.py:57 +#: community/apps/setting/models_provider/impl/gemini_model_provider/credential/embedding.py:36 +#: community/apps/setting/models_provider/impl/gemini_model_provider/credential/image.py:51 +#: community/apps/setting/models_provider/impl/gemini_model_provider/credential/llm.py:57 +#: community/apps/setting/models_provider/impl/gemini_model_provider/model/stt.py:43 +#: community/apps/setting/models_provider/impl/kimi_model_provider/credential/llm.py:57 +#: community/apps/setting/models_provider/impl/local_model_provider/credential/embedding.py:36 +#: community/apps/setting/models_provider/impl/local_model_provider/credential/reranker.py:37 +#: community/apps/setting/models_provider/impl/ollama_model_provider/credential/embedding.py:37 +#: community/apps/setting/models_provider/impl/ollama_model_provider/credential/reranker.py:44 +#: community/apps/setting/models_provider/impl/openai_model_provider/credential/embedding.py:36 +#: community/apps/setting/models_provider/impl/openai_model_provider/credential/image.py:54 +#: community/apps/setting/models_provider/impl/openai_model_provider/credential/llm.py:58 +#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/image.py:56 +#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/llm.py:56 +#: community/apps/setting/models_provider/impl/qwen_model_provider/model/tti.py:43 +#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/embedding.py:36 +#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/image.py:54 +#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/llm.py:58 +#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/reranker.py:37 +#: community/apps/setting/models_provider/impl/tencent_cloud_model_provider/credential/llm.py:58 +#: 
community/apps/setting/models_provider/impl/tencent_model_provider/credential/embedding.py:23 +#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/image.py:56 +#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/llm.py:51 +#: community/apps/setting/models_provider/impl/tencent_model_provider/model/tti.py:54 +#: community/apps/setting/models_provider/impl/vllm_model_provider/credential/embedding.py:36 +#: community/apps/setting/models_provider/impl/vllm_model_provider/credential/llm.py:50 +#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/embedding.py:36 +#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/image.py:52 +#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/llm.py:57 +#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/model/tts.py:77 +#: community/apps/setting/models_provider/impl/wenxin_model_provider/credential/embedding.py:31 +#: community/apps/setting/models_provider/impl/wenxin_model_provider/credential/llm.py:60 +#: community/apps/setting/models_provider/impl/xf_model_provider/credential/embedding.py:31 +#: community/apps/setting/models_provider/impl/xf_model_provider/credential/llm.py:76 +#: community/apps/setting/models_provider/impl/xf_model_provider/model/tts.py:101 +#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/embedding.py:31 +#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/image.py:51 +#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/llm.py:50 +#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/reranker.py:34 +#: community/apps/setting/models_provider/impl/xinference_model_provider/model/tts.py:44 +#: community/apps/setting/models_provider/impl/zhipu_model_provider/credential/image.py:51 +#: 
community/apps/setting/models_provider/impl/zhipu_model_provider/credential/llm.py:56 +#: community/apps/setting/models_provider/impl/zhipu_model_provider/model/tti.py:49 +msgid "Hello" +msgstr "你好!" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/embedding.py:44 +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/image.py:66 +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/llm.py:58 +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/reranker.py:44 +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/stt.py:38 +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:86 +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:73 +#: community/apps/setting/models_provider/impl/anthropic_model_provider/credential/image.py:62 +#: community/apps/setting/models_provider/impl/anthropic_model_provider/credential/llm.py:65 +#: community/apps/setting/models_provider/impl/aws_bedrock_model_provider/credential/embedding.py:42 +#: community/apps/setting/models_provider/impl/aws_bedrock_model_provider/credential/llm.py:61 +#: community/apps/setting/models_provider/impl/azure_model_provider/credential/image.py:65 +#: community/apps/setting/models_provider/impl/azure_model_provider/credential/stt.py:40 +#: community/apps/setting/models_provider/impl/azure_model_provider/credential/tti.py:77 +#: community/apps/setting/models_provider/impl/azure_model_provider/credential/tts.py:58 +#: community/apps/setting/models_provider/impl/deepseek_model_provider/credential/llm.py:65 +#: community/apps/setting/models_provider/impl/gemini_model_provider/credential/embedding.py:43 +#: community/apps/setting/models_provider/impl/gemini_model_provider/credential/image.py:61 +#: 
community/apps/setting/models_provider/impl/gemini_model_provider/credential/llm.py:66 +#: community/apps/setting/models_provider/impl/gemini_model_provider/credential/stt.py:38 +#: community/apps/setting/models_provider/impl/kimi_model_provider/credential/llm.py:64 +#: community/apps/setting/models_provider/impl/local_model_provider/credential/embedding.py:44 +#: community/apps/setting/models_provider/impl/local_model_provider/credential/reranker.py:45 +#: community/apps/setting/models_provider/impl/ollama_model_provider/credential/reranker.py:51 +#: community/apps/setting/models_provider/impl/openai_model_provider/credential/embedding.py:43 +#: community/apps/setting/models_provider/impl/openai_model_provider/credential/image.py:64 +#: community/apps/setting/models_provider/impl/openai_model_provider/credential/llm.py:66 +#: community/apps/setting/models_provider/impl/openai_model_provider/credential/stt.py:39 +#: community/apps/setting/models_provider/impl/openai_model_provider/credential/tti.py:80 +#: community/apps/setting/models_provider/impl/openai_model_provider/credential/tts.py:58 +#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/image.py:66 +#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/llm.py:64 +#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/tti.py:86 +#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/embedding.py:43 +#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/image.py:64 +#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/llm.py:66 +#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/reranker.py:44 +#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/stt.py:39 +#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/tti.py:80 +#: 
community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/tts.py:58 +#: community/apps/setting/models_provider/impl/tencent_cloud_model_provider/credential/llm.py:66 +#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/embedding.py:30 +#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/image.py:66 +#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/llm.py:57 +#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:104 +#: community/apps/setting/models_provider/impl/vllm_model_provider/credential/embedding.py:43 +#: community/apps/setting/models_provider/impl/vllm_model_provider/credential/image.py:62 +#: community/apps/setting/models_provider/impl/vllm_model_provider/credential/llm.py:55 +#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/embedding.py:43 +#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/image.py:62 +#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/llm.py:66 +#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/stt.py:42 +#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/tti.py:58 +#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/tts.py:70 +#: community/apps/setting/models_provider/impl/wenxin_model_provider/credential/embedding.py:38 +#: community/apps/setting/models_provider/impl/xf_model_provider/credential/embedding.py:38 +#: community/apps/setting/models_provider/impl/xf_model_provider/credential/image.py:50 +#: community/apps/setting/models_provider/impl/xf_model_provider/credential/llm.py:84 +#: community/apps/setting/models_provider/impl/xf_model_provider/credential/stt.py:41 +#: community/apps/setting/models_provider/impl/xf_model_provider/credential/tts.py:65 +#: 
community/apps/setting/models_provider/impl/xinference_model_provider/credential/image.py:60 +#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/reranker.py:40 +#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/stt.py:37 +#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/tti.py:77 +#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/tts.py:56 +#: community/apps/setting/models_provider/impl/zhipu_model_provider/credential/image.py:61 +#: community/apps/setting/models_provider/impl/zhipu_model_provider/credential/llm.py:64 +#: community/apps/setting/models_provider/impl/zhipu_model_provider/credential/tti.py:59 +#, python-brace-format +msgid "" +"Verification failed, please check whether the parameters are correct: {error}" +msgstr "验证失败,请检查参数是否正确: {error}" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/image.py:22 +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/llm.py:15 +#: community/apps/setting/models_provider/impl/anthropic_model_provider/credential/image.py:15 +#: community/apps/setting/models_provider/impl/anthropic_model_provider/credential/llm.py:22 +#: community/apps/setting/models_provider/impl/aws_bedrock_model_provider/credential/llm.py:14 +#: community/apps/setting/models_provider/impl/azure_model_provider/credential/image.py:17 +#: community/apps/setting/models_provider/impl/azure_model_provider/credential/llm.py:22 +#: community/apps/setting/models_provider/impl/deepseek_model_provider/credential/llm.py:22 +#: community/apps/setting/models_provider/impl/gemini_model_provider/credential/image.py:15 +#: community/apps/setting/models_provider/impl/gemini_model_provider/credential/llm.py:22 +#: community/apps/setting/models_provider/impl/kimi_model_provider/credential/llm.py:22 +#: 
community/apps/setting/models_provider/impl/ollama_model_provider/credential/image.py:12 +#: community/apps/setting/models_provider/impl/ollama_model_provider/credential/llm.py:20 +#: community/apps/setting/models_provider/impl/openai_model_provider/credential/image.py:17 +#: community/apps/setting/models_provider/impl/openai_model_provider/credential/llm.py:22 +#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/image.py:22 +#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/llm.py:22 +#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/image.py:17 +#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/llm.py:22 +#: community/apps/setting/models_provider/impl/tencent_cloud_model_provider/credential/llm.py:22 +#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/image.py:22 +#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/llm.py:14 +#: community/apps/setting/models_provider/impl/vllm_model_provider/credential/image.py:15 +#: community/apps/setting/models_provider/impl/vllm_model_provider/credential/llm.py:15 +#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/image.py:15 +#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/llm.py:22 +#: community/apps/setting/models_provider/impl/wenxin_model_provider/credential/llm.py:22 +#: community/apps/setting/models_provider/impl/xf_model_provider/credential/llm.py:22 +#: community/apps/setting/models_provider/impl/xf_model_provider/credential/llm.py:41 +#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/image.py:14 +#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/llm.py:15 +#: community/apps/setting/models_provider/impl/zhipu_model_provider/credential/image.py:15 +#: 
community/apps/setting/models_provider/impl/zhipu_model_provider/credential/llm.py:22 +msgid "Temperature" +msgstr "温度" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/image.py:23 +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/llm.py:16 +#: community/apps/setting/models_provider/impl/anthropic_model_provider/credential/image.py:16 +#: community/apps/setting/models_provider/impl/anthropic_model_provider/credential/llm.py:23 +#: community/apps/setting/models_provider/impl/aws_bedrock_model_provider/credential/llm.py:15 +#: community/apps/setting/models_provider/impl/azure_model_provider/credential/image.py:18 +#: community/apps/setting/models_provider/impl/azure_model_provider/credential/llm.py:23 +#: community/apps/setting/models_provider/impl/deepseek_model_provider/credential/llm.py:23 +#: community/apps/setting/models_provider/impl/gemini_model_provider/credential/image.py:16 +#: community/apps/setting/models_provider/impl/gemini_model_provider/credential/llm.py:23 +#: community/apps/setting/models_provider/impl/kimi_model_provider/credential/llm.py:23 +#: community/apps/setting/models_provider/impl/ollama_model_provider/credential/image.py:13 +#: community/apps/setting/models_provider/impl/ollama_model_provider/credential/llm.py:21 +#: community/apps/setting/models_provider/impl/openai_model_provider/credential/image.py:18 +#: community/apps/setting/models_provider/impl/openai_model_provider/credential/llm.py:23 +#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/image.py:23 +#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/llm.py:23 +#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/image.py:18 +#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/llm.py:23 +#: community/apps/setting/models_provider/impl/tencent_cloud_model_provider/credential/llm.py:23 
+#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/image.py:23 +#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/llm.py:15 +#: community/apps/setting/models_provider/impl/vllm_model_provider/credential/image.py:16 +#: community/apps/setting/models_provider/impl/vllm_model_provider/credential/llm.py:16 +#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/image.py:16 +#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/llm.py:23 +#: community/apps/setting/models_provider/impl/wenxin_model_provider/credential/llm.py:23 +#: community/apps/setting/models_provider/impl/xf_model_provider/credential/llm.py:23 +#: community/apps/setting/models_provider/impl/xf_model_provider/credential/llm.py:42 +#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/image.py:15 +#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/llm.py:16 +#: community/apps/setting/models_provider/impl/zhipu_model_provider/credential/image.py:16 +#: community/apps/setting/models_provider/impl/zhipu_model_provider/credential/llm.py:23 +msgid "" +"Higher values make the output more random, while lower values make it more " +"focused and deterministic" +msgstr "较高的数值会使输出更加随机,而较低的数值会使其更加集中和确定" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/image.py:31 +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/llm.py:24 +#: community/apps/setting/models_provider/impl/anthropic_model_provider/credential/image.py:24 +#: community/apps/setting/models_provider/impl/anthropic_model_provider/credential/llm.py:31 +#: community/apps/setting/models_provider/impl/aws_bedrock_model_provider/credential/llm.py:23 +#: community/apps/setting/models_provider/impl/azure_model_provider/credential/image.py:26 +#: 
community/apps/setting/models_provider/impl/azure_model_provider/credential/llm.py:31 +#: community/apps/setting/models_provider/impl/deepseek_model_provider/credential/llm.py:31 +#: community/apps/setting/models_provider/impl/gemini_model_provider/credential/image.py:24 +#: community/apps/setting/models_provider/impl/gemini_model_provider/credential/llm.py:31 +#: community/apps/setting/models_provider/impl/kimi_model_provider/credential/llm.py:31 +#: community/apps/setting/models_provider/impl/ollama_model_provider/credential/image.py:21 +#: community/apps/setting/models_provider/impl/ollama_model_provider/credential/llm.py:29 +#: community/apps/setting/models_provider/impl/openai_model_provider/credential/image.py:26 +#: community/apps/setting/models_provider/impl/openai_model_provider/credential/llm.py:31 +#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/image.py:31 +#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/llm.py:31 +#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/image.py:26 +#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/llm.py:31 +#: community/apps/setting/models_provider/impl/tencent_cloud_model_provider/credential/llm.py:31 +#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/image.py:31 +#: community/apps/setting/models_provider/impl/vllm_model_provider/credential/image.py:24 +#: community/apps/setting/models_provider/impl/vllm_model_provider/credential/llm.py:24 +#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/image.py:24 +#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/llm.py:31 +#: community/apps/setting/models_provider/impl/wenxin_model_provider/credential/llm.py:31 +#: community/apps/setting/models_provider/impl/xf_model_provider/credential/llm.py:31 +#: 
community/apps/setting/models_provider/impl/xf_model_provider/credential/llm.py:50 +#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/image.py:23 +#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/llm.py:24 +#: community/apps/setting/models_provider/impl/zhipu_model_provider/credential/image.py:24 +#: community/apps/setting/models_provider/impl/zhipu_model_provider/credential/llm.py:31 +msgid "Output the maximum Tokens" +msgstr "输出最大Token数" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/image.py:32 +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/llm.py:25 +#: community/apps/setting/models_provider/impl/anthropic_model_provider/credential/image.py:25 +#: community/apps/setting/models_provider/impl/anthropic_model_provider/credential/llm.py:32 +#: community/apps/setting/models_provider/impl/aws_bedrock_model_provider/credential/llm.py:24 +#: community/apps/setting/models_provider/impl/azure_model_provider/credential/image.py:27 +#: community/apps/setting/models_provider/impl/azure_model_provider/credential/llm.py:32 +#: community/apps/setting/models_provider/impl/deepseek_model_provider/credential/llm.py:32 +#: community/apps/setting/models_provider/impl/gemini_model_provider/credential/image.py:25 +#: community/apps/setting/models_provider/impl/gemini_model_provider/credential/llm.py:32 +#: community/apps/setting/models_provider/impl/kimi_model_provider/credential/llm.py:32 +#: community/apps/setting/models_provider/impl/ollama_model_provider/credential/image.py:22 +#: community/apps/setting/models_provider/impl/ollama_model_provider/credential/llm.py:30 +#: community/apps/setting/models_provider/impl/openai_model_provider/credential/image.py:27 +#: community/apps/setting/models_provider/impl/openai_model_provider/credential/llm.py:32 +#: 
community/apps/setting/models_provider/impl/qwen_model_provider/credential/image.py:32 +#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/llm.py:32 +#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/image.py:27 +#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/llm.py:32 +#: community/apps/setting/models_provider/impl/tencent_cloud_model_provider/credential/llm.py:32 +#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/image.py:32 +#: community/apps/setting/models_provider/impl/vllm_model_provider/credential/image.py:25 +#: community/apps/setting/models_provider/impl/vllm_model_provider/credential/llm.py:25 +#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/image.py:25 +#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/llm.py:32 +#: community/apps/setting/models_provider/impl/wenxin_model_provider/credential/llm.py:32 +#: community/apps/setting/models_provider/impl/xf_model_provider/credential/llm.py:32 +#: community/apps/setting/models_provider/impl/xf_model_provider/credential/llm.py:51 +#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/image.py:24 +#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/llm.py:25 +#: community/apps/setting/models_provider/impl/zhipu_model_provider/credential/image.py:25 +#: community/apps/setting/models_provider/impl/zhipu_model_provider/credential/llm.py:32 +msgid "Specify the maximum number of tokens that the model can generate" +msgstr "指定模型可以生成的最大 tokens 数" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/image.py:51 +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/llm.py:45 +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/reranker.py:32 
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/stt.py:26 +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:72 +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:60 +#: community/apps/setting/models_provider/impl/anthropic_model_provider/credential/image.py:47 +#: community/apps/setting/models_provider/impl/anthropic_model_provider/credential/llm.py:52 +#: community/apps/setting/models_provider/impl/azure_model_provider/credential/embedding.py:32 +#: community/apps/setting/models_provider/impl/azure_model_provider/credential/image.py:50 +#: community/apps/setting/models_provider/impl/azure_model_provider/credential/llm.py:52 +#: community/apps/setting/models_provider/impl/azure_model_provider/credential/stt.py:28 +#: community/apps/setting/models_provider/impl/azure_model_provider/credential/tti.py:63 +#: community/apps/setting/models_provider/impl/azure_model_provider/credential/tts.py:46 +#: community/apps/setting/models_provider/impl/deepseek_model_provider/credential/llm.py:52 +#: community/apps/setting/models_provider/impl/gemini_model_provider/credential/embedding.py:31 +#: community/apps/setting/models_provider/impl/gemini_model_provider/credential/image.py:46 +#: community/apps/setting/models_provider/impl/gemini_model_provider/credential/llm.py:52 +#: community/apps/setting/models_provider/impl/gemini_model_provider/credential/stt.py:26 +#: community/apps/setting/models_provider/impl/kimi_model_provider/credential/llm.py:52 +#: community/apps/setting/models_provider/impl/local_model_provider/credential/embedding.py:31 +#: community/apps/setting/models_provider/impl/local_model_provider/credential/reranker.py:32 +#: community/apps/setting/models_provider/impl/ollama_model_provider/credential/embedding.py:46 +#: community/apps/setting/models_provider/impl/ollama_model_provider/credential/llm.py:62 +#: 
community/apps/setting/models_provider/impl/ollama_model_provider/credential/reranker.py:63 +#: community/apps/setting/models_provider/impl/openai_model_provider/credential/embedding.py:31 +#: community/apps/setting/models_provider/impl/openai_model_provider/credential/image.py:49 +#: community/apps/setting/models_provider/impl/openai_model_provider/credential/llm.py:52 +#: community/apps/setting/models_provider/impl/openai_model_provider/credential/stt.py:27 +#: community/apps/setting/models_provider/impl/openai_model_provider/credential/tti.py:66 +#: community/apps/setting/models_provider/impl/openai_model_provider/credential/tts.py:45 +#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/image.py:51 +#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/llm.py:51 +#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/tti.py:72 +#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/embedding.py:31 +#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/image.py:49 +#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/llm.py:52 +#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/reranker.py:32 +#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/stt.py:27 +#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/tti.py:66 +#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/tts.py:45 +#: community/apps/setting/models_provider/impl/tencent_cloud_model_provider/credential/llm.py:52 +#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/image.py:51 +#: community/apps/setting/models_provider/impl/vllm_model_provider/credential/embedding.py:31 +#: community/apps/setting/models_provider/impl/vllm_model_provider/credential/image.py:47 +#: 
community/apps/setting/models_provider/impl/vllm_model_provider/credential/llm.py:65 +#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/embedding.py:31 +#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/image.py:47 +#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/llm.py:52 +#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/stt.py:30 +#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/tti.py:46 +#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/tts.py:58 +#: community/apps/setting/models_provider/impl/wenxin_model_provider/credential/llm.py:55 +#: community/apps/setting/models_provider/impl/wenxin_model_provider/credential/llm.py:72 +#: community/apps/setting/models_provider/impl/xf_model_provider/credential/image.py:34 +#: community/apps/setting/models_provider/impl/xf_model_provider/credential/llm.py:71 +#: community/apps/setting/models_provider/impl/xf_model_provider/credential/stt.py:29 +#: community/apps/setting/models_provider/impl/xf_model_provider/credential/tts.py:52 +#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/embedding.py:40 +#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/image.py:46 +#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/llm.py:59 +#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/reranker.py:29 +#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/stt.py:26 +#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/tti.py:64 +#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/tts.py:44 +#: community/apps/setting/models_provider/impl/zhipu_model_provider/credential/image.py:46 +#: 
community/apps/setting/models_provider/impl/zhipu_model_provider/credential/llm.py:51 +#: community/apps/setting/models_provider/impl/zhipu_model_provider/credential/tti.py:45 +#, python-brace-format +msgid "{key} is required" +msgstr "{key} 是必填项" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:22 +#: community/apps/setting/models_provider/impl/azure_model_provider/credential/tti.py:15 +#: community/apps/setting/models_provider/impl/openai_model_provider/credential/tti.py:15 +#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/tti.py:22 +#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/tti.py:15 +#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/tti.py:15 +#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/tti.py:14 +#: community/apps/setting/models_provider/impl/zhipu_model_provider/credential/tti.py:15 +msgid "Image size" +msgstr "图片尺寸" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:22 +#: community/apps/setting/models_provider/impl/azure_model_provider/credential/tti.py:15 +#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/tti.py:22 +msgid "Specify the size of the generated image, such as: 1024x1024" +msgstr "指定生成图片的尺寸, 如: 1024x1024" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:34 +#: community/apps/setting/models_provider/impl/azure_model_provider/credential/tti.py:40 +#: community/apps/setting/models_provider/impl/openai_model_provider/credential/tti.py:43 +#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/tti.py:34 +#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/tti.py:43 +#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/tti.py:41 +msgid "Number 
of pictures" +msgstr "图片数量" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:34 +#: community/apps/setting/models_provider/impl/azure_model_provider/credential/tti.py:40 +#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/tti.py:34 +msgid "Specify the number of generated images" +msgstr "指定生成图片的数量" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:41 +#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/tti.py:41 +msgid "Style" +msgstr "风格" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:41 +#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/tti.py:41 +msgid "Specify the style of generated images" +msgstr "指定生成图片的风格" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:45 +#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/tti.py:45 +msgid "Default value, the image style is randomly output by the model" +msgstr "默认值,图片风格由模型随机输出" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:46 +#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/tti.py:46 +msgid "photography" +msgstr "摄影" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:47 +#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/tti.py:47 +msgid "Portraits" +msgstr "人像写真" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:48 +#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/tti.py:48 +msgid "3D cartoon" +msgstr "3D卡通" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:49 +#: 
community/apps/setting/models_provider/impl/qwen_model_provider/credential/tti.py:49 +msgid "animation" +msgstr "动画" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:50 +#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/tti.py:50 +msgid "painting" +msgstr "油画" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:51 +#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/tti.py:51 +msgid "watercolor" +msgstr "水彩" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:52 +#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/tti.py:52 +msgid "sketch" +msgstr "素描" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:53 +#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/tti.py:53 +msgid "Chinese painting" +msgstr "中国画" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:54 +#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/tti.py:54 +msgid "flat illustration" +msgstr "扁平插画" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:15 +#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/tts.py:15 +#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/tts.py:15 +msgid "timbre" +msgstr "音色" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:15 +#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/tts.py:15 +msgid "Chinese sounds can support mixed scenes of Chinese and English" +msgstr "中文音色支持中英文混合场景" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:20 +msgid "Long Xiaochun" 
+msgstr "龙小淳" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:21 +msgid "Long Xiaoxia" +msgstr "龙小夏" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:22 +msgid "Long Xiaochen" +msgstr "龙小诚" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:23 +msgid "Long Xiaobai" +msgstr "龙小白" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:24 +msgid "Long laotie" +msgstr "龙老铁" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:25 +msgid "Long Shu" +msgstr "龙书" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:26 +msgid "Long Shuo" +msgstr "龙硕" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:27 +msgid "Long Jing" +msgstr "龙婧" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:28 +msgid "Long Miao" +msgstr "龙妙" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:29 +msgid "Long Yue" +msgstr "龙悦" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:30 +msgid "Long Yuan" +msgstr "龙媛" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:31 +msgid "Long Fei" +msgstr "龙飞" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:32 +msgid "Long Jielidou" +msgstr "龙杰力豆" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:33 +msgid "Long Tong" +msgstr "龙彤" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:34 +msgid "Long Xiang" +msgstr "龙祥" + +#: 
community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:39 +#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/tts.py:33 +#: community/apps/setting/models_provider/impl/xf_model_provider/credential/tts.py:28 +msgid "speaking speed" +msgstr "语速" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:39 +msgid "[0.5,2], the default is 1, usually one decimal place is enough" +msgstr "[0.5,2],默认为1,通常一位小数就足够了" + +#: community/apps/setting/models_provider/impl/anthropic_model_provider/credential/image.py:34 +#: community/apps/setting/models_provider/impl/anthropic_model_provider/credential/llm.py:74 +msgid "API URL" +msgstr "" + +#: community/apps/setting/models_provider/impl/anthropic_model_provider/credential/image.py:35 +#: community/apps/setting/models_provider/impl/anthropic_model_provider/credential/llm.py:75 +msgid "API Key" +msgstr "" + +#: community/apps/setting/models_provider/impl/aws_bedrock_model_provider/aws_bedrock_model_provider.py:36 +msgid "" +"An update to Claude 2 that doubles the context window and improves " +"reliability, hallucination rates, and evidence-based accuracy in long " +"documents and RAG contexts." +msgstr "" +"Claude 2 的更新,采用双倍的上下文窗口,并在长文档和 RAG 上下文中提高可靠性、" +"幻觉率和循证准确性。" + +#: community/apps/setting/models_provider/impl/aws_bedrock_model_provider/aws_bedrock_model_provider.py:43 +msgid "" +"Anthropic is a powerful model that can handle a variety of tasks, from " +"complex dialogue and creative content generation to detailed command " +"obedience." +msgstr "" +"Anthropic 功能强大的模型,可处理各种任务,从复杂的对话和创意内容生成到详细的" +"指令服从。" + +#: community/apps/setting/models_provider/impl/aws_bedrock_model_provider/aws_bedrock_model_provider.py:50 +msgid "" +"The Claude 3 Haiku is Anthropic's fastest and most compact model, with near-" +"instant responsiveness. The model can answer simple queries and requests " +"quickly. 
Customers will be able to build seamless AI experiences that mimic " +"human interactions. Claude 3 Haiku can process images and return text " +"output, and provides 200K context windows." +msgstr "" +"Claude 3 Haiku 是 Anthropic 最快速、最紧凑的模型,具有近乎即时的响应能力。该" +"模型可以快速回答简单的查询和请求。客户将能够构建模仿人类交互的无缝人工智能体" +"验。 Claude 3 Haiku 可以处理图像和返回文本输出,并且提供 200K 上下文窗口。" + +#: community/apps/setting/models_provider/impl/aws_bedrock_model_provider/aws_bedrock_model_provider.py:57 +msgid "" +"The Claude 3 Sonnet model from Anthropic strikes the ideal balance between " +"intelligence and speed, especially when it comes to handling enterprise " +"workloads. This model offers maximum utility while being priced lower than " +"competing products, and it's been engineered to be a solid choice for " +"deploying AI at scale." +msgstr "" +"Anthropic 推出的 Claude 3 Sonnet 模型在智能和速度之间取得理想的平衡,尤其是在" +"处理企业工作负载方面。该模型提供最大的效用,同时价格低于竞争产品,并且其经过" +"精心设计,是大规模部署人工智能的可靠选择。" + +#: community/apps/setting/models_provider/impl/aws_bedrock_model_provider/aws_bedrock_model_provider.py:64 +msgid "" +"The Claude 3.5 Sonnet raises the industry standard for intelligence, " +"outperforming competing models and the Claude 3 Opus in extensive " +"evaluations, with the speed and cost-effectiveness of our mid-range models." +msgstr "" +"Claude 3.5 Sonnet提高了智能的行业标准,在广泛的评估中超越了竞争对手的型号和" +"Claude 3 Opus,具有我们中端型号的速度和成本效益。" + +#: community/apps/setting/models_provider/impl/aws_bedrock_model_provider/aws_bedrock_model_provider.py:71 +msgid "" +"A faster, more affordable but still very powerful model that can handle a " +"range of tasks including casual conversation, text analysis, summarization " +"and document question answering." 
+msgstr "" +"一种更快速、更实惠但仍然非常强大的模型,它可以处理一系列任务,包括随意对话、" +"文本分析、摘要和文档问题回答。" + +#: community/apps/setting/models_provider/impl/aws_bedrock_model_provider/aws_bedrock_model_provider.py:78 +msgid "" +"Titan Text Premier is the most powerful and advanced model in the Titan Text " +"series, designed to deliver exceptional performance for a variety of " +"enterprise applications. With its cutting-edge features, it delivers greater " +"accuracy and outstanding results, making it an excellent choice for " +"organizations looking for a top-notch text processing solution." +msgstr "" +"Titan Text Premier 是 Titan Text 系列中功能强大且先进的型号,旨在为各种企业应" +"用程序提供卓越的性能。凭借其尖端功能,它提供了更高的准确性和出色的结果,使其" +"成为寻求一流文本处理解决方案的组织的绝佳选择。" + +#: community/apps/setting/models_provider/impl/aws_bedrock_model_provider/aws_bedrock_model_provider.py:85 +msgid "" +"Amazon Titan Text Lite is a lightweight, efficient model ideal for fine-" +"tuning English-language tasks, including summarization and copywriting, " +"where customers require smaller, more cost-effective, and highly " +"customizable models." +msgstr "" +"Amazon Titan Text Lite 是一种轻量级的高效模型,非常适合英语任务的微调,包括摘" +"要和文案写作等,在这种场景下,客户需要更小、更经济高效且高度可定制的模型" + +#: community/apps/setting/models_provider/impl/aws_bedrock_model_provider/aws_bedrock_model_provider.py:91 +msgid "" +"Amazon Titan Text Express has context lengths of up to 8,000 tokens, making " +"it ideal for a variety of high-level general language tasks, such as open-" +"ended text generation and conversational chat, as well as support in " +"retrieval-augmented generation (RAG). At launch, the model is optimized for " +"English, but other languages are supported." +msgstr "" +"Amazon Titan Text Express 的上下文长度长达 8000 个 tokens,因而非常适合各种高" +"级常规语言任务,例如开放式文本生成和对话式聊天,以及检索增强生成(RAG)中的支" +"持。在发布时,该模型针对英语进行了优化,但也支持其他语言。" + +#: community/apps/setting/models_provider/impl/aws_bedrock_model_provider/aws_bedrock_model_provider.py:97 +msgid "" +"7B dense converter for rapid deployment and easy customization. 
Small in " +"size yet powerful in a variety of use cases. Supports English and code, as " +"well as 32k context windows." +msgstr "" +"7B 密集型转换器,可快速部署,易于定制。体积虽小,但功能强大,适用于各种用例。" +"支持英语和代码,以及 32k 的上下文窗口。" + +#: community/apps/setting/models_provider/impl/aws_bedrock_model_provider/aws_bedrock_model_provider.py:103 +msgid "" +"Advanced Mistral AI large-scale language model capable of handling any " +"language task, including complex multilingual reasoning, text understanding, " +"transformation, and code generation." +msgstr "" +"先进的 Mistral AI 大型语言模型,能够处理任何语言任务,包括复杂的多语言推理、" +"文本理解、转换和代码生成。" + +#: community/apps/setting/models_provider/impl/aws_bedrock_model_provider/aws_bedrock_model_provider.py:109 +msgid "" +"Ideal for content creation, conversational AI, language understanding, R&D, " +"and enterprise applications" +msgstr "非常适合内容创作、会话式人工智能、语言理解、研发和企业应用" + +#: community/apps/setting/models_provider/impl/aws_bedrock_model_provider/aws_bedrock_model_provider.py:115 +msgid "" +"Ideal for limited computing power and resources, edge devices, and faster " +"training times." +msgstr "非常适合有限的计算能力和资源、边缘设备和更快的训练时间。" + +#: community/apps/setting/models_provider/impl/aws_bedrock_model_provider/aws_bedrock_model_provider.py:123 +msgid "" +"Titan Embed Text is the largest embedding model in the Amazon Titan Embed " +"series and can handle various text embedding tasks, such as text " +"classification, text similarity calculation, etc." 
+msgstr "" +"Titan Embed Text 是 Amazon Titan Embed 系列中最大的嵌入模型,可以处理各种文本" +"嵌入任务,如文本分类、文本相似度计算等。" + +#: community/apps/setting/models_provider/impl/aws_bedrock_model_provider/credential/embedding.py:28 +#: community/apps/setting/models_provider/impl/aws_bedrock_model_provider/credential/llm.py:47 +#, python-brace-format +msgid "The following fields are required: {keys}" +msgstr "以下字段是必填项: {keys}" + +#: community/apps/setting/models_provider/impl/azure_model_provider/credential/embedding.py:44 +#: community/apps/setting/models_provider/impl/azure_model_provider/credential/llm.py:64 +msgid "Verification failed, please check whether the parameters are correct" +msgstr "验证失败,请检查参数是否正确" + +#: community/apps/setting/models_provider/impl/azure_model_provider/credential/tti.py:28 +#: community/apps/setting/models_provider/impl/openai_model_provider/credential/tti.py:29 +#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/tti.py:29 +#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/tti.py:28 +msgid "Picture quality" +msgstr "图片质量" + +#: community/apps/setting/models_provider/impl/azure_model_provider/credential/tts.py:17 +#: community/apps/setting/models_provider/impl/openai_model_provider/credential/tts.py:17 +#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/tts.py:17 +msgid "" +"Try out the different sounds (Alloy, Echo, Fable, Onyx, Nova, and Sparkle) " +"to find one that suits your desired tone and audience. The current voiceover " +"is optimized for English." 
+msgstr "" +"尝试不同的声音(合金、回声、寓言、缟玛瑙、新星和闪光),找到一种适合您所需的" +"音调和听众的声音。当前的语音针对英语进行了优化。" + +#: community/apps/setting/models_provider/impl/deepseek_model_provider/deepseek_model_provider.py:24 +msgid "Good at common conversational tasks, supports 32K contexts" +msgstr "擅长通用对话任务,支持 32K 上下文" + +#: community/apps/setting/models_provider/impl/deepseek_model_provider/deepseek_model_provider.py:29 +msgid "Good at handling programming tasks, supports 16K contexts" +msgstr "擅长处理编程任务,支持 16K 上下文" + +#: community/apps/setting/models_provider/impl/gemini_model_provider/gemini_model_provider.py:32 +msgid "Latest Gemini 1.0 Pro model, updated with Google update" +msgstr "最新的 Gemini 1.0 Pro 模型,随 Google 更新而更新" + +#: community/apps/setting/models_provider/impl/gemini_model_provider/gemini_model_provider.py:36 +msgid "Latest Gemini 1.0 Pro Vision model, updated with Google update" +msgstr "最新的Gemini 1.0 Pro Vision模型,随Google更新而更新" + +#: community/apps/setting/models_provider/impl/gemini_model_provider/gemini_model_provider.py:43 +#: community/apps/setting/models_provider/impl/gemini_model_provider/gemini_model_provider.py:47 +#: community/apps/setting/models_provider/impl/gemini_model_provider/gemini_model_provider.py:54 +#: community/apps/setting/models_provider/impl/gemini_model_provider/gemini_model_provider.py:58 +msgid "Latest Gemini 1.5 Flash model, updated with Google updates" +msgstr "最新的Gemini 1.5 Flash模型,随Google更新而更新" + +#: community/apps/setting/models_provider/impl/gemini_model_provider/model/stt.py:53 +msgid "convert audio to text" +msgstr "将音频转换为文本" + +#: community/apps/setting/models_provider/impl/local_model_provider/credential/embedding.py:53 +#: community/apps/setting/models_provider/impl/local_model_provider/credential/reranker.py:54 +msgid "Model catalog" +msgstr "模型目录" + +#: community/apps/setting/models_provider/impl/local_model_provider/local_model_provider.py:39 +msgid "local model" +msgstr "本地模型" + +#:
community/apps/setting/models_provider/impl/ollama_model_provider/credential/embedding.py:30 +#: community/apps/setting/models_provider/impl/ollama_model_provider/credential/image.py:43 +#: community/apps/setting/models_provider/impl/ollama_model_provider/credential/llm.py:48 +#: community/apps/setting/models_provider/impl/ollama_model_provider/credential/reranker.py:35 +#: community/apps/setting/models_provider/impl/vllm_model_provider/credential/llm.py:43 +#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/embedding.py:24 +#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/llm.py:44 +msgid "API domain name is invalid" +msgstr "API域名无效" + +#: community/apps/setting/models_provider/impl/ollama_model_provider/credential/embedding.py:35 +#: community/apps/setting/models_provider/impl/ollama_model_provider/credential/image.py:48 +#: community/apps/setting/models_provider/impl/ollama_model_provider/credential/llm.py:53 +#: community/apps/setting/models_provider/impl/ollama_model_provider/credential/reranker.py:40 +#: community/apps/setting/models_provider/impl/vllm_model_provider/credential/llm.py:47 +#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/embedding.py:30 +#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/llm.py:48 +msgid "The model does not exist, please download the model first" +msgstr "模型不存在,请先下载模型" + +#: community/apps/setting/models_provider/impl/ollama_model_provider/ollama_model_provider.py:56 +msgid "" +"Llama 2 is a set of pretrained and fine-tuned generative text models ranging " +"in size from 7 billion to 70 billion. This is a repository of 7B pretrained " +"models. Links to other models can be found in the index at the bottom." 
+msgstr "" +"Llama 2 是一组经过预训练和微调的生成文本模型,其规模从 70 亿到 700 亿个不等。" +"这是 7B 预训练模型的存储库。其他模型的链接可以在底部的索引中找到。" + +#: community/apps/setting/models_provider/impl/ollama_model_provider/ollama_model_provider.py:60 +msgid "" +"Llama 2 is a set of pretrained and fine-tuned generative text models ranging " +"in size from 7 billion to 70 billion. This is a repository of 13B pretrained " +"models. Links to other models can be found in the index at the bottom." +msgstr "" +"Llama 2 是一组经过预训练和微调的生成文本模型,其规模从 70 亿到 700 亿个不等。" +"这是 13B 预训练模型的存储库。其他模型的链接可以在底部的索引中找到。" + +#: community/apps/setting/models_provider/impl/ollama_model_provider/ollama_model_provider.py:64 +msgid "" +"Llama 2 is a set of pretrained and fine-tuned generative text models ranging " +"in size from 7 billion to 70 billion. This is a repository of 70B pretrained " +"models. Links to other models can be found in the index at the bottom." +msgstr "" +"Llama 2 是一组经过预训练和微调的生成文本模型,其规模从 70 亿到 700 亿个不等。" +"这是 70B 预训练模型的存储库。其他模型的链接可以在底部的索引中找到。" + +#: community/apps/setting/models_provider/impl/ollama_model_provider/ollama_model_provider.py:68 +msgid "" +"Since the Chinese alignment of Llama2 itself is weak, we use the Chinese " +"instruction set to fine-tune meta-llama/Llama-2-13b-chat-hf with LoRA so " +"that it has strong Chinese conversation capabilities." +msgstr "" +"由于Llama2本身的中文对齐较弱,我们采用中文指令集,对meta-llama/Llama-2-13b-" +"chat-hf进行LoRA微调,使其具备较强的中文对话能力。" + +#: community/apps/setting/models_provider/impl/ollama_model_provider/ollama_model_provider.py:72 +msgid "" +"Meta Llama 3: The most capable public product LLM to date. 8 billion " +"parameters." +msgstr "Meta Llama 3:迄今为止最有能力的公开产品LLM。80亿参数。" + +#: community/apps/setting/models_provider/impl/ollama_model_provider/ollama_model_provider.py:76 +msgid "" +"Meta Llama 3: The most capable public product LLM to date. 70 billion " +"parameters." 
+msgstr "Meta Llama 3:迄今为止最有能力的公开产品LLM。700亿参数。" + +#: community/apps/setting/models_provider/impl/ollama_model_provider/ollama_model_provider.py:80 +msgid "" +"Compared with previous versions, qwen 1.5 0.5b has significantly enhanced " +"the model's alignment with human preferences and its multi-language " +"processing capabilities. Models of all sizes support a context length of " +"32768 tokens. 500 million parameters." +msgstr "" +"qwen 1.5 0.5b 相较于以往版本,模型与人类偏好的对齐程度以及多语言处理能力上有" +"显著增强。所有规模的模型都支持32768个tokens的上下文长度。5亿参数。" + +#: community/apps/setting/models_provider/impl/ollama_model_provider/ollama_model_provider.py:84 +msgid "" +"Compared with previous versions, qwen 1.5 1.8b has significantly enhanced " +"the model's alignment with human preferences and its multi-language " +"processing capabilities. Models of all sizes support a context length of " +"32768 tokens. 1.8 billion parameters." +msgstr "" +"qwen 1.5 1.8b 相较于以往版本,模型与人类偏好的对齐程度以及多语言处理能力上有" +"显著增强。所有规模的模型都支持32768个tokens的上下文长度。18亿参数。" + +#: community/apps/setting/models_provider/impl/ollama_model_provider/ollama_model_provider.py:88 +msgid "" +"Compared with previous versions, qwen 1.5 4b has significantly enhanced the " +"model's alignment with human preferences and its multi-language processing " +"capabilities. Models of all sizes support a context length of 32768 tokens. " +"4 billion parameters." +msgstr "" +"qwen 1.5 4b 相较于以往版本,模型与人类偏好的对齐程度以及多语言处理能力上有显" +"著增强。所有规模的模型都支持32768个tokens的上下文长度。40亿参数。" + +#: community/apps/setting/models_provider/impl/ollama_model_provider/ollama_model_provider.py:93 +msgid "" +"Compared with previous versions, qwen 1.5 7b has significantly enhanced the " +"model's alignment with human preferences and its multi-language processing " +"capabilities. Models of all sizes support a context length of 32768 tokens. " +"7 billion parameters." 
+msgstr "" +"qwen 1.5 7b 相较于以往版本,模型与人类偏好的对齐程度以及多语言处理能力上有显" +"著增强。所有规模的模型都支持32768个tokens的上下文长度。70亿参数。" + +#: community/apps/setting/models_provider/impl/ollama_model_provider/ollama_model_provider.py:97 +msgid "" +"Compared with previous versions, qwen 1.5 14b has significantly enhanced the " +"model's alignment with human preferences and its multi-language processing " +"capabilities. Models of all sizes support a context length of 32768 tokens. " +"14 billion parameters." +msgstr "" +"qwen 1.5 14b 相较于以往版本,模型与人类偏好的对齐程度以及多语言处理能力上有显" +"著增强。所有规模的模型都支持32768个tokens的上下文长度。140亿参数。" + +#: community/apps/setting/models_provider/impl/ollama_model_provider/ollama_model_provider.py:101 +msgid "" +"Compared with previous versions, qwen 1.5 32b has significantly enhanced the " +"model's alignment with human preferences and its multi-language processing " +"capabilities. Models of all sizes support a context length of 32768 tokens. " +"32 billion parameters." +msgstr "" +"qwen 1.5 32b 相较于以往版本,模型与人类偏好的对齐程度以及多语言处理能力上有显" +"著增强。所有规模的模型都支持32768个tokens的上下文长度。320亿参数。" + +#: community/apps/setting/models_provider/impl/ollama_model_provider/ollama_model_provider.py:105 +msgid "" +"Compared with previous versions, qwen 1.5 72b has significantly enhanced the " +"model's alignment with human preferences and its multi-language processing " +"capabilities. Models of all sizes support a context length of 32768 tokens. " +"72 billion parameters." +msgstr "" +"qwen 1.5 72b 相较于以往版本,模型与人类偏好的对齐程度以及多语言处理能力上有显" +"著增强。所有规模的模型都支持32768个tokens的上下文长度。720亿参数。" + +#: community/apps/setting/models_provider/impl/ollama_model_provider/ollama_model_provider.py:109 +msgid "" +"Compared with previous versions, qwen 1.5 110b has significantly enhanced " +"the model's alignment with human preferences and its multi-language " +"processing capabilities. Models of all sizes support a context length of " +"32768 tokens. 110 billion parameters." 
+msgstr "" +"qwen 1.5 110b 相较于以往版本,模型与人类偏好的对齐程度以及多语言处理能力上有" +"显著增强。所有规模的模型都支持32768个tokens的上下文长度。1100亿参数。" + +#: community/apps/setting/models_provider/impl/ollama_model_provider/ollama_model_provider.py:153 +#: community/apps/setting/models_provider/impl/ollama_model_provider/ollama_model_provider.py:193 +msgid "" +"Phi-3 Mini is Microsoft's 3.8B parameter, lightweight, state-of-the-art open " +"model." +msgstr "Phi-3 Mini是Microsoft的3.8B参数,轻量级,最先进的开放模型。" + +#: community/apps/setting/models_provider/impl/ollama_model_provider/ollama_model_provider.py:162 +#: community/apps/setting/models_provider/impl/ollama_model_provider/ollama_model_provider.py:197 +msgid "" +"A high-performance open embedding model with a large token context window." +msgstr "一个具有大 tokens上下文窗口的高性能开放嵌入模型。" + +#: community/apps/setting/models_provider/impl/openai_model_provider/credential/tti.py:16 +#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/tti.py:16 +msgid "" +"The image generation endpoint allows you to create raw images based on text " +"prompts. When using the DALL·E 3, the image size can be 1024x1024, 1024x1792 " +"or 1792x1024 pixels." +msgstr "" +"图像生成端点允许您根据文本提示创建原始图像。使用 DALL·E 3 时,图像的尺寸可以" +"为 1024x1024、1024x1792 或 1792x1024 像素。" + +#: community/apps/setting/models_provider/impl/openai_model_provider/credential/tti.py:29 +#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/tti.py:29 +msgid "" +" \n" +"By default, images are produced in standard quality, but with DALL·E 3 you " +"can set quality: \"hd\" to enhance detail. 
Square, standard quality images " +"are generated fastest.\n" +" " +msgstr "" +"默认情况下,图像以标准质量生成,但使用 DALL·E 3 时,您可以设置质量:“hd”以增" +"强细节。方形、标准质量的图像生成速度最快。" + +#: community/apps/setting/models_provider/impl/openai_model_provider/credential/tti.py:44 +#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/tti.py:44 +msgid "" +"You can use DALL·E 3 to request 1 image at a time (requesting more images by " +"issuing parallel requests), or use DALL·E 2 with the n parameter to request " +"up to 10 images at a time." +msgstr "" +"您可以使用 DALL·E 3 一次请求 1 个图像(通过发出并行请求来请求更多图像),或者" +"使用带有 n 参数的 DALL·E 2 一次最多请求 10 个图像。" + +#: community/apps/setting/models_provider/impl/openai_model_provider/openai_model_provider.py:35 +#: community/apps/setting/models_provider/impl/openai_model_provider/openai_model_provider.py:119 +#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/siliconCloud_model_provider.py:111 +msgid "The latest gpt-3.5-turbo, updated with OpenAI adjustments" +msgstr "最新的gpt-3.5-turbo,随OpenAI调整而更新" + +#: community/apps/setting/models_provider/impl/openai_model_provider/openai_model_provider.py:38 +msgid "Latest gpt-4, updated with OpenAI adjustments" +msgstr "最新的gpt-4,随OpenAI调整而更新" + +#: community/apps/setting/models_provider/impl/openai_model_provider/openai_model_provider.py:40 +#: community/apps/setting/models_provider/impl/openai_model_provider/openai_model_provider.py:99 +msgid "" +"The latest GPT-4o, cheaper and faster than gpt-4-turbo, updated with OpenAI " +"adjustments" +msgstr "最新的GPT-4o,比gpt-4-turbo更便宜、更快,随OpenAI调整而更新" + +#: community/apps/setting/models_provider/impl/openai_model_provider/openai_model_provider.py:43 +#: community/apps/setting/models_provider/impl/openai_model_provider/openai_model_provider.py:102 +msgid "" +"The latest gpt-4o-mini, cheaper and faster than gpt-4o, updated with OpenAI " +"adjustments" +msgstr "最新的gpt-4o-mini,比gpt-4o更便宜、更快,随OpenAI调整而更新" + +#: 
community/apps/setting/models_provider/impl/openai_model_provider/openai_model_provider.py:46 +msgid "The latest gpt-4-turbo, updated with OpenAI adjustments" +msgstr "最新的gpt-4-turbo,随OpenAI调整而更新" + +#: community/apps/setting/models_provider/impl/openai_model_provider/openai_model_provider.py:49 +msgid "The latest gpt-4-turbo-preview, updated with OpenAI adjustments" +msgstr "最新的gpt-4-turbo-preview,随OpenAI调整而更新" + +#: community/apps/setting/models_provider/impl/openai_model_provider/openai_model_provider.py:53 +msgid "" +"gpt-3.5-turbo snapshot on January 25, 2024, supporting context length 16,385 " +"tokens" +msgstr "2024年1月25日的gpt-3.5-turbo快照,支持上下文长度16,385 tokens" + +#: community/apps/setting/models_provider/impl/openai_model_provider/openai_model_provider.py:57 +msgid "" +"gpt-3.5-turbo snapshot on November 6, 2023, supporting context length 16,385 " +"tokens" +msgstr "2023年11月6日的gpt-3.5-turbo快照,支持上下文长度16,385 tokens" + +#: community/apps/setting/models_provider/impl/openai_model_provider/openai_model_provider.py:61 +msgid "" +"[Legacy] gpt-3.5-turbo snapshot on June 13, 2023, will be deprecated on June " +"13, 2024" +msgstr "[Legacy] 2023年6月13日的gpt-3.5-turbo快照,将于2024年6月13日弃用" + +#: community/apps/setting/models_provider/impl/openai_model_provider/openai_model_provider.py:65 +msgid "" +"gpt-4o snapshot on May 13, 2024, supporting context length 128,000 tokens" +msgstr "2024年5月13日的gpt-4o快照,支持上下文长度128,000 tokens" + +#: community/apps/setting/models_provider/impl/openai_model_provider/openai_model_provider.py:69 +msgid "" +"gpt-4-turbo snapshot on April 9, 2024, supporting context length 128,000 " +"tokens" +msgstr "2024年4月9日的gpt-4-turbo快照,支持上下文长度128,000 tokens" + +#: community/apps/setting/models_provider/impl/openai_model_provider/openai_model_provider.py:72 +msgid "" +"gpt-4-turbo snapshot on January 25, 2024, supporting context length 128,000 " +"tokens" +msgstr "2024年1月25日的gpt-4-turbo快照,支持上下文长度128,000 tokens" + +#: 
community/apps/setting/models_provider/impl/openai_model_provider/openai_model_provider.py:75 +msgid "" +"gpt-4-turbo snapshot on November 6, 2023, supporting context length 128,000 " +"tokens" +msgstr "2023年11月6日的gpt-4-turbo快照,支持上下文长度128,000 tokens" + +#: community/apps/setting/models_provider/impl/qwen_model_provider/qwen_model_provider.py:63 +msgid "Tongyi Qianwen" +msgstr "通义千问" + +#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/model/reranker.py:46 +msgid "Please provide server URL" +msgstr "请提供服务器URL" + +#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/model/reranker.py:49 +msgid "Please provide the model" +msgstr "请提供模型" + +#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/model/reranker.py:52 +msgid "Please provide the API Key" +msgstr "请提供API密钥" + +#: community/apps/setting/models_provider/impl/tencent_cloud_model_provider/tencent_cloud_model_provider.py:58 +msgid "Tencent Cloud" +msgstr "腾讯云" + +#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/llm.py:41 +#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:88 +#, python-brace-format +msgid "{keys} is required" +msgstr "{keys} 是必填项" + +#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:14 +msgid "painting style" +msgstr "绘画风格" + +#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:14 +msgid "If not passed, the default value is 201 (Japanese anime style)" +msgstr "如果未传递,则默认值为201(日本动漫风格)" + +#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:18 +msgid "Not limited to style" +msgstr "不限于风格" + +#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:19 +msgid "ink painting" +msgstr "水墨画" + +#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:20 +msgid "concept art" +msgstr "概念艺术" + +#: 
community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:21 +msgid "Oil painting 1" +msgstr "油画1" + +#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:22 +msgid "Oil Painting 2 (Van Gogh)" +msgstr "油画2(梵高)" + +#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:23 +msgid "watercolor painting" +msgstr "水彩画" + +#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:24 +msgid "pixel art" +msgstr "像素画" + +#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:25 +msgid "impasto style" +msgstr "厚涂风格" + +#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:26 +msgid "illustration" +msgstr "插图" + +#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:27 +msgid "paper cut style" +msgstr "剪纸风格" + +#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:28 +msgid "Impressionism 1 (Monet)" +msgstr "印象派1(莫奈)" + +#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:29 +msgid "Impressionism 2" +msgstr "印象派2" + +#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:31 +msgid "classical portraiture" +msgstr "古典肖像画" + +#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:32 +msgid "black and white sketch" +msgstr "黑白素描画" + +#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:33 +msgid "cyberpunk" +msgstr "赛博朋克" + +#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:34 +msgid "science fiction style" +msgstr "科幻风格" + +#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:35 +msgid "dark style" +msgstr "暗黑风格" + +#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:37 +msgid 
"vaporwave" +msgstr "蒸汽波" + +#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:38 +msgid "Japanese animation" +msgstr "日系动漫" + +#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:39 +msgid "monster style" +msgstr "怪兽风格" + +#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:40 +msgid "Beautiful ancient style" +msgstr "唯美古风" + +#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:41 +msgid "retro anime" +msgstr "复古动漫" + +#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:42 +msgid "Game cartoon hand drawing" +msgstr "游戏卡通手绘" + +#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:43 +msgid "Universal realistic style" +msgstr "通用写实风格" + +#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:50 +msgid "Generate image resolution" +msgstr "生成图像分辨率" + +#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:50 +msgid "If not transmitted, the default value is 768:768." +msgstr "不传默认使用768:768。" + +#: community/apps/setting/models_provider/impl/tencent_model_provider/tencent_model_provider.py:38 +msgid "" +"The most effective version of the current hybrid model, the trillion-level " +"parameter scale MOE-32K long article model. 
Reaching the absolute leading " +"level on various benchmarks, with complex instructions and reasoning, " +"complex mathematical capabilities, support for function call, and " +"application focus optimization in fields such as multi-language translation, " +"finance, law, and medical care" +msgstr "" +"当前混元模型中效果最优版本,万亿级参数规模 MOE-32K 长文模型。在各种 " +"benchmark 上达到绝对领先的水平,复杂指令和推理,具备复杂数学能力,支持 " +"functioncall,在多语言翻译、金融法律医疗等领域应用重点优化" + +#: community/apps/setting/models_provider/impl/tencent_model_provider/tencent_model_provider.py:45 +msgid "" +"A better routing strategy is adopted to simultaneously alleviate the " +"problems of load balancing and expert convergence. For long articles, the " +"needle-in-a-haystack index reaches 99.9%" +msgstr "" +"采用更优的路由策略,同时缓解了负载均衡和专家趋同的问题。长文方面,大海捞针指" +"标达到99.9%" + +#: community/apps/setting/models_provider/impl/tencent_model_provider/tencent_model_provider.py:51 +msgid "" +"Upgraded to MOE structure, the context window is 256k, leading many open " +"source models in multiple evaluation sets such as NLP, code, mathematics, " +"industry, etc." +msgstr "" +"升级为 MOE 结构,上下文窗口为 256k ,在 NLP,代码,数学,行业等多项评测集上领" +"先众多开源模型" + +#: community/apps/setting/models_provider/impl/tencent_model_provider/tencent_model_provider.py:57 +msgid "" +"Hunyuan's latest version of the role-playing model, a role-playing model " +"launched by Hunyuan's official fine-tuning training, is based on the Hunyuan " +"model combined with the role-playing scene data set for additional training, " +"and has better basic effects in role-playing scenes." +msgstr "" +"混元最新版角色扮演模型,混元官方精调训练推出的角色扮演模型,基于混元模型结合" +"角色扮演场景数据集进行增训,在角色扮演场景具有更好的基础效果" + +#: community/apps/setting/models_provider/impl/tencent_model_provider/tencent_model_provider.py:63 +msgid "" +"Hunyuan's latest MOE architecture FunctionCall model has been trained with " +"high-quality FunctionCall data and has a context window of 32K, leading in " +"multiple dimensions of evaluation indicators." 
+msgstr "" +"混元最新 MOE 架构 FunctionCall 模型,经过高质量的 FunctionCall 数据训练,上下" +"文窗口达 32K,在多个维度的评测指标上处于领先。" + +#: community/apps/setting/models_provider/impl/tencent_model_provider/tencent_model_provider.py:69 +msgid "" +"Hunyuan's latest code generation model, after training the base model with " +"200B high-quality code data, and iterating on high-quality SFT data for half " +"a year, the context long window length has been increased to 8K, and it " +"ranks among the top in the automatic evaluation indicators of code " +"generation in the five major languages; the five major languages In the " +"manual high-quality evaluation of 10 comprehensive code tasks that consider " +"all aspects, the performance is in the first echelon." +msgstr "" +"混元最新代码生成模型,经过 200B 高质量代码数据增训基座模型,迭代半年高质量 " +"SFT 数据训练,上下文长窗口长度增大到 8K,五大语言代码生成自动评测指标上位居前" +"列;五大语言10项考量各方面综合代码任务人工高质量评测上,性能处于第一梯队" + +#: community/apps/setting/models_provider/impl/tencent_model_provider/tencent_model_provider.py:77 +msgid "" +"Tencent's Hunyuan Embedding interface can convert text into high-quality " +"vector data. The vector dimension is 1024 dimensions." 
+msgstr "" +"腾讯混元 Embedding 接口,可以将文本转化为高质量的向量数据。向量维度为1024维。" + +#: community/apps/setting/models_provider/impl/tencent_model_provider/tencent_model_provider.py:87 +msgid "Mixed element visual model" +msgstr "混元视觉模型" + +#: community/apps/setting/models_provider/impl/tencent_model_provider/tencent_model_provider.py:94 +msgid "Hunyuan graph model" +msgstr "混元生图模型" + +#: community/apps/setting/models_provider/impl/tencent_model_provider/tencent_model_provider.py:125 +msgid "Tencent Hunyuan" +msgstr "腾讯混元" + +#: community/apps/setting/models_provider/impl/vllm_model_provider/vllm_model_provider.py:24 +#: community/apps/setting/models_provider/impl/vllm_model_provider/vllm_model_provider.py:42 +msgid "Facebook’s 125M parameter model" +msgstr "Facebook的125M参数模型" + +#: community/apps/setting/models_provider/impl/vllm_model_provider/vllm_model_provider.py:25 +msgid "BAAI’s 7B parameter model" +msgstr "BAAI的7B参数模型" + +#: community/apps/setting/models_provider/impl/vllm_model_provider/vllm_model_provider.py:26 +msgid "BAAI’s 13B parameter mode" +msgstr "BAAI的13B参数模型" + +#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/tti.py:16 +msgid "" +"If the gap between width, height and 512 is too large, the picture rendering " +"effect will be poor and the probability of excessive delay will increase " +"significantly. 
Recommended ratio and corresponding width and height before " +"super score: width*height" +msgstr "" +"宽、高与512差距过大,则出图效果不佳、延迟过长概率显著增加。超分前建议比例及对" +"应宽高:width*height" + +#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/tts.py:23 +#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/tts.py:29 +msgid "Universal female voice" +msgstr "通用女声" + +#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/tts.py:25 +msgid "Supernatural timbre-ZiZi 2.0" +msgstr "超自然音色-梓梓2.0" + +#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/tts.py:26 +msgid "Supernatural timbre-ZiZi" +msgstr "超自然音色-梓梓" + +#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/tts.py:27 +msgid "Supernatural sound-Ranran 2.0" +msgstr "超自然音色-燃燃2.0" + +#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/tts.py:28 +msgid "Supernatural sound-Ranran" +msgstr "超自然音色-燃燃" + +#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/tts.py:30 +msgid "Universal male voice" +msgstr "通用男声" + +#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/tts.py:33 +msgid "[0.2,3], the default is 1, usually one decimal place is enough" +msgstr "[0.2,3],默认为1,通常保留一位小数即可" + +#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/volcanic_engine_model_provider.py:39 +#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/volcanic_engine_model_provider.py:44 +#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/volcanic_engine_model_provider.py:88 +msgid "" +"The user goes to the model inference page of Volcano Ark to create an " +"inference access point. Here, you need to enter ep-xxxxxxxxxx-yyyy to call " +"it." 
+msgstr "" +"用户前往火山方舟的模型推理页面创建推理接入点,这里需要输入ep-xxxxxxxxxx-yyyy" +"进行调用" + +#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/volcanic_engine_model_provider.py:59 +msgid "Universal 2.0-Vincent Diagram" +msgstr "通用2.0-文生图" + +#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/volcanic_engine_model_provider.py:64 +msgid "Universal 2.0Pro-Vincent Chart" +msgstr "通用2.0Pro-文生图" + +#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/volcanic_engine_model_provider.py:69 +msgid "Universal 1.4-Vincent Chart" +msgstr "通用1.4-文生图" + +#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/volcanic_engine_model_provider.py:74 +msgid "Animation 1.3.0-Vincent Picture" +msgstr "动漫1.3.0-文生图" + +#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/volcanic_engine_model_provider.py:79 +msgid "Animation 1.3.1-Vincent Picture" +msgstr "动漫1.3.1-文生图" + +#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/volcanic_engine_model_provider.py:113 +msgid "volcano engine" +msgstr "火山引擎" + +#: community/apps/setting/models_provider/impl/wenxin_model_provider/credential/llm.py:51 +#, python-brace-format +msgid "{model_name} The model does not support" +msgstr "{model_name} 模型不支持" + +#: community/apps/setting/models_provider/impl/wenxin_model_provider/wenxin_model_provider.py:24 +#: community/apps/setting/models_provider/impl/wenxin_model_provider/wenxin_model_provider.py:53 +msgid "" +"ERNIE-Bot-4 is a large language model independently developed by Baidu. It " +"covers massive Chinese data and has stronger capabilities in dialogue Q&A, " +"content creation and generation." +msgstr "" +"ERNIE-Bot-4是百度自行研发的大语言模型,覆盖海量中文数据,具有更强的对话问答、" +"内容创作生成等能力。" + +#: community/apps/setting/models_provider/impl/wenxin_model_provider/wenxin_model_provider.py:27 +msgid "" +"ERNIE-Bot is a large language model independently developed by Baidu. 
It " +"covers massive Chinese data and has stronger capabilities in dialogue Q&A, " +"content creation and generation." +msgstr "" +"ERNIE-Bot是百度自行研发的大语言模型,覆盖海量中文数据,具有更强的对话问答、内" +"容创作生成等能力。" + +#: community/apps/setting/models_provider/impl/wenxin_model_provider/wenxin_model_provider.py:30 +msgid "" +"ERNIE-Bot-turbo is a large language model independently developed by Baidu. " +"It covers massive Chinese data, has stronger capabilities in dialogue Q&A, " +"content creation and generation, and has a faster response speed." +msgstr "" +"ERNIE-Bot-turbo是百度自行研发的大语言模型,覆盖海量中文数据,具有更强的对话问" +"答、内容创作生成等能力,响应速度更快。" + +#: community/apps/setting/models_provider/impl/wenxin_model_provider/wenxin_model_provider.py:33 +msgid "" +"BLOOMZ-7B is a well-known large language model in the industry. It was " +"developed and open sourced by BigScience and can output text in 46 languages " +"and 13 programming languages." +msgstr "" +"BLOOMZ-7B是业内知名的大语言模型,由BigScience研发并开源,能够以46种语言和13种" +"编程语言输出文本。" + +#: community/apps/setting/models_provider/impl/wenxin_model_provider/wenxin_model_provider.py:39 +msgid "" +"Llama-2-13b-chat was developed by Meta AI and is open source. It performs " +"well in scenarios such as coding, reasoning and knowledge application. " +"Llama-2-13b-chat is a native open source version with balanced performance " +"and effect, suitable for conversation scenarios." +msgstr "" +"Llama-2-13b-chat由Meta AI研发并开源,在编码、推理及知识应用等场景表现优秀," +"Llama-2-13b-chat是性能与效果均衡的原生开源版本,适用于对话场景。" + +#: community/apps/setting/models_provider/impl/wenxin_model_provider/wenxin_model_provider.py:42 +msgid "" +"Llama-2-70b-chat was developed by Meta AI and is open source. It performs " +"well in scenarios such as coding, reasoning, and knowledge application. " +"Llama-2-70b-chat is a native open source version with high-precision effects." 
+msgstr "" +"Llama-2-70b-chat由Meta AI研发并开源,在编码、推理及知识应用等场景表现优秀," +"Llama-2-70b-chat是高精度效果的原生开源版本。" + +#: community/apps/setting/models_provider/impl/wenxin_model_provider/wenxin_model_provider.py:45 +msgid "" +"The Chinese enhanced version developed by the Qianfan team based on " +"Llama-2-7b has performed well on Chinese knowledge bases such as CMMLU and C-" +"EVAL." +msgstr "" +"千帆团队在Llama-2-7b基础上的中文增强版本,在CMMLU、C-EVAL等中文知识库上表现优" +"异。" + +#: community/apps/setting/models_provider/impl/wenxin_model_provider/wenxin_model_provider.py:49 +msgid "" +"Embedding-V1 is a text representation model based on Baidu Wenxin large " +"model technology. It can convert text into a vector form represented by " +"numerical values and can be used in text retrieval, information " +"recommendation, knowledge mining and other scenarios. Embedding-V1 provides " +"the Embeddings interface, which can generate corresponding vector " +"representations based on input content. You can call this interface to input " +"text into the model and obtain the corresponding vector representation for " +"subsequent text processing and analysis." +msgstr "" +"Embedding-V1是一个基于百度文心大模型技术的文本表示模型,可以将文本转化为用数" +"值表示的向量形式,用于文本检索、信息推荐、知识挖掘等场景。 Embedding-V1提供了" +"Embeddings接口,可以根据输入内容生成对应的向量表示。您可以通过调用该接口,将" +"文本输入到模型中,获取到对应的向量表示,从而进行后续的文本处理和分析。" + +#: community/apps/setting/models_provider/impl/wenxin_model_provider/wenxin_model_provider.py:66 +msgid "Thousand sails large model" +msgstr "千帆大模型" + +#: community/apps/setting/models_provider/impl/xf_model_provider/credential/image.py:42 +msgid "Please outline this picture" +msgstr "请描述这张图片" + +#: community/apps/setting/models_provider/impl/xf_model_provider/credential/tts.py:15 +msgid "Speaker" +msgstr "发音人" + +#: community/apps/setting/models_provider/impl/xf_model_provider/credential/tts.py:16 +msgid "" +"Speaker, optional value: Please go to the console to add a trial or purchase " +"speaker. After adding, the speaker parameter value will be displayed." 
+msgstr "" +"发音人,可选值:请到控制台添加试用或购买发音人,添加后即显示发音人参数值" + +#: community/apps/setting/models_provider/impl/xf_model_provider/credential/tts.py:21 +msgid "iFlytek Xiaoyan" +msgstr "讯飞小燕" + +#: community/apps/setting/models_provider/impl/xf_model_provider/credential/tts.py:22 +msgid "iFlytek Xujiu" +msgstr "讯飞许久" + +#: community/apps/setting/models_provider/impl/xf_model_provider/credential/tts.py:23 +msgid "iFlytek Xiaoping" +msgstr "讯飞小萍" + +#: community/apps/setting/models_provider/impl/xf_model_provider/credential/tts.py:24 +msgid "iFlytek Xiaojing" +msgstr "讯飞小婧" + +#: community/apps/setting/models_provider/impl/xf_model_provider/credential/tts.py:25 +msgid "iFlytek Xuxiaobao" +msgstr "讯飞许小宝" + +#: community/apps/setting/models_provider/impl/xf_model_provider/credential/tts.py:28 +msgid "Speech speed, optional value: [0-100], default is 50" +msgstr "语速,可选值:[0-100],默认为50" + +#: community/apps/setting/models_provider/impl/xf_model_provider/xf_model_provider.py:39 +#: community/apps/setting/models_provider/impl/xf_model_provider/xf_model_provider.py:50 +msgid "Chinese and English recognition" +msgstr "中英文识别" + +#: community/apps/setting/models_provider/impl/xf_model_provider/xf_model_provider.py:66 +msgid "iFlytek Spark" +msgstr "讯飞星火" + +#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/tti.py:15 +msgid "" +"The image generation endpoint allows you to create raw images based on text " +"prompts. The dimensions of the image can be 1024x1024, 1024x1792, or " +"1792x1024 pixels." +msgstr "" +"图像生成端点允许您根据文本提示创建原始图像。图像的尺寸可以为 1024x1024、" +"1024x1792 或 1792x1024 像素。" + +#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/tti.py:29 +msgid "" +"By default, images are generated in standard quality, you can set quality: " +"\"hd\" to enhance detail. Square, standard quality images are generated " +"fastest." 
+msgstr "" +"默认情况下,图像以标准质量生成,您可以设置质量:“hd”以增强细节。方形、标准质" +"量的图像生成速度最快。" + +#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/tti.py:42 +msgid "" +"You can request 1 image at a time (requesting more images by making parallel " +"requests), or up to 10 images at a time using the n parameter." +msgstr "" +"您可以一次请求 1 个图像(通过发出并行请求来请求更多图像),或者使用 n 参数一" +"次最多请求 10 个图像。" + +#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/tts.py:20 +msgid "Chinese female" +msgstr "中文女" + +#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/tts.py:21 +msgid "Chinese male" +msgstr "中文男" + +#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/tts.py:22 +msgid "Japanese male" +msgstr "日语男" + +#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/tts.py:23 +msgid "Cantonese female" +msgstr "粤语女" + +#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/tts.py:24 +msgid "English female" +msgstr "英文女" + +#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/tts.py:25 +msgid "English male" +msgstr "英文男" + +#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/tts.py:26 +msgid "Korean female" +msgstr "韩语女" + +#: community/apps/setting/models_provider/impl/xinference_model_provider/xinference_model_provider.py:37 +msgid "" +"Code Llama is a language model specifically designed for code generation." 
+msgstr "Code Llama 是一个专门用于代码生成的语言模型。"
+
+#: community/apps/setting/models_provider/impl/xinference_model_provider/xinference_model_provider.py:44
+msgid ""
+" \n"
+"Code Llama Instruct is a fine-tuned version of Code Llama's instructions, "
+"designed to perform specific tasks.\n"
+" "
+msgstr ""
+"Code Llama Instruct 是 Code Llama 的指令微调版本,专为执行特定任务而设计。"
+
+#: community/apps/setting/models_provider/impl/xinference_model_provider/xinference_model_provider.py:53
+msgid ""
+"Code Llama Python is a language model specifically designed for Python code "
+"generation."
+msgstr "Code Llama Python 是一个专门用于 Python 代码生成的语言模型。"
+
+#: community/apps/setting/models_provider/impl/xinference_model_provider/xinference_model_provider.py:60
+msgid ""
+"CodeQwen 1.5 is a language model for code generation with high performance."
+msgstr "CodeQwen 1.5 是一个用于代码生成的语言模型,具有较高的性能。"
+
+#: community/apps/setting/models_provider/impl/xinference_model_provider/xinference_model_provider.py:67
+msgid "CodeQwen 1.5 Chat is a chat model version of CodeQwen 1.5."
+msgstr "CodeQwen 1.5 Chat 是一个聊天模型版本的 CodeQwen 1.5。"
+
+#: community/apps/setting/models_provider/impl/xinference_model_provider/xinference_model_provider.py:74
+msgid "Deepseek is a large-scale language model with 13 billion parameters."
+msgstr "Deepseek 是一个具有 130 亿参数的大规模语言模型。"
+
+#: community/apps/setting/models_provider/impl/zhipu_model_provider/credential/tti.py:16
+msgid ""
+"Image size, only cogview-3-plus supports this parameter. Optional range: "
+"[1024x1024,768x1344,864x1152,1344x768,1152x864,1440x720,720x1440], the "
+"default is 1024x1024."
+msgstr ""
+"图片尺寸,仅 cogview-3-plus 支持该参数。可选范围:"
+"[1024x1024,768x1344,864x1152,1344x768,1152x864,1440x720,720x1440],默认是"
+"1024x1024。"
+
+#: community/apps/setting/models_provider/impl/zhipu_model_provider/zhipu_model_provider.py:34
+msgid ""
+"Have strong multi-modal understanding capabilities. 
Able to understand up to " +"five images simultaneously and supports video content understanding" +msgstr "具有强大的多模态理解能力。能够同时理解多达五张图像,并支持视频内容理解" + +#: community/apps/setting/models_provider/impl/zhipu_model_provider/zhipu_model_provider.py:37 +msgid "" +"Focus on single picture understanding. Suitable for scenarios requiring " +"efficient image analysis" +msgstr "专注于单图理解。适用于需要高效图像解析的场景" + +#: community/apps/setting/models_provider/impl/zhipu_model_provider/zhipu_model_provider.py:40 +msgid "" +"Focus on single picture understanding. Suitable for scenarios requiring " +"efficient image analysis (free)" +msgstr "专注于单图理解。适用于需要高效图像解析的场景(免费)" + +#: community/apps/setting/models_provider/impl/zhipu_model_provider/zhipu_model_provider.py:46 +msgid "" +"Quickly and accurately generate images based on user text descriptions. " +"Resolution supports 1024x1024" +msgstr "根据用户文字描述快速、精准生成图像。分辨率支持1024x1024" + +#: community/apps/setting/models_provider/impl/zhipu_model_provider/zhipu_model_provider.py:49 +msgid "" +"Generate high-quality images based on user text descriptions, supporting " +"multiple image sizes" +msgstr "根据用户文字描述生成高质量图像,支持多图片尺寸" + +#: community/apps/setting/models_provider/impl/zhipu_model_provider/zhipu_model_provider.py:52 +msgid "" +"Generate high-quality images based on user text descriptions, supporting " +"multiple image sizes (free)" +msgstr "根据用户文字描述生成高质量图像,支持多图片尺寸(免费)" + +#: community/apps/setting/models_provider/impl/zhipu_model_provider/zhipu_model_provider.py:75 +msgid "zhipu AI" +msgstr "智谱 AI" + +#: community/apps/setting/serializers/model_apply_serializers.py:32 +#: community/apps/setting/serializers/model_apply_serializers.py:37 +msgid "vector text" +msgstr "向量文本" + +#: community/apps/setting/serializers/model_apply_serializers.py:33 +msgid "vector text list" +msgstr "向量文本列表" + +#: community/apps/setting/serializers/model_apply_serializers.py:41 +msgid "text" +msgstr "文本" + +#: community/apps/setting/serializers/model_apply_serializers.py:42 +msgid 
"metadata" +msgstr "元数据" + +#: community/apps/setting/serializers/model_apply_serializers.py:47 +msgid "query" +msgstr "查询" + +#: community/apps/setting/serializers/provider_serializers.py:79 +#: community/apps/setting/serializers/provider_serializers.py:83 +#: community/apps/setting/serializers/provider_serializers.py:130 +#: community/apps/setting/serializers/provider_serializers.py:176 +#: community/apps/setting/serializers/provider_serializers.py:190 +#: community/apps/setting/swagger_api/provide_api.py:30 +#: community/apps/setting/swagger_api/provide_api.py:54 +#: community/apps/setting/swagger_api/provide_api.py:55 +#: community/apps/setting/swagger_api/provide_api.py:87 +#: community/apps/setting/swagger_api/provide_api.py:88 +#: community/apps/setting/swagger_api/provide_api.py:170 +msgid "model name" +msgstr "模型名称" + +#: community/apps/setting/serializers/provider_serializers.py:81 +#: community/apps/setting/serializers/provider_serializers.py:132 +#: community/apps/setting/serializers/provider_serializers.py:142 +#: community/apps/setting/serializers/provider_serializers.py:180 +#: community/apps/setting/swagger_api/provide_api.py:26 +#: community/apps/setting/swagger_api/provide_api.py:51 +#: community/apps/setting/swagger_api/provide_api.py:52 +#: community/apps/setting/swagger_api/provide_api.py:84 +#: community/apps/setting/swagger_api/provide_api.py:85 +#: community/apps/setting/swagger_api/provide_api.py:134 +#: community/apps/setting/swagger_api/provide_api.py:165 +msgid "model type" +msgstr "模型类型" + +#: community/apps/setting/serializers/provider_serializers.py:85 +#: community/apps/setting/serializers/provider_serializers.py:178 +#: community/apps/setting/serializers/provider_serializers.py:402 +#: community/apps/setting/swagger_api/provide_api.py:35 +#: community/apps/setting/swagger_api/provide_api.py:57 +#: community/apps/setting/swagger_api/provide_api.py:58 +#: community/apps/setting/swagger_api/provide_api.py:79 +#: 
community/apps/setting/swagger_api/provide_api.py:80 +#: community/apps/setting/swagger_api/provide_api.py:105 +#: community/apps/setting/swagger_api/provide_api.py:129 +#: community/apps/setting/swagger_api/provide_api.py:160 +#: community/apps/setting/swagger_api/provide_api.py:179 +msgid "provider" +msgstr "供应商" + +#: community/apps/setting/serializers/provider_serializers.py:87 +#: community/apps/setting/serializers/provider_serializers.py:134 +#: community/apps/setting/serializers/provider_serializers.py:182 +msgid "permission type" +msgstr "权限类型" + +#: community/apps/setting/serializers/provider_serializers.py:89 +msgid "create user" +msgstr "创建者" + +#: community/apps/setting/serializers/provider_serializers.py:138 +#: community/apps/setting/serializers/provider_serializers.py:186 +msgid "permissions only supportPUBLIC|PRIVATE" +msgstr "权限类型只支持PUBLIC|PRIVATE" + +#: community/apps/setting/serializers/provider_serializers.py:145 +#: community/apps/setting/serializers/provider_serializers.py:196 +msgid "certification information" +msgstr "认证信息" + +#: community/apps/setting/serializers/provider_serializers.py:193 +msgid "parameter configuration" +msgstr "参数配置" + +#: community/apps/setting/serializers/provider_serializers.py:202 +#, python-brace-format +msgid "Model name【{model_name}】already exists" +msgstr "模型名称【{model_name}】已存在" + +#: community/apps/setting/serializers/system_setting.py:29 +#: community/apps/setting/swagger_api/system_setting.py:25 +#: community/apps/setting/swagger_api/system_setting.py:26 +#: community/apps/setting/swagger_api/system_setting.py:57 +#: community/apps/setting/swagger_api/system_setting.py:58 +msgid "SMTP host" +msgstr "SMTP 主机" + +#: community/apps/setting/serializers/system_setting.py:30 +#: community/apps/setting/swagger_api/system_setting.py:28 +#: community/apps/setting/swagger_api/system_setting.py:29 +#: community/apps/setting/swagger_api/system_setting.py:60 +#: community/apps/setting/swagger_api/system_setting.py:61 
+msgid "SMTP port" +msgstr "SMTP 端口" + +#: community/apps/setting/serializers/system_setting.py:31 +#: community/apps/setting/serializers/system_setting.py:35 +#: community/apps/setting/swagger_api/system_setting.py:31 +#: community/apps/setting/swagger_api/system_setting.py:32 +#: community/apps/setting/swagger_api/system_setting.py:43 +#: community/apps/setting/swagger_api/system_setting.py:44 +#: community/apps/setting/swagger_api/system_setting.py:63 +#: community/apps/setting/swagger_api/system_setting.py:64 +#: community/apps/setting/swagger_api/system_setting.py:75 +#: community/apps/setting/swagger_api/system_setting.py:76 +msgid "Sender's email" +msgstr "发件人邮箱" + +#: community/apps/setting/serializers/system_setting.py:32 +#: community/apps/setting/swagger_api/system_setting.py:34 +#: community/apps/setting/swagger_api/system_setting.py:35 +#: community/apps/setting/swagger_api/system_setting.py:66 +#: community/apps/setting/swagger_api/system_setting.py:67 +#: community/apps/users/serializers/user_serializers.py:72 +#: community/apps/users/serializers/user_serializers.py:112 +#: community/apps/users/serializers/user_serializers.py:143 +#: community/apps/users/serializers/user_serializers.py:211 +#: community/apps/users/serializers/user_serializers.py:293 +#: community/apps/users/serializers/user_serializers.py:346 +#: community/apps/users/serializers/user_serializers.py:671 +#: community/apps/users/serializers/user_serializers.py:703 +#: community/apps/users/serializers/user_serializers.py:704 +#: community/apps/users/serializers/user_serializers.py:743 +#: community/apps/users/serializers/user_serializers.py:763 +#: community/apps/users/serializers/user_serializers.py:764 +#: community/apps/users/views/user.py:109 +#: community/apps/users/views/user.py:110 +#: community/apps/users/views/user.py:111 +#: community/apps/users/views/user.py:112 +msgid "Password" +msgstr "密码" + +#: community/apps/setting/serializers/system_setting.py:33 +#: 
community/apps/setting/swagger_api/system_setting.py:37 +#: community/apps/setting/swagger_api/system_setting.py:38 +#: community/apps/setting/swagger_api/system_setting.py:69 +#: community/apps/setting/swagger_api/system_setting.py:70 +msgid "Whether to enable TLS" +msgstr "是否启用 TLS" + +#: community/apps/setting/serializers/system_setting.py:34 +#: community/apps/setting/swagger_api/system_setting.py:40 +#: community/apps/setting/swagger_api/system_setting.py:41 +#: community/apps/setting/swagger_api/system_setting.py:72 +#: community/apps/setting/swagger_api/system_setting.py:73 +msgid "Whether to enable SSL" +msgstr "是否启用 SSL" + +#: community/apps/setting/serializers/system_setting.py:49 +msgid "Email verification failed" +msgstr "邮箱验证失败" + +#: community/apps/setting/serializers/team_serializers.py:43 +#: community/apps/users/serializers/user_serializers.py:70 +#: community/apps/users/serializers/user_serializers.py:111 +#: community/apps/users/serializers/user_serializers.py:136 +#: community/apps/users/serializers/user_serializers.py:209 +#: community/apps/users/serializers/user_serializers.py:470 +#: community/apps/users/serializers/user_serializers.py:493 +#: community/apps/users/serializers/user_serializers.py:518 +#: community/apps/users/serializers/user_serializers.py:519 +#: community/apps/users/serializers/user_serializers.py:581 +#: community/apps/users/serializers/user_serializers.py:627 +#: community/apps/users/serializers/user_serializers.py:628 +#: community/apps/users/serializers/user_serializers.py:663 +#: community/apps/users/serializers/user_serializers.py:700 +#: community/apps/users/serializers/user_serializers.py:701 +msgid "Username" +msgstr "用户名" + +#: community/apps/setting/serializers/team_serializers.py:44 +#: community/apps/users/serializers/user_serializers.py:131 +#: community/apps/users/serializers/user_serializers.py:210 +#: community/apps/users/serializers/user_serializers.py:226 +#: 
community/apps/users/serializers/user_serializers.py:256 +#: community/apps/users/serializers/user_serializers.py:287 +#: community/apps/users/serializers/user_serializers.py:343 +#: community/apps/users/serializers/user_serializers.py:356 +#: community/apps/users/serializers/user_serializers.py:438 +#: community/apps/users/serializers/user_serializers.py:471 +#: community/apps/users/serializers/user_serializers.py:494 +#: community/apps/users/serializers/user_serializers.py:520 +#: community/apps/users/serializers/user_serializers.py:582 +#: community/apps/users/serializers/user_serializers.py:629 +#: community/apps/users/serializers/user_serializers.py:658 +#: community/apps/users/serializers/user_serializers.py:702 +#: community/apps/users/serializers/user_serializers.py:713 +#: community/apps/users/serializers/user_serializers.py:734 +msgid "Email" +msgstr "邮箱" + +#: community/apps/setting/serializers/team_serializers.py:47 +#: community/apps/setting/serializers/team_serializers.py:148 +#: community/apps/setting/serializers/team_serializers.py:256 +msgid "team id" +msgstr "团队 id" + +#: community/apps/setting/serializers/team_serializers.py:48 +#: community/apps/setting/serializers/team_serializers.py:254 +#: community/apps/setting/serializers/team_serializers.py:324 +msgid "member id" +msgstr "成员 id" + +#: community/apps/setting/serializers/team_serializers.py:54 +msgid "use" +msgstr "使用" + +#: community/apps/setting/serializers/team_serializers.py:55 +msgid "manage" +msgstr "管理" + +#: community/apps/setting/serializers/team_serializers.py:60 +msgid "Operation permissions USE, MANAGE permissions" +msgstr "操作权限 USE, MANAGE 权限" + +#: community/apps/setting/serializers/team_serializers.py:63 +msgid "use permission" +msgstr "使用权限" + +#: community/apps/setting/serializers/team_serializers.py:64 +msgid "use permission True|False" +msgstr "使用权限 True|False" + +#: community/apps/setting/serializers/team_serializers.py:66 +msgid "manage permission" +msgstr "管理权限" + +#: 
community/apps/setting/serializers/team_serializers.py:67 +msgid "manage permission True|False" +msgstr "管理权限 True|False" + +#: community/apps/setting/serializers/team_serializers.py:73 +msgid "target id" +msgstr "目标 id" + +#: community/apps/setting/serializers/team_serializers.py:82 +#: community/apps/setting/serializers/team_serializers.py:83 +msgid "dataset id/application id" +msgstr "知识库 id/应用 id" + +#: community/apps/setting/serializers/team_serializers.py:105 +msgid "Non-existent application|knowledge base id[" +msgstr "应用|知识库 id[ 不存在" + +#: community/apps/setting/serializers/team_serializers.py:139 +#: community/apps/setting/serializers/team_serializers.py:140 +msgid "Permission data" +msgstr "权限数据" + +#: community/apps/setting/serializers/team_serializers.py:157 +#: community/apps/setting/serializers/team_serializers.py:158 +msgid "user id list" +msgstr "用户 id 列表" + +#: community/apps/setting/serializers/team_serializers.py:168 +#: community/apps/setting/serializers/team_serializers.py:169 +msgid "Username or email" +msgstr "用户名或邮箱" + +#: community/apps/setting/serializers/team_serializers.py:217 +msgid "Username or email is required" +msgstr "用户名或邮箱是必填项" + +#: community/apps/setting/serializers/team_serializers.py:221 +#: community/apps/users/serializers/user_serializers.py:800 +msgid "User does not exist" +msgstr "用户不存在" + +#: community/apps/setting/serializers/team_serializers.py:224 +msgid "The current members already exist in the team, do not add them again." 
+msgstr "当前成员已存在于团队中,无需再次添加。" + +#: community/apps/setting/serializers/team_serializers.py:248 +msgid "member list" +msgstr "成员列表" + +#: community/apps/setting/serializers/team_serializers.py:263 +msgid "The member does not exist, please add a member first" +msgstr "成员不存在,请先添加成员" + +#: community/apps/setting/serializers/team_serializers.py:297 +msgid "Administrator rights do not allow modification" +msgstr "管理员权限不允许修改" + +#: community/apps/setting/serializers/team_serializers.py:311 +msgid "Unable to remove team admin" +msgstr "不支持移除团队管理员" + +#: community/apps/setting/serializers/valid_serializers.py:32 +#: community/apps/users/serializers/user_serializers.py:190 +#: community/apps/users/serializers/user_serializers.py:777 +msgid "" +"The community version supports up to 2 users. If you need more users, please " +"contact us (https://fit2cloud.com/)." +msgstr "" +"社区版最多支持 2 个用户,如需拥有更多用户,请联系我们(https://" +"fit2cloud.com/)。" + +#: community/apps/setting/serializers/valid_serializers.py:41 +#: community/apps/setting/swagger_api/valid_api.py:27 +msgid "check quantity" +msgstr "检查数量" + +#: community/apps/setting/swagger_api/provide_api.py:43 +#: community/apps/setting/swagger_api/provide_api.py:44 +#: community/apps/setting/swagger_api/provide_api.py:71 +#: community/apps/setting/swagger_api/provide_api.py:72 +#: community/apps/setting/swagger_api/provide_api.py:190 +#: community/apps/setting/swagger_api/provide_api.py:191 +msgid "parameters required to call the function" +msgstr "调用函数所需要的参数" + +#: community/apps/setting/swagger_api/provide_api.py:60 +#: community/apps/setting/swagger_api/provide_api.py:61 +#: community/apps/setting/swagger_api/provide_api.py:90 +#: community/apps/setting/swagger_api/provide_api.py:91 +msgid "model certificate information" +msgstr "模型认证信息" + +#: community/apps/setting/swagger_api/provide_api.py:114 +#: community/apps/setting/swagger_api/provide_api.py:115 +msgid "model type description" +msgstr "模型类型描述" + +#: 
community/apps/setting/swagger_api/provide_api.py:115 +msgid "large language model" +msgstr "大语言模型" + +#: community/apps/setting/swagger_api/provide_api.py:116 +#: community/apps/setting/swagger_api/provide_api.py:117 +#: community/apps/setting/swagger_api/provide_api.py:147 +#: community/apps/setting/swagger_api/provide_api.py:148 +msgid "model type value" +msgstr "模型类型值" + +#: community/apps/setting/swagger_api/provide_api.py:145 +#: community/apps/setting/swagger_api/provide_api.py:146 +msgid "model description" +msgstr "模型描述" + +#: community/apps/setting/swagger_api/provide_api.py:184 +msgid "function that needs to be executed" +msgstr "需要执行的函数" + +#: community/apps/setting/swagger_api/system_setting.py:19 +#: community/apps/setting/swagger_api/system_setting.py:20 +#: community/apps/setting/swagger_api/system_setting.py:51 +#: community/apps/setting/swagger_api/system_setting.py:52 +msgid "Email related parameters" +msgstr "邮箱相关参数" + +#: community/apps/setting/swagger_api/valid_api.py:22 +msgid "Verification type: application|dataset|user" +msgstr "认证类型:application|dataset|user" + +#: community/apps/setting/views/Team.py:27 +#: community/apps/setting/views/Team.py:28 +msgid "Get a list of team members" +msgstr "获取团队成员列表" + +#: community/apps/setting/views/Team.py:30 +#: community/apps/setting/views/Team.py:40 +#: community/apps/setting/views/Team.py:54 +#: community/apps/setting/views/Team.py:68 +#: community/apps/setting/views/Team.py:80 +#: community/apps/setting/views/Team.py:92 +#: community/apps/users/serializers/user_serializers.py:198 +#: community/apps/users/serializers/user_serializers.py:791 +msgid "team" +msgstr "团队成员" + +#: community/apps/setting/views/Team.py:37 +#: community/apps/setting/views/Team.py:38 +msgid "Add member" +msgstr "添加成员" + +#: community/apps/setting/views/Team.py:51 +#: community/apps/setting/views/Team.py:52 +msgid "Add members in batches" +msgstr "批量添加成员" + +#: community/apps/setting/views/Team.py:65 +#: 
community/apps/setting/views/Team.py:66 +msgid "Get team member permissions" +msgstr "获取团队成员权限" + +#: community/apps/setting/views/Team.py:76 +#: community/apps/setting/views/Team.py:77 +msgid "Update team member permissions" +msgstr "更新团队成员权限" + +#: community/apps/setting/views/Team.py:89 +#: community/apps/setting/views/Team.py:90 +msgid "Remove member" +msgstr "移除成员" + +#: community/apps/setting/views/model.py:30 +#: community/apps/setting/views/model.py:31 +msgid "Create model" +msgstr "创建模型" + +#: community/apps/setting/views/model.py:33 +#: community/apps/setting/views/model.py:45 +#: community/apps/setting/views/model.py:57 +#: community/apps/setting/views/model.py:74 +#: community/apps/setting/views/model.py:88 +#: community/apps/setting/views/model.py:103 +#: community/apps/setting/views/model.py:114 +#: community/apps/setting/views/model.py:129 +#: community/apps/setting/views/model.py:141 +#: community/apps/setting/views/model.py:151 +#: community/apps/setting/views/model.py:170 +#: community/apps/setting/views/model.py:180 +#: community/apps/setting/views/model.py:204 +#: community/apps/setting/views/model.py:219 +#: community/apps/setting/views/model.py:239 +#: community/apps/setting/views/model.py:257 +#: community/apps/setting/views/model_apply.py:26 +#: community/apps/setting/views/model_apply.py:36 +#: community/apps/setting/views/model_apply.py:46 +msgid "model" +msgstr "模型设置" + +#: community/apps/setting/views/model.py:42 +#: community/apps/setting/views/model.py:43 +msgid "Download model, trial only with Ollama platform" +msgstr "下载模型,仅支持 Ollama 平台试用" + +#: community/apps/setting/views/model.py:54 +#: community/apps/setting/views/model.py:55 +msgid "Get model list" +msgstr "获取模型列表" + +#: community/apps/setting/views/model.py:71 +#: community/apps/setting/views/model.py:73 +msgid "" +"Query model meta information, this interface does not carry authentication " +"information" +msgstr "查询模型元信息,该接口不携带认证信息" + +#: 
community/apps/setting/views/model.py:86 +#: community/apps/setting/views/model.py:87 +msgid "Pause model download" +msgstr "下载模型暂停" + +#: community/apps/setting/views/model.py:111 +#: community/apps/setting/views/model.py:112 +msgid "Save model parameter form" +msgstr "保存模型参数表单" + +#: community/apps/setting/views/model.py:126 +#: community/apps/setting/views/model.py:127 +msgid "Update model" +msgstr "更新模型" + +#: community/apps/setting/views/model.py:138 +#: community/apps/setting/views/model.py:139 +msgid "Delete model" +msgstr "删除模型" + +#: community/apps/setting/views/model.py:149 +#: community/apps/setting/views/model.py:150 +msgid "Query model details" +msgstr "查询模型详情" + +#: community/apps/setting/views/model.py:166 +#: community/apps/setting/views/model.py:167 +msgid "Call the supplier function to obtain form data" +msgstr "调用供应商函数,获取表单数据" + +#: community/apps/setting/views/model.py:178 +#: community/apps/setting/views/model.py:179 +msgid "Get a list of model suppliers" +msgstr "获取模型供应商列表" + +#: community/apps/setting/views/model.py:200 +#: community/apps/setting/views/model.py:201 +msgid "Get a list of model types" +msgstr "获取模型类型列表" + +#: community/apps/setting/views/model.py:215 +#: community/apps/setting/views/model.py:216 +#: community/apps/setting/views/model.py:236 +#: community/apps/setting/views/model.py:254 +#: community/apps/setting/views/model.py:255 +msgid "Get the model creation form" +msgstr "获取模型创建表单" + +#: community/apps/setting/views/model.py:235 +msgid "Get model default parameters" +msgstr "获取模型默认参数" + +#: community/apps/setting/views/model_apply.py:23 +#: community/apps/setting/views/model_apply.py:24 +#: community/apps/setting/views/model_apply.py:33 +#: community/apps/setting/views/model_apply.py:34 +msgid "Vectorization documentation" +msgstr "向量化文档" + +#: community/apps/setting/views/model_apply.py:43 +#: community/apps/setting/views/model_apply.py:44 +msgid "Reorder documents" +msgstr "重排序文档" + +#: 
community/apps/setting/views/system_setting.py:29 +#: community/apps/setting/views/system_setting.py:30 +msgid "Create or update email settings" +msgstr "创建或更新邮箱设置" + +#: community/apps/setting/views/system_setting.py:31 +#: community/apps/setting/views/system_setting.py:45 +#: community/apps/setting/views/system_setting.py:57 +msgid "Email settings" +msgstr "邮箱设置" + +#: community/apps/setting/views/system_setting.py:41 +#: community/apps/setting/views/system_setting.py:42 +msgid "Test email settings" +msgstr "测试邮箱设置" + +#: community/apps/setting/views/system_setting.py:54 +#: community/apps/setting/views/system_setting.py:55 +msgid "Get email settings" +msgstr "获取邮箱设置" + +#: community/apps/setting/views/valid.py:26 +#: community/apps/setting/views/valid.py:27 +msgid "Get verification results" +msgstr "获取认证结果" + +#: community/apps/users/serializers/user_serializers.py:62 +#: community/apps/users/serializers/user_serializers.py:63 +msgid "System version number" +msgstr "系统版本号" + +#: community/apps/users/serializers/user_serializers.py:141 +#: community/apps/users/serializers/user_serializers.py:669 +msgid "Username must be 6-20 characters long" +msgstr "用户名必须是 6-20 个字符长" + +#: community/apps/users/serializers/user_serializers.py:148 +#: community/apps/users/serializers/user_serializers.py:156 +#: community/apps/users/serializers/user_serializers.py:676 +#: community/apps/users/serializers/user_serializers.py:748 +msgid "" +"The password must be 6-20 characters long and must be a combination of " +"letters, numbers, and special characters." 
+msgstr "密码必须是 6-20 个字符长,且必须是字母、数字和特殊字符的组合" + +#: community/apps/users/serializers/user_serializers.py:151 +#: community/apps/users/serializers/user_serializers.py:212 +#: community/apps/users/serializers/user_serializers.py:213 +#: community/apps/users/serializers/user_serializers.py:300 +#: community/apps/users/serializers/user_serializers.py:347 +#: community/apps/users/serializers/user_serializers.py:348 +#: community/apps/users/serializers/user_serializers.py:749 +#: community/apps/users/serializers/user_serializers.py:765 +#: community/apps/users/serializers/user_serializers.py:766 +msgid "Confirm Password" +msgstr "确认密码" + +#: community/apps/users/serializers/user_serializers.py:158 +#: community/apps/users/serializers/user_serializers.py:214 +#: community/apps/users/serializers/user_serializers.py:215 +#: community/apps/users/serializers/user_serializers.py:229 +#: community/apps/users/serializers/user_serializers.py:257 +#: community/apps/users/serializers/user_serializers.py:258 +#: community/apps/users/serializers/user_serializers.py:291 +#: community/apps/users/serializers/user_serializers.py:344 +#: community/apps/users/serializers/user_serializers.py:345 +#: community/apps/users/views/user.py:107 +#: community/apps/users/views/user.py:108 +msgid "Verification code" +msgstr "验证码" + +#: community/apps/users/serializers/user_serializers.py:232 +#: community/apps/users/serializers/user_serializers.py:259 +#: community/apps/users/serializers/user_serializers.py:360 +#: community/apps/users/serializers/user_serializers.py:439 +msgid "Type" +msgstr "类型" + +#: community/apps/users/serializers/user_serializers.py:236 +#: community/apps/users/serializers/user_serializers.py:362 +msgid "The type only supports register|reset_password" +msgstr "该类型仅支持 register|reset_password" + +#: community/apps/users/serializers/user_serializers.py:266 +msgid "Is it successful" +msgstr "是否成功" + +#: community/apps/users/serializers/user_serializers.py:268 +msgid "Error message" 
+msgstr "错误信息" + +#: community/apps/users/serializers/user_serializers.py:280 +msgid "language only support:" +msgstr "语言只支持:" + +#: community/apps/users/serializers/user_serializers.py:298 +#: community/apps/users/serializers/user_serializers.py:305 +#: community/apps/users/serializers/user_serializers.py:754 +msgid "" +"The confirmation password must be 6-20 characters long and must be a " +"combination of letters, numbers, and special characters." +msgstr "确认密码长度6-20个字符,必须字母、数字、特殊字符组合" + +#: community/apps/users/serializers/user_serializers.py:380 +#, python-brace-format +msgid "Do not send emails again within {seconds} seconds" +msgstr "{seconds} 秒内请勿重复发送邮件" + +#: community/apps/users/serializers/user_serializers.py:410 +msgid "" +"The email service has not been set up. Please contact the administrator to " +"set up the email service in [Email Settings]." +msgstr "邮箱服务未设置,请联系管理员在【邮箱设置】中设置邮箱服务" + +#: community/apps/users/serializers/user_serializers.py:421 +#, python-brace-format +msgid "【Intelligent knowledge base question and answer system-{action}】" +msgstr "【智能知识库问答系统-{action}】" + +#: community/apps/users/serializers/user_serializers.py:422 +#: community/apps/users/views/user.py:194 +#: community/apps/users/views/user.py:195 +msgid "User registration" +msgstr "用户注册" + +#: community/apps/users/serializers/user_serializers.py:422 +#: community/apps/users/views/user.py:212 +#: community/apps/users/views/user.py:213 +#: community/apps/users/views/user.py:301 +#: community/apps/users/views/user.py:302 +msgid "Change password" +msgstr "修改密码" + +#: community/apps/users/serializers/user_serializers.py:474 +#: community/apps/users/serializers/user_serializers.py:475 +msgid "Permissions" +msgstr "权限列表" + +#: community/apps/users/serializers/user_serializers.py:509 +#: community/apps/users/serializers/user_serializers.py:610 +#: community/apps/users/serializers/user_serializers.py:618 +msgid "Email or username" +msgstr "邮箱或用户名" + +#: 
community/apps/users/serializers/user_serializers.py:560 +msgid "All" +msgstr "全部" + +#: community/apps/users/serializers/user_serializers.py:561 +msgid "Me" +msgstr "我的" + +#: community/apps/users/serializers/user_serializers.py:583 +#: community/apps/users/serializers/user_serializers.py:680 +#: community/apps/users/serializers/user_serializers.py:705 +#: community/apps/users/serializers/user_serializers.py:719 +#: community/apps/users/serializers/user_serializers.py:736 +msgid "Phone" +msgstr "手机号" + +#: community/apps/users/serializers/user_serializers.py:587 +msgid "Source" +msgstr "来源" + +#: community/apps/users/serializers/user_serializers.py:588 +#: community/apps/users/serializers/user_serializers.py:678 +#: community/apps/users/serializers/user_serializers.py:706 +#: community/apps/users/serializers/user_serializers.py:717 +#: community/apps/users/serializers/user_serializers.py:735 +msgid "Name" +msgstr "名字" + +#: community/apps/users/serializers/user_serializers.py:727 +msgid "Email is already in use" +msgstr "邮箱已被使用" + +#: community/apps/users/serializers/user_serializers.py:808 +msgid "Unable to delete administrator" +msgstr "不能删除管理员" + +#: community/apps/users/serializers/user_serializers.py:845 +msgid "Cannot modify administrator status" +msgstr "不能修改管理员状态" + +#: community/apps/users/views/user.py:37 community/apps/users/views/user.py:38 +msgid "Get MaxKB related information" +msgstr "获取 MaxKB 相关信息" + +#: community/apps/users/views/user.py:40 +msgid "System parameters" +msgstr "系统参数" + +#: community/apps/users/views/user.py:50 community/apps/users/views/user.py:51 +msgid "Get current user information" +msgstr "获取当前用户信息" + +#: community/apps/users/views/user.py:63 community/apps/users/views/user.py:64 +msgid "Get user list" +msgstr "获取用户列表" + +#: community/apps/users/views/user.py:67 community/apps/users/views/user.py:90 +#: community/apps/users/views/user.py:116 +#: community/apps/users/views/user.py:136 +#: community/apps/users/views/user.py:152 
+#: community/apps/users/views/user.py:178 +#: community/apps/users/views/user.py:199 +#: community/apps/users/views/user.py:217 +#: community/apps/users/views/user.py:234 +#: community/apps/users/views/user.py:249 +#: community/apps/users/views/user.py:373 +msgid "User" +msgstr "用户" + +#: community/apps/users/views/user.py:79 community/apps/users/views/user.py:80 +msgid "Switch Language" +msgstr "切换语言" + +#: community/apps/users/views/user.py:101 +#: community/apps/users/views/user.py:102 +msgid "Modify current user password" +msgstr "修改当前用户密码" + +#: community/apps/users/views/user.py:125 +msgid "Failed to change password" +msgstr "修改密码失败" + +#: community/apps/users/views/user.py:133 +#: community/apps/users/views/user.py:134 +msgid "Send email to current user" +msgstr "给当前用户发送邮件" + +#: community/apps/users/views/user.py:149 +#: community/apps/users/views/user.py:150 +msgid "Sign out" +msgstr "登出" + +#: community/apps/users/views/user.py:205 +msgid "Registration successful" +msgstr "注册成功" + +#: community/apps/users/views/user.py:229 +#: community/apps/users/views/user.py:230 +msgid "Check whether the verification code is correct" +msgstr "检查验证码是否正确" + +#: community/apps/users/views/user.py:244 +#: community/apps/users/views/user.py:245 +msgid "Send email" +msgstr "发送邮件" + +#: community/apps/users/views/user.py:262 +#: community/apps/users/views/user.py:263 +msgid "Add user" +msgstr "添加用户" + +#: community/apps/users/views/user.py:266 +#: community/apps/users/views/user.py:282 +#: community/apps/users/views/user.py:306 +#: community/apps/users/views/user.py:324 +#: community/apps/users/views/user.py:338 +#: community/apps/users/views/user.py:354 +msgid "User management" +msgstr "用户管理" + +#: community/apps/users/views/user.py:280 +#: community/apps/users/views/user.py:281 +msgid "Get user paginated list" +msgstr "获取用户分页列表" + +#: community/apps/users/views/user.py:320 +#: community/apps/users/views/user.py:321 +msgid "Delete user" +msgstr "删除用户" + +#: 
community/apps/users/views/user.py:334 +#: community/apps/users/views/user.py:335 +msgid "Get user information" +msgstr "获取用户信息" + +#: community/apps/users/views/user.py:349 +#: community/apps/users/views/user.py:350 +msgid "Update user information" +msgstr "更新用户信息" + +#: community/apps/users/views/user.py:369 +#: community/apps/users/views/user.py:370 +msgid "Get user list by type" +msgstr "按类型获取用户列表" + +#~ msgid "MaxKB table template.csv" +#~ msgstr "MaxKB表格模版.csv" + +#~ msgid "MaxKB table template.xlsx" +#~ msgstr "MaxKB表格模版.xlsx" + +msgid "Fail" +msgstr "失败" + +msgid "Menu" +msgstr "操作菜单" + +msgid "Operate" +msgstr "操作" + +msgid "Operate user" +msgstr "操作用户" + +msgid "Ip Address" +msgstr "IP地址" + +msgid "API Details" +msgstr "API详情" + +msgid "Operate Time" +msgstr "操作时间" + +msgid "System Settings/API Key" +msgstr "系统 API Key" + +msgid "Appearance Settings" +msgstr "外观设置" + +msgid "Conversation Log" +msgstr "对话日志" + +msgid "login authentication" +msgstr "登录认证" + +msgid "Paragraph" +msgstr "段落" + +msgid "Batch generate related" +msgstr "分段生成问题" + +msgid "Application access" +msgstr "应用接入" + +msgid "Add internal function" +msgstr "添加内置函数" + +msgid "Batch generate related documents" +msgstr "批量生成问题" + +msgid "No permission to use this function {name}" +msgstr "无权使用此模型 {name}" + +msgid "Function {name} is unavailable" +msgstr "函数{name} 不可用" + +msgid "Field: {name} Type: {_type} Value: {value} Type error" +msgstr "字段: {name} 类型: {_type} 值: {value} 类型错误" + +msgid "Field: {name} Type: {_type} Value: {value} Unsupported types" +msgstr "字段: {name} 类型: {_type} 值: {value} 不支持的类型" + +msgid "Field: {name} No value set" +msgstr "字段: {name} 未设置值" + +msgid "Generate related" +msgstr "生成问题" + +msgid "Obtain graphical captcha" +msgstr "获取图形验证码" + +msgid "Captcha code error or expiration" +msgstr "验证码错误或过期" + +msgid "captcha" +msgstr "验证码" \ No newline at end of file diff --git a/apps/locales/zh_Hant/LC_MESSAGES/django.po b/apps/locales/zh_Hant/LC_MESSAGES/django.po new file mode 
100644 index 00000000000..8bf746a89c8 --- /dev/null +++ b/apps/locales/zh_Hant/LC_MESSAGES/django.po @@ -0,0 +1,7675 @@ +# SOME DESCRIPTIVE TITLE. +# Copyright (C) YEAR THE PACKAGE'S COPYRIGHT HOLDER +# This file is distributed under the same license as the PACKAGE package. +# FIRST AUTHOR , YEAR. +# +msgid "" +msgstr "" +"Project-Id-Version: PACKAGE VERSION\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2025-03-20 14:22+0800\n" +"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" +"Last-Translator: FULL NAME \n" +"Language-Team: LANGUAGE \n" +"Language: \n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Plural-Forms: nplurals=1; plural=0;\n" + +#: apps/xpack/auth/user_key.py:26 +#: apps/xpack/serializers/license_serializers.py:96 +#: apps/xpack/serializers/license_tools.py:109 +msgid "The license is invalid" +msgstr "License 無效" + +#: apps/xpack/auth/user_key.py:32 apps/xpack/auth/user_key.py:34 +msgid "secret_key is invalid" +msgstr "secret key無效" + +#: apps/xpack/middleware/swagger_middleware.py:19 +msgid "The license has not been uploaded or the license has expired" +msgstr "License 未上傳或 License 已過期" + +#: apps/xpack/serializers/application_setting_serializer.py:20 +msgid "theme color" +msgstr "主題顏色" + +#: apps/xpack/serializers/application_setting_serializer.py:22 +msgid "header font color" +msgstr "頭部字體顏色" + +#: apps/xpack/serializers/application_setting_serializer.py:26 +msgid "float location type" +msgstr "浮窗位置類型" + +#: apps/xpack/serializers/application_setting_serializer.py:27 +msgid "float location value" +msgstr "浮窗位置值" + +#: apps/xpack/serializers/application_setting_serializer.py:31 +msgid "float location x" +msgstr "浮窗位置 x" + +#: apps/xpack/serializers/application_setting_serializer.py:32 +msgid "float location y" +msgstr "浮窗位置 y" + +#: apps/xpack/serializers/application_setting_serializer.py:36 +#: apps/xpack/swagger_api/application_setting_api.py:23 +msgid "show source" +msgstr "是否顯示來源" + +#: 
apps/xpack/serializers/application_setting_serializer.py:37 +#: community/apps/application/serializers/application_serializers.py:354 +#: community/apps/application/swagger_api/application_api.py:169 +#: community/apps/application/swagger_api/application_api.py:170 +#: community/apps/users/serializers/user_serializers.py:273 +#: community/apps/users/views/user.py:85 community/apps/users/views/user.py:86 +msgid "language" +msgstr "語言" + +#: apps/xpack/serializers/application_setting_serializer.py:38 +#: apps/xpack/swagger_api/application_setting_api.py:30 +msgid "show history" +msgstr "是否顯示歷史記錄" + +#: apps/xpack/serializers/application_setting_serializer.py:39 +#: apps/xpack/swagger_api/application_setting_api.py:37 +msgid "draggable" +msgstr "是否可拖動" + +#: apps/xpack/serializers/application_setting_serializer.py:40 +#: apps/xpack/swagger_api/application_setting_api.py:44 +msgid "show guide" +msgstr "是否顯示引導圖" + +#: apps/xpack/serializers/application_setting_serializer.py:41 +#: apps/xpack/swagger_api/application_setting_api.py:51 +msgid "avatar" +msgstr "頭像" + +#: apps/xpack/serializers/application_setting_serializer.py:42 +msgid "avatar url" +msgstr "頭像地址" + +#: apps/xpack/serializers/application_setting_serializer.py:43 +#: apps/xpack/swagger_api/application_setting_api.py:86 +msgid "user avatar" +msgstr "用戶頭像" + +#: apps/xpack/serializers/application_setting_serializer.py:44 +msgid "user avatar url" +msgstr "用戶頭像地址" + +#: apps/xpack/serializers/application_setting_serializer.py:45 +#: apps/xpack/swagger_api/application_setting_api.py:58 +msgid "float icon" +msgstr "浮窗圖標" + +#: apps/xpack/serializers/application_setting_serializer.py:46 +msgid "float icon url" +msgstr "浮窗圖標地址" + +#: apps/xpack/serializers/application_setting_serializer.py:47 +#: apps/xpack/swagger_api/application_setting_api.py:65 +msgid "disclaimer" +msgstr "免責聲明" + +#: apps/xpack/serializers/application_setting_serializer.py:48 +#: apps/xpack/swagger_api/application_setting_api.py:72 +msgid 
"disclaimer value" +msgstr "免責聲明的值" + +#: apps/xpack/serializers/application_setting_serializer.py:70 +#: apps/xpack/serializers/dataset_lark_serializer.py:373 +#: community/apps/dataset/serializers/dataset_serializers.py:548 +msgid "application id" +msgstr "應用 id" + +#: apps/xpack/serializers/application_setting_serializer.py:96 +#: apps/xpack/serializers/platform_serializer.py:83 +#: apps/xpack/serializers/platform_serializer.py:105 +#: apps/xpack/serializers/platform_serializer.py:174 +#: apps/xpack/serializers/platform_serializer.py:185 +#: community/apps/application/serializers/application_serializers.py:1237 +#: community/apps/application/serializers/chat_message_serializers.py:424 +#: community/apps/application/serializers/chat_serializers.py:294 +#: community/apps/application/serializers/chat_serializers.py:396 +msgid "Application does not exist" +msgstr "應用不存在" + +#: apps/xpack/serializers/application_setting_serializer.py:116 +msgid "Float location field type error" +msgstr "浮窗位置字段類型錯誤" + +#: apps/xpack/serializers/application_setting_serializer.py:122 +msgid "Custom theme field type error" +msgstr "自定義主題字段類型錯誤" + +#: apps/xpack/serializers/auth_config_serializer.py:19 +msgid "LDAP server cannot be empty" +msgstr "LDAP 服務器不能爲空" + +#: apps/xpack/serializers/auth_config_serializer.py:20 +msgid "Base DN cannot be empty" +msgstr "Base DN 不能爲空" + +#: apps/xpack/serializers/auth_config_serializer.py:21 +msgid "Password cannot be empty" +msgstr "密碼不能爲空" + +#: apps/xpack/serializers/auth_config_serializer.py:22 +msgid "OU cannot be empty" +msgstr "OU 不能爲空" + +#: apps/xpack/serializers/auth_config_serializer.py:23 +msgid "LDAP filter cannot be empty" +msgstr "LDAP 過濾器不能爲空" + +#: apps/xpack/serializers/auth_config_serializer.py:24 +msgid "LDAP mapping cannot be empty" +msgstr "LDAP 映射不能爲空" + +#: apps/xpack/serializers/auth_config_serializer.py:29 +msgid "Authorization address cannot be empty" +msgstr "授權地址不能爲空" + +#: 
apps/xpack/serializers/auth_config_serializer.py:31 +msgid "Token address cannot be empty" +msgstr "令牌地址不能爲空" + +#: apps/xpack/serializers/auth_config_serializer.py:33 +msgid "User information address cannot be empty" +msgstr "用戶信息地址不能爲空" + +#: apps/xpack/serializers/auth_config_serializer.py:34 +msgid "Scope cannot be empty" +msgstr "Scope 不能爲空" + +#: apps/xpack/serializers/auth_config_serializer.py:35 +msgid "Client ID cannot be empty" +msgstr "Client ID 不能爲空" + +#: apps/xpack/serializers/auth_config_serializer.py:36 +msgid "Client secret cannot be empty" +msgstr "Client secret 不能爲空" + +#: apps/xpack/serializers/auth_config_serializer.py:38 +msgid "Redirect address cannot be empty" +msgstr "重定向地址不能爲空" + +#: apps/xpack/serializers/auth_config_serializer.py:39 +msgid "Field mapping cannot be empty" +msgstr "字段映射不能爲空" + +#: apps/xpack/serializers/auth_config_serializer.py:166 +#: apps/xpack/serializers/qr_login/qr_login.py:33 +#: community/apps/users/serializers/user_serializers.py:89 +msgid "The user has been disabled, please contact the administrator!" +msgstr "用戶已被禁用,請聯繫管理員!" + +#: apps/xpack/serializers/cas.py:32 +msgid "HttpClient query failed: " +msgstr "HttpClient 查詢失敗:" + +#: apps/xpack/serializers/cas.py:56 +msgid "CAS authentication failed" +msgstr "CAS 認證失敗" + +#: apps/xpack/serializers/channel/chat_manage.py:76 +#: apps/xpack/serializers/channel/chat_manage.py:134 +msgid "" +"Sorry, no relevant content was found. Please re-describe your problem or " +"provide more information. " +msgstr "抱歉,沒有找到相關內容。請重新描述您的問題或提供更多信息。" + +#: apps/xpack/serializers/channel/chat_manage.py:82 +msgid "Think: " +msgstr "思考過程: " + +#: apps/xpack/serializers/channel/chat_manage.py:85 +#: apps/xpack/serializers/channel/chat_manage.py:87 +msgid "AI reply: " +msgstr "AI 回覆: " + +#: apps/xpack/serializers/channel/chat_manage.py:298 +msgid "Thinking, please wait a moment!" +msgstr "正在思考中,請稍等......" 
+ +#: apps/xpack/serializers/channel/ding_talk.py:19 +#: apps/xpack/serializers/channel/wechat.py:89 +#: apps/xpack/serializers/channel/wechat.py:130 +#: apps/xpack/serializers/channel/wecom.py:76 +#: apps/xpack/serializers/channel/wecom.py:259 +msgid "The corresponding platform configuration was not found" +msgstr "對應的平臺配置未找到" + +#: apps/xpack/serializers/channel/ding_talk.py:27 +#: apps/xpack/serializers/channel/feishu.py:112 +msgid "Currently only text messages are supported" +msgstr "目前只支持文本消息" + +#: apps/xpack/serializers/channel/ding_talk.py:91 +#: apps/xpack/serializers/channel/wechat.py:161 +#: apps/xpack/serializers/channel/wecom.py:189 +msgid "Image download failed, check network" +msgstr "圖片下載失敗,檢查網絡" + +#: apps/xpack/serializers/channel/ding_talk.py:92 +#: apps/xpack/serializers/channel/wechat.py:159 +#: apps/xpack/serializers/channel/wecom.py:185 +msgid "Please analyze the content of the image." +msgstr "請分析圖片內容。" + +#: apps/xpack/serializers/channel/ding_talk.py:95 +#, python-brace-format +msgid "DingTalk application: {user}" +msgstr "釘釘應用:{user}" + +#: apps/xpack/serializers/channel/ding_talk.py:106 +#: apps/xpack/serializers/channel/ding_talk.py:151 +msgid "Content generated by AI" +msgstr "內容由 AI 生成" + +#: apps/xpack/serializers/channel/feishu.py:87 +#: apps/xpack/serializers/channel/feishu.py:107 +msgid "Lark application: " +msgstr "飛書應用:" + +#: apps/xpack/serializers/channel/slack.py:116 +#| msgid "The corresponding platform configuration was not found" +msgid "The corresponding platform configuration for Slack was not found" +msgstr "未找到Slack的對應平臺配置" + +#: apps/xpack/serializers/channel/slack.py:206 +msgid "Thinking..." +msgstr "思考中..." + +#: apps/xpack/serializers/channel/slack.py:321 +msgid "Invalid json format." 
+msgstr "json格式無效。" + +#: apps/xpack/serializers/channel/slack.py:327 +#| msgid "Invalid access_token" +msgid "Invalid Slack request" +msgstr "Slack請求無效" + +#: apps/xpack/serializers/channel/slack.py:335 +#| msgid "DingTalk application: {user}" +msgid "Slack application: {user}" +msgstr "Slack 應用:{user}" + +#: apps/xpack/serializers/channel/slack.py:471 +msgid "Stop" +msgstr "停止" + +#: apps/xpack/serializers/channel/wechat.py:141 +#, python-brace-format +msgid "WeChat Official Account: {account}" +msgstr "微信公衆號:{account}" + +#: apps/xpack/serializers/channel/wechat.py:148 +#: apps/xpack/serializers/channel/wecom.py:171 +#: apps/xpack/serializers/channel/wecom.py:175 +msgid "" +"The app does not enable the speech-to-text function or the speech-to-text " +"function fails." +msgstr "應用未開啓語音轉文字功能或語音轉文字功能失敗。" + +#: apps/xpack/serializers/channel/wechat.py:187 +msgid "Message types not supported yet" +msgstr "暫時不支持該類型的消息" + +#: apps/xpack/serializers/channel/wechat.py:194 +msgid "Welcome to subscribe" +msgstr "歡迎訂閱" + +#: apps/xpack/serializers/channel/wecom.py:84 +msgid "Enterprise WeChat user: " +msgstr "企業微信用戶:" + +#: apps/xpack/serializers/channel/wecom.py:95 +msgid "Enterprise WeChat customer service: " +msgstr "企業微信客服:" + +#: apps/xpack/serializers/channel/wecom.py:132 +#: apps/xpack/serializers/channel/wecom.py:148 +msgid "This type of message is not supported yet" +msgstr "暫時不支持該類型的消息" + +#: apps/xpack/serializers/channel/wecom.py:254 +msgid "Signature missing" +msgstr "簽名缺失" + +#: apps/xpack/serializers/channel/wecom.py:266 +#: apps/xpack/serializers/channel/wecom.py:273 +#, python-brace-format +msgid "An error occurred while processing the GET request {e}" +msgstr "GET 請求處理時發生錯誤 {e}" + +#: apps/xpack/serializers/dataset_lark_serializer.py:58 +#: community/apps/dataset/serializers/dataset_serializers.py:82 +#: community/apps/dataset/serializers/dataset_serializers.py:214 +#: community/apps/dataset/serializers/dataset_serializers.py:295 +#: 
community/apps/dataset/serializers/dataset_serializers.py:296 +#: community/apps/dataset/serializers/dataset_serializers.py:357 +#: community/apps/dataset/serializers/dataset_serializers.py:358 +#: community/apps/dataset/serializers/dataset_serializers.py:502 +#: community/apps/dataset/serializers/dataset_serializers.py:503 +#: community/apps/dataset/serializers/dataset_serializers.py:568 +#: community/apps/dataset/serializers/dataset_serializers.py:607 +#: community/apps/dataset/serializers/dataset_serializers.py:701 +#: community/apps/dataset/serializers/dataset_serializers.py:933 +#: community/apps/dataset/serializers/dataset_serializers.py:934 +#: community/apps/dataset/serializers/document_serializers.py:816 +#: community/apps/function_lib/serializers/function_lib_serializer.py:141 +#: community/apps/function_lib/serializers/function_lib_serializer.py:186 +#: community/apps/function_lib/serializers/function_lib_serializer.py:203 +#: community/apps/function_lib/serializers/function_lib_serializer.py:262 +#: community/apps/setting/serializers/provider_serializers.py:76 +#: community/apps/setting/serializers/provider_serializers.py:127 +#: community/apps/setting/serializers/provider_serializers.py:174 +#: community/apps/setting/serializers/provider_serializers.py:256 +#: community/apps/setting/serializers/provider_serializers.py:277 +#: community/apps/setting/serializers/provider_serializers.py:301 +#: community/apps/setting/serializers/team_serializers.py:42 +#: community/apps/users/serializers/user_serializers.py:272 +msgid "user id" +msgstr "用戶 id" + +#: apps/xpack/serializers/dataset_lark_serializer.py:61 +#: apps/xpack/serializers/dataset_lark_serializer.py:112 +#: apps/xpack/serializers/dataset_lark_serializer.py:113 +#: apps/xpack/serializers/dataset_lark_serializer.py:367 +#: community/apps/dataset/serializers/dataset_serializers.py:137 +#: community/apps/dataset/serializers/dataset_serializers.py:201 +#: 
community/apps/dataset/serializers/dataset_serializers.py:221 +#: community/apps/dataset/serializers/dataset_serializers.py:244 +#: community/apps/dataset/serializers/dataset_serializers.py:273 +#: community/apps/dataset/serializers/dataset_serializers.py:274 +#: community/apps/dataset/serializers/dataset_serializers.py:291 +#: community/apps/dataset/serializers/dataset_serializers.py:292 +#: community/apps/dataset/serializers/dataset_serializers.py:319 +#: community/apps/dataset/serializers/dataset_serializers.py:353 +#: community/apps/dataset/serializers/dataset_serializers.py:354 +#: community/apps/dataset/serializers/dataset_serializers.py:382 +#: community/apps/dataset/serializers/dataset_serializers.py:383 +#: community/apps/dataset/serializers/dataset_serializers.py:498 +#: community/apps/dataset/serializers/dataset_serializers.py:499 +#: community/apps/dataset/serializers/dataset_serializers.py:527 +#: community/apps/dataset/serializers/dataset_serializers.py:528 +#: community/apps/dataset/serializers/dataset_serializers.py:542 +#: community/apps/dataset/serializers/dataset_serializers.py:907 +#: community/apps/dataset/serializers/dataset_serializers.py:908 +#: community/apps/dataset/serializers/dataset_serializers.py:929 +#: community/apps/dataset/serializers/dataset_serializers.py:930 +msgid "dataset name" +msgstr "知識庫名稱" + +#: apps/xpack/serializers/dataset_lark_serializer.py:63 +#: apps/xpack/serializers/dataset_lark_serializer.py:114 +#: apps/xpack/serializers/dataset_lark_serializer.py:115 +#: apps/xpack/serializers/dataset_lark_serializer.py:369 +#: community/apps/dataset/serializers/dataset_serializers.py:142 +#: community/apps/dataset/serializers/dataset_serializers.py:206 +#: community/apps/dataset/serializers/dataset_serializers.py:226 +#: community/apps/dataset/serializers/dataset_serializers.py:249 +#: community/apps/dataset/serializers/dataset_serializers.py:278 +#: community/apps/dataset/serializers/dataset_serializers.py:279 +#: 
community/apps/dataset/serializers/dataset_serializers.py:293 +#: community/apps/dataset/serializers/dataset_serializers.py:294 +#: community/apps/dataset/serializers/dataset_serializers.py:324 +#: community/apps/dataset/serializers/dataset_serializers.py:355 +#: community/apps/dataset/serializers/dataset_serializers.py:356 +#: community/apps/dataset/serializers/dataset_serializers.py:384 +#: community/apps/dataset/serializers/dataset_serializers.py:385 +#: community/apps/dataset/serializers/dataset_serializers.py:500 +#: community/apps/dataset/serializers/dataset_serializers.py:501 +#: community/apps/dataset/serializers/dataset_serializers.py:529 +#: community/apps/dataset/serializers/dataset_serializers.py:530 +#: community/apps/dataset/serializers/dataset_serializers.py:544 +#: community/apps/dataset/serializers/dataset_serializers.py:909 +#: community/apps/dataset/serializers/dataset_serializers.py:910 +#: community/apps/dataset/serializers/dataset_serializers.py:931 +#: community/apps/dataset/serializers/dataset_serializers.py:932 +msgid "dataset description" +msgstr "知識庫描述" + +#: apps/xpack/serializers/dataset_lark_serializer.py:65 +#: apps/xpack/serializers/dataset_lark_serializer.py:118 +#: apps/xpack/serializers/dataset_lark_serializer.py:377 +msgid "app id" +msgstr "" + +#: apps/xpack/serializers/dataset_lark_serializer.py:66 +#: apps/xpack/serializers/dataset_lark_serializer.py:119 +#: apps/xpack/serializers/dataset_lark_serializer.py:120 +#: apps/xpack/serializers/dataset_lark_serializer.py:378 +msgid "app secret" +msgstr "" + +#: apps/xpack/serializers/dataset_lark_serializer.py:67 +#: apps/xpack/serializers/dataset_lark_serializer.py:121 +#: apps/xpack/serializers/dataset_lark_serializer.py:122 +#: apps/xpack/serializers/dataset_lark_serializer.py:132 +#: apps/xpack/serializers/dataset_lark_serializer.py:165 +#: apps/xpack/serializers/dataset_lark_serializer.py:379 +msgid "folder token" +msgstr "" + +#: 
apps/xpack/serializers/dataset_lark_serializer.py:69 +#: apps/xpack/serializers/dataset_lark_serializer.py:116 +#: apps/xpack/serializers/dataset_lark_serializer.py:117 +#: community/apps/dataset/serializers/dataset_serializers.py:231 +#: community/apps/dataset/serializers/dataset_serializers.py:254 +#: community/apps/dataset/serializers/dataset_serializers.py:330 +#: community/apps/dataset/serializers/dataset_serializers.py:386 +#: community/apps/dataset/serializers/dataset_serializers.py:387 +#: community/apps/dataset/serializers/dataset_serializers.py:531 +#: community/apps/dataset/serializers/dataset_serializers.py:532 +msgid "embedding mode" +msgstr "向量模型" + +#: apps/xpack/serializers/dataset_lark_serializer.py:79 +#: apps/xpack/serializers/dataset_lark_serializer.py:389 +msgid "Network error or folder token error!" +msgstr "網絡錯誤或資料夾token錯誤!" + +#: apps/xpack/serializers/dataset_lark_serializer.py:87 +#: apps/xpack/serializers/dataset_lark_serializer.py:444 +#: community/apps/dataset/serializers/dataset_serializers.py:424 +#: community/apps/dataset/serializers/dataset_serializers.py:476 +#: community/apps/dataset/serializers/dataset_serializers.py:865 +msgid "Knowledge base name duplicate!" +msgstr "知識庫名稱重複!" 
+ +#: apps/xpack/serializers/dataset_lark_serializer.py:130 +#: apps/xpack/serializers/dataset_lark_serializer.py:164 +#: apps/xpack/serializers/dataset_lark_serializer.py:201 +#: apps/xpack/serializers/dataset_lark_serializer.py:221 +#: apps/xpack/serializers/dataset_lark_serializer.py:346 +#: apps/xpack/serializers/dataset_lark_serializer.py:363 +#: community/apps/common/swagger_api/common_api.py:68 +#: community/apps/common/swagger_api/common_api.py:69 +#: community/apps/dataset/serializers/dataset_serializers.py:84 +#: community/apps/dataset/serializers/dataset_serializers.py:93 +#: community/apps/dataset/serializers/dataset_serializers.py:605 +#: community/apps/dataset/serializers/dataset_serializers.py:688 +#: community/apps/dataset/serializers/dataset_serializers.py:699 +#: community/apps/dataset/serializers/dataset_serializers.py:955 +#: community/apps/dataset/serializers/document_serializers.py:169 +#: community/apps/dataset/serializers/document_serializers.py:286 +#: community/apps/dataset/serializers/document_serializers.py:407 +#: community/apps/dataset/serializers/document_serializers.py:573 +#: community/apps/dataset/serializers/document_serializers.py:1055 +#: community/apps/dataset/serializers/document_serializers.py:1216 +#: community/apps/dataset/serializers/paragraph_serializers.py:96 +#: community/apps/dataset/serializers/paragraph_serializers.py:162 +#: community/apps/dataset/serializers/paragraph_serializers.py:195 +#: community/apps/dataset/serializers/paragraph_serializers.py:196 +#: community/apps/dataset/serializers/paragraph_serializers.py:208 +#: community/apps/dataset/serializers/paragraph_serializers.py:266 +#: community/apps/dataset/serializers/paragraph_serializers.py:285 +#: community/apps/dataset/serializers/paragraph_serializers.py:302 +#: community/apps/dataset/serializers/paragraph_serializers.py:459 +#: community/apps/dataset/serializers/paragraph_serializers.py:567 +#: 
community/apps/dataset/serializers/paragraph_serializers.py:638 +#: community/apps/dataset/serializers/paragraph_serializers.py:647 +#: community/apps/dataset/serializers/paragraph_serializers.py:715 +#: community/apps/dataset/serializers/paragraph_serializers.py:716 +#: community/apps/dataset/serializers/paragraph_serializers.py:732 +#: community/apps/dataset/serializers/problem_serializers.py:87 +#: community/apps/dataset/serializers/problem_serializers.py:112 +#: community/apps/dataset/serializers/problem_serializers.py:135 +#: community/apps/dataset/serializers/problem_serializers.py:192 +#: community/apps/dataset/swagger_api/problem_api.py:28 +#: community/apps/dataset/swagger_api/problem_api.py:29 +#: community/apps/dataset/swagger_api/problem_api.py:77 +#: community/apps/dataset/swagger_api/problem_api.py:96 +#: community/apps/dataset/swagger_api/problem_api.py:149 +#: community/apps/dataset/swagger_api/problem_api.py:177 +msgid "dataset id" +msgstr "知識庫 id" + +#: apps/xpack/serializers/dataset_lark_serializer.py:145 +#: apps/xpack/serializers/dataset_lark_serializer.py:146 +#: apps/xpack/serializers/dataset_lark_serializer.py:212 +#: community/apps/dataset/serializers/document_serializers.py:812 +#: community/apps/dataset/serializers/document_serializers.py:813 +#: community/apps/setting/swagger_api/provide_api.py:22 +#: community/apps/setting/swagger_api/provide_api.py:48 +#: community/apps/setting/swagger_api/provide_api.py:49 +#: community/apps/setting/swagger_api/provide_api.py:76 +#: community/apps/setting/swagger_api/provide_api.py:77 +#: community/apps/setting/swagger_api/provide_api.py:143 +#: community/apps/setting/swagger_api/provide_api.py:144 +msgid "name" +msgstr "名稱" + +#: apps/xpack/serializers/dataset_lark_serializer.py:147 +#: apps/xpack/serializers/dataset_lark_serializer.py:148 +#: apps/xpack/serializers/dataset_lark_serializer.py:211 +#: community/apps/application/serializers/application_serializers.py:257 +msgid "token" +msgstr "token" 
+ +#: apps/xpack/serializers/dataset_lark_serializer.py:149 +#: apps/xpack/serializers/dataset_lark_serializer.py:150 +#: apps/xpack/serializers/dataset_lark_serializer.py:210 +#: community/apps/application/flow/step_node/function_node/i_function_node.py:26 +#: community/apps/dataset/serializers/document_serializers.py:229 +#: community/apps/function_lib/serializers/function_lib_serializer.py:72 +#: community/apps/function_lib/swagger_api/function_lib_api.py:92 +#: community/apps/function_lib/swagger_api/function_lib_api.py:138 +#: community/apps/function_lib/swagger_api/function_lib_api.py:184 +#: community/apps/setting/serializers/team_serializers.py:59 +#: community/apps/setting/serializers/team_serializers.py:74 +#: community/apps/setting/serializers/team_serializers.py:85 +#: community/apps/setting/serializers/valid_serializers.py:37 +msgid "type" +msgstr "類型" + +#: apps/xpack/serializers/dataset_lark_serializer.py:151 +#: apps/xpack/serializers/dataset_lark_serializer.py:152 +#| msgid "id does not exist" +msgid "is exist" +msgstr "是否存在" + +#: apps/xpack/serializers/dataset_lark_serializer.py:173 +#: apps/xpack/serializers/dataset_lark_serializer.py:230 +#: apps/xpack/task/sync.py:120 +#| msgid "Knowledge base id" +msgid "Knowledge base not found!" +msgstr "知識庫不存在!" + +#: apps/xpack/serializers/dataset_lark_serializer.py:185 +#: apps/xpack/serializers/dataset_lark_serializer.py:252 +msgid "Failed to get lark document list!" +msgstr "獲取飛書檔案清單失敗!"
+ +#: apps/xpack/serializers/dataset_lark_serializer.py:262 +#: community/apps/common/swagger_api/common_api.py:70 +#: community/apps/common/swagger_api/common_api.py:71 +#: community/apps/dataset/serializers/document_serializers.py:293 +#: community/apps/dataset/serializers/document_serializers.py:386 +#: community/apps/dataset/serializers/document_serializers.py:490 +#: community/apps/dataset/serializers/document_serializers.py:572 +#: community/apps/dataset/serializers/document_serializers.py:581 +#: community/apps/dataset/serializers/document_serializers.py:586 +#: community/apps/dataset/serializers/document_serializers.py:854 +#: community/apps/dataset/serializers/document_serializers.py:982 +#: community/apps/dataset/serializers/document_serializers.py:1191 +#: community/apps/dataset/serializers/paragraph_serializers.py:98 +#: community/apps/dataset/serializers/paragraph_serializers.py:167 +#: community/apps/dataset/serializers/paragraph_serializers.py:212 +#: community/apps/dataset/serializers/paragraph_serializers.py:271 +#: community/apps/dataset/serializers/paragraph_serializers.py:286 +#: community/apps/dataset/serializers/paragraph_serializers.py:303 +#: community/apps/dataset/serializers/paragraph_serializers.py:426 +#: community/apps/dataset/serializers/paragraph_serializers.py:431 +#: community/apps/dataset/serializers/paragraph_serializers.py:462 +#: community/apps/dataset/serializers/paragraph_serializers.py:570 +#: community/apps/dataset/serializers/paragraph_serializers.py:642 +#: community/apps/dataset/serializers/paragraph_serializers.py:650 +#: community/apps/dataset/serializers/paragraph_serializers.py:682 +#: community/apps/dataset/serializers/paragraph_serializers.py:717 +#: community/apps/dataset/serializers/paragraph_serializers.py:718 +#: community/apps/dataset/serializers/paragraph_serializers.py:733 +#: community/apps/dataset/serializers/problem_serializers.py:58 +#: community/apps/dataset/swagger_api/problem_api.py:64 +msgid "document 
id" +msgstr "文檔 id" + +#: apps/xpack/serializers/dataset_lark_serializer.py:269 +#: apps/xpack/serializers/dataset_lark_serializer.py:289 +#: community/apps/dataset/serializers/document_serializers.py:497 +#: community/apps/dataset/serializers/document_serializers.py:593 +#: community/apps/dataset/serializers/document_serializers.py:1197 +msgid "document id not exist" +msgstr "文檔 id 不存在" + +#: apps/xpack/serializers/dataset_lark_serializer.py:271 +#| msgid "Synchronization is only supported for web site types" +msgid "Synchronization is only supported for lark documents" +msgstr "僅支持飛書文檔的同步" + +#: apps/xpack/serializers/dataset_lark_serializer.py:374 +#: community/apps/dataset/serializers/dataset_serializers.py:549 +#: community/apps/dataset/serializers/dataset_serializers.py:914 +#: community/apps/dataset/serializers/dataset_serializers.py:915 +msgid "application id list" +msgstr "應用 id 列表" + +#: apps/xpack/serializers/dataset_lark_serializer.py:416 +#: community/apps/dataset/serializers/dataset_serializers.py:175 +#: community/apps/dataset/serializers/dataset_serializers.py:837 +#: community/apps/function_lib/serializers/function_lib_serializer.py:125 +#: community/apps/function_lib/swagger_api/function_lib_api.py:119 +#: community/apps/function_lib/swagger_api/function_lib_api.py:120 +#: community/apps/function_lib/swagger_api/function_lib_api.py:165 +#: community/apps/function_lib/swagger_api/function_lib_api.py:166 +#: community/apps/setting/swagger_api/provide_api.py:81 +msgid "permission" +msgstr "權限" + +#: apps/xpack/serializers/dataset_lark_serializer.py:463 +#: community/apps/dataset/serializers/dataset_serializers.py:884 +#, python-brace-format +msgid "Unknown application id {dataset_id}, cannot be associated" +msgstr "未知的應用id {dataset_id},無法關聯" + +#: apps/xpack/serializers/license_serializers.py:52 +msgid "license file" +msgstr "License 文件" + +#: apps/xpack/serializers/license_tools.py:134 +msgid "License usage limit exceeded." 
+msgstr "超出許可證使用限制。" + +#: apps/xpack/serializers/license_tools.py:158 +msgid "The network is busy, try again later." +msgstr "網絡繁忙,請稍後再試。" + +#: apps/xpack/serializers/oauth2.py:79 apps/xpack/serializers/oauth2.py:82 +msgid "Failed to obtain user information" +msgstr "獲取用戶信息失敗" + +#: apps/xpack/serializers/operate_log.py:36 +#: community/apps/application/serializers/application_statistics_serializers.py:27 +#: community/apps/application/serializers/chat_serializers.py:116 +#: community/apps/application/swagger_api/application_statistics_api.py:26 +msgid "Start time" +msgstr "開始時間" + +#: apps/xpack/serializers/operate_log.py:37 +#: community/apps/application/serializers/application_statistics_serializers.py:28 +#: community/apps/application/serializers/chat_serializers.py:117 +#: community/apps/application/swagger_api/application_statistics_api.py:31 +#: community/apps/application/swagger_api/chat_api.py:270 +msgid "End time" +msgstr "結束時間" + +#: apps/xpack/serializers/operate_log.py:38 +#: apps/xpack/swagger_api/operate_log.py:17 +#: apps/xpack/swagger_api/operate_log.py:18 +#: apps/xpack/swagger_api/operate_log.py:45 +#: apps/xpack/swagger_api/operate_log.py:46 +msgid "menu" +msgstr "選單" + +#: apps/xpack/serializers/operate_log.py:39 +#: apps/xpack/swagger_api/operate_log.py:20 +#: apps/xpack/swagger_api/operate_log.py:21 +#: apps/xpack/swagger_api/operate_log.py:48 +#: apps/xpack/swagger_api/operate_log.py:49 +#| msgid "Temperature" +msgid "operate" +msgstr "操作" + +#: apps/xpack/serializers/operate_log.py:40 +#: apps/xpack/swagger_api/operate_log.py:51 +#: apps/xpack/swagger_api/operate_log.py:52 +#| msgid "user id" +msgid "user" +msgstr "用戶" + +#: apps/xpack/serializers/operate_log.py:41 +#: apps/xpack/swagger_api/operate_log.py:54 +#: apps/xpack/swagger_api/operate_log.py:55 +#: community/apps/dataset/serializers/document_serializers.py:417 +msgid "status" +msgstr "狀態" + +#: apps/xpack/serializers/operate_log.py:42 +#: apps/xpack/swagger_api/operate_log.py:57 
+#: apps/xpack/swagger_api/operate_log.py:58
+#| msgid "Forum address"
+msgid "ip_address"
+msgstr "IP 地址"
+
+#: apps/xpack/serializers/platform_serializer.py:14
+msgid "app_id is required"
+msgstr "app_id 是必填項"
+
+#: apps/xpack/serializers/platform_serializer.py:15
+msgid "app_secret is required"
+msgstr "app_secret 是必填項"
+
+#: apps/xpack/serializers/platform_serializer.py:16
+msgid "token is required"
+msgstr "token 是必填項"
+
+#: apps/xpack/serializers/platform_serializer.py:17
+msgid "callback_url is required"
+msgstr "回調地址是必填項"
+
+#: apps/xpack/serializers/platform_serializer.py:23
+#: apps/xpack/serializers/platform_serializer.py:32
+msgid "App ID is required"
+msgstr "App ID 是必填項"
+
+#: apps/xpack/serializers/platform_serializer.py:24
+#: apps/xpack/serializers/platform_source_serializer.py:24
+msgid "Agent ID is required"
+msgstr "Agent ID 是必填項"
+
+#: apps/xpack/serializers/platform_serializer.py:25
+msgid "Secret is required"
+msgstr "Secret 是必填項"
+
+#: apps/xpack/serializers/platform_serializer.py:26
+msgid "Token is required"
+msgstr "Token 是必填項"
+
+#: apps/xpack/serializers/platform_serializer.py:28
+#: apps/xpack/serializers/platform_serializer.py:36
+#: apps/xpack/serializers/platform_serializer.py:42
+#: apps/xpack/serializers/platform_serializer.py:48
+#: apps/xpack/serializers/platform_source_serializer.py:19
+msgid "Callback URL is required"
+msgstr "Callback URL 是必填項"
+
+#: apps/xpack/serializers/platform_serializer.py:33
+#: apps/xpack/serializers/platform_source_serializer.py:18
+msgid "App Secret is required"
+msgstr "App Secret 是必填項"
+
+#: apps/xpack/serializers/platform_serializer.py:35
+msgid "Verification Token is required"
+msgstr "Verification Token 是必填項"
+
+#: apps/xpack/serializers/platform_serializer.py:40
+msgid "Client ID is required"
+msgstr "Client ID 是必填項"
+
+#: apps/xpack/serializers/platform_serializer.py:41
+msgid "Client Secret is required"
+msgstr "Client Secret 是必填項"
+
+#: apps/xpack/serializers/platform_serializer.py:46
+#| 
msgid "Client Secret is required" +msgid "Signing Secret is required" +msgstr "Signing Secret 是必填項" + +#: apps/xpack/serializers/platform_serializer.py:47 +#| msgid "Token is required" +msgid "Bot User Token is required" +msgstr "Bot User Token 是必填項" + +#: apps/xpack/serializers/platform_serializer.py:68 +msgid "Check if the fields are correct" +msgstr "檢查字段是否正確" + +#: apps/xpack/serializers/platform_serializer.py:114 +#: apps/xpack/views/platform.py:85 apps/xpack/views/platform.py:101 +#: community/apps/application/flow/step_node/application_node/i_application_node.py:13 +#: community/apps/application/serializers/application_serializers.py:335 +#: community/apps/application/serializers/application_serializers.py:581 +#: community/apps/application/serializers/application_serializers.py:696 +#: community/apps/application/serializers/application_serializers.py:791 +#: community/apps/application/serializers/application_serializers.py:1230 +#: community/apps/application/serializers/application_serializers.py:1272 +#: community/apps/application/serializers/application_statistics_serializers.py:26 +#: community/apps/application/serializers/application_version_serializers.py:35 +#: community/apps/application/serializers/application_version_serializers.py:59 +#: community/apps/application/serializers/chat_message_serializers.py:207 +#: community/apps/application/serializers/chat_message_serializers.py:270 +#: community/apps/application/serializers/chat_serializers.py:77 +#: community/apps/application/serializers/chat_serializers.py:102 +#: community/apps/application/serializers/chat_serializers.py:119 +#: community/apps/application/serializers/chat_serializers.py:287 +#: community/apps/application/serializers/chat_serializers.py:363 +#: community/apps/application/serializers/chat_serializers.py:440 +#: community/apps/application/swagger_api/application_api.py:87 +#: community/apps/application/swagger_api/application_api.py:101 +#: 
community/apps/application/swagger_api/application_api.py:112 +#: community/apps/application/swagger_api/application_api.py:143 +#: community/apps/application/swagger_api/application_api.py:392 +#: community/apps/application/swagger_api/application_api.py:413 +#: community/apps/application/swagger_api/application_api.py:424 +#: community/apps/application/swagger_api/application_statistics_api.py:21 +#: community/apps/application/swagger_api/application_version_api.py:42 +#: community/apps/application/swagger_api/application_version_api.py:56 +#: community/apps/application/swagger_api/chat_api.py:23 +#: community/apps/application/swagger_api/chat_api.py:33 +#: community/apps/application/swagger_api/chat_api.py:167 +#: community/apps/application/swagger_api/chat_api.py:168 +#: community/apps/application/swagger_api/chat_api.py:199 +#: community/apps/application/swagger_api/chat_api.py:222 +#: community/apps/application/swagger_api/chat_api.py:249 +#: community/apps/application/swagger_api/chat_api.py:281 +#: community/apps/application/swagger_api/chat_api.py:350 +#: community/apps/application/swagger_api/chat_api.py:410 +#: community/apps/application/swagger_api/chat_api.py:427 +#: community/apps/application/swagger_api/chat_api.py:460 +#: community/apps/application/views/chat_views.py:477 +msgid "Application ID" +msgstr "應用 ID" + +#: apps/xpack/serializers/platform_serializer.py:116 +msgid "Platform type, for example: wechat" +msgstr "平臺類型,例如:wechat" + +#: apps/xpack/serializers/platform_serializer.py:125 +#: apps/xpack/serializers/platform_serializer.py:126 +msgid "Platform type" +msgstr "平臺類型" + +#: apps/xpack/serializers/platform_serializer.py:128 +msgid "Status" +msgstr "狀態" + +#: apps/xpack/serializers/platform_serializer.py:138 +#: apps/xpack/serializers/platform_serializer.py:139 +msgid "Configuration information" +msgstr "配置信息" + +#: apps/xpack/serializers/platform_serializer.py:191 +#, python-brace-format +msgid "The platform configuration corresponding to 
{type} was not found" +msgstr "平臺配置 {type} 未找到" + +#: apps/xpack/serializers/platform_source_serializer.py:23 +#: apps/xpack/serializers/platform_source_serializer.py:32 +msgid "Corp ID is required" +msgstr "Corp ID 是必填項" + +#: apps/xpack/serializers/platform_source_serializer.py:28 +#: apps/xpack/serializers/platform_source_serializer.py:33 +msgid "App Key is required" +msgstr "App Key 是必填項" + +#: apps/xpack/serializers/platform_source_serializer.py:78 +msgid "Configuration information is wrong and failed to save" +msgstr "配置信息錯誤,保存失敗" + +#: apps/xpack/serializers/platform_source_serializer.py:104 +msgid "Connection failed" +msgstr "連接失敗" + +#: apps/xpack/serializers/platform_source_serializer.py:123 +msgid "Platform does not exist" +msgstr "平臺不存在" + +#: apps/xpack/serializers/platform_source_serializer.py:134 +#| msgid "Unsupported file format" +msgid "Unsupported platform type" +msgstr "不支持的平臺類型" + +#: apps/xpack/serializers/qr_login/qr_login.py:28 +msgid "Team" +msgstr "團隊成员" + +#: apps/xpack/serializers/system_params_serializers.py:63 +msgid "theme" +msgstr "主題" + +#: apps/xpack/serializers/system_params_serializers.py:70 +msgid "website icon" +msgstr "網站圖標" + +#: apps/xpack/serializers/system_params_serializers.py:77 +msgid "login logo" +msgstr "登錄logo" + +#: apps/xpack/serializers/system_params_serializers.py:84 +msgid "Login background image" +msgstr "登錄背景圖" + +#: apps/xpack/serializers/system_params_serializers.py:91 +msgid "website title" +msgstr "網站標題" + +#: apps/xpack/serializers/system_params_serializers.py:98 +msgid "website slogan" +msgstr "網站標語" + +#: apps/xpack/serializers/system_params_serializers.py:105 +msgid "Show user manual" +msgstr "是否顯示用戶手冊" + +#: apps/xpack/serializers/system_params_serializers.py:112 +msgid "User manual address" +msgstr "用戶手冊地址" + +#: apps/xpack/serializers/system_params_serializers.py:119 +msgid "Show forum" +msgstr "是否顯示論壇" + +#: apps/xpack/serializers/system_params_serializers.py:126 +msgid "Forum address" +msgstr 
"論壇地址" + +#: apps/xpack/serializers/system_params_serializers.py:133 +msgid "Show project" +msgstr "是否顯示項目" + +#: apps/xpack/serializers/system_params_serializers.py:140 +msgid "Project address" +msgstr "項目地址" + +#: apps/xpack/serializers/tools.py:58 +#, python-brace-format +msgid "" +"Thinking about 【{question}】...If you want me to continue answering, please " +"reply {trigger_message}" +msgstr "" +"思考中【{question}】...如果您希望我繼續回答,請回復“ {trigger_message} ”。" + +#: apps/xpack/serializers/tools.py:158 +msgid "" +"\n" +" ------------\n" +"[To be continued, reply \"Continue to answer the question]" +msgstr "" +"\n" +" ------------\n" +"【未完待續,回覆“問題繼續回答】" + +#: apps/xpack/serializers/tools.py:238 +#, python-brace-format +msgid "" +"To be continued, reply \"{trigger_message}\" to continue answering the " +"question" +msgstr "【未完待續,回覆“{trigger_message}” 或 問題繼續回答】" + +#: apps/xpack/swagger_api/application_setting_api.py:79 +msgid "Custom theme {theme_color: , header_font_color: }" +msgstr "自定義主題 {theme_color:, header_font_color: }" + +#: apps/xpack/swagger_api/application_setting_api.py:93 +msgid "Float location {top: 0, left: 0}" +msgstr "浮窗位置 {top: 0, left: 0}" + +#: apps/xpack/swagger_api/application_setting_api.py:101 +#: apps/xpack/swagger_api/application_setting_api.py:102 +#: apps/xpack/swagger_api/auth_api.py:10 apps/xpack/swagger_api/auth_api.py:11 +#: apps/xpack/swagger_api/auth_api.py:81 apps/xpack/swagger_api/auth_api.py:82 +msgid "Authentication configuration" +msgstr "認證配置" + +#: apps/xpack/swagger_api/application_setting_api.py:106 +#: apps/xpack/swagger_api/application_setting_api.py:107 +#: apps/xpack/swagger_api/auth_api.py:15 apps/xpack/swagger_api/auth_api.py:16 +#: apps/xpack/swagger_api/auth_api.py:30 apps/xpack/swagger_api/auth_api.py:87 +#: apps/xpack/swagger_api/auth_api.py:88 apps/xpack/views/auth.py:27 +#: apps/xpack/views/auth.py:28 +msgid "Authentication type" +msgstr "認證類型" + +#: apps/xpack/swagger_api/application_setting_api.py:109 +#: 
apps/xpack/swagger_api/application_setting_api.py:110 +#: apps/xpack/swagger_api/auth_api.py:18 apps/xpack/swagger_api/auth_api.py:19 +#: apps/xpack/swagger_api/auth_api.py:93 apps/xpack/swagger_api/auth_api.py:94 +msgid "Configuration" +msgstr "配置" + +#: apps/xpack/swagger_api/application_setting_api.py:112 +#: apps/xpack/swagger_api/application_setting_api.py:113 +#: apps/xpack/swagger_api/auth_api.py:21 apps/xpack/swagger_api/auth_api.py:22 +#: community/apps/common/swagger_api/common_api.py:72 +#: community/apps/common/swagger_api/common_api.py:73 +#: community/apps/dataset/serializers/document_serializers.py:819 +#: community/apps/dataset/serializers/document_serializers.py:820 +#: community/apps/dataset/serializers/document_serializers.py:838 +#: community/apps/dataset/serializers/document_serializers.py:839 +#: community/apps/dataset/serializers/paragraph_serializers.py:57 +#: community/apps/dataset/serializers/paragraph_serializers.py:71 +#: community/apps/dataset/serializers/paragraph_serializers.py:719 +#: community/apps/dataset/serializers/paragraph_serializers.py:720 +#: community/apps/dataset/swagger_api/problem_api.py:130 +#: community/apps/function_lib/serializers/function_lib_serializer.py:110 +#: community/apps/function_lib/serializers/function_lib_serializer.py:129 +#: community/apps/function_lib/serializers/function_lib_serializer.py:139 +#: community/apps/function_lib/swagger_api/function_lib_api.py:121 +#: community/apps/function_lib/swagger_api/function_lib_api.py:122 +#: community/apps/function_lib/swagger_api/function_lib_api.py:167 +#: community/apps/function_lib/swagger_api/function_lib_api.py:168 +#: community/apps/setting/serializers/team_serializers.py:46 +#: community/apps/users/serializers/user_serializers.py:473 +#: community/apps/users/serializers/user_serializers.py:496 +#: community/apps/users/serializers/user_serializers.py:584 +#: community/apps/users/serializers/user_serializers.py:585 +#: 
community/apps/users/serializers/user_serializers.py:721
+#: community/apps/users/serializers/user_serializers.py:737
+#: community/apps/users/serializers/user_serializers.py:738
+msgid "Is active"
+msgstr "是否可用"
+
+#: apps/xpack/swagger_api/auth_api.py:37
+#| msgid "Form Configuration"
+msgid "Wecom configuration"
+msgstr "企業微信配置"
+
+#: apps/xpack/swagger_api/auth_api.py:38
+#| msgid "Get function details"
+msgid "Wecom configuration details"
+msgstr "企業微信配置詳情"
+
+#: apps/xpack/swagger_api/auth_api.py:40 apps/xpack/swagger_api/auth_api.py:53
+msgid "Corp ID"
+msgstr ""
+
+#: apps/xpack/swagger_api/auth_api.py:41
+msgid "Agent ID"
+msgstr ""
+
+#: apps/xpack/swagger_api/auth_api.py:42 apps/xpack/swagger_api/auth_api.py:55
+#: apps/xpack/swagger_api/auth_api.py:67
+#| msgid "App Secret is required"
+msgid "App Secret"
+msgstr "App Secret"
+
+#: apps/xpack/swagger_api/auth_api.py:43 apps/xpack/swagger_api/auth_api.py:56
+#: apps/xpack/swagger_api/auth_api.py:68
+#| msgid "Callback URL is required"
+msgid "Callback URL"
+msgstr "Callback URL"
+
+#: apps/xpack/swagger_api/auth_api.py:50
+#| msgid "Configuration"
+msgid "Dingtalk configuration"
+msgstr "釘釘配置"
+
+#: apps/xpack/swagger_api/auth_api.py:51
+#| msgid "Get application details"
+msgid "Dingtalk configuration details"
+msgstr "釘釘配置詳情"
+
+#: apps/xpack/swagger_api/auth_api.py:54 apps/xpack/swagger_api/auth_api.py:66
+msgid "App Key"
+msgstr ""
+
+#: apps/xpack/swagger_api/auth_api.py:63
+#| msgid "Form Configuration"
+msgid "Feishu configuration"
+msgstr "飛書配置"
+
+#: apps/xpack/swagger_api/auth_api.py:64
+#| msgid "Get function details"
+msgid "Feishu configuration details"
+msgstr "飛書配置詳情"
+
+#: apps/xpack/swagger_api/license_api.py:22
+msgid "license status"
+msgstr "License 狀態"
+
+#: apps/xpack/swagger_api/license_api.py:24
+msgid ""
+"License status, possible values are: valid, invalid, expired, which "
+"respectively represent: valid, invalid, expired"
+msgstr ""
+"license狀態,可能值爲:valid、invalid、expired,分別代表:有效、無效、已過期" + +#: apps/xpack/swagger_api/license_api.py:26 +msgid "license details" +msgstr "License 詳情" + +#: apps/xpack/swagger_api/license_api.py:30 +msgid "customer name" +msgstr "客戶名稱" + +#: apps/xpack/swagger_api/license_api.py:31 +msgid "customer name. For example: *** company." +msgstr "客戶名稱。例如:***公司。" + +#: apps/xpack/swagger_api/license_api.py:33 +msgid "independent software vendor" +msgstr "獨立軟件供應商。" + +#: apps/xpack/swagger_api/license_api.py:35 +msgid "" +"Independent Software Vendor. For example: *** Company, suitable for the " +"embedded version of the product." +msgstr "獨立軟件供應商。例如:***公司,適用於產品的嵌入式版本。" + +#: apps/xpack/swagger_api/license_api.py:37 +msgid "Authorization deadline." +msgstr "授權截止時間" + +#: apps/xpack/swagger_api/license_api.py:39 +msgid "" +"Authorization deadline. For example: 2020-12-31, this license will expire on " +"2021-01-01." +msgstr "授權截止時間。例如:2020-12-31,此license將在2021-01-01到期。" + +#: apps/xpack/swagger_api/license_api.py:41 +msgid "product name." +msgstr "產品名稱" + +#: apps/xpack/swagger_api/license_api.py:43 +msgid "Product name. For example: JumpServer, CMP, etc." +msgstr "產品名稱。例如:CMP、KO、JS、MS。" + +#: apps/xpack/swagger_api/license_api.py:45 +msgid "product version." +msgstr "產品版本" + +#: apps/xpack/swagger_api/license_api.py:47 +msgid "Product version. For example: JumpServer 2.0, CMP 1.0, etc." +msgstr "產品版本。例如:Standard、Enterprise,代表標準版、企業版。" + +#: apps/xpack/swagger_api/license_api.py:49 +msgid "license version." +msgstr "License 版本" + +#: apps/xpack/swagger_api/license_api.py:51 +msgid "License version. For example: 1.0, 2.0, etc." +msgstr "License版本。例如:1.0、2.0、3.0等。" + +#: apps/xpack/swagger_api/license_api.py:53 +msgid "authorization quantity." +msgstr "認證數量" + +#: apps/xpack/swagger_api/license_api.py:55 +msgid "" +"Authorization quantity. For example: 100, this license can be used by 100 " +"users." 
+msgstr "授權數量。例如:cmp授權的cpu數量,或JS授權的資產數量。" + +#: apps/xpack/swagger_api/license_api.py:57 +msgid "Serial number, the unique identifier of the License." +msgstr "序列號,License唯一標識。" + +#: apps/xpack/swagger_api/license_api.py:59 +msgid "" +"Serial number, the unique identifier of the license. The customer support " +"portal will save the serial number after generating the license. If the " +"serial number is not recorded in the customer support portal, the license " +"will be regarded as an unknown source." +msgstr "" +"序列號,License唯一標識。客戶支持門戶生成License後會保存序列號,如果序列號在" +"客戶支持門戶中沒有記錄,則此License將被視爲未知來源。" + +#: apps/xpack/swagger_api/license_api.py:61 +msgid "remarks" +msgstr "備註" + +#: apps/xpack/swagger_api/license_api.py:63 +msgid "" +"Remarks, record additional information, length limit is 50. For example, a " +"customer purchases two identical JumpServer subscriptions and uses them in " +"different computer rooms respectively. You can use this field to note the A " +"computer room and B computer room to help distinguish the licenses." 
+msgstr "" +"備註,記錄額外的信息,長度限制50。例如某個客戶買了兩個同樣的JumpServer訂閱分" +"別在不同機房使用,可以用這個字段備註A機房B機房,幫助區別License。" + +#: apps/xpack/swagger_api/operate_log.py:12 +#: apps/xpack/swagger_api/operate_log.py:13 +#: apps/xpack/swagger_api/operate_log.py:38 +#: apps/xpack/swagger_api/operate_log.py:39 apps/xpack/views/operate_log.py:24 +#: apps/xpack/views/operate_log.py:36 +msgid "Operate log" +msgstr "操作日誌" + +#: apps/xpack/swagger_api/operate_log.py:23 +#: apps/xpack/swagger_api/operate_log.py:24 +msgid "menu_label" +msgstr "操作選單" + +#: apps/xpack/swagger_api/operate_log.py:26 +#: apps/xpack/swagger_api/operate_log.py:27 +msgid "operate_label" +msgstr "操作" + +#: apps/xpack/swagger_api/operate_log.py:42 +#: apps/xpack/swagger_api/operate_log.py:43 +#: community/apps/dataset/serializers/dataset_serializers.py:104 +msgid "id" +msgstr "" + +#: apps/xpack/swagger_api/operate_log.py:60 +#: apps/xpack/swagger_api/operate_log.py:61 +#| msgid "license details" +msgid "details" +msgstr "詳情" + +#: apps/xpack/views/application_setting_views.py:22 +#: apps/xpack/views/application_setting_views.py:23 +#| msgid "Pro/Modify Application Settings" +msgid "Modify Application Settings" +msgstr "修改應用显示設置" + +#: apps/xpack/views/application_setting_views.py:24 +#: apps/xpack/views/application_setting_views.py:40 +msgid "Pro/Application/Public Access" +msgstr "專業版/應用/公共訪問" + +#: apps/xpack/views/application_setting_views.py:37 +#: apps/xpack/views/application_setting_views.py:38 +#| msgid "Pro/Get Application Settings" +msgid "Get Application Settings" +msgstr "獲取應用詳情" + +#: apps/xpack/views/auth.py:29 +msgid "Authentication" +msgstr "認證" + +#: apps/xpack/views/auth.py:40 apps/xpack/views/auth.py:41 +msgid "Add or modify authentication configuration" +msgstr "添加或修改認證信息" + +#: apps/xpack/views/auth.py:44 apps/xpack/views/auth.py:58 +#: apps/xpack/views/auth.py:72 +msgid "System settings/login authentication" +msgstr "系統設置/登錄認證" + +#: apps/xpack/views/auth.py:55 apps/xpack/views/auth.py:56 +msgid "Get 
authentication configuration" +msgstr "獲取認證配置" + +#: apps/xpack/views/auth.py:69 apps/xpack/views/auth.py:70 +msgid "test connection" +msgstr "測試連接" + +#: apps/xpack/views/auth.py:96 apps/xpack/views/auth.py:97 +#: community/apps/users/views/user.py:173 +#: community/apps/users/views/user.py:174 +msgid "Log in" +msgstr "登錄" + +#: apps/xpack/views/auth.py:101 apps/xpack/views/auth.py:114 +#: apps/xpack/views/auth.py:130 apps/xpack/views/auth.py:146 +#: apps/xpack/views/auth.py:207 apps/xpack/views/auth.py:224 +#: apps/xpack/views/auth.py:242 apps/xpack/views/auth.py:260 +#: apps/xpack/views/auth.py:278 apps/xpack/views/auth.py:296 +msgid "Three-party login" +msgstr "三方登錄" + +#: apps/xpack/views/auth.py:111 apps/xpack/views/auth.py:112 +msgid "CAS login" +msgstr "CAS 登錄" + +#: apps/xpack/views/auth.py:127 apps/xpack/views/auth.py:128 +msgid "OIDC login" +msgstr "OIDC 登錄" + +#: apps/xpack/views/auth.py:143 apps/xpack/views/auth.py:144 +msgid "OAuth2 login" +msgstr "OAuth2 登錄" + +#: apps/xpack/views/auth.py:160 apps/xpack/views/auth.py:161 +#: apps/xpack/views/auth.py:162 apps/xpack/views/auth.py:170 +#: apps/xpack/views/auth.py:194 apps/xpack/views/auth.py:195 +#: apps/xpack/views/auth.py:196 +msgid "Get platform information" +msgstr "獲取平臺信息" + +#: apps/xpack/views/auth.py:167 apps/xpack/views/auth.py:168 +msgid "Modify platform information" +msgstr "修改平臺信息" + +#: apps/xpack/views/auth.py:175 apps/xpack/views/auth.py:176 +#: apps/xpack/views/auth.py:178 +msgid "Test platform connection" +msgstr "測試平臺連接" + +#: apps/xpack/views/auth.py:185 apps/xpack/views/auth.py:186 +msgid "Scan code login type" +msgstr "掃碼登錄類型" + +#: apps/xpack/views/auth.py:187 +msgid "Scan code to log in" +msgstr "掃碼登錄" + +#: apps/xpack/views/auth.py:204 apps/xpack/views/auth.py:205 +msgid "DingTalk callback" +msgstr "釘釘回調" + +#: apps/xpack/views/auth.py:221 apps/xpack/views/auth.py:222 +#| msgid "DingTalk callback" +msgid "DingTalk OAuth2 callback" +msgstr "釘釘回調" + +#: apps/xpack/views/auth.py:239 
apps/xpack/views/auth.py:240 +msgid "Lark callback" +msgstr "飛書回調" + +#: apps/xpack/views/auth.py:257 apps/xpack/views/auth.py:258 +#| msgid "Lark callback" +msgid "Lark OAuth2 callback" +msgstr "飛書回調" + +#: apps/xpack/views/auth.py:275 apps/xpack/views/auth.py:276 +msgid "Wecom callback" +msgstr "企業微信回調" + +#: apps/xpack/views/auth.py:293 apps/xpack/views/auth.py:294 +#| msgid "Wecom callback" +msgid "Wecom OAuth2 callback" +msgstr "企業微信回調" + +#: apps/xpack/views/dataset_lark_views.py:22 +#: apps/xpack/views/dataset_lark_views.py:23 +#| msgid "Create a knowledge base" +msgid "Create a lark knowledge base" +msgstr "創建知識庫" + +#: apps/xpack/views/dataset_lark_views.py:26 +#: apps/xpack/views/dataset_lark_views.py:40 +#: community/apps/dataset/views/dataset.py:39 +#: community/apps/dataset/views/dataset.py:62 +#: community/apps/dataset/views/dataset.py:82 +#: community/apps/dataset/views/dataset.py:98 +#: community/apps/dataset/views/dataset.py:109 +#: community/apps/dataset/views/dataset.py:123 +#: community/apps/dataset/views/dataset.py:137 +#: community/apps/dataset/views/dataset.py:157 +#: community/apps/dataset/views/dataset.py:172 +#: community/apps/dataset/views/dataset.py:187 +#: community/apps/dataset/views/dataset.py:202 +#: community/apps/dataset/views/dataset.py:217 +#: community/apps/dataset/views/dataset.py:231 +#: community/apps/dataset/views/dataset.py:250 +msgid "Knowledge Base" +msgstr "知識庫" + +#: apps/xpack/views/dataset_lark_views.py:36 +#: apps/xpack/views/dataset_lark_views.py:37 +#| msgid "Create a knowledge base" +msgid "Update the lark knowledge base" +msgstr "更新知識庫" + +#: apps/xpack/views/dataset_lark_views.py:53 +#: apps/xpack/views/dataset_lark_views.py:54 +#| msgid "Get a list of applications available in the knowledge base" +msgid "Get the list of documents in the lark knowledge base" +msgstr "獲取知識庫中文檔列表" + +#: apps/xpack/views/dataset_lark_views.py:57 +#: apps/xpack/views/dataset_lark_views.py:74 +#: 
apps/xpack/views/dataset_lark_views.py:90
+#: apps/xpack/views/dataset_lark_views.py:110
+#: community/apps/dataset/views/document.py:34
+#: community/apps/dataset/views/document.py:47
+#: community/apps/dataset/views/document.py:62
+#: community/apps/dataset/views/document.py:81
+#: community/apps/dataset/views/document.py:102
+#: community/apps/dataset/views/document.py:123
+#: community/apps/dataset/views/document.py:137
+#: community/apps/dataset/views/document.py:158
+#: community/apps/dataset/views/document.py:178
+#: community/apps/dataset/views/document.py:193
+#: community/apps/dataset/views/document.py:208
+#: community/apps/dataset/views/document.py:224
+#: community/apps/dataset/views/document.py:244
+#: community/apps/dataset/views/document.py:265
+#: community/apps/dataset/views/document.py:284
+#: community/apps/dataset/views/document.py:306
+#: community/apps/dataset/views/document.py:324
+#: community/apps/dataset/views/document.py:349
+#: community/apps/dataset/views/document.py:364
+#: community/apps/dataset/views/document.py:380
+#: community/apps/dataset/views/document.py:396
+#: community/apps/dataset/views/document.py:413
+#: community/apps/dataset/views/document.py:429
+#: community/apps/dataset/views/document.py:442
+#: community/apps/dataset/views/document.py:467
+msgid "Knowledge Base/Documentation"
+msgstr "知識庫/文檔"
+
+#: apps/xpack/views/dataset_lark_views.py:70
+#: apps/xpack/views/dataset_lark_views.py:71
+#| msgid "Create a knowledge base"
+msgid "Import documents to the lark knowledge base"
+msgstr "導入文檔到知識庫"
+
+#: apps/xpack/views/dataset_lark_views.py:86
+#: apps/xpack/views/dataset_lark_views.py:87
+#| msgid "Create document"
+msgid "Synchronize lark document"
+msgstr "同步飛書文檔"
+
+#: apps/xpack/views/dataset_lark_views.py:104
+#: apps/xpack/views/dataset_lark_views.py:105
+#| msgid "Batch sync documents"
+msgid "Batch sync lark documents"
+msgstr "批量同步飛書文檔"
+
+#: apps/xpack/views/display.py:17 apps/xpack/views/display.py:18
+msgid 
"View appearance settings" +msgstr "查看外觀設置" + +#: apps/xpack/views/display.py:19 apps/xpack/views/display.py:33 +msgid "System Settings/Appearance Settings" +msgstr "系統設置/外觀設置" + +#: apps/xpack/views/display.py:30 apps/xpack/views/display.py:31 +msgid "Update appearance settings" +msgstr "更新外觀設置" + +#: apps/xpack/views/license.py:29 apps/xpack/views/license.py:30 +msgid "Get license information" +msgstr "獲取 License 信息" + +#: apps/xpack/views/license.py:38 apps/xpack/views/license.py:39 +msgid "Update license information" +msgstr "更新 License 信息" + +#: apps/xpack/views/license.py:44 +msgid "upload file" +msgstr "上傳文件" + +#: apps/xpack/views/operate_log.py:21 apps/xpack/views/operate_log.py:22 +#| msgid "Get model parameter form" +msgid "Get menu operate log" +msgstr "獲取菜單操作日誌" + +#: apps/xpack/views/operate_log.py:33 apps/xpack/views/operate_log.py:34 +#| msgid "Get model parameter form" +msgid "Get operate log" +msgstr "獲取操作日誌" + +#: apps/xpack/views/platform.py:56 apps/xpack/views/platform.py:57 +msgid "Get platform configuration" +msgstr "獲取平臺配置" + +#: apps/xpack/views/platform.py:59 apps/xpack/views/platform.py:67 +msgid "Application/application access" +msgstr "應用/應用訪問" + +#: apps/xpack/views/platform.py:63 apps/xpack/views/platform.py:64 +msgid "Update platform configuration" +msgstr "更新平臺配置" + +#: apps/xpack/views/platform.py:80 apps/xpack/views/platform.py:81 +msgid "Get platform status" +msgstr "獲取平臺狀態" + +#: apps/xpack/views/platform.py:86 +msgid "Application/Get platform status" +msgstr "應用/獲取平臺狀態" + +#: apps/xpack/views/platform.py:96 apps/xpack/views/platform.py:97 +msgid "Update platform status" +msgstr "更新平臺狀態" + +#: apps/xpack/views/platform.py:103 +msgid "Application/Update platform status" +msgstr "應用/更新平臺狀態" + +#: apps/xpack/views/system_api_key_views.py:28 +#: apps/xpack/views/system_api_key_views.py:29 +msgid "Get personal system API_KEY list" +msgstr "獲取個人系統 API_KEY 列表" + +#: apps/xpack/views/system_api_key_views.py:30 +#: 
apps/xpack/views/system_api_key_views.py:39 +#: apps/xpack/views/system_api_key_views.py:53 +#: apps/xpack/views/system_api_key_views.py:62 +msgid "Personal system/API_KEY" +msgstr "個人系統/API_KEY" + +#: apps/xpack/views/system_api_key_views.py:37 +#: apps/xpack/views/system_api_key_views.py:38 +msgid "Update personal system API_KEY" +msgstr "更新個人系統 API_KEY" + +#: apps/xpack/views/system_api_key_views.py:51 +#: apps/xpack/views/system_api_key_views.py:52 +msgid "Delete personal system API_KEY" +msgstr "刪除個人系統 API_KEY" + +#: apps/xpack/views/system_api_key_views.py:60 +#: apps/xpack/views/system_api_key_views.py:61 +msgid "Add personal system API_KEY" +msgstr "添加個人系統 API_KEY" + +#: community/apps/application/chat_pipeline/step/chat_step/i_chat_step.py:27 +msgid "Model type error" +msgstr "模型類型錯誤" + +#: community/apps/application/chat_pipeline/step/chat_step/i_chat_step.py:37 +#: community/apps/common/field/common.py:21 +#: community/apps/common/field/common.py:34 +msgid "Message type error" +msgstr "消息類型錯誤" + +#: community/apps/application/chat_pipeline/step/chat_step/i_chat_step.py:56 +msgid "Conversation list" +msgstr "對話列表" + +#: community/apps/application/chat_pipeline/step/chat_step/i_chat_step.py:57 +#: community/apps/application/chat_pipeline/step/reset_problem_step/i_reset_problem_step.py:30 +#: community/apps/application/flow/step_node/ai_chat_step_node/i_chat_node.py:19 +#: community/apps/application/flow/step_node/image_generate_step_node/i_image_generate_node.py:13 +#: community/apps/application/flow/step_node/image_understand_step_node/i_image_understand_node.py:13 +#: community/apps/application/flow/step_node/question_node/i_question_node.py:19 +#: community/apps/application/flow/step_node/speech_to_text_step_node/i_speech_to_text_node.py:13 +#: community/apps/application/flow/step_node/text_to_speech_step_node/i_text_to_speech_node.py:13 +#: community/apps/application/serializers/application_serializers.py:72 +#: 
community/apps/application/serializers/chat_serializers.py:365 +#: community/apps/application/swagger_api/application_api.py:53 +#: community/apps/application/swagger_api/application_api.py:185 +#: community/apps/application/swagger_api/application_api.py:186 +#: community/apps/application/swagger_api/application_api.py:334 +#: community/apps/application/swagger_api/application_api.py:335 +msgid "Model id" +msgstr "模型 id" + +#: community/apps/application/chat_pipeline/step/chat_step/i_chat_step.py:59 +#: community/apps/application/chat_pipeline/step/generate_human_message_step/i_generate_human_message_step.py:30 +msgid "Paragraph List" +msgstr "段落列表" + +#: community/apps/application/chat_pipeline/step/chat_step/i_chat_step.py:61 +#: community/apps/application/serializers/chat_message_serializers.py:201 +#: community/apps/application/serializers/chat_message_serializers.py:253 +#: community/apps/application/serializers/chat_serializers.py:76 +#: community/apps/application/serializers/chat_serializers.py:240 +#: community/apps/application/serializers/chat_serializers.py:439 +#: community/apps/application/serializers/chat_serializers.py:531 +#: community/apps/application/serializers/chat_serializers.py:587 +#: community/apps/application/serializers/chat_serializers.py:613 +#: community/apps/application/serializers/chat_serializers.py:672 +#: community/apps/application/serializers/chat_serializers.py:712 +#: community/apps/application/swagger_api/chat_api.py:38 +#: community/apps/application/swagger_api/chat_api.py:76 +#: community/apps/application/swagger_api/chat_api.py:171 +#: community/apps/application/swagger_api/chat_api.py:172 +#: community/apps/application/swagger_api/chat_api.py:286 +#: community/apps/application/swagger_api/chat_api.py:355 +#: community/apps/application/swagger_api/chat_api.py:432 +#: community/apps/application/swagger_api/chat_api.py:465 +#: community/apps/application/views/chat_views.py:482 +msgid "Conversation ID" +msgstr "對話 ID" + +#: 
community/apps/application/chat_pipeline/step/chat_step/i_chat_step.py:63 +#: community/apps/application/flow/step_node/application_node/i_application_node.py:15 +#: community/apps/application/serializers/chat_message_serializers.py:254 +#: community/apps/application/serializers/chat_serializers.py:240 +msgid "User Questions" +msgstr "用戶問題" + +#: community/apps/application/chat_pipeline/step/chat_step/i_chat_step.py:66 +msgid "Post-processor" +msgstr "後置處理器" + +#: community/apps/application/chat_pipeline/step/chat_step/i_chat_step.py:69 +msgid "Completion Question" +msgstr "補全問題" + +#: community/apps/application/chat_pipeline/step/chat_step/i_chat_step.py:71 +#: community/apps/application/serializers/chat_message_serializers.py:203 +msgid "Streaming Output" +msgstr "流式輸出" + +#: community/apps/application/chat_pipeline/step/chat_step/i_chat_step.py:72 +#: community/apps/application/serializers/chat_message_serializers.py:208 +#: community/apps/application/serializers/chat_message_serializers.py:271 +#: community/apps/application/serializers/chat_serializers.py:103 +msgid "Client id" +msgstr "客戶端 id" + +#: community/apps/application/chat_pipeline/step/chat_step/i_chat_step.py:73 +#: community/apps/application/serializers/chat_message_serializers.py:209 +#: community/apps/application/serializers/chat_message_serializers.py:272 +msgid "Client Type" +msgstr "客戶端類型" + +#: community/apps/application/chat_pipeline/step/chat_step/i_chat_step.py:76 +#: community/apps/application/chat_pipeline/step/generate_human_message_step/i_generate_human_message_step.py:46 +#: community/apps/application/swagger_api/application_api.py:262 +msgid "No reference segment settings" +msgstr "未查詢到引用分段" + +#: community/apps/application/chat_pipeline/step/chat_step/i_chat_step.py:78 +#: community/apps/application/chat_pipeline/step/reset_problem_step/i_reset_problem_step.py:31 +#: community/apps/application/chat_pipeline/step/search_dataset_step/i_search_dataset_step.py:48 +#: 
community/apps/application/serializers/application_serializers.py:70 +#: community/apps/application/serializers/application_serializers.py:511 +#: community/apps/application/serializers/application_serializers.py:582 +#: community/apps/application/serializers/application_serializers.py:627 +#: community/apps/application/serializers/application_serializers.py:697 +#: community/apps/application/serializers/application_serializers.py:718 +#: community/apps/application/serializers/application_serializers.py:792 +#: community/apps/application/serializers/application_serializers.py:1228 +#: community/apps/application/serializers/chat_serializers.py:118 +#: community/apps/application/serializers/chat_serializers.py:285 +#: community/apps/application/serializers/chat_serializers.py:338 +#: community/apps/application/serializers/chat_serializers.py:360 +#: community/apps/function_lib/serializers/function_lib_serializer.py:332 +#: community/apps/function_lib/serializers/function_lib_serializer.py:358 +#: community/apps/function_lib/serializers/function_lib_serializer.py:387 +msgid "User ID" +msgstr "用戶 ID" + +#: community/apps/application/chat_pipeline/step/chat_step/i_chat_step.py:81 +#| msgid "Model parameter settings" +msgid "Model settings" +msgstr "模型參數設置" + +#: community/apps/application/chat_pipeline/step/chat_step/i_chat_step.py:84 +#: community/apps/application/flow/step_node/ai_chat_step_node/i_chat_node.py:31 +#: community/apps/application/flow/step_node/image_generate_step_node/i_image_generate_node.py:29 +#: community/apps/application/flow/step_node/image_understand_step_node/i_image_understand_node.py:27 +#: community/apps/application/flow/step_node/question_node/i_question_node.py:27 +#: community/apps/application/flow/step_node/text_to_speech_step_node/i_text_to_speech_node.py:19 +#: community/apps/application/serializers/chat_serializers.py:382 +msgid "Model parameter settings" +msgstr "模型參數設置" + +#: 
community/apps/application/chat_pipeline/step/chat_step/i_chat_step.py:91 +msgid "message type error" +msgstr "消息類型錯誤" + +#: community/apps/application/chat_pipeline/step/chat_step/impl/base_chat_step.py:226 +#: community/apps/application/chat_pipeline/step/chat_step/impl/base_chat_step.py:271 +msgid "" +"Sorry, the AI model is not configured. Please go to the application to set " +"up the AI model first." +msgstr "抱歉,沒有配置 AI 模型,請先去應用中設置 AI 模型。" + +#: community/apps/application/chat_pipeline/step/generate_human_message_step/i_generate_human_message_step.py:27 +#: community/apps/application/chat_pipeline/step/reset_problem_step/i_reset_problem_step.py:25 +#: community/apps/application/chat_pipeline/step/search_dataset_step/i_search_dataset_step.py:25 +#: community/apps/application/serializers/chat_serializers.py:579 +msgid "question" +msgstr "問題" + +#: community/apps/application/chat_pipeline/step/generate_human_message_step/i_generate_human_message_step.py:33 +#: community/apps/application/chat_pipeline/step/reset_problem_step/i_reset_problem_step.py:28 +msgid "History Questions" +msgstr "歷史對答" + +#: community/apps/application/chat_pipeline/step/generate_human_message_step/i_generate_human_message_step.py:35 +#: community/apps/application/flow/step_node/ai_chat_step_node/i_chat_node.py:25 +#: community/apps/application/flow/step_node/image_generate_step_node/i_image_generate_node.py:21 +#: community/apps/application/flow/step_node/image_understand_step_node/i_image_understand_node.py:18 +#: community/apps/application/flow/step_node/question_node/i_question_node.py:24 +#: community/apps/application/swagger_api/application_api.py:55 +#: community/apps/application/swagger_api/application_api.py:56 +#: community/apps/application/swagger_api/application_api.py:188 +#: community/apps/application/swagger_api/application_api.py:189 +#: community/apps/application/swagger_api/application_api.py:337 +#: community/apps/application/swagger_api/application_api.py:338 +msgid 
"Number of multi-round conversations" +msgstr "多輪對話數量" + +#: community/apps/application/chat_pipeline/step/generate_human_message_step/i_generate_human_message_step.py:38 +msgid "Maximum length of the knowledge base paragraph" +msgstr "最大攜帶知識庫段落長度" + +#: community/apps/application/chat_pipeline/step/generate_human_message_step/i_generate_human_message_step.py:40 +#: community/apps/application/flow/step_node/ai_chat_step_node/i_chat_node.py:22 +#: community/apps/application/flow/step_node/image_understand_step_node/i_image_understand_node.py:16 +#: community/apps/application/flow/step_node/question_node/i_question_node.py:22 +#: community/apps/application/serializers/application_serializers.py:108 +#: community/apps/application/serializers/application_serializers.py:138 +#: community/apps/application/swagger_api/application_api.py:286 +#: community/apps/application/swagger_api/application_api.py:287 +msgid "Prompt word" +msgstr "提示詞" + +#: community/apps/application/chat_pipeline/step/generate_human_message_step/i_generate_human_message_step.py:42 +#: community/apps/application/swagger_api/application_api.py:300 +#: community/apps/application/swagger_api/application_api.py:301 +msgid "System prompt words (role)" +msgstr "系統提示詞(角色)" + +#: community/apps/application/chat_pipeline/step/generate_human_message_step/i_generate_human_message_step.py:44 +msgid "Completion problem" +msgstr "補齊問題" + +#: community/apps/application/chat_pipeline/step/reset_problem_step/i_reset_problem_step.py:34 +#: community/apps/application/serializers/application_serializers.py:237 +msgid "Question completion prompt" +msgstr "問題補全提示詞" + +#: community/apps/application/chat_pipeline/step/reset_problem_step/impl/base_reset_problem_step.py:20 +#: community/apps/application/serializers/chat_message_serializers.py:99 +#: community/apps/application/swagger_api/application_api.py:210 +#: community/apps/application/swagger_api/application_api.py:355 +#, python-brace-format +msgid "" +"() contains the 
user's question. Answer the guessed user's question based on " +"the context ({question}) Requirement: Output a complete question and put it " +"in the tag" +msgstr "" +"()裏面是用戶問題,根據上下文回答揣測用戶問題({question}) 要求: 輸出一個補全問" +"題,並且放在標籤中" + +#: community/apps/application/chat_pipeline/step/search_dataset_step/i_search_dataset_step.py:28 +msgid "System completes question text" +msgstr "系統補全問題文本" + +#: community/apps/application/chat_pipeline/step/search_dataset_step/i_search_dataset_step.py:31 +#: community/apps/application/flow/step_node/search_dataset_node/i_search_dataset_node.py:39 +msgid "Dataset id list" +msgstr "知識庫 ID 列表" + +#: community/apps/application/chat_pipeline/step/search_dataset_step/i_search_dataset_step.py:34 +msgid "List of document ids to exclude" +msgstr "要排除的文檔 ID 列表" + +#: community/apps/application/chat_pipeline/step/search_dataset_step/i_search_dataset_step.py:37 +msgid "List of exclusion vector ids" +msgstr "排除向量 ID 列表" + +#: community/apps/application/chat_pipeline/step/search_dataset_step/i_search_dataset_step.py:40 +#: community/apps/application/flow/step_node/reranker_node/i_reranker_node.py:21 +#: community/apps/application/flow/step_node/reranker_node/i_reranker_node.py:24 +#: community/apps/application/flow/step_node/search_dataset_node/i_search_dataset_node.py:24 +#: community/apps/application/serializers/application_serializers.py:121 +#: community/apps/application/serializers/chat_serializers.py:243 +#: community/apps/application/swagger_api/application_api.py:249 +#: community/apps/application/swagger_api/application_api.py:250 +msgid "Reference segment number" +msgstr "引用分段數" + +#: community/apps/application/chat_pipeline/step/search_dataset_step/i_search_dataset_step.py:43 +#: community/apps/application/swagger_api/application_api.py:252 +#: community/apps/application/swagger_api/application_api.py:253 +msgid "Similarity" +msgstr "相似度" + +#: community/apps/application/chat_pipeline/step/search_dataset_step/i_search_dataset_step.py:46 
+#: community/apps/application/flow/step_node/search_dataset_node/i_search_dataset_node.py:30 +#: community/apps/application/serializers/application_serializers.py:129 +#: community/apps/application/serializers/application_serializers.py:590 +#: community/apps/dataset/serializers/dataset_serializers.py:576 +#| msgid "Retrieval pattern embedding|keywords|blend" +msgid "The type only supports embedding|keywords|blend" +msgstr "類型只支持 embedding|keywords|blend" + +#: community/apps/application/chat_pipeline/step/search_dataset_step/i_search_dataset_step.py:47 +#: community/apps/application/flow/step_node/search_dataset_node/i_search_dataset_node.py:31 +#: community/apps/application/serializers/application_serializers.py:130 +#: community/apps/application/serializers/application_serializers.py:591 +#: community/apps/application/swagger_api/application_api.py:259 +msgid "Retrieval Mode" +msgstr "檢索方式" + +#: community/apps/application/chat_pipeline/step/search_dataset_step/impl/base_search_dataset_step.py:31 +#: community/apps/application/serializers/application_serializers.py:84 +#: community/apps/application/serializers/application_serializers.py:1026 +#: community/apps/application/serializers/application_serializers.py:1036 +#: community/apps/application/serializers/application_serializers.py:1046 +#: community/apps/dataset/serializers/dataset_serializers.py:801 +#: community/apps/dataset/serializers/document_serializers.py:746 +#: community/apps/setting/models_provider/tools.py:23 +msgid "Model does not exist" +msgstr "模型不存在" + +#: community/apps/application/chat_pipeline/step/search_dataset_step/impl/base_search_dataset_step.py:33 +#, python-brace-format +msgid "No permission to use this model {model_name}" +msgstr "無權使用此模型 {model_name}" + +#: community/apps/application/chat_pipeline/step/search_dataset_step/impl/base_search_dataset_step.py:41 +msgid "" +"The vector model of the associated knowledge base is inconsistent and the " +"segmentation cannot be recalled." 
+msgstr "關聯知識庫的向量模型不一致,無法召回分段。" + +#: community/apps/application/chat_pipeline/step/search_dataset_step/impl/base_search_dataset_step.py:43 +msgid "The knowledge base setting is wrong, please reset the knowledge base" +msgstr "知識庫設置錯誤,請重新設置知識庫!" + +#: community/apps/application/flow/step_node/ai_chat_step_node/i_chat_node.py:21 +#: community/apps/application/flow/step_node/image_understand_step_node/i_image_understand_node.py:15 +#: community/apps/application/flow/step_node/question_node/i_question_node.py:21 +msgid "Role Setting" +msgstr "角色設置" + +#: community/apps/application/flow/step_node/ai_chat_step_node/i_chat_node.py:28 +#: community/apps/application/flow/step_node/direct_reply_node/i_reply_node.py:24 +#: community/apps/application/flow/step_node/function_lib_node/i_function_lib_node.py:29 +#: community/apps/application/flow/step_node/function_node/i_function_node.py:47 +#: community/apps/application/flow/step_node/image_generate_step_node/i_image_generate_node.py:26 +#: community/apps/application/flow/step_node/image_understand_step_node/i_image_understand_node.py:22 +#: community/apps/application/flow/step_node/question_node/i_question_node.py:26 +#: community/apps/application/flow/step_node/speech_to_text_step_node/i_speech_to_text_node.py:15 +#: community/apps/application/flow/step_node/text_to_speech_step_node/i_text_to_speech_node.py:15 +msgid "Whether to return content" +msgstr "是否返回內容" + +#: community/apps/application/flow/step_node/ai_chat_step_node/i_chat_node.py:35 +msgid "Context Type" +msgstr "內容類型" + +#: community/apps/application/flow/step_node/application_node/i_application_node.py:16 +msgid "API Input Fields" +msgstr "api 輸入字段" + +#: community/apps/application/flow/step_node/application_node/i_application_node.py:18 +msgid "User Input Fields" +msgstr "用戶輸入字段" + +#: community/apps/application/flow/step_node/application_node/i_application_node.py:19 +#: 
community/apps/application/flow/step_node/image_understand_step_node/i_image_understand_node.py:24 +#: community/apps/application/serializers/application_serializers.py:698 +#: community/apps/application/serializers/chat_message_serializers.py:274 +#: community/apps/function_lib/serializers/function_lib_serializer.py:359 +msgid "picture" +msgstr "圖片" + +#: community/apps/application/flow/step_node/application_node/i_application_node.py:20 +#: community/apps/application/flow/step_node/document_extract_node/i_document_extract_node.py:13 +#: community/apps/application/serializers/chat_message_serializers.py:275 +msgid "document" +msgstr "文檔" + +#: community/apps/application/flow/step_node/application_node/i_application_node.py:21 +#: community/apps/application/serializers/chat_message_serializers.py:276 +msgid "Audio" +msgstr "音頻" + +#: community/apps/application/flow/step_node/application_node/i_application_node.py:23 +#: community/apps/application/serializers/chat_message_serializers.py:278 +msgid "Child Nodes" +msgstr "子節點" + +#: community/apps/application/flow/step_node/application_node/i_application_node.py:24 +#: community/apps/application/flow/step_node/form_node/i_form_node.py:21 +msgid "Form Data" +msgstr "表單數據" + +#: community/apps/application/flow/step_node/application_node/i_application_node.py:58 +msgid "" +"Parameter value error: The uploaded document lacks file_id, and the document " +"upload fails" +msgstr "參數值錯誤: 上傳的文檔中缺少 file_id,文檔上傳失敗" + +#: community/apps/application/flow/step_node/application_node/i_application_node.py:67 +msgid "" +"Parameter value error: The uploaded image lacks file_id, and the image " +"upload fails" +msgstr "參數值錯誤: 上傳的圖片中缺少 file_id,圖片上傳失敗" + +#: community/apps/application/flow/step_node/application_node/i_application_node.py:77 +msgid "" +"Parameter value error: The uploaded audio lacks file_id, and the audio " +"upload fails." 
+msgstr "參數值錯誤: 上傳的音頻中缺少file_id,音頻上傳失敗" + +#: community/apps/application/flow/step_node/condition_node/i_condition_node.py:19 +#: community/apps/application/serializers/chat_serializers.py:124 +msgid "Comparator" +msgstr "比較器" + +#: community/apps/application/flow/step_node/condition_node/i_condition_node.py:20 +#: community/apps/application/swagger_api/application_api.py:271 +msgid "value" +msgstr "值" + +#: community/apps/application/flow/step_node/condition_node/i_condition_node.py:21 +msgid "Fields" +msgstr "字段" + +#: community/apps/application/flow/step_node/condition_node/i_condition_node.py:25 +msgid "Branch id" +msgstr "分支 id" + +#: community/apps/application/flow/step_node/condition_node/i_condition_node.py:26 +msgid "Branch Type" +msgstr "分支類型" + +#: community/apps/application/flow/step_node/condition_node/i_condition_node.py:27 +msgid "Condition or|and" +msgstr "條件 or|and" + +#: community/apps/application/flow/step_node/direct_reply_node/i_reply_node.py:20 +msgid "Response Type" +msgstr "響應類型" + +#: community/apps/application/flow/step_node/direct_reply_node/i_reply_node.py:21 +#: community/apps/application/flow/step_node/variable_assign_node/i_variable_assign_node.py:14 +msgid "Reference Field" +msgstr "引用字段" + +#: community/apps/application/flow/step_node/direct_reply_node/i_reply_node.py:23 +msgid "Direct answer content" +msgstr "直接回答內容" + +#: community/apps/application/flow/step_node/direct_reply_node/i_reply_node.py:30 +msgid "Reference field cannot be empty" +msgstr "引用字段不能爲空" + +#: community/apps/application/flow/step_node/direct_reply_node/i_reply_node.py:32 +msgid "Reference field error" +msgstr "引用字段錯誤" + +#: community/apps/application/flow/step_node/direct_reply_node/i_reply_node.py:35 +msgid "Content cannot be empty" +msgstr "內容不能爲空" + +#: community/apps/application/flow/step_node/form_node/i_form_node.py:19 +msgid "Form Configuration" +msgstr "表單配置" + +#: community/apps/application/flow/step_node/form_node/i_form_node.py:20 +msgid "Form 
output content" +msgstr "表單輸出內容" + +#: community/apps/application/flow/step_node/function_lib_node/i_function_lib_node.py:22 +#: community/apps/application/flow/step_node/function_node/i_function_node.py:24 +msgid "Variable Name" +msgstr "變量名" + +#: community/apps/application/flow/step_node/function_lib_node/i_function_lib_node.py:23 +#: community/apps/application/flow/step_node/function_node/i_function_node.py:34 +msgid "Variable Value" +msgstr "變量值" + +#: community/apps/application/flow/step_node/function_lib_node/i_function_lib_node.py:27 +msgid "Library ID" +msgstr "函數庫id" + +#: community/apps/application/flow/step_node/function_lib_node/i_function_lib_node.py:35 +msgid "The function has been deleted" +msgstr "函數已被刪除" + +#: community/apps/application/flow/step_node/function_node/i_function_node.py:25 +msgid "Is this field required" +msgstr "字段是否必填" + +#: community/apps/application/flow/step_node/function_node/i_function_node.py:28 +msgid "The field only supports string|int|dict|array|float" +msgstr "字段只支持 string|int|dict|array|float" + +#: community/apps/application/flow/step_node/function_node/i_function_node.py:30 +#: community/apps/function_lib/serializers/function_lib_serializer.py:76 +#: community/apps/function_lib/swagger_api/function_lib_api.py:98 +#: community/apps/function_lib/swagger_api/function_lib_api.py:144 +#: community/apps/function_lib/swagger_api/function_lib_api.py:190 +msgid "source" +msgstr "來源" + +#: community/apps/application/flow/step_node/function_node/i_function_node.py:32 +#: community/apps/function_lib/serializers/function_lib_serializer.py:78 +msgid "The field only supports custom|reference" +msgstr "字段只支持 custom|reference" + +#: community/apps/application/flow/step_node/function_node/i_function_node.py:40 +#, python-brace-format +msgid "{field}, this field is required." 
+msgstr "{field}, 此字段爲必填項。" + +#: community/apps/application/flow/step_node/function_node/i_function_node.py:46 +#: community/apps/function_lib/views/function_lib_views.py:131 +#: community/apps/function_lib/views/function_lib_views.py:145 +msgid "function" +msgstr "函數" + +#: community/apps/application/flow/step_node/image_generate_step_node/i_image_generate_node.py:15 +msgid "Prompt word (positive)" +msgstr "提示詞(正向)" + +#: community/apps/application/flow/step_node/image_generate_step_node/i_image_generate_node.py:17 +msgid "Prompt word (negative)" +msgstr "提示詞(負向)" + +#: community/apps/application/flow/step_node/image_generate_step_node/i_image_generate_node.py:24 +#: community/apps/application/flow/step_node/image_understand_step_node/i_image_understand_node.py:20 +msgid "Conversation storage type" +msgstr "對話存儲類型" + +#: community/apps/application/flow/step_node/reranker_node/i_reranker_node.py:26 +#: community/apps/application/flow/step_node/search_dataset_node/i_search_dataset_node.py:33 +msgid "Maximum number of words in a quoted segment" +msgstr "最大引用分段字數" + +#: community/apps/application/flow/step_node/search_dataset_node/i_search_dataset_node.py:27 +#: community/apps/common/swagger_api/common_api.py:36 +#: community/apps/dataset/serializers/dataset_serializers.py:573 +msgid "similarity" +msgstr "相似度" + +#: community/apps/application/flow/step_node/speech_to_text_step_node/i_speech_to_text_node.py:17 +msgid "The audio file cannot be empty" +msgstr "音頻文件不能爲空" + +#: community/apps/application/flow/step_node/speech_to_text_step_node/i_speech_to_text_node.py:31 +msgid "" +"Parameter value error: The uploaded audio lacks file_id, and the audio " +"upload fails" +msgstr "參數值錯誤:上傳的音頻缺少file_id,音頻上傳失敗" + +#: community/apps/application/flow/step_node/text_to_speech_step_node/i_text_to_speech_node.py:17 +msgid "Text content" +msgstr "文本內容" + +#: community/apps/application/flow/workflow_manage.py:107 +#, python-brace-format +msgid "The branch {branch} of the {node} node 
needs to be connected" +msgstr "{node}節點的{branch}分支需要連接" + +#: community/apps/application/flow/workflow_manage.py:113 +#, python-brace-format +msgid "{node} Nodes cannot be considered as end nodes" +msgstr "{node}節點不能當做結束節點" + +#: community/apps/application/flow/workflow_manage.py:123 +msgid "The next node that does not exist" +msgstr "不存在的下一個節點" + +#: community/apps/application/flow/workflow_manage.py:137 +msgid "The starting node is required" +msgstr "開始節點必填" + +#: community/apps/application/flow/workflow_manage.py:139 +msgid "There can only be one starting node" +msgstr "開始節點只能有一個" + +#: community/apps/application/flow/workflow_manage.py:147 +#, python-brace-format +msgid "The node {node} model does not exist" +msgstr "節點{node}模型不存在" + +#: community/apps/application/flow/workflow_manage.py:157 +#, python-brace-format +msgid "Node {node} is unavailable" +msgstr "節點{node}不可用" + +#: community/apps/application/flow/workflow_manage.py:163 +#, python-brace-format +msgid "The library ID of node {node} cannot be empty" +msgstr "節點{node}函式程式庫id不能為空" + +#: community/apps/application/flow/workflow_manage.py:166 +#, python-brace-format +msgid "The function library for node {node} is not available" +msgstr "節點{node}函式程式庫不可用" + +#: community/apps/application/flow/workflow_manage.py:172 +msgid "Basic information node is required" +msgstr "基本資訊節點必填" + +#: community/apps/application/flow/workflow_manage.py:174 +msgid "There can only be one basic information node" +msgstr "基本資訊節點只能有一個" + +#: community/apps/application/serializers/application_serializers.py:75 +#: community/apps/application/serializers/chat_serializers.py:618 +#: community/apps/application/serializers/chat_serializers.py:677 +#: community/apps/application/serializers/chat_serializers.py:709 +#: community/apps/application/swagger_api/chat_api.py:365 +#: community/apps/application/swagger_api/chat_api.py:393 +#: community/apps/application/swagger_api/chat_api.py:394 +#: 
community/apps/application/swagger_api/chat_api.py:415 +#: community/apps/application/swagger_api/chat_api.py:494 +#: community/apps/application/swagger_api/chat_api.py:495 +msgid "Knowledge base id" +msgstr "知識庫 id" + +#: community/apps/application/serializers/application_serializers.py:76 +msgid "Knowledge Base List" +msgstr "知識庫列表" + +#: community/apps/application/serializers/application_serializers.py:90 +msgid "The knowledge base id does not exist" +msgstr "知識庫 id 不存在" + +#: community/apps/application/serializers/application_serializers.py:107 +msgid "No reference status" +msgstr "無引用狀態" + +#: community/apps/application/serializers/application_serializers.py:123 +msgid "Acquaintance" +msgstr "相似度" + +#: community/apps/application/serializers/application_serializers.py:126 +#: community/apps/application/swagger_api/application_api.py:256 +#: community/apps/application/swagger_api/application_api.py:257 +msgid "Maximum number of quoted characters" +msgstr "最多引用字符數" + +#: community/apps/application/serializers/application_serializers.py:133 +msgid "Segment settings not referenced" +msgstr "未引用分段設置" + +#: community/apps/application/serializers/application_serializers.py:140 +msgid "Role prompts" +msgstr "角色提示詞" + +#: community/apps/application/serializers/application_serializers.py:142 +#: community/apps/application/swagger_api/application_api.py:303 +#: community/apps/application/swagger_api/application_api.py:305 +msgid "No citation segmentation prompt" +msgstr "無引用分段提示詞" + +#: community/apps/application/serializers/application_serializers.py:144 +msgid "Thinking process switch" +msgstr "思考過程開關" + +#: community/apps/application/serializers/application_serializers.py:148 +msgid "The thinking process begins to mark" +msgstr "思考過程開始標記" + +#: community/apps/application/serializers/application_serializers.py:151 +msgid "End of thinking process marker" +msgstr "思考過程結束標記" + +#: community/apps/application/serializers/application_serializers.py:156 +#: 
community/apps/application/serializers/application_serializers.py:482 +#: community/apps/application/serializers/application_serializers.py:623 +#: community/apps/application/swagger_api/application_api.py:49 +#: community/apps/application/swagger_api/application_api.py:50 +#: community/apps/application/swagger_api/application_api.py:181 +#: community/apps/application/swagger_api/application_api.py:182 +#: community/apps/application/swagger_api/application_api.py:330 +#: community/apps/application/swagger_api/application_api.py:331 +#: community/apps/application/swagger_api/application_api.py:377 +msgid "Application Name" +msgstr "應用名稱" + +#: community/apps/application/serializers/application_serializers.py:159 +#: community/apps/application/serializers/application_serializers.py:484 +#: community/apps/application/serializers/application_serializers.py:625 +#: community/apps/application/swagger_api/application_api.py:51 +#: community/apps/application/swagger_api/application_api.py:52 +#: community/apps/application/swagger_api/application_api.py:183 +#: community/apps/application/swagger_api/application_api.py:184 +#: community/apps/application/swagger_api/application_api.py:332 +#: community/apps/application/swagger_api/application_api.py:333 +#: community/apps/application/swagger_api/application_api.py:382 +msgid "Application Description" +msgstr "應用描述" + +#: community/apps/application/serializers/application_serializers.py:160 +msgid "Workflow Objects" +msgstr "工作流對象" + +#: community/apps/application/serializers/application_serializers.py:162 +#: community/apps/application/serializers/application_serializers.py:225 +#: community/apps/application/serializers/application_serializers.py:492 +#: community/apps/application/swagger_api/application_api.py:57 +#: community/apps/application/swagger_api/application_api.py:58 +#: community/apps/application/swagger_api/application_api.py:190 +#: community/apps/application/swagger_api/application_api.py:191 +#: 
community/apps/application/swagger_api/application_api.py:339 +#: community/apps/application/swagger_api/application_api.py:340 +msgid "Opening remarks" +msgstr "開場白" + +#: community/apps/application/serializers/application_serializers.py:214 +#: community/apps/dataset/serializers/dataset_serializers.py:105 +#: community/apps/dataset/serializers/dataset_serializers.py:106 +msgid "application name" +msgstr "應用名稱" + +#: community/apps/application/serializers/application_serializers.py:217 +msgid "application describe" +msgstr "應用描述" + +#: community/apps/application/serializers/application_serializers.py:219 +#: community/apps/application/serializers/application_serializers.py:486 +msgid "Model" +msgstr "模型" + +#: community/apps/application/serializers/application_serializers.py:223 +#: community/apps/application/serializers/application_serializers.py:490 +msgid "Historical chat records" +msgstr "歷史聊天記錄" + +#: community/apps/application/serializers/application_serializers.py:228 +#: community/apps/application/serializers/application_serializers.py:494 +msgid "Related Knowledge Base" +msgstr "關聯知識庫" + +#: community/apps/application/serializers/application_serializers.py:235 +#: community/apps/application/serializers/application_serializers.py:504 +#: community/apps/application/serializers/chat_serializers.py:379 +msgid "Question completion" +msgstr "問題補全" + +#: community/apps/application/serializers/application_serializers.py:239 +#: community/apps/application/swagger_api/application_api.py:203 +#: community/apps/application/swagger_api/application_api.py:349 +msgid "Application Type" +msgstr "應用類型" + +#: community/apps/application/serializers/application_serializers.py:243 +msgid "Application type only supports SIMPLE|WORK_FLOW" +msgstr "應用類型只支持 SIMPLE|WORK_FLOW" + +#: community/apps/application/serializers/application_serializers.py:247 +#: community/apps/application/serializers/application_serializers.py:508 +msgid "Model parameters" +msgstr "模型參數" + +#: 
community/apps/application/serializers/application_serializers.py:255 +msgid "Host" +msgstr "主機" + +#: community/apps/application/serializers/application_serializers.py:256 +msgid "protocol" +msgstr "協議" + +#: community/apps/application/serializers/application_serializers.py:339 +#: community/apps/application/swagger_api/application_api.py:153 +#: community/apps/application/swagger_api/application_api.py:154 +msgid "Reset Token" +msgstr "重置 Token" + +#: community/apps/application/serializers/application_serializers.py:340 +msgid "Is it enabled" +msgstr "是否開啓" + +#: community/apps/application/serializers/application_serializers.py:343 +#: community/apps/application/swagger_api/application_api.py:158 +#: community/apps/application/swagger_api/application_api.py:159 +msgid "Number of visits" +msgstr "訪問次數" + +#: community/apps/application/serializers/application_serializers.py:345 +#: community/apps/application/swagger_api/application_api.py:160 +#: community/apps/application/swagger_api/application_api.py:161 +msgid "Whether to enable whitelist" +msgstr "是否開啓白名單" + +#: community/apps/application/serializers/application_serializers.py:348 +#: community/apps/application/serializers/application_serializers.py:349 +#: community/apps/application/swagger_api/application_api.py:163 +#: community/apps/application/swagger_api/application_api.py:164 +msgid "Whitelist" +msgstr "白名單" + +#: community/apps/application/serializers/application_serializers.py:352 +#: community/apps/application/swagger_api/application_api.py:166 +#: community/apps/application/swagger_api/application_api.py:167 +msgid "Whether to display knowledge sources" +msgstr "是否顯示知識來源" + +#: community/apps/application/serializers/application_serializers.py:423 +msgid "access_token" +msgstr "access_token" + +#: community/apps/application/serializers/application_serializers.py:425 +msgid "Certification Information" +msgstr "認證信息" + +#: community/apps/application/serializers/application_serializers.py:462 +msgid 
"Invalid access_token" +msgstr "無效的access_token" + +#: community/apps/application/serializers/application_serializers.py:473 +msgid "Wrong password" +msgstr "密碼錯誤" + +#: community/apps/application/serializers/application_serializers.py:498 +msgid "Dataset settings" +msgstr "知識庫設置" + +#: community/apps/application/serializers/application_serializers.py:501 +msgid "Model setup" +msgstr "模型設置" + +#: community/apps/application/serializers/application_serializers.py:505 +msgid "Icon" +msgstr "icon 圖標" + +#: community/apps/application/serializers/application_serializers.py:515 +#: community/apps/application/serializers/application_serializers.py:722 +#: community/apps/setting/serializers/valid_serializers.py:29 +msgid "" +"The community version supports up to 5 applications. If you need more " +"applications, please contact us (https://fit2cloud.com/)." +msgstr "" +"社區版最多支持 5 個應用,如需擁有更多應用,請聯繫我們(https://" +"fit2cloud.com/)" + +#: community/apps/application/serializers/application_serializers.py:583 +msgid "Query text" +msgstr "查詢文本" + +#: community/apps/application/serializers/application_serializers.py:585 +msgid "topN" +msgstr "topN" + +#: community/apps/application/serializers/application_serializers.py:587 +msgid "Relevance" +msgstr "相似度" + +#: community/apps/application/serializers/application_serializers.py:596 +#: community/apps/application/serializers/application_serializers.py:705 +#: community/apps/application/serializers/application_serializers.py:797 +msgid "Application id does not exist" +msgstr "應用 ID 不存在" + +#: community/apps/application/serializers/application_serializers.py:628 +msgid "Select User ID" +msgstr "選擇用戶 ID" + +#: community/apps/application/serializers/application_serializers.py:717 +#: community/apps/dataset/serializers/document_serializers.py:164 +#: community/apps/dataset/serializers/document_serializers.py:213 +#: community/apps/dataset/serializers/document_serializers.py:220 +#: community/apps/dataset/serializers/file_serializers.py:59 +#: 
community/apps/dataset/views/file.py:35 +#: community/apps/dataset/views/file.py:44 +#: community/apps/function_lib/serializers/function_lib_serializer.py:331 +msgid "file" +msgstr "文件" + +#: community/apps/application/serializers/application_serializers.py:732 +#: community/apps/common/handle/impl/qa/zip_parse_qa_handle.py:62 +#: community/apps/common/handle/impl/zip_split_handle.py:56 +#: community/apps/dataset/serializers/document_serializers.py:874 +#: community/apps/dataset/serializers/document_serializers.py:882 +#: community/apps/function_lib/serializers/function_lib_serializer.py:343 +msgid "Unsupported file format" +msgstr "文件格式不支持" + +#: community/apps/application/serializers/application_serializers.py:872 +msgid "work_flow is a required field" +msgstr "work_flow是必填字段" + +#: community/apps/application/serializers/application_serializers.py:934 +#: community/apps/application/serializers/application_serializers.py:1076 +#, python-brace-format +msgid "Unknown knowledge base id {dataset_id}, unable to associate" +msgstr "未知的知識庫 id {dataset_id},無法關聯" + +#: community/apps/application/serializers/application_serializers.py:954 +msgid "Illegal User" +msgstr "非法用戶" + +#: community/apps/application/serializers/application_serializers.py:1028 +#: community/apps/application/serializers/application_serializers.py:1038 +#: community/apps/application/serializers/application_serializers.py:1048 +#, python-brace-format +msgid "No permission to use this model:{model_name}" +msgstr "用戶沒有使用該模型:{model_name}的權限" + +#: community/apps/application/serializers/application_serializers.py:1259 +#: community/apps/application/swagger_api/chat_api.py:498 +#: community/apps/application/swagger_api/chat_api.py:499 +msgid "Availability" +msgstr "是否可用" + +#: community/apps/application/serializers/application_serializers.py:1263 +#: community/apps/application/swagger_api/application_api.py:129 +#: community/apps/application/swagger_api/application_api.py:130 +msgid "Is cross-domain allowed" 
+msgstr "是否允許跨域" + +#: community/apps/application/serializers/application_serializers.py:1268 +msgid "Cross-domain address" +msgstr "跨域地址" + +#: community/apps/application/serializers/application_serializers.py:1269 +#: community/apps/application/swagger_api/application_api.py:131 +msgid "Cross-domain list" +msgstr "跨域列表" + +#: community/apps/application/serializers/application_serializers.py:1274 +msgid "ApiKeyid" +msgstr "ApiKey ID" + +#: community/apps/application/serializers/application_serializers.py:1295 +msgid "APIKey does not exist" +msgstr "APIKey 不存在" + +#: community/apps/application/serializers/application_version_serializers.py:30 +#: community/apps/application/swagger_api/application_version_api.py:24 +#: community/apps/application/swagger_api/application_version_api.py:25 +#: community/apps/application/swagger_api/application_version_api.py:47 +#: community/apps/application/swagger_api/application_version_api.py:70 +#: community/apps/application/swagger_api/application_version_api.py:71 +msgid "Version Name" +msgstr "版本名稱" + +#: community/apps/application/serializers/application_version_serializers.py:37 +#: community/apps/application/serializers/chat_serializers.py:115 +#: community/apps/application/serializers/chat_serializers.py:240 +msgid "summary" +msgstr "摘要" + +#: community/apps/application/serializers/application_version_serializers.py:61 +msgid "Workflow version id" +msgstr "工作流版本 id" + +#: community/apps/application/serializers/application_version_serializers.py:71 +#: community/apps/application/serializers/application_version_serializers.py:86 +msgid "Workflow version does not exist" +msgstr "工作流版本不存在" + +#: community/apps/application/serializers/chat_message_serializers.py:195 +#: community/apps/dataset/serializers/paragraph_serializers.py:47 +#: community/apps/dataset/serializers/paragraph_serializers.py:180 +#: community/apps/dataset/serializers/paragraph_serializers.py:692 +#: community/apps/dataset/serializers/paragraph_serializers.py:705 +#: 
community/apps/dataset/serializers/paragraph_serializers.py:706 +#: community/apps/dataset/serializers/problem_serializers.py:41 +#: community/apps/dataset/serializers/problem_serializers.py:52 +#: community/apps/dataset/serializers/problem_serializers.py:113 +#: community/apps/dataset/swagger_api/problem_api.py:24 +#: community/apps/dataset/swagger_api/problem_api.py:25 +#: community/apps/dataset/swagger_api/problem_api.py:109 +#: community/apps/dataset/swagger_api/problem_api.py:110 +#: community/apps/dataset/swagger_api/problem_api.py:126 +#: community/apps/dataset/swagger_api/problem_api.py:127 +#: community/apps/dataset/swagger_api/problem_api.py:154 +#: community/apps/dataset/swagger_api/problem_api.py:169 +msgid "content" +msgstr "內容" + +#: community/apps/application/serializers/chat_message_serializers.py:196 +#: community/apps/setting/serializers/team_serializers.py:45 +#: community/apps/users/serializers/user_serializers.py:472 +#: community/apps/users/serializers/user_serializers.py:495 +#: community/apps/users/serializers/user_serializers.py:586 +msgid "Role" +msgstr "角色" + +#: community/apps/application/serializers/chat_message_serializers.py:202 +msgid "Regenerate" +msgstr "重新生成" + +#: community/apps/application/serializers/chat_message_serializers.py:256 +msgid "Is the answer in streaming mode" +msgstr "是否流式回答" + +#: community/apps/application/serializers/chat_message_serializers.py:257 +msgid "Do you want to reply again" +msgstr "是否重新回答" + +#: community/apps/application/serializers/chat_message_serializers.py:259 +#: community/apps/application/serializers/chat_serializers.py:442 +#: community/apps/application/serializers/chat_serializers.py:534 +#: community/apps/application/serializers/chat_serializers.py:590 +#: community/apps/application/serializers/chat_serializers.py:616 +#: community/apps/application/serializers/chat_serializers.py:675 +#: community/apps/application/swagger_api/chat_api.py:148 +#: 
community/apps/application/swagger_api/chat_api.py:149 +#: community/apps/application/swagger_api/chat_api.py:360 +#: community/apps/application/swagger_api/chat_api.py:437 +#: community/apps/application/swagger_api/chat_api.py:470 +msgid "Conversation record id" +msgstr "對話記錄 ID" + +#: community/apps/application/serializers/chat_message_serializers.py:262 +msgid "Node id" +msgstr "節點 ID" + +#: community/apps/application/serializers/chat_message_serializers.py:265 +#: community/apps/application/swagger_api/chat_api.py:142 +#: community/apps/application/swagger_api/chat_api.py:143 +msgid "Runtime node id" +msgstr "運行時節點 ID" + +#: community/apps/application/serializers/chat_message_serializers.py:268 +msgid "Node parameters" +msgstr "節點參數" + +#: community/apps/application/serializers/chat_message_serializers.py:273 +msgid "Global variables" +msgstr "全局變量" + +#: community/apps/application/serializers/chat_message_serializers.py:286 +#: community/apps/application/serializers/chat_message_serializers.py:421 +#: community/apps/application/serializers/chat_serializers.py:469 +msgid "Conversation does not exist" +msgstr "對話不存在" + +#: community/apps/application/serializers/chat_message_serializers.py:303 +msgid "The number of visits exceeds today's visits" +msgstr "訪問次數超過今日訪問量" + +#: community/apps/application/serializers/chat_message_serializers.py:314 +msgid "The current model is not available" +msgstr "當前模型不可用" + +#: community/apps/application/serializers/chat_message_serializers.py:316 +msgid "The model is downloading, please try again later" +msgstr "模型正在下載中,請稍後再試" + +#: community/apps/application/serializers/chat_message_serializers.py:361 +#: community/apps/application/serializers/chat_serializers.py:599 +#: community/apps/application/serializers/chat_serializers.py:645 +#: community/apps/application/serializers/chat_serializers.py:694 +msgid "Conversation record does not exist" +msgstr "對話記錄不存在" + +#: 
community/apps/application/serializers/chat_message_serializers.py:454 +#: community/apps/application/serializers/chat_serializers.py:314 +msgid "The application has not been published. Please use it after publishing." +msgstr "應用未發佈,請發佈後使用" + +#: community/apps/application/serializers/chat_serializers.py:55 +msgid "node" +msgstr "節點" + +#: community/apps/application/serializers/chat_serializers.py:56 +msgid "Connection" +msgstr "連線" + +#: community/apps/application/serializers/chat_serializers.py:71 +#: community/apps/application/swagger_api/chat_api.py:48 +#: community/apps/application/swagger_api/chat_api.py:49 +#: community/apps/application/swagger_api/chat_api.py:169 +#: community/apps/application/swagger_api/chat_api.py:170 +#: community/apps/application/swagger_api/chat_api.py:256 +msgid "abstract" +msgstr "摘要" + +#: community/apps/application/serializers/chat_serializers.py:121 +#: community/apps/application/swagger_api/chat_api.py:258 +msgid "Minimum number of likes" +msgstr "最小點贊數" + +#: community/apps/application/serializers/chat_serializers.py:123 +#: community/apps/application/swagger_api/chat_api.py:260 +msgid "Minimum number of clicks" +msgstr "最小點踩數" + +#: community/apps/application/serializers/chat_serializers.py:126 +msgid "Only supports and|or" +msgstr "只支持 and|or" + +#: community/apps/application/serializers/chat_serializers.py:241 +msgid "Problem after optimization" +msgstr "問題優化" + +#: community/apps/application/serializers/chat_serializers.py:242 +msgid "answer" +msgstr "回答" + +#: community/apps/application/serializers/chat_serializers.py:242 +msgid "User feedback" +msgstr "用戶回饋" + +#: community/apps/application/serializers/chat_serializers.py:244 +msgid "Section title + content" +msgstr "分段標題+內容" + +#: community/apps/application/serializers/chat_serializers.py:245 +#: community/apps/application/views/chat_views.py:385 +#: community/apps/application/views/chat_views.py:386 +msgid "Annotation" +msgstr "標註" + +#: 
community/apps/application/serializers/chat_serializers.py:245 +msgid "Consuming tokens" +msgstr "消耗tokens" + +#: community/apps/application/serializers/chat_serializers.py:245 +msgid "Time consumed (s)" +msgstr "耗時(s)" + +#: community/apps/application/serializers/chat_serializers.py:246 +msgid "Question Time" +msgstr "提問時間" + +#: community/apps/application/serializers/chat_serializers.py:337 +msgid "Workflow" +msgstr "工作流" + +#: community/apps/application/serializers/chat_serializers.py:369 +msgid "Multi-round conversation" +msgstr "多輪對話" + +#: community/apps/application/serializers/chat_serializers.py:372 +msgid "Related Datasets" +msgstr "關聯數據集" + +#: community/apps/application/serializers/chat_serializers.py:449 +#| msgid "Application authentication token" +msgid "Application authentication information does not exist" +msgstr "應用認證信息不存在" + +#: community/apps/application/serializers/chat_serializers.py:451 +#| msgid "Whether to display knowledge sources" +msgid "Displaying knowledge sources is not enabled" +msgstr "是否顯示知識來源未開啓" + +#: community/apps/application/serializers/chat_serializers.py:537 +msgid "Bidding Status" +msgstr "投票狀態" + +#: community/apps/application/serializers/chat_serializers.py:546 +#| msgid "The task is being executed, please do not send it repeatedly." 
+msgid "" +"Voting on the current session minutes, please do not send repeated requests" +msgstr "當前會話正在投票中,請勿重複發送請求" + +#: community/apps/application/serializers/chat_serializers.py:551 +#| msgid "Get a list of conversation records" +msgid "Non-existent conversation chat_record_id" +msgstr "不存在的對話 chat_record_id" + +#: community/apps/application/serializers/chat_serializers.py:568 +#| msgid "Already associated, please do not associate again" +msgid "Already voted, please cancel first and then vote again" +msgstr "已投票,請先取消再重新投票" + +#: community/apps/application/serializers/chat_serializers.py:575 +#: community/apps/application/swagger_api/chat_api.py:379 +#: community/apps/application/swagger_api/chat_api.py:380 +#: community/apps/dataset/swagger_api/problem_api.py:128 +#: community/apps/dataset/swagger_api/problem_api.py:129 +msgid "Section title" +msgstr "段落標題" + +#: community/apps/application/serializers/chat_serializers.py:576 +#: community/apps/application/swagger_api/chat_api.py:381 +#: community/apps/application/swagger_api/chat_api.py:382 +#: community/apps/application/swagger_api/chat_api.py:483 +#: community/apps/application/swagger_api/chat_api.py:484 +#: community/apps/common/swagger_api/common_api.py:57 +#: community/apps/common/swagger_api/common_api.py:58 +msgid "Paragraph content" +msgstr "段落內容" + +#: community/apps/application/serializers/chat_serializers.py:620 +#: community/apps/application/serializers/chat_serializers.py:679 +#: community/apps/application/serializers/chat_serializers.py:710 +#: community/apps/application/swagger_api/chat_api.py:370 +#: community/apps/application/swagger_api/chat_api.py:395 +#: community/apps/application/swagger_api/chat_api.py:396 +#: community/apps/application/swagger_api/chat_api.py:496 +#: community/apps/application/swagger_api/chat_api.py:497 +msgid "Document id" +msgstr "文檔 ID" + +#: community/apps/application/serializers/chat_serializers.py:626 +#: 
community/apps/application/serializers/chat_serializers.py:717 +#: community/apps/dataset/serializers/paragraph_serializers.py:576 +msgid "The document id is incorrect" +msgstr "文檔 id 不正確" + +#: community/apps/application/serializers/chat_serializers.py:681 +#: community/apps/application/swagger_api/chat_api.py:310 +#: community/apps/application/swagger_api/chat_api.py:311 +msgid "Paragraph id" +msgstr "段落 ID" + +#: community/apps/application/serializers/chat_serializers.py:697 +#, python-brace-format +msgid "" +"The paragraph id is wrong. The current conversation record does not exist. " +"[{paragraph_id}] paragraph id" +msgstr "段落id錯誤。當前對話記錄不存在。[{paragraph_id}] 段落id" + +#: community/apps/application/serializers/chat_serializers.py:736 +#| msgid "Conversation record does not exist" +msgid "Conversation records that do not exist" +msgstr "對話記錄不存在" + +#: community/apps/application/swagger_api/application_api.py:24 +#: community/apps/application/views/chat_views.py:470 +#: community/apps/application/views/chat_views.py:471 +msgid "Upload files" +msgstr "上傳文件" + +#: community/apps/application/swagger_api/application_api.py:35 +#: community/apps/application/swagger_api/application_api.py:36 +msgid "Application authentication token" +msgstr "應用認證 token" + +#: community/apps/application/swagger_api/application_api.py:48 +#: community/apps/application/swagger_api/application_version_api.py:22 +#: community/apps/application/swagger_api/application_version_api.py:23 +msgid "Primary key id" +msgstr "主鍵 id" + +#: community/apps/application/swagger_api/application_api.py:60 +msgid "Example List" +msgstr "示例列表" + +#: community/apps/application/swagger_api/application_api.py:61 +#: community/apps/application/swagger_api/application_api.py:62 +msgid "Affiliation user" +msgstr "所屬用戶" + +#: community/apps/application/swagger_api/application_api.py:64 +msgid "Is publish" +msgstr "是否發佈" + +#: community/apps/application/swagger_api/application_api.py:66 +#: 
community/apps/application/swagger_api/application_api.py:67 +#: community/apps/application/swagger_api/application_version_api.py:28 +#: community/apps/application/swagger_api/application_version_api.py:29 +#: community/apps/application/swagger_api/chat_api.py:185 +#: community/apps/application/swagger_api/chat_api.py:186 +#: community/apps/application/swagger_api/chat_api.py:335 +#: community/apps/application/swagger_api/chat_api.py:336 +#: community/apps/application/swagger_api/chat_api.py:503 +#: community/apps/application/swagger_api/chat_api.py:504 +msgid "Creation time" +msgstr "創建時間" + +#: community/apps/application/swagger_api/application_api.py:69 +#: community/apps/application/swagger_api/application_api.py:70 +#: community/apps/application/swagger_api/application_version_api.py:30 +#: community/apps/application/swagger_api/application_version_api.py:31 +#: community/apps/application/swagger_api/chat_api.py:332 +#: community/apps/application/swagger_api/chat_api.py:333 +#: community/apps/application/swagger_api/chat_api.py:500 +#: community/apps/application/swagger_api/chat_api.py:501 +msgid "Modification time" +msgstr "修改時間" + +#: community/apps/application/swagger_api/application_api.py:74 +#: community/apps/application/swagger_api/application_api.py:194 +#: community/apps/application/swagger_api/application_api.py:195 +#: community/apps/application/swagger_api/application_api.py:343 +#: community/apps/application/swagger_api/application_api.py:344 +#: community/apps/application/swagger_api/chat_api.py:229 +#: community/apps/application/swagger_api/chat_api.py:230 +msgid "List of associated knowledge base IDs" +msgstr "關聯知識庫 ID 列表" + +#: community/apps/application/swagger_api/application_api.py:76 +msgid "List of associated knowledge base IDs (returned when querying details)" +msgstr "關聯知識庫ID列表(查詢詳情時返回)" + +#: community/apps/application/swagger_api/application_api.py:91 +msgid "Model Type" +msgstr "模型類型" + +#: 
community/apps/application/swagger_api/application_api.py:117 +msgid "Application api_key id" +msgstr "應用 api_key id" + +#: community/apps/application/swagger_api/application_api.py:126 +#: community/apps/application/swagger_api/application_api.py:127 +#: community/apps/application/swagger_api/application_api.py:156 +#: community/apps/application/swagger_api/application_api.py:157 +msgid "Is activation" +msgstr "是否可用" + +#: community/apps/application/swagger_api/application_api.py:198 +#: community/apps/application/swagger_api/application_api.py:347 +#: community/apps/application/swagger_api/application_api.py:348 +msgid "Problem Optimization" +msgstr "問題優化" + +#: community/apps/application/swagger_api/application_api.py:199 +msgid "Whether to enable problem optimization" +msgstr "是否開啓問題優化" + +#: community/apps/application/swagger_api/application_api.py:204 +#: community/apps/application/swagger_api/application_api.py:350 +msgid "Application Type SIMPLE | WORK_FLOW" +msgstr "應用類型 SIMPLE | WORK_FLOW" + +#: community/apps/application/swagger_api/application_api.py:207 +#: community/apps/application/swagger_api/application_api.py:208 +#: community/apps/application/swagger_api/application_api.py:352 +#: community/apps/application/swagger_api/application_api.py:353 +msgid "Question optimization tips" +msgstr "問題優化提示詞" + +#: community/apps/application/swagger_api/application_api.py:211 +#: community/apps/application/swagger_api/application_api.py:212 +#: community/apps/application/swagger_api/application_api.py:356 +#: community/apps/application/swagger_api/application_api.py:357 +msgid "Text-to-speech model ID" +msgstr "文本轉語音模型 ID" + +#: community/apps/application/swagger_api/application_api.py:213 +#: community/apps/application/swagger_api/application_api.py:214 +#: community/apps/application/swagger_api/application_api.py:358 +#: community/apps/application/swagger_api/application_api.py:359 +msgid "Speech-to-text model id" +msgstr "語音轉文本模型 ID" + +#: 
community/apps/application/swagger_api/application_api.py:215 +#: community/apps/application/swagger_api/application_api.py:216 +#: community/apps/application/swagger_api/application_api.py:360 +#: community/apps/application/swagger_api/application_api.py:361 +msgid "Is speech-to-text enabled" +msgstr "是否開啓語音轉文本" + +#: community/apps/application/swagger_api/application_api.py:217 +#: community/apps/application/swagger_api/application_api.py:218 +#: community/apps/application/swagger_api/application_api.py:362 +#: community/apps/application/swagger_api/application_api.py:363 +msgid "Is text-to-speech enabled" +msgstr "是否開啓文本轉語音" + +#: community/apps/application/swagger_api/application_api.py:219 +#: community/apps/application/swagger_api/application_api.py:220 +#: community/apps/application/swagger_api/application_api.py:364 +#: community/apps/application/swagger_api/application_api.py:365 +msgid "Text-to-speech type" +msgstr "文本轉語音類型" + +#: community/apps/application/swagger_api/application_api.py:233 +msgid "Node List" +msgstr "節點列表" + +#: community/apps/application/swagger_api/application_api.py:236 +msgid "Connection List" +msgstr "連線列表" + +#: community/apps/application/swagger_api/application_api.py:266 +msgid "state" +msgstr "狀態" + +#: community/apps/application/swagger_api/application_api.py:268 +msgid "ai_questioning|designated_answer" +msgstr "ai作答|指定答案" + +#: community/apps/application/swagger_api/application_api.py:273 +msgid "" +"ai_questioning: is the title, designated_answer: is the designated answer " +"content" +msgstr "ai作答:就是題詞,指定回答:就是指定回答內容" + +#: community/apps/application/swagger_api/application_api.py:403 +#: community/apps/function_lib/swagger_api/function_lib_api.py:216 +msgid "Upload image files" +msgstr "上傳圖片文件" + +#: community/apps/application/swagger_api/application_api.py:434 +#: community/apps/application/swagger_api/application_api.py:435 +msgid "Text" +msgstr "文字" + +#: 
community/apps/application/swagger_api/application_statistics_api.py:41 +#: community/apps/application/swagger_api/application_statistics_api.py:42 +#: community/apps/application/swagger_api/chat_api.py:490 +#: community/apps/application/swagger_api/chat_api.py:491 +msgid "Number of Likes" +msgstr "點贊數" + +#: community/apps/application/swagger_api/application_statistics_api.py:44 +#: community/apps/application/swagger_api/chat_api.py:492 +#: community/apps/application/swagger_api/chat_api.py:493 +msgid "Number of thumbs-downs" +msgstr "點踩數" + +#: community/apps/application/swagger_api/application_statistics_api.py:45 +#: community/apps/application/swagger_api/application_statistics_api.py:46 +msgid "Number of tokens used" +msgstr "token使用數量" + +#: community/apps/application/swagger_api/application_statistics_api.py:47 +#: community/apps/application/swagger_api/application_statistics_api.py:48 +msgid "Number of conversations" +msgstr "對話次數" + +#: community/apps/application/swagger_api/application_statistics_api.py:49 +#: community/apps/application/swagger_api/application_statistics_api.py:50 +msgid "Number of customers" +msgstr "客戶數量" + +#: community/apps/application/swagger_api/application_statistics_api.py:51 +#: community/apps/application/swagger_api/application_statistics_api.py:52 +msgid "Number of new customers" +msgstr "客戶新增數量" + +#: community/apps/application/swagger_api/application_statistics_api.py:54 +#: community/apps/application/swagger_api/application_statistics_api.py:69 +#: community/apps/application/swagger_api/application_statistics_api.py:70 +msgid "time" +msgstr "日期" + +#: community/apps/application/swagger_api/application_statistics_api.py:55 +msgid "Time, this field is only available when querying trends" +msgstr "日期,只有查詢趨勢的時候纔有該字段" + +#: community/apps/application/swagger_api/application_statistics_api.py:66 +#: community/apps/application/swagger_api/application_statistics_api.py:83 +msgid "New quantity" +msgstr "新增數量" + +#: 
community/apps/application/swagger_api/application_statistics_api.py:81 +#: community/apps/application/swagger_api/application_statistics_api.py:82 +msgid "Today's new quantity" +msgstr "今日新增數量" + +#: community/apps/application/swagger_api/application_version_api.py:26 +#: community/apps/application/swagger_api/application_version_api.py:27 +msgid "Workflow data" +msgstr "工作流數據" + +#: community/apps/application/swagger_api/application_version_api.py:61 +msgid "Application version id" +msgstr "應用版本 id" + +#: community/apps/application/swagger_api/chat_api.py:61 +#: community/apps/application/swagger_api/chat_api.py:62 +#: community/apps/application/swagger_api/chat_api.py:92 +#: community/apps/dataset/serializers/problem_serializers.py:91 +msgid "problem" +msgstr "問題" + +#: community/apps/application/swagger_api/chat_api.py:68 +msgid "Question content" +msgstr "問題內容" + +#: community/apps/application/swagger_api/chat_api.py:72 +msgid "role" +msgstr "角色" + +#: community/apps/application/swagger_api/chat_api.py:77 +#: community/apps/application/swagger_api/chat_api.py:93 +msgid "regenerate" +msgstr "重新生成" + +#: community/apps/application/swagger_api/chat_api.py:79 +msgid "Stream Output" +msgstr "流式輸出" + +#: community/apps/application/swagger_api/chat_api.py:94 +msgid "Is it streaming output" +msgstr "是否流式輸出" + +#: community/apps/application/swagger_api/chat_api.py:96 +#: community/apps/application/swagger_api/chat_api.py:97 +#| msgid "Form Data" +msgid "Form data" +msgstr "表單數據" + +#: community/apps/application/swagger_api/chat_api.py:101 +#: community/apps/application/swagger_api/chat_api.py:102 +#| msgid "state list" +msgid "Image list" +msgstr "圖片列表" + +#: community/apps/application/swagger_api/chat_api.py:107 +msgid "Image name" +msgstr "圖片名稱" + +#: community/apps/application/swagger_api/chat_api.py:109 +msgid "Image URL" +msgstr "圖片網址" + +#: community/apps/application/swagger_api/chat_api.py:115 +#: community/apps/application/swagger_api/chat_api.py:116 +#: 
community/apps/dataset/views/document.py:133 +#: community/apps/dataset/views/document.py:134 +msgid "Document list" +msgstr "文檔列表" + +#: community/apps/application/swagger_api/chat_api.py:122 +msgid "Document name" +msgstr "文件名稱" + +#: community/apps/application/swagger_api/chat_api.py:124 +msgid "Document URL" +msgstr "文件網址" + +#: community/apps/application/swagger_api/chat_api.py:129 +#: community/apps/application/swagger_api/chat_api.py:130 +#| msgid "id list" +msgid "Audio list" +msgstr "音頻列表" + +#: community/apps/application/swagger_api/chat_api.py:135 +msgid "Audio name" +msgstr "音頻名稱" + +#: community/apps/application/swagger_api/chat_api.py:137 +msgid "Audio URL" +msgstr "音頻網址" + +#: community/apps/application/swagger_api/chat_api.py:145 +#: community/apps/application/swagger_api/chat_api.py:146 +msgid "Node data" +msgstr "節點數據" + +#: community/apps/application/swagger_api/chat_api.py:151 +#: community/apps/application/swagger_api/chat_api.py:152 +msgid "Child node" +msgstr "子節點" + +#: community/apps/application/swagger_api/chat_api.py:173 +#: community/apps/application/swagger_api/chat_api.py:174 +msgid "Number of dialogue questions" +msgstr "對話提問數量" + +#: community/apps/application/swagger_api/chat_api.py:176 +#: community/apps/application/swagger_api/chat_api.py:177 +msgid "Number of tags" +msgstr "標記數量" + +#: community/apps/application/swagger_api/chat_api.py:178 +#: community/apps/application/swagger_api/chat_api.py:179 +#: community/apps/common/swagger_api/common_api.py:64 +#: community/apps/common/swagger_api/common_api.py:65 +#: community/apps/dataset/serializers/paragraph_serializers.py:711 +#: community/apps/dataset/serializers/paragraph_serializers.py:712 +msgid "Number of likes" +msgstr "點贊數量" + +#: community/apps/application/swagger_api/chat_api.py:180 +#: community/apps/application/swagger_api/chat_api.py:181 +msgid "Number of clicks" +msgstr "點踩數量" + +#: community/apps/application/swagger_api/chat_api.py:182 +#: 
community/apps/application/swagger_api/chat_api.py:183 +msgid "Change time" +msgstr "修改時間" + +#: community/apps/application/swagger_api/chat_api.py:224 +msgid "Application ID, pass when modifying, do not pass when creating" +msgstr "應用id,修改的時候傳,創建的時候不傳" + +#: community/apps/application/swagger_api/chat_api.py:225 +#: community/apps/application/swagger_api/chat_api.py:226 +msgid "Model ID" +msgstr "模型 ID" + +#: community/apps/application/swagger_api/chat_api.py:232 +#: community/apps/application/swagger_api/chat_api.py:234 +msgid "Do you want to initiate multiple sessions" +msgstr "是否開啓多輪會話" + +#: community/apps/application/swagger_api/chat_api.py:237 +msgid "Problem optimization" +msgstr "問題優化" + +#: community/apps/application/swagger_api/chat_api.py:238 +msgid "Do you want to enable problem optimization" +msgstr "是否開啓問題優化" + +#: community/apps/application/swagger_api/chat_api.py:254 +msgid "Historical days" +msgstr "歷史天數" + +#: community/apps/application/swagger_api/chat_api.py:262 +msgid "or|and comparator" +msgstr "or|and 比較器" + +#: community/apps/application/swagger_api/chat_api.py:266 +#| msgid "Start time" +msgid "start time" +msgstr "開始時間" + +#: community/apps/application/swagger_api/chat_api.py:291 +msgid "Is it ascending order" +msgstr "是否昇序" + +#: community/apps/application/swagger_api/chat_api.py:304 +msgid "Session log id" +msgstr "會話日誌 id" + +#: community/apps/application/swagger_api/chat_api.py:305 +msgid "Conversation log id" +msgstr "對話日誌 ID" + +#: community/apps/application/swagger_api/chat_api.py:306 +#: community/apps/application/swagger_api/chat_api.py:307 +#: community/apps/application/swagger_api/chat_api.py:446 +msgid "Voting Status" +msgstr "投票狀態" + +#: community/apps/application/swagger_api/chat_api.py:308 +#: community/apps/application/swagger_api/chat_api.py:309 +msgid "Dataset id" +msgstr "數據集 id" + +#: community/apps/application/swagger_api/chat_api.py:312 +#: community/apps/application/swagger_api/chat_api.py:313 +msgid "Resource ID" 
+msgstr "資源 ID" + +#: community/apps/application/swagger_api/chat_api.py:314 +#: community/apps/application/swagger_api/chat_api.py:315 +msgid "Resource Type" +msgstr "資源類型" + +#: community/apps/application/swagger_api/chat_api.py:317 +#: community/apps/application/swagger_api/chat_api.py:318 +msgid "Number of tokens consumed by the question" +msgstr "問題消耗 token 數量" + +#: community/apps/application/swagger_api/chat_api.py:320 +#: community/apps/application/swagger_api/chat_api.py:321 +msgid "The number of tokens consumed by the answer" +msgstr "答案消耗 token 數量" + +#: community/apps/application/swagger_api/chat_api.py:324 +#: community/apps/application/swagger_api/chat_api.py:325 +msgid "Improved annotation list" +msgstr "改進標註列表" + +#: community/apps/application/swagger_api/chat_api.py:328 +msgid "Corresponding session Corresponding subscript" +msgstr "對應會話對應下標" + +#: community/apps/application/swagger_api/chat_api.py:329 +msgid "Corresponding session id corresponding subscript" +msgstr "對應會話id對應下標" + +#: community/apps/application/swagger_api/chat_api.py:397 +#: community/apps/application/swagger_api/chat_api.py:398 +msgid "Conversation id list" +msgstr "會話 id 列表" + +#: community/apps/application/swagger_api/chat_api.py:447 +msgid "-1: Cancel vote | 0: Agree | 1: Oppose" +msgstr "-1:取消投票|0:贊同|1:反對" + +#: community/apps/application/swagger_api/chat_api.py:485 +#: community/apps/application/swagger_api/chat_api.py:486 +#: community/apps/common/swagger_api/common_api.py:59 +#: community/apps/common/swagger_api/common_api.py:60 +#: community/apps/dataset/serializers/paragraph_serializers.py:687 +#: community/apps/dataset/serializers/paragraph_serializers.py:707 +#: community/apps/dataset/serializers/paragraph_serializers.py:708 +msgid "title" +msgstr "標題" + +#: community/apps/application/swagger_api/chat_api.py:486 +#: community/apps/common/swagger_api/common_api.py:60 +msgid "Description of xxx" +msgstr "xxx 描述" + +#: 
community/apps/application/swagger_api/chat_api.py:487 +#: community/apps/application/swagger_api/chat_api.py:488 +#: community/apps/common/swagger_api/common_api.py:61 +#: community/apps/common/swagger_api/common_api.py:62 +msgid "Number of hits" +msgstr "命中數量" + +#: community/apps/application/views/application_version_views.py:28 +#: community/apps/application/views/application_version_views.py:29 +#: community/apps/application/views/application_views.py:489 +#: community/apps/application/views/application_views.py:490 +msgid "Get the application list" +msgstr "獲取應用列表" + +#: community/apps/application/views/application_version_views.py:32 +#: community/apps/application/views/application_version_views.py:50 +#: community/apps/application/views/application_version_views.py:68 +#: community/apps/application/views/application_version_views.py:83 +msgid "Application/Version" +msgstr "應用/版本" + +#: community/apps/application/views/application_version_views.py:45 +#: community/apps/application/views/application_version_views.py:46 +msgid "Get the list of application versions by page" +msgstr "獲取應用版本列表分頁" + +#: community/apps/application/views/application_version_views.py:64 +#: community/apps/application/views/application_version_views.py:65 +msgid "Get application version details" +msgstr "獲取應用版本詳情" + +#: community/apps/application/views/application_version_views.py:78 +#: community/apps/application/views/application_version_views.py:79 +msgid "Modify application version information" +msgstr "修改應用版本信息" + +#: community/apps/application/views/application_views.py:42 +#: community/apps/application/views/application_views.py:43 +msgid "User Statistics" +msgstr "用戶統計" + +#: community/apps/application/views/application_views.py:44 +#: community/apps/application/views/application_views.py:70 +#: community/apps/application/views/application_views.py:95 +#: community/apps/application/views/application_views.py:121 +msgid "Application/Statistics" +msgstr "應用/統計" + +#: 
community/apps/application/views/application_views.py:68 +#: community/apps/application/views/application_views.py:69 +msgid "User demographic trends" +msgstr "用戶統計趨勢" + +#: community/apps/application/views/application_views.py:93 +#: community/apps/application/views/application_views.py:94 +msgid "Conversation statistics" +msgstr "對話相關統計" + +#: community/apps/application/views/application_views.py:119 +#: community/apps/application/views/application_views.py:120 +msgid "Dialogue-related statistical trends" +msgstr "對話相關統計趨勢" + +#: community/apps/application/views/application_views.py:150 +#: community/apps/application/views/application_views.py:151 +msgid "Modify application icon" +msgstr "修改應用圖標" + +#: community/apps/application/views/application_views.py:152 +#: community/apps/application/views/application_views.py:175 +#: community/apps/application/views/application_views.py:189 +#: community/apps/application/views/application_views.py:202 +#: community/apps/application/views/application_views.py:216 +#: community/apps/application/views/application_views.py:236 +#: community/apps/application/views/application_views.py:255 +#: community/apps/application/views/application_views.py:274 +#: community/apps/application/views/application_views.py:313 +#: community/apps/application/views/application_views.py:482 +#: community/apps/application/views/application_views.py:493 +#: community/apps/application/views/application_views.py:508 +#: community/apps/application/views/application_views.py:535 +#: community/apps/application/views/application_views.py:555 +#: community/apps/application/views/application_views.py:575 +#: community/apps/application/views/application_views.py:593 +#: community/apps/application/views/application_views.py:614 +#: community/apps/application/views/application_views.py:635 +#: community/apps/application/views/application_views.py:670 +msgid "Application" +msgstr "應用" + +#: community/apps/application/views/application_views.py:173 +msgid 
"Import Application" +msgstr "導入應用" + +#: community/apps/application/views/application_views.py:187 +msgid "Export Application" +msgstr "導出應用" + +#: community/apps/application/views/application_views.py:200 +#: community/apps/application/views/application_views.py:201 +msgid "Get embedded js" +msgstr "獲取嵌入 js" + +#: community/apps/application/views/application_views.py:214 +#: community/apps/application/views/application_views.py:215 +msgid "Get a list of models" +msgstr "獲取模型列表" + +#: community/apps/application/views/application_views.py:234 +#: community/apps/application/views/application_views.py:235 +#: community/apps/setting/views/model.py:100 +#: community/apps/setting/views/model.py:101 +msgid "Get model parameter form" +msgstr "獲取模型參數表單" + +#: community/apps/application/views/application_views.py:253 +#: community/apps/application/views/application_views.py:254 +msgid "Get a list of function libraries" +msgstr "獲取函數庫列表" + +#: community/apps/application/views/application_views.py:272 +#: community/apps/application/views/application_views.py:273 +msgid "Get library details" +msgstr "獲取函數庫詳情" + +#: community/apps/application/views/application_views.py:292 +#: community/apps/application/views/application_views.py:293 +msgid "Get the list of apps created by the current user" +msgstr "獲取當前用戶創建的應用列表" + +#: community/apps/application/views/application_views.py:294 +#: community/apps/application/views/application_views.py:333 +#: community/apps/application/views/chat_views.py:74 +#: community/apps/application/views/chat_views.py:93 +#: community/apps/application/views/chat_views.py:105 +#: community/apps/application/views/chat_views.py:118 +#: community/apps/application/views/chat_views.py:347 +msgid "Application/Chat" +msgstr "應用/對話" + +#: community/apps/application/views/application_views.py:311 +#: community/apps/application/views/application_views.py:312 +msgid "Get application data" +msgstr "獲取應用數據" + +#: 
community/apps/application/views/application_views.py:331 +#: community/apps/application/views/application_views.py:332 +msgid "Get application related information" +msgstr "獲取應用相關信息" + +#: community/apps/application/views/application_views.py:346 +#: community/apps/application/views/application_views.py:347 +msgid "Add ApiKey" +msgstr "添加 ApiKey" + +#: community/apps/application/views/application_views.py:348 +#: community/apps/application/views/application_views.py:364 +#: community/apps/application/views/application_views.py:383 +#: community/apps/application/views/application_views.py:402 +msgid "Application/API_KEY" +msgstr "應用/API_KEY" + +#: community/apps/application/views/application_views.py:362 +#: community/apps/application/views/application_views.py:363 +msgid "Get the application API_KEY list" +msgstr "獲取應用 API_KEY 列表" + +#: community/apps/application/views/application_views.py:381 +#: community/apps/application/views/application_views.py:382 +msgid "Modify application API_KEY" +msgstr "修改應用 API_KEY" + +#: community/apps/application/views/application_views.py:400 +#: community/apps/application/views/application_views.py:401 +msgid "Delete Application API_KEY" +msgstr "刪除應用 API_KEY" + +#: community/apps/application/views/application_views.py:421 +#: community/apps/application/views/application_views.py:422 +msgid "Modify Application AccessToken" +msgstr "修改應用訪問限制" + +#: community/apps/application/views/application_views.py:423 +#: community/apps/application/views/application_views.py:441 +msgid "Application/Public Access" +msgstr "應用/公共訪問" + +#: community/apps/application/views/application_views.py:438 +#: community/apps/application/views/application_views.py:439 +msgid "Get the application AccessToken information" +msgstr "獲取應用 AccessToken 信息" + +#: community/apps/application/views/application_views.py:462 +#: community/apps/application/views/application_views.py:463 +msgid "Application Certification" +msgstr "應用認證" + +#: 
community/apps/application/views/application_views.py:465 +msgid "Application/Certification" +msgstr "應用/認證" + +#: community/apps/application/views/application_views.py:479 +#: community/apps/application/views/application_views.py:480 +msgid "Create an application" +msgstr "創建應用" + +#: community/apps/application/views/application_views.py:505 +msgid "Hit Test List" +msgstr "命中測試列表" + +#: community/apps/application/views/application_views.py:530 +#: community/apps/application/views/application_views.py:531 +msgid "Publishing an application" +msgstr "發佈應用" + +#: community/apps/application/views/application_views.py:551 +#: community/apps/application/views/application_views.py:552 +msgid "Deleting application" +msgstr "刪除應用" + +#: community/apps/application/views/application_views.py:570 +#: community/apps/application/views/application_views.py:571 +msgid "Modify the application" +msgstr "修改應用" + +#: community/apps/application/views/application_views.py:589 +#: community/apps/application/views/application_views.py:590 +msgid "Get application details" +msgstr "獲取應用詳情" + +#: community/apps/application/views/application_views.py:609 +#: community/apps/application/views/application_views.py:610 +msgid "Get the knowledge base available to the current application" +msgstr "獲取當前應用可用的知識庫" + +#: community/apps/application/views/application_views.py:630 +#: community/apps/application/views/application_views.py:631 +msgid "Get the application list by page" +msgstr "獲取應用列表分頁" + +#: community/apps/application/views/application_views.py:665 +#: community/apps/application/views/application_views.py:666 +#| msgid "Text-to-speech type" +msgid "text to speech" +msgstr "文本轉語音" + +#: community/apps/application/views/chat_views.py:36 +#: community/apps/application/views/chat_views.py:37 +msgid "OpenAI Interface Dialogue" +msgstr "openai接口對話" + +#: community/apps/application/views/chat_views.py:39 +msgid "OpenAI Dialogue" +msgstr "openai對話" + +#: 
community/apps/application/views/chat_views.py:52 +#: community/apps/application/views/chat_views.py:53 +msgid "Export conversation" +msgstr "導出對話" + +#: community/apps/application/views/chat_views.py:55 +#: community/apps/application/views/chat_views.py:156 +#: community/apps/application/views/chat_views.py:174 +#: community/apps/application/views/chat_views.py:197 +#: community/apps/application/views/chat_views.py:217 +#: community/apps/application/views/chat_views.py:235 +#: community/apps/application/views/chat_views.py:257 +#: community/apps/application/views/chat_views.py:282 +#: community/apps/application/views/chat_views.py:302 +#: community/apps/application/views/chat_views.py:324 +#: community/apps/application/views/chat_views.py:489 +msgid "Application/Conversation Log" +msgstr "應用/對話日誌" + +#: community/apps/application/views/chat_views.py:71 +#: community/apps/application/views/chat_views.py:72 +msgid "Get the session id according to the application id" +msgstr "獲取應用id對應的會話id" + +#: community/apps/application/views/chat_views.py:90 +#: community/apps/application/views/chat_views.py:91 +msgid "Get the workflow temporary session id" +msgstr "獲取工作流臨時會話id" + +#: community/apps/application/views/chat_views.py:102 +#: community/apps/application/views/chat_views.py:103 +msgid "Get a temporary session id" +msgstr "獲取臨時會話id" + +#: community/apps/application/views/chat_views.py:115 +#: community/apps/application/views/chat_views.py:116 +msgid "dialogue" +msgstr "對話" + +#: community/apps/application/views/chat_views.py:152 +#: community/apps/application/views/chat_views.py:153 +msgid "Get the conversation list" +msgstr "獲取對話列表" + +#: community/apps/application/views/chat_views.py:172 +#: community/apps/application/views/chat_views.py:173 +msgid "Delete a conversation" +msgstr "刪除對話" + +#: community/apps/application/views/chat_views.py:192 +#: community/apps/application/views/chat_views.py:193 +msgid "Get client conversation list by paging" +msgstr "獲取客戶對話列表分頁" + 
+#: community/apps/application/views/chat_views.py:215 +#: community/apps/application/views/chat_views.py:216 +msgid "Client deletes conversation" +msgstr "客戶端刪除對話" + +#: community/apps/application/views/chat_views.py:232 +#: community/apps/application/views/chat_views.py:233 +msgid "Client modifies dialogue summary" +msgstr "用戶端修改對話摘要" + +#: community/apps/application/views/chat_views.py:253 +#: community/apps/application/views/chat_views.py:254 +msgid "Get the conversation list by page" +msgstr "獲取對話列表分頁" + +#: community/apps/application/views/chat_views.py:278 +#: community/apps/application/views/chat_views.py:279 +msgid "Get conversation record details" +msgstr "獲取對話記錄詳情" + +#: community/apps/application/views/chat_views.py:298 +#: community/apps/application/views/chat_views.py:299 +msgid "Get a list of conversation records" +msgstr "獲取對話記錄列表" + +#: community/apps/application/views/chat_views.py:319 +#: community/apps/application/views/chat_views.py:320 +msgid "Get the conversation history list by page" +msgstr "獲取對話歷史列表分頁" + +#: community/apps/application/views/chat_views.py:342 +#: community/apps/application/views/chat_views.py:343 +msgid "Like, Dislike" +msgstr "點贊,點踩" + +#: community/apps/application/views/chat_views.py:365 +#: community/apps/application/views/chat_views.py:366 +msgid "Get the list of marked paragraphs" +msgstr "獲取標記段落列表" + +#: community/apps/application/views/chat_views.py:369 +#: community/apps/application/views/chat_views.py:390 +#: community/apps/application/views/chat_views.py:442 +msgid "Application/Conversation Log/Annotation" +msgstr "應用/對話日誌/標註" + +#: community/apps/application/views/chat_views.py:412 +#: community/apps/application/views/chat_views.py:413 +msgid "Add to Knowledge Base" +msgstr "添加到知識庫" + +#: community/apps/application/views/chat_views.py:416 +msgid "Application/Conversation Log/Add to Knowledge Base" +msgstr "應用/對話日誌/添加到知識庫" + +#: community/apps/application/views/chat_views.py:438 +#: 
community/apps/application/views/chat_views.py:439 +msgid "Delete a Annotation" +msgstr "刪除標註" + +#: community/apps/application/views/chat_views.py:487 +#: community/apps/dataset/views/file.py:28 +#: community/apps/dataset/views/file.py:29 +#: community/apps/dataset/views/file.py:34 +msgid "Upload file" +msgstr "上傳文件" + +#: community/apps/common/auth/authenticate.py:62 +#: community/apps/common/auth/authenticate.py:83 +msgid "Not logged in, please log in first" +msgstr "未登錄,請先登錄" + +#: community/apps/common/auth/authenticate.py:68 +#: community/apps/common/auth/authenticate.py:74 +#: community/apps/common/auth/authenticate.py:89 +#: community/apps/common/auth/authenticate.py:95 +msgid "Authentication information is incorrect! illegal user" +msgstr "非法用戶!認證信息不正確" + +#: community/apps/common/auth/authentication.py:94 +msgid "No permission to access" +msgstr "沒有權限訪問" + +#: community/apps/common/auth/handle/impl/application_key.py:23 +#: community/apps/common/auth/handle/impl/application_key.py:25 +msgid "Secret key is invalid" +msgstr "secret key無效" + +#: community/apps/common/auth/handle/impl/public_access_token.py:48 +#: community/apps/common/auth/handle/impl/public_access_token.py:50 +#: community/apps/common/auth/handle/impl/public_access_token.py:52 +#: community/apps/common/auth/handle/impl/public_access_token.py:54 +msgid "Authentication information is incorrect" +msgstr "認證信息不正確" + +#: community/apps/common/auth/handle/impl/user_token.py:34 +msgid "Login expired" +msgstr "登錄過期" + +#: community/apps/common/constants/exception_code_constants.py:31 +msgid "The username or password is incorrect" +msgstr "用戶名或密碼錯誤" + +#: community/apps/common/constants/exception_code_constants.py:32 +msgid "Please log in first and bring the user Token" +msgstr "請先登錄並攜帶用戶Token" + +#: community/apps/common/constants/exception_code_constants.py:33 +#: community/apps/users/serializers/user_serializers.py:429 +msgid "Email sending failed" +msgstr "郵箱發送失敗" + +#: 
community/apps/common/constants/exception_code_constants.py:34 +msgid "Email format error" +msgstr "郵箱格式錯誤" + +#: community/apps/common/constants/exception_code_constants.py:35 +msgid "The email has been registered, please log in directly" +msgstr "郵箱已註冊,請直接登錄" + +#: community/apps/common/constants/exception_code_constants.py:36 +msgid "The email is not registered, please register first" +msgstr "郵箱未註冊,請先註冊" + +#: community/apps/common/constants/exception_code_constants.py:38 +msgid "The verification code is incorrect or the verification code has expired" +msgstr "驗證碼錯誤或驗證碼已過期" + +#: community/apps/common/constants/exception_code_constants.py:39 +msgid "The username has been registered, please log in directly" +msgstr "用戶名已註冊,請直接登錄" + +#: community/apps/common/constants/exception_code_constants.py:41 +msgid "" +"The username cannot be empty and must be between 6 and 20 characters long." +msgstr "用戶名不能爲空,且長度必須在6-20個字符之間。" + +#: community/apps/common/constants/exception_code_constants.py:43 +msgid "Password and confirmation password are inconsistent" +msgstr "密碼和確認密碼不一致" + +#: community/apps/common/constants/permission_constants.py:61 +msgid "ADMIN" +msgstr "管理員" + +#: community/apps/common/constants/permission_constants.py:61 +msgid "Admin, prefabs are not currently used" +msgstr "管理員,預製目前不會使用" + +#: community/apps/common/constants/permission_constants.py:62 +msgid "USER" +msgstr "用戶" + +#: community/apps/common/constants/permission_constants.py:62 +msgid "All user permissions" +msgstr "所有用戶權限" + +#: community/apps/common/constants/permission_constants.py:63 +msgid "chat" +msgstr "對話" + +#: community/apps/common/constants/permission_constants.py:63 +msgid "Only has application dialog interface permissions" +msgstr "只擁有應用對話接口權限" + +#: community/apps/common/constants/permission_constants.py:64 +msgid "Apply private key" +msgstr "應用私鑰" + +#: community/apps/common/event/__init__.py:30 +msgid "The download process was interrupted, please try again" +msgstr "下載過程中斷,請重試" + 
+#: community/apps/common/event/listener_manage.py:91 +#, python-brace-format +msgid "Query vector data: {paragraph_id_list} error {error} {traceback}" +msgstr "向量數據查詢: {paragraph_id_list} 錯誤 {error} {traceback}" + +#: community/apps/common/event/listener_manage.py:96 +#, python-brace-format +msgid "Start--->Embedding paragraph: {paragraph_id_list}" +msgstr "開始--->嵌入段落: {paragraph_id_list}" + +#: community/apps/common/event/listener_manage.py:108 +#, python-brace-format +msgid "Vectorized paragraph: {paragraph_id_list} error {error} {traceback}" +msgstr "向量化段落: {paragraph_id_list} 錯誤 {error} {traceback}" + +#: community/apps/common/event/listener_manage.py:114 +#, python-brace-format +msgid "End--->Embedding paragraph: {paragraph_id_list}" +msgstr "結束--->嵌入段落: {paragraph_id_list}" + +#: community/apps/common/event/listener_manage.py:123 +#, python-brace-format +msgid "Start--->Embedding paragraph: {paragraph_id}" +msgstr "開始--->嵌入段落: {paragraph_id}" + +#: community/apps/common/event/listener_manage.py:148 +#, python-brace-format +msgid "Vectorized paragraph: {paragraph_id} error {error} {traceback}" +msgstr "向量化段落: {paragraph_id} 錯誤 {error} {traceback}" + +#: community/apps/common/event/listener_manage.py:153 +#, python-brace-format +msgid "End--->Embedding paragraph: {paragraph_id}" +msgstr "結束--->嵌入段落: {paragraph_id}" + +#: community/apps/common/event/listener_manage.py:269 +#, python-brace-format +msgid "Start--->Embedding document: {document_id}" +msgstr "開始--->嵌入文檔: {document_id}" + +#: community/apps/common/event/listener_manage.py:291 +#, python-brace-format +msgid "Vectorized document: {document_id} error {error} {traceback}" +msgstr "向量化文檔: {document_id} 錯誤 {error} {traceback}" + +#: community/apps/common/event/listener_manage.py:296 +#, python-brace-format +msgid "End--->Embedding document: {document_id}" +msgstr "結束--->嵌入文檔: {document_id}" + +#: community/apps/common/event/listener_manage.py:307 +#, python-brace-format +msgid "Start--->Embedding dataset: 
{dataset_id}" +msgstr "開始--->嵌入知識庫: {dataset_id}" + +#: community/apps/common/event/listener_manage.py:311 +#, python-brace-format +msgid "Start--->Embedding document: {document_list}" +msgstr "開始--->嵌入文檔: {document_list}" + +#: community/apps/common/event/listener_manage.py:315 +#: community/apps/embedding/task/embedding.py:123 +#, python-brace-format +msgid "Vectorized dataset: {dataset_id} error {error} {traceback}" +msgstr "向量化知識庫: {dataset_id} 錯誤 {error} {traceback}" + +#: community/apps/common/event/listener_manage.py:318 +#, python-brace-format +msgid "End--->Embedding dataset: {dataset_id}" +msgstr "結束--->嵌入知識庫: {dataset_id}" + +#: community/apps/common/field/common.py:45 +msgid "not a function" +msgstr "不是函數" + +#: community/apps/common/forms/base_field.py:64 +#, python-brace-format +msgid "The field {field_label} is required" +msgstr "字段 {field_label} 是必填的" + +#: community/apps/common/forms/slider_field.py:56 +#, python-brace-format +msgid "The {field_label} cannot be less than {min}" +msgstr "字段 {field_label} 不能小於 {min}" + +#: community/apps/common/forms/slider_field.py:62 +#, python-brace-format +msgid "The {field_label} cannot be greater than {max}" +msgstr "{field_label} 不能大於 {max}" + +#: community/apps/common/handle/handle_exception.py:30 +msgid "Unknown exception" +msgstr "未知異常" + +#: community/apps/common/handle/impl/pdf_split_handle.py:278 +#, python-brace-format +msgid "This document has no preface and is treated as ordinary text: {e}" +msgstr "文檔沒有前言,視爲普通文本: {e}" + +#: community/apps/common/init/init_doc.py:26 +#: community/apps/common/init/init_doc.py:45 +msgid "Intelligent customer service platform" +msgstr "智能客服平臺" + +#: community/apps/common/job/clean_chat_job.py:25 +msgid "start clean chat log" +msgstr "開始清理對話日誌" + +#: community/apps/common/job/clean_chat_job.py:71 +msgid "end clean chat log" +msgstr "結束清理對話日誌" + +#: community/apps/common/job/clean_debug_file_job.py:21 +msgid "start clean debug file" +msgstr "開始清理調試文件" + +#: 
community/apps/common/job/clean_debug_file_job.py:25 +msgid "end clean debug file" +msgstr "結束清理調試文件" + +#: community/apps/common/job/client_access_num_job.py:25 +msgid "start reset access_num" +msgstr "開始重置訪問次數" + +#: community/apps/common/job/client_access_num_job.py:27 +msgid "end reset access_num" +msgstr "結束重置訪問次數" + +#: community/apps/common/log/log.py:37 +msgid "unknown" +msgstr "未知的" + +#: community/apps/common/response/result.py:24 +msgid "Success" +msgstr "成功" + +#: community/apps/common/response/result.py:36 +#: community/apps/common/response/result.py:80 +#: community/apps/common/response/result.py:82 +msgid "current page" +msgstr "當前頁" + +#: community/apps/common/response/result.py:42 +#: community/apps/common/response/result.py:85 +#: community/apps/common/response/result.py:87 +msgid "page size" +msgstr "每頁數量" + +#: community/apps/common/response/result.py:53 +#: community/apps/common/response/result.py:101 +#: community/apps/common/response/result.py:130 +msgid "response parameters" +msgstr "響應參數" + +#: community/apps/common/response/result.py:59 +#: community/apps/common/response/result.py:107 +#: community/apps/common/response/result.py:136 +msgid "response code" +msgstr "響應碼" + +#: community/apps/common/response/result.py:61 +#: community/apps/common/response/result.py:109 +#: community/apps/common/response/result.py:138 +msgid "success:200 fail:other" +msgstr "成功:200 失敗:其他" + +#: community/apps/common/response/result.py:64 +#: community/apps/common/response/result.py:112 +#: community/apps/common/response/result.py:141 +msgid "prompt" +msgstr "提示" + +#: community/apps/common/response/result.py:65 +#: community/apps/common/response/result.py:113 +#: community/apps/common/response/result.py:142 +msgid "success" +msgstr "成功" + +#: community/apps/common/response/result.py:66 +#: community/apps/common/response/result.py:114 +#: community/apps/common/response/result.py:143 +msgid "error prompt" +msgstr "錯誤提示" + +#: 
community/apps/common/response/result.py:72 +#: community/apps/common/response/result.py:74 +msgid "total number of data" +msgstr "總條數" + +#: community/apps/common/swagger_api/common_api.py:24 +#: community/apps/dataset/serializers/dataset_serializers.py:569 +msgid "query text" +msgstr "查詢文本" + +#: community/apps/common/swagger_api/common_api.py:42 +msgid "Retrieval pattern embedding|keywords|blend" +msgstr "檢索模式 embedding|keywords|blend" + +#: community/apps/common/swagger_api/common_api.py:66 +#: community/apps/common/swagger_api/common_api.py:67 +msgid "Number of clicks and dislikes" +msgstr "點踩數" + +#: community/apps/common/swagger_api/common_api.py:74 +#: community/apps/common/swagger_api/common_api.py:75 +msgid "relevance score" +msgstr "相關性得分" + +#: community/apps/common/swagger_api/common_api.py:76 +#: community/apps/common/swagger_api/common_api.py:77 +msgid "Comprehensive score, used for ranking" +msgstr "綜合得分,用於排序" + +#: community/apps/common/swagger_api/common_api.py:78 +#: community/apps/common/swagger_api/common_api.py:79 +#: community/apps/users/serializers/user_serializers.py:591 +#: community/apps/users/serializers/user_serializers.py:592 +msgid "Update time" +msgstr "更新時間" + +#: community/apps/common/swagger_api/common_api.py:81 +#: community/apps/common/swagger_api/common_api.py:82 +#: community/apps/users/serializers/user_serializers.py:589 +#: community/apps/users/serializers/user_serializers.py:590 +msgid "Create time" +msgstr "創建時間" + +#: community/apps/common/util/common.py:239 +msgid "Text-to-speech node, the text content must be of string type" +msgstr "文字轉語音節點,文字內容必須是字串類型" + +#: community/apps/common/util/common.py:241 +msgid "Text-to-speech node, the text content cannot be empty" +msgstr "文字轉語音節點,文字內容不能為空" + +#: community/apps/dataset/serializers/common_serializers.py:87 +msgid "source url" +msgstr "文檔地址" + +#: community/apps/dataset/serializers/common_serializers.py:89 +#: community/apps/dataset/serializers/dataset_serializers.py:333 
+#: community/apps/dataset/serializers/dataset_serializers.py:390 +#: community/apps/dataset/serializers/dataset_serializers.py:391 +#: community/apps/dataset/serializers/document_serializers.py:155 +#: community/apps/dataset/serializers/document_serializers.py:181 +msgid "selector" +msgstr "選擇器" + +#: community/apps/dataset/serializers/common_serializers.py:96 +#: community/apps/dataset/serializers/dataset_serializers.py:341 +#, python-brace-format +msgid "URL error, cannot parse [{source_url}]" +msgstr "URL錯誤,無法解析 [{source_url}]" + +#: community/apps/dataset/serializers/common_serializers.py:105 +#: community/apps/dataset/serializers/common_serializers.py:124 +#: community/apps/dataset/serializers/common_serializers.py:125 +#: community/apps/dataset/serializers/document_serializers.py:85 +#: community/apps/dataset/swagger_api/document_api.py:23 +#: community/apps/dataset/swagger_api/document_api.py:24 +#: community/apps/dataset/swagger_api/document_api.py:49 +#: community/apps/dataset/swagger_api/document_api.py:50 +msgid "id list" +msgstr "id 列表" + +#: community/apps/dataset/serializers/common_serializers.py:115 +#, python-brace-format +msgid "The following id does not exist: {error_id_list}" +msgstr "id不存在: {error_id_list}" + +#: community/apps/dataset/serializers/common_serializers.py:183 +#: community/apps/dataset/serializers/common_serializers.py:207 +msgid "The knowledge base is inconsistent with the vector model" +msgstr "知識庫與向量模型不一致" + +#: community/apps/dataset/serializers/common_serializers.py:185 +#: community/apps/dataset/serializers/common_serializers.py:209 +msgid "Knowledge base setting error, please reset the knowledge base" +msgstr "知識庫設置錯誤,請重新設置知識庫" + +#: community/apps/dataset/serializers/dataset_serializers.py:109 +#: community/apps/dataset/serializers/dataset_serializers.py:110 +#: community/apps/setting/serializers/model_apply_serializers.py:51 +msgid "model id" +msgstr "模型 id" + +#: 
community/apps/dataset/serializers/dataset_serializers.py:112 +#: community/apps/dataset/serializers/dataset_serializers.py:114 +msgid "Whether to start multiple rounds of dialogue" +msgstr "是否開啓多輪對話" + +#: community/apps/dataset/serializers/dataset_serializers.py:115 +#: community/apps/dataset/serializers/dataset_serializers.py:116 +msgid "opening remarks" +msgstr "開場白" + +#: community/apps/dataset/serializers/dataset_serializers.py:118 +msgid "example" +msgstr "示例" + +#: community/apps/dataset/serializers/dataset_serializers.py:119 +msgid "User id" +msgstr "用戶 id" + +#: community/apps/dataset/serializers/dataset_serializers.py:121 +#: community/apps/dataset/serializers/dataset_serializers.py:122 +msgid "Whether to publish" +msgstr "是否發佈" + +#: community/apps/dataset/serializers/dataset_serializers.py:124 +#: community/apps/dataset/serializers/dataset_serializers.py:125 +#: community/apps/dataset/serializers/dataset_serializers.py:304 +#: community/apps/dataset/serializers/dataset_serializers.py:305 +#: community/apps/dataset/serializers/dataset_serializers.py:366 +#: community/apps/dataset/serializers/dataset_serializers.py:367 +#: community/apps/dataset/serializers/dataset_serializers.py:511 +#: community/apps/dataset/serializers/dataset_serializers.py:512 +#: community/apps/dataset/serializers/dataset_serializers.py:942 +#: community/apps/dataset/serializers/dataset_serializers.py:943 +#: community/apps/dataset/serializers/document_serializers.py:824 +#: community/apps/dataset/serializers/document_serializers.py:825 +#: community/apps/dataset/serializers/paragraph_serializers.py:200 +#: community/apps/dataset/serializers/paragraph_serializers.py:201 +#: community/apps/dataset/serializers/paragraph_serializers.py:724 +#: community/apps/dataset/serializers/paragraph_serializers.py:725 +#: community/apps/dataset/swagger_api/problem_api.py:33 +#: community/apps/dataset/swagger_api/problem_api.py:34 +#: community/apps/dataset/swagger_api/problem_api.py:135 +#: 
community/apps/dataset/swagger_api/problem_api.py:136 +#: community/apps/function_lib/swagger_api/function_lib_api.py:32 +#: community/apps/function_lib/swagger_api/function_lib_api.py:33 +msgid "create time" +msgstr "創建時間" + +#: community/apps/dataset/serializers/dataset_serializers.py:127 +#: community/apps/dataset/serializers/dataset_serializers.py:128 +#: community/apps/dataset/serializers/dataset_serializers.py:301 +#: community/apps/dataset/serializers/dataset_serializers.py:302 +#: community/apps/dataset/serializers/dataset_serializers.py:363 +#: community/apps/dataset/serializers/dataset_serializers.py:364 +#: community/apps/dataset/serializers/dataset_serializers.py:508 +#: community/apps/dataset/serializers/dataset_serializers.py:509 +#: community/apps/dataset/serializers/dataset_serializers.py:939 +#: community/apps/dataset/serializers/dataset_serializers.py:940 +#: community/apps/dataset/serializers/document_serializers.py:821 +#: community/apps/dataset/serializers/document_serializers.py:822 +#: community/apps/dataset/serializers/paragraph_serializers.py:197 +#: community/apps/dataset/serializers/paragraph_serializers.py:198 +#: community/apps/dataset/serializers/paragraph_serializers.py:721 +#: community/apps/dataset/serializers/paragraph_serializers.py:722 +#: community/apps/dataset/swagger_api/problem_api.py:30 +#: community/apps/dataset/swagger_api/problem_api.py:31 +#: community/apps/dataset/swagger_api/problem_api.py:132 +#: community/apps/dataset/swagger_api/problem_api.py:133 +#: community/apps/function_lib/swagger_api/function_lib_api.py:34 +#: community/apps/function_lib/swagger_api/function_lib_api.py:35 +msgid "update time" +msgstr "更新時間" + +#: community/apps/dataset/serializers/dataset_serializers.py:257 +#: community/apps/dataset/serializers/dataset_serializers.py:260 +#: community/apps/dataset/serializers/document_serializers.py:211 +#: community/apps/dataset/serializers/document_serializers.py:218 +#: 
community/apps/dataset/serializers/document_serializers.py:987 +#: community/apps/dataset/serializers/document_serializers.py:1016 +msgid "file list" +msgstr "文件列表" + +#: community/apps/dataset/serializers/dataset_serializers.py:269 +msgid "upload files " +msgstr "上傳文件" + +#: community/apps/dataset/serializers/dataset_serializers.py:297 +#: community/apps/dataset/serializers/dataset_serializers.py:298 +#: community/apps/dataset/serializers/dataset_serializers.py:359 +#: community/apps/dataset/serializers/dataset_serializers.py:360 +#: community/apps/dataset/serializers/dataset_serializers.py:504 +#: community/apps/dataset/serializers/dataset_serializers.py:505 +#: community/apps/dataset/serializers/dataset_serializers.py:935 +#: community/apps/dataset/serializers/dataset_serializers.py:936 +#: community/apps/dataset/serializers/document_serializers.py:814 +#: community/apps/dataset/serializers/document_serializers.py:815 +msgid "char length" +msgstr "字符長度" + +#: community/apps/dataset/serializers/dataset_serializers.py:299 +#: community/apps/dataset/serializers/dataset_serializers.py:300 +#: community/apps/dataset/serializers/dataset_serializers.py:361 +#: community/apps/dataset/serializers/dataset_serializers.py:362 +#: community/apps/dataset/serializers/dataset_serializers.py:506 +#: community/apps/dataset/serializers/dataset_serializers.py:507 +#: community/apps/dataset/serializers/dataset_serializers.py:937 +#: community/apps/dataset/serializers/dataset_serializers.py:938 +msgid "document count" +msgstr "文檔數量" + +#: community/apps/dataset/serializers/dataset_serializers.py:308 +#: community/apps/dataset/serializers/dataset_serializers.py:309 +#: community/apps/dataset/serializers/dataset_serializers.py:370 +#: community/apps/dataset/serializers/dataset_serializers.py:371 +#: community/apps/dataset/serializers/dataset_serializers.py:515 +#: community/apps/dataset/serializers/dataset_serializers.py:516 +#: 
community/apps/dataset/serializers/document_serializers.py:290 +#: community/apps/dataset/serializers/document_serializers.py:485 +msgid "document list" +msgstr "文檔列表" + +#: community/apps/dataset/serializers/dataset_serializers.py:327 +#: community/apps/dataset/serializers/dataset_serializers.py:388 +#: community/apps/dataset/serializers/dataset_serializers.py:389 +msgid "web source url" +msgstr "web站點url" + +#: community/apps/dataset/serializers/dataset_serializers.py:414 +#: community/apps/setting/serializers/valid_serializers.py:26 +msgid "" +"The community version supports up to 50 knowledge bases. If you need more " +"knowledge bases, please contact us (https://fit2cloud.com/)." +msgstr "" +"社區版最多支持 50 個知識庫,如需擁有更多知識庫,請聯繫我們(https://" +"fit2cloud.com/)。" + +#: community/apps/dataset/serializers/dataset_serializers.py:533 +#: community/apps/dataset/serializers/dataset_serializers.py:534 +msgid "documents" +msgstr "文檔" + +#: community/apps/dataset/serializers/dataset_serializers.py:577 +msgid "search mode" +msgstr "搜索模式" + +#: community/apps/dataset/serializers/dataset_serializers.py:582 +#: community/apps/dataset/serializers/dataset_serializers.py:618 +#: community/apps/dataset/serializers/dataset_serializers.py:706 +msgid "id does not exist" +msgstr "ID 不存在" + +#: community/apps/dataset/serializers/dataset_serializers.py:609 +msgid "sync type" +msgstr "同步類型" + +#: community/apps/dataset/serializers/dataset_serializers.py:611 +msgid "The synchronization type only supports:replace|complete" +msgstr "同步類型只支持:replace|complete" + +#: community/apps/dataset/serializers/dataset_serializers.py:620 +#: community/apps/dataset/serializers/document_serializers.py:499 +msgid "Synchronization is only supported for web site types" +msgstr "只有web站點類型才支持同步" + +#: community/apps/dataset/serializers/dataset_serializers.py:694 +msgid "" +"Synchronization type->replace: replacement synchronization, complete: " +"complete synchronization" +msgstr "同步類型->replace:替換同步,complete:完整同步" + 
+#: community/apps/dataset/serializers/dataset_serializers.py:803 +#: community/apps/dataset/serializers/document_serializers.py:748 +#: community/apps/setting/models_provider/tools.py:25 +msgid "No permission to use this model" +msgstr "無權限使用該模型" + +#: community/apps/dataset/serializers/dataset_serializers.py:815 +msgid "Failed to send the vectorization task, please try again later!" +msgstr "向量化任務發送失敗,請稍後再試!" + +#: community/apps/dataset/serializers/dataset_serializers.py:911 +#: community/apps/dataset/serializers/document_serializers.py:846 +msgid "meta" +msgstr "知識庫元數據" + +#: community/apps/dataset/serializers/dataset_serializers.py:913 +msgid "Knowledge base metadata->web:{source_url:xxx,selector:'xxx'},base:{}" +msgstr "知識庫元數據->web:{source_url:xxx,selector:'xxx'},base:{}" + +#: community/apps/dataset/serializers/document_serializers.py:87 +#: community/apps/dataset/serializers/document_serializers.py:100 +#: community/apps/dataset/serializers/document_serializers.py:416 +#: community/apps/dataset/swagger_api/document_api.py:37 +#: community/apps/dataset/swagger_api/document_api.py:51 +msgid "task type" +msgstr "任務類型" + +#: community/apps/dataset/serializers/document_serializers.py:95 +#: community/apps/dataset/serializers/document_serializers.py:108 +msgid "task type not support" +msgstr "任務類型不支持" + +#: community/apps/dataset/serializers/document_serializers.py:115 +#: community/apps/dataset/serializers/document_serializers.py:188 +#: community/apps/dataset/serializers/document_serializers.py:200 +#: community/apps/dataset/serializers/document_serializers.py:201 +#: community/apps/dataset/serializers/document_serializers.py:412 +#: community/apps/dataset/serializers/document_serializers.py:476 +#: community/apps/dataset/serializers/document_serializers.py:836 +#: community/apps/dataset/serializers/document_serializers.py:837 +msgid "document name" +msgstr "文檔名稱" + +#: community/apps/dataset/serializers/document_serializers.py:118 +msgid "The type only 
supports optimization|directly_return" +msgstr "類型只支持 optimization|directly_return" + +#: community/apps/dataset/serializers/document_serializers.py:120 +#: community/apps/dataset/serializers/document_serializers.py:414 +#: community/apps/dataset/serializers/document_serializers.py:480 +#: community/apps/dataset/serializers/document_serializers.py:840 +#: community/apps/dataset/swagger_api/document_api.py:25 +msgid "hit handling method" +msgstr "命中處理方法" + +#: community/apps/dataset/serializers/document_serializers.py:126 +#: community/apps/dataset/serializers/document_serializers.py:844 +#: community/apps/dataset/swagger_api/document_api.py:27 +msgid "directly return similarity" +msgstr "直接返回相似度" + +#: community/apps/dataset/serializers/document_serializers.py:129 +#: community/apps/dataset/serializers/document_serializers.py:415 +msgid "document is active" +msgstr "文檔是否可用" + +#: community/apps/dataset/serializers/document_serializers.py:150 +#: community/apps/dataset/serializers/document_serializers.py:152 +msgid "document url list" +msgstr "文檔 url 列表" + +#: community/apps/dataset/serializers/document_serializers.py:178 +#: community/apps/dataset/serializers/document_serializers.py:179 +msgid "source url list" +msgstr "文檔地址列表" + +#: community/apps/dataset/serializers/document_serializers.py:202 +#: community/apps/dataset/serializers/document_serializers.py:203 +msgid "paragraphs" +msgstr "段落" + +#: community/apps/dataset/serializers/document_serializers.py:227 +msgid "The template type only supports excel|csv" +msgstr "模版類型只支持 excel|csv" + +#: community/apps/dataset/serializers/document_serializers.py:237 +msgid "Export template type csv|excel" +msgstr "導出模版類型 csv|excel" + +#: community/apps/dataset/serializers/document_serializers.py:289 +#: community/apps/dataset/serializers/paragraph_serializers.py:304 +#: community/apps/dataset/serializers/paragraph_serializers.py:436 +msgid "target dataset id" +msgstr "目標知識庫 id" + +#: 
community/apps/dataset/serializers/document_serializers.py:391 +#: community/apps/dataset/serializers/paragraph_serializers.py:305 +#: community/apps/dataset/serializers/paragraph_serializers.py:441 +msgid "target document id" +msgstr "目標文檔 id" + +#: community/apps/dataset/serializers/document_serializers.py:399 +#: community/apps/dataset/serializers/document_serializers.py:400 +msgid "document id list" +msgstr "文檔 id 列表" + +#: community/apps/dataset/serializers/document_serializers.py:418 +msgid "order by" +msgstr "" + +#: community/apps/dataset/serializers/document_serializers.py:653 +msgid "Section title (optional)" +msgstr "分段標題(選填)" + +#: community/apps/dataset/serializers/document_serializers.py:654 +msgid "" +"Section content (required, question answer, no more than 4096 characters)" +msgstr "分段內容(必填,問題答案,最長不超過4096個字元)" + +#: community/apps/dataset/serializers/document_serializers.py:655 +msgid "Question (optional, one per line in the cell)" +msgstr "問題(選填,儲存格內一行一個)" + +#: community/apps/dataset/serializers/document_serializers.py:765 +msgid "The task is being executed, please do not send it repeatedly." 
+msgstr "任務正在執行中,請勿重複發送" + +#: community/apps/dataset/serializers/document_serializers.py:842 +msgid "ai optimization: optimization, direct return: directly_return" +msgstr "ai優化: optimization, 直接返回: directly_return" + +#: community/apps/dataset/serializers/document_serializers.py:848 +msgid "Document metadata->web:{source_url:xxx,selector:'xxx'},base:{}" +msgstr "文檔元數據->web:{source_url:xxx,selector:'xxx'},base:{}" + +#: community/apps/dataset/serializers/document_serializers.py:859 +msgid "dataset id not exist" +msgstr "知識庫 id 不存在" + +#: community/apps/dataset/serializers/document_serializers.py:990 +#: community/apps/dataset/serializers/document_serializers.py:1020 +msgid "limit" +msgstr "分段長度" + +#: community/apps/dataset/serializers/document_serializers.py:994 +#: community/apps/dataset/serializers/document_serializers.py:996 +msgid "patterns" +msgstr "分段標識列表" + +#: community/apps/dataset/serializers/document_serializers.py:999 +msgid "Auto Clean" +msgstr "自動清洗" + +#: community/apps/dataset/serializers/document_serializers.py:1006 +msgid "The maximum size of the uploaded file cannot exceed 100MB" +msgstr "文件上傳最大大小不能超過100MB" + +#: community/apps/dataset/serializers/document_serializers.py:1025 +msgid "Segmented regular list" +msgstr "分段正則列表" + +#: community/apps/dataset/serializers/document_serializers.py:1029 +#: community/apps/dataset/serializers/document_serializers.py:1030 +msgid "Whether to clear special characters" +msgstr "是否清除特殊字符" + +#: community/apps/dataset/serializers/document_serializers.py:1049 +msgid "space" +msgstr "空格" + +#: community/apps/dataset/serializers/document_serializers.py:1050 +msgid "semicolon" +msgstr "分號" + +#: community/apps/dataset/serializers/document_serializers.py:1050 +msgid "comma" +msgstr "逗號" + +#: community/apps/dataset/serializers/document_serializers.py:1051 +msgid "period" +msgstr "句號" + +#: community/apps/dataset/serializers/document_serializers.py:1051 +msgid "enter" +msgstr "回車" + +#: 
community/apps/dataset/serializers/document_serializers.py:1052 +msgid "blank line" +msgstr "空行" + +#: community/apps/dataset/serializers/document_serializers.py:1165 +msgid "Hit handling method is required" +msgstr "命中處理方式必填" + +#: community/apps/dataset/serializers/document_serializers.py:1167 +msgid "The hit processing method must be directly_return|optimization" +msgstr "命中處理方式必須是 directly_return|optimization" + +#: community/apps/dataset/serializers/document_serializers.py:1213 +#: community/apps/dataset/serializers/paragraph_serializers.py:753 +msgid "The task is being executed, please do not send it again." +msgstr "任務正在執行中,請勿重複發送" + +#: community/apps/dataset/serializers/file_serializers.py:82 +msgid "File not found" +msgstr "文件不存在" + +#: community/apps/dataset/serializers/image_serializers.py:23 +msgid "image" +msgstr "圖片" + +#: community/apps/dataset/serializers/image_serializers.py:42 +msgid "Image not found" +msgstr "圖片不存在" + +#: community/apps/dataset/serializers/paragraph_serializers.py:52 +#: community/apps/dataset/serializers/paragraph_serializers.py:68 +#: community/apps/dataset/serializers/paragraph_serializers.py:69 +#: community/apps/dataset/serializers/paragraph_serializers.py:82 +#: community/apps/dataset/serializers/paragraph_serializers.py:85 +#: community/apps/dataset/serializers/paragraph_serializers.py:91 +#: community/apps/dataset/serializers/paragraph_serializers.py:93 +#: community/apps/dataset/serializers/paragraph_serializers.py:653 +msgid "section title" +msgstr "段落標題" + +#: community/apps/dataset/serializers/paragraph_serializers.py:65 +#: community/apps/dataset/serializers/paragraph_serializers.py:66 +msgid "section content" +msgstr "段落內容" + +#: community/apps/dataset/serializers/paragraph_serializers.py:73 +#: community/apps/dataset/serializers/paragraph_serializers.py:74 +#: community/apps/dataset/serializers/problem_serializers.py:88 +msgid "problem list" +msgstr "問題列表" + +#: 
community/apps/dataset/serializers/paragraph_serializers.py:100 +#: community/apps/dataset/serializers/paragraph_serializers.py:172 +#: community/apps/dataset/serializers/paragraph_serializers.py:214 +#: community/apps/dataset/serializers/paragraph_serializers.py:276 +#: community/apps/dataset/serializers/paragraph_serializers.py:308 +#: community/apps/dataset/serializers/paragraph_serializers.py:456 +#: community/apps/dataset/serializers/paragraph_serializers.py:563 +#: community/apps/dataset/serializers/problem_serializers.py:57 +#: community/apps/dataset/swagger_api/problem_api.py:61 +msgid "paragraph id" +msgstr "段落 id" + +#: community/apps/dataset/serializers/paragraph_serializers.py:105 +#: community/apps/dataset/serializers/paragraph_serializers.py:467 +msgid "Paragraph id does not exist" +msgstr "段落 id 不存在" + +#: community/apps/dataset/serializers/paragraph_serializers.py:134 +msgid "Already associated, please do not associate again" +msgstr "已經關聯,請勿重複關聯" + +#: community/apps/dataset/serializers/paragraph_serializers.py:191 +#: community/apps/dataset/serializers/paragraph_serializers.py:192 +msgid "question content" +msgstr "問題內容" + +#: community/apps/dataset/serializers/paragraph_serializers.py:193 +#: community/apps/dataset/serializers/paragraph_serializers.py:709 +#: community/apps/dataset/swagger_api/problem_api.py:26 +msgid "hit num" +msgstr "命中數量" + +#: community/apps/dataset/serializers/paragraph_serializers.py:210 +#: community/apps/dataset/serializers/paragraph_serializers.py:281 +#: community/apps/dataset/serializers/problem_serializers.py:39 +#: community/apps/dataset/serializers/problem_serializers.py:64 +#: community/apps/dataset/serializers/problem_serializers.py:194 +#: community/apps/dataset/swagger_api/problem_api.py:101 +msgid "problem id" +msgstr "問題 id" + +#: community/apps/dataset/serializers/paragraph_serializers.py:222 +msgid "Paragraph does not exist" +msgstr "段落不存在" + +#: 
community/apps/dataset/serializers/paragraph_serializers.py:224 +msgid "Problem does not exist" +msgstr "問題不存在" + +#: community/apps/dataset/serializers/paragraph_serializers.py:306 +#: community/apps/dataset/serializers/paragraph_serializers.py:449 +#: community/apps/dataset/serializers/paragraph_serializers.py:450 +msgid "paragraph id list" +msgstr "段落 id 列表" + +#: community/apps/dataset/serializers/paragraph_serializers.py:317 +msgid "The document to be migrated is consistent with the target document" +msgstr "文檔遷移的文檔與目標文檔一致" + +#: community/apps/dataset/serializers/paragraph_serializers.py:319 +#, python-brace-format +msgid "The document id does not exist [{document_id}]" +msgstr "文檔 id 不存在 [{document_id}]" + +#: community/apps/dataset/serializers/paragraph_serializers.py:323 +#, python-brace-format +msgid "The target document id does not exist [{document_id}]" +msgstr "目標文檔 id 不存在 [{document_id}]" + +#: community/apps/dataset/serializers/paragraph_serializers.py:503 +msgid "Problem id does not exist" +msgstr "問題 id 不存在" + +#: community/apps/dataset/serializers/paragraph_serializers.py:713 +#: community/apps/dataset/serializers/paragraph_serializers.py:714 +msgid "Number of dislikes" +msgstr "點踩數量" + +#: community/apps/dataset/serializers/problem_serializers.py:50 +msgid "Issue ID is passed when modifying, not when creating." 
+msgstr "問題 ID 在修改時傳遞,創建時不傳遞" + +#: community/apps/dataset/serializers/problem_serializers.py:62 +#: community/apps/dataset/swagger_api/problem_api.py:51 +#: community/apps/dataset/swagger_api/problem_api.py:52 +#: community/apps/dataset/swagger_api/problem_api.py:83 +#: community/apps/dataset/swagger_api/problem_api.py:84 +msgid "problem id list" +msgstr "問題 id 列表" + +#: community/apps/dataset/swagger_api/document_api.py:38 +#: community/apps/dataset/swagger_api/document_api.py:52 +msgid "1|2|3 1:Vectorization|2:Generate issues|3:Synchronize documents" +msgstr "1|2|3 1:向量化|2:生成問題|3:同步文檔" + +#: community/apps/dataset/swagger_api/document_api.py:64 +#: community/apps/dataset/swagger_api/document_api.py:65 +msgid "state list" +msgstr "狀態列表" + +#: community/apps/dataset/swagger_api/image_api.py:22 +msgid "image file" +msgstr "圖片文件" + +#: community/apps/dataset/swagger_api/problem_api.py:54 +#: community/apps/dataset/swagger_api/problem_api.py:55 +msgid "Associated paragraph information list" +msgstr "關聯段落信息列表" + +#: community/apps/dataset/swagger_api/problem_api.py:131 +msgid "Hit num" +msgstr "命中數量" + +#: community/apps/dataset/task/generate.py:95 +#, python-brace-format +msgid "" +"Generate issue based on document: {document_id} error {error}{traceback}" +msgstr "生成問題基於文檔: {document_id} 錯誤 {error}{traceback}" + +#: community/apps/dataset/task/generate.py:99 +#, python-brace-format +msgid "End--->Generate problem: {document_id}" +msgstr "結束--->生成問題: {document_id}" + +#: community/apps/dataset/task/sync.py:29 +#: community/apps/dataset/task/sync.py:43 +#, python-brace-format +msgid "Start--->Start synchronization web knowledge base:{dataset_id}" +msgstr "開始--->開始同步web知識庫:{dataset_id}" + +#: community/apps/dataset/task/sync.py:34 +#: community/apps/dataset/task/sync.py:47 +#, python-brace-format +msgid "End--->End synchronization web knowledge base:{dataset_id}" +msgstr "結束--->結束同步web知識庫:{dataset_id}" + +#: community/apps/dataset/task/sync.py:36 +#: 
community/apps/dataset/task/sync.py:49 +#, python-brace-format +msgid "Synchronize web knowledge base:{dataset_id} error{error}{traceback}" +msgstr "同步web知識庫:{dataset_id} 錯誤{error}{traceback}" + +#: community/apps/dataset/task/tools.py:114 +#, python-brace-format +msgid "Association problem failed {error}" +msgstr "關聯問題失敗 {error}" + +#: community/apps/dataset/views/dataset.py:35 +#: community/apps/dataset/views/dataset.py:36 +msgid "Synchronize the knowledge base of the website" +msgstr "同步Web網站知識庫" + +#: community/apps/dataset/views/dataset.py:57 +#: community/apps/dataset/views/dataset.py:58 +msgid "Create QA knowledge base" +msgstr "創建QA知識庫" + +#: community/apps/dataset/views/dataset.py:77 +#: community/apps/dataset/views/dataset.py:78 +msgid "Create a web site knowledge base" +msgstr "創建web站點知識庫" + +#: community/apps/dataset/views/dataset.py:93 +#: community/apps/dataset/views/dataset.py:94 +msgid "Get a list of applications available in the knowledge base" +msgstr "獲取知識庫中可用的應用列表" + +#: community/apps/dataset/views/dataset.py:105 +#: community/apps/dataset/views/dataset.py:106 +msgid "Get a list of knowledge bases" +msgstr "獲取知識庫列表" + +#: community/apps/dataset/views/dataset.py:119 +#: community/apps/dataset/views/dataset.py:120 +msgid "Create a knowledge base" +msgstr "創建知識庫" + +#: community/apps/dataset/views/dataset.py:134 +msgid "Hit test list" +msgstr "命中測試列表" + +#: community/apps/dataset/views/dataset.py:154 +msgid "Re-vectorize" +msgstr "重新向量化" + +#: community/apps/dataset/views/dataset.py:170 +msgid "Export knowledge base" +msgstr "導出知識庫" + +#: community/apps/dataset/views/dataset.py:184 +#: community/apps/dataset/views/dataset.py:185 +msgid "Export knowledge base containing images" +msgstr "導出ZIP知識庫" + +#: community/apps/dataset/views/dataset.py:199 +msgid "Delete knowledge base" +msgstr "刪除知識庫" + +#: community/apps/dataset/views/dataset.py:213 +#: community/apps/dataset/views/dataset.py:214 +msgid "Query knowledge base details based on knowledge base 
id" +msgstr "根據知識庫id查詢知識庫詳情" + +#: community/apps/dataset/views/dataset.py:226 +#: community/apps/dataset/views/dataset.py:227 +msgid "Modify knowledge base information" +msgstr "修改知識庫信息" + +#: community/apps/dataset/views/dataset.py:245 +#: community/apps/dataset/views/dataset.py:246 +#: community/apps/dataset/views/document.py:463 +#: community/apps/dataset/views/document.py:464 +msgid "Get the knowledge base paginated list" +msgstr "獲取知識庫文檔分頁列表" + +#: community/apps/dataset/views/document.py:31 +#: community/apps/dataset/views/document.py:32 +msgid "Get QA template" +msgstr "獲取問答模版" + +#: community/apps/dataset/views/document.py:44 +#: community/apps/dataset/views/document.py:45 +msgid "Get form template" +msgstr "獲取表單模版" + +#: community/apps/dataset/views/document.py:57 +#: community/apps/dataset/views/document.py:58 +msgid "Create Web site documents" +msgstr "創建web站點文檔" + +#: community/apps/dataset/views/document.py:77 +#: community/apps/dataset/views/document.py:78 +msgid "Import QA and create documentation" +msgstr "導入問答並創建文檔" + +#: community/apps/dataset/views/document.py:98 +#: community/apps/dataset/views/document.py:99 +msgid "Import tables and create documents" +msgstr "導入表格並創建文檔" + +#: community/apps/dataset/views/document.py:118 +#: community/apps/dataset/views/document.py:119 +msgid "Create document" +msgstr "創建文檔" + +#: community/apps/dataset/views/document.py:152 +#: community/apps/dataset/views/document.py:153 +msgid "Modify document hit processing methods in batches" +msgstr "批量修改文檔命中處理方式" + +#: community/apps/dataset/views/document.py:171 +#: community/apps/dataset/views/document.py:172 +msgid "Create documents in batches" +msgstr "批量創建文檔" + +#: community/apps/dataset/views/document.py:187 +#: community/apps/dataset/views/document.py:188 +msgid "Batch sync documents" +msgstr "批量同步文檔" + +#: community/apps/dataset/views/document.py:202 +#: community/apps/dataset/views/document.py:203 +msgid "Delete documents in batches" +msgstr "批量刪除文檔" + +#: 
community/apps/dataset/views/document.py:220 +#: community/apps/dataset/views/document.py:221 +msgid "Synchronize web site types" +msgstr "同步web站點類型" + +#: community/apps/dataset/views/document.py:239 +#: community/apps/dataset/views/document.py:240 +msgid "Cancel task" +msgstr "取消任務" + +#: community/apps/dataset/views/document.py:260 +#: community/apps/dataset/views/document.py:261 +msgid "Cancel tasks in batches" +msgstr "批量取消任務" + +#: community/apps/dataset/views/document.py:279 +#: community/apps/dataset/views/document.py:280 +msgid "Refresh document vector library" +msgstr "文檔向量化" + +#: community/apps/dataset/views/document.py:300 +#: community/apps/dataset/views/document.py:301 +msgid "Batch refresh document vector library" +msgstr "批量文檔向量化" + +#: community/apps/dataset/views/document.py:319 +#: community/apps/dataset/views/document.py:320 +msgid "Migrate documents in batches" +msgstr "批量遷移文檔" + +#: community/apps/dataset/views/document.py:346 +#: community/apps/dataset/views/document.py:347 +msgid "Export document" +msgstr "導出文檔" + +#: community/apps/dataset/views/document.py:361 +#: community/apps/dataset/views/document.py:362 +msgid "Export Zip document" +msgstr "導出Zip文檔" + +#: community/apps/dataset/views/document.py:376 +#: community/apps/dataset/views/document.py:377 +msgid "Get document details" +msgstr "獲取文檔詳情" + +#: community/apps/dataset/views/document.py:391 +#: community/apps/dataset/views/document.py:392 +msgid "Modify document" +msgstr "修改文檔" + +#: community/apps/dataset/views/document.py:409 +#: community/apps/dataset/views/document.py:410 +msgid "Delete document" +msgstr "刪除文檔" + +#: community/apps/dataset/views/document.py:427 +#: community/apps/dataset/views/document.py:428 +msgid "Get a list of segment IDs" +msgstr "獲取分段id列表" + +#: community/apps/dataset/views/document.py:439 +#: community/apps/dataset/views/document.py:440 +msgid "Segmented document" +msgstr "分段文檔" + +#: community/apps/dataset/views/file.py:42 +#: 
community/apps/dataset/views/file.py:43 +msgid "Get file" +msgstr "獲取文件" + +#: community/apps/dataset/views/image.py:28 +#: community/apps/dataset/views/image.py:29 +#: community/apps/dataset/views/image.py:34 +msgid "Upload image" +msgstr "上傳圖片" + +#: community/apps/dataset/views/image.py:35 +#: community/apps/dataset/views/image.py:44 +msgid "Image" +msgstr "圖片" + +#: community/apps/dataset/views/image.py:42 +#: community/apps/dataset/views/image.py:43 +msgid "Get Image" +msgstr "獲取圖片" + +#: community/apps/dataset/views/paragraph.py:28 +#: community/apps/dataset/views/paragraph.py:29 +msgid "Paragraph list" +msgstr "段落列表" + +#: community/apps/dataset/views/paragraph.py:32 +#: community/apps/dataset/views/paragraph.py:51 +#: community/apps/dataset/views/paragraph.py:69 +#: community/apps/dataset/views/paragraph.py:85 +#: community/apps/dataset/views/paragraph.py:103 +#: community/apps/dataset/views/paragraph.py:121 +#: community/apps/dataset/views/paragraph.py:140 +#: community/apps/dataset/views/paragraph.py:156 +#: community/apps/dataset/views/paragraph.py:172 +#: community/apps/dataset/views/paragraph.py:193 +#: community/apps/dataset/views/paragraph.py:211 +#: community/apps/dataset/views/paragraph.py:238 +msgid "Knowledge Base/Documentation/Paragraph" +msgstr "知識庫/文檔/段落" + +#: community/apps/dataset/views/paragraph.py:46 +#: community/apps/dataset/views/paragraph.py:47 +msgid "Create Paragraph" +msgstr "創建段落" + +#: community/apps/dataset/views/paragraph.py:64 +#: community/apps/dataset/views/paragraph.py:65 +msgid "Add associated questions" +msgstr "添加關聯問題" + +#: community/apps/dataset/views/paragraph.py:80 +#: community/apps/dataset/views/paragraph.py:81 +msgid "Get a list of paragraph questions" +msgstr "獲取段落問題列表" + +#: community/apps/dataset/views/paragraph.py:99 +#: community/apps/dataset/views/paragraph.py:100 +msgid "Disassociation issue" +msgstr "取消關聯問題" + +#: community/apps/dataset/views/paragraph.py:117 +#: 
community/apps/dataset/views/paragraph.py:118 +msgid "Related questions" +msgstr "關聯問題" + +#: community/apps/dataset/views/paragraph.py:135 +#: community/apps/dataset/views/paragraph.py:136 +msgid "Modify paragraph data" +msgstr "修改段落數據" + +#: community/apps/dataset/views/paragraph.py:152 +#: community/apps/dataset/views/paragraph.py:153 +msgid "Get paragraph details" +msgstr "獲取段落詳情" + +#: community/apps/dataset/views/paragraph.py:168 +#: community/apps/dataset/views/paragraph.py:169 +msgid "Delete paragraph" +msgstr "刪除段落" + +#: community/apps/dataset/views/paragraph.py:187 +#: community/apps/dataset/views/paragraph.py:188 +msgid "Delete paragraphs in batches" +msgstr "批量刪除段落" + +#: community/apps/dataset/views/paragraph.py:206 +#: community/apps/dataset/views/paragraph.py:207 +msgid "Migrate paragraphs in batches" +msgstr "批量遷移段落" + +#: community/apps/dataset/views/paragraph.py:233 +#: community/apps/dataset/views/paragraph.py:234 +msgid "Get paragraph list by pagination" +msgstr "獲取分頁段落列表" + +#: community/apps/dataset/views/problem.py:28 +#: community/apps/dataset/views/problem.py:29 +msgid "Question list" +msgstr "問題列表" + +#: community/apps/dataset/views/problem.py:32 +#: community/apps/dataset/views/problem.py:50 +#: community/apps/dataset/views/problem.py:68 +#: community/apps/dataset/views/problem.py:88 +#: community/apps/dataset/views/problem.py:103 +#: community/apps/dataset/views/problem.py:120 +#: community/apps/dataset/views/problem.py:136 +#: community/apps/dataset/views/problem.py:155 +msgid "Knowledge Base/Documentation/Paragraph/Question" +msgstr "知識庫/文檔/段落/問題" + +#: community/apps/dataset/views/problem.py:45 +#: community/apps/dataset/views/problem.py:46 +msgid "Create question" +msgstr "創建問題" + +#: community/apps/dataset/views/problem.py:64 +#: community/apps/dataset/views/problem.py:65 +msgid "Get a list of associated paragraphs" +msgstr "獲取關聯段落列表" + +#: community/apps/dataset/views/problem.py:82 +#: community/apps/dataset/views/problem.py:83 
+msgid "Batch deletion issues" +msgstr "批量刪除問題" + +#: community/apps/dataset/views/problem.py:98 +#: community/apps/dataset/views/problem.py:99 +msgid "Batch associated paragraphs" +msgstr "批量關聯段落" + +#: community/apps/dataset/views/problem.py:116 +#: community/apps/dataset/views/problem.py:117 +msgid "Delete question" +msgstr "刪除問題" + +#: community/apps/dataset/views/problem.py:131 +#: community/apps/dataset/views/problem.py:132 +msgid "Modify question" +msgstr "修改問題" + +#: community/apps/dataset/views/problem.py:150 +#: community/apps/dataset/views/problem.py:151 +msgid "Get the list of questions by page" +msgstr "獲取分頁問題列表" + +#: community/apps/embedding/task/embedding.py:30 +#: community/apps/embedding/task/embedding.py:81 +#, python-brace-format +msgid "Failed to obtain vector model: {error} {traceback}" +msgstr "獲取向量模型失敗: {error} {traceback}" + +#: community/apps/embedding/task/embedding.py:110 +#, python-brace-format +msgid "Start--->Vectorized dataset: {dataset_id}" +msgstr "開始--->向量化知識庫: {dataset_id}" + +#: community/apps/embedding/task/embedding.py:114 +#, python-brace-format +msgid "Dataset documentation: {document_names}" +msgstr "知識庫文檔: {document_names}" + +#: community/apps/embedding/task/embedding.py:127 +#, python-brace-format +msgid "End--->Vectorized dataset: {dataset_id}" +msgstr "結束--->向量化知識庫: {dataset_id}" + +#: community/apps/function_lib/serializers/function_lib_serializer.py:70 +#: community/apps/function_lib/serializers/function_lib_serializer.py:83 +#: community/apps/function_lib/swagger_api/function_lib_api.py:68 +#: community/apps/function_lib/swagger_api/function_lib_api.py:69 +#: community/apps/function_lib/swagger_api/function_lib_api.py:84 +#: community/apps/function_lib/swagger_api/function_lib_api.py:85 +#: community/apps/function_lib/swagger_api/function_lib_api.py:130 +#: community/apps/function_lib/swagger_api/function_lib_api.py:131 +#: community/apps/function_lib/swagger_api/function_lib_api.py:176 +#: 
community/apps/function_lib/swagger_api/function_lib_api.py:177 +msgid "variable name" +msgstr "變量名" + +#: community/apps/function_lib/serializers/function_lib_serializer.py:71 +#: community/apps/function_lib/swagger_api/function_lib_api.py:88 +#: community/apps/function_lib/swagger_api/function_lib_api.py:89 +#: community/apps/function_lib/swagger_api/function_lib_api.py:134 +#: community/apps/function_lib/swagger_api/function_lib_api.py:135 +#: community/apps/function_lib/swagger_api/function_lib_api.py:180 +#: community/apps/function_lib/swagger_api/function_lib_api.py:181 +msgid "required" +msgstr "必填" + +#: community/apps/function_lib/serializers/function_lib_serializer.py:74 +msgid "fields only support string|int|dict|array|float" +msgstr "字段只支持string|int|dict|array|float" + +#: community/apps/function_lib/serializers/function_lib_serializer.py:85 +#: community/apps/function_lib/swagger_api/function_lib_api.py:72 +#: community/apps/function_lib/swagger_api/function_lib_api.py:73 +msgid "variable value" +msgstr "變量值" + +#: community/apps/function_lib/serializers/function_lib_serializer.py:93 +#: community/apps/function_lib/serializers/function_lib_serializer.py:104 +#: community/apps/function_lib/serializers/function_lib_serializer.py:119 +#: community/apps/function_lib/serializers/py_lint_serializer.py:23 +#: community/apps/function_lib/swagger_api/function_lib_api.py:28 +#: community/apps/function_lib/swagger_api/function_lib_api.py:29 +#: community/apps/function_lib/swagger_api/function_lib_api.py:75 +#: community/apps/function_lib/swagger_api/function_lib_api.py:76 +#: community/apps/function_lib/swagger_api/function_lib_api.py:117 +#: community/apps/function_lib/swagger_api/function_lib_api.py:118 +#: community/apps/function_lib/swagger_api/function_lib_api.py:163 +#: community/apps/function_lib/swagger_api/function_lib_api.py:164 +#: community/apps/function_lib/swagger_api/py_lint_api.py:22 +#: community/apps/function_lib/swagger_api/py_lint_api.py:23 
+msgid "function content" +msgstr "函數內容" + +#: community/apps/function_lib/serializers/function_lib_serializer.py:98 +#: community/apps/function_lib/serializers/function_lib_serializer.py:114 +#: community/apps/function_lib/serializers/function_lib_serializer.py:135 +#: community/apps/function_lib/serializers/function_lib_serializer.py:388 +#: community/apps/function_lib/swagger_api/function_lib_api.py:24 +#: community/apps/function_lib/swagger_api/function_lib_api.py:25 +#: community/apps/function_lib/swagger_api/function_lib_api.py:46 +#: community/apps/function_lib/swagger_api/function_lib_api.py:113 +#: community/apps/function_lib/swagger_api/function_lib_api.py:114 +#: community/apps/function_lib/swagger_api/function_lib_api.py:159 +#: community/apps/function_lib/swagger_api/function_lib_api.py:160 +msgid "function name" +msgstr "函數名" + +#: community/apps/function_lib/serializers/function_lib_serializer.py:101 +#: community/apps/function_lib/serializers/function_lib_serializer.py:117 +#: community/apps/function_lib/serializers/function_lib_serializer.py:138 +#: community/apps/function_lib/swagger_api/function_lib_api.py:26 +#: community/apps/function_lib/swagger_api/function_lib_api.py:27 +#: community/apps/function_lib/swagger_api/function_lib_api.py:51 +#: community/apps/function_lib/swagger_api/function_lib_api.py:115 +#: community/apps/function_lib/swagger_api/function_lib_api.py:116 +#: community/apps/function_lib/swagger_api/function_lib_api.py:161 +#: community/apps/function_lib/swagger_api/function_lib_api.py:162 +msgid "function description" +msgstr "函數描述" + +#: community/apps/function_lib/serializers/function_lib_serializer.py:232 +msgid "field has no value set" +msgstr "字段沒有設置值" + +#: community/apps/function_lib/serializers/function_lib_serializer.py:248 +#: community/apps/function_lib/serializers/function_lib_serializer.py:253 +msgid "type error" +msgstr "類型錯誤" + +#: community/apps/function_lib/serializers/function_lib_serializer.py:256 +#, 
python-brace-format +msgid "Field: {name} Type: {_type} Value: {value} Type conversion error" +msgstr "字段: {name} 類型: {_type} 值: {value} 類型轉換錯誤" + +#: community/apps/function_lib/serializers/function_lib_serializer.py:261 +msgid "function id" +msgstr "函數 id" + +#: community/apps/function_lib/serializers/function_lib_serializer.py:267 +#: community/apps/function_lib/serializers/function_lib_serializer.py:303 +#: community/apps/function_lib/serializers/function_lib_serializer.py:366 +#: community/apps/function_lib/serializers/function_lib_serializer.py:396 +msgid "Function does not exist" +msgstr "函數不存在" + +#: community/apps/function_lib/serializers/function_lib_serializer.py:357 +#: community/apps/function_lib/serializers/function_lib_serializer.py:386 +#| msgid "function" +msgid "function ID" +msgstr "函數 ID" + +#: community/apps/function_lib/swagger_api/function_lib_api.py:23 +#: community/apps/function_lib/swagger_api/function_lib_api.py:205 +msgid "ID" +msgstr "" + +#: community/apps/function_lib/swagger_api/function_lib_api.py:30 +#: community/apps/function_lib/swagger_api/function_lib_api.py:31 +msgid "input field" +msgstr "輸入字段" + +#: community/apps/function_lib/swagger_api/function_lib_api.py:62 +#: community/apps/function_lib/swagger_api/function_lib_api.py:78 +#: community/apps/function_lib/swagger_api/function_lib_api.py:124 +#: community/apps/function_lib/swagger_api/function_lib_api.py:170 +msgid "Input variable list" +msgstr "輸入變量列表" + +#: community/apps/function_lib/swagger_api/function_lib_api.py:94 +#: community/apps/function_lib/swagger_api/function_lib_api.py:140 +#: community/apps/function_lib/swagger_api/function_lib_api.py:186 +msgid "Field type string|int|dict|array|float" +msgstr "字段類型 string|int|dict|array|float" + +#: community/apps/function_lib/swagger_api/function_lib_api.py:100 +#: community/apps/function_lib/swagger_api/function_lib_api.py:146 +#: community/apps/function_lib/swagger_api/function_lib_api.py:192 +msgid "The source only 
supports custom|reference" +msgstr "來源只支持custom|reference" + +#: community/apps/function_lib/views/function_lib_views.py:28 +#: community/apps/function_lib/views/function_lib_views.py:29 +msgid "Get function list" +msgstr "獲取函數列表" + +#: community/apps/function_lib/views/function_lib_views.py:30 +#: community/apps/function_lib/views/function_lib_views.py:46 +#: community/apps/function_lib/views/function_lib_views.py:59 +#: community/apps/function_lib/views/function_lib_views.py:74 +#: community/apps/function_lib/views/function_lib_views.py:85 +#: community/apps/function_lib/views/function_lib_views.py:95 +#: community/apps/function_lib/views/function_lib_views.py:111 +#: community/apps/function_lib/views/py_lint.py:29 +msgid "Function" +msgstr "函數庫" + +#: community/apps/function_lib/views/function_lib_views.py:43 +#: community/apps/function_lib/views/function_lib_views.py:44 +msgid "Create function" +msgstr "創建函數" + +#: community/apps/function_lib/views/function_lib_views.py:56 +#: community/apps/function_lib/views/function_lib_views.py:57 +msgid "Debug function" +msgstr "調試函數" + +#: community/apps/function_lib/views/function_lib_views.py:71 +#: community/apps/function_lib/views/function_lib_views.py:72 +msgid "Update function" +msgstr "更新函數" + +#: community/apps/function_lib/views/function_lib_views.py:83 +#: community/apps/function_lib/views/function_lib_views.py:84 +msgid "Delete function" +msgstr "刪除函數" + +#: community/apps/function_lib/views/function_lib_views.py:93 +#: community/apps/function_lib/views/function_lib_views.py:94 +msgid "Get function details" +msgstr "獲取函數詳情" + +#: community/apps/function_lib/views/function_lib_views.py:106 +#: community/apps/function_lib/views/function_lib_views.py:107 +msgid "Get function list by pagination" +msgstr "獲取分頁函數列表" + +#: community/apps/function_lib/views/function_lib_views.py:129 +#| msgid "not a function" +msgid "Import function" +msgstr "導入函數" + +#: community/apps/function_lib/views/function_lib_views.py:143 +#| 
msgid "not a function" +msgid "Export function" +msgstr "導出函數" + +#: community/apps/function_lib/views/py_lint.py:26 +#: community/apps/function_lib/views/py_lint.py:27 +msgid "Check code" +msgstr "檢查代碼" + +#: community/apps/setting/models_provider/base_model_provider.py:66 +msgid "Model type cannot be empty" +msgstr "模型類型不能爲空" + +#: community/apps/setting/models_provider/base_model_provider.py:91 +msgid "The current platform does not support downloading models" +msgstr "當前平臺不支持下載模型" + +#: community/apps/setting/models_provider/base_model_provider.py:146 +msgid "LLM" +msgstr "大語言模型" + +#: community/apps/setting/models_provider/base_model_provider.py:147 +msgid "Embedding Model" +msgstr "向量模型" + +#: community/apps/setting/models_provider/base_model_provider.py:148 +msgid "Speech2Text" +msgstr "語音識別" + +#: community/apps/setting/models_provider/base_model_provider.py:149 +msgid "TTS" +msgstr "語音合成" + +#: community/apps/setting/models_provider/base_model_provider.py:150 +msgid "Vision Model" +msgstr "圖片理解" + +#: community/apps/setting/models_provider/base_model_provider.py:151 +msgid "Image Generation" +msgstr "圖片生成" + +#: community/apps/setting/models_provider/base_model_provider.py:152 +msgid "Rerank" +msgstr "重排模型" + +#: community/apps/setting/models_provider/base_model_provider.py:226 +msgid "The model does not support" +msgstr "模型不支持" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/aliyun_bai_lian_model_provider.py:42 +msgid "" +"With the GTE-Rerank text sorting series model developed by Alibaba Tongyi " +"Lab, developers can integrate high-quality text retrieval and sorting " +"through the LlamaIndex framework." 
+msgstr "" +"阿里巴巴通義實驗室開發的GTE-Rerank文本排序系列模型,開發者可以通過LlamaIndex" +"框架進行集成高質量文本檢索、排序。" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/aliyun_bai_lian_model_provider.py:45 +msgid "" +"Chinese (including various dialects such as Cantonese), English, Japanese, " +"and Korean support free switching between multiple languages." +msgstr "中文(含粵語等各種方言)、英文、日語、韓語支持多個語種自由切換" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/aliyun_bai_lian_model_provider.py:48 +msgid "" +"CosyVoice is based on a new generation of large generative speech models, " +"which can predict emotions, intonation, rhythm, etc. based on context, and " +"has better anthropomorphic effects." +msgstr "" +"CosyVoice基於新一代生成式語音大模型,能根據上下文預測情緒、語調、韻律等,具有" +"更好的擬人效果" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/aliyun_bai_lian_model_provider.py:51 +msgid "" +"Universal text vector is Tongyi Lab's multi-language text unified vector " +"model based on the LLM base. It provides high-level vector services for " +"multiple mainstream languages around the world and helps developers quickly " +"convert text data into high-quality vector data." +msgstr "" +"通用文本向量,是通義實驗室基於LLM底座的多語言文本統一向量模型,面向全球多個主" +"流語種,提供高水準的向量服務,幫助開發者將文本數據快速轉換爲高質量的向量數" +"據。" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/aliyun_bai_lian_model_provider.py:69 +#: community/apps/setting/models_provider/impl/qwen_model_provider/qwen_model_provider.py:40 +msgid "" +"Tongyi Wanxiang - a large image model for text generation, supports " +"bilingual input in Chinese and English, and supports the input of reference " +"pictures for reference content or reference style migration. Key styles " +"include but are not limited to watercolor, oil painting, Chinese painting, " +"sketch, flat illustration, two-dimensional, and 3D. Cartoon." 
+msgstr "" +"通義萬相-文本生成圖像大模型,支持中英文雙語輸入,支持輸入參考圖片進行參考內容" +"或者參考風格遷移,重點風格包括但不限於水彩、油畫、中國畫、素描、扁平插畫、二" +"次元、3D卡通。" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/aliyun_bai_lian_model_provider.py:95 +msgid "Alibaba Cloud Bailian" +msgstr "阿里雲百鍊" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/embedding.py:28 +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/image.py:47 +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/llm.py:40 +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/reranker.py:28 +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/stt.py:21 +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:68 +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:55 +#: community/apps/setting/models_provider/impl/anthropic_model_provider/credential/image.py:42 +#: community/apps/setting/models_provider/impl/anthropic_model_provider/credential/llm.py:47 +#: community/apps/setting/models_provider/impl/aws_bedrock_model_provider/credential/embedding.py:21 +#: community/apps/setting/models_provider/impl/aws_bedrock_model_provider/credential/llm.py:40 +#: community/apps/setting/models_provider/impl/azure_model_provider/credential/embedding.py:27 +#: community/apps/setting/models_provider/impl/azure_model_provider/credential/image.py:45 +#: community/apps/setting/models_provider/impl/azure_model_provider/credential/llm.py:47 +#: community/apps/setting/models_provider/impl/azure_model_provider/credential/stt.py:23 +#: community/apps/setting/models_provider/impl/azure_model_provider/credential/tti.py:58 +#: community/apps/setting/models_provider/impl/azure_model_provider/credential/tts.py:41 +#: 
community/apps/setting/models_provider/impl/deepseek_model_provider/credential/llm.py:47 +#: community/apps/setting/models_provider/impl/gemini_model_provider/credential/embedding.py:26 +#: community/apps/setting/models_provider/impl/gemini_model_provider/credential/image.py:41 +#: community/apps/setting/models_provider/impl/gemini_model_provider/credential/llm.py:47 +#: community/apps/setting/models_provider/impl/gemini_model_provider/credential/stt.py:21 +#: community/apps/setting/models_provider/impl/kimi_model_provider/credential/llm.py:47 +#: community/apps/setting/models_provider/impl/local_model_provider/credential/embedding.py:27 +#: community/apps/setting/models_provider/impl/local_model_provider/credential/reranker.py:28 +#: community/apps/setting/models_provider/impl/ollama_model_provider/credential/embedding.py:26 +#: community/apps/setting/models_provider/impl/ollama_model_provider/credential/image.py:39 +#: community/apps/setting/models_provider/impl/ollama_model_provider/credential/llm.py:44 +#: community/apps/setting/models_provider/impl/ollama_model_provider/credential/reranker.py:27 +#: community/apps/setting/models_provider/impl/ollama_model_provider/credential/reranker.py:31 +#: community/apps/setting/models_provider/impl/openai_model_provider/credential/embedding.py:26 +#: community/apps/setting/models_provider/impl/openai_model_provider/credential/image.py:44 +#: community/apps/setting/models_provider/impl/openai_model_provider/credential/llm.py:47 +#: community/apps/setting/models_provider/impl/openai_model_provider/credential/stt.py:22 +#: community/apps/setting/models_provider/impl/openai_model_provider/credential/tti.py:61 +#: community/apps/setting/models_provider/impl/openai_model_provider/credential/tts.py:40 +#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/image.py:47 +#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/llm.py:47 +#: 
community/apps/setting/models_provider/impl/qwen_model_provider/credential/tti.py:68 +#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/embedding.py:26 +#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/image.py:44 +#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/llm.py:47 +#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/reranker.py:28 +#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/stt.py:22 +#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/tti.py:61 +#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/tts.py:40 +#: community/apps/setting/models_provider/impl/tencent_cloud_model_provider/credential/llm.py:47 +#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/embedding.py:19 +#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/image.py:47 +#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/llm.py:31 +#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:78 +#: community/apps/setting/models_provider/impl/vllm_model_provider/credential/embedding.py:26 +#: community/apps/setting/models_provider/impl/vllm_model_provider/credential/image.py:42 +#: community/apps/setting/models_provider/impl/vllm_model_provider/credential/llm.py:39 +#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/embedding.py:26 +#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/image.py:42 +#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/llm.py:47 +#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/stt.py:25 +#: 
community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/tti.py:41 +#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/tts.py:53 +#: community/apps/setting/models_provider/impl/wenxin_model_provider/credential/embedding.py:27 +#: community/apps/setting/models_provider/impl/wenxin_model_provider/credential/llm.py:46 +#: community/apps/setting/models_provider/impl/xf_model_provider/credential/embedding.py:27 +#: community/apps/setting/models_provider/impl/xf_model_provider/credential/image.py:29 +#: community/apps/setting/models_provider/impl/xf_model_provider/credential/llm.py:66 +#: community/apps/setting/models_provider/impl/xf_model_provider/credential/stt.py:24 +#: community/apps/setting/models_provider/impl/xf_model_provider/credential/tts.py:47 +#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/embedding.py:19 +#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/image.py:41 +#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/llm.py:39 +#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/reranker.py:25 +#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/stt.py:21 +#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/tti.py:59 +#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/tts.py:39 +#: community/apps/setting/models_provider/impl/zhipu_model_provider/credential/image.py:41 +#: community/apps/setting/models_provider/impl/zhipu_model_provider/credential/llm.py:47 +#: community/apps/setting/models_provider/impl/zhipu_model_provider/credential/tti.py:40 +#, python-brace-format +msgid "{model_type} Model type is not supported" +msgstr "模型類型 {model_type} 不支持" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/embedding.py:32 +#, 
python-brace-format +msgid "{key} is required" +msgstr "{key} 是必填項" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/embedding.py:37 +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/image.py:56 +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/llm.py:50 +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/reranker.py:37 +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/model/tti.py:43 +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/model/tts.py:37 +#: community/apps/setting/models_provider/impl/anthropic_model_provider/credential/image.py:52 +#: community/apps/setting/models_provider/impl/anthropic_model_provider/credential/llm.py:57 +#: community/apps/setting/models_provider/impl/aws_bedrock_model_provider/credential/embedding.py:34 +#: community/apps/setting/models_provider/impl/aws_bedrock_model_provider/credential/llm.py:53 +#: community/apps/setting/models_provider/impl/azure_model_provider/credential/embedding.py:37 +#: community/apps/setting/models_provider/impl/azure_model_provider/credential/image.py:55 +#: community/apps/setting/models_provider/impl/azure_model_provider/credential/llm.py:57 +#: community/apps/setting/models_provider/impl/deepseek_model_provider/credential/llm.py:57 +#: community/apps/setting/models_provider/impl/gemini_model_provider/credential/embedding.py:36 +#: community/apps/setting/models_provider/impl/gemini_model_provider/credential/image.py:51 +#: community/apps/setting/models_provider/impl/gemini_model_provider/credential/llm.py:57 +#: community/apps/setting/models_provider/impl/gemini_model_provider/model/stt.py:43 +#: community/apps/setting/models_provider/impl/kimi_model_provider/credential/llm.py:57 +#: community/apps/setting/models_provider/impl/local_model_provider/credential/embedding.py:36 +#: 
community/apps/setting/models_provider/impl/local_model_provider/credential/reranker.py:37 +#: community/apps/setting/models_provider/impl/ollama_model_provider/credential/embedding.py:37 +#: community/apps/setting/models_provider/impl/ollama_model_provider/credential/reranker.py:44 +#: community/apps/setting/models_provider/impl/openai_model_provider/credential/embedding.py:36 +#: community/apps/setting/models_provider/impl/openai_model_provider/credential/image.py:54 +#: community/apps/setting/models_provider/impl/openai_model_provider/credential/llm.py:58 +#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/image.py:56 +#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/llm.py:56 +#: community/apps/setting/models_provider/impl/qwen_model_provider/model/tti.py:43 +#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/embedding.py:36 +#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/image.py:54 +#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/llm.py:58 +#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/reranker.py:37 +#: community/apps/setting/models_provider/impl/tencent_cloud_model_provider/credential/llm.py:58 +#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/embedding.py:23 +#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/image.py:56 +#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/llm.py:51 +#: community/apps/setting/models_provider/impl/tencent_model_provider/model/tti.py:54 +#: community/apps/setting/models_provider/impl/vllm_model_provider/credential/embedding.py:36 +#: community/apps/setting/models_provider/impl/vllm_model_provider/credential/llm.py:50 +#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/embedding.py:36 +#: 
community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/image.py:52 +#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/llm.py:57 +#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/model/tts.py:77 +#: community/apps/setting/models_provider/impl/wenxin_model_provider/credential/embedding.py:31 +#: community/apps/setting/models_provider/impl/wenxin_model_provider/credential/llm.py:60 +#: community/apps/setting/models_provider/impl/xf_model_provider/credential/embedding.py:31 +#: community/apps/setting/models_provider/impl/xf_model_provider/credential/llm.py:76 +#: community/apps/setting/models_provider/impl/xf_model_provider/model/tts.py:101 +#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/embedding.py:31 +#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/image.py:51 +#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/llm.py:50 +#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/reranker.py:34 +#: community/apps/setting/models_provider/impl/xinference_model_provider/model/tts.py:44 +#: community/apps/setting/models_provider/impl/zhipu_model_provider/credential/image.py:51 +#: community/apps/setting/models_provider/impl/zhipu_model_provider/credential/llm.py:56 +#: community/apps/setting/models_provider/impl/zhipu_model_provider/model/tti.py:49 +msgid "Hello" +msgstr "你好!" 
+ +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/embedding.py:44 +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/image.py:66 +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/llm.py:58 +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/reranker.py:44 +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/stt.py:38 +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:86 +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:73 +#: community/apps/setting/models_provider/impl/anthropic_model_provider/credential/image.py:62 +#: community/apps/setting/models_provider/impl/anthropic_model_provider/credential/llm.py:65 +#: community/apps/setting/models_provider/impl/aws_bedrock_model_provider/credential/embedding.py:42 +#: community/apps/setting/models_provider/impl/aws_bedrock_model_provider/credential/llm.py:61 +#: community/apps/setting/models_provider/impl/azure_model_provider/credential/image.py:65 +#: community/apps/setting/models_provider/impl/azure_model_provider/credential/stt.py:40 +#: community/apps/setting/models_provider/impl/azure_model_provider/credential/tti.py:77 +#: community/apps/setting/models_provider/impl/azure_model_provider/credential/tts.py:58 +#: community/apps/setting/models_provider/impl/deepseek_model_provider/credential/llm.py:65 +#: community/apps/setting/models_provider/impl/gemini_model_provider/credential/embedding.py:43 +#: community/apps/setting/models_provider/impl/gemini_model_provider/credential/image.py:61 +#: community/apps/setting/models_provider/impl/gemini_model_provider/credential/llm.py:66 +#: community/apps/setting/models_provider/impl/gemini_model_provider/credential/stt.py:38 +#: 
community/apps/setting/models_provider/impl/kimi_model_provider/credential/llm.py:64 +#: community/apps/setting/models_provider/impl/local_model_provider/credential/embedding.py:44 +#: community/apps/setting/models_provider/impl/local_model_provider/credential/reranker.py:45 +#: community/apps/setting/models_provider/impl/ollama_model_provider/credential/reranker.py:51 +#: community/apps/setting/models_provider/impl/openai_model_provider/credential/embedding.py:43 +#: community/apps/setting/models_provider/impl/openai_model_provider/credential/image.py:64 +#: community/apps/setting/models_provider/impl/openai_model_provider/credential/llm.py:66 +#: community/apps/setting/models_provider/impl/openai_model_provider/credential/stt.py:39 +#: community/apps/setting/models_provider/impl/openai_model_provider/credential/tti.py:80 +#: community/apps/setting/models_provider/impl/openai_model_provider/credential/tts.py:58 +#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/image.py:66 +#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/llm.py:64 +#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/tti.py:86 +#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/embedding.py:43 +#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/image.py:64 +#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/llm.py:66 +#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/reranker.py:44 +#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/stt.py:39 +#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/tti.py:80 +#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/tts.py:58 +#: community/apps/setting/models_provider/impl/tencent_cloud_model_provider/credential/llm.py:66 +#: 
community/apps/setting/models_provider/impl/tencent_model_provider/credential/embedding.py:30 +#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/image.py:66 +#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/llm.py:57 +#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:104 +#: community/apps/setting/models_provider/impl/vllm_model_provider/credential/embedding.py:43 +#: community/apps/setting/models_provider/impl/vllm_model_provider/credential/image.py:62 +#: community/apps/setting/models_provider/impl/vllm_model_provider/credential/llm.py:55 +#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/embedding.py:43 +#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/image.py:62 +#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/llm.py:66 +#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/stt.py:42 +#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/tti.py:58 +#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/tts.py:70 +#: community/apps/setting/models_provider/impl/wenxin_model_provider/credential/embedding.py:38 +#: community/apps/setting/models_provider/impl/xf_model_provider/credential/embedding.py:38 +#: community/apps/setting/models_provider/impl/xf_model_provider/credential/image.py:50 +#: community/apps/setting/models_provider/impl/xf_model_provider/credential/llm.py:84 +#: community/apps/setting/models_provider/impl/xf_model_provider/credential/stt.py:41 +#: community/apps/setting/models_provider/impl/xf_model_provider/credential/tts.py:65 +#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/image.py:60 +#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/reranker.py:40 +#: 
community/apps/setting/models_provider/impl/xinference_model_provider/credential/stt.py:37 +#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/tti.py:77 +#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/tts.py:56 +#: community/apps/setting/models_provider/impl/zhipu_model_provider/credential/image.py:61 +#: community/apps/setting/models_provider/impl/zhipu_model_provider/credential/llm.py:64 +#: community/apps/setting/models_provider/impl/zhipu_model_provider/credential/tti.py:59 +#, python-brace-format +msgid "" +"Verification failed, please check whether the parameters are correct: {error}" +msgstr "驗證失敗,請檢查參數是否正確: {error}" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/image.py:22 +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/llm.py:15 +#: community/apps/setting/models_provider/impl/anthropic_model_provider/credential/image.py:15 +#: community/apps/setting/models_provider/impl/anthropic_model_provider/credential/llm.py:22 +#: community/apps/setting/models_provider/impl/aws_bedrock_model_provider/credential/llm.py:14 +#: community/apps/setting/models_provider/impl/azure_model_provider/credential/image.py:17 +#: community/apps/setting/models_provider/impl/azure_model_provider/credential/llm.py:22 +#: community/apps/setting/models_provider/impl/deepseek_model_provider/credential/llm.py:22 +#: community/apps/setting/models_provider/impl/gemini_model_provider/credential/image.py:15 +#: community/apps/setting/models_provider/impl/gemini_model_provider/credential/llm.py:22 +#: community/apps/setting/models_provider/impl/kimi_model_provider/credential/llm.py:22 +#: community/apps/setting/models_provider/impl/ollama_model_provider/credential/image.py:12 +#: community/apps/setting/models_provider/impl/ollama_model_provider/credential/llm.py:20 +#: 
community/apps/setting/models_provider/impl/openai_model_provider/credential/image.py:17 +#: community/apps/setting/models_provider/impl/openai_model_provider/credential/llm.py:22 +#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/image.py:22 +#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/llm.py:22 +#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/image.py:17 +#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/llm.py:22 +#: community/apps/setting/models_provider/impl/tencent_cloud_model_provider/credential/llm.py:22 +#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/image.py:22 +#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/llm.py:14 +#: community/apps/setting/models_provider/impl/vllm_model_provider/credential/image.py:15 +#: community/apps/setting/models_provider/impl/vllm_model_provider/credential/llm.py:15 +#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/image.py:15 +#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/llm.py:22 +#: community/apps/setting/models_provider/impl/wenxin_model_provider/credential/llm.py:22 +#: community/apps/setting/models_provider/impl/xf_model_provider/credential/llm.py:22 +#: community/apps/setting/models_provider/impl/xf_model_provider/credential/llm.py:41 +#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/image.py:14 +#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/llm.py:15 +#: community/apps/setting/models_provider/impl/zhipu_model_provider/credential/image.py:15 +#: community/apps/setting/models_provider/impl/zhipu_model_provider/credential/llm.py:22 +msgid "Temperature" +msgstr "溫度" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/image.py:23 
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/llm.py:16 +#: community/apps/setting/models_provider/impl/anthropic_model_provider/credential/image.py:16 +#: community/apps/setting/models_provider/impl/anthropic_model_provider/credential/llm.py:23 +#: community/apps/setting/models_provider/impl/aws_bedrock_model_provider/credential/llm.py:15 +#: community/apps/setting/models_provider/impl/azure_model_provider/credential/image.py:18 +#: community/apps/setting/models_provider/impl/azure_model_provider/credential/llm.py:23 +#: community/apps/setting/models_provider/impl/deepseek_model_provider/credential/llm.py:23 +#: community/apps/setting/models_provider/impl/gemini_model_provider/credential/image.py:16 +#: community/apps/setting/models_provider/impl/gemini_model_provider/credential/llm.py:23 +#: community/apps/setting/models_provider/impl/kimi_model_provider/credential/llm.py:23 +#: community/apps/setting/models_provider/impl/ollama_model_provider/credential/image.py:13 +#: community/apps/setting/models_provider/impl/ollama_model_provider/credential/llm.py:21 +#: community/apps/setting/models_provider/impl/openai_model_provider/credential/image.py:18 +#: community/apps/setting/models_provider/impl/openai_model_provider/credential/llm.py:23 +#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/image.py:23 +#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/llm.py:23 +#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/image.py:18 +#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/llm.py:23 +#: community/apps/setting/models_provider/impl/tencent_cloud_model_provider/credential/llm.py:23 +#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/image.py:23 +#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/llm.py:15 +#: 
community/apps/setting/models_provider/impl/vllm_model_provider/credential/image.py:16 +#: community/apps/setting/models_provider/impl/vllm_model_provider/credential/llm.py:16 +#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/image.py:16 +#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/llm.py:23 +#: community/apps/setting/models_provider/impl/wenxin_model_provider/credential/llm.py:23 +#: community/apps/setting/models_provider/impl/xf_model_provider/credential/llm.py:23 +#: community/apps/setting/models_provider/impl/xf_model_provider/credential/llm.py:42 +#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/image.py:15 +#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/llm.py:16 +#: community/apps/setting/models_provider/impl/zhipu_model_provider/credential/image.py:16 +#: community/apps/setting/models_provider/impl/zhipu_model_provider/credential/llm.py:23 +msgid "" +"Higher values make the output more random, while lower values make it more " +"focused and deterministic" +msgstr "較高的數值會使輸出更加隨機,而較低的數值會使其更加集中和確定" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/image.py:31 +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/llm.py:24 +#: community/apps/setting/models_provider/impl/anthropic_model_provider/credential/image.py:24 +#: community/apps/setting/models_provider/impl/anthropic_model_provider/credential/llm.py:31 +#: community/apps/setting/models_provider/impl/aws_bedrock_model_provider/credential/llm.py:23 +#: community/apps/setting/models_provider/impl/azure_model_provider/credential/image.py:26 +#: community/apps/setting/models_provider/impl/azure_model_provider/credential/llm.py:31 +#: community/apps/setting/models_provider/impl/deepseek_model_provider/credential/llm.py:31 +#: 
community/apps/setting/models_provider/impl/gemini_model_provider/credential/image.py:24 +#: community/apps/setting/models_provider/impl/gemini_model_provider/credential/llm.py:31 +#: community/apps/setting/models_provider/impl/kimi_model_provider/credential/llm.py:31 +#: community/apps/setting/models_provider/impl/ollama_model_provider/credential/image.py:21 +#: community/apps/setting/models_provider/impl/ollama_model_provider/credential/llm.py:29 +#: community/apps/setting/models_provider/impl/openai_model_provider/credential/image.py:26 +#: community/apps/setting/models_provider/impl/openai_model_provider/credential/llm.py:31 +#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/image.py:31 +#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/llm.py:31 +#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/image.py:26 +#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/llm.py:31 +#: community/apps/setting/models_provider/impl/tencent_cloud_model_provider/credential/llm.py:31 +#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/image.py:31 +#: community/apps/setting/models_provider/impl/vllm_model_provider/credential/image.py:24 +#: community/apps/setting/models_provider/impl/vllm_model_provider/credential/llm.py:24 +#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/image.py:24 +#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/llm.py:31 +#: community/apps/setting/models_provider/impl/wenxin_model_provider/credential/llm.py:31 +#: community/apps/setting/models_provider/impl/xf_model_provider/credential/llm.py:31 +#: community/apps/setting/models_provider/impl/xf_model_provider/credential/llm.py:50 +#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/image.py:23 +#: 
community/apps/setting/models_provider/impl/xinference_model_provider/credential/llm.py:24 +#: community/apps/setting/models_provider/impl/zhipu_model_provider/credential/image.py:24 +#: community/apps/setting/models_provider/impl/zhipu_model_provider/credential/llm.py:31 +msgid "Output the maximum Tokens" +msgstr "輸出最大Token數" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/image.py:32 +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/llm.py:25 +#: community/apps/setting/models_provider/impl/anthropic_model_provider/credential/image.py:25 +#: community/apps/setting/models_provider/impl/anthropic_model_provider/credential/llm.py:32 +#: community/apps/setting/models_provider/impl/aws_bedrock_model_provider/credential/llm.py:24 +#: community/apps/setting/models_provider/impl/azure_model_provider/credential/image.py:27 +#: community/apps/setting/models_provider/impl/azure_model_provider/credential/llm.py:32 +#: community/apps/setting/models_provider/impl/deepseek_model_provider/credential/llm.py:32 +#: community/apps/setting/models_provider/impl/gemini_model_provider/credential/image.py:25 +#: community/apps/setting/models_provider/impl/gemini_model_provider/credential/llm.py:32 +#: community/apps/setting/models_provider/impl/kimi_model_provider/credential/llm.py:32 +#: community/apps/setting/models_provider/impl/ollama_model_provider/credential/image.py:22 +#: community/apps/setting/models_provider/impl/ollama_model_provider/credential/llm.py:30 +#: community/apps/setting/models_provider/impl/openai_model_provider/credential/image.py:27 +#: community/apps/setting/models_provider/impl/openai_model_provider/credential/llm.py:32 +#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/image.py:32 +#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/llm.py:32 +#: 
community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/image.py:27 +#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/llm.py:32 +#: community/apps/setting/models_provider/impl/tencent_cloud_model_provider/credential/llm.py:32 +#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/image.py:32 +#: community/apps/setting/models_provider/impl/vllm_model_provider/credential/image.py:25 +#: community/apps/setting/models_provider/impl/vllm_model_provider/credential/llm.py:25 +#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/image.py:25 +#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/llm.py:32 +#: community/apps/setting/models_provider/impl/wenxin_model_provider/credential/llm.py:32 +#: community/apps/setting/models_provider/impl/xf_model_provider/credential/llm.py:32 +#: community/apps/setting/models_provider/impl/xf_model_provider/credential/llm.py:51 +#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/image.py:24 +#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/llm.py:25 +#: community/apps/setting/models_provider/impl/zhipu_model_provider/credential/image.py:25 +#: community/apps/setting/models_provider/impl/zhipu_model_provider/credential/llm.py:32 +msgid "Specify the maximum number of tokens that the model can generate" +msgstr "指定模型可以生成的最大 tokens" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/image.py:51 +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/llm.py:45 +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/reranker.py:32 +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/stt.py:26 +#: 
community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:72 +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:60 +#: community/apps/setting/models_provider/impl/anthropic_model_provider/credential/image.py:47 +#: community/apps/setting/models_provider/impl/anthropic_model_provider/credential/llm.py:52 +#: community/apps/setting/models_provider/impl/azure_model_provider/credential/embedding.py:32 +#: community/apps/setting/models_provider/impl/azure_model_provider/credential/image.py:50 +#: community/apps/setting/models_provider/impl/azure_model_provider/credential/llm.py:52 +#: community/apps/setting/models_provider/impl/azure_model_provider/credential/stt.py:28 +#: community/apps/setting/models_provider/impl/azure_model_provider/credential/tti.py:63 +#: community/apps/setting/models_provider/impl/azure_model_provider/credential/tts.py:46 +#: community/apps/setting/models_provider/impl/deepseek_model_provider/credential/llm.py:52 +#: community/apps/setting/models_provider/impl/gemini_model_provider/credential/embedding.py:31 +#: community/apps/setting/models_provider/impl/gemini_model_provider/credential/image.py:46 +#: community/apps/setting/models_provider/impl/gemini_model_provider/credential/llm.py:52 +#: community/apps/setting/models_provider/impl/gemini_model_provider/credential/stt.py:26 +#: community/apps/setting/models_provider/impl/kimi_model_provider/credential/llm.py:52 +#: community/apps/setting/models_provider/impl/local_model_provider/credential/embedding.py:31 +#: community/apps/setting/models_provider/impl/local_model_provider/credential/reranker.py:32 +#: community/apps/setting/models_provider/impl/ollama_model_provider/credential/embedding.py:46 +#: community/apps/setting/models_provider/impl/ollama_model_provider/credential/llm.py:62 +#: community/apps/setting/models_provider/impl/ollama_model_provider/credential/reranker.py:63 +#: 
community/apps/setting/models_provider/impl/openai_model_provider/credential/embedding.py:31 +#: community/apps/setting/models_provider/impl/openai_model_provider/credential/image.py:49 +#: community/apps/setting/models_provider/impl/openai_model_provider/credential/llm.py:52 +#: community/apps/setting/models_provider/impl/openai_model_provider/credential/stt.py:27 +#: community/apps/setting/models_provider/impl/openai_model_provider/credential/tti.py:66 +#: community/apps/setting/models_provider/impl/openai_model_provider/credential/tts.py:45 +#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/image.py:51 +#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/llm.py:51 +#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/tti.py:72 +#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/embedding.py:31 +#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/image.py:49 +#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/llm.py:52 +#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/reranker.py:32 +#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/stt.py:27 +#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/tti.py:66 +#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/tts.py:45 +#: community/apps/setting/models_provider/impl/tencent_cloud_model_provider/credential/llm.py:52 +#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/image.py:51 +#: community/apps/setting/models_provider/impl/vllm_model_provider/credential/embedding.py:31 +#: community/apps/setting/models_provider/impl/vllm_model_provider/credential/image.py:47 +#: community/apps/setting/models_provider/impl/vllm_model_provider/credential/llm.py:65 +#: 
community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/embedding.py:31 +#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/image.py:47 +#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/llm.py:52 +#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/stt.py:30 +#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/tti.py:46 +#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/tts.py:58 +#: community/apps/setting/models_provider/impl/wenxin_model_provider/credential/llm.py:55 +#: community/apps/setting/models_provider/impl/wenxin_model_provider/credential/llm.py:72 +#: community/apps/setting/models_provider/impl/xf_model_provider/credential/image.py:34 +#: community/apps/setting/models_provider/impl/xf_model_provider/credential/llm.py:71 +#: community/apps/setting/models_provider/impl/xf_model_provider/credential/stt.py:29 +#: community/apps/setting/models_provider/impl/xf_model_provider/credential/tts.py:52 +#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/embedding.py:40 +#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/image.py:46 +#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/llm.py:59 +#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/reranker.py:29 +#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/stt.py:26 +#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/tti.py:64 +#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/tts.py:44 +#: community/apps/setting/models_provider/impl/zhipu_model_provider/credential/image.py:46 +#: community/apps/setting/models_provider/impl/zhipu_model_provider/credential/llm.py:51 +#: 
community/apps/setting/models_provider/impl/zhipu_model_provider/credential/tti.py:45 +#, python-brace-format +msgid "{key} is required" +msgstr "{key} 是必填項" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:22 +#: community/apps/setting/models_provider/impl/azure_model_provider/credential/tti.py:15 +#: community/apps/setting/models_provider/impl/openai_model_provider/credential/tti.py:15 +#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/tti.py:22 +#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/tti.py:15 +#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/tti.py:15 +#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/tti.py:14 +#: community/apps/setting/models_provider/impl/zhipu_model_provider/credential/tti.py:15 +msgid "Image size" +msgstr "圖片尺寸" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:22 +#: community/apps/setting/models_provider/impl/azure_model_provider/credential/tti.py:15 +#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/tti.py:22 +msgid "Specify the size of the generated image, such as: 1024x1024" +msgstr "指定生成圖片的尺寸, 如: 1024x1024" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:34 +#: community/apps/setting/models_provider/impl/azure_model_provider/credential/tti.py:40 +#: community/apps/setting/models_provider/impl/openai_model_provider/credential/tti.py:43 +#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/tti.py:34 +#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/tti.py:43 +#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/tti.py:41 +msgid "Number of pictures" +msgstr "圖片數量" + +#: 
community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:34 +#: community/apps/setting/models_provider/impl/azure_model_provider/credential/tti.py:40 +#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/tti.py:34 +msgid "Specify the number of generated images" +msgstr "指定生成圖片的數量" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:41 +#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/tti.py:41 +msgid "Style" +msgstr "風格" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:41 +#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/tti.py:41 +msgid "Specify the style of generated images" +msgstr "指定生成圖片的風格" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:45 +#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/tti.py:45 +msgid "Default value, the image style is randomly output by the model" +msgstr "默認值,圖片風格由模型隨機輸出" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:46 +#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/tti.py:46 +msgid "photography" +msgstr "攝影" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:47 +#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/tti.py:47 +msgid "Portraits" +msgstr "人像寫真" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:48 +#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/tti.py:48 +msgid "3D cartoon" +msgstr "3D卡通" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:49 +#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/tti.py:49 +msgid "animation" 
+msgstr "動畫" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:50 +#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/tti.py:50 +msgid "painting" +msgstr "油畫" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:51 +#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/tti.py:51 +msgid "watercolor" +msgstr "水彩" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:52 +#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/tti.py:52 +msgid "sketch" +msgstr "素描" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:53 +#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/tti.py:53 +msgid "Chinese painting" +msgstr "中國畫" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:54 +#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/tti.py:54 +msgid "flat illustration" +msgstr "扁平插畫" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:15 +#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/tts.py:15 +#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/tts.py:15 +msgid "timbre" +msgstr "音色" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:15 +#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/tts.py:15 +msgid "Chinese sounds can support mixed scenes of Chinese and English" +msgstr "中文音色支持中英文混合場景" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:20 +msgid "Long Xiaochun" +msgstr "龍小淳" + +#: 
community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:21 +msgid "Long Xiaoxia" +msgstr "龍小夏" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:22 +msgid "Long Xiaochen" +msgstr "龍小誠" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:23 +msgid "Long Xiaobai" +msgstr "龍小白" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:24 +msgid "Long laotie" +msgstr "龍老鐵" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:25 +msgid "Long Shu" +msgstr "龍書" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:26 +msgid "Long Shuo" +msgstr "龍碩" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:27 +msgid "Long Jing" +msgstr "龍婧" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:28 +msgid "Long Miao" +msgstr "龍妙" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:29 +msgid "Long Yue" +msgstr "龍悅" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:30 +msgid "Long Yuan" +msgstr "龍媛" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:31 +msgid "Long Fei" +msgstr "龍飛" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:32 +msgid "Long Jielidou" +msgstr "龍傑力豆" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:33 +msgid "Long Tong" +msgstr "龍彤" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:34 +msgid "Long Xiang" +msgstr "龍祥" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:39 +#: 
community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/tts.py:33 +#: community/apps/setting/models_provider/impl/xf_model_provider/credential/tts.py:28 +msgid "speaking speed" +msgstr "語速" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:39 +msgid "[0.5,2], the default is 1, usually one decimal place is enough" +msgstr "[0.5,2],默認爲1,通常一位小數就足夠了" + +#: community/apps/setting/models_provider/impl/anthropic_model_provider/credential/image.py:34 +#: community/apps/setting/models_provider/impl/anthropic_model_provider/credential/llm.py:74 +msgid "API URL" +msgstr "" + +#: community/apps/setting/models_provider/impl/anthropic_model_provider/credential/image.py:35 +#: community/apps/setting/models_provider/impl/anthropic_model_provider/credential/llm.py:75 +msgid "API Key" +msgstr "" + +#: community/apps/setting/models_provider/impl/aws_bedrock_model_provider/aws_bedrock_model_provider.py:36 +msgid "" +"An update to Claude 2 that doubles the context window and improves " +"reliability, hallucination rates, and evidence-based accuracy in long " +"documents and RAG contexts." +msgstr "" +"Claude 2 的更新,採用雙倍的上下文窗口,並在長文檔和 RAG 上下文中提高可靠性、" +"幻覺率和循證準確性。" + +#: community/apps/setting/models_provider/impl/aws_bedrock_model_provider/aws_bedrock_model_provider.py:43 +msgid "" +"Anthropic is a powerful model that can handle a variety of tasks, from " +"complex dialogue and creative content generation to detailed command " +"obedience." +msgstr "" +"Anthropic 功能強大的模型,可處理各種任務,從複雜的對話和創意內容生成到詳細的" +"指令服從。" + +#: community/apps/setting/models_provider/impl/aws_bedrock_model_provider/aws_bedrock_model_provider.py:50 +msgid "" +"The Claude 3 Haiku is Anthropic's fastest and most compact model, with near-" +"instant responsiveness. The model can answer simple queries and requests " +"quickly. Customers will be able to build seamless AI experiences that mimic " +"human interactions. 
Claude 3 Haiku can process images and return text " +"output, and provides 200K context windows." +msgstr "" +"Claude 3 Haiku 是 Anthropic 最快速、最緊湊的模型,具有近乎即時的響應能力。該" +"模型可以快速回答簡單的查詢和請求。客戶將能夠構建模仿人類交互的無縫人工智能體" +"驗。 Claude 3 Haiku 可以處理圖像和返回文本輸出,並且提供 200K 上下文窗口。" + +#: community/apps/setting/models_provider/impl/aws_bedrock_model_provider/aws_bedrock_model_provider.py:57 +msgid "" +"The Claude 3 Sonnet model from Anthropic strikes the ideal balance between " +"intelligence and speed, especially when it comes to handling enterprise " +"workloads. This model offers maximum utility while being priced lower than " +"competing products, and it's been engineered to be a solid choice for " +"deploying AI at scale." +msgstr "" +"Anthropic 推出的 Claude 3 Sonnet 模型在智能和速度之間取得理想的平衡,尤其是在" +"處理企業工作負載方面。該模型提供最大的效用,同時價格低於競爭產品,並且其經過" +"精心設計,是大規模部署人工智能的可靠選擇。" + +#: community/apps/setting/models_provider/impl/aws_bedrock_model_provider/aws_bedrock_model_provider.py:64 +msgid "" +"The Claude 3.5 Sonnet raises the industry standard for intelligence, " +"outperforming competing models and the Claude 3 Opus in extensive " +"evaluations, with the speed and cost-effectiveness of our mid-range models." +msgstr "" +"Claude 3.5 Sonnet提高了智能的行業標準,在廣泛的評估中超越了競爭對手的型號和" +"Claude 3 Opus,具有我們中端型號的速度和成本效益。" + +#: community/apps/setting/models_provider/impl/aws_bedrock_model_provider/aws_bedrock_model_provider.py:71 +msgid "" +"A faster, more affordable but still very powerful model that can handle a " +"range of tasks including casual conversation, text analysis, summarization " +"and document question answering." +msgstr "" +"一種更快速、更實惠但仍然非常強大的模型,它可以處理一系列任務,包括隨意對話、" +"文本分析、摘要和文檔問題回答。" + +#: community/apps/setting/models_provider/impl/aws_bedrock_model_provider/aws_bedrock_model_provider.py:78 +msgid "" +"Titan Text Premier is the most powerful and advanced model in the Titan Text " +"series, designed to deliver exceptional performance for a variety of " +"enterprise applications. 
With its cutting-edge features, it delivers greater " +"accuracy and outstanding results, making it an excellent choice for " +"organizations looking for a top-notch text processing solution." +msgstr "" +"Titan Text Premier 是 Titan Text 系列中功能強大且先進的型號,旨在爲各種企業應" +"用程序提供卓越的性能。憑藉其尖端功能,它提供了更高的準確性和出色的結果,使其" +"成爲尋求一流文本處理解決方案的組織的絕佳選擇。" + +#: community/apps/setting/models_provider/impl/aws_bedrock_model_provider/aws_bedrock_model_provider.py:85 +msgid "" +"Amazon Titan Text Lite is a lightweight, efficient model ideal for fine-" +"tuning English-language tasks, including summarization and copywriting, " +"where customers require smaller, more cost-effective, and highly " +"customizable models." +msgstr "" +"Amazon Titan Text Lite 是一種輕量級的高效模型,非常適合英語任務的微調,包括摘" +"要和文案寫作等,在這種場景下,客戶需要更小、更經濟高效且高度可定製的模型" + +#: community/apps/setting/models_provider/impl/aws_bedrock_model_provider/aws_bedrock_model_provider.py:91 +msgid "" +"Amazon Titan Text Express has context lengths of up to 8,000 tokens, making " +"it ideal for a variety of high-level general language tasks, such as open-" +"ended text generation and conversational chat, as well as support in " +"retrieval-augmented generation (RAG). At launch, the model is optimized for " +"English, but other languages are supported." +msgstr "" +"Amazon Titan Text Express 的上下文長度長達 8000 個 tokens,因而非常適合各種高" +"級常規語言任務,例如開放式文本生成和對話式聊天,以及檢索增強生成(RAG)中的支" +"持。在發佈時,該模型針對英語進行了優化,但也支持其他語言。" + +#: community/apps/setting/models_provider/impl/aws_bedrock_model_provider/aws_bedrock_model_provider.py:97 +msgid "" +"7B dense converter for rapid deployment and easy customization. Small in " +"size yet powerful in a variety of use cases. Supports English and code, as " +"well as 32k context windows." 
+msgstr "" +"7B 密集型轉換器,可快速部署,易於定製。體積雖小,但功能強大,適用於各種用例。" +"支持英語和代碼,以及 32k 的上下文窗口。" + +#: community/apps/setting/models_provider/impl/aws_bedrock_model_provider/aws_bedrock_model_provider.py:103 +msgid "" +"Advanced Mistral AI large-scale language model capable of handling any " +"language task, including complex multilingual reasoning, text understanding, " +"transformation, and code generation." +msgstr "" +"先進的 Mistral AI 大型語言模型,能夠處理任何語言任務,包括複雜的多語言推理、" +"文本理解、轉換和代碼生成。" + +#: community/apps/setting/models_provider/impl/aws_bedrock_model_provider/aws_bedrock_model_provider.py:109 +msgid "" +"Ideal for content creation, conversational AI, language understanding, R&D, " +"and enterprise applications" +msgstr "非常適合內容創作、會話式人工智能、語言理解、研發和企業應用" + +#: community/apps/setting/models_provider/impl/aws_bedrock_model_provider/aws_bedrock_model_provider.py:115 +msgid "" +"Ideal for limited computing power and resources, edge devices, and faster " +"training times." +msgstr "非常適合有限的計算能力和資源、邊緣設備和更快的訓練時間。" + +#: community/apps/setting/models_provider/impl/aws_bedrock_model_provider/aws_bedrock_model_provider.py:123 +msgid "" +"Titan Embed Text is the largest embedding model in the Amazon Titan Embed " +"series and can handle various text embedding tasks, such as text " +"classification, text similarity calculation, etc." 
+msgstr "" +"Titan Embed Text 是 Amazon Titan Embed 系列中最大的嵌入模型,可以處理各種文本" +"嵌入任務,如文本分類、文本相似度計算等。" + +#: community/apps/setting/models_provider/impl/aws_bedrock_model_provider/credential/embedding.py:28 +#: community/apps/setting/models_provider/impl/aws_bedrock_model_provider/credential/llm.py:47 +#, python-brace-format +msgid "The following fields are required: {keys}" +msgstr "以下字段是必填項: {keys}" + +#: community/apps/setting/models_provider/impl/azure_model_provider/credential/embedding.py:44 +#: community/apps/setting/models_provider/impl/azure_model_provider/credential/llm.py:64 +msgid "Verification failed, please check whether the parameters are correct" +msgstr "驗證失敗,請檢查參數是否正確" + +#: community/apps/setting/models_provider/impl/azure_model_provider/credential/tti.py:28 +#: community/apps/setting/models_provider/impl/openai_model_provider/credential/tti.py:29 +#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/tti.py:29 +#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/tti.py:28 +msgid "Picture quality" +msgstr "圖片質量" + +#: community/apps/setting/models_provider/impl/azure_model_provider/credential/tts.py:17 +#: community/apps/setting/models_provider/impl/openai_model_provider/credential/tts.py:17 +#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/tts.py:17 +msgid "" +"Try out the different sounds (Alloy, Echo, Fable, Onyx, Nova, and Sparkle) " +"to find one that suits your desired tone and audience. The current voiceover " +"is optimized for English." 
+msgstr ""
+"嘗試不同的聲音(合金、回聲、寓言、縞瑪瑙、新星和閃光),找到一種適合您所需的"
+"音調和聽衆的聲音。當前的語音針對英語進行了優化。"
+
+#: community/apps/setting/models_provider/impl/deepseek_model_provider/deepseek_model_provider.py:24
+msgid "Good at common conversational tasks, supports 32K contexts"
+msgstr "擅長通用對話任務,支持 32K 上下文"
+
+#: community/apps/setting/models_provider/impl/deepseek_model_provider/deepseek_model_provider.py:29
+msgid "Good at handling programming tasks, supports 16K contexts"
+msgstr "擅長處理編程任務,支持 16K 上下文"
+
+#: community/apps/setting/models_provider/impl/gemini_model_provider/gemini_model_provider.py:32
+msgid "Latest Gemini 1.0 Pro model, updated with Google update"
+msgstr "最新的 Gemini 1.0 Pro 模型,隨 Google 更新而更新"
+
+#: community/apps/setting/models_provider/impl/gemini_model_provider/gemini_model_provider.py:36
+msgid "Latest Gemini 1.0 Pro Vision model, updated with Google update"
+msgstr "最新的Gemini 1.0 Pro Vision模型,隨Google更新而更新"
+
+#: community/apps/setting/models_provider/impl/gemini_model_provider/gemini_model_provider.py:43
+#: community/apps/setting/models_provider/impl/gemini_model_provider/gemini_model_provider.py:47
+#: community/apps/setting/models_provider/impl/gemini_model_provider/gemini_model_provider.py:54
+#: community/apps/setting/models_provider/impl/gemini_model_provider/gemini_model_provider.py:58
+msgid "Latest Gemini 1.5 Flash model, updated with Google updates"
+msgstr "最新的Gemini 1.5 Flash模型,隨Google更新而更新"
+
+#: community/apps/setting/models_provider/impl/gemini_model_provider/model/stt.py:53
+msgid "convert audio to text"
+msgstr "將音頻轉換爲文本"
+
+#: community/apps/setting/models_provider/impl/local_model_provider/credential/embedding.py:53
+#: community/apps/setting/models_provider/impl/local_model_provider/credential/reranker.py:54
+msgid "Model catalog"
+msgstr "模型目錄"
+
+#: community/apps/setting/models_provider/impl/local_model_provider/local_model_provider.py:39
+msgid "local model"
+msgstr "本地模型"
+
+#: 
community/apps/setting/models_provider/impl/ollama_model_provider/credential/embedding.py:30 +#: community/apps/setting/models_provider/impl/ollama_model_provider/credential/image.py:43 +#: community/apps/setting/models_provider/impl/ollama_model_provider/credential/llm.py:48 +#: community/apps/setting/models_provider/impl/ollama_model_provider/credential/reranker.py:35 +#: community/apps/setting/models_provider/impl/vllm_model_provider/credential/llm.py:43 +#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/embedding.py:24 +#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/llm.py:44 +msgid "API domain name is invalid" +msgstr "API域名無效" + +#: community/apps/setting/models_provider/impl/ollama_model_provider/credential/embedding.py:35 +#: community/apps/setting/models_provider/impl/ollama_model_provider/credential/image.py:48 +#: community/apps/setting/models_provider/impl/ollama_model_provider/credential/llm.py:53 +#: community/apps/setting/models_provider/impl/ollama_model_provider/credential/reranker.py:40 +#: community/apps/setting/models_provider/impl/vllm_model_provider/credential/llm.py:47 +#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/embedding.py:30 +#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/llm.py:48 +msgid "The model does not exist, please download the model first" +msgstr "模型不存在,請先下載模型" + +#: community/apps/setting/models_provider/impl/ollama_model_provider/ollama_model_provider.py:56 +msgid "" +"Llama 2 is a set of pretrained and fine-tuned generative text models ranging " +"in size from 7 billion to 70 billion. This is a repository of 7B pretrained " +"models. Links to other models can be found in the index at the bottom." 
+msgstr "" +"Llama 2 是一組經過預訓練和微調的生成文本模型,其規模從 70 億到 700 億個不等。" +"這是 7B 預訓練模型的存儲庫。其他模型的鏈接可以在底部的索引中找到。" + +#: community/apps/setting/models_provider/impl/ollama_model_provider/ollama_model_provider.py:60 +msgid "" +"Llama 2 is a set of pretrained and fine-tuned generative text models ranging " +"in size from 7 billion to 70 billion. This is a repository of 13B pretrained " +"models. Links to other models can be found in the index at the bottom." +msgstr "" +"Llama 2 是一組經過預訓練和微調的生成文本模型,其規模從 70 億到 700 億個不等。" +"這是 13B 預訓練模型的存儲庫。其他模型的鏈接可以在底部的索引中找到。" + +#: community/apps/setting/models_provider/impl/ollama_model_provider/ollama_model_provider.py:64 +msgid "" +"Llama 2 is a set of pretrained and fine-tuned generative text models ranging " +"in size from 7 billion to 70 billion. This is a repository of 70B pretrained " +"models. Links to other models can be found in the index at the bottom." +msgstr "" +"Llama 2 是一組經過預訓練和微調的生成文本模型,其規模從 70 億到 700 億個不等。" +"這是 70B 預訓練模型的存儲庫。其他模型的鏈接可以在底部的索引中找到。" + +#: community/apps/setting/models_provider/impl/ollama_model_provider/ollama_model_provider.py:68 +msgid "" +"Since the Chinese alignment of Llama2 itself is weak, we use the Chinese " +"instruction set to fine-tune meta-llama/Llama-2-13b-chat-hf with LoRA so " +"that it has strong Chinese conversation capabilities." +msgstr "" +"由於Llama2本身的中文對齊較弱,我們採用中文指令集,對meta-llama/Llama-2-13b-" +"chat-hf進行LoRA微調,使其具備較強的中文對話能力。" + +#: community/apps/setting/models_provider/impl/ollama_model_provider/ollama_model_provider.py:72 +msgid "" +"Meta Llama 3: The most capable public product LLM to date. 8 billion " +"parameters." +msgstr "Meta Llama 3:迄今爲止最有能力的公開產品LLM。80億參數。" + +#: community/apps/setting/models_provider/impl/ollama_model_provider/ollama_model_provider.py:76 +msgid "" +"Meta Llama 3: The most capable public product LLM to date. 70 billion " +"parameters." 
+msgstr "Meta Llama 3:迄今爲止最有能力的公開產品LLM。700億參數。" + +#: community/apps/setting/models_provider/impl/ollama_model_provider/ollama_model_provider.py:80 +msgid "" +"Compared with previous versions, qwen 1.5 0.5b has significantly enhanced " +"the model's alignment with human preferences and its multi-language " +"processing capabilities. Models of all sizes support a context length of " +"32768 tokens. 500 million parameters." +msgstr "" +"qwen 1.5 0.5b 相較於以往版本,模型與人類偏好的對齊程度以及多語言處理能力上有" +"顯著增強。所有規模的模型都支持32768個tokens的上下文長度。5億參數。" + +#: community/apps/setting/models_provider/impl/ollama_model_provider/ollama_model_provider.py:84 +msgid "" +"Compared with previous versions, qwen 1.5 1.8b has significantly enhanced " +"the model's alignment with human preferences and its multi-language " +"processing capabilities. Models of all sizes support a context length of " +"32768 tokens. 1.8 billion parameters." +msgstr "" +"qwen 1.5 1.8b 相較於以往版本,模型與人類偏好的對齊程度以及多語言處理能力上有" +"顯著增強。所有規模的模型都支持32768個tokens的上下文長度。18億參數。" + +#: community/apps/setting/models_provider/impl/ollama_model_provider/ollama_model_provider.py:88 +msgid "" +"Compared with previous versions, qwen 1.5 4b has significantly enhanced the " +"model's alignment with human preferences and its multi-language processing " +"capabilities. Models of all sizes support a context length of 32768 tokens. " +"4 billion parameters." +msgstr "" +"qwen 1.5 4b 相較於以往版本,模型與人類偏好的對齊程度以及多語言處理能力上有顯" +"著增強。所有規模的模型都支持32768個tokens的上下文長度。40億參數。" + +#: community/apps/setting/models_provider/impl/ollama_model_provider/ollama_model_provider.py:93 +msgid "" +"Compared with previous versions, qwen 1.5 7b has significantly enhanced the " +"model's alignment with human preferences and its multi-language processing " +"capabilities. Models of all sizes support a context length of 32768 tokens. " +"7 billion parameters." 
+msgstr "" +"qwen 1.5 7b 相較於以往版本,模型與人類偏好的對齊程度以及多語言處理能力上有顯" +"著增強。所有規模的模型都支持32768個tokens的上下文長度。70億參數。" + +#: community/apps/setting/models_provider/impl/ollama_model_provider/ollama_model_provider.py:97 +msgid "" +"Compared with previous versions, qwen 1.5 14b has significantly enhanced the " +"model's alignment with human preferences and its multi-language processing " +"capabilities. Models of all sizes support a context length of 32768 tokens. " +"14 billion parameters." +msgstr "" +"qwen 1.5 14b 相較於以往版本,模型與人類偏好的對齊程度以及多語言處理能力上有顯" +"著增強。所有規模的模型都支持32768個tokens的上下文長度。140億參數。" + +#: community/apps/setting/models_provider/impl/ollama_model_provider/ollama_model_provider.py:101 +msgid "" +"Compared with previous versions, qwen 1.5 32b has significantly enhanced the " +"model's alignment with human preferences and its multi-language processing " +"capabilities. Models of all sizes support a context length of 32768 tokens. " +"32 billion parameters." +msgstr "" +"qwen 1.5 32b 相較於以往版本,模型與人類偏好的對齊程度以及多語言處理能力上有顯" +"著增強。所有規模的模型都支持32768個tokens的上下文長度。320億參數。" + +#: community/apps/setting/models_provider/impl/ollama_model_provider/ollama_model_provider.py:105 +msgid "" +"Compared with previous versions, qwen 1.5 72b has significantly enhanced the " +"model's alignment with human preferences and its multi-language processing " +"capabilities. Models of all sizes support a context length of 32768 tokens. " +"72 billion parameters." +msgstr "" +"qwen 1.5 72b 相較於以往版本,模型與人類偏好的對齊程度以及多語言處理能力上有顯" +"著增強。所有規模的模型都支持32768個tokens的上下文長度。720億參數。" + +#: community/apps/setting/models_provider/impl/ollama_model_provider/ollama_model_provider.py:109 +msgid "" +"Compared with previous versions, qwen 1.5 110b has significantly enhanced " +"the model's alignment with human preferences and its multi-language " +"processing capabilities. Models of all sizes support a context length of " +"32768 tokens. 110 billion parameters." 
+msgstr "" +"qwen 1.5 110b 相較於以往版本,模型與人類偏好的對齊程度以及多語言處理能力上有" +"顯著增強。所有規模的模型都支持32768個tokens的上下文長度。1100億參數。" + +#: community/apps/setting/models_provider/impl/ollama_model_provider/ollama_model_provider.py:153 +#: community/apps/setting/models_provider/impl/ollama_model_provider/ollama_model_provider.py:193 +msgid "" +"Phi-3 Mini is Microsoft's 3.8B parameter, lightweight, state-of-the-art open " +"model." +msgstr "Phi-3 Mini是Microsoft的3.8B參數,輕量級,最先進的開放模型。" + +#: community/apps/setting/models_provider/impl/ollama_model_provider/ollama_model_provider.py:162 +#: community/apps/setting/models_provider/impl/ollama_model_provider/ollama_model_provider.py:197 +msgid "" +"A high-performance open embedding model with a large token context window." +msgstr "一個具有大 tokens 上下文窗口的高性能開放嵌入模型。" + +#: community/apps/setting/models_provider/impl/openai_model_provider/credential/tti.py:16 +#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/tti.py:16 +msgid "" +"The image generation endpoint allows you to create raw images based on text " +"prompts. When using the DALL·E 3, the image size can be 1024x1024, 1024x1792 " +"or 1792x1024 pixels." +msgstr "" +"圖像生成端點允許您根據文本提示創建原始圖像。使用 DALL·E 3 時,圖像的尺寸可以" +"爲 1024x1024、1024x1792 或 1792x1024 像素。" + +#: community/apps/setting/models_provider/impl/openai_model_provider/credential/tti.py:29 +#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/tti.py:29 +msgid "" +" \n" +"By default, images are produced in standard quality, but with DALL·E 3 you " +"can set quality: \"hd\" to enhance detail. 
Square, standard quality images " +"are generated fastest.\n" +" " +msgstr "" +"默認情況下,圖像以標準質量生成,但使用 DALL·E 3 時,您可以設置質量:“hd”以增" +"強細節。方形、標準質量的圖像生成速度最快。" + +#: community/apps/setting/models_provider/impl/openai_model_provider/credential/tti.py:44 +#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/tti.py:44 +msgid "" +"You can use DALL·E 3 to request 1 image at a time (requesting more images by " +"issuing parallel requests), or use DALL·E 2 with the n parameter to request " +"up to 10 images at a time." +msgstr "" +"您可以使用 DALL·E 3 一次請求 1 個圖像(通過發出並行請求來請求更多圖像),或者" +"使用帶有 n 參數的 DALL·E 2 一次最多請求 10 個圖像。" + +#: community/apps/setting/models_provider/impl/openai_model_provider/openai_model_provider.py:35 +#: community/apps/setting/models_provider/impl/openai_model_provider/openai_model_provider.py:119 +#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/siliconCloud_model_provider.py:111 +msgid "The latest gpt-3.5-turbo, updated with OpenAI adjustments" +msgstr "最新的gpt-3.5-turbo,隨OpenAI調整而更新" + +#: community/apps/setting/models_provider/impl/openai_model_provider/openai_model_provider.py:38 +msgid "Latest gpt-4, updated with OpenAI adjustments" +msgstr "最新的gpt-4,隨OpenAI調整而更新" + +#: community/apps/setting/models_provider/impl/openai_model_provider/openai_model_provider.py:40 +#: community/apps/setting/models_provider/impl/openai_model_provider/openai_model_provider.py:99 +msgid "" +"The latest GPT-4o, cheaper and faster than gpt-4-turbo, updated with OpenAI " +"adjustments" +msgstr "最新的GPT-4o,比gpt-4-turbo更便宜、更快,隨OpenAI調整而更新" + +#: community/apps/setting/models_provider/impl/openai_model_provider/openai_model_provider.py:43 +#: community/apps/setting/models_provider/impl/openai_model_provider/openai_model_provider.py:102 +msgid "" +"The latest gpt-4o-mini, cheaper and faster than gpt-4o, updated with OpenAI " +"adjustments" +msgstr "最新的gpt-4o-mini,比gpt-4o更便宜、更快,隨OpenAI調整而更新" + +#: 
community/apps/setting/models_provider/impl/openai_model_provider/openai_model_provider.py:46 +msgid "The latest gpt-4-turbo, updated with OpenAI adjustments" +msgstr "最新的gpt-4-turbo,隨OpenAI調整而更新" + +#: community/apps/setting/models_provider/impl/openai_model_provider/openai_model_provider.py:49 +msgid "The latest gpt-4-turbo-preview, updated with OpenAI adjustments" +msgstr "最新的gpt-4-turbo-preview,隨OpenAI調整而更新" + +#: community/apps/setting/models_provider/impl/openai_model_provider/openai_model_provider.py:53 +msgid "" +"gpt-3.5-turbo snapshot on January 25, 2024, supporting context length 16,385 " +"tokens" +msgstr "2024年1月25日的gpt-3.5-turbo快照,支持上下文長度16,385 tokens" + +#: community/apps/setting/models_provider/impl/openai_model_provider/openai_model_provider.py:57 +msgid "" +"gpt-3.5-turbo snapshot on November 6, 2023, supporting context length 16,385 " +"tokens" +msgstr "2023年11月6日的gpt-3.5-turbo快照,支持上下文長度16,385 tokens" + +#: community/apps/setting/models_provider/impl/openai_model_provider/openai_model_provider.py:61 +msgid "" +"[Legacy] gpt-3.5-turbo snapshot on June 13, 2023, will be deprecated on June " +"13, 2024" +msgstr "[Legacy] 2023年6月13日的gpt-3.5-turbo快照,將於2024年6月13日棄用" + +#: community/apps/setting/models_provider/impl/openai_model_provider/openai_model_provider.py:65 +msgid "" +"gpt-4o snapshot on May 13, 2024, supporting context length 128,000 tokens" +msgstr "2024年5月13日的gpt-4o快照,支持上下文長度128,000 tokens" + +#: community/apps/setting/models_provider/impl/openai_model_provider/openai_model_provider.py:69 +msgid "" +"gpt-4-turbo snapshot on April 9, 2024, supporting context length 128,000 " +"tokens" +msgstr "2024年4月9日的gpt-4-turbo快照,支持上下文長度128,000 tokens" + +#: community/apps/setting/models_provider/impl/openai_model_provider/openai_model_provider.py:72 +msgid "" +"gpt-4-turbo snapshot on January 25, 2024, supporting context length 128,000 " +"tokens" +msgstr "2024年1月25日的gpt-4-turbo快照,支持上下文長度128,000 tokens" + +#: 
community/apps/setting/models_provider/impl/openai_model_provider/openai_model_provider.py:75
+msgid ""
+"gpt-4-turbo snapshot on November 6, 2023, supporting context length 128,000 "
+"tokens"
+msgstr "2023年11月6日的gpt-4-turbo快照,支持上下文長度128,000 tokens"
+
+#: community/apps/setting/models_provider/impl/qwen_model_provider/qwen_model_provider.py:63
+msgid "Tongyi Qianwen"
+msgstr "通義千問"
+
+#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/model/reranker.py:46
+msgid "Please provide server URL"
+msgstr "請提供服務器URL"
+
+#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/model/reranker.py:49
+msgid "Please provide the model"
+msgstr "請提供模型"
+
+#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/model/reranker.py:52
+msgid "Please provide the API Key"
+msgstr "請提供API金鑰"
+
+#: community/apps/setting/models_provider/impl/tencent_cloud_model_provider/tencent_cloud_model_provider.py:58
+msgid "Tencent Cloud"
+msgstr "騰訊雲"
+
+#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/llm.py:41
+#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:88
+#, python-brace-format
+msgid "{keys} is required"
+msgstr "{keys} 是必填項"
+
+#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:14
+msgid "painting style"
+msgstr "繪畫風格"
+
+#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:14
+msgid "If not passed, the default value is 201 (Japanese anime style)"
+msgstr "如果未傳遞,則默認值爲201(日本動漫風格)"
+
+#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:18
+msgid "Not limited to style"
+msgstr "不限於風格"
+
+#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:19
+msgid "ink painting"
+msgstr "水墨畫"
+
+#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:20
+msgid "concept art"
+msgstr "概念藝術"
+
+#: 
community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:21 +msgid "Oil painting 1" +msgstr "油畫1" + +#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:22 +msgid "Oil Painting 2 (Van Gogh)" +msgstr "油畫2(梵高)" + +#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:23 +msgid "watercolor painting" +msgstr "水彩畫" + +#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:24 +msgid "pixel art" +msgstr "像素畫" + +#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:25 +msgid "impasto style" +msgstr "厚塗風格" + +#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:26 +msgid "illustration" +msgstr "插圖" + +#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:27 +msgid "paper cut style" +msgstr "剪紙風格" + +#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:28 +msgid "Impressionism 1 (Monet)" +msgstr "印象派1(莫奈)" + +#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:29 +msgid "Impressionism 2" +msgstr "印象派2" + +#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:31 +msgid "classical portraiture" +msgstr "古典肖像畫" + +#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:32 +msgid "black and white sketch" +msgstr "黑白素描畫" + +#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:33 +msgid "cyberpunk" +msgstr "賽博朋克" + +#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:34 +msgid "science fiction style" +msgstr "科幻風格" + +#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:35 +msgid "dark style" +msgstr "暗黑風格" + +#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:37 +msgid 
"vaporwave" +msgstr "蒸汽波" + +#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:38 +msgid "Japanese animation" +msgstr "日系動漫" + +#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:39 +msgid "monster style" +msgstr "怪獸風格" + +#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:40 +msgid "Beautiful ancient style" +msgstr "唯美古風" + +#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:41 +msgid "retro anime" +msgstr "復古動漫" + +#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:42 +msgid "Game cartoon hand drawing" +msgstr "遊戲卡通手繪" + +#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:43 +msgid "Universal realistic style" +msgstr "通用寫實風格" + +#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:50 +msgid "Generate image resolution" +msgstr "生成圖像分辨率" + +#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:50 +msgid "If not transmitted, the default value is 768:768." +msgstr "不傳默認使用768:768。" + +#: community/apps/setting/models_provider/impl/tencent_model_provider/tencent_model_provider.py:38 +msgid "" +"The most effective version of the current hybrid model, the trillion-level " +"parameter scale MOE-32K long article model. 
Reaching the absolute leading " +"level on various benchmarks, with complex instructions and reasoning, " +"complex mathematical capabilities, support for function call, and " +"application focus optimization in fields such as multi-language translation, " +"finance, law, and medical care" +msgstr "" +"當前混元模型中效果最優版本,萬億級參數規模 MOE-32K 長文模型。在各種 " +"benchmark 上達到絕對領先的水平,複雜指令和推理,具備複雜數學能力,支持 " +"functioncall,在多語言翻譯、金融法律醫療等領域應用重點優化" + +#: community/apps/setting/models_provider/impl/tencent_model_provider/tencent_model_provider.py:45 +msgid "" +"A better routing strategy is adopted to simultaneously alleviate the " +"problems of load balancing and expert convergence. For long articles, the " +"needle-in-a-haystack index reaches 99.9%" +msgstr "" +"採用更優的路由策略,同時緩解了負載均衡和專家趨同的問題。長文方面,大海撈針指" +"標達到99.9%" + +#: community/apps/setting/models_provider/impl/tencent_model_provider/tencent_model_provider.py:51 +msgid "" +"Upgraded to MOE structure, the context window is 256k, leading many open " +"source models in multiple evaluation sets such as NLP, code, mathematics, " +"industry, etc." +msgstr "" +"升級爲 MOE 結構,上下文窗口爲 256k ,在 NLP,代碼,數學,行業等多項評測集上領" +"先衆多開源模型" + +#: community/apps/setting/models_provider/impl/tencent_model_provider/tencent_model_provider.py:57 +msgid "" +"Hunyuan's latest version of the role-playing model, a role-playing model " +"launched by Hunyuan's official fine-tuning training, is based on the Hunyuan " +"model combined with the role-playing scene data set for additional training, " +"and has better basic effects in role-playing scenes." +msgstr "" +"混元最新版角色扮演模型,混元官方精調訓練推出的角色扮演模型,基於混元模型結合" +"角色扮演場景數據集進行增訓,在角色扮演場景具有更好的基礎效果" + +#: community/apps/setting/models_provider/impl/tencent_model_provider/tencent_model_provider.py:63 +msgid "" +"Hunyuan's latest MOE architecture FunctionCall model has been trained with " +"high-quality FunctionCall data and has a context window of 32K, leading in " +"multiple dimensions of evaluation indicators." 
+msgstr "" +"混元最新 MOE 架構 FunctionCall 模型,經過高質量的 FunctionCall 數據訓練,上下" +"文窗口達 32K,在多個維度的評測指標上處於領先。" + +#: community/apps/setting/models_provider/impl/tencent_model_provider/tencent_model_provider.py:69 +msgid "" +"Hunyuan's latest code generation model, after training the base model with " +"200B high-quality code data, and iterating on high-quality SFT data for half " +"a year, the context long window length has been increased to 8K, and it " +"ranks among the top in the automatic evaluation indicators of code " +"generation in the five major languages; the five major languages In the " +"manual high-quality evaluation of 10 comprehensive code tasks that consider " +"all aspects, the performance is in the first echelon." +msgstr "" +"混元最新代碼生成模型,經過 200B 高質量代碼數據增訓基座模型,迭代半年高質量 " +"SFT 數據訓練,上下文長窗口長度增大到 8K,五大語言代碼生成自動評測指標上位居前" +"列;五大語言10項考量各方面綜合代碼任務人工高質量評測上,性能處於第一梯隊" + +#: community/apps/setting/models_provider/impl/tencent_model_provider/tencent_model_provider.py:77 +msgid "" +"Tencent's Hunyuan Embedding interface can convert text into high-quality " +"vector data. The vector dimension is 1024 dimensions." 
+msgstr "" +"騰訊混元 Embedding 接口,可以將文本轉化爲高質量的向量數據。向量維度爲1024維。" + +#: community/apps/setting/models_provider/impl/tencent_model_provider/tencent_model_provider.py:87 +msgid "Mixed element visual model" +msgstr "混元視覺模型" + +#: community/apps/setting/models_provider/impl/tencent_model_provider/tencent_model_provider.py:94 +msgid "Hunyuan graph model" +msgstr "混元生圖模型" + +#: community/apps/setting/models_provider/impl/tencent_model_provider/tencent_model_provider.py:125 +msgid "Tencent Hunyuan" +msgstr "騰訊混元" + +#: community/apps/setting/models_provider/impl/vllm_model_provider/vllm_model_provider.py:24 +#: community/apps/setting/models_provider/impl/vllm_model_provider/vllm_model_provider.py:42 +msgid "Facebook’s 125M parameter model" +msgstr "Facebook的125M參數模型" + +#: community/apps/setting/models_provider/impl/vllm_model_provider/vllm_model_provider.py:25 +msgid "BAAI’s 7B parameter model" +msgstr "BAAI的7B參數模型" + +#: community/apps/setting/models_provider/impl/vllm_model_provider/vllm_model_provider.py:26 +msgid "BAAI’s 13B parameter mode" +msgstr "BAAI的13B參數模型" + +#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/tti.py:16 +msgid "" +"If the gap between width, height and 512 is too large, the picture rendering " +"effect will be poor and the probability of excessive delay will increase " +"significantly. 
Recommended ratio and corresponding width and height before " +"super score: width*height" +msgstr "" +"寬、高與512差距過大,則出圖效果不佳、延遲過長概率顯著增加。超分前建議比例及對" +"應寬高:width*height" + +#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/tts.py:23 +#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/tts.py:29 +msgid "Universal female voice" +msgstr "通用女聲" + +#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/tts.py:25 +msgid "Supernatural timbre-ZiZi 2.0" +msgstr "超自然音色-梓梓2.0" + +#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/tts.py:26 +msgid "Supernatural timbre-ZiZi" +msgstr "超自然音色-梓梓" + +#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/tts.py:27 +msgid "Supernatural sound-Ranran 2.0" +msgstr "超自然音色-燃燃2.0" + +#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/tts.py:28 +msgid "Supernatural sound-Ranran" +msgstr "超自然音色-燃燃" + +#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/tts.py:30 +msgid "Universal male voice" +msgstr "通用男聲" + +#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/tts.py:33 +msgid "[0.2,3], the default is 1, usually one decimal place is enough" +msgstr "[0.2,3],默認爲1,通常保留一位小數即可" + +#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/volcanic_engine_model_provider.py:39 +#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/volcanic_engine_model_provider.py:44 +#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/volcanic_engine_model_provider.py:88 +msgid "" +"The user goes to the model inference page of Volcano Ark to create an " +"inference access point. Here, you need to enter ep-xxxxxxxxxx-yyyy to call " +"it." 
+msgstr "" +"用戶前往火山方舟的模型推理頁面創建推理接入點,這裏需要輸入ep-xxxxxxxxxx-yyyy" +"進行調用" + +#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/volcanic_engine_model_provider.py:59 +msgid "Universal 2.0-Vincent Diagram" +msgstr "通用2.0-文生圖" + +#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/volcanic_engine_model_provider.py:64 +msgid "Universal 2.0Pro-Vincent Chart" +msgstr "通用2.0Pro-文生圖" + +#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/volcanic_engine_model_provider.py:69 +msgid "Universal 1.4-Vincent Chart" +msgstr "通用1.4-文生圖" + +#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/volcanic_engine_model_provider.py:74 +msgid "Animation 1.3.0-Vincent Picture" +msgstr "動漫1.3.0-文生圖" + +#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/volcanic_engine_model_provider.py:79 +msgid "Animation 1.3.1-Vincent Picture" +msgstr "動漫1.3.1-文生圖" + +#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/volcanic_engine_model_provider.py:113 +msgid "volcano engine" +msgstr "火山引擎" + +#: community/apps/setting/models_provider/impl/wenxin_model_provider/credential/llm.py:51 +#, python-brace-format +msgid "{model_name} The model does not support" +msgstr "{model_name} 模型不支持" + +#: community/apps/setting/models_provider/impl/wenxin_model_provider/wenxin_model_provider.py:24 +#: community/apps/setting/models_provider/impl/wenxin_model_provider/wenxin_model_provider.py:53 +msgid "" +"ERNIE-Bot-4 is a large language model independently developed by Baidu. It " +"covers massive Chinese data and has stronger capabilities in dialogue Q&A, " +"content creation and generation." +msgstr "" +"ERNIE-Bot-4是百度自行研發的大語言模型,覆蓋海量中文數據,具有更強的對話問答、" +"內容創作生成等能力。" + +#: community/apps/setting/models_provider/impl/wenxin_model_provider/wenxin_model_provider.py:27 +msgid "" +"ERNIE-Bot is a large language model independently developed by Baidu. 
It " +"covers massive Chinese data and has stronger capabilities in dialogue Q&A, " +"content creation and generation." +msgstr "" +"ERNIE-Bot是百度自行研發的大語言模型,覆蓋海量中文數據,具有更強的對話問答、內" +"容創作生成等能力。" + +#: community/apps/setting/models_provider/impl/wenxin_model_provider/wenxin_model_provider.py:30 +msgid "" +"ERNIE-Bot-turbo is a large language model independently developed by Baidu. " +"It covers massive Chinese data, has stronger capabilities in dialogue Q&A, " +"content creation and generation, and has a faster response speed." +msgstr "" +"ERNIE-Bot-turbo是百度自行研發的大語言模型,覆蓋海量中文數據,具有更強的對話問" +"答、內容創作生成等能力,響應速度更快。" + +#: community/apps/setting/models_provider/impl/wenxin_model_provider/wenxin_model_provider.py:33 +msgid "" +"BLOOMZ-7B is a well-known large language model in the industry. It was " +"developed and open sourced by BigScience and can output text in 46 languages " +"and 13 programming languages." +msgstr "" +"BLOOMZ-7B是業內知名的大語言模型,由BigScience研發並開源,能夠以46種語言和13種" +"編程語言輸出文本。" + +#: community/apps/setting/models_provider/impl/wenxin_model_provider/wenxin_model_provider.py:39 +msgid "" +"Llama-2-13b-chat was developed by Meta AI and is open source. It performs " +"well in scenarios such as coding, reasoning and knowledge application. " +"Llama-2-13b-chat is a native open source version with balanced performance " +"and effect, suitable for conversation scenarios." +msgstr "" +"Llama-2-13b-chat由Meta AI研發並開源,在編碼、推理及知識應用等場景表現優秀," +"Llama-2-13b-chat是性能與效果均衡的原生開源版本,適用於對話場景。" + +#: community/apps/setting/models_provider/impl/wenxin_model_provider/wenxin_model_provider.py:42 +msgid "" +"Llama-2-70b-chat was developed by Meta AI and is open source. It performs " +"well in scenarios such as coding, reasoning, and knowledge application. " +"Llama-2-70b-chat is a native open source version with high-precision effects." 
+msgstr "" +"Llama-2-70b-chat由Meta AI研發並開源,在編碼、推理及知識應用等場景表現優秀," +"Llama-2-70b-chat是高精度效果的原生開源版本。" + +#: community/apps/setting/models_provider/impl/wenxin_model_provider/wenxin_model_provider.py:45 +msgid "" +"The Chinese enhanced version developed by the Qianfan team based on " +"Llama-2-7b has performed well on Chinese knowledge bases such as CMMLU and C-" +"EVAL." +msgstr "" +"千帆團隊在Llama-2-7b基礎上的中文增強版本,在CMMLU、C-EVAL等中文知識庫上表現優" +"異。" + +#: community/apps/setting/models_provider/impl/wenxin_model_provider/wenxin_model_provider.py:49 +msgid "" +"Embedding-V1 is a text representation model based on Baidu Wenxin large " +"model technology. It can convert text into a vector form represented by " +"numerical values and can be used in text retrieval, information " +"recommendation, knowledge mining and other scenarios. Embedding-V1 provides " +"the Embeddings interface, which can generate corresponding vector " +"representations based on input content. You can call this interface to input " +"text into the model and obtain the corresponding vector representation for " +"subsequent text processing and analysis." +msgstr "" +"Embedding-V1是一個基於百度文心大模型技術的文本表示模型,可以將文本轉化爲用數" +"值表示的向量形式,用於文本檢索、信息推薦、知識挖掘等場景。 Embedding-V1提供了" +"Embeddings接口,可以根據輸入內容生成對應的向量表示。您可以通過調用該接口,將" +"文本輸入到模型中,獲取到對應的向量表示,從而進行後續的文本處理和分析。" + +#: community/apps/setting/models_provider/impl/wenxin_model_provider/wenxin_model_provider.py:66 +msgid "Thousand sails large model" +msgstr "千帆大模型" + +#: community/apps/setting/models_provider/impl/xf_model_provider/credential/image.py:42 +msgid "Please outline this picture" +msgstr "請描述這張圖片" + +#: community/apps/setting/models_provider/impl/xf_model_provider/credential/tts.py:15 +msgid "Speaker" +msgstr "發音人" + +#: community/apps/setting/models_provider/impl/xf_model_provider/credential/tts.py:16 +msgid "" +"Speaker, optional value: Please go to the console to add a trial or purchase " +"speaker. After adding, the speaker parameter value will be displayed." 
+msgstr "" +"發音人,可選值:請到控制檯添加試用或購買發音人,添加後即顯示發音人蔘數值" + +#: community/apps/setting/models_provider/impl/xf_model_provider/credential/tts.py:21 +msgid "iFlytek Xiaoyan" +msgstr "訊飛小燕" + +#: community/apps/setting/models_provider/impl/xf_model_provider/credential/tts.py:22 +msgid "iFlytek Xujiu" +msgstr "訊飛許久" + +#: community/apps/setting/models_provider/impl/xf_model_provider/credential/tts.py:23 +msgid "iFlytek Xiaoping" +msgstr "訊飛小萍" + +#: community/apps/setting/models_provider/impl/xf_model_provider/credential/tts.py:24 +msgid "iFlytek Xiaojing" +msgstr "訊飛小婧" + +#: community/apps/setting/models_provider/impl/xf_model_provider/credential/tts.py:25 +msgid "iFlytek Xuxiaobao" +msgstr "訊飛許小寶" + +#: community/apps/setting/models_provider/impl/xf_model_provider/credential/tts.py:28 +msgid "Speech speed, optional value: [0-100], default is 50" +msgstr "語速,可選值:[0-100],默認爲50" + +#: community/apps/setting/models_provider/impl/xf_model_provider/xf_model_provider.py:39 +#: community/apps/setting/models_provider/impl/xf_model_provider/xf_model_provider.py:50 +msgid "Chinese and English recognition" +msgstr "中英文識別" + +#: community/apps/setting/models_provider/impl/xf_model_provider/xf_model_provider.py:66 +msgid "iFlytek Spark" +msgstr "訊飛星火" + +#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/tti.py:15 +msgid "" +"The image generation endpoint allows you to create raw images based on text " +"prompts. The dimensions of the image can be 1024x1024, 1024x1792, or " +"1792x1024 pixels." +msgstr "" +"圖像生成端點允許您根據文本提示創建原始圖像。圖像的尺寸可以爲 1024x1024、" +"1024x1792 或 1792x1024 像素。" + +#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/tti.py:29 +msgid "" +"By default, images are generated in standard quality, you can set quality: " +"\"hd\" to enhance detail. Square, standard quality images are generated " +"fastest." 
+msgstr "" +"默認情況下,圖像以標準質量生成,您可以設置質量:“hd”以增強細節。方形、標準質" +"量的圖像生成速度最快。" + +#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/tti.py:42 +msgid "" +"You can request 1 image at a time (requesting more images by making parallel " +"requests), or up to 10 images at a time using the n parameter." +msgstr "" +"您可以一次請求 1 個圖像(通過發出並行請求來請求更多圖像),或者使用 n 參數一" +"次最多請求 10 個圖像。" + +#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/tts.py:20 +msgid "Chinese female" +msgstr "中文女" + +#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/tts.py:21 +msgid "Chinese male" +msgstr "中文男" + +#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/tts.py:22 +msgid "Japanese male" +msgstr "日語男" + +#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/tts.py:23 +msgid "Cantonese female" +msgstr "粵語女" + +#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/tts.py:24 +msgid "English female" +msgstr "英文女" + +#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/tts.py:25 +msgid "English male" +msgstr "英文男" + +#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/tts.py:26 +msgid "Korean female" +msgstr "韓語女" + +#: community/apps/setting/models_provider/impl/xinference_model_provider/xinference_model_provider.py:37 +msgid "" +"Code Llama is a language model specifically designed for code generation." 
+msgstr "Code Llama 是一個專門用於代碼生成的語言模型。" + +#: community/apps/setting/models_provider/impl/xinference_model_provider/xinference_model_provider.py:44 +msgid "" +" \n" +"Code Llama Instruct is a fine-tuned version of Code Llama's instructions, " +"designed to perform specific tasks.\n" +" " +msgstr "" +"Code Llama Instruct 是 Code Llama 的指令微調版本,專爲執行特定任務而設計。" + +#: community/apps/setting/models_provider/impl/xinference_model_provider/xinference_model_provider.py:53 +msgid "" +"Code Llama Python is a language model specifically designed for Python code " +"generation." +msgstr "Code Llama Python 是一個專門用於 Python 代碼生成的語言模型。" + +#: community/apps/setting/models_provider/impl/xinference_model_provider/xinference_model_provider.py:60 +msgid "" +"CodeQwen 1.5 is a language model for code generation with high performance." +msgstr "CodeQwen 1.5 是一個用於代碼生成的語言模型,具有較高的性能。" + +#: community/apps/setting/models_provider/impl/xinference_model_provider/xinference_model_provider.py:67 +msgid "CodeQwen 1.5 Chat is a chat model version of CodeQwen 1.5." +msgstr "CodeQwen 1.5 Chat 是一個聊天模型版本的 CodeQwen 1.5。" + +#: community/apps/setting/models_provider/impl/xinference_model_provider/xinference_model_provider.py:74 +msgid "Deepseek is a large-scale language model with 13 billion parameters." +msgstr "Deepseek Chat 是一個聊天模型版本的 Deepseek。" + +#: community/apps/setting/models_provider/impl/zhipu_model_provider/credential/tti.py:16 +msgid "" +"Image size, only cogview-3-plus supports this parameter. Optional range: " +"[1024x1024,768x1344,864x1152,1344x768,1152x864,1440x720,720x1440], the " +"default is 1024x1024." +msgstr "" +"圖片尺寸,僅 cogview-3-plus 支持該參數。可選範圍:" +"[1024x1024,768x1344,864x1152,1344x768,1152x864,1440x720,720x1440],默認是" +"1024x1024。" + +#: community/apps/setting/models_provider/impl/zhipu_model_provider/zhipu_model_provider.py:34 +msgid "" +"Have strong multi-modal understanding capabilities. 
Able to understand up to " +"five images simultaneously and supports video content understanding" +msgstr "具有強大的多模態理解能力。能夠同時理解多達五張圖像,並支持視頻內容理解" + +#: community/apps/setting/models_provider/impl/zhipu_model_provider/zhipu_model_provider.py:37 +msgid "" +"Focus on single picture understanding. Suitable for scenarios requiring " +"efficient image analysis" +msgstr "專注於單圖理解。適用於需要高效圖像解析的場景" + +#: community/apps/setting/models_provider/impl/zhipu_model_provider/zhipu_model_provider.py:40 +msgid "" +"Focus on single picture understanding. Suitable for scenarios requiring " +"efficient image analysis (free)" +msgstr "專注於單圖理解。適用於需要高效圖像解析的場景(免費)" + +#: community/apps/setting/models_provider/impl/zhipu_model_provider/zhipu_model_provider.py:46 +msgid "" +"Quickly and accurately generate images based on user text descriptions. " +"Resolution supports 1024x1024" +msgstr "根據用戶文字描述快速、精準生成圖像。分辨率支持1024x1024" + +#: community/apps/setting/models_provider/impl/zhipu_model_provider/zhipu_model_provider.py:49 +msgid "" +"Generate high-quality images based on user text descriptions, supporting " +"multiple image sizes" +msgstr "根據用戶文字描述生成高質量圖像,支持多圖片尺寸" + +#: community/apps/setting/models_provider/impl/zhipu_model_provider/zhipu_model_provider.py:52 +msgid "" +"Generate high-quality images based on user text descriptions, supporting " +"multiple image sizes (free)" +msgstr "根據用戶文字描述生成高質量圖像,支持多圖片尺寸(免費)" + +#: community/apps/setting/models_provider/impl/zhipu_model_provider/zhipu_model_provider.py:75 +msgid "zhipu AI" +msgstr "智譜 AI" + +#: community/apps/setting/serializers/model_apply_serializers.py:32 +#: community/apps/setting/serializers/model_apply_serializers.py:37 +msgid "vector text" +msgstr "向量文本" + +#: community/apps/setting/serializers/model_apply_serializers.py:33 +msgid "vector text list" +msgstr "向量文本列表" + +#: community/apps/setting/serializers/model_apply_serializers.py:41 +msgid "text" +msgstr "文本" + +#: community/apps/setting/serializers/model_apply_serializers.py:42 +msgid 
"metadata" +msgstr "元數據" + +#: community/apps/setting/serializers/model_apply_serializers.py:47 +msgid "query" +msgstr "查詢" + +#: community/apps/setting/serializers/provider_serializers.py:79 +#: community/apps/setting/serializers/provider_serializers.py:83 +#: community/apps/setting/serializers/provider_serializers.py:130 +#: community/apps/setting/serializers/provider_serializers.py:176 +#: community/apps/setting/serializers/provider_serializers.py:190 +#: community/apps/setting/swagger_api/provide_api.py:30 +#: community/apps/setting/swagger_api/provide_api.py:54 +#: community/apps/setting/swagger_api/provide_api.py:55 +#: community/apps/setting/swagger_api/provide_api.py:87 +#: community/apps/setting/swagger_api/provide_api.py:88 +#: community/apps/setting/swagger_api/provide_api.py:170 +msgid "model name" +msgstr "模型名稱" + +#: community/apps/setting/serializers/provider_serializers.py:81 +#: community/apps/setting/serializers/provider_serializers.py:132 +#: community/apps/setting/serializers/provider_serializers.py:142 +#: community/apps/setting/serializers/provider_serializers.py:180 +#: community/apps/setting/swagger_api/provide_api.py:26 +#: community/apps/setting/swagger_api/provide_api.py:51 +#: community/apps/setting/swagger_api/provide_api.py:52 +#: community/apps/setting/swagger_api/provide_api.py:84 +#: community/apps/setting/swagger_api/provide_api.py:85 +#: community/apps/setting/swagger_api/provide_api.py:134 +#: community/apps/setting/swagger_api/provide_api.py:165 +msgid "model type" +msgstr "模型類型" + +#: community/apps/setting/serializers/provider_serializers.py:85 +#: community/apps/setting/serializers/provider_serializers.py:178 +#: community/apps/setting/serializers/provider_serializers.py:402 +#: community/apps/setting/swagger_api/provide_api.py:35 +#: community/apps/setting/swagger_api/provide_api.py:57 +#: community/apps/setting/swagger_api/provide_api.py:58 +#: community/apps/setting/swagger_api/provide_api.py:79 +#: 
community/apps/setting/swagger_api/provide_api.py:80 +#: community/apps/setting/swagger_api/provide_api.py:105 +#: community/apps/setting/swagger_api/provide_api.py:129 +#: community/apps/setting/swagger_api/provide_api.py:160 +#: community/apps/setting/swagger_api/provide_api.py:179 +msgid "provider" +msgstr "供應商" + +#: community/apps/setting/serializers/provider_serializers.py:87 +#: community/apps/setting/serializers/provider_serializers.py:134 +#: community/apps/setting/serializers/provider_serializers.py:182 +msgid "permission type" +msgstr "權限類型" + +#: community/apps/setting/serializers/provider_serializers.py:89 +msgid "create user" +msgstr "創建者" + +#: community/apps/setting/serializers/provider_serializers.py:138 +#: community/apps/setting/serializers/provider_serializers.py:186 +msgid "permissions only supportPUBLIC|PRIVATE" +msgstr "權限類型只支持PUBLIC|PRIVATE" + +#: community/apps/setting/serializers/provider_serializers.py:145 +#: community/apps/setting/serializers/provider_serializers.py:196 +msgid "certification information" +msgstr "認證信息" + +#: community/apps/setting/serializers/provider_serializers.py:193 +msgid "parameter configuration" +msgstr "參數配置" + +#: community/apps/setting/serializers/provider_serializers.py:202 +#, python-brace-format +msgid "Model name【{model_name}】already exists" +msgstr "模型名稱【{model_name}】已存在" + +#: community/apps/setting/serializers/system_setting.py:29 +#: community/apps/setting/swagger_api/system_setting.py:25 +#: community/apps/setting/swagger_api/system_setting.py:26 +#: community/apps/setting/swagger_api/system_setting.py:57 +#: community/apps/setting/swagger_api/system_setting.py:58 +msgid "SMTP host" +msgstr "SMTP 主機" + +#: community/apps/setting/serializers/system_setting.py:30 +#: community/apps/setting/swagger_api/system_setting.py:28 +#: community/apps/setting/swagger_api/system_setting.py:29 +#: community/apps/setting/swagger_api/system_setting.py:60 +#: community/apps/setting/swagger_api/system_setting.py:61 
+msgid "SMTP port" +msgstr "SMTP 端口" + +#: community/apps/setting/serializers/system_setting.py:31 +#: community/apps/setting/serializers/system_setting.py:35 +#: community/apps/setting/swagger_api/system_setting.py:31 +#: community/apps/setting/swagger_api/system_setting.py:32 +#: community/apps/setting/swagger_api/system_setting.py:43 +#: community/apps/setting/swagger_api/system_setting.py:44 +#: community/apps/setting/swagger_api/system_setting.py:63 +#: community/apps/setting/swagger_api/system_setting.py:64 +#: community/apps/setting/swagger_api/system_setting.py:75 +#: community/apps/setting/swagger_api/system_setting.py:76 +msgid "Sender's email" +msgstr "發件人郵箱" + +#: community/apps/setting/serializers/system_setting.py:32 +#: community/apps/setting/swagger_api/system_setting.py:34 +#: community/apps/setting/swagger_api/system_setting.py:35 +#: community/apps/setting/swagger_api/system_setting.py:66 +#: community/apps/setting/swagger_api/system_setting.py:67 +#: community/apps/users/serializers/user_serializers.py:72 +#: community/apps/users/serializers/user_serializers.py:112 +#: community/apps/users/serializers/user_serializers.py:143 +#: community/apps/users/serializers/user_serializers.py:211 +#: community/apps/users/serializers/user_serializers.py:293 +#: community/apps/users/serializers/user_serializers.py:346 +#: community/apps/users/serializers/user_serializers.py:671 +#: community/apps/users/serializers/user_serializers.py:703 +#: community/apps/users/serializers/user_serializers.py:704 +#: community/apps/users/serializers/user_serializers.py:743 +#: community/apps/users/serializers/user_serializers.py:763 +#: community/apps/users/serializers/user_serializers.py:764 +#: community/apps/users/views/user.py:109 +#: community/apps/users/views/user.py:110 +#: community/apps/users/views/user.py:111 +#: community/apps/users/views/user.py:112 +msgid "Password" +msgstr "密碼" + +#: community/apps/setting/serializers/system_setting.py:33 +#: 
community/apps/setting/swagger_api/system_setting.py:37 +#: community/apps/setting/swagger_api/system_setting.py:38 +#: community/apps/setting/swagger_api/system_setting.py:69 +#: community/apps/setting/swagger_api/system_setting.py:70 +msgid "Whether to enable TLS" +msgstr "是否啓用 TLS" + +#: community/apps/setting/serializers/system_setting.py:34 +#: community/apps/setting/swagger_api/system_setting.py:40 +#: community/apps/setting/swagger_api/system_setting.py:41 +#: community/apps/setting/swagger_api/system_setting.py:72 +#: community/apps/setting/swagger_api/system_setting.py:73 +msgid "Whether to enable SSL" +msgstr "是否啓用 SSL" + +#: community/apps/setting/serializers/system_setting.py:49 +msgid "Email verification failed" +msgstr "郵箱驗證失敗" + +#: community/apps/setting/serializers/team_serializers.py:43 +#: community/apps/users/serializers/user_serializers.py:70 +#: community/apps/users/serializers/user_serializers.py:111 +#: community/apps/users/serializers/user_serializers.py:136 +#: community/apps/users/serializers/user_serializers.py:209 +#: community/apps/users/serializers/user_serializers.py:470 +#: community/apps/users/serializers/user_serializers.py:493 +#: community/apps/users/serializers/user_serializers.py:518 +#: community/apps/users/serializers/user_serializers.py:519 +#: community/apps/users/serializers/user_serializers.py:581 +#: community/apps/users/serializers/user_serializers.py:627 +#: community/apps/users/serializers/user_serializers.py:628 +#: community/apps/users/serializers/user_serializers.py:663 +#: community/apps/users/serializers/user_serializers.py:700 +#: community/apps/users/serializers/user_serializers.py:701 +msgid "Username" +msgstr "用戶名" + +#: community/apps/setting/serializers/team_serializers.py:44 +#: community/apps/users/serializers/user_serializers.py:131 +#: community/apps/users/serializers/user_serializers.py:210 +#: community/apps/users/serializers/user_serializers.py:226 +#: 
community/apps/users/serializers/user_serializers.py:256 +#: community/apps/users/serializers/user_serializers.py:287 +#: community/apps/users/serializers/user_serializers.py:343 +#: community/apps/users/serializers/user_serializers.py:356 +#: community/apps/users/serializers/user_serializers.py:438 +#: community/apps/users/serializers/user_serializers.py:471 +#: community/apps/users/serializers/user_serializers.py:494 +#: community/apps/users/serializers/user_serializers.py:520 +#: community/apps/users/serializers/user_serializers.py:582 +#: community/apps/users/serializers/user_serializers.py:629 +#: community/apps/users/serializers/user_serializers.py:658 +#: community/apps/users/serializers/user_serializers.py:702 +#: community/apps/users/serializers/user_serializers.py:713 +#: community/apps/users/serializers/user_serializers.py:734 +msgid "Email" +msgstr "郵箱" + +#: community/apps/setting/serializers/team_serializers.py:47 +#: community/apps/setting/serializers/team_serializers.py:148 +#: community/apps/setting/serializers/team_serializers.py:256 +msgid "team id" +msgstr "團隊 id" + +#: community/apps/setting/serializers/team_serializers.py:48 +#: community/apps/setting/serializers/team_serializers.py:254 +#: community/apps/setting/serializers/team_serializers.py:324 +msgid "member id" +msgstr "成員 id" + +#: community/apps/setting/serializers/team_serializers.py:54 +msgid "use" +msgstr "使用" + +#: community/apps/setting/serializers/team_serializers.py:55 +msgid "manage" +msgstr "管理" + +#: community/apps/setting/serializers/team_serializers.py:60 +msgid "Operation permissions USE, MANAGE permissions" +msgstr "操作權限 USE, MANAGE 權限" + +#: community/apps/setting/serializers/team_serializers.py:63 +msgid "use permission" +msgstr "使用權限" + +#: community/apps/setting/serializers/team_serializers.py:64 +msgid "use permission True|False" +msgstr "使用權限 True|False" + +#: community/apps/setting/serializers/team_serializers.py:66 +msgid "manage permission" +msgstr "管理權限" + +#: 
community/apps/setting/serializers/team_serializers.py:67 +msgid "manage permission True|False" +msgstr "管理權限 True|False" + +#: community/apps/setting/serializers/team_serializers.py:73 +msgid "target id" +msgstr "目標 id" + +#: community/apps/setting/serializers/team_serializers.py:82 +#: community/apps/setting/serializers/team_serializers.py:83 +msgid "dataset id/application id" +msgstr "知識庫 id/應用 id" + +#: community/apps/setting/serializers/team_serializers.py:105 +msgid "Non-existent application|knowledge base id[" +msgstr "應用|知識庫 id[ 不存在" + +#: community/apps/setting/serializers/team_serializers.py:139 +#: community/apps/setting/serializers/team_serializers.py:140 +msgid "Permission data" +msgstr "權限數據" + +#: community/apps/setting/serializers/team_serializers.py:157 +#: community/apps/setting/serializers/team_serializers.py:158 +msgid "user id list" +msgstr "用戶 id 列表" + +#: community/apps/setting/serializers/team_serializers.py:168 +#: community/apps/setting/serializers/team_serializers.py:169 +msgid "Username or email" +msgstr "用戶名或郵箱" + +#: community/apps/setting/serializers/team_serializers.py:217 +msgid "Username or email is required" +msgstr "用戶名或郵箱是必填項" + +#: community/apps/setting/serializers/team_serializers.py:221 +#: community/apps/users/serializers/user_serializers.py:800 +msgid "User does not exist" +msgstr "用戶不存在" + +#: community/apps/setting/serializers/team_serializers.py:224 +msgid "The current members already exist in the team, do not add them again." 
+msgstr "當前成員已存在於團隊中,無需再次添加。" + +#: community/apps/setting/serializers/team_serializers.py:248 +msgid "member list" +msgstr "成員列表" + +#: community/apps/setting/serializers/team_serializers.py:263 +msgid "The member does not exist, please add a member first" +msgstr "成員不存在,請先添加成員" + +#: community/apps/setting/serializers/team_serializers.py:297 +msgid "Administrator rights do not allow modification" +msgstr "管理員權限不允許修改" + +#: community/apps/setting/serializers/team_serializers.py:311 +msgid "Unable to remove team admin" +msgstr "不支持移除團隊管理員" + +#: community/apps/setting/serializers/valid_serializers.py:32 +#: community/apps/users/serializers/user_serializers.py:190 +#: community/apps/users/serializers/user_serializers.py:777 +msgid "" +"The community version supports up to 2 users. If you need more users, please " +"contact us (https://fit2cloud.com/)." +msgstr "" +"社區版最多支持 2 個用戶,如需擁有更多用戶,請聯繫我們(https://" +"fit2cloud.com/)。" + +#: community/apps/setting/serializers/valid_serializers.py:41 +#: community/apps/setting/swagger_api/valid_api.py:27 +msgid "check quantity" +msgstr "檢查數量" + +#: community/apps/setting/swagger_api/provide_api.py:43 +#: community/apps/setting/swagger_api/provide_api.py:44 +#: community/apps/setting/swagger_api/provide_api.py:71 +#: community/apps/setting/swagger_api/provide_api.py:72 +#: community/apps/setting/swagger_api/provide_api.py:190 +#: community/apps/setting/swagger_api/provide_api.py:191 +msgid "parameters required to call the function" +msgstr "調用函數所需要的參數" + +#: community/apps/setting/swagger_api/provide_api.py:60 +#: community/apps/setting/swagger_api/provide_api.py:61 +#: community/apps/setting/swagger_api/provide_api.py:90 +#: community/apps/setting/swagger_api/provide_api.py:91 +msgid "model certificate information" +msgstr "模型認證信息" + +#: community/apps/setting/swagger_api/provide_api.py:114 +#: community/apps/setting/swagger_api/provide_api.py:115 +msgid "model type description" +msgstr "模型類型描述" + +#: 
community/apps/setting/swagger_api/provide_api.py:115 +#| msgid "Create model" +msgid "large language model" +msgstr "大型語言模型" + +#: community/apps/setting/swagger_api/provide_api.py:116 +#: community/apps/setting/swagger_api/provide_api.py:117 +#: community/apps/setting/swagger_api/provide_api.py:147 +#: community/apps/setting/swagger_api/provide_api.py:148 +msgid "model type value" +msgstr "模型類型值" + +#: community/apps/setting/swagger_api/provide_api.py:145 +#: community/apps/setting/swagger_api/provide_api.py:146 +msgid "model description" +msgstr "模型描述" + +#: community/apps/setting/swagger_api/provide_api.py:184 +msgid "function that needs to be executed" +msgstr "需要執行的函數" + +#: community/apps/setting/swagger_api/system_setting.py:19 +#: community/apps/setting/swagger_api/system_setting.py:20 +#: community/apps/setting/swagger_api/system_setting.py:51 +#: community/apps/setting/swagger_api/system_setting.py:52 +msgid "Email related parameters" +msgstr "郵箱相關參數" + +#: community/apps/setting/swagger_api/valid_api.py:22 +msgid "Verification type: application|dataset|user" +msgstr "認證類型:application|dataset|user" + +#: community/apps/setting/views/Team.py:27 +#: community/apps/setting/views/Team.py:28 +msgid "Get a list of team members" +msgstr "獲取團隊成員列表" + +#: community/apps/setting/views/Team.py:30 +#: community/apps/setting/views/Team.py:40 +#: community/apps/setting/views/Team.py:54 +#: community/apps/setting/views/Team.py:68 +#: community/apps/setting/views/Team.py:80 +#: community/apps/setting/views/Team.py:92 +#: community/apps/users/serializers/user_serializers.py:198 +#: community/apps/users/serializers/user_serializers.py:791 +msgid "team" +msgstr "團隊" + +#: community/apps/setting/views/Team.py:37 +#: community/apps/setting/views/Team.py:38 +msgid "Add member" +msgstr "添加成員" + +#: community/apps/setting/views/Team.py:51 +#: community/apps/setting/views/Team.py:52 +msgid "Add members in batches" +msgstr "批量添加成員" + +#: community/apps/setting/views/Team.py:65 
+#: community/apps/setting/views/Team.py:66 +msgid "Get team member permissions" +msgstr "獲取團隊成員權限" + +#: community/apps/setting/views/Team.py:76 +#: community/apps/setting/views/Team.py:77 +msgid "Update team member permissions" +msgstr "更新團隊成員權限" + +#: community/apps/setting/views/Team.py:89 +#: community/apps/setting/views/Team.py:90 +msgid "Remove member" +msgstr "移除成員" + +#: community/apps/setting/views/model.py:30 +#: community/apps/setting/views/model.py:31 +msgid "Create model" +msgstr "創建模型" + +#: community/apps/setting/views/model.py:33 +#: community/apps/setting/views/model.py:45 +#: community/apps/setting/views/model.py:57 +#: community/apps/setting/views/model.py:74 +#: community/apps/setting/views/model.py:88 +#: community/apps/setting/views/model.py:103 +#: community/apps/setting/views/model.py:114 +#: community/apps/setting/views/model.py:129 +#: community/apps/setting/views/model.py:141 +#: community/apps/setting/views/model.py:151 +#: community/apps/setting/views/model.py:170 +#: community/apps/setting/views/model.py:180 +#: community/apps/setting/views/model.py:204 +#: community/apps/setting/views/model.py:219 +#: community/apps/setting/views/model.py:239 +#: community/apps/setting/views/model.py:257 +#: community/apps/setting/views/model_apply.py:26 +#: community/apps/setting/views/model_apply.py:36 +#: community/apps/setting/views/model_apply.py:46 +msgid "model" +msgstr "模型設置" + +#: community/apps/setting/views/model.py:42 +#: community/apps/setting/views/model.py:43 +msgid "Download model, trial only with Ollama platform" +msgstr "下載模型,僅支持 Ollama 平臺試用" + +#: community/apps/setting/views/model.py:54 +#: community/apps/setting/views/model.py:55 +msgid "Get model list" +msgstr "獲取模型列表" + +#: community/apps/setting/views/model.py:71 +#: community/apps/setting/views/model.py:73 +msgid "" +"Query model meta information, this interface does not carry authentication " +"information" +msgstr "查詢模型元信息,該接口不攜帶認證信息" + +#: 
community/apps/setting/views/model.py:86 +#: community/apps/setting/views/model.py:87 +msgid "Pause model download" +msgstr "下載模型暫停" + +#: community/apps/setting/views/model.py:111 +#: community/apps/setting/views/model.py:112 +msgid "Save model parameter form" +msgstr "保存模型參數表單" + +#: community/apps/setting/views/model.py:126 +#: community/apps/setting/views/model.py:127 +msgid "Update model" +msgstr "更新模型" + +#: community/apps/setting/views/model.py:138 +#: community/apps/setting/views/model.py:139 +msgid "Delete model" +msgstr "刪除模型" + +#: community/apps/setting/views/model.py:149 +#: community/apps/setting/views/model.py:150 +msgid "Query model details" +msgstr "查詢模型詳情" + +#: community/apps/setting/views/model.py:166 +#: community/apps/setting/views/model.py:167 +msgid "Call the supplier function to obtain form data" +msgstr "調用供應商函數,獲取表單數據" + +#: community/apps/setting/views/model.py:178 +#: community/apps/setting/views/model.py:179 +msgid "Get a list of model suppliers" +msgstr "獲取模型供應商列表" + +#: community/apps/setting/views/model.py:200 +#: community/apps/setting/views/model.py:201 +msgid "Get a list of model types" +msgstr "獲取模型類型列表" + +#: community/apps/setting/views/model.py:215 +#: community/apps/setting/views/model.py:216 +#: community/apps/setting/views/model.py:236 +#: community/apps/setting/views/model.py:254 +#: community/apps/setting/views/model.py:255 +msgid "Get the model creation form" +msgstr "獲取模型創建表單" + +#: community/apps/setting/views/model.py:235 +msgid "Get model default parameters" +msgstr "獲取模型默認參數" + +#: community/apps/setting/views/model_apply.py:23 +#: community/apps/setting/views/model_apply.py:24 +#: community/apps/setting/views/model_apply.py:33 +#: community/apps/setting/views/model_apply.py:34 +msgid "Vectorization documentation" +msgstr "向量化文檔" + +#: community/apps/setting/views/model_apply.py:43 +#: community/apps/setting/views/model_apply.py:44 +msgid "Reorder documents" +msgstr "重排序文檔" + +#: 
community/apps/setting/views/system_setting.py:29 +#: community/apps/setting/views/system_setting.py:30 +msgid "Create or update email settings" +msgstr "創建或更新郵箱設置" + +#: community/apps/setting/views/system_setting.py:31 +#: community/apps/setting/views/system_setting.py:45 +#: community/apps/setting/views/system_setting.py:57 +msgid "Email settings" +msgstr "郵箱設置" + +#: community/apps/setting/views/system_setting.py:41 +#: community/apps/setting/views/system_setting.py:42 +msgid "Test email settings" +msgstr "測試郵箱設置" + +#: community/apps/setting/views/system_setting.py:54 +#: community/apps/setting/views/system_setting.py:55 +msgid "Get email settings" +msgstr "獲取郵箱設置" + +#: community/apps/setting/views/valid.py:26 +#: community/apps/setting/views/valid.py:27 +msgid "Get verification results" +msgstr "獲取認證結果" + +#: community/apps/users/serializers/user_serializers.py:62 +#: community/apps/users/serializers/user_serializers.py:63 +msgid "System version number" +msgstr "系統版本號" + +#: community/apps/users/serializers/user_serializers.py:141 +#: community/apps/users/serializers/user_serializers.py:669 +msgid "Username must be 6-20 characters long" +msgstr "用戶名必須是 6-20 個字符長" + +#: community/apps/users/serializers/user_serializers.py:148 +#: community/apps/users/serializers/user_serializers.py:156 +#: community/apps/users/serializers/user_serializers.py:676 +#: community/apps/users/serializers/user_serializers.py:748 +msgid "" +"The password must be 6-20 characters long and must be a combination of " +"letters, numbers, and special characters." 
+msgstr "密碼必須是 6-20 個字符長,且必須是字母、數字和特殊字符的組合" + +#: community/apps/users/serializers/user_serializers.py:151 +#: community/apps/users/serializers/user_serializers.py:212 +#: community/apps/users/serializers/user_serializers.py:213 +#: community/apps/users/serializers/user_serializers.py:300 +#: community/apps/users/serializers/user_serializers.py:347 +#: community/apps/users/serializers/user_serializers.py:348 +#: community/apps/users/serializers/user_serializers.py:749 +#: community/apps/users/serializers/user_serializers.py:765 +#: community/apps/users/serializers/user_serializers.py:766 +msgid "Confirm Password" +msgstr "確認密碼" + +#: community/apps/users/serializers/user_serializers.py:158 +#: community/apps/users/serializers/user_serializers.py:214 +#: community/apps/users/serializers/user_serializers.py:215 +#: community/apps/users/serializers/user_serializers.py:229 +#: community/apps/users/serializers/user_serializers.py:257 +#: community/apps/users/serializers/user_serializers.py:258 +#: community/apps/users/serializers/user_serializers.py:291 +#: community/apps/users/serializers/user_serializers.py:344 +#: community/apps/users/serializers/user_serializers.py:345 +#: community/apps/users/views/user.py:107 +#: community/apps/users/views/user.py:108 +msgid "Verification code" +msgstr "驗證碼" + +#: community/apps/users/serializers/user_serializers.py:232 +#: community/apps/users/serializers/user_serializers.py:259 +#: community/apps/users/serializers/user_serializers.py:360 +#: community/apps/users/serializers/user_serializers.py:439 +msgid "Type" +msgstr "類型" + +#: community/apps/users/serializers/user_serializers.py:236 +#: community/apps/users/serializers/user_serializers.py:362 +msgid "The type only supports register|reset_password" +msgstr "該類型僅支持 register|reset_password" + +#: community/apps/users/serializers/user_serializers.py:266 +msgid "Is it successful" +msgstr "是否成功" + +#: community/apps/users/serializers/user_serializers.py:268 +msgid "Error message" 
+msgstr "錯誤信息" + +#: community/apps/users/serializers/user_serializers.py:280 +msgid "language only support:" +msgstr "語言只支持:" + +#: community/apps/users/serializers/user_serializers.py:298 +#: community/apps/users/serializers/user_serializers.py:305 +#: community/apps/users/serializers/user_serializers.py:754 +msgid "" +"The confirmation password must be 6-20 characters long and must be a " +"combination of letters, numbers, and special characters." +msgstr "確認密碼長度6-20個字符,必須字母、數字、特殊字符組合" + +#: community/apps/users/serializers/user_serializers.py:380 +#, python-brace-format +msgid "Do not send emails again within {seconds} seconds" +msgstr "{seconds} 秒內請勿重複發送郵件" + +#: community/apps/users/serializers/user_serializers.py:410 +msgid "" +"The email service has not been set up. Please contact the administrator to " +"set up the email service in [Email Settings]." +msgstr "郵箱服務未設置,請聯繫管理員在【郵箱設置】中設置郵箱服務" + +#: community/apps/users/serializers/user_serializers.py:421 +#, python-brace-format +msgid "【Intelligent knowledge base question and answer system-{action}】" +msgstr "【智能知識庫問答系統-{action}】" + +#: community/apps/users/serializers/user_serializers.py:422 +#: community/apps/users/views/user.py:194 +#: community/apps/users/views/user.py:195 +msgid "User registration" +msgstr "用戶註冊" + +#: community/apps/users/serializers/user_serializers.py:422 +#: community/apps/users/views/user.py:212 +#: community/apps/users/views/user.py:213 +#: community/apps/users/views/user.py:301 +#: community/apps/users/views/user.py:302 +msgid "Change password" +msgstr "修改密碼" + +#: community/apps/users/serializers/user_serializers.py:474 +#: community/apps/users/serializers/user_serializers.py:475 +msgid "Permissions" +msgstr "權限列表" + +#: community/apps/users/serializers/user_serializers.py:509 +#: community/apps/users/serializers/user_serializers.py:610 +#: community/apps/users/serializers/user_serializers.py:618 +msgid "Email or username" +msgstr "郵箱或用戶名" + +#: 
community/apps/users/serializers/user_serializers.py:560 +msgid "All" +msgstr "全部" + +#: community/apps/users/serializers/user_serializers.py:561 +msgid "Me" +msgstr "我的" + +#: community/apps/users/serializers/user_serializers.py:583 +#: community/apps/users/serializers/user_serializers.py:680 +#: community/apps/users/serializers/user_serializers.py:705 +#: community/apps/users/serializers/user_serializers.py:719 +#: community/apps/users/serializers/user_serializers.py:736 +msgid "Phone" +msgstr "手機號" + +#: community/apps/users/serializers/user_serializers.py:587 +msgid "Source" +msgstr "來源" + +#: community/apps/users/serializers/user_serializers.py:588 +#: community/apps/users/serializers/user_serializers.py:678 +#: community/apps/users/serializers/user_serializers.py:706 +#: community/apps/users/serializers/user_serializers.py:717 +#: community/apps/users/serializers/user_serializers.py:735 +msgid "Name" +msgstr "名字" + +#: community/apps/users/serializers/user_serializers.py:727 +msgid "Email is already in use" +msgstr "郵箱已被使用" + +#: community/apps/users/serializers/user_serializers.py:808 +msgid "Unable to delete administrator" +msgstr "不能刪除管理員" + +#: community/apps/users/serializers/user_serializers.py:845 +msgid "Cannot modify administrator status" +msgstr "不能修改管理員狀態" + +#: community/apps/users/views/user.py:37 community/apps/users/views/user.py:38 +msgid "Get MaxKB related information" +msgstr "獲取 MaxKB 相關信息" + +#: community/apps/users/views/user.py:40 +msgid "System parameters" +msgstr "系統參數" + +#: community/apps/users/views/user.py:50 community/apps/users/views/user.py:51 +msgid "Get current user information" +msgstr "獲取當前用戶信息" + +#: community/apps/users/views/user.py:63 community/apps/users/views/user.py:64 +msgid "Get user list" +msgstr "獲取用戶列表" + +#: community/apps/users/views/user.py:67 community/apps/users/views/user.py:90 +#: community/apps/users/views/user.py:116 +#: community/apps/users/views/user.py:136 +#: community/apps/users/views/user.py:152 
+#: community/apps/users/views/user.py:178 +#: community/apps/users/views/user.py:199 +#: community/apps/users/views/user.py:217 +#: community/apps/users/views/user.py:234 +#: community/apps/users/views/user.py:249 +#: community/apps/users/views/user.py:373 +msgid "User" +msgstr "用戶" + +#: community/apps/users/views/user.py:79 community/apps/users/views/user.py:80 +msgid "Switch Language" +msgstr "切换语音" + +#: community/apps/users/views/user.py:101 +#: community/apps/users/views/user.py:102 +msgid "Modify current user password" +msgstr "修改當前用戶密碼" + +#: community/apps/users/views/user.py:125 +msgid "Failed to change password" +msgstr "修改密碼失敗" + +#: community/apps/users/views/user.py:133 +#: community/apps/users/views/user.py:134 +msgid "Send email to current user" +msgstr "給當前用戶發送郵件" + +#: community/apps/users/views/user.py:149 +#: community/apps/users/views/user.py:150 +msgid "Sign out" +msgstr "登出" + +#: community/apps/users/views/user.py:205 +msgid "Registration successful" +msgstr "註冊成功" + +#: community/apps/users/views/user.py:229 +#: community/apps/users/views/user.py:230 +msgid "Check whether the verification code is correct" +msgstr "檢查驗證碼是否正確" + +#: community/apps/users/views/user.py:244 +#: community/apps/users/views/user.py:245 +msgid "Send email" +msgstr "發送郵件" + +#: community/apps/users/views/user.py:262 +#: community/apps/users/views/user.py:263 +msgid "Add user" +msgstr "添加用戶" + +#: community/apps/users/views/user.py:266 +#: community/apps/users/views/user.py:282 +#: community/apps/users/views/user.py:306 +#: community/apps/users/views/user.py:324 +#: community/apps/users/views/user.py:338 +#: community/apps/users/views/user.py:354 +msgid "User management" +msgstr "用戶管理" + +#: community/apps/users/views/user.py:280 +#: community/apps/users/views/user.py:281 +msgid "Get user paginated list" +msgstr "獲取用戶分頁列表" + +#: community/apps/users/views/user.py:320 +#: community/apps/users/views/user.py:321 +msgid "Delete user" +msgstr "刪除用戶" + +#: 
community/apps/users/views/user.py:334 +#: community/apps/users/views/user.py:335 +msgid "Get user information" +msgstr "獲取用戶信息" + +#: community/apps/users/views/user.py:349 +#: community/apps/users/views/user.py:350 +msgid "Update user information" +msgstr "更新用戶信息" + +#: community/apps/users/views/user.py:369 +#: community/apps/users/views/user.py:370 +msgid "Get user list by type" +msgstr "按類型獲取用戶列表" + +#~ msgid "MaxKB table template.csv" +#~ msgstr "MaxKB表格模版.csv" + +#~ msgid "MaxKB table template.xlsx" +#~ msgstr "MaxKB表格模版.xlsx" + +msgid "Fail" +msgstr "失敗" + +msgid "Menu" +msgstr "操作菜單" + +msgid "Operate" +msgstr "操作" + +msgid "Operate user" +msgstr "操作用戶" + +msgid "Ip Address" +msgstr "IP地址" + +msgid "API Details" +msgstr "API詳情" + +msgid "Operate Time" +msgstr "操作時間" + +msgid "System Settings/API Key" +msgstr "系統 API Key" + +msgid "Appearance Settings" +msgstr "外觀設置" + +msgid "Conversation Log" +msgstr "對話日誌" + +msgid "login authentication" +msgstr "登錄驗證" + +msgid "Paragraph" +msgstr "段落" + +msgid "Batch generate related" +msgstr "分段生成问题" + +msgid "Application access" +msgstr "應用接入" + +msgid "Add internal function" +msgstr "添加內寘函數" + +msgid "Batch generate related documents" +msgstr "批量生成问题" + +msgid "No permission to use this function {name}" +msgstr "無權使用此模型{name}" + +msgid "Function {name} is unavailable" +msgstr "函數{name} 不可用" + +msgid "Field: {name} Type: {_type} Value: {value} Type error" +msgstr "欄位: {name} 類型: {_type} 值: {value} 類型錯誤" + +msgid "Field: {name} Type: {_type} Value: {value} Unsupported types" +msgstr "欄位: {name} 類型: {_type} 值: {value} 不支持的類型" + +msgid "Field: {name} No value set" +msgstr "欄位: {name} 未設定值" + +msgid "Generate related" +msgstr "生成問題" + +msgid "Obtain graphical captcha" +msgstr "獲取圖形驗證碼" + +msgid "Captcha code error or expiration" +msgstr "驗證碼錯誤或過期" + +msgid "captcha" +msgstr "驗證碼" \ No newline at end of file diff --git a/apps/ops/__init__.py b/apps/ops/__init__.py new file mode 100644 index 00000000000..a02f13af3f6 --- 
/dev/null +++ b/apps/ops/__init__.py @@ -0,0 +1,9 @@ +# coding=utf-8 +""" + @project: MaxKB + @Author:虎 + @file: __init__.py.py + @date:2024/8/16 14:47 + @desc: +""" +from .celery import app as celery_app diff --git a/apps/ops/celery/__init__.py b/apps/ops/celery/__init__.py new file mode 100644 index 00000000000..ece1714bc8b --- /dev/null +++ b/apps/ops/celery/__init__.py @@ -0,0 +1,33 @@ +# -*- coding: utf-8 -*- + +import os + +from celery import Celery +from celery.schedules import crontab +from kombu import Exchange, Queue +from smartdoc import settings +from .heartbeat import * + +# set the default Django settings module for the 'celery' program. +os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'smartdoc.settings') + +app = Celery('MaxKB') + +configs = {k: v for k, v in settings.__dict__.items() if k.startswith('CELERY')} +configs['worker_concurrency'] = 5 +# Using a string here means the worker will not have to +# pickle the object when using Windows. +# app.config_from_object('django.conf:settings', namespace='CELERY') + +configs["task_queues"] = [ + Queue("celery", Exchange("celery"), routing_key="celery"), + Queue("model", Exchange("model"), routing_key="model") +] +app.namespace = 'CELERY' +app.conf.update( + {key.replace('CELERY_', '') if key.replace('CELERY_', '').lower() == key.replace('CELERY_', + '') else key: configs.get( + key) for + key + in configs.keys()}) +app.autodiscover_tasks(lambda: [app_config.split('.')[0] for app_config in settings.INSTALLED_APPS]) diff --git a/apps/ops/celery/const.py b/apps/ops/celery/const.py new file mode 100644 index 00000000000..2f887023fb3 --- /dev/null +++ b/apps/ops/celery/const.py @@ -0,0 +1,4 @@ +# -*- coding: utf-8 -*- +# + +CELERY_LOG_MAGIC_MARK = b'\x00\x00\x00\x00\x00' \ No newline at end of file diff --git a/apps/ops/celery/decorator.py b/apps/ops/celery/decorator.py new file mode 100644 index 00000000000..317a7f7aefa --- /dev/null +++ b/apps/ops/celery/decorator.py @@ -0,0 +1,104 @@ +# -*- coding: utf-8 
-*- +# +from functools import wraps + +_need_registered_period_tasks = [] +_after_app_ready_start_tasks = [] +_after_app_shutdown_clean_periodic_tasks = [] + + +def add_register_period_task(task): + _need_registered_period_tasks.append(task) + + +def get_register_period_tasks(): + return _need_registered_period_tasks + + +def add_after_app_shutdown_clean_task(name): + _after_app_shutdown_clean_periodic_tasks.append(name) + + +def get_after_app_shutdown_clean_tasks(): + return _after_app_shutdown_clean_periodic_tasks + + +def add_after_app_ready_task(name): + _after_app_ready_start_tasks.append(name) + + +def get_after_app_ready_tasks(): + return _after_app_ready_start_tasks + + +def register_as_period_task( + crontab=None, interval=None, name=None, + args=(), kwargs=None, + description=''): + """ + Warning: Task must have not any args and kwargs + :param crontab: "* * * * *" + :param interval: 60*60*60 + :param args: () + :param kwargs: {} + :param description: " + :param name: "" + :return: + """ + if crontab is None and interval is None: + raise SyntaxError("Must set crontab or interval one") + + def decorate(func): + if crontab is None and interval is None: + raise SyntaxError("Interval and crontab must set one") + + # Because when this decorator run, the task was not created, + # So we can't use func.name + task = '{func.__module__}.{func.__name__}'.format(func=func) + _name = name if name else task + add_register_period_task({ + _name: { + 'task': task, + 'interval': interval, + 'crontab': crontab, + 'args': args, + 'kwargs': kwargs if kwargs else {}, + 'description': description + } + }) + + @wraps(func) + def wrapper(*args, **kwargs): + return func(*args, **kwargs) + + return wrapper + + return decorate + + +def after_app_ready_start(func): + # Because when this decorator run, the task was not created, + # So we can't use func.name + name = '{func.__module__}.{func.__name__}'.format(func=func) + if name not in _after_app_ready_start_tasks: + 
add_after_app_ready_task(name) + + @wraps(func) + def decorate(*args, **kwargs): + return func(*args, **kwargs) + + return decorate + + +def after_app_shutdown_clean_periodic(func): + # Because when this decorator run, the task was not created, + # So we can't use func.name + name = '{func.__module__}.{func.__name__}'.format(func=func) + if name not in _after_app_shutdown_clean_periodic_tasks: + add_after_app_shutdown_clean_task(name) + + @wraps(func) + def decorate(*args, **kwargs): + return func(*args, **kwargs) + + return decorate diff --git a/apps/ops/celery/heartbeat.py b/apps/ops/celery/heartbeat.py new file mode 100644 index 00000000000..339a3c60a6c --- /dev/null +++ b/apps/ops/celery/heartbeat.py @@ -0,0 +1,25 @@ +from pathlib import Path + +from celery.signals import heartbeat_sent, worker_ready, worker_shutdown + + +@heartbeat_sent.connect +def heartbeat(sender, **kwargs): + worker_name = sender.eventer.hostname.split('@')[0] + heartbeat_path = Path('/tmp/worker_heartbeat_{}'.format(worker_name)) + heartbeat_path.touch() + + +@worker_ready.connect +def worker_ready(sender, **kwargs): + worker_name = sender.hostname.split('@')[0] + ready_path = Path('/tmp/worker_ready_{}'.format(worker_name)) + ready_path.touch() + + +@worker_shutdown.connect +def worker_shutdown(sender, **kwargs): + worker_name = sender.hostname.split('@')[0] + for signal in ['ready', 'heartbeat']: + path = Path('/tmp/worker_{}_{}'.format(signal, worker_name)) + path.unlink(missing_ok=True) diff --git a/apps/ops/celery/logger.py b/apps/ops/celery/logger.py new file mode 100644 index 00000000000..1b2843c2b85 --- /dev/null +++ b/apps/ops/celery/logger.py @@ -0,0 +1,225 @@ +from logging import StreamHandler +from threading import get_ident + +from celery import current_task +from celery.signals import task_prerun, task_postrun +from django.conf import settings +from kombu import Connection, Exchange, Queue, Producer +from kombu.mixins import ConsumerMixin + +from .utils import 
get_celery_task_log_path +from .const import CELERY_LOG_MAGIC_MARK + +routing_key = 'celery_log' +celery_log_exchange = Exchange('celery_log_exchange', type='direct') +celery_log_queue = [Queue('celery_log', celery_log_exchange, routing_key=routing_key)] + + +class CeleryLoggerConsumer(ConsumerMixin): + def __init__(self): + self.connection = Connection(settings.CELERY_LOG_BROKER_URL) + + def get_consumers(self, Consumer, channel): + return [Consumer(queues=celery_log_queue, + accept=['pickle', 'json'], + callbacks=[self.process_task]) + ] + + def handle_task_start(self, task_id, message): + pass + + def handle_task_end(self, task_id, message): + pass + + def handle_task_log(self, task_id, msg, message): + pass + + def process_task(self, body, message): + action = body.get('action') + task_id = body.get('task_id') + msg = body.get('msg') + if action == CeleryLoggerProducer.ACTION_TASK_LOG: + self.handle_task_log(task_id, msg, message) + elif action == CeleryLoggerProducer.ACTION_TASK_START: + self.handle_task_start(task_id, message) + elif action == CeleryLoggerProducer.ACTION_TASK_END: + self.handle_task_end(task_id, message) + + +class CeleryLoggerProducer: + ACTION_TASK_START, ACTION_TASK_LOG, ACTION_TASK_END = range(3) + + def __init__(self): + self.connection = Connection(settings.CELERY_LOG_BROKER_URL) + + @property + def producer(self): + return Producer(self.connection) + + def publish(self, payload): + self.producer.publish( + payload, serializer='json', exchange=celery_log_exchange, + declare=[celery_log_exchange], routing_key=routing_key + ) + + def log(self, task_id, msg): + payload = {'task_id': task_id, 'msg': msg, 'action': self.ACTION_TASK_LOG} + return self.publish(payload) + + def read(self): + pass + + def flush(self): + pass + + def task_end(self, task_id): + payload = {'task_id': task_id, 'action': self.ACTION_TASK_END} + return self.publish(payload) + + def task_start(self, task_id): + payload = {'task_id': task_id, 'action': 
self.ACTION_TASK_START} + return self.publish(payload) + + +class CeleryTaskLoggerHandler(StreamHandler): + terminator = '\r\n' + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + task_prerun.connect(self.on_task_start) + task_postrun.connect(self.on_start_end) + + @staticmethod + def get_current_task_id(): + if not current_task: + return + task_id = current_task.request.root_id + return task_id + + def on_task_start(self, sender, task_id, **kwargs): + return self.handle_task_start(task_id) + + def on_start_end(self, sender, task_id, **kwargs): + return self.handle_task_end(task_id) + + def after_task_publish(self, sender, body, **kwargs): + pass + + def emit(self, record): + task_id = self.get_current_task_id() + if not task_id: + return + try: + self.write_task_log(task_id, record) + self.flush() + except Exception: + self.handleError(record) + + def write_task_log(self, task_id, msg): + pass + + def handle_task_start(self, task_id): + pass + + def handle_task_end(self, task_id): + pass + + +class CeleryThreadingLoggerHandler(CeleryTaskLoggerHandler): + @staticmethod + def get_current_thread_id(): + return str(get_ident()) + + def emit(self, record): + thread_id = self.get_current_thread_id() + try: + self.write_thread_task_log(thread_id, record) + self.flush() + except ValueError: + self.handleError(record) + + def write_thread_task_log(self, thread_id, msg): + pass + + def handle_task_start(self, task_id): + pass + + def handle_task_end(self, task_id): + pass + + def handleError(self, record) -> None: + pass + + +class CeleryTaskMQLoggerHandler(CeleryTaskLoggerHandler): + def __init__(self): + self.producer = CeleryLoggerProducer() + super().__init__(stream=None) + + def write_task_log(self, task_id, record): + msg = self.format(record) + self.producer.log(task_id, msg) + + def flush(self): + self.producer.flush() + + +class CeleryTaskFileHandler(CeleryTaskLoggerHandler): + def __init__(self, *args, **kwargs): + self.f = None + 
super().__init__(*args, **kwargs) + + def emit(self, record): + msg = self.format(record) + if not self.f or self.f.closed: + return + self.f.write(msg) + self.f.write(self.terminator) + self.flush() + + def flush(self): + self.f and self.f.flush() + + def handle_task_start(self, task_id): + log_path = get_celery_task_log_path(task_id) + self.f = open(log_path, 'a') + + def handle_task_end(self, task_id): + self.f and self.f.close() + + +class CeleryThreadTaskFileHandler(CeleryThreadingLoggerHandler): + def __init__(self, *args, **kwargs): + self.thread_id_fd_mapper = {} + self.task_id_thread_id_mapper = {} + super().__init__(*args, **kwargs) + + def write_thread_task_log(self, thread_id, record): + f = self.thread_id_fd_mapper.get(thread_id, None) + if not f: + raise ValueError('Not found thread task file') + msg = self.format(record) + f.write(msg.encode()) + f.write(self.terminator.encode()) + f.flush() + + def flush(self): + for f in self.thread_id_fd_mapper.values(): + f.flush() + + def handle_task_start(self, task_id): + print('handle_task_start') + log_path = get_celery_task_log_path(task_id) + thread_id = self.get_current_thread_id() + self.task_id_thread_id_mapper[task_id] = thread_id + f = open(log_path, 'ab') + self.thread_id_fd_mapper[thread_id] = f + + def handle_task_end(self, task_id): + print('handle_task_end') + ident_id = self.task_id_thread_id_mapper.get(task_id, '') + f = self.thread_id_fd_mapper.pop(ident_id, None) + if f and not f.closed: + f.write(CELERY_LOG_MAGIC_MARK) + f.close() + self.task_id_thread_id_mapper.pop(task_id, None) diff --git a/apps/ops/celery/signal_handler.py b/apps/ops/celery/signal_handler.py new file mode 100644 index 00000000000..46671a0d8fa --- /dev/null +++ b/apps/ops/celery/signal_handler.py @@ -0,0 +1,75 @@ +# -*- coding: utf-8 -*- +# +import logging +import os + +from celery import subtask +from celery.signals import ( + worker_ready, worker_shutdown, after_setup_logger, task_revoked, task_prerun +) +from 
django.core.cache import cache +from django_celery_beat.models import PeriodicTask + +from .decorator import get_after_app_ready_tasks, get_after_app_shutdown_clean_tasks +from .logger import CeleryThreadTaskFileHandler + +logger = logging.getLogger(__file__) +safe_str = lambda x: x + + +@worker_ready.connect +def on_app_ready(sender=None, headers=None, **kwargs): + if cache.get("CELERY_APP_READY", 0) == 1: + return + cache.set("CELERY_APP_READY", 1, 10) + tasks = get_after_app_ready_tasks() + logger.debug("Work ready signal recv") + logger.debug("Start need start task: [{}]".format(", ".join(tasks))) + for task in tasks: + periodic_task = PeriodicTask.objects.filter(task=task).first() + if periodic_task and not periodic_task.enabled: + logger.debug("Periodic task [{}] is disabled!".format(task)) + continue + subtask(task).delay() + + +def delete_files(directory): + if os.path.isdir(directory): + for filename in os.listdir(directory): + file_path = os.path.join(directory, filename) + if os.path.isfile(file_path): + os.remove(file_path) + + +@worker_shutdown.connect +def after_app_shutdown_periodic_tasks(sender=None, **kwargs): + if cache.get("CELERY_APP_SHUTDOWN", 0) == 1: + return + cache.set("CELERY_APP_SHUTDOWN", 1, 10) + tasks = get_after_app_shutdown_clean_tasks() + logger.debug("Worker shutdown signal recv") + logger.debug("Clean period tasks: [{}]".format(', '.join(tasks))) + PeriodicTask.objects.filter(name__in=tasks).delete() + + +@after_setup_logger.connect +def add_celery_logger_handler(sender=None, logger=None, loglevel=None, format=None, **kwargs): + if not logger: + return + task_handler = CeleryThreadTaskFileHandler() + task_handler.setLevel(loglevel) + formatter = logging.Formatter(format) + task_handler.setFormatter(formatter) + logger.addHandler(task_handler) + + +@task_revoked.connect +def on_task_revoked(request, terminated, signum, expired, **kwargs): + print('task_revoked', terminated) + + +@task_prerun.connect +def on_taskaa_start(sender, 
task_id, **kwargs): + pass + # sender.update_state(state='REVOKED', +# meta={'exc_type': 'Exception', 'exc': 'Exception', 'message': '暂停任务', 'exc_message': ''}) diff --git a/apps/ops/celery/utils.py b/apps/ops/celery/utils.py new file mode 100644 index 00000000000..288089f6f2e --- /dev/null +++ b/apps/ops/celery/utils.py @@ -0,0 +1,68 @@ +# -*- coding: utf-8 -*- +# +import logging +import os +import uuid + +from django.conf import settings +from django_celery_beat.models import ( + PeriodicTasks +) + +from smartdoc.const import PROJECT_DIR + +logger = logging.getLogger(__file__) + + +def disable_celery_periodic_task(task_name): + from django_celery_beat.models import PeriodicTask + PeriodicTask.objects.filter(name=task_name).update(enabled=False) + PeriodicTasks.update_changed() + + +def delete_celery_periodic_task(task_name): + from django_celery_beat.models import PeriodicTask + PeriodicTask.objects.filter(name=task_name).delete() + PeriodicTasks.update_changed() + + +def get_celery_periodic_task(task_name): + from django_celery_beat.models import PeriodicTask + task = PeriodicTask.objects.filter(name=task_name).first() + return task + + +def make_dirs(name, mode=0o755, exist_ok=False): + """ 默认权限设置为 0o755 """ + return os.makedirs(name, mode=mode, exist_ok=exist_ok) + + +def get_task_log_path(base_path, task_id, level=2): + task_id = str(task_id) + try: + uuid.UUID(task_id) + except: + return os.path.join(PROJECT_DIR, 'data', 'caution.txt') + + rel_path = os.path.join(*task_id[:level], task_id + '.log') + path = os.path.join(base_path, rel_path) + make_dirs(os.path.dirname(path), exist_ok=True) + return path + + +def get_celery_task_log_path(task_id): + return get_task_log_path(settings.CELERY_LOG_DIR, task_id) + + +def get_celery_status(): + from . 
import app + i = app.control.inspect() + ping_data = i.ping() or {} + active_nodes = [k for k, v in ping_data.items() if v.get('ok') == 'pong'] + active_queue_worker = set([n.split('@')[0] for n in active_nodes if n]) + # Celery Worker 数量: 2 + if len(active_queue_worker) < 2: + print("Not all celery worker worked") + return False + else: + return True diff --git a/apps/setting/migrations/0005_model_permission_type.py b/apps/setting/migrations/0005_model_permission_type.py new file mode 100644 index 00000000000..dba081a1965 --- /dev/null +++ b/apps/setting/migrations/0005_model_permission_type.py @@ -0,0 +1,46 @@ +# Generated by Django 4.2.13 on 2024-07-15 15:23 +import json + +from django.db import migrations, models +from django.db.models import QuerySet + +from common.util.rsa_util import rsa_long_encrypt +from setting.models import Status, PermissionType +from smartdoc.const import CONFIG + +default_embedding_model_id = '42f63a3d-427e-11ef-b3ec-a8a1595801ab' + + +def save_default_embedding_model(apps, schema_editor): + ModelModel = apps.get_model('setting', 'Model') + cache_folder = CONFIG.get('EMBEDDING_MODEL_PATH') + model_name = CONFIG.get('EMBEDDING_MODEL_NAME') + credential = {'cache_folder': cache_folder} + model_credential_str = json.dumps(credential) + model = ModelModel(id=default_embedding_model_id, name='maxkb-embedding', status=Status.SUCCESS, + model_type="EMBEDDING", model_name=model_name, user_id='f0dd8f71-e4ee-11ee-8c84-a8a1595801ab', + provider='model_local_provider', + credential=rsa_long_encrypt(model_credential_str), meta={}, + permission_type=PermissionType.PUBLIC) + model.save() + + +def reverse_code_embedding_model(apps, schema_editor): + ModelModel = apps.get_model('setting', 'Model') + QuerySet(ModelModel).filter(id=default_embedding_model_id).delete() + + +class Migration(migrations.Migration): + dependencies = [ + ('setting', '0004_alter_model_credential'), + ] + + operations = [ + migrations.AddField( + model_name='model', + 
name='permission_type', + field=models.CharField(choices=[('PUBLIC', '公开'), ('PRIVATE', '私有')], default='PRIVATE', max_length=20, + verbose_name='权限类型'), + ), + migrations.RunPython(save_default_embedding_model, reverse_code_embedding_model) + ] diff --git a/apps/setting/migrations/0006_alter_model_status.py b/apps/setting/migrations/0006_alter_model_status.py new file mode 100644 index 00000000000..209f57c94d8 --- /dev/null +++ b/apps/setting/migrations/0006_alter_model_status.py @@ -0,0 +1,18 @@ +# Generated by Django 4.2.14 on 2024-07-23 18:14 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('setting', '0005_model_permission_type'), + ] + + operations = [ + migrations.AlterField( + model_name='model', + name='status', + field=models.CharField(choices=[('SUCCESS', '成功'), ('ERROR', '失败'), ('DOWNLOAD', '下载中'), ('PAUSE_DOWNLOAD', '暂停下载')], default='SUCCESS', max_length=20, verbose_name='设置类型'), + ), + ] diff --git a/apps/setting/migrations/0007_model_model_params_form.py b/apps/setting/migrations/0007_model_model_params_form.py new file mode 100644 index 00000000000..fa40b660d63 --- /dev/null +++ b/apps/setting/migrations/0007_model_model_params_form.py @@ -0,0 +1,18 @@ +# Generated by Django 4.2.15 on 2024-10-15 14:49 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('setting', '0006_alter_model_status'), + ] + + operations = [ + migrations.AddField( + model_name='model', + name='model_params_form', + field=models.JSONField(default=list, verbose_name='模型参数配置'), + ), + ] diff --git a/apps/setting/migrations/0008_modelparam.py b/apps/setting/migrations/0008_modelparam.py new file mode 100644 index 00000000000..8be3892b38b --- /dev/null +++ b/apps/setting/migrations/0008_modelparam.py @@ -0,0 +1,25 @@ +# Generated by Django 4.2.15 on 2024-10-16 13:10 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + 
dependencies = [ + ('setting', '0007_model_model_params_form'), + ] + + operations = [ + migrations.CreateModel( + name='ModelParam', + fields=[ + ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), + ('label', models.CharField(max_length=128, verbose_name='参数')), + ('field', models.CharField(max_length=256, verbose_name='显示名称')), + ('default_value', models.CharField(max_length=1000, verbose_name='默认值')), + ('input_type', models.CharField(max_length=32, verbose_name='组件类型')), + ('attrs', models.JSONField(verbose_name='属性')), + ('required', models.BooleanField(verbose_name='必填')), + ], + ), + ] diff --git a/apps/setting/migrations/0009_set_default_model_params_form.py b/apps/setting/migrations/0009_set_default_model_params_form.py new file mode 100644 index 00000000000..6b4d4b4531f --- /dev/null +++ b/apps/setting/migrations/0009_set_default_model_params_form.py @@ -0,0 +1,19 @@ +# Generated by Django 4.2.15 on 2024-10-15 14:49 + +from django.db import migrations, models + +sql = """ +UPDATE "public"."model" +SET "model_params_form" = '[{"attrs": {"max": 1, "min": 0.1, "step": 0.01, "precision": 2, "show-input": true, "show-input-controls": false}, "field": "temperature", "label": {"attrs": {"tooltip": "较高的数值会使输出更加随机,而较低的数值会使其更加集中和确定"}, "label": "温度", "input_type": "TooltipLabel", "props_info": {}}, "required": true, "input_type": "Slider", "props_info": {}, "trigger_type": "OPTION_LIST", "default_value": 0.5, "relation_show_field_dict": {}, "relation_trigger_field_dict": {}}, {"attrs": {"max": 100000, "min": 1, "step": 1, "precision": 0, "show-input": true, "show-input-controls": false}, "field": "max_tokens", "label": {"attrs": {"tooltip": "指定模型可生成的最大token个数"}, "label": "输出最大Tokens", "input_type": "TooltipLabel", "props_info": {}}, "required": true, "input_type": "Slider", "props_info": {}, "trigger_type": "OPTION_LIST", "default_value": 4096, "relation_show_field_dict": {}, "relation_trigger_field_dict": {}}]' 
+WHERE jsonb_array_length(model_params_form)=0 +""" + + +class Migration(migrations.Migration): + dependencies = [ + ('setting', '0008_modelparam'), + ] + + operations = [ + migrations.RunSQL(sql) + ] diff --git a/apps/setting/migrations/0010_log.py b/apps/setting/migrations/0010_log.py new file mode 100644 index 00000000000..2ce90187131 --- /dev/null +++ b/apps/setting/migrations/0010_log.py @@ -0,0 +1,33 @@ +# Generated by Django 4.2.18 on 2025-03-25 03:22 + +import common.encoder.encoder +from django.db import migrations, models +import uuid + + +class Migration(migrations.Migration): + + dependencies = [ + ('setting', '0009_set_default_model_params_form'), + ] + + operations = [ + migrations.CreateModel( + name='Log', + fields=[ + ('create_time', models.DateTimeField(auto_now_add=True, verbose_name='创建时间')), + ('update_time', models.DateTimeField(auto_now=True, verbose_name='修改时间')), + ('id', models.UUIDField(default=uuid.uuid1, editable=False, primary_key=True, serialize=False, verbose_name='主键id')), + ('menu', models.CharField(max_length=128, verbose_name='操作菜单')), + ('operate', models.CharField(max_length=128, verbose_name='操作')), + ('operation_object', models.JSONField(default=dict, encoder=common.encoder.encoder.SystemEncoder, verbose_name='操作对象')), + ('user', models.JSONField(default=dict, verbose_name='用户信息')), + ('status', models.IntegerField(verbose_name='状态')), + ('ip_address', models.CharField(max_length=128, verbose_name='ip地址')), + ('details', models.JSONField(default=dict, encoder=common.encoder.encoder.SystemEncoder, verbose_name='详情')), + ], + options={ + 'db_table': 'log', + }, + ), + ] diff --git a/apps/setting/models/log_management.py b/apps/setting/models/log_management.py new file mode 100644 index 00000000000..66de20468bc --- /dev/null +++ b/apps/setting/models/log_management.py @@ -0,0 +1,38 @@ +# coding=utf-8 +""" + @project: MaxKB + @Author:虎 + @file: log_management.py + @date:2025/3/17 9:54 + @desc: +""" +import uuid + +from django.db 
import models + +from common.encoder.encoder import SystemEncoder +from common.mixins.app_model_mixin import AppModelMixin + + +class Log(AppModelMixin): + """ + 审计日志 + """ + id = models.UUIDField(primary_key=True, max_length=128, default=uuid.uuid1, editable=False, verbose_name="主键id") + + menu = models.CharField(max_length=128, verbose_name="操作菜单") + + operate = models.CharField(max_length=128, verbose_name="操作") + + operation_object = models.JSONField(verbose_name="操作对象", default=dict, encoder=SystemEncoder) + + user = models.JSONField(verbose_name="用户信息", default=dict) + + status = models.IntegerField(verbose_name="状态") + + ip_address = models.CharField(max_length=128, verbose_name="ip地址") + + details = models.JSONField(verbose_name="详情", default=dict, encoder=SystemEncoder) + + class Meta: + db_table = "log" diff --git a/apps/setting/models/model_management.py b/apps/setting/models/model_management.py index 5bdd1b296e1..638161e4630 100644 --- a/apps/setting/models/model_management.py +++ b/apps/setting/models/model_management.py @@ -22,6 +22,20 @@ class Status(models.TextChoices): DOWNLOAD = "DOWNLOAD", '下载中' + PAUSE_DOWNLOAD = "PAUSE_DOWNLOAD", '暂停下载' + + +class PermissionType(models.TextChoices): + PUBLIC = "PUBLIC", '公开' + PRIVATE = "PRIVATE", "私有" + +class ModelParam(models.Model): + label = models.CharField(max_length=128, verbose_name="参数") + field = models.CharField(max_length=256, verbose_name="显示名称") + default_value = models.CharField(max_length=1000, verbose_name="默认值") + input_type = models.CharField(max_length=32, verbose_name="组件类型") + attrs = models.JSONField(verbose_name="属性") + required = models.BooleanField(verbose_name="必填") class Model(AppModelMixin): """ @@ -46,6 +60,17 @@ class Model(AppModelMixin): meta = models.JSONField(verbose_name="模型元数据,用于存储下载,或者错误信息", default=dict) + permission_type = models.CharField(max_length=20, verbose_name='权限类型', choices=PermissionType.choices, + default=PermissionType.PRIVATE) + + model_params_form = 
models.JSONField(verbose_name="模型参数配置", default=list) + + + def is_permission(self, user_id): + if self.permission_type == PermissionType.PUBLIC or str(user_id) == str(self.user_id): + return True + return False + class Meta: db_table = "model" unique_together = ['name', 'user_id'] diff --git a/apps/setting/models_provider/__init__.py b/apps/setting/models_provider/__init__.py index 53b7001e589..fb278630ad3 100644 --- a/apps/setting/models_provider/__init__.py +++ b/apps/setting/models_provider/__init__.py @@ -6,3 +6,89 @@ @date:2023/10/31 17:16 @desc: """ +import json +from typing import Dict + +from common.util.rsa_util import rsa_long_decrypt +from setting.models_provider.constants.model_provider_constants import ModelProvideConstants + + +def get_model_(provider, model_type, model_name, credential, model_id, use_local=False, **kwargs): + """ + 获取模型实例 + @param provider: 供应商 + @param model_type: 模型类型 + @param model_name: 模型名称 + @param credential: 认证信息 + @param model_id: 模型id + @param use_local: 是否调用本地模型 只适用于本地供应商 + @return: 模型实例 + """ + model = get_provider(provider).get_model(model_type, model_name, + json.loads( + rsa_long_decrypt(credential)), + model_id=model_id, + use_local=use_local, + streaming=True, **kwargs) + return model + + +def get_model(model, **kwargs): + """ + 获取模型实例 + @param model: model 数据库Model实例对象 + @return: 模型实例 + """ + return get_model_(model.provider, model.model_type, model.model_name, model.credential, str(model.id), **kwargs) + + +def get_provider(provider): + """ + 获取供应商实例 + @param provider: 供应商字符串 + @return: 供应商实例 + """ + return ModelProvideConstants[provider].value + + +def get_model_list(provider, model_type): + """ + 获取模型列表 + @param provider: 供应商字符串 + @param model_type: 模型类型 + @return: 模型列表 + """ + return get_provider(provider).get_model_list(model_type) + + +def get_model_credential(provider, model_type, model_name): + """ + 获取模型认证实例 + @param provider: 供应商字符串 + @param model_type: 模型类型 + @param model_name: 模型名称 + @return: 认证实例对象 + 
""" + return get_provider(provider).get_model_credential(model_type, model_name) + + +def get_model_type_list(provider): + """ + 获取模型类型列表 + @param provider: 供应商字符串 + @return: 模型类型列表 + """ + return get_provider(provider).get_model_type_list() + + +def is_valid_credential(provider, model_type, model_name, model_credential: Dict[str, object], model_params, raise_exception=False): + """ + 校验模型认证参数 + @param provider: 供应商字符串 + @param model_type: 模型类型 + @param model_name: 模型名称 + @param model_credential: 模型认证数据 + @param raise_exception: 是否抛出错误 + @return: True|False + """ + return get_provider(provider).is_valid_credential(model_type, model_name, model_credential, model_params, raise_exception) diff --git a/apps/setting/models_provider/base_model_provider.py b/apps/setting/models_provider/base_model_provider.py index 3796b5bbe5b..2b02bdc1fb1 100644 --- a/apps/setting/models_provider/base_model_provider.py +++ b/apps/setting/models_provider/base_model_provider.py @@ -9,11 +9,14 @@ from abc import ABC, abstractmethod from enum import Enum from functools import reduce -from typing import Dict, Iterator +from typing import Dict, Iterator, Type, List -from langchain.chat_models.base import BaseChatModel +from pydantic import BaseModel from common.exception.app_exception import AppApiException +from django.utils.translation import gettext_lazy as _ + +from common.util.common import encryption class DownModelChunkStatus(Enum): @@ -47,39 +50,74 @@ def to_dict(self): class IModelProvider(ABC): + @abstractmethod + def get_model_info_manage(self): + pass @abstractmethod def get_model_provide_info(self): pass - @abstractmethod def get_model_type_list(self): - pass + return self.get_model_info_manage().get_model_type_list() - @abstractmethod def get_model_list(self, model_type): - pass + if model_type is None: + raise AppApiException(500, _('Model type cannot be empty')) + return self.get_model_info_manage().get_model_list_by_model_type(model_type) - @abstractmethod def 
get_model_credential(self, model_type, model_name): - pass + model_info = self.get_model_info_manage().get_model_info(model_type, model_name) + return model_info.model_credential - @abstractmethod - def get_model(self, model_type, model_name, model_credential: Dict[str, object], **model_kwargs) -> BaseChatModel: - pass + def get_model_params(self, model_type, model_name): + model_info = self.get_model_info_manage().get_model_info(model_type, model_name) + return model_info.model_credential + + def is_valid_credential(self, model_type, model_name, model_credential: Dict[str, object], + model_params: Dict[str, object], raise_exception=False): + model_info = self.get_model_info_manage().get_model_info(model_type, model_name) + return model_info.model_credential.is_valid(model_type, model_name, model_credential, model_params, self, + raise_exception=raise_exception) + + def get_model(self, model_type, model_name, model_credential: Dict[str, object], **model_kwargs) -> BaseModel: + model_info = self.get_model_info_manage().get_model_info(model_type, model_name) + return model_info.model_class.new_instance(model_type, model_name, model_credential, **model_kwargs) - @abstractmethod def get_dialogue_number(self): - pass + return 3 def down_model(self, model_type: str, model_name, model_credential: Dict[str, object]) -> Iterator[DownModelChunk]: - raise AppApiException(500, "当前平台不支持下载模型") + raise AppApiException(500, _('The current platform does not support downloading models')) + + +class MaxKBBaseModel(ABC): + @staticmethod + @abstractmethod + def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs): + pass + + @staticmethod + def is_cache_model(): + return True + + @staticmethod + def filter_optional_params(model_kwargs): + optional_params = {} + for key, value in model_kwargs.items(): + if key not in ['model_id', 'use_local', 'streaming', 'show_ref_label']: + if key == 'extra_body' and isinstance(value, dict): + optional_params = 
{**optional_params, **value} + else: + optional_params[key] = value + return optional_params class BaseModelCredential(ABC): @abstractmethod - def is_valid(self, model_type: str, model_name, model: Dict[str, object], raise_exception=False): + def is_valid(self, model_type: str, model_name, model: Dict[str, object], model_params, provider, + raise_exception=True): pass @abstractmethod @@ -90,6 +128,13 @@ def encryption_dict(self, model_info: Dict[str, object]): """ pass + def get_model_params_setting_form(self, model_name): + """ + 模型参数设置表单 + :return: + """ + pass + @staticmethod def encryption(message: str): """ @@ -97,31 +142,28 @@ def encryption(message: str): :param message: :return: """ - max_pre_len = 8 - max_post_len = 4 - message_len = len(message) - pre_len = int(message_len / 5 * 2) - post_len = int(message_len / 5 * 1) - pre_str = "".join([message[index] for index in - range(0, max_pre_len if pre_len > max_pre_len else 1 if pre_len <= 0 else int(pre_len))]) - end_str = "".join( - [message[index] for index in - range(message_len - (int(post_len) if pre_len < max_post_len else max_post_len), message_len)]) - content = "***************" - return pre_str + content + end_str + return encryption(message) class ModelTypeConst(Enum): - LLM = {'code': 'LLM', 'message': '大语言模型'} + LLM = {'code': 'LLM', 'message': _('LLM')} + EMBEDDING = {'code': 'EMBEDDING', 'message': _('Embedding Model')} + STT = {'code': 'STT', 'message': _('Speech2Text')} + TTS = {'code': 'TTS', 'message': _('TTS')} + IMAGE = {'code': 'IMAGE', 'message': _('Vision Model')} + TTI = {'code': 'TTI', 'message': _('Image Generation')} + RERANKER = {'code': 'RERANKER', 'message': _('Rerank')} class ModelInfo: def __init__(self, name: str, desc: str, model_type: ModelTypeConst, model_credential: BaseModelCredential, + model_class: Type[MaxKBBaseModel], **keywords): self.name = name self.desc = desc self.model_type = model_type.name self.model_credential = model_credential + self.model_class = 
model_class if keywords is not None: for key in keywords.keys(): self.__setattr__(key, keywords.get(key)) @@ -143,10 +185,69 @@ def get_desc(self): def get_model_type(self): return self.model_type + def get_model_class(self): + return self.model_class + def to_dict(self): return reduce(lambda x, y: {**x, **y}, [{attr: self.__getattribute__(attr)} for attr in vars(self) if - not attr.startswith("__") and not attr == 'model_credential'], {}) + not attr.startswith("__") and not attr == 'model_credential' and not attr == 'model_class'], {}) + + +class ModelInfoManage: + def __init__(self): + self.model_dict = {} + self.model_list = [] + self.default_model_list = [] + self.default_model_dict = {} + + def append_model_info(self, model_info: ModelInfo): + self.model_list.append(model_info) + model_type_dict = self.model_dict.get(model_info.model_type) + if model_type_dict is None: + self.model_dict[model_info.model_type] = {model_info.name: model_info} + else: + model_type_dict[model_info.name] = model_info + + def append_default_model_info(self, model_info: ModelInfo): + self.default_model_list.append(model_info) + self.default_model_dict[model_info.model_type] = model_info + + def get_model_list(self): + return [model.to_dict() for model in self.model_list] + + def get_model_list_by_model_type(self, model_type): + return [model.to_dict() for model in self.model_list if model.model_type == model_type] + + def get_model_type_list(self): + return [{'key': _type.value.get('message'), 'value': _type.value.get('code')} for _type in ModelTypeConst if + len([model for model in self.model_list if model.model_type == _type.name]) > 0] + + def get_model_info(self, model_type, model_name) -> ModelInfo: + model_info = self.model_dict.get(model_type, {}).get(model_name, self.default_model_dict.get(model_type)) + if model_info is None: + raise AppApiException(500, _('The model does not support')) + return model_info + + class builder: + def __init__(self): + self.modelInfoManage = 
ModelInfoManage() + + def append_model_info(self, model_info: ModelInfo): + self.modelInfoManage.append_model_info(model_info) + return self + + def append_model_info_list(self, model_info_list: List[ModelInfo]): + for model_info in model_info_list: + self.modelInfoManage.append_model_info(model_info) + return self + + def append_default_model_info(self, model_info: ModelInfo): + self.modelInfoManage.append_default_model_info(model_info) + return self + + def build(self): + return self.modelInfoManage class ModelProvideInfo: diff --git a/apps/setting/models_provider/constants/model_provider_constants.py b/apps/setting/models_provider/constants/model_provider_constants.py index 0a7565f383b..e68b9361f0b 100644 --- a/apps/setting/models_provider/constants/model_provider_constants.py +++ b/apps/setting/models_provider/constants/model_provider_constants.py @@ -8,16 +8,32 @@ """ from enum import Enum +from setting.models_provider.impl.aliyun_bai_lian_model_provider.aliyun_bai_lian_model_provider import \ + AliyunBaiLianModelProvider +from setting.models_provider.impl.anthropic_model_provider.anthropic_model_provider import AnthropicModelProvider +from setting.models_provider.impl.aws_bedrock_model_provider.aws_bedrock_model_provider import BedrockModelProvider from setting.models_provider.impl.azure_model_provider.azure_model_provider import AzureModelProvider +from setting.models_provider.impl.deepseek_model_provider.deepseek_model_provider import DeepSeekModelProvider +from setting.models_provider.impl.gemini_model_provider.gemini_model_provider import GeminiModelProvider +from setting.models_provider.impl.kimi_model_provider.kimi_model_provider import KimiModelProvider from setting.models_provider.impl.ollama_model_provider.ollama_model_provider import OllamaModelProvider from setting.models_provider.impl.openai_model_provider.openai_model_provider import OpenAIModelProvider from setting.models_provider.impl.qwen_model_provider.qwen_model_provider import 
QwenModelProvider +from setting.models_provider.impl.regolo_model_provider.regolo_model_provider import \ + RegoloModelProvider +from setting.models_provider.impl.siliconCloud_model_provider.siliconCloud_model_provider import \ + SiliconCloudModelProvider +from setting.models_provider.impl.tencent_cloud_model_provider.tencent_cloud_model_provider import \ + TencentCloudModelProvider +from setting.models_provider.impl.tencent_model_provider.tencent_model_provider import TencentModelProvider +from setting.models_provider.impl.vllm_model_provider.vllm_model_provider import VllmModelProvider +from setting.models_provider.impl.volcanic_engine_model_provider.volcanic_engine_model_provider import \ + VolcanicEngineModelProvider from setting.models_provider.impl.wenxin_model_provider.wenxin_model_provider import WenxinModelProvider -from setting.models_provider.impl.kimi_model_provider.kimi_model_provider import KimiModelProvider from setting.models_provider.impl.xf_model_provider.xf_model_provider import XunFeiModelProvider +from setting.models_provider.impl.xinference_model_provider.xinference_model_provider import XinferenceModelProvider from setting.models_provider.impl.zhipu_model_provider.zhipu_model_provider import ZhiPuModelProvider -from setting.models_provider.impl.deepseek_model_provider.deepseek_model_provider import DeepSeekModelProvider -from setting.models_provider.impl.gemini_model_provider.gemini_model_provider import GeminiModelProvider +from setting.models_provider.impl.local_model_provider.local_model_provider import LocalModelProvider class ModelProvideConstants(Enum): @@ -31,3 +47,14 @@ class ModelProvideConstants(Enum): model_xf_provider = XunFeiModelProvider() model_deepseek_provider = DeepSeekModelProvider() model_gemini_provider = GeminiModelProvider() + model_volcanic_engine_provider = VolcanicEngineModelProvider() + model_tencent_provider = TencentModelProvider() + model_tencent_cloud_provider = TencentCloudModelProvider() + 
model_aws_bedrock_provider = BedrockModelProvider() + model_local_provider = LocalModelProvider() + model_xinference_provider = XinferenceModelProvider() + model_vllm_provider = VllmModelProvider() + aliyun_bai_lian_model_provider = AliyunBaiLianModelProvider() + model_anthropic_provider = AnthropicModelProvider() + model_siliconCloud_provider = SiliconCloudModelProvider() + model_regolo_provider = RegoloModelProvider() diff --git a/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/__init__.py b/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/__init__.py new file mode 100644 index 00000000000..3c10c5535f7 --- /dev/null +++ b/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/__init__.py @@ -0,0 +1,8 @@ +# coding=utf-8 +""" + @project: MaxKB + @Author:虎 + @file: __init__.py + @date:2024/9/9 17:42 + @desc: +""" diff --git a/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/aliyun_bai_lian_model_provider.py b/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/aliyun_bai_lian_model_provider.py new file mode 100644 index 00000000000..b1d72f0869a --- /dev/null +++ b/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/aliyun_bai_lian_model_provider.py @@ -0,0 +1,117 @@ +# coding=utf-8 +""" + @project: MaxKB + @Author:虎 + @file: aliyun_bai_lian_model_provider.py + @date:2024/9/9 17:43 + @desc: +""" +import os + +from common.util.file_util import get_file_content +from setting.models_provider.base_model_provider import ModelProvideInfo, ModelTypeConst, ModelInfo, IModelProvider, \ + ModelInfoManage +from setting.models_provider.impl.aliyun_bai_lian_model_provider.credential.embedding import \ + AliyunBaiLianEmbeddingCredential +from setting.models_provider.impl.aliyun_bai_lian_model_provider.credential.image import QwenVLModelCredential +from setting.models_provider.impl.aliyun_bai_lian_model_provider.credential.llm import BaiLianLLMModelCredential +from 
setting.models_provider.impl.aliyun_bai_lian_model_provider.credential.reranker import \ + AliyunBaiLianRerankerCredential +from setting.models_provider.impl.aliyun_bai_lian_model_provider.credential.stt import AliyunBaiLianSTTModelCredential +from setting.models_provider.impl.aliyun_bai_lian_model_provider.credential.tti import QwenTextToImageModelCredential +from setting.models_provider.impl.aliyun_bai_lian_model_provider.credential.tts import AliyunBaiLianTTSModelCredential +from setting.models_provider.impl.aliyun_bai_lian_model_provider.model.embedding import AliyunBaiLianEmbedding +from setting.models_provider.impl.aliyun_bai_lian_model_provider.model.image import QwenVLChatModel +from setting.models_provider.impl.aliyun_bai_lian_model_provider.model.llm import BaiLianChatModel +from setting.models_provider.impl.aliyun_bai_lian_model_provider.model.reranker import AliyunBaiLianReranker +from setting.models_provider.impl.aliyun_bai_lian_model_provider.model.stt import AliyunBaiLianSpeechToText +from setting.models_provider.impl.aliyun_bai_lian_model_provider.model.tti import QwenTextToImageModel +from setting.models_provider.impl.aliyun_bai_lian_model_provider.model.tts import AliyunBaiLianTextToSpeech +from smartdoc.conf import PROJECT_DIR +from django.utils.translation import gettext as _, gettext + +aliyun_bai_lian_model_credential = AliyunBaiLianRerankerCredential() +aliyun_bai_lian_tts_model_credential = AliyunBaiLianTTSModelCredential() +aliyun_bai_lian_stt_model_credential = AliyunBaiLianSTTModelCredential() +aliyun_bai_lian_embedding_model_credential = AliyunBaiLianEmbeddingCredential() +aliyun_bai_lian_llm_model_credential = BaiLianLLMModelCredential() +qwenvl_model_credential = QwenVLModelCredential() +qwentti_model_credential = QwenTextToImageModelCredential() + +model_info_list = [ModelInfo('gte-rerank', + _('With the GTE-Rerank text sorting series model developed by Alibaba Tongyi Lab, developers can integrate high-quality text retrieval and 
sorting through the LlamaIndex framework.'), + ModelTypeConst.RERANKER, aliyun_bai_lian_model_credential, AliyunBaiLianReranker), + ModelInfo('paraformer-realtime-v2', + _('Chinese (including various dialects such as Cantonese), English, Japanese, and Korean support free switching between multiple languages.'), + ModelTypeConst.STT, aliyun_bai_lian_stt_model_credential, AliyunBaiLianSpeechToText), + ModelInfo('cosyvoice-v1', + _('CosyVoice is based on a new generation of large generative speech models, which can predict emotions, intonation, rhythm, etc. based on context, and has better anthropomorphic effects.'), + ModelTypeConst.TTS, aliyun_bai_lian_tts_model_credential, AliyunBaiLianTextToSpeech), + ModelInfo('text-embedding-v1', + _("Universal text vector is Tongyi Lab's multi-language text unified vector model based on the LLM base. It provides high-level vector services for multiple mainstream languages around the world and helps developers quickly convert text data into high-quality vector data."), + ModelTypeConst.EMBEDDING, aliyun_bai_lian_embedding_model_credential, + AliyunBaiLianEmbedding), + ModelInfo('qwen3-0.6b', '', ModelTypeConst.LLM, aliyun_bai_lian_llm_model_credential, + BaiLianChatModel), + ModelInfo('qwen3-1.7b', '', ModelTypeConst.LLM, aliyun_bai_lian_llm_model_credential, + BaiLianChatModel), + ModelInfo('qwen3-4b', '', ModelTypeConst.LLM, aliyun_bai_lian_llm_model_credential, + BaiLianChatModel), + ModelInfo('qwen3-8b', '', ModelTypeConst.LLM, aliyun_bai_lian_llm_model_credential, + BaiLianChatModel), + ModelInfo('qwen3-14b', '', ModelTypeConst.LLM, aliyun_bai_lian_llm_model_credential, + BaiLianChatModel), + ModelInfo('qwen3-32b', '', ModelTypeConst.LLM, aliyun_bai_lian_llm_model_credential, + BaiLianChatModel), + ModelInfo('qwen3-30b-a3b', '', ModelTypeConst.LLM, aliyun_bai_lian_llm_model_credential, + BaiLianChatModel), + ModelInfo('qwen3-235b-a22b', '', ModelTypeConst.LLM, aliyun_bai_lian_llm_model_credential, + BaiLianChatModel), + + 
ModelInfo('qwen-turbo', '', ModelTypeConst.LLM, aliyun_bai_lian_llm_model_credential, + BaiLianChatModel), + ModelInfo('qwen-plus', '', ModelTypeConst.LLM, aliyun_bai_lian_llm_model_credential, + BaiLianChatModel), + ModelInfo('qwen-max', '', ModelTypeConst.LLM, aliyun_bai_lian_llm_model_credential, + BaiLianChatModel) + ] + +module_info_vl_list = [ + ModelInfo('qwen-vl-max', '', ModelTypeConst.IMAGE, qwenvl_model_credential, QwenVLChatModel), + ModelInfo('qwen-vl-max-0809', '', ModelTypeConst.IMAGE, qwenvl_model_credential, QwenVLChatModel), + ModelInfo('qwen-vl-plus-0809', '', ModelTypeConst.IMAGE, qwenvl_model_credential, QwenVLChatModel), +] +module_info_tti_list = [ + ModelInfo('wanx-v1', + _('Tongyi Wanxiang - a large image model for text generation, supports bilingual input in Chinese and English, and supports the input of reference pictures for reference content or reference style migration. Key styles include but are not limited to watercolor, oil painting, Chinese painting, sketch, flat illustration, two-dimensional, and 3D. 
Cartoon.'), + ModelTypeConst.TTI, qwentti_model_credential, QwenTextToImageModel), +] + +model_info_manage = ( + ModelInfoManage.builder() + .append_model_info_list(model_info_list) + .append_model_info_list(module_info_vl_list) + .append_default_model_info(module_info_vl_list[0]) + .append_model_info_list(module_info_tti_list) + .append_default_model_info(module_info_tti_list[0]) + .append_default_model_info(model_info_list[1]) + .append_default_model_info(model_info_list[2]) + .append_default_model_info(model_info_list[3]) + .append_default_model_info(model_info_list[4]) + .append_default_model_info(model_info_list[0]) + .build() +) + + +class AliyunBaiLianModelProvider(IModelProvider): + + def get_model_info_manage(self): + return model_info_manage + + def get_model_provide_info(self): + return ModelProvideInfo(provider='aliyun_bai_lian_model_provider', name=gettext('Alibaba Cloud Bailian'), + icon=get_file_content( + os.path.join(PROJECT_DIR, "apps", "setting", 'models_provider', 'impl', + 'aliyun_bai_lian_model_provider', + 'icon', + 'aliyun_bai_lian_icon_svg'))) diff --git a/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/embedding.py b/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/embedding.py new file mode 100644 index 00000000000..f8d527ff39d --- /dev/null +++ b/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/embedding.py @@ -0,0 +1,53 @@ +# coding=utf-8 +""" + @project: MaxKB + @Author:虎 + @file: embedding.py + @date:2024/10/16 17:01 + @desc: +""" +import traceback +from typing import Dict + +from django.utils.translation import gettext as _ + +from common import forms +from common.exception.app_exception import AppApiException +from common.forms import BaseForm +from setting.models_provider.base_model_provider import ValidCode, BaseModelCredential +from setting.models_provider.impl.aliyun_bai_lian_model_provider.model.embedding import AliyunBaiLianEmbedding + + +class 
AliyunBaiLianEmbeddingCredential(BaseForm, BaseModelCredential): + + def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], model_params, provider, + raise_exception=False): + model_type_list = provider.get_model_type_list() + if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))): + raise AppApiException(ValidCode.valid_error.value, + _('{model_type} Model type is not supported').format(model_type=model_type)) + for key in ['dashscope_api_key']: + if key not in model_credential: + if raise_exception: + raise AppApiException(ValidCode.valid_error.value, _('{key} is required').format(key=key)) + else: + return False + try: + model: AliyunBaiLianEmbedding = provider.get_model(model_type, model_name, model_credential) + model.embed_query(_('Hello')) + except Exception as e: + traceback.print_exc() + if isinstance(e, AppApiException): + raise e + if raise_exception: + raise AppApiException(ValidCode.valid_error.value, + _('Verification failed, please check whether the parameters are correct: {error}').format( + error=str(e))) + else: + return False + return True + + def encryption_dict(self, model: Dict[str, object]): + return {**model, 'dashscope_api_key': super().encryption(model.get('dashscope_api_key', ''))} + + dashscope_api_key = forms.PasswordInputField('API Key', required=True) diff --git a/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/image.py b/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/image.py new file mode 100644 index 00000000000..3f3caafa0fd --- /dev/null +++ b/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/image.py @@ -0,0 +1,78 @@ +# coding=utf-8 +""" + @project: MaxKB + @Author:虎 + @file: llm.py + @date:2024/7/11 18:41 + @desc: +""" +import traceback +from typing import Dict + +from django.utils.translation import gettext_lazy as _, gettext +from langchain_core.messages import HumanMessage + +from 
common import forms +from common.exception.app_exception import AppApiException +from common.forms import BaseForm, TooltipLabel +from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode + + +class QwenModelParams(BaseForm): + temperature = forms.SliderField(TooltipLabel(_('Temperature'), + _('Higher values make the output more random, while lower values make it more focused and deterministic')), + required=True, default_value=1.0, + _min=0.1, + _max=1.9, + _step=0.01, + precision=2) + + max_tokens = forms.SliderField( + TooltipLabel(_('Output the maximum Tokens'), + _('Specify the maximum number of tokens that the model can generate')), + required=True, default_value=800, + _min=1, + _max=100000, + _step=1, + precision=0) + + +class QwenVLModelCredential(BaseForm, BaseModelCredential): + + def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], model_params, provider, + raise_exception=False): + model_type_list = provider.get_model_type_list() + if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))): + raise AppApiException(ValidCode.valid_error.value, + gettext('{model_type} Model type is not supported').format(model_type=model_type)) + for key in ['api_key']: + if key not in model_credential: + if raise_exception: + raise AppApiException(ValidCode.valid_error.value, gettext('{key} is required').format(key=key)) + else: + return False + try: + model = provider.get_model(model_type, model_name, model_credential, **model_params) + res = model.stream([HumanMessage(content=[{"type": "text", "text": gettext('Hello')}])]) + for chunk in res: + print(chunk) + except Exception as e: + traceback.print_exc() + if isinstance(e, AppApiException): + raise e + if raise_exception: + raise AppApiException(ValidCode.valid_error.value, + gettext( + 'Verification failed, please check whether the parameters are correct: {error}').format( + error=str(e))) + else: + return False + return True + + 
def encryption_dict(self, model: Dict[str, object]): + return {**model, 'api_key': super().encryption(model.get('api_key', ''))} + + api_key = forms.PasswordInputField('API Key', required=True) + + def get_model_params_setting_form(self, model_name): + return QwenModelParams() diff --git a/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/llm.py b/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/llm.py new file mode 100644 index 00000000000..9da30b72796 --- /dev/null +++ b/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/llm.py @@ -0,0 +1,100 @@ +# coding=utf-8 +import traceback +from typing import Dict + +from langchain_core.messages import HumanMessage + +from common import forms +from common.exception.app_exception import AppApiException +from common.forms import BaseForm, TooltipLabel +from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode +from django.utils.translation import gettext_lazy as _, gettext + + +class BaiLianLLMModelParams(BaseForm): + temperature = forms.SliderField(TooltipLabel(_('Temperature'), + _('Higher values make the output more random, while lower values make it more focused and deterministic')), + required=True, default_value=0.7, + _min=0.1, + _max=1.0, + _step=0.01, + precision=2) + + max_tokens = forms.SliderField( + TooltipLabel(_('Output the maximum Tokens'), + _('Specify the maximum number of tokens that the model can generate')), + required=True, default_value=800, + _min=1, + _max=100000, + _step=1, + precision=0) + + +class BaiLianLLMStreamModelParams(BaseForm): + temperature = forms.SliderField(TooltipLabel(_('Temperature'), + _('Higher values make the output more random, while lower values make it more focused and deterministic')), + required=True, default_value=0.7, + _min=0.1, + _max=1.0, + _step=0.01, + precision=2) + + max_tokens = forms.SliderField( + TooltipLabel(_('Output the maximum Tokens'), + _('Specify the 
maximum number of tokens that the model can generate')), + required=True, default_value=800, + _min=1, + _max=100000, + _step=1, + precision=0) + + stream = forms.SwitchField(label=TooltipLabel(_('Is the answer in streaming mode'), + _('Is the answer in streaming mode')), + required=True, default_value=True) + + +class BaiLianLLMModelCredential(BaseForm, BaseModelCredential): + + def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], model_params, provider, + raise_exception=False): + model_type_list = provider.get_model_type_list() + if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))): + raise AppApiException(ValidCode.valid_error.value, + gettext('{model_type} Model type is not supported').format(model_type=model_type)) + + for key in ['api_base', 'api_key']: + if key not in model_credential: + if raise_exception: + raise AppApiException(ValidCode.valid_error.value, gettext('{key} is required').format(key=key)) + else: + return False + try: + model = provider.get_model(model_type, model_name, model_credential, **model_params) + if model_params.get('stream'): + for res in model.stream([HumanMessage(content=gettext('Hello'))]): + pass + else: + model.invoke([HumanMessage(content=gettext('Hello'))]) + except Exception as e: + traceback.print_exc() + if isinstance(e, AppApiException): + raise e + if raise_exception: + raise AppApiException(ValidCode.valid_error.value, + gettext( + 'Verification failed, please check whether the parameters are correct: {error}').format( + error=str(e))) + else: + return False + return True + + def encryption_dict(self, model: Dict[str, object]): + return {**model, 'api_key': super().encryption(model.get('api_key', ''))} + + api_base = forms.TextInputField('API URL', required=True) + api_key = forms.PasswordInputField('API Key', required=True) + + def get_model_params_setting_form(self, model_name): + if 'qwen3' in model_name: + return BaiLianLLMStreamModelParams() + return 
BaiLianLLMModelParams() diff --git a/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/reranker.py b/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/reranker.py new file mode 100644 index 00000000000..8386c562ef0 --- /dev/null +++ b/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/reranker.py @@ -0,0 +1,53 @@ +# coding=utf-8 +""" + @project: MaxKB + @Author:虎 + @file: reranker.py + @date:2024/9/9 17:51 + @desc: +""" +import traceback +from typing import Dict + +from django.utils.translation import gettext as _ +from langchain_core.documents import Document + +from common import forms +from common.exception.app_exception import AppApiException +from common.forms import BaseForm +from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode +from setting.models_provider.impl.aliyun_bai_lian_model_provider.model.reranker import AliyunBaiLianReranker + + +class AliyunBaiLianRerankerCredential(BaseForm, BaseModelCredential): + + def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], model_params, provider, + raise_exception=False): + if not model_type == 'RERANKER': + raise AppApiException(ValidCode.valid_error.value, + _('{model_type} Model type is not supported').format(model_type=model_type)) + for key in ['dashscope_api_key']: + if key not in model_credential: + if raise_exception: + raise AppApiException(ValidCode.valid_error.value, _('{key} is required').format(key=key)) + else: + return False + try: + model: AliyunBaiLianReranker = provider.get_model(model_type, model_name, model_credential) + model.compress_documents([Document(page_content=_('Hello'))], _('Hello')) + except Exception as e: + traceback.print_exc() + if isinstance(e, AppApiException): + raise e + if raise_exception: + raise AppApiException(ValidCode.valid_error.value, + _('Verification failed, please check whether the parameters are correct: {error}').format( + 
error=str(e))) + else: + return False + return True + + def encryption_dict(self, model: Dict[str, object]): + return {**model, 'dashscope_api_key': super().encryption(model.get('dashscope_api_key', ''))} + + dashscope_api_key = forms.PasswordInputField('API Key', required=True) diff --git a/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/stt.py b/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/stt.py new file mode 100644 index 00000000000..e659b31af92 --- /dev/null +++ b/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/stt.py @@ -0,0 +1,48 @@ +# coding=utf-8 +import traceback +from typing import Dict + +from django.utils.translation import gettext as _ + +from common import forms +from common.exception.app_exception import AppApiException +from common.forms import BaseForm +from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode + + +class AliyunBaiLianSTTModelCredential(BaseForm, BaseModelCredential): + api_key = forms.PasswordInputField("API Key", required=True) + + def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], model_params, provider, + raise_exception=False): + model_type_list = provider.get_model_type_list() + if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))): + raise AppApiException(ValidCode.valid_error.value, + _('{model_type} Model type is not supported').format(model_type=model_type)) + + for key in ['api_key']: + if key not in model_credential: + if raise_exception: + raise AppApiException(ValidCode.valid_error.value, _('{key} is required').format(key=key)) + else: + return False + try: + model = provider.get_model(model_type, model_name, model_credential) + model.check_auth() + except Exception as e: + traceback.print_exc() + if isinstance(e, AppApiException): + raise e + if raise_exception: + raise AppApiException(ValidCode.valid_error.value, + _('Verification failed, 
please check whether the parameters are correct: {error}').format( + error=str(e))) + else: + return False + return True + + def encryption_dict(self, model: Dict[str, object]): + return {**model, 'api_key': super().encryption(model.get('api_key', ''))} + + def get_model_params_setting_form(self, model_name): + pass diff --git a/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py b/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py new file mode 100644 index 00000000000..cc904fe226f --- /dev/null +++ b/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py @@ -0,0 +1,98 @@ +# coding=utf-8 +""" + @project: MaxKB + @Author:虎 + @file: llm.py + @date:2024/7/11 18:41 + @desc: +""" +import traceback +from typing import Dict + +from django.utils.translation import gettext_lazy as _, gettext + +from common import forms +from common.exception.app_exception import AppApiException +from common.forms import BaseForm, TooltipLabel +from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode + + +class QwenModelParams(BaseForm): + size = forms.SingleSelect( + TooltipLabel(_('Image size'), _('Specify the size of the generated image, such as: 1024x1024')), + required=True, + default_value='1024*1024', + option_list=[ + {'value': '1024*1024', 'label': '1024*1024'}, + {'value': '720*1280', 'label': '720*1280'}, + {'value': '768*1152', 'label': '768*1152'}, + {'value': '1280*720', 'label': '1280*720'}, + ], + text_field='label', + value_field='value') + n = forms.SliderField( + TooltipLabel(_('Number of pictures'), _('Specify the number of generated images')), + required=True, default_value=1, + _min=1, + _max=4, + _step=1, + precision=0) + style = forms.SingleSelect( + TooltipLabel(_('Style'), _('Specify the style of generated images')), + required=True, + default_value='', + option_list=[ + {'value': '', 'label': _('Default value, the image style is randomly output 
by the model')}, + {'value': '', 'label': _('photography')}, + {'value': '', 'label': _('Portraits')}, + {'value': '<3d cartoon>', 'label': _('3D cartoon')}, + {'value': '', 'label': _('animation')}, + {'value': '', 'label': _('painting')}, + {'value': '', 'label': _('watercolor')}, + {'value': '', 'label': _('sketch')}, + {'value': '', 'label': _('Chinese painting')}, + {'value': '', 'label': _('flat illustration')}, + ], + text_field='label', + value_field='value' + ) + + +class QwenTextToImageModelCredential(BaseForm, BaseModelCredential): + + def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], model_params, provider, + raise_exception=False): + model_type_list = provider.get_model_type_list() + if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))): + raise AppApiException(ValidCode.valid_error.value, + gettext('{model_type} Model type is not supported').format(model_type=model_type)) + for key in ['api_key']: + if key not in model_credential: + if raise_exception: + raise AppApiException(ValidCode.valid_error.value, gettext('{key} is required').format(key=key)) + else: + return False + try: + model = provider.get_model(model_type, model_name, model_credential, **model_params) + res = model.check_auth() + print(res) + except Exception as e: + traceback.print_exc() + if isinstance(e, AppApiException): + raise e + if raise_exception: + raise AppApiException(ValidCode.valid_error.value, + gettext( + 'Verification failed, please check whether the parameters are correct: {error}').format( + error=str(e))) + else: + return False + return True + + def encryption_dict(self, model: Dict[str, object]): + return {**model, 'api_key': super().encryption(model.get('api_key', ''))} + + api_key = forms.PasswordInputField('API Key', required=True) + + def get_model_params_setting_form(self, model_name): + return QwenModelParams() diff --git 
a/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py b/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py new file mode 100644 index 00000000000..6e2c64c84ee --- /dev/null +++ b/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py @@ -0,0 +1,83 @@ +# coding=utf-8 +import traceback +from typing import Dict + +from django.utils.translation import gettext_lazy as _, gettext + +from common import forms +from common.exception.app_exception import AppApiException +from common.forms import BaseForm, TooltipLabel +from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode + + +class AliyunBaiLianTTSModelGeneralParams(BaseForm): + voice = forms.SingleSelect( + TooltipLabel(_('timbre'), _('Chinese sounds can support mixed scenes of Chinese and English')), + required=True, default_value='longxiaochun', + text_field='value', + value_field='value', + option_list=[ + {'text': _('Long Xiaochun'), 'value': 'longxiaochun'}, + {'text': _('Long Xiaoxia'), 'value': 'longxiaoxia'}, + {'text': _('Long Xiaochen'), 'value': 'longxiaocheng'}, + {'text': _('Long Xiaobai'), 'value': 'longxiaobai'}, + {'text': _('Long laotie'), 'value': 'longlaotie'}, + {'text': _('Long Shu'), 'value': 'longshu'}, + {'text': _('Long Shuo'), 'value': 'longshuo'}, + {'text': _('Long Jing'), 'value': 'longjing'}, + {'text': _('Long Miao'), 'value': 'longmiao'}, + {'text': _('Long Yue'), 'value': 'longyue'}, + {'text': _('Long Yuan'), 'value': 'longyuan'}, + {'text': _('Long Fei'), 'value': 'longfei'}, + {'text': _('Long Jielidou'), 'value': 'longjielidou'}, + {'text': _('Long Tong'), 'value': 'longtong'}, + {'text': _('Long Xiang'), 'value': 'longxiang'}, + {'text': 'Stella', 'value': 'loongstella'}, + {'text': 'Bella', 'value': 'loongbella'}, + ]) + speech_rate = forms.SliderField( + TooltipLabel(_('speaking speed'), _('[0.5,2], the default is 1, usually one decimal place is enough')), + 
required=True, default_value=1, + _min=0.5, + _max=2, + _step=0.1, + precision=1) + + +class AliyunBaiLianTTSModelCredential(BaseForm, BaseModelCredential): + api_key = forms.PasswordInputField("API Key", required=True) + + def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], model_params, provider, + raise_exception=False): + model_type_list = provider.get_model_type_list() + if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))): + raise AppApiException(ValidCode.valid_error.value, + gettext('{model_type} Model type is not supported').format(model_type=model_type)) + + for key in ['api_key']: + if key not in model_credential: + if raise_exception: + raise AppApiException(ValidCode.valid_error.value, gettext('{key} is required').format(key=key)) + else: + return False + try: + model = provider.get_model(model_type, model_name, model_credential, **model_params) + model.check_auth() + except Exception as e: + traceback.print_exc() + if isinstance(e, AppApiException): + raise e + if raise_exception: + raise AppApiException(ValidCode.valid_error.value, + gettext( + 'Verification failed, please check whether the parameters are correct: {error}').format( + error=str(e))) + else: + return False + return True + + def encryption_dict(self, model: Dict[str, object]): + return {**model, 'api_key': super().encryption(model.get('api_key', ''))} + + def get_model_params_setting_form(self, model_name): + return AliyunBaiLianTTSModelGeneralParams() diff --git a/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/icon/aliyun_bai_lian_icon_svg b/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/icon/aliyun_bai_lian_icon_svg new file mode 100644 index 00000000000..0678828dd27 --- /dev/null +++ b/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/icon/aliyun_bai_lian_icon_svg @@ -0,0 +1 @@ +【icon】阿里百炼大模型 \ No newline at end of file diff --git 
a/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/model/embedding.py b/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/model/embedding.py new file mode 100644 index 00000000000..401d12ee924 --- /dev/null +++ b/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/model/embedding.py @@ -0,0 +1,66 @@ +# coding=utf-8 +""" + @project: MaxKB + @Author:虎 + @file: embedding.py + @date:2024/10/16 16:34 + @desc: +""" +from functools import reduce +from typing import Dict, List + +from langchain_community.embeddings import DashScopeEmbeddings +from langchain_community.embeddings.dashscope import embed_with_retry + +from setting.models_provider.base_model_provider import MaxKBBaseModel + + +def proxy_embed_documents(texts: List[str], step_size, embed_documents): + value = [embed_documents(texts[start_index:start_index + step_size]) for start_index in + range(0, len(texts), step_size)] + return reduce(lambda x, y: [*x, *y], value, []) + + +class AliyunBaiLianEmbedding(MaxKBBaseModel, DashScopeEmbeddings): + @staticmethod + def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs): + return AliyunBaiLianEmbedding( + model=model_name, + dashscope_api_key=model_credential.get('dashscope_api_key') + ) + + def embed_documents(self, texts: List[str]) -> List[List[float]]: + if self.model == 'text-embedding-v3': + return proxy_embed_documents(texts, 6, self._embed_documents) + return self._embed_documents(texts) + + def _embed_documents(self, texts: List[str]) -> List[List[float]]: + """Call out to DashScope's embedding endpoint for embedding search docs. + + Args: + texts: The list of texts to embed. + chunk_size: The chunk size of embeddings. If None, will use the chunk size + specified by the class. + + Returns: + List of embeddings, one for each text. 
+ """ + embeddings = embed_with_retry( + self, input=texts, text_type="document", model=self.model + ) + embedding_list = [item["embedding"] for item in embeddings] + return embedding_list + + def embed_query(self, text: str) -> List[float]: + """Call out to DashScope's embedding endpoint for embedding query text. + + Args: + text: The text to embed. + + Returns: + Embedding for the text. + """ + embedding = embed_with_retry( + self, input=[text], text_type="document", model=self.model + )[0]["embedding"] + return embedding diff --git a/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/model/iat_mp3_16k.mp3 b/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/model/iat_mp3_16k.mp3 new file mode 100644 index 00000000000..75e744c8ff5 Binary files /dev/null and b/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/model/iat_mp3_16k.mp3 differ diff --git a/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/model/image.py b/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/model/image.py new file mode 100644 index 00000000000..7cda97f2388 --- /dev/null +++ b/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/model/image.py @@ -0,0 +1,22 @@ +# coding=utf-8 + +from typing import Dict + +from setting.models_provider.base_model_provider import MaxKBBaseModel +from setting.models_provider.impl.base_chat_open_ai import BaseChatOpenAI + + +class QwenVLChatModel(MaxKBBaseModel, BaseChatOpenAI): + + @staticmethod + def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs): + optional_params = MaxKBBaseModel.filter_optional_params(model_kwargs) + chat_tong_yi = QwenVLChatModel( + model_name=model_name, + openai_api_key=model_credential.get('api_key'), + openai_api_base='https://dashscope.aliyuncs.com/compatible-mode/v1', + streaming=True, + stream_usage=True, + extra_body=optional_params + ) + return chat_tong_yi diff --git 
a/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/model/llm.py b/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/model/llm.py new file mode 100644 index 00000000000..ee3ee6488c2 --- /dev/null +++ b/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/model/llm.py @@ -0,0 +1,24 @@ +#!/usr/bin/env python +# -*- coding: UTF-8 -*- +from typing import Dict + +from setting.models_provider.base_model_provider import MaxKBBaseModel +from setting.models_provider.impl.base_chat_open_ai import BaseChatOpenAI + + +class BaiLianChatModel(MaxKBBaseModel, BaseChatOpenAI): + @staticmethod + def is_cache_model(): + return False + + @staticmethod + def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs): + optional_params = MaxKBBaseModel.filter_optional_params(model_kwargs) + if 'qwen-omni-turbo' in model_name or 'qwq' in model_name: + optional_params['streaming'] = True + return BaiLianChatModel( + model=model_name, + openai_api_base=model_credential.get('api_base'), + openai_api_key=model_credential.get('api_key'), + extra_body=optional_params + ) diff --git a/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/model/reranker.py b/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/model/reranker.py new file mode 100644 index 00000000000..5c9bea4af2a --- /dev/null +++ b/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/model/reranker.py @@ -0,0 +1,20 @@ +# coding=utf-8 +""" + @project: MaxKB + @Author:虎 + @file: reranker.py.py + @date:2024/9/2 16:42 + @desc: +""" +from typing import Dict + +from langchain_community.document_compressors import DashScopeRerank + +from setting.models_provider.base_model_provider import MaxKBBaseModel + + +class AliyunBaiLianReranker(MaxKBBaseModel, DashScopeRerank): + @staticmethod + def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs): + return AliyunBaiLianReranker(model=model_name, 
dashscope_api_key=model_credential.get('dashscope_api_key'), + top_n=model_kwargs.get('top_n', 3)) diff --git a/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/model/stt.py b/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/model/stt.py new file mode 100644 index 00000000000..ad17a32a95b --- /dev/null +++ b/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/model/stt.py @@ -0,0 +1,75 @@ +import os +import tempfile +from typing import Dict + +import dashscope +from dashscope.audio.asr import (Recognition) +from pydub import AudioSegment + +from setting.models_provider.base_model_provider import MaxKBBaseModel +from setting.models_provider.impl.base_stt import BaseSpeechToText + + +class AliyunBaiLianSpeechToText(MaxKBBaseModel, BaseSpeechToText): + api_key: str + model: str + + def __init__(self, **kwargs): + super().__init__(**kwargs) + self.api_key = kwargs.get('api_key') + self.model = kwargs.get('model') + + @staticmethod + def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs): + optional_params = {} + if 'max_tokens' in model_kwargs and model_kwargs['max_tokens'] is not None: + optional_params['max_tokens'] = model_kwargs['max_tokens'] + if 'temperature' in model_kwargs and model_kwargs['temperature'] is not None: + optional_params['temperature'] = model_kwargs['temperature'] + if model_name == 'qwen-omni-turbo': + optional_params['streaming'] = True + return AliyunBaiLianSpeechToText( + model=model_name, + api_key=model_credential.get('api_key'), + **optional_params, + ) + + def check_auth(self): + cwd = os.path.dirname(os.path.abspath(__file__)) + with open(f'{cwd}/iat_mp3_16k.mp3', 'rb') as f: + self.speech_to_text(f) + + def speech_to_text(self, audio_file): + dashscope.api_key = self.api_key + recognition = Recognition(model=self.model, + format='mp3', + sample_rate=16000, + callback=None) + with tempfile.NamedTemporaryFile(delete=False) as temp_file: + # 将上传的文件保存到临时文件中 
+ temp_file.write(audio_file.read()) + # 获取临时文件的路径 + temp_file_path = temp_file.name + + try: + audio = AudioSegment.from_file(temp_file_path) + if audio.channels != 1: + audio = audio.set_channels(1) + audio = audio.set_frame_rate(16000) + + # 将转换后的音频文件保存到临时文件中 + audio.export(temp_file_path, format='mp3') + # 识别临时文件 + result = recognition.call(temp_file_path) + text = '' + if result.status_code == 200: + result_sentence = result.get_sentence() + if result_sentence is not None: + for sentence in result_sentence: + text += sentence['text'] + return text + else: + raise Exception('Error: ', result.message) + finally: + # 删除临时文件 + os.remove(temp_file_path) diff --git a/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/model/tti.py b/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/model/tti.py new file mode 100644 index 00000000000..c39e1b3a7fc --- /dev/null +++ b/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/model/tti.py @@ -0,0 +1,59 @@ +# coding=utf-8 +from http import HTTPStatus +from typing import Dict + +from dashscope import ImageSynthesis +from django.utils.translation import gettext +from langchain_community.chat_models import ChatTongyi +from langchain_core.messages import HumanMessage + +from setting.models_provider.base_model_provider import MaxKBBaseModel +from setting.models_provider.impl.base_tti import BaseTextToImage + + +class QwenTextToImageModel(MaxKBBaseModel, BaseTextToImage): + api_key: str + model_name: str + params: dict + + def __init__(self, **kwargs): + super().__init__(**kwargs) + self.api_key = kwargs.get('api_key') + self.model_name = kwargs.get('model_name') + self.params = kwargs.get('params') + + @staticmethod + def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs): + optional_params = {'params': {'size': '1024*1024', 'style': '', 'n': 1}} + for key, value in model_kwargs.items(): + if key not in ['model_id', 'use_local', 'streaming']: + 
optional_params['params'][key] = value + chat_tong_yi = QwenTextToImageModel( + model_name=model_name, + api_key=model_credential.get('api_key'), + **optional_params, + ) + return chat_tong_yi + + def is_cache_model(self): + return False + + def check_auth(self): + chat = ChatTongyi(api_key=self.api_key, model_name='qwen-max') + chat.invoke([HumanMessage([{"type": "text", "text": gettext('Hello')}])]) + + def generate_image(self, prompt: str, negative_prompt: str = None): + # api_base='https://dashscope.aliyuncs.com/compatible-mode/v1', + rsp = ImageSynthesis.call(api_key=self.api_key, + model=self.model_name, + prompt=prompt, + negative_prompt=negative_prompt, + **self.params) + file_urls = [] + if rsp.status_code == HTTPStatus.OK: + for result in rsp.output.results: + file_urls.append(result.url) + else: + print('sync_call Failed, status_code: %s, code: %s, message: %s' % + (rsp.status_code, rsp.code, rsp.message)) + return file_urls diff --git a/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/model/tts.py b/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/model/tts.py new file mode 100644 index 00000000000..60c1a77fad8 --- /dev/null +++ b/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/model/tts.py @@ -0,0 +1,57 @@ +from typing import Dict + +import dashscope + +from django.utils.translation import gettext as _ + +from common.util.common import _remove_empty_lines +from setting.models_provider.base_model_provider import MaxKBBaseModel +from setting.models_provider.impl.base_tts import BaseTextToSpeech + + +class AliyunBaiLianTextToSpeech(MaxKBBaseModel, BaseTextToSpeech): + api_key: str + model: str + params: dict + + def __init__(self, **kwargs): + super().__init__(**kwargs) + self.api_key = kwargs.get('api_key') + self.model = kwargs.get('model') + self.params = kwargs.get('params') + + @staticmethod + def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs): + 
optional_params = {'params': {'voice': 'longxiaochun', 'speech_rate': 1.0}} + for key, value in model_kwargs.items(): + if key not in ['model_id', 'use_local', 'streaming']: + optional_params['params'][key] = value + + return AliyunBaiLianTextToSpeech( + model=model_name, + api_key=model_credential.get('api_key'), + **optional_params, + ) + + def check_auth(self): + self.text_to_speech(_('Hello')) + + def text_to_speech(self, text): + dashscope.api_key = self.api_key + text = _remove_empty_lines(text) + if 'sambert' in self.model: + from dashscope.audio.tts import SpeechSynthesizer + audio = SpeechSynthesizer.call(model=self.model, text=text, **self.params).get_audio_data() + else: + from dashscope.audio.tts_v2 import SpeechSynthesizer + synthesizer = SpeechSynthesizer(model=self.model, **self.params) + audio = synthesizer.call(text) + if audio is None: + raise Exception('Failed to generate audio') + if type(audio) == str: + print(audio) + raise Exception(audio) + return audio + + def is_cache_model(self): + return False diff --git a/apps/setting/models_provider/impl/anthropic_model_provider/__init__.py b/apps/setting/models_provider/impl/anthropic_model_provider/__init__.py new file mode 100644 index 00000000000..2dc4ab10db4 --- /dev/null +++ b/apps/setting/models_provider/impl/anthropic_model_provider/__init__.py @@ -0,0 +1,8 @@ +# coding=utf-8 +""" + @project: maxkb + @Author:虎 + @file: __init__.py.py + @date:2024/3/28 16:25 + @desc: +""" diff --git a/apps/setting/models_provider/impl/anthropic_model_provider/anthropic_model_provider.py b/apps/setting/models_provider/impl/anthropic_model_provider/anthropic_model_provider.py new file mode 100644 index 00000000000..7b3f91f1a32 --- /dev/null +++ b/apps/setting/models_provider/impl/anthropic_model_provider/anthropic_model_provider.py @@ -0,0 +1,62 @@ +# coding=utf-8 +""" + @project: maxkb + @Author:虎 + @file: openai_model_provider.py + @date:2024/3/28 16:26 + @desc: +""" +import os + +from common.util.file_util 
import get_file_content +from setting.models_provider.base_model_provider import IModelProvider, ModelProvideInfo, ModelInfo, \ + ModelTypeConst, ModelInfoManage +from setting.models_provider.impl.anthropic_model_provider.credential.image import AnthropicImageModelCredential +from setting.models_provider.impl.anthropic_model_provider.credential.llm import AnthropicLLMModelCredential +from setting.models_provider.impl.anthropic_model_provider.model.image import AnthropicImage +from setting.models_provider.impl.anthropic_model_provider.model.llm import AnthropicChatModel +from smartdoc.conf import PROJECT_DIR + +openai_llm_model_credential = AnthropicLLMModelCredential() +openai_image_model_credential = AnthropicImageModelCredential() + +model_info_list = [ + ModelInfo('claude-3-opus-20240229', '', ModelTypeConst.LLM, + openai_llm_model_credential, AnthropicChatModel + ), + ModelInfo('claude-3-sonnet-20240229', '', ModelTypeConst.LLM, openai_llm_model_credential, + AnthropicChatModel), + ModelInfo('claude-3-haiku-20240307', '', ModelTypeConst.LLM, openai_llm_model_credential, + AnthropicChatModel), + ModelInfo('claude-3-5-sonnet-20240620', '', ModelTypeConst.LLM, openai_llm_model_credential, + AnthropicChatModel), + ModelInfo('claude-3-5-haiku-20241022', '', ModelTypeConst.LLM, openai_llm_model_credential, + AnthropicChatModel), + ModelInfo('claude-3-5-sonnet-20241022', '', ModelTypeConst.LLM, openai_llm_model_credential, + AnthropicChatModel), +] + +image_model_info = [ + ModelInfo('claude-3-5-sonnet-20241022', '', ModelTypeConst.IMAGE, openai_image_model_credential, + AnthropicImage), +] + +model_info_manage = ( + ModelInfoManage.builder() + .append_model_info_list(model_info_list) + .append_default_model_info(model_info_list[0]) + .append_model_info_list(image_model_info) + .append_default_model_info(image_model_info[0]) + .build() +) + + +class AnthropicModelProvider(IModelProvider): + + def get_model_info_manage(self): + return model_info_manage + + def 
get_model_provide_info(self): + return ModelProvideInfo(provider='model_anthropic_provider', name='Anthropic', icon=get_file_content( + os.path.join(PROJECT_DIR, "apps", "setting", 'models_provider', 'impl', 'anthropic_model_provider', 'icon', + 'anthropic_icon_svg'))) diff --git a/apps/setting/models_provider/impl/anthropic_model_provider/credential/image.py b/apps/setting/models_provider/impl/anthropic_model_provider/credential/image.py new file mode 100644 index 00000000000..bf49c2c4578 --- /dev/null +++ b/apps/setting/models_provider/impl/anthropic_model_provider/credential/image.py @@ -0,0 +1,72 @@ +# coding=utf-8 +import traceback +from typing import Dict + +from django.utils.translation import gettext_lazy as _, gettext +from langchain_core.messages import HumanMessage + +from common import forms +from common.exception.app_exception import AppApiException +from common.forms import BaseForm, TooltipLabel +from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode + + +class AnthropicImageModelParams(BaseForm): + temperature = forms.SliderField(TooltipLabel(_('Temperature'), + _('Higher values make the output more random, while lower values make it more focused and deterministic')), + required=True, default_value=0.7, + _min=0.1, + _max=1.0, + _step=0.01, + precision=2) + + max_tokens = forms.SliderField( + TooltipLabel(_('Output the maximum Tokens'), + _('Specify the maximum number of tokens that the model can generate')), + required=True, default_value=800, + _min=1, + _max=100000, + _step=1, + precision=0) + + +class AnthropicImageModelCredential(BaseForm, BaseModelCredential): + api_base = forms.TextInputField(_('API URL'), required=True) + api_key = forms.PasswordInputField(_('API Key'), required=True) + + def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], model_params, provider, + raise_exception=False): + model_type_list = provider.get_model_type_list() + if not any(list(filter(lambda mt: 
mt.get('value') == model_type, model_type_list))): + raise AppApiException(ValidCode.valid_error.value, + gettext('{model_type} Model type is not supported').format(model_type=model_type)) + + for key in ['api_base', 'api_key']: + if key not in model_credential: + if raise_exception: + raise AppApiException(ValidCode.valid_error.value, gettext('{key} is required').format(key=key)) + else: + return False + try: + model = provider.get_model(model_type, model_name, model_credential) + res = model.stream([HumanMessage(content=[{"type": "text", "text": gettext("Hello")}])]) + for chunk in res: + print(chunk) + except Exception as e: + traceback.print_exc() + if isinstance(e, AppApiException): + raise e + if raise_exception: + raise AppApiException(ValidCode.valid_error.value, + gettext( + 'Verification failed, please check whether the parameters are correct: {error}').format( + error=str(e))) + else: + return False + return True + + def encryption_dict(self, model: Dict[str, object]): + return {**model, 'api_key': super().encryption(model.get('api_key', ''))} + + def get_model_params_setting_form(self, model_name): + return AnthropicImageModelParams() diff --git a/apps/setting/models_provider/impl/anthropic_model_provider/credential/llm.py b/apps/setting/models_provider/impl/anthropic_model_provider/credential/llm.py new file mode 100644 index 00000000000..350cd9414f7 --- /dev/null +++ b/apps/setting/models_provider/impl/anthropic_model_provider/credential/llm.py @@ -0,0 +1,78 @@ +# coding=utf-8 +""" + @project: MaxKB + @Author:虎 + @file: llm.py + @date:2024/7/11 18:32 + @desc: +""" +import traceback +from typing import Dict + +from langchain_core.messages import HumanMessage + +from common import forms +from common.exception.app_exception import AppApiException +from common.forms import BaseForm, TooltipLabel +from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode +from django.utils.translation import gettext_lazy as _, gettext + + 
+class AnthropicLLMModelParams(BaseForm): + temperature = forms.SliderField(TooltipLabel(_('Temperature'), + _('Higher values make the output more random, while lower values make it more focused and deterministic')), + required=True, default_value=0.7, + _min=0.1, + _max=1.0, + _step=0.01, + precision=2) + + max_tokens = forms.SliderField( + TooltipLabel(_('Output the maximum Tokens'), + _('Specify the maximum number of tokens that the model can generate')), + required=True, default_value=800, + _min=1, + _max=100000, + _step=1, + precision=0) + + +class AnthropicLLMModelCredential(BaseForm, BaseModelCredential): + + def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], model_params, provider, + raise_exception=False): + model_type_list = provider.get_model_type_list() + if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))): + raise AppApiException(ValidCode.valid_error.value, + gettext('{model_type} Model type is not supported').format(model_type=model_type)) + + for key in ['api_base', 'api_key']: + if key not in model_credential: + if raise_exception: + raise AppApiException(ValidCode.valid_error.value, gettext('{key} is required').format(key=key)) + else: + return False + try: + model = provider.get_model(model_type, model_name, model_credential) + model.invoke([HumanMessage(content=gettext('Hello'))]) + except Exception as e: + traceback.print_exc() + if isinstance(e, AppApiException): + raise e + if raise_exception: + raise AppApiException(ValidCode.valid_error.value, + gettext( + 'Verification failed, please check whether the parameters are correct: {error}').format( + error=str(e))) + else: + return False + return True + + def encryption_dict(self, model: Dict[str, object]): + return {**model, 'api_key': super().encryption(model.get('api_key', ''))} + + api_base = forms.TextInputField(_('API URL'), required=True) + api_key = forms.PasswordInputField(_('API Key'), required=True) + + def 
get_model_params_setting_form(self, model_name): + return AnthropicLLMModelParams() diff --git a/apps/setting/models_provider/impl/anthropic_model_provider/icon/anthropic_icon_svg b/apps/setting/models_provider/impl/anthropic_model_provider/icon/anthropic_icon_svg new file mode 100644 index 00000000000..342d40be8a9 --- /dev/null +++ b/apps/setting/models_provider/impl/anthropic_model_provider/icon/anthropic_icon_svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/apps/setting/models_provider/impl/anthropic_model_provider/model/image.py b/apps/setting/models_provider/impl/anthropic_model_provider/model/image.py new file mode 100644 index 00000000000..9582522cc6e --- /dev/null +++ b/apps/setting/models_provider/impl/anthropic_model_provider/model/image.py @@ -0,0 +1,26 @@ +from typing import Dict + +from langchain_anthropic import ChatAnthropic + +from common.config.tokenizer_manage_config import TokenizerManage +from setting.models_provider.base_model_provider import MaxKBBaseModel + + +def custom_get_token_ids(text: str): + tokenizer = TokenizerManage.get_tokenizer() + return tokenizer.encode(text) + + +class AnthropicImage(MaxKBBaseModel, ChatAnthropic): + + @staticmethod + def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs): + optional_params = MaxKBBaseModel.filter_optional_params(model_kwargs) + return AnthropicImage( + model=model_name, + anthropic_api_url=model_credential.get('api_base'), + anthropic_api_key=model_credential.get('api_key'), + # stream_options={"include_usage": True}, + streaming=True, + **optional_params, + ) diff --git a/apps/setting/models_provider/impl/anthropic_model_provider/model/llm.py b/apps/setting/models_provider/impl/anthropic_model_provider/model/llm.py new file mode 100644 index 00000000000..de055e1044e --- /dev/null +++ b/apps/setting/models_provider/impl/anthropic_model_provider/model/llm.py @@ -0,0 +1,53 @@ +# coding=utf-8 +""" + @project: maxkb + @Author:虎 + @file: llm.py + 
@date:2024/4/18 15:28 + @desc: +""" +from typing import List, Dict + +from langchain_anthropic import ChatAnthropic +from langchain_core.messages import BaseMessage, get_buffer_string + +from common.config.tokenizer_manage_config import TokenizerManage +from setting.models_provider.base_model_provider import MaxKBBaseModel + + +def custom_get_token_ids(text: str): + tokenizer = TokenizerManage.get_tokenizer() + return tokenizer.encode(text) + + +class AnthropicChatModel(MaxKBBaseModel, ChatAnthropic): + + @staticmethod + def is_cache_model(): + return False + + @staticmethod + def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs): + optional_params = MaxKBBaseModel.filter_optional_params(model_kwargs) + azure_chat_open_ai = AnthropicChatModel( + model=model_name, + anthropic_api_url=model_credential.get('api_base'), + anthropic_api_key=model_credential.get('api_key'), + **optional_params, + custom_get_token_ids=custom_get_token_ids + ) + return azure_chat_open_ai + + def get_num_tokens_from_messages(self, messages: List[BaseMessage]) -> int: + try: + return super().get_num_tokens_from_messages(messages) + except Exception as e: + tokenizer = TokenizerManage.get_tokenizer() + return sum([len(tokenizer.encode(get_buffer_string([m]))) for m in messages]) + + def get_num_tokens(self, text: str) -> int: + try: + return super().get_num_tokens(text) + except Exception as e: + tokenizer = TokenizerManage.get_tokenizer() + return len(tokenizer.encode(text)) diff --git a/apps/setting/models_provider/impl/aws_bedrock_model_provider/__init__.py b/apps/setting/models_provider/impl/aws_bedrock_model_provider/__init__.py new file mode 100644 index 00000000000..8cb7f459eae --- /dev/null +++ b/apps/setting/models_provider/impl/aws_bedrock_model_provider/__init__.py @@ -0,0 +1,2 @@ +#!/usr/bin/env python +# -*- coding: UTF-8 -*- diff --git a/apps/setting/models_provider/impl/aws_bedrock_model_provider/aws_bedrock_model_provider.py 
b/apps/setting/models_provider/impl/aws_bedrock_model_provider/aws_bedrock_model_provider.py new file mode 100644 index 00000000000..e5bb0dd44a4 --- /dev/null +++ b/apps/setting/models_provider/impl/aws_bedrock_model_provider/aws_bedrock_model_provider.py @@ -0,0 +1,154 @@ +#!/usr/bin/env python +# -*- coding: UTF-8 -*- + +import os +from common.util.file_util import get_file_content +from setting.models_provider.base_model_provider import ( + IModelProvider, ModelProvideInfo, ModelInfo, ModelTypeConst, ModelInfoManage +) +from setting.models_provider.impl.aws_bedrock_model_provider.credential.embedding import BedrockEmbeddingCredential +from setting.models_provider.impl.aws_bedrock_model_provider.credential.llm import BedrockLLMModelCredential +from setting.models_provider.impl.aws_bedrock_model_provider.model.embedding import BedrockEmbeddingModel +from setting.models_provider.impl.aws_bedrock_model_provider.model.llm import BedrockModel +from smartdoc.conf import PROJECT_DIR +from django.utils.translation import gettext as _ + + +def _create_model_info(model_name, description, model_type, credential_class, model_class): + return ModelInfo( + name=model_name, + desc=description, + model_type=model_type, + model_credential=credential_class(), + model_class=model_class + ) + + +def _get_aws_bedrock_icon_path(): + return os.path.join(PROJECT_DIR, "apps", "setting", 'models_provider', 'impl', 'aws_bedrock_model_provider', + 'icon', 'bedrock_icon_svg') + + +def _initialize_model_info(): + model_info_list = [ + _create_model_info( + 'anthropic.claude-v2:1', + _('An update to Claude 2 that doubles the context window and improves reliability, hallucination rates, and evidence-based accuracy in long documents and RAG contexts.'), + ModelTypeConst.LLM, + BedrockLLMModelCredential, + BedrockModel + ), + _create_model_info( + 'anthropic.claude-v2', + _('Anthropic is a powerful model that can handle a variety of tasks, from complex dialogue and creative content generation to 
detailed command obedience.'), + ModelTypeConst.LLM, + BedrockLLMModelCredential, + BedrockModel + ), + _create_model_info( + 'anthropic.claude-3-haiku-20240307-v1:0', + _("The Claude 3 Haiku is Anthropic's fastest and most compact model, with near-instant responsiveness. The model can answer simple queries and requests quickly. Customers will be able to build seamless AI experiences that mimic human interactions. Claude 3 Haiku can process images and return text output, and provides 200K context windows."), + ModelTypeConst.LLM, + BedrockLLMModelCredential, + BedrockModel + ), + _create_model_info( + 'anthropic.claude-3-sonnet-20240229-v1:0', + _("The Claude 3 Sonnet model from Anthropic strikes the ideal balance between intelligence and speed, especially when it comes to handling enterprise workloads. This model offers maximum utility while being priced lower than competing products, and it's been engineered to be a solid choice for deploying AI at scale."), + ModelTypeConst.LLM, + BedrockLLMModelCredential, + BedrockModel + ), + _create_model_info( + 'anthropic.claude-3-5-sonnet-20240620-v1:0', + _('The Claude 3.5 Sonnet raises the industry standard for intelligence, outperforming competing models and the Claude 3 Opus in extensive evaluations, with the speed and cost-effectiveness of our mid-range models.'), + ModelTypeConst.LLM, + BedrockLLMModelCredential, + BedrockModel + ), + _create_model_info( + 'anthropic.claude-instant-v1', + _('A faster, more affordable but still very powerful model that can handle a range of tasks including casual conversation, text analysis, summarization and document question answering.'), + ModelTypeConst.LLM, + BedrockLLMModelCredential, + BedrockModel + ), + _create_model_info( + 'amazon.titan-text-premier-v1:0', + _("Titan Text Premier is the most powerful and advanced model in the Titan Text series, designed to deliver exceptional performance for a variety of enterprise applications. 
With its cutting-edge features, it delivers greater accuracy and outstanding results, making it an excellent choice for organizations looking for a top-notch text processing solution."), + ModelTypeConst.LLM, + BedrockLLMModelCredential, + BedrockModel + ), + _create_model_info( + 'amazon.titan-text-lite-v1', + _('Amazon Titan Text Lite is a lightweight, efficient model ideal for fine-tuning English-language tasks, including summarization and copywriting, where customers require smaller, more cost-effective, and highly customizable models.'), + ModelTypeConst.LLM, + BedrockLLMModelCredential, + BedrockModel), + _create_model_info( + 'amazon.titan-text-express-v1', + _('Amazon Titan Text Express has context lengths of up to 8,000 tokens, making it ideal for a variety of high-level general language tasks, such as open-ended text generation and conversational chat, as well as support in retrieval-augmented generation (RAG). At launch, the model is optimized for English, but other languages are supported.'), + ModelTypeConst.LLM, + BedrockLLMModelCredential, + BedrockModel), + _create_model_info( + 'mistral.mistral-7b-instruct-v0:2', + _('7B dense converter for rapid deployment and easy customization. Small in size yet powerful in a variety of use cases. 
Supports English and code, as well as 32k context windows.'), + ModelTypeConst.LLM, + BedrockLLMModelCredential, + BedrockModel), + _create_model_info( + 'mistral.mistral-large-2402-v1:0', + _('Advanced Mistral AI large-scale language model capable of handling any language task, including complex multilingual reasoning, text understanding, transformation, and code generation.'), + ModelTypeConst.LLM, + BedrockLLMModelCredential, + BedrockModel), + _create_model_info( + 'meta.llama3-70b-instruct-v1:0', + _('Ideal for content creation, conversational AI, language understanding, R&D, and enterprise applications'), + ModelTypeConst.LLM, + BedrockLLMModelCredential, + BedrockModel), + _create_model_info( + 'meta.llama3-8b-instruct-v1:0', + _('Ideal for limited computing power and resources, edge devices, and faster training times.'), + ModelTypeConst.LLM, + BedrockLLMModelCredential, + BedrockModel), + ] + embedded_model_info_list = [ + _create_model_info( + 'amazon.titan-embed-text-v1', + _('Titan Embed Text is the largest embedding model in the Amazon Titan Embed series and can handle various text embedding tasks, such as text classification, text similarity calculation, etc.'), + ModelTypeConst.EMBEDDING, + BedrockEmbeddingCredential, + BedrockEmbeddingModel + ), + ] + + model_info_manage = ModelInfoManage.builder() \ + .append_model_info_list(model_info_list) \ + .append_default_model_info(model_info_list[0]) \ + .append_model_info_list(embedded_model_info_list) \ + .append_default_model_info(embedded_model_info_list[0]) \ + .build() + + return model_info_manage + + +class BedrockModelProvider(IModelProvider): + def __init__(self): + self._model_info_manage = _initialize_model_info() + + def get_model_info_manage(self): + return self._model_info_manage + + def get_model_provide_info(self): + icon_path = _get_aws_bedrock_icon_path() + icon_data = get_file_content(icon_path) + return ModelProvideInfo( + provider='model_aws_bedrock_provider', + name='Amazon Bedrock', + 
icon=icon_data + ) diff --git a/apps/setting/models_provider/impl/aws_bedrock_model_provider/credential/embedding.py b/apps/setting/models_provider/impl/aws_bedrock_model_provider/credential/embedding.py new file mode 100644 index 00000000000..380335ce060 --- /dev/null +++ b/apps/setting/models_provider/impl/aws_bedrock_model_provider/credential/embedding.py @@ -0,0 +1,53 @@ +import traceback +from typing import Dict + +from django.utils.translation import gettext as _ + +from common import forms +from common.exception.app_exception import AppApiException +from common.forms import BaseForm +from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode +from setting.models_provider.impl.aws_bedrock_model_provider.model.embedding import BedrockEmbeddingModel + + +class BedrockEmbeddingCredential(BaseForm, BaseModelCredential): + + def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], model_params, provider, + raise_exception=False): + model_type_list = provider.get_model_type_list() + if not any(mt.get('value') == model_type for mt in model_type_list): + if raise_exception: + raise AppApiException(ValidCode.valid_error.value, + _('{model_type} Model type is not supported').format(model_type=model_type)) + return False + + required_keys = ['region_name', 'access_key_id', 'secret_access_key'] + if not all(key in model_credential for key in required_keys): + if raise_exception: + raise AppApiException(ValidCode.valid_error.value, + _('The following fields are required: {keys}').format( + keys=", ".join(required_keys))) + return False + + try: + model: BedrockEmbeddingModel = provider.get_model(model_type, model_name, model_credential) + aa = model.embed_query(_('Hello')) + print(aa) + except AppApiException: + raise + except Exception as e: + traceback.print_exc() + if raise_exception: + raise AppApiException(ValidCode.valid_error.value, + _('Verification failed, please check whether the parameters are correct: 
{error}').format( + error=str(e))) + return False + + return True + + def encryption_dict(self, model: Dict[str, object]): + return {**model, 'secret_access_key': super().encryption(model.get('secret_access_key', ''))} + + region_name = forms.TextInputField('Region Name', required=True) + access_key_id = forms.TextInputField('Access Key ID', required=True) + secret_access_key = forms.PasswordInputField('Secret Access Key', required=True) diff --git a/apps/setting/models_provider/impl/aws_bedrock_model_provider/credential/llm.py b/apps/setting/models_provider/impl/aws_bedrock_model_provider/credential/llm.py new file mode 100644 index 00000000000..cc8f81f43b3 --- /dev/null +++ b/apps/setting/models_provider/impl/aws_bedrock_model_provider/credential/llm.py @@ -0,0 +1,76 @@ +import traceback +from typing import Dict + +from django.utils.translation import gettext_lazy as _, gettext +from langchain_core.messages import HumanMessage + +from common import forms +from common.exception.app_exception import AppApiException +from common.forms import BaseForm, TooltipLabel +from setting.models_provider.base_model_provider import ValidCode, BaseModelCredential + + +class BedrockLLMModelParams(BaseForm): + temperature = forms.SliderField(TooltipLabel(_('Temperature'), + _('Higher values make the output more random, while lower values make it more focused and deterministic')), + required=True, default_value=0.7, + _min=0.1, + _max=1.0, + _step=0.01, + precision=2) + + max_tokens = forms.SliderField( + TooltipLabel(_('Output the maximum Tokens'), + _('Specify the maximum number of tokens that the model can generate')), + required=True, default_value=1024, + _min=1, + _max=100000, + _step=1, + precision=0) + + +class BedrockLLMModelCredential(BaseForm, BaseModelCredential): + + def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], model_params, provider, + raise_exception=False): + model_type_list = provider.get_model_type_list() + if not 
any(mt.get('value') == model_type for mt in model_type_list): + if raise_exception: + raise AppApiException(ValidCode.valid_error.value, + gettext('{model_type} Model type is not supported').format(model_type=model_type)) + return False + + required_keys = ['region_name', 'access_key_id', 'secret_access_key'] + if not all(key in model_credential for key in required_keys): + if raise_exception: + raise AppApiException(ValidCode.valid_error.value, + gettext('The following fields are required: {keys}').format( + keys=", ".join(required_keys))) + return False + + try: + model = provider.get_model(model_type, model_name, model_credential, **model_params) + model.invoke([HumanMessage(content=gettext('Hello'))]) + except AppApiException: + raise + except Exception as e: + traceback.print_exc() + if raise_exception: + raise AppApiException(ValidCode.valid_error.value, + gettext( + 'Verification failed, please check whether the parameters are correct: {error}').format( + error=str(e))) + return False + + return True + + def encryption_dict(self, model: Dict[str, object]): + return {**model, 'secret_access_key': super().encryption(model.get('secret_access_key', ''))} + + region_name = forms.TextInputField('Region Name', required=True) + access_key_id = forms.TextInputField('Access Key ID', required=True) + secret_access_key = forms.PasswordInputField('Secret Access Key', required=True) + base_url = forms.TextInputField('Proxy URL', required=False) + + def get_model_params_setting_form(self, model_name): + return BedrockLLMModelParams() diff --git a/apps/setting/models_provider/impl/aws_bedrock_model_provider/icon/bedrock_icon_svg b/apps/setting/models_provider/impl/aws_bedrock_model_provider/icon/bedrock_icon_svg new file mode 100644 index 00000000000..5f176a7d27d --- /dev/null +++ b/apps/setting/models_provider/impl/aws_bedrock_model_provider/icon/bedrock_icon_svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git 
a/apps/setting/models_provider/impl/aws_bedrock_model_provider/model/embedding.py b/apps/setting/models_provider/impl/aws_bedrock_model_provider/model/embedding.py new file mode 100644 index 00000000000..1375422524c --- /dev/null +++ b/apps/setting/models_provider/impl/aws_bedrock_model_provider/model/embedding.py @@ -0,0 +1,60 @@ +from langchain_community.embeddings import BedrockEmbeddings + +from setting.models_provider.base_model_provider import MaxKBBaseModel +from typing import Dict, List + +from setting.models_provider.impl.aws_bedrock_model_provider.model.llm import _update_aws_credentials + + +class BedrockEmbeddingModel(MaxKBBaseModel, BedrockEmbeddings): + def __init__(self, model_id: str, region_name: str, credentials_profile_name: str, + **kwargs): + super().__init__(model_id=model_id, region_name=region_name, + credentials_profile_name=credentials_profile_name, **kwargs) + + @classmethod + def new_instance(cls, model_type: str, model_name: str, model_credential: Dict[str, str], + **model_kwargs) -> 'BedrockModel': + _update_aws_credentials(model_credential['access_key_id'], model_credential['access_key_id'], + model_credential['secret_access_key']) + return cls( + model_id=model_name, + region_name=model_credential['region_name'], + credentials_profile_name=model_credential['access_key_id'], + ) + + def embed_documents(self, texts: List[str]) -> List[List[float]]: + """Compute doc embeddings using a Bedrock model. + + Args: + texts: The list of texts to embed + + Returns: + List of embeddings, one for each text. + """ + results = [] + for text in texts: + response = self._embedding_func(text) + + if self.normalize: + response = self._normalize_vector(response) + + results.append(response) + + return results + + def embed_query(self, text: str) -> List[float]: + """Compute query embeddings using a Bedrock model. + + Args: + text: The text to embed. + + Returns: + Embeddings for the text. 
+ """ + embedding = self._embedding_func(text) + + if self.normalize: + return self._normalize_vector(embedding) + + return embedding diff --git a/apps/setting/models_provider/impl/aws_bedrock_model_provider/model/llm.py b/apps/setting/models_provider/impl/aws_bedrock_model_provider/model/llm.py new file mode 100644 index 00000000000..7b0088a4ab4 --- /dev/null +++ b/apps/setting/models_provider/impl/aws_bedrock_model_provider/model/llm.py @@ -0,0 +1,104 @@ +import os +import re +from typing import Dict, List + +from botocore.config import Config +from langchain_community.chat_models import BedrockChat +from langchain_core.messages import BaseMessage, get_buffer_string + +from common.config.tokenizer_manage_config import TokenizerManage +from setting.models_provider.base_model_provider import MaxKBBaseModel + + +def get_max_tokens_keyword(model_name): + """ + 根据模型名称返回正确的 max_tokens 关键字。 + + :param model_name: 模型名称字符串 + :return: 对应的 max_tokens 关键字字符串 + """ + maxTokens = ["ai21.j2-ultra-v1", "ai21.j2-mid-v1"] + # max_tokens_to_sample = ["anthropic.claude-v2:1", "anthropic.claude-v2", "anthropic.claude-instant-v1"] + maxTokenCount = ["amazon.titan-text-lite-v1", "amazon.titan-text-express-v1"] + max_new_tokens = [ + "us.meta.llama3-2-1b-instruct-v1:0", "us.meta.llama3-2-3b-instruct-v1:0", "us.meta.llama3-2-11b-instruct-v1:0", + "us.meta.llama3-2-90b-instruct-v1:0"] + if model_name in maxTokens: + return 'maxTokens' + elif model_name in maxTokenCount: + return 'maxTokenCount' + elif model_name in max_new_tokens: + return 'max_new_tokens' + else: + return 'max_tokens' + + +class BedrockModel(MaxKBBaseModel, BedrockChat): + + @staticmethod + def is_cache_model(): + return False + + def __init__(self, model_id: str, region_name: str, credentials_profile_name: str, + streaming: bool = False, config: Config = None, **kwargs): + super().__init__(model_id=model_id, region_name=region_name, + credentials_profile_name=credentials_profile_name, streaming=streaming, config=config, 
+ **kwargs) + + @classmethod + def new_instance(cls, model_type: str, model_name: str, model_credential: Dict[str, str], + **model_kwargs) -> 'BedrockModel': + optional_params = MaxKBBaseModel.filter_optional_params(model_kwargs) + + config = {} + # 判断model_kwargs是否包含 base_url 且不为空 + if 'base_url' in model_credential and model_credential['base_url']: + proxy_url = model_credential['base_url'] + config = Config( + proxies={ + 'http': proxy_url, + 'https': proxy_url + }, + connect_timeout=60, + read_timeout=60 + ) + _update_aws_credentials(model_credential['access_key_id'], model_credential['access_key_id'], + model_credential['secret_access_key']) + + return cls( + model_id=model_name, + region_name=model_credential['region_name'], + credentials_profile_name=model_credential['access_key_id'], + streaming=model_kwargs.pop('streaming', True), + model_kwargs=optional_params, + config=config + ) + + def get_num_tokens_from_messages(self, messages: List[BaseMessage]) -> int: + try: + return super().get_num_tokens_from_messages(messages) + except Exception as e: + tokenizer = TokenizerManage.get_tokenizer() + return sum([len(tokenizer.encode(get_buffer_string([m]))) for m in messages]) + + def get_num_tokens(self, text: str) -> int: + try: + return super().get_num_tokens(text) + except Exception as e: + tokenizer = TokenizerManage.get_tokenizer() + return len(tokenizer.encode(text)) + + +def _update_aws_credentials(profile_name, access_key_id, secret_access_key): + credentials_path = os.path.join(os.path.expanduser("~"), ".aws", "credentials") + os.makedirs(os.path.dirname(credentials_path), exist_ok=True) + + content = open(credentials_path, 'r').read() if os.path.exists(credentials_path) else '' + pattern = rf'\n*\[{profile_name}\]\n*(aws_access_key_id = .*)\n*(aws_secret_access_key = .*)\n*' + content = re.sub(pattern, '', content, flags=re.DOTALL) + + if not re.search(rf'\[{profile_name}\]', content): + content += f"\n[{profile_name}]\naws_access_key_id = 
{access_key_id}\naws_secret_access_key = {secret_access_key}\n" + + with open(credentials_path, 'w') as file: + file.write(content) diff --git a/apps/setting/models_provider/impl/azure_model_provider/azure_model_provider.py b/apps/setting/models_provider/impl/azure_model_provider/azure_model_provider.py index 3164dd8ea3e..e249f0b7cd1 100644 --- a/apps/setting/models_provider/impl/azure_model_provider/azure_model_provider.py +++ b/apps/setting/models_provider/impl/azure_model_provider/azure_model_provider.py @@ -7,98 +7,110 @@ @desc: """ import os -from typing import Dict -from langchain.schema import HumanMessage - -from common import forms -from common.exception.app_exception import AppApiException -from common.forms import BaseForm from common.util.file_util import get_file_content -from setting.models_provider.base_model_provider import IModelProvider, ModelProvideInfo, BaseModelCredential, \ - ModelInfo, \ - ModelTypeConst, ValidCode +from setting.models_provider.base_model_provider import IModelProvider, ModelProvideInfo, ModelInfo, \ + ModelTypeConst, ModelInfoManage +from setting.models_provider.impl.azure_model_provider.credential.embedding import AzureOpenAIEmbeddingCredential +from setting.models_provider.impl.azure_model_provider.credential.image import AzureOpenAIImageModelCredential +from setting.models_provider.impl.azure_model_provider.credential.llm import AzureLLMModelCredential +from setting.models_provider.impl.azure_model_provider.credential.stt import AzureOpenAISTTModelCredential +from setting.models_provider.impl.azure_model_provider.credential.tti import AzureOpenAITextToImageModelCredential +from setting.models_provider.impl.azure_model_provider.credential.tts import AzureOpenAITTSModelCredential from setting.models_provider.impl.azure_model_provider.model.azure_chat_model import AzureChatModel +from setting.models_provider.impl.azure_model_provider.model.embedding import AzureOpenAIEmbeddingModel +from 
setting.models_provider.impl.azure_model_provider.model.image import AzureOpenAIImage +from setting.models_provider.impl.azure_model_provider.model.stt import AzureOpenAISpeechToText +from setting.models_provider.impl.azure_model_provider.model.tti import AzureOpenAITextToImage +from setting.models_provider.impl.azure_model_provider.model.tts import AzureOpenAITextToSpeech from smartdoc.conf import PROJECT_DIR - - -class DefaultAzureLLMModelCredential(BaseForm, BaseModelCredential): - - def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], raise_exception=False): - model_type_list = AzureModelProvider().get_model_type_list() - if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))): - raise AppApiException(ValidCode.valid_error.value, f'{model_type} 模型类型不支持') - - for key in ['api_base', 'api_key', 'deployment_name', 'api_version']: - if key not in model_credential: - if raise_exception: - raise AppApiException(ValidCode.valid_error.value, f'{key} 字段为必填字段') - else: - return False - try: - model = AzureModelProvider().get_model(model_type, model_name, model_credential) - model.invoke([HumanMessage(content='你好')]) - except Exception as e: - if isinstance(e, AppApiException): - raise e - if raise_exception: - raise AppApiException(ValidCode.valid_error.value, '校验失败,请检查参数是否正确') - else: - return False - - return True - - def encryption_dict(self, model: Dict[str, object]): - return {**model, 'api_key': super().encryption(model.get('api_key', ''))} - - api_version = forms.TextInputField("API 版本 (api_version)", required=True) - - api_base = forms.TextInputField('API 域名 (azure_endpoint)', required=True) - - api_key = forms.PasswordInputField("API Key (api_key)", required=True) - - deployment_name = forms.TextInputField("部署名 (deployment_name)", required=True) - - -base_azure_llm_model_credential = DefaultAzureLLMModelCredential() - -model_dict = { - 'deployment_name': ModelInfo('Azure OpenAI', '具体的基础模型由部署名决定', 
ModelTypeConst.LLM, - base_azure_llm_model_credential, api_version='2024-02-15-preview' - ) -} +from django.utils.translation import gettext_lazy as _ + +base_azure_llm_model_credential = AzureLLMModelCredential() +base_azure_embedding_model_credential = AzureOpenAIEmbeddingCredential() +base_azure_image_model_credential = AzureOpenAIImageModelCredential() +base_azure_tti_model_credential = AzureOpenAITextToImageModelCredential() +base_azure_tts_model_credential = AzureOpenAITTSModelCredential() +base_azure_stt_model_credential = AzureOpenAISTTModelCredential() + +default_model_info = [ + ModelInfo('Azure OpenAI', '', ModelTypeConst.LLM, + base_azure_llm_model_credential, AzureChatModel, api_version='2024-02-15-preview' + ), + ModelInfo('gpt-4', '', ModelTypeConst.LLM, + base_azure_llm_model_credential, AzureChatModel, api_version='2024-02-15-preview' + ), + ModelInfo('gpt-4o', '', ModelTypeConst.LLM, + base_azure_llm_model_credential, AzureChatModel, api_version='2024-02-15-preview' + ), + ModelInfo('gpt-4o-mini', '', ModelTypeConst.LLM, + base_azure_llm_model_credential, AzureChatModel, api_version='2024-02-15-preview' + ), +] + +embedding_model_info = [ + ModelInfo('text-embedding-3-large', '', ModelTypeConst.EMBEDDING, + base_azure_embedding_model_credential, AzureOpenAIEmbeddingModel, api_version='2023-05-15' + ), + ModelInfo('text-embedding-3-small', '', ModelTypeConst.EMBEDDING, + base_azure_embedding_model_credential, AzureOpenAIEmbeddingModel, api_version='2023-05-15' + ), + ModelInfo('text-embedding-ada-002', '', ModelTypeConst.EMBEDDING, + base_azure_embedding_model_credential, AzureOpenAIEmbeddingModel, api_version='2023-05-15' + ), +] + +image_model_info = [ + ModelInfo('gpt-4o', '', ModelTypeConst.IMAGE, + base_azure_image_model_credential, AzureOpenAIImage, api_version='2023-05-15' + ), + ModelInfo('gpt-4o-mini', '', ModelTypeConst.IMAGE, + base_azure_image_model_credential, AzureOpenAIImage, api_version='2023-05-15' + ), +] + +tti_model_info = [ + 
ModelInfo('dall-e-3', '', ModelTypeConst.TTI, + base_azure_tti_model_credential, AzureOpenAITextToImage, api_version='2023-05-15' + ), +] + +tts_model_info = [ + ModelInfo('tts', '', ModelTypeConst.TTS, + base_azure_tts_model_credential, AzureOpenAITextToSpeech, api_version='2023-05-15' + ), +] + +stt_model_info = [ + ModelInfo('whisper', '', ModelTypeConst.STT, + base_azure_stt_model_credential, AzureOpenAISpeechToText, api_version='2023-05-15' + ), +] + +model_info_manage = ( + ModelInfoManage.builder() + .append_default_model_info(default_model_info[0]) + .append_model_info_list(default_model_info) + .append_model_info_list(embedding_model_info) + .append_default_model_info(embedding_model_info[0]) + .append_model_info_list(image_model_info) + .append_default_model_info(image_model_info[0]) + .append_model_info_list(stt_model_info) + .append_default_model_info(stt_model_info[0]) + .append_model_info_list(tts_model_info) + .append_default_model_info(tts_model_info[0]) + .append_model_info_list(tti_model_info) + .append_default_model_info(tti_model_info[0]) + .build() +) class AzureModelProvider(IModelProvider): - def get_dialogue_number(self): - return 3 - - def get_model(self, model_type, model_name, model_credential: Dict[str, object], **model_kwargs) -> AzureChatModel: - azure_chat_open_ai = AzureChatModel( - azure_endpoint=model_credential.get('api_base'), - openai_api_version=model_credential.get('api_version', '2024-02-15-preview'), - deployment_name=model_credential.get('deployment_name'), - openai_api_key=model_credential.get('api_key'), - openai_api_type="azure" - ) - return azure_chat_open_ai - - def get_model_credential(self, model_type, model_name): - if model_name in model_dict: - return model_dict.get(model_name).model_credential - return base_azure_llm_model_credential + def get_model_info_manage(self): + return model_info_manage def get_model_provide_info(self): return ModelProvideInfo(provider='model_azure_provider', name='Azure OpenAI', 
icon=get_file_content( os.path.join(PROJECT_DIR, "apps", "setting", 'models_provider', 'impl', 'azure_model_provider', 'icon', 'azure_icon_svg'))) - - def get_model_list(self, model_type: str): - if model_type is None: - raise AppApiException(500, '模型类型不能为空') - return [model_dict.get(key).to_dict() for key in - list(filter(lambda key: model_dict.get(key).model_type == model_type, model_dict.keys()))] - - def get_model_type_list(self): - return [{'key': "大语言模型", 'value': "LLM"}] diff --git a/apps/setting/models_provider/impl/azure_model_provider/credential/embedding.py b/apps/setting/models_provider/impl/azure_model_provider/credential/embedding.py new file mode 100644 index 00000000000..471e6c38400 --- /dev/null +++ b/apps/setting/models_provider/impl/azure_model_provider/credential/embedding.py @@ -0,0 +1,57 @@ +# coding=utf-8 +""" + @project: MaxKB + @Author:虎 + @file: llm.py + @date:2024/7/11 17:08 + @desc: +""" +import traceback +from typing import Dict + +from django.utils.translation import gettext as _ + +from common import forms +from common.exception.app_exception import AppApiException +from common.forms import BaseForm +from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode + + +class AzureOpenAIEmbeddingCredential(BaseForm, BaseModelCredential): + + def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], model_params, provider, + raise_exception=False): + model_type_list = provider.get_model_type_list() + if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))): + raise AppApiException(ValidCode.valid_error.value, + _('{model_type} Model type is not supported').format(model_type=model_type)) + + for key in ['api_base', 'api_key', 'api_version']: + if key not in model_credential: + if raise_exception: + raise AppApiException(ValidCode.valid_error.value, _('{key} is required').format(key=key)) + else: + return False + try: + model = provider.get_model(model_type, 
model_name, model_credential) + model.embed_query(_('Hello')) + except Exception as e: + traceback.print_exc() + if isinstance(e, AppApiException): + raise e + if raise_exception: + raise AppApiException(ValidCode.valid_error.value, + _('Verification failed, please check whether the parameters are correct')) + else: + return False + + return True + + def encryption_dict(self, model: Dict[str, object]): + return {**model, 'api_key': super().encryption(model.get('api_key', ''))} + + api_version = forms.TextInputField("Api Version", required=True) + + api_base = forms.TextInputField('Azure Endpoint', required=True) + + api_key = forms.PasswordInputField("API Key", required=True) diff --git a/apps/setting/models_provider/impl/azure_model_provider/credential/image.py b/apps/setting/models_provider/impl/azure_model_provider/credential/image.py new file mode 100644 index 00000000000..ee8e7b850d6 --- /dev/null +++ b/apps/setting/models_provider/impl/azure_model_provider/credential/image.py @@ -0,0 +1,75 @@ +# coding=utf-8 +import base64 +import os +import traceback +from typing import Dict + +from langchain_core.messages import HumanMessage + +from common import forms +from common.exception.app_exception import AppApiException +from common.forms import BaseForm, TooltipLabel +from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode +from django.utils.translation import gettext_lazy as _, gettext + + +class AzureOpenAIImageModelParams(BaseForm): + temperature = forms.SliderField(TooltipLabel(_('Temperature'), + _('Higher values make the output more random, while lower values make it more focused and deterministic')), + required=True, default_value=0.7, + _min=0.1, + _max=1.0, + _step=0.01, + precision=2) + + max_tokens = forms.SliderField( + TooltipLabel(_('Output the maximum Tokens'), + _('Specify the maximum number of tokens that the model can generate')), + required=True, default_value=800, + _min=1, + _max=100000, + _step=1, + precision=0) 
+ + +class AzureOpenAIImageModelCredential(BaseForm, BaseModelCredential): + api_version = forms.TextInputField("API Version", required=True) + api_base = forms.TextInputField('Azure Endpoint', required=True) + api_key = forms.PasswordInputField("API Key", required=True) + + def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], model_params, provider, + raise_exception=False): + model_type_list = provider.get_model_type_list() + if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))): + raise AppApiException(ValidCode.valid_error.value, + gettext('{model_type} Model type is not supported').format(model_type=model_type)) + + for key in ['api_base', 'api_key', 'api_version']: + if key not in model_credential: + if raise_exception: + raise AppApiException(ValidCode.valid_error.value, gettext('{key} is required').format(key=key)) + else: + return False + try: + model = provider.get_model(model_type, model_name, model_credential, **model_params) + res = model.stream([HumanMessage(content=[{"type": "text", "text": gettext('Hello')}])]) + for chunk in res: + print(chunk) + except Exception as e: + traceback.print_exc() + if isinstance(e, AppApiException): + raise e + if raise_exception: + raise AppApiException(ValidCode.valid_error.value, + gettext( + 'Verification failed, please check whether the parameters are correct: {error}').format( + error=str(e))) + else: + return False + return True + + def encryption_dict(self, model: Dict[str, object]): + return {**model, 'api_key': super().encryption(model.get('api_key', ''))} + + def get_model_params_setting_form(self, model_name): + return AzureOpenAIImageModelParams() diff --git a/apps/setting/models_provider/impl/azure_model_provider/credential/llm.py b/apps/setting/models_provider/impl/azure_model_provider/credential/llm.py new file mode 100644 index 00000000000..ac17279240c --- /dev/null +++ 
b/apps/setting/models_provider/impl/azure_model_provider/credential/llm.py @@ -0,0 +1,96 @@ +# coding=utf-8 +""" + @project: MaxKB + @Author:虎 + @file: llm.py + @date:2024/7/11 17:08 + @desc: +""" +import traceback +from typing import Dict + +from langchain_core.messages import HumanMessage +from openai import BadRequestError + +from common import forms +from common.exception.app_exception import AppApiException +from common.forms import BaseForm, TooltipLabel +from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode +from django.utils.translation import gettext_lazy as _, gettext + + +class AzureLLMModelParams(BaseForm): + temperature = forms.SliderField(TooltipLabel(_('Temperature'), + _('Higher values make the output more random, while lower values make it more focused and deterministic')), + required=True, default_value=0.7, + _min=0.1, + _max=1.0, + _step=0.01, + precision=2) + + max_tokens = forms.SliderField( + TooltipLabel(_('Output the maximum Tokens'), + _('Specify the maximum number of tokens that the model can generate')), + required=True, default_value=800, + _min=1, + _max=100000, + _step=1, + precision=0) + + +class o3MiniLLMModelParams(BaseForm): + max_completion_tokens = forms.SliderField( + TooltipLabel(_('Output the maximum Tokens'), + _('Specify the maximum number of tokens that the model can generate')), + required=True, default_value=800, + _min=1, + _max=5000, + _step=1, + precision=0) + + +class AzureLLMModelCredential(BaseForm, BaseModelCredential): + + def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], model_params, provider, + raise_exception=False): + model_type_list = provider.get_model_type_list() + if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))): + raise AppApiException(ValidCode.valid_error.value, + gettext('{model_type} Model type is not supported').format(model_type=model_type)) + + for key in ['api_base', 'api_key', 
'deployment_name', 'api_version']: + if key not in model_credential: + if raise_exception: + raise AppApiException(ValidCode.valid_error.value, gettext('{key} is required').format(key=key)) + else: + return False + try: + model = provider.get_model(model_type, model_name, model_credential, **model_params) + model.invoke([HumanMessage(content=gettext('Hello'))]) + except Exception as e: + traceback.print_exc() + if isinstance(e, AppApiException) or isinstance(e, BadRequestError): + raise e + if raise_exception: + raise AppApiException(ValidCode.valid_error.value, + gettext('Verification failed, please check whether the parameters are correct')) + else: + return False + + return True + + def encryption_dict(self, model: Dict[str, object]): + return {**model, 'api_key': super().encryption(model.get('api_key', ''))} + + api_version = forms.TextInputField("API Version", required=True) + + api_base = forms.TextInputField('Azure Endpoint', required=True) + + api_key = forms.PasswordInputField("API Key", required=True) + + deployment_name = forms.TextInputField("Deployment name", required=True) + + def get_model_params_setting_form(self, model_name): + if 'o3' in model_name or 'o1' in model_name: + return o3MiniLLMModelParams() + return AzureLLMModelParams() diff --git a/apps/setting/models_provider/impl/azure_model_provider/credential/stt.py b/apps/setting/models_provider/impl/azure_model_provider/credential/stt.py new file mode 100644 index 00000000000..f1575dbefef --- /dev/null +++ b/apps/setting/models_provider/impl/azure_model_provider/credential/stt.py @@ -0,0 +1,50 @@ +# coding=utf-8 +import traceback +from typing import Dict + +from django.utils.translation import gettext as _ + +from common import forms +from common.exception.app_exception import AppApiException +from common.forms import BaseForm +from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode + + +class AzureOpenAISTTModelCredential(BaseForm, BaseModelCredential): + 
api_version = forms.TextInputField("API Version", required=True) + api_base = forms.TextInputField('Azure Endpoint', required=True) + api_key = forms.PasswordInputField("API Key", required=True) + + def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], model_params, provider, + raise_exception=False): + model_type_list = provider.get_model_type_list() + if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))): + raise AppApiException(ValidCode.valid_error.value, + _('{model_type} Model type is not supported').format(model_type=model_type)) + + for key in ['api_base', 'api_key', 'api_version']: + if key not in model_credential: + if raise_exception: + raise AppApiException(ValidCode.valid_error.value, _('{key} is required').format(key=key)) + else: + return False + try: + model = provider.get_model(model_type, model_name, model_credential) + model.check_auth() + except Exception as e: + traceback.print_exc() + if isinstance(e, AppApiException): + raise e + if raise_exception: + raise AppApiException(ValidCode.valid_error.value, + _('Verification failed, please check whether the parameters are correct: {error}').format( + error=str(e))) + else: + return False + return True + + def encryption_dict(self, model: Dict[str, object]): + return {**model, 'api_key': super().encryption(model.get('api_key', ''))} + + def get_model_params_setting_form(self, model_name): + pass diff --git a/apps/setting/models_provider/impl/azure_model_provider/credential/tti.py b/apps/setting/models_provider/impl/azure_model_provider/credential/tti.py new file mode 100644 index 00000000000..34d0c510622 --- /dev/null +++ b/apps/setting/models_provider/impl/azure_model_provider/credential/tti.py @@ -0,0 +1,87 @@ +# coding=utf-8 +import traceback +from typing import Dict + +from django.utils.translation import gettext_lazy as _, gettext + +from common import forms +from common.exception.app_exception import AppApiException +from 
common.forms import BaseForm, TooltipLabel +from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode + + +class AzureOpenAITTIModelParams(BaseForm): + size = forms.SingleSelect( + TooltipLabel(_('Image size'), _('Specify the size of the generated image, such as: 1024x1024')), + required=True, + default_value='1024x1024', + option_list=[ + {'value': '1024x1024', 'label': '1024x1024'}, + {'value': '1024x1792', 'label': '1024x1792'}, + {'value': '1792x1024', 'label': '1792x1024'}, + ], + text_field='label', + value_field='value' + ) + + quality = forms.SingleSelect( + TooltipLabel(_('Picture quality'), ''), + required=True, + default_value='standard', + option_list=[ + {'value': 'standard', 'label': 'standard'}, + {'value': 'hd', 'label': 'hd'}, + ], + text_field='label', + value_field='value' + ) + + n = forms.SliderField( + TooltipLabel(_('Number of pictures'), _('Specify the number of generated images')), + required=True, default_value=1, + _min=1, + _max=10, + _step=1, + precision=0) + + +class AzureOpenAITextToImageModelCredential(BaseForm, BaseModelCredential): + api_version = forms.TextInputField("API Version", required=True) + api_base = forms.TextInputField('Azure Endpoint', required=True) + api_key = forms.PasswordInputField("API Key", required=True) + + def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], model_params, provider, + raise_exception=False): + model_type_list = provider.get_model_type_list() + if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))): + raise AppApiException(ValidCode.valid_error.value, + gettext('{model_type} Model type is not supported').format(model_type=model_type)) + + for key in ['api_base', 'api_key', 'api_version']: + if key not in model_credential: + if raise_exception: + raise AppApiException(ValidCode.valid_error.value, gettext('{key} is required').format(key=key)) + else: + return False + try: + model = 
provider.get_model(model_type, model_name, model_credential, **model_params) + res = model.check_auth() + print(res) + except Exception as e: + traceback.print_exc() + if isinstance(e, AppApiException): + raise e + if raise_exception: + raise AppApiException(ValidCode.valid_error.value, + gettext( + 'Verification failed, please check whether the parameters are correct: {error}').format( + error=str(e))) + else: + return False + return True + + def encryption_dict(self, model: Dict[str, object]): + return {**model, 'api_key': super().encryption(model.get('api_key', ''))} + + def get_model_params_setting_form(self, model_name): + return AzureOpenAITTIModelParams() diff --git a/apps/setting/models_provider/impl/azure_model_provider/credential/tts.py b/apps/setting/models_provider/impl/azure_model_provider/credential/tts.py new file mode 100644 index 00000000000..a41365fecca --- /dev/null +++ b/apps/setting/models_provider/impl/azure_model_provider/credential/tts.py @@ -0,0 +1,68 @@ +# coding=utf-8 +import traceback +from typing import Dict + +from django.utils.translation import gettext_lazy as _, gettext + +from common import forms +from common.exception.app_exception import AppApiException +from common.forms import BaseForm, TooltipLabel +from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode + + +class AzureOpenAITTSModelGeneralParams(BaseForm): + # alloy, echo, fable, onyx, nova, shimmer + voice = forms.SingleSelect( + TooltipLabel('Voice', + _('Try out the different sounds (Alloy, Echo, Fable, Onyx, Nova, and Sparkle) to find one that suits your desired tone and audience. 
The current voiceover is optimized for English.')), + required=True, default_value='alloy', + text_field='value', + value_field='value', + option_list=[ + {'text': 'alloy', 'value': 'alloy'}, + {'text': 'echo', 'value': 'echo'}, + {'text': 'fable', 'value': 'fable'}, + {'text': 'onyx', 'value': 'onyx'}, + {'text': 'nova', 'value': 'nova'}, + {'text': 'shimmer', 'value': 'shimmer'}, + ]) + + +class AzureOpenAITTSModelCredential(BaseForm, BaseModelCredential): + api_version = forms.TextInputField("API Version", required=True) + api_base = forms.TextInputField('Azure Endpoint', required=True) + api_key = forms.PasswordInputField("API Key", required=True) + + def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], model_params, provider, + raise_exception=False): + model_type_list = provider.get_model_type_list() + if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))): + raise AppApiException(ValidCode.valid_error.value, + gettext('{model_type} Model type is not supported').format(model_type=model_type)) + + for key in ['api_base', 'api_key', 'api_version']: + if key not in model_credential: + if raise_exception: + raise AppApiException(ValidCode.valid_error.value, gettext('{key} is required').format(key=key)) + else: + return False + try: + model = provider.get_model(model_type, model_name, model_credential, **model_params) + model.check_auth() + except Exception as e: + traceback.print_exc() + if isinstance(e, AppApiException): + raise e + if raise_exception: + raise AppApiException(ValidCode.valid_error.value, gettext( + 'Verification failed, please check whether the parameters are correct: {error}').format( + error=str(e))) + else: + return False + return True + + def encryption_dict(self, model: Dict[str, object]): + return {**model, 'api_key': super().encryption(model.get('api_key', ''))} + + def get_model_params_setting_form(self, model_name): + return AzureOpenAITTSModelGeneralParams() diff --git 
a/apps/setting/models_provider/impl/azure_model_provider/icon/azure_icon_svg b/apps/setting/models_provider/impl/azure_model_provider/icon/azure_icon_svg index d6499d2ad97..e5a2f98a0b0 100644 --- a/apps/setting/models_provider/impl/azure_model_provider/icon/azure_icon_svg +++ b/apps/setting/models_provider/impl/azure_model_provider/icon/azure_icon_svg @@ -1,9 +1 @@ - - - - - - - - - + \ No newline at end of file diff --git a/apps/setting/models_provider/impl/azure_model_provider/model/azure_chat_model.py b/apps/setting/models_provider/impl/azure_model_provider/model/azure_chat_model.py index 6388dbde27b..6046ae67ebf 100644 --- a/apps/setting/models_provider/impl/azure_model_provider/model/azure_chat_model.py +++ b/apps/setting/models_provider/impl/azure_model_provider/model/azure_chat_model.py @@ -6,15 +6,36 @@ @date:2024/4/28 11:45 @desc: """ -from typing import List + +from typing import List, Dict from langchain_core.messages import BaseMessage, get_buffer_string from langchain_openai import AzureChatOpenAI from common.config.tokenizer_manage_config import TokenizerManage +from setting.models_provider.base_model_provider import MaxKBBaseModel + + +class AzureChatModel(MaxKBBaseModel, AzureChatOpenAI): + @staticmethod + def is_cache_model(): + return False + + @staticmethod + def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs): + optional_params = MaxKBBaseModel.filter_optional_params(model_kwargs) + return AzureChatModel( + azure_endpoint=model_credential.get('api_base'), + model_name=model_name, + openai_api_version=model_credential.get('api_version', '2024-02-15-preview'), + deployment_name=model_credential.get('deployment_name'), + openai_api_key=model_credential.get('api_key'), + openai_api_type="azure", + **optional_params, + streaming=True, + ) -class AzureChatModel(AzureChatOpenAI): def get_num_tokens_from_messages(self, messages: List[BaseMessage]) -> int: try: return 
super().get_num_tokens_from_messages(messages) diff --git a/apps/setting/models_provider/impl/azure_model_provider/model/embedding.py b/apps/setting/models_provider/impl/azure_model_provider/model/embedding.py new file mode 100644 index 00000000000..f1b35db8ea8 --- /dev/null +++ b/apps/setting/models_provider/impl/azure_model_provider/model/embedding.py @@ -0,0 +1,25 @@ +# coding=utf-8 +""" + @project: MaxKB + @Author:虎 + @file: embedding.py + @date:2024/7/12 17:44 + @desc: +""" +from typing import Dict + +from langchain_openai import AzureOpenAIEmbeddings + +from setting.models_provider.base_model_provider import MaxKBBaseModel + + +class AzureOpenAIEmbeddingModel(MaxKBBaseModel, AzureOpenAIEmbeddings): + @staticmethod + def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs): + return AzureOpenAIEmbeddingModel( + model=model_name, + openai_api_key=model_credential.get('api_key'), + azure_endpoint=model_credential.get('api_base'), + openai_api_version=model_credential.get('api_version'), + openai_api_type="azure", + ) diff --git a/apps/setting/models_provider/impl/azure_model_provider/model/image.py b/apps/setting/models_provider/impl/azure_model_provider/model/image.py new file mode 100644 index 00000000000..14abab3af9a --- /dev/null +++ b/apps/setting/models_provider/impl/azure_model_provider/model/image.py @@ -0,0 +1,42 @@ +from typing import Dict, List + +from langchain_core.messages import BaseMessage, get_buffer_string +from langchain_openai import AzureChatOpenAI + +from common.config.tokenizer_manage_config import TokenizerManage +from setting.models_provider.base_model_provider import MaxKBBaseModel + + +def custom_get_token_ids(text: str): + tokenizer = TokenizerManage.get_tokenizer() + return tokenizer.encode(text) + + +class AzureOpenAIImage(MaxKBBaseModel, AzureChatOpenAI): + + @staticmethod + def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs): + optional_params = 
MaxKBBaseModel.filter_optional_params(model_kwargs) + return AzureOpenAIImage( + model_name=model_name, + openai_api_key=model_credential.get('api_key'), + azure_endpoint=model_credential.get('api_base'), + openai_api_version=model_credential.get('api_version'), + openai_api_type="azure", + streaming=True, + **optional_params, + ) + + def get_num_tokens_from_messages(self, messages: List[BaseMessage]) -> int: + try: + return super().get_num_tokens_from_messages(messages) + except Exception as e: + tokenizer = TokenizerManage.get_tokenizer() + return sum([len(tokenizer.encode(get_buffer_string([m]))) for m in messages]) + + def get_num_tokens(self, text: str) -> int: + try: + return super().get_num_tokens(text) + except Exception as e: + tokenizer = TokenizerManage.get_tokenizer() + return len(tokenizer.encode(text)) diff --git a/apps/setting/models_provider/impl/azure_model_provider/model/stt.py b/apps/setting/models_provider/impl/azure_model_provider/model/stt.py new file mode 100644 index 00000000000..5a4aab5fd06 --- /dev/null +++ b/apps/setting/models_provider/impl/azure_model_provider/model/stt.py @@ -0,0 +1,62 @@ +import io +from typing import Dict + +from openai import AzureOpenAI + +from common.config.tokenizer_manage_config import TokenizerManage +from setting.models_provider.base_model_provider import MaxKBBaseModel +from setting.models_provider.impl.base_stt import BaseSpeechToText + + +def custom_get_token_ids(text: str): + tokenizer = TokenizerManage.get_tokenizer() + return tokenizer.encode(text) + + +class AzureOpenAISpeechToText(MaxKBBaseModel, BaseSpeechToText): + api_base: str + api_key: str + api_version: str + model: str + + def __init__(self, **kwargs): + super().__init__(**kwargs) + self.api_key = kwargs.get('api_key') + self.api_base = kwargs.get('api_base') + self.api_version = kwargs.get('api_version') + + @staticmethod + def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs): + optional_params = {} + 
if 'max_tokens' in model_kwargs and model_kwargs['max_tokens'] is not None: + optional_params['max_tokens'] = model_kwargs['max_tokens'] + if 'temperature' in model_kwargs and model_kwargs['temperature'] is not None: + optional_params['temperature'] = model_kwargs['temperature'] + return AzureOpenAISpeechToText( + model=model_name, + api_base=model_credential.get('api_base'), + api_key=model_credential.get('api_key'), + api_version=model_credential.get('api_version'), + **optional_params, + ) + + def check_auth(self): + client = AzureOpenAI( + azure_endpoint=self.api_base, + api_key=self.api_key, + api_version=self.api_version + ) + response_list = client.models.with_raw_response.list() + # print(response_list) + + def speech_to_text(self, audio_file): + client = AzureOpenAI( + azure_endpoint=self.api_base, + api_key=self.api_key, + api_version=self.api_version + ) + audio_data = audio_file.read() + buffer = io.BytesIO(audio_data) + buffer.name = "file.mp3" # this is the important line + res = client.audio.transcriptions.create(model=self.model, language="zh", file=buffer) + return res.text diff --git a/apps/setting/models_provider/impl/azure_model_provider/model/tti.py b/apps/setting/models_provider/impl/azure_model_provider/model/tti.py new file mode 100644 index 00000000000..fd5c6ffab6b --- /dev/null +++ b/apps/setting/models_provider/impl/azure_model_provider/model/tti.py @@ -0,0 +1,61 @@ +from typing import Dict + +from openai import AzureOpenAI + +from common.config.tokenizer_manage_config import TokenizerManage +from setting.models_provider.base_model_provider import MaxKBBaseModel +from setting.models_provider.impl.base_tti import BaseTextToImage + + +def custom_get_token_ids(text: str): + tokenizer = TokenizerManage.get_tokenizer() + return tokenizer.encode(text) + + +class AzureOpenAITextToImage(MaxKBBaseModel, BaseTextToImage): + api_base: str + api_key: str + api_version: str + model: str + params: dict + + def __init__(self, **kwargs): + 
super().__init__(**kwargs) + self.api_key = kwargs.get('api_key') + self.api_base = kwargs.get('api_base') + self.api_version = kwargs.get('api_version') + self.model = kwargs.get('model') + self.params = kwargs.get('params') + + @staticmethod + def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs): + optional_params = {'params': {'size': '1024x1024', 'quality': 'standard', 'n': 1}} + for key, value in model_kwargs.items(): + if key not in ['model_id', 'use_local', 'streaming']: + optional_params['params'][key] = value + return AzureOpenAITextToImage( + model=model_name, + api_base=model_credential.get('api_base'), + api_key=model_credential.get('api_key'), + api_version=model_credential.get('api_version'), + **optional_params, + ) + + def is_cache_model(self): + return False + + def check_auth(self): + chat = AzureOpenAI(api_key=self.api_key, azure_endpoint=self.api_base, api_version=self.api_version) + response_list = chat.models.with_raw_response.list() + + # self.generate_image('生成一个小猫图片') + + def generate_image(self, prompt: str, negative_prompt: str = None): + chat = AzureOpenAI(api_key=self.api_key, azure_endpoint=self.api_base, api_version=self.api_version) + res = chat.images.generate(model=self.model, prompt=prompt, **self.params) + file_urls = [] + for content in res.data: + url = content.url + file_urls.append(url) + + return file_urls diff --git a/apps/setting/models_provider/impl/azure_model_provider/model/tts.py b/apps/setting/models_provider/impl/azure_model_provider/model/tts.py new file mode 100644 index 00000000000..fa0676534e6 --- /dev/null +++ b/apps/setting/models_provider/impl/azure_model_provider/model/tts.py @@ -0,0 +1,69 @@ +from typing import Dict + +from openai import AzureOpenAI + +from common.config.tokenizer_manage_config import TokenizerManage +from common.util.common import _remove_empty_lines +from setting.models_provider.base_model_provider import MaxKBBaseModel +from 
setting.models_provider.impl.base_tts import BaseTextToSpeech + + +def custom_get_token_ids(text: str): + tokenizer = TokenizerManage.get_tokenizer() + return tokenizer.encode(text) + + +class AzureOpenAITextToSpeech(MaxKBBaseModel, BaseTextToSpeech): + api_base: str + api_key: str + api_version: str + model: str + params: dict + + def __init__(self, **kwargs): + super().__init__(**kwargs) + self.api_key = kwargs.get('api_key') + self.api_base = kwargs.get('api_base') + self.api_version = kwargs.get('api_version') + self.model = kwargs.get('model') + self.params = kwargs.get('params') + + @staticmethod + def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs): + optional_params = {'params': {'voice': 'alloy'}} + for key, value in model_kwargs.items(): + if key not in ['model_id', 'use_local', 'streaming']: + optional_params['params'][key] = value + return AzureOpenAITextToSpeech( + model=model_name, + api_base=model_credential.get('api_base'), + api_key=model_credential.get('api_key'), + api_version=model_credential.get('api_version'), + **optional_params, + ) + + def check_auth(self): + client = AzureOpenAI( + azure_endpoint=self.api_base, + api_key=self.api_key, + api_version=self.api_version + ) + response_list = client.models.with_raw_response.list() + # print(response_list) + + def text_to_speech(self, text): + client = AzureOpenAI( + azure_endpoint=self.api_base, + api_key=self.api_key, + api_version=self.api_version + ) + text = _remove_empty_lines(text) + with client.audio.speech.with_streaming_response.create( + model=self.model, + input=text, + **self.params + ) as response: + return response.read() + + def is_cache_model(self): + return False diff --git a/apps/setting/models_provider/impl/base_chat_open_ai.py b/apps/setting/models_provider/impl/base_chat_open_ai.py new file mode 100644 index 00000000000..ccafc0e0bf2 --- /dev/null +++ b/apps/setting/models_provider/impl/base_chat_open_ai.py @@ -0,0 +1,199 @@ +# 
coding=utf-8 +from typing import Dict, Optional, Any, Iterator, cast, Union, Sequence, Callable, Mapping + +from langchain_core.language_models import LanguageModelInput +from langchain_core.messages import BaseMessage, get_buffer_string, BaseMessageChunk, HumanMessageChunk, AIMessageChunk, \ + SystemMessageChunk, FunctionMessageChunk, ChatMessageChunk +from langchain_core.messages.ai import UsageMetadata +from langchain_core.messages.tool import tool_call_chunk, ToolMessageChunk +from langchain_core.outputs import ChatGenerationChunk +from langchain_core.runnables import RunnableConfig, ensure_config +from langchain_core.tools import BaseTool +from langchain_openai import ChatOpenAI +from langchain_openai.chat_models.base import _create_usage_metadata + +from common.config.tokenizer_manage_config import TokenizerManage + + +def custom_get_token_ids(text: str): + tokenizer = TokenizerManage.get_tokenizer() + return tokenizer.encode(text) + + +def _convert_delta_to_message_chunk( + _dict: Mapping[str, Any], default_class: type[BaseMessageChunk] +) -> BaseMessageChunk: + id_ = _dict.get("id") + role = cast(str, _dict.get("role")) + content = cast(str, _dict.get("content") or "") + additional_kwargs: dict = {} + if 'reasoning_content' in _dict: + additional_kwargs['reasoning_content'] = _dict.get('reasoning_content') + if _dict.get("function_call"): + function_call = dict(_dict["function_call"]) + if "name" in function_call and function_call["name"] is None: + function_call["name"] = "" + additional_kwargs["function_call"] = function_call + tool_call_chunks = [] + if raw_tool_calls := _dict.get("tool_calls"): + additional_kwargs["tool_calls"] = raw_tool_calls + try: + tool_call_chunks = [ + tool_call_chunk( + name=rtc["function"].get("name"), + args=rtc["function"].get("arguments"), + id=rtc.get("id"), + index=rtc["index"], + ) + for rtc in raw_tool_calls + ] + except KeyError: + pass + + if role == "user" or default_class == HumanMessageChunk: + return 
HumanMessageChunk(content=content, id=id_) + elif role == "assistant" or default_class == AIMessageChunk: + return AIMessageChunk( + content=content, + additional_kwargs=additional_kwargs, + id=id_, + tool_call_chunks=tool_call_chunks, # type: ignore[arg-type] + ) + elif role in ("system", "developer") or default_class == SystemMessageChunk: + if role == "developer": + additional_kwargs = {"__openai_role__": "developer"} + else: + additional_kwargs = {} + return SystemMessageChunk( + content=content, id=id_, additional_kwargs=additional_kwargs + ) + elif role == "function" or default_class == FunctionMessageChunk: + return FunctionMessageChunk(content=content, name=_dict["name"], id=id_) + elif role == "tool" or default_class == ToolMessageChunk: + return ToolMessageChunk( + content=content, tool_call_id=_dict["tool_call_id"], id=id_ + ) + elif role or default_class == ChatMessageChunk: + return ChatMessageChunk(content=content, role=role, id=id_) + else: + return default_class(content=content, id=id_) # type: ignore + + +class BaseChatOpenAI(ChatOpenAI): + usage_metadata: dict = {} + custom_get_token_ids = custom_get_token_ids + + def get_last_generation_info(self) -> Optional[Dict[str, Any]]: + return self.usage_metadata + + def get_num_tokens_from_messages( + self, + messages: list[BaseMessage], + tools: Optional[ + Sequence[Union[dict[str, Any], type, Callable, BaseTool]] + ] = None, + ) -> int: + if self.usage_metadata is None or self.usage_metadata == {}: + try: + return super().get_num_tokens_from_messages(messages) + except Exception as e: + tokenizer = TokenizerManage.get_tokenizer() + return sum([len(tokenizer.encode(get_buffer_string([m]))) for m in messages]) + return self.usage_metadata.get('input_tokens', 0) + + def get_num_tokens(self, text: str) -> int: + if self.usage_metadata is None or self.usage_metadata == {}: + try: + return super().get_num_tokens(text) + except Exception as e: + tokenizer = TokenizerManage.get_tokenizer() + return 
len(tokenizer.encode(text)) + return self.get_last_generation_info().get('output_tokens', 0) + + def _stream(self, *args: Any, **kwargs: Any) -> Iterator[ChatGenerationChunk]: + kwargs['stream_usage'] = True + for chunk in super()._stream(*args, **kwargs): + if chunk.message.usage_metadata is not None: + self.usage_metadata = chunk.message.usage_metadata + yield chunk + + def _convert_chunk_to_generation_chunk( + self, + chunk: dict, + default_chunk_class: type, + base_generation_info: Optional[dict], + ) -> Optional[ChatGenerationChunk]: + if chunk.get("type") == "content.delta": # from beta.chat.completions.stream + return None + token_usage = chunk.get("usage") + choices = ( + chunk.get("choices", []) + # from beta.chat.completions.stream + or chunk.get("chunk", {}).get("choices", []) + ) + + usage_metadata: Optional[UsageMetadata] = ( + _create_usage_metadata(token_usage) if token_usage and token_usage.get("prompt_tokens") else None + ) + if len(choices) == 0: + # logprobs is implicitly None + generation_chunk = ChatGenerationChunk( + message=default_chunk_class(content="", usage_metadata=usage_metadata) + ) + return generation_chunk + + choice = choices[0] + if choice["delta"] is None: + return None + + message_chunk = _convert_delta_to_message_chunk( + choice["delta"], default_chunk_class + ) + generation_info = {**base_generation_info} if base_generation_info else {} + + if finish_reason := choice.get("finish_reason"): + generation_info["finish_reason"] = finish_reason + if model_name := chunk.get("model"): + generation_info["model_name"] = model_name + if system_fingerprint := chunk.get("system_fingerprint"): + generation_info["system_fingerprint"] = system_fingerprint + + logprobs = choice.get("logprobs") + if logprobs: + generation_info["logprobs"] = logprobs + + if usage_metadata and isinstance(message_chunk, AIMessageChunk): + message_chunk.usage_metadata = usage_metadata + + generation_chunk = ChatGenerationChunk( + message=message_chunk, 
generation_info=generation_info or None + ) + return generation_chunk + + def invoke( + self, + input: LanguageModelInput, + config: Optional[RunnableConfig] = None, + *, + stop: Optional[list[str]] = None, + **kwargs: Any, + ) -> BaseMessage: + config = ensure_config(config) + chat_result = cast( + "ChatGeneration", + self.generate_prompt( + [self._convert_input(input)], + stop=stop, + callbacks=config.get("callbacks"), + tags=config.get("tags"), + metadata=config.get("metadata"), + run_name=config.get("run_name"), + run_id=config.pop("run_id", None), + **kwargs, + ).generations[0][0], + + ).message + + self.usage_metadata = chat_result.response_metadata[ + 'token_usage'] if 'token_usage' in chat_result.response_metadata else chat_result.usage_metadata + return chat_result diff --git a/apps/setting/models_provider/impl/base_stt.py b/apps/setting/models_provider/impl/base_stt.py new file mode 100644 index 00000000000..aae72a559eb --- /dev/null +++ b/apps/setting/models_provider/impl/base_stt.py @@ -0,0 +1,14 @@ +# coding=utf-8 +from abc import abstractmethod + +from pydantic import BaseModel + + +class BaseSpeechToText(BaseModel): + @abstractmethod + def check_auth(self): + pass + + @abstractmethod + def speech_to_text(self, audio_file): + pass diff --git a/apps/setting/models_provider/impl/base_tti.py b/apps/setting/models_provider/impl/base_tti.py new file mode 100644 index 00000000000..5e34d12cd11 --- /dev/null +++ b/apps/setting/models_provider/impl/base_tti.py @@ -0,0 +1,14 @@ +# coding=utf-8 +from abc import abstractmethod + +from pydantic import BaseModel + + +class BaseTextToImage(BaseModel): + @abstractmethod + def check_auth(self): + pass + + @abstractmethod + def generate_image(self, prompt: str, negative_prompt: str = None): + pass diff --git a/apps/setting/models_provider/impl/base_tts.py b/apps/setting/models_provider/impl/base_tts.py new file mode 100644 index 00000000000..6311f268653 --- /dev/null +++ b/apps/setting/models_provider/impl/base_tts.py 
@@ -0,0 +1,14 @@ +# coding=utf-8 +from abc import abstractmethod + +from pydantic import BaseModel + + +class BaseTextToSpeech(BaseModel): + @abstractmethod + def check_auth(self): + pass + + @abstractmethod + def text_to_speech(self, text): + pass diff --git a/apps/setting/models_provider/impl/deepseek_model_provider/credential/llm.py b/apps/setting/models_provider/impl/deepseek_model_provider/credential/llm.py new file mode 100644 index 00000000000..015deacee48 --- /dev/null +++ b/apps/setting/models_provider/impl/deepseek_model_provider/credential/llm.py @@ -0,0 +1,77 @@ +# coding=utf-8 +""" + @project: MaxKB + @Author:虎 + @file: llm.py + @date:2024/7/11 17:51 + @desc: +""" +import traceback +from typing import Dict + +from django.utils.translation import gettext_lazy as _, gettext +from langchain_core.messages import HumanMessage + +from common import forms +from common.exception.app_exception import AppApiException +from common.forms import BaseForm, TooltipLabel +from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode + + +class DeepSeekLLMModelParams(BaseForm): + temperature = forms.SliderField(TooltipLabel(_('Temperature'), + _('Higher values make the output more random, while lower values make it more focused and deterministic')), + required=True, default_value=0.7, + _min=0.1, + _max=1.0, + _step=0.01, + precision=2) + + max_tokens = forms.SliderField( + TooltipLabel(_('Output the maximum Tokens'), + _('Specify the maximum number of tokens that the model can generate')), + required=True, default_value=800, + _min=1, + _max=100000, + _step=1, + precision=0) + + +class DeepSeekLLMModelCredential(BaseForm, BaseModelCredential): + + def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], model_params, provider, + raise_exception=False): + model_type_list = provider.get_model_type_list() + if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))): + raise 
AppApiException(ValidCode.valid_error.value, + gettext('{model_type} Model type is not supported').format(model_type=model_type)) + + for key in ['api_key']: + if key not in model_credential: + if raise_exception: + raise AppApiException(ValidCode.valid_error.value, gettext('{key} is required').format(key=key)) + else: + return False + try: + model = provider.get_model(model_type, model_name, model_credential, **model_params) + model.invoke([HumanMessage(content=gettext('Hello'))]) + except Exception as e: + traceback.print_exc() + if isinstance(e, AppApiException): + raise e + if raise_exception: + raise AppApiException(ValidCode.valid_error.value, + gettext( + 'Verification failed, please check whether the parameters are correct: {error}').format( + error=str(e))) + else: + return False + return True + + def encryption_dict(self, model: Dict[str, object]): + return {**model, 'api_key': super().encryption(model.get('api_key', ''))} + + api_key = forms.PasswordInputField('API Key', required=True) + + def get_model_params_setting_form(self, model_name): + return DeepSeekLLMModelParams() diff --git a/apps/setting/models_provider/impl/deepseek_model_provider/deepseek_model_provider.py b/apps/setting/models_provider/impl/deepseek_model_provider/deepseek_model_provider.py index 3baa5f04ad7..0ebb2884932 100644 --- a/apps/setting/models_provider/impl/deepseek_model_provider/deepseek_model_provider.py +++ b/apps/setting/models_provider/impl/deepseek_model_provider/deepseek_model_provider.py @@ -7,91 +7,41 @@ @Date :5/12/24 7:40 AM """ import os -from typing import Dict -from langchain.schema import HumanMessage - -from common import forms -from common.exception.app_exception import AppApiException -from common.forms import BaseForm from common.util.file_util import get_file_content -from setting.models_provider.base_model_provider import IModelProvider, ModelProvideInfo, BaseModelCredential, \ - ModelInfo, ModelTypeConst, ValidCode -from 
setting.models_provider.impl.deepseek_model_provider.model.deepseek_chat_model import DeepSeekChatModel +from setting.models_provider.base_model_provider import IModelProvider, ModelProvideInfo, ModelInfo, ModelTypeConst, \ + ModelInfoManage +from setting.models_provider.impl.deepseek_model_provider.credential.llm import DeepSeekLLMModelCredential +from setting.models_provider.impl.deepseek_model_provider.model.llm import DeepSeekChatModel from smartdoc.conf import PROJECT_DIR - - -class DeepSeekLLMModelCredential(BaseForm, BaseModelCredential): - - def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], raise_exception=False): - model_type_list = DeepSeekModelProvider().get_model_type_list() - if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))): - raise AppApiException(ValidCode.valid_error.value, f'{model_type} 模型类型不支持') - - for key in ['api_key']: - if key not in model_credential: - if raise_exception: - raise AppApiException(ValidCode.valid_error.value, f'{key} 字段为必填字段') - else: - return False - try: - model = DeepSeekModelProvider().get_model(model_type, model_name, model_credential) - model.invoke([HumanMessage(content='你好')]) - except Exception as e: - if isinstance(e, AppApiException): - raise e - if raise_exception: - raise AppApiException(ValidCode.valid_error.value, f'校验失败,请检查参数是否正确: {str(e)}') - else: - return False - return True - - def encryption_dict(self, model: Dict[str, object]): - return {**model, 'api_key': super().encryption(model.get('api_key', ''))} - - api_key = forms.PasswordInputField('API Key', required=True) - +from django.utils.translation import gettext as _ deepseek_llm_model_credential = DeepSeekLLMModelCredential() +deepseek_reasoner = ModelInfo('deepseek-reasoner', '', ModelTypeConst.LLM, + deepseek_llm_model_credential, DeepSeekChatModel + ) -model_dict = { - 'deepseek-chat': ModelInfo('deepseek-chat', '擅长通用对话任务,支持 32K 上下文', ModelTypeConst.LLM, - 
deepseek_llm_model_credential, - ), - 'deepseek-coder': ModelInfo('deepseek-coder', '擅长处理编程任务,支持 16K 上下文', ModelTypeConst.LLM, - deepseek_llm_model_credential, - ), -} +deepseek_chat = ModelInfo('deepseek-chat', _('Good at common conversational tasks, supports 32K contexts'), + ModelTypeConst.LLM, + deepseek_llm_model_credential, DeepSeekChatModel + ) +deepseek_coder = ModelInfo('deepseek-coder', _('Good at handling programming tasks, supports 16K contexts'), + ModelTypeConst.LLM, + deepseek_llm_model_credential, + DeepSeekChatModel) -class DeepSeekModelProvider(IModelProvider): +model_info_manage = ModelInfoManage.builder().append_model_info(deepseek_reasoner).append_model_info(deepseek_chat).append_model_info( + deepseek_coder).append_default_model_info( + deepseek_coder).build() - def get_dialogue_number(self): - return 3 - def get_model(self, model_type, model_name, model_credential: Dict[str, object], **model_kwargs) -> DeepSeekChatModel: - deepseek_chat_open_ai = DeepSeekChatModel( - model=model_name, - openai_api_base='https://api.deepseek.com', - openai_api_key=model_credential.get('api_key') - ) - return deepseek_chat_open_ai +class DeepSeekModelProvider(IModelProvider): - def get_model_credential(self, model_type, model_name): - if model_name in model_dict: - return model_dict.get(model_name).model_credential - return deepseek_llm_model_credential + def get_model_info_manage(self): + return model_info_manage def get_model_provide_info(self): return ModelProvideInfo(provider='model_deepseek_provider', name='DeepSeek', icon=get_file_content( os.path.join(PROJECT_DIR, "apps", "setting", 'models_provider', 'impl', 'deepseek_model_provider', 'icon', 'deepseek_icon_svg'))) - - def get_model_list(self, model_type: str): - if model_type is None: - raise AppApiException(500, '模型类型不能为空') - return [model_dict.get(key).to_dict() for key in - list(filter(lambda key: model_dict.get(key).model_type == model_type, model_dict.keys()))] - - def get_model_type_list(self): - 
return [{'key': "大语言模型", 'value': "LLM"}] diff --git a/apps/setting/models_provider/impl/deepseek_model_provider/model/deepseek_chat_model.py b/apps/setting/models_provider/impl/deepseek_model_provider/model/deepseek_chat_model.py deleted file mode 100644 index b7a54b302d9..00000000000 --- a/apps/setting/models_provider/impl/deepseek_model_provider/model/deepseek_chat_model.py +++ /dev/null @@ -1,30 +0,0 @@ -#!/usr/bin/env python -# -*- coding: UTF-8 -*- -""" -@Project :MaxKB -@File :deepseek_chat_model.py -@Author :Brian Yang -@Date :5/12/24 7:44 AM -""" -from typing import List - -from langchain_core.messages import BaseMessage, get_buffer_string -from langchain_openai import ChatOpenAI - -from common.config.tokenizer_manage_config import TokenizerManage - - -class DeepSeekChatModel(ChatOpenAI): - def get_num_tokens_from_messages(self, messages: List[BaseMessage]) -> int: - try: - return super().get_num_tokens_from_messages(messages) - except Exception as e: - tokenizer = TokenizerManage.get_tokenizer() - return sum([len(tokenizer.encode(get_buffer_string([m]))) for m in messages]) - - def get_num_tokens(self, text: str) -> int: - try: - return super().get_num_tokens(text) - except Exception as e: - tokenizer = TokenizerManage.get_tokenizer() - return len(tokenizer.encode(text)) diff --git a/apps/setting/models_provider/impl/deepseek_model_provider/model/llm.py b/apps/setting/models_provider/impl/deepseek_model_provider/model/llm.py new file mode 100644 index 00000000000..081d648a716 --- /dev/null +++ b/apps/setting/models_provider/impl/deepseek_model_provider/model/llm.py @@ -0,0 +1,31 @@ +#!/usr/bin/env python +# -*- coding: UTF-8 -*- +""" +@Project :MaxKB +@File :llm.py +@Author :Brian Yang +@Date :5/12/24 7:44 AM +""" +from typing import Dict + +from setting.models_provider.base_model_provider import MaxKBBaseModel +from setting.models_provider.impl.base_chat_open_ai import BaseChatOpenAI + + +class DeepSeekChatModel(MaxKBBaseModel, BaseChatOpenAI): + + 
@staticmethod + def is_cache_model(): + return False + + @staticmethod + def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs): + optional_params = MaxKBBaseModel.filter_optional_params(model_kwargs) + + deepseek_chat_open_ai = DeepSeekChatModel( + model=model_name, + openai_api_base='https://api.deepseek.com', + openai_api_key=model_credential.get('api_key'), + extra_body=optional_params + ) + return deepseek_chat_open_ai diff --git a/apps/setting/models_provider/impl/gemini_model_provider/credential/embedding.py b/apps/setting/models_provider/impl/gemini_model_provider/credential/embedding.py new file mode 100644 index 00000000000..22724ec08e9 --- /dev/null +++ b/apps/setting/models_provider/impl/gemini_model_provider/credential/embedding.py @@ -0,0 +1,52 @@ +# coding=utf-8 +""" + @project: MaxKB + @Author:虎 + @file: embedding.py + @date:2024/7/12 16:45 + @desc: +""" +import traceback +from typing import Dict + +from django.utils.translation import gettext as _ + +from common import forms +from common.exception.app_exception import AppApiException +from common.forms import BaseForm +from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode + + +class GeminiEmbeddingCredential(BaseForm, BaseModelCredential): + def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], model_params, provider, + raise_exception=True): + model_type_list = provider.get_model_type_list() + if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))): + raise AppApiException(ValidCode.valid_error.value, + _('{model_type} Model type is not supported').format(model_type=model_type)) + + for key in ['api_key']: + if key not in model_credential: + if raise_exception: + raise AppApiException(ValidCode.valid_error.value, _('{key} is required').format(key=key)) + else: + return False + try: + model = provider.get_model(model_type, model_name, model_credential) + 
model.embed_query(_('Hello')) + except Exception as e: + traceback.print_exc() + if isinstance(e, AppApiException): + raise e + if raise_exception: + raise AppApiException(ValidCode.valid_error.value, + _('Verification failed, please check whether the parameters are correct: {error}').format( + error=str(e))) + else: + return False + return True + + def encryption_dict(self, model: Dict[str, object]): + return {**model, 'api_key': super().encryption(model.get('api_key', ''))} + + api_key = forms.PasswordInputField('API Key', required=True) diff --git a/apps/setting/models_provider/impl/gemini_model_provider/credential/image.py b/apps/setting/models_provider/impl/gemini_model_provider/credential/image.py new file mode 100644 index 00000000000..87d667a61c8 --- /dev/null +++ b/apps/setting/models_provider/impl/gemini_model_provider/credential/image.py @@ -0,0 +1,71 @@ +# coding=utf-8 +import traceback +from typing import Dict + +from django.utils.translation import gettext_lazy as _, gettext +from langchain_core.messages import HumanMessage + +from common import forms +from common.exception.app_exception import AppApiException +from common.forms import BaseForm, TooltipLabel +from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode + + +class GeminiImageModelParams(BaseForm): + temperature = forms.SliderField(TooltipLabel(_('Temperature'), + _('Higher values make the output more random, while lower values make it more focused and deterministic')), + required=True, default_value=0.7, + _min=0.1, + _max=1.0, + _step=0.01, + precision=2) + + max_tokens = forms.SliderField( + TooltipLabel(_('Output the maximum Tokens'), + _('Specify the maximum number of tokens that the model can generate')), + required=True, default_value=800, + _min=1, + _max=100000, + _step=1, + precision=0) + + +class GeminiImageModelCredential(BaseForm, BaseModelCredential): + api_key = forms.PasswordInputField('API Key', required=True) + + def is_valid(self, model_type: 
str, model_name, model_credential: Dict[str, object], model_params, provider, + raise_exception=False): + model_type_list = provider.get_model_type_list() + if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))): + raise AppApiException(ValidCode.valid_error.value, + gettext('{model_type} Model type is not supported').format(model_type=model_type)) + + for key in ['api_key']: + if key not in model_credential: + if raise_exception: + raise AppApiException(ValidCode.valid_error.value, gettext('{key} is required').format(key=key)) + else: + return False + try: + model = provider.get_model(model_type, model_name, model_credential, **model_params) + res = model.stream([HumanMessage(content=[{"type": "text", "text": gettext('Hello')}])]) + for chunk in res: + print(chunk) + except Exception as e: + traceback.print_exc() + if isinstance(e, AppApiException): + raise e + if raise_exception: + raise AppApiException(ValidCode.valid_error.value, + gettext( + 'Verification failed, please check whether the parameters are correct: {error}').format( + error=str(e))) + else: + return False + return True + + def encryption_dict(self, model: Dict[str, object]): + return {**model, 'api_key': super().encryption(model.get('api_key', ''))} + + def get_model_params_setting_form(self, model_name): + return GeminiImageModelParams() diff --git a/apps/setting/models_provider/impl/gemini_model_provider/credential/llm.py b/apps/setting/models_provider/impl/gemini_model_provider/credential/llm.py new file mode 100644 index 00000000000..d02b9fdc356 --- /dev/null +++ b/apps/setting/models_provider/impl/gemini_model_provider/credential/llm.py @@ -0,0 +1,78 @@ +# coding=utf-8 +""" + @project: MaxKB + @Author:虎 + @file: llm.py + @date:2024/7/11 17:57 + @desc: +""" +import traceback +from typing import Dict + +from django.utils.translation import gettext_lazy as _, gettext +from langchain_core.messages import HumanMessage + +from common import forms +from 
common.exception.app_exception import AppApiException +from common.forms import BaseForm, TooltipLabel +from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode + + +class GeminiLLMModelParams(BaseForm): + temperature = forms.SliderField(TooltipLabel(_('Temperature'), + _('Higher values make the output more random, while lower values make it more focused and deterministic')), + required=True, default_value=0.7, + _min=0.1, + _max=1.0, + _step=0.01, + precision=2) + + max_tokens = forms.SliderField( + TooltipLabel(_('Output the maximum Tokens'), + _('Specify the maximum number of tokens that the model can generate')), + required=True, default_value=800, + _min=1, + _max=100000, + _step=1, + precision=0) + + +class GeminiLLMModelCredential(BaseForm, BaseModelCredential): + + def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], model_params, provider, + raise_exception=False): + model_type_list = provider.get_model_type_list() + if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))): + raise AppApiException(ValidCode.valid_error.value, + gettext('{model_type} Model type is not supported').format(model_type=model_type)) + + for key in ['api_key']: + if key not in model_credential: + if raise_exception: + raise AppApiException(ValidCode.valid_error.value, gettext('{key} is required').format(key=key)) + else: + return False + try: + model = provider.get_model(model_type, model_name, model_credential, **model_params) + res = model.invoke([HumanMessage(content=gettext('Hello'))]) + print(res) + except Exception as e: + traceback.print_exc() + if isinstance(e, AppApiException): + raise e + if raise_exception: + raise AppApiException(ValidCode.valid_error.value, + gettext( + 'Verification failed, please check whether the parameters are correct: {error}').format( + error=str(e))) + else: + return False + return True + + def encryption_dict(self, model: Dict[str, object]): + return 
{**model, 'api_key': super().encryption(model.get('api_key', ''))} + + api_key = forms.PasswordInputField('API Key', required=True) + + def get_model_params_setting_form(self, model_name): + return GeminiLLMModelParams() diff --git a/apps/setting/models_provider/impl/gemini_model_provider/credential/stt.py b/apps/setting/models_provider/impl/gemini_model_provider/credential/stt.py new file mode 100644 index 00000000000..0092d955e29 --- /dev/null +++ b/apps/setting/models_provider/impl/gemini_model_provider/credential/stt.py @@ -0,0 +1,48 @@ +# coding=utf-8 +import traceback +from typing import Dict + +from django.utils.translation import gettext as _ + +from common import forms +from common.exception.app_exception import AppApiException +from common.forms import BaseForm +from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode + + +class GeminiSTTModelCredential(BaseForm, BaseModelCredential): + api_key = forms.PasswordInputField('API Key', required=True) + + def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], model_params, provider, + raise_exception=False): + model_type_list = provider.get_model_type_list() + if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))): + raise AppApiException(ValidCode.valid_error.value, + _('{model_type} Model type is not supported').format(model_type=model_type)) + + for key in ['api_key']: + if key not in model_credential: + if raise_exception: + raise AppApiException(ValidCode.valid_error.value, _('{key} is required').format(key=key)) + else: + return False + try: + model = provider.get_model(model_type, model_name, model_credential) + model.check_auth() + except Exception as e: + traceback.print_exc() + if isinstance(e, AppApiException): + raise e + if raise_exception: + raise AppApiException(ValidCode.valid_error.value, + _('Verification failed, please check whether the parameters are correct: {error}').format( + error=str(e))) + else: + 
return False + return True + + def encryption_dict(self, model: Dict[str, object]): + return {**model, 'api_key': super().encryption(model.get('api_key', ''))} + + def get_model_params_setting_form(self, model_name): + pass diff --git a/apps/setting/models_provider/impl/gemini_model_provider/gemini_model_provider.py b/apps/setting/models_provider/impl/gemini_model_provider/gemini_model_provider.py index 5ddddf782af..0771b0c128d 100644 --- a/apps/setting/models_provider/impl/gemini_model_provider/gemini_model_provider.py +++ b/apps/setting/models_provider/impl/gemini_model_provider/gemini_model_provider.py @@ -7,93 +7,91 @@ @Date :5/13/24 7:47 AM """ import os -from typing import Dict -from langchain.schema import HumanMessage - -from common import forms -from common.exception.app_exception import AppApiException -from common.forms import BaseForm from common.util.file_util import get_file_content -from setting.models_provider.base_model_provider import IModelProvider, ModelProvideInfo, BaseModelCredential, \ - ModelInfo, ModelTypeConst, ValidCode -from setting.models_provider.impl.gemini_model_provider.model.gemini_chat_model import GeminiChatModel +from setting.models_provider.base_model_provider import IModelProvider, ModelProvideInfo, ModelInfo, ModelTypeConst, \ + ModelInfoManage +from setting.models_provider.impl.gemini_model_provider.credential.embedding import GeminiEmbeddingCredential +from setting.models_provider.impl.gemini_model_provider.credential.image import GeminiImageModelCredential +from setting.models_provider.impl.gemini_model_provider.credential.llm import GeminiLLMModelCredential +from setting.models_provider.impl.gemini_model_provider.credential.stt import GeminiSTTModelCredential +from setting.models_provider.impl.gemini_model_provider.model.embedding import GeminiEmbeddingModel +from setting.models_provider.impl.gemini_model_provider.model.image import GeminiImage +from setting.models_provider.impl.gemini_model_provider.model.llm import 
GeminiChatModel +from setting.models_provider.impl.gemini_model_provider.model.stt import GeminiSpeechToText from smartdoc.conf import PROJECT_DIR - - -class GeminiLLMModelCredential(BaseForm, BaseModelCredential): - - def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], raise_exception=False): - model_type_list = GeminiModelProvider().get_model_type_list() - if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))): - raise AppApiException(ValidCode.valid_error.value, f'{model_type} 模型类型不支持') - - for key in ['api_key']: - if key not in model_credential: - if raise_exception: - raise AppApiException(ValidCode.valid_error.value, f'{key} 字段为必填字段') - else: - return False - try: - model = GeminiModelProvider().get_model(model_type, model_name, model_credential) - model.invoke([HumanMessage(content='你好')]) - except Exception as e: - if isinstance(e, AppApiException): - raise e - if raise_exception: - raise AppApiException(ValidCode.valid_error.value, f'校验失败,请检查参数是否正确: {str(e)}') - else: - return False - return True - - def encryption_dict(self, model: Dict[str, object]): - return {**model, 'api_key': super().encryption(model.get('api_key', ''))} - - api_key = forms.PasswordInputField('API Key', required=True) +from django.utils.translation import gettext as _ gemini_llm_model_credential = GeminiLLMModelCredential() - -model_dict = { - 'gemini-1.0-pro': ModelInfo('gemini-1.0-pro', '最新的Gemini 1.0 Pro模型,随Google更新而更新', - ModelTypeConst.LLM, - gemini_llm_model_credential, - ), - 'gemini-1.0-pro-vision': ModelInfo('gemini-1.0-pro-vision', '最新的Gemini 1.0 Pro Vision模型,随Google更新而更新', - ModelTypeConst.LLM, - gemini_llm_model_credential, - ), -} +gemini_image_model_credential = GeminiImageModelCredential() +gemini_stt_model_credential = GeminiSTTModelCredential() +gemini_embedding_model_credential = GeminiEmbeddingCredential() + +model_info_list = [ + ModelInfo('gemini-1.0-pro', _('Latest Gemini 1.0 Pro model, updated with 
Google update'), + ModelTypeConst.LLM, + gemini_llm_model_credential, + GeminiChatModel), + ModelInfo('gemini-1.0-pro-vision', _('Latest Gemini 1.0 Pro Vision model, updated with Google update'), + ModelTypeConst.LLM, + gemini_llm_model_credential, + GeminiChatModel), +] + +model_image_info_list = [ + ModelInfo('gemini-1.5-flash', _('Latest Gemini 1.5 Flash model, updated with Google updates'), + ModelTypeConst.IMAGE, + gemini_image_model_credential, + GeminiImage), + ModelInfo('gemini-1.5-pro', _('Latest Gemini 1.5 Flash model, updated with Google updates'), + ModelTypeConst.IMAGE, + gemini_image_model_credential, + GeminiImage), +] + +model_stt_info_list = [ + ModelInfo('gemini-1.5-flash', _('Latest Gemini 1.5 Flash model, updated with Google updates'), + ModelTypeConst.STT, + gemini_stt_model_credential, + GeminiSpeechToText), + ModelInfo('gemini-1.5-pro', _('Latest Gemini 1.5 Flash model, updated with Google updates'), + ModelTypeConst.STT, + gemini_stt_model_credential, + GeminiSpeechToText), +] + +model_embedding_info_list = [ + ModelInfo('models/embedding-001', '', + ModelTypeConst.EMBEDDING, + gemini_embedding_model_credential, + GeminiEmbeddingModel), + ModelInfo('models/text-embedding-004', '', + ModelTypeConst.EMBEDDING, + gemini_embedding_model_credential, + GeminiEmbeddingModel), +] + +model_info_manage = ( + ModelInfoManage.builder() + .append_model_info_list(model_info_list) + .append_model_info_list(model_image_info_list) + .append_model_info_list(model_stt_info_list) + .append_model_info_list(model_embedding_info_list) + .append_default_model_info(model_info_list[0]) + .append_default_model_info(model_image_info_list[0]) + .append_default_model_info(model_stt_info_list[0]) + .append_default_model_info(model_embedding_info_list[0]) + .build() +) class GeminiModelProvider(IModelProvider): - def get_dialogue_number(self): - return 3 - - def get_model(self, model_type, model_name, model_credential: Dict[str, object], - **model_kwargs) -> 
GeminiChatModel: - gemini_chat = GeminiChatModel( - model=model_name, - google_api_key=model_credential.get('api_key') - ) - return gemini_chat - - def get_model_credential(self, model_type, model_name): - if model_name in model_dict: - return model_dict.get(model_name).model_credential - return gemini_llm_model_credential + def get_model_info_manage(self): + return model_info_manage def get_model_provide_info(self): return ModelProvideInfo(provider='model_gemini_provider', name='Gemini', icon=get_file_content( os.path.join(PROJECT_DIR, "apps", "setting", 'models_provider', 'impl', 'gemini_model_provider', 'icon', 'gemini_icon_svg'))) - - def get_model_list(self, model_type: str): - if model_type is None: - raise AppApiException(500, '模型类型不能为空') - return [model_dict.get(key).to_dict() for key in - list(filter(lambda key: model_dict.get(key).model_type == model_type, model_dict.keys()))] - - def get_model_type_list(self): - return [{'key': "大语言模型", 'value': "LLM"}] diff --git a/apps/setting/models_provider/impl/gemini_model_provider/icon/gemini_icon_svg b/apps/setting/models_provider/impl/gemini_model_provider/icon/gemini_icon_svg index 00c48a35944..3ff8bdaf4f4 100644 --- a/apps/setting/models_provider/impl/gemini_model_provider/icon/gemini_icon_svg +++ b/apps/setting/models_provider/impl/gemini_model_provider/icon/gemini_icon_svg @@ -1,10 +1,2 @@ - - - - - - - - - - + + \ No newline at end of file diff --git a/apps/setting/models_provider/impl/gemini_model_provider/model/embedding.py b/apps/setting/models_provider/impl/gemini_model_provider/model/embedding.py new file mode 100644 index 00000000000..5d82b07e99d --- /dev/null +++ b/apps/setting/models_provider/impl/gemini_model_provider/model/embedding.py @@ -0,0 +1,22 @@ +# coding=utf-8 +""" + @project: MaxKB + @Author:虎 + @file: embedding.py + @date:2024/7/12 17:44 + @desc: +""" +from typing import Dict + +from langchain_google_genai import GoogleGenerativeAIEmbeddings + +from 
setting.models_provider.base_model_provider import MaxKBBaseModel + + +class GeminiEmbeddingModel(MaxKBBaseModel, GoogleGenerativeAIEmbeddings): + @staticmethod + def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs): + return GeminiEmbeddingModel( + google_api_key=model_credential.get('api_key'), + model=model_name, + ) diff --git a/apps/setting/models_provider/impl/gemini_model_provider/model/gemini_chat_model.py b/apps/setting/models_provider/impl/gemini_model_provider/model/gemini_chat_model.py deleted file mode 100644 index 7a972d9d531..00000000000 --- a/apps/setting/models_provider/impl/gemini_model_provider/model/gemini_chat_model.py +++ /dev/null @@ -1,30 +0,0 @@ -#!/usr/bin/env python -# -*- coding: UTF-8 -*- -""" -@Project :MaxKB -@File :gemini_chat_model.py -@Author :Brian Yang -@Date :5/13/24 7:40 AM -""" -from typing import List - -from langchain_core.messages import BaseMessage, get_buffer_string -from langchain_google_genai import ChatGoogleGenerativeAI - -from common.config.tokenizer_manage_config import TokenizerManage - - -class GeminiChatModel(ChatGoogleGenerativeAI): - def get_num_tokens_from_messages(self, messages: List[BaseMessage]) -> int: - try: - return super().get_num_tokens_from_messages(messages) - except Exception as e: - tokenizer = TokenizerManage.get_tokenizer() - return sum([len(tokenizer.encode(get_buffer_string([m]))) for m in messages]) - - def get_num_tokens(self, text: str) -> int: - try: - return super().get_num_tokens(text) - except Exception as e: - tokenizer = TokenizerManage.get_tokenizer() - return len(tokenizer.encode(text)) diff --git a/apps/setting/models_provider/impl/gemini_model_provider/model/image.py b/apps/setting/models_provider/impl/gemini_model_provider/model/image.py new file mode 100644 index 00000000000..2e48a81b23b --- /dev/null +++ b/apps/setting/models_provider/impl/gemini_model_provider/model/image.py @@ -0,0 +1,24 @@ +from typing import Dict + +from 
langchain_google_genai import ChatGoogleGenerativeAI + +from common.config.tokenizer_manage_config import TokenizerManage +from setting.models_provider.base_model_provider import MaxKBBaseModel + + +def custom_get_token_ids(text: str): + tokenizer = TokenizerManage.get_tokenizer() + return tokenizer.encode(text) + + +class GeminiImage(MaxKBBaseModel, ChatGoogleGenerativeAI): + + @staticmethod + def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs): + optional_params = MaxKBBaseModel.filter_optional_params(model_kwargs) + return GeminiImage( + model=model_name, + google_api_key=model_credential.get('api_key'), + streaming=True, + **optional_params, + ) diff --git a/apps/setting/models_provider/impl/gemini_model_provider/model/llm.py b/apps/setting/models_provider/impl/gemini_model_provider/model/llm.py new file mode 100644 index 00000000000..af23d0341a4 --- /dev/null +++ b/apps/setting/models_provider/impl/gemini_model_provider/model/llm.py @@ -0,0 +1,100 @@ +#!/usr/bin/env python +# -*- coding: UTF-8 -*- +""" +@Project :MaxKB +@File :llm.py +@Author :Brian Yang +@Date :5/13/24 7:40 AM +""" +from typing import List, Dict, Optional, Sequence, Union, Any, Iterator, cast + +from google.ai.generativelanguage_v1 import GenerateContentResponse +from google.ai.generativelanguage_v1beta.types import ( + Tool as GoogleTool, +) +from langchain_core.callbacks import CallbackManagerForLLMRun +from langchain_core.messages import BaseMessage, get_buffer_string +from langchain_core.outputs import ChatGenerationChunk +from langchain_google_genai import ChatGoogleGenerativeAI +from langchain_google_genai._function_utils import _ToolConfigDict, _ToolDict +from langchain_google_genai.chat_models import _chat_with_retry, _response_to_result, \ + _FunctionDeclarationType +from langchain_google_genai._common import ( + SafetySettingDict, +) + +from common.config.tokenizer_manage_config import TokenizerManage +from 
setting.models_provider.base_model_provider import MaxKBBaseModel + + +class GeminiChatModel(MaxKBBaseModel, ChatGoogleGenerativeAI): + + @staticmethod + def is_cache_model(): + return False + + @staticmethod + def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs): + optional_params = MaxKBBaseModel.filter_optional_params(model_kwargs) + + gemini_chat = GeminiChatModel( + model=model_name, + google_api_key=model_credential.get('api_key'), + **optional_params + ) + return gemini_chat + + def get_last_generation_info(self) -> Optional[Dict[str, Any]]: + return self.__dict__.get('_last_generation_info') + + def get_num_tokens_from_messages(self, messages: List[BaseMessage]) -> int: + try: + return self.get_last_generation_info().get('input_tokens', 0) + except Exception as e: + tokenizer = TokenizerManage.get_tokenizer() + return sum([len(tokenizer.encode(get_buffer_string([m]))) for m in messages]) + + def get_num_tokens(self, text: str) -> int: + try: + return self.get_last_generation_info().get('output_tokens', 0) + except Exception as e: + tokenizer = TokenizerManage.get_tokenizer() + return len(tokenizer.encode(text)) + + def _stream( + self, + messages: List[BaseMessage], + stop: Optional[List[str]] = None, + run_manager: Optional[CallbackManagerForLLMRun] = None, + *, + tools: Optional[Sequence[Union[_ToolDict, GoogleTool]]] = None, + functions: Optional[Sequence[_FunctionDeclarationType]] = None, + safety_settings: Optional[SafetySettingDict] = None, + tool_config: Optional[Union[Dict, _ToolConfigDict]] = None, + generation_config: Optional[Dict[str, Any]] = None, + **kwargs: Any, + ) -> Iterator[ChatGenerationChunk]: + request = self._prepare_request( + messages, + stop=stop, + tools=tools, + functions=functions, + safety_settings=safety_settings, + tool_config=tool_config, + generation_config=generation_config, + ) + response: GenerateContentResponse = _chat_with_retry( + request=request, + 
generation_method=self.client.stream_generate_content, + **kwargs, + metadata=self.default_metadata, + ) + for chunk in response: + _chat_result = _response_to_result(chunk, stream=True) + gen = cast(ChatGenerationChunk, _chat_result.generations[0]) + if gen.message: + token_usage = gen.message.usage_metadata + self.__dict__.setdefault('_last_generation_info', {}).update(token_usage) + if run_manager: + run_manager.on_llm_new_token(gen.text) + yield gen diff --git a/apps/setting/models_provider/impl/gemini_model_provider/model/stt.py b/apps/setting/models_provider/impl/gemini_model_provider/model/stt.py new file mode 100644 index 00000000000..5d559ac0065 --- /dev/null +++ b/apps/setting/models_provider/impl/gemini_model_provider/model/stt.py @@ -0,0 +1,57 @@ +from typing import Dict + +from django.utils.translation import gettext as _ +from langchain_core.messages import HumanMessage +from langchain_google_genai import ChatGoogleGenerativeAI + +from common.config.tokenizer_manage_config import TokenizerManage +from setting.models_provider.base_model_provider import MaxKBBaseModel +from setting.models_provider.impl.base_stt import BaseSpeechToText + + +def custom_get_token_ids(text: str): + tokenizer = TokenizerManage.get_tokenizer() + return tokenizer.encode(text) + + +class GeminiSpeechToText(MaxKBBaseModel, BaseSpeechToText): + api_key: str + model: str + + def __init__(self, **kwargs): + super().__init__(**kwargs) + self.api_key = kwargs.get('api_key') + + @staticmethod + def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs): + optional_params = {} + if 'max_tokens' in model_kwargs and model_kwargs['max_tokens'] is not None: + optional_params['max_tokens'] = model_kwargs['max_tokens'] + if 'temperature' in model_kwargs and model_kwargs['temperature'] is not None: + optional_params['temperature'] = model_kwargs['temperature'] + return GeminiSpeechToText( + model=model_name, + api_key=model_credential.get('api_key'), + 
**optional_params, + ) + + def check_auth(self): + client = ChatGoogleGenerativeAI( + model=self.model, + google_api_key=self.api_key + ) + response_list = client.invoke(_('Hello')) + # print(response_list) + + def speech_to_text(self, audio_file): + client = ChatGoogleGenerativeAI( + model=self.model, + google_api_key=self.api_key + ) + audio_data = audio_file.read() + msg = HumanMessage(content=[ + {'type': 'text', 'text': _('convert audio to text')}, + {"type": "media", 'mime_type': 'audio/mp3', "data": audio_data} + ]) + res = client.invoke([msg]) + return res.content diff --git a/apps/setting/models_provider/impl/kimi_model_provider/credential/llm.py b/apps/setting/models_provider/impl/kimi_model_provider/credential/llm.py new file mode 100644 index 00000000000..9fbdc21f961 --- /dev/null +++ b/apps/setting/models_provider/impl/kimi_model_provider/credential/llm.py @@ -0,0 +1,77 @@ +# coding=utf-8 +""" + @project: MaxKB + @Author:虎 + @file: llm.py + @date:2024/7/11 18:06 + @desc: +""" +import traceback +from typing import Dict + +from django.utils.translation import gettext_lazy as _, gettext +from langchain_core.messages import HumanMessage + +from common import forms +from common.exception.app_exception import AppApiException +from common.forms import BaseForm, TooltipLabel +from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode + + +class KimiLLMModelParams(BaseForm): + temperature = forms.SliderField(TooltipLabel(_('Temperature'), + _('Higher values make the output more random, while lower values make it more focused and deterministic')), + required=True, default_value=0.3, + _min=0.1, + _max=1.0, + _step=0.01, + precision=2) + + max_tokens = forms.SliderField( + TooltipLabel(_('Output the maximum Tokens'), + _('Specify the maximum number of tokens that the model can generate')), + required=True, default_value=1024, + _min=1, + _max=100000, + _step=1, + precision=0) + + +class KimiLLMModelCredential(BaseForm, 
BaseModelCredential): + + def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], model_params, provider, + raise_exception=False): + model_type_list = provider.get_model_type_list() + if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))): + raise AppApiException(ValidCode.valid_error.value, + gettext('{model_type} Model type is not supported').format(model_type=model_type)) + + for key in ['api_base', 'api_key']: + if key not in model_credential: + if raise_exception: + raise AppApiException(ValidCode.valid_error.value, gettext('{key} is required').format(key=key)) + else: + return False + try: + model = provider.get_model(model_type, model_name, model_credential, **model_params) + model.invoke([HumanMessage(content=gettext('Hello'))]) + except Exception as e: + traceback.print_exc() + if isinstance(e, AppApiException): + raise e + if raise_exception: + raise AppApiException(ValidCode.valid_error.value, gettext( + 'Verification failed, please check whether the parameters are correct: {error}').format( + error=str(e))) + else: + return False + return True + + def encryption_dict(self, model: Dict[str, object]): + return {**model, 'api_key': super().encryption(model.get('api_key', ''))} + + api_base = forms.TextInputField('API URL', required=True) + api_key = forms.PasswordInputField('API Key', required=True) + + def get_model_params_setting_form(self, model_name): + return KimiLLMModelParams() diff --git a/apps/setting/models_provider/impl/kimi_model_provider/icon/kimi_icon_svg b/apps/setting/models_provider/impl/kimi_model_provider/icon/kimi_icon_svg index 80bfcabffb3..8bd2a78fe4f 100644 --- a/apps/setting/models_provider/impl/kimi_model_provider/icon/kimi_icon_svg +++ b/apps/setting/models_provider/impl/kimi_model_provider/icon/kimi_icon_svg @@ -1,9 +1 @@ - - - - - - - - - \ No newline at end of file + \ No newline at end of file diff --git 
a/apps/setting/models_provider/impl/kimi_model_provider/kimi_model_provider.py b/apps/setting/models_provider/impl/kimi_model_provider/kimi_model_provider.py index 6394e5902b2..1347df46c64 100644 --- a/apps/setting/models_provider/impl/kimi_model_provider/kimi_model_provider.py +++ b/apps/setting/models_provider/impl/kimi_model_provider/kimi_model_provider.py @@ -7,103 +7,36 @@ @desc: """ import os -from typing import Dict -from langchain.schema import HumanMessage -from langchain.chat_models.base import BaseChatModel - - -from common import forms -from common.exception.app_exception import AppApiException -from common.forms import BaseForm from common.util.file_util import get_file_content -from setting.models_provider.base_model_provider import IModelProvider, ModelProvideInfo, BaseModelCredential, \ - ModelInfo, \ - ModelTypeConst, ValidCode +from setting.models_provider.base_model_provider import IModelProvider, ModelProvideInfo, ModelInfo, \ + ModelTypeConst, ModelInfoManage +from setting.models_provider.impl.kimi_model_provider.credential.llm import KimiLLMModelCredential +from setting.models_provider.impl.kimi_model_provider.model.llm import KimiChatModel from smartdoc.conf import PROJECT_DIR -from setting.models_provider.impl.kimi_model_provider.model.kimi_chat_model import KimiChatModel - - - - -class KimiLLMModelCredential(BaseForm, BaseModelCredential): - - def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], raise_exception=False): - model_type_list = KimiModelProvider().get_model_type_list() - if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))): - raise AppApiException(ValidCode.valid_error.value, f'{model_type} 模型类型不支持') - - for key in ['api_base', 'api_key']: - if key not in model_credential: - if raise_exception: - raise AppApiException(ValidCode.valid_error.value, f'{key} 字段为必填字段') - else: - return False - try: - # llm_kimi = Moonshot( - # model_name=model_name, - # 
base_url=model_credential['api_base'], - # moonshot_api_key=model_credential['api_key'] - # ) - - model = KimiModelProvider().get_model(model_type, model_name, model_credential) - model.invoke([HumanMessage(content='你好')]) - except Exception as e: - if isinstance(e, AppApiException): - raise e - if raise_exception: - raise AppApiException(ValidCode.valid_error.value, f'校验失败,请检查参数是否正确: {str(e)}') - else: - return False - return True - - def encryption_dict(self, model: Dict[str, object]): - return {**model, 'api_key': super().encryption(model.get('api_key', ''))} - - api_base = forms.TextInputField('API 域名', required=True) - api_key = forms.PasswordInputField('API Key', required=True) - kimi_llm_model_credential = KimiLLMModelCredential() -model_dict = { - 'moonshot-v1-8k': ModelInfo('moonshot-v1-8k', '', ModelTypeConst.LLM, kimi_llm_model_credential, - ), - 'moonshot-v1-32k': ModelInfo('moonshot-v1-32k', '', ModelTypeConst.LLM, kimi_llm_model_credential, - ), - 'moonshot-v1-128k': ModelInfo('moonshot-v1-128k', '', ModelTypeConst.LLM, kimi_llm_model_credential, - ) -} +moonshot_v1_8k = ModelInfo('moonshot-v1-8k', '', ModelTypeConst.LLM, kimi_llm_model_credential, + KimiChatModel) +moonshot_v1_32k = ModelInfo('moonshot-v1-32k', '', ModelTypeConst.LLM, kimi_llm_model_credential, + KimiChatModel) +moonshot_v1_128k = ModelInfo('moonshot-v1-128k', '', ModelTypeConst.LLM, kimi_llm_model_credential, + KimiChatModel) + +model_info_manage = ModelInfoManage.builder().append_model_info(moonshot_v1_8k).append_model_info( + moonshot_v1_32k).append_default_model_info(moonshot_v1_128k).append_default_model_info(moonshot_v1_8k).build() class KimiModelProvider(IModelProvider): + def get_model_info_manage(self): + return model_info_manage + def get_dialogue_number(self): return 3 - def get_model(self, model_type, model_name, model_credential: Dict[str, object], **model_kwargs) -> BaseChatModel: - kimi_chat_open_ai = KimiChatModel( - openai_api_base=model_credential['api_base'], - 
openai_api_key=model_credential['api_key'], - model_name=model_name, - ) - return kimi_chat_open_ai - - def get_model_credential(self, model_type, model_name): - if model_name in model_dict: - return model_dict.get(model_name).model_credential - return kimi_llm_model_credential - def get_model_provide_info(self): return ModelProvideInfo(provider='model_kimi_provider', name='Kimi', icon=get_file_content( os.path.join(PROJECT_DIR, "apps", "setting", 'models_provider', 'impl', 'kimi_model_provider', 'icon', 'kimi_icon_svg'))) - - def get_model_list(self, model_type: str): - if model_type is None: - raise AppApiException(500, '模型类型不能为空') - return [model_dict.get(key).to_dict() for key in - list(filter(lambda key: model_dict.get(key).model_type == model_type, model_dict.keys()))] - - def get_model_type_list(self): - return [{'key': "大语言模型", 'value': "LLM"}] diff --git a/apps/setting/models_provider/impl/kimi_model_provider/model/kimi_chat_model.py b/apps/setting/models_provider/impl/kimi_model_provider/model/kimi_chat_model.py deleted file mode 100644 index deee11a020d..00000000000 --- a/apps/setting/models_provider/impl/kimi_model_provider/model/kimi_chat_model.py +++ /dev/null @@ -1,24 +0,0 @@ -# coding=utf-8 -""" - @project: maxkb - @Author:虎 - @file: kimi_chat_model.py - @date:2023/11/10 17:45 - @desc: -""" -from typing import List - -from langchain_community.chat_models import ChatOpenAI -from langchain_core.messages import BaseMessage, get_buffer_string - -from common.config.tokenizer_manage_config import TokenizerManage - - -class KimiChatModel(ChatOpenAI): - def get_num_tokens_from_messages(self, messages: List[BaseMessage]) -> int: - tokenizer = TokenizerManage.get_tokenizer() - return sum([len(tokenizer.encode(get_buffer_string([m]))) for m in messages]) - - def get_num_tokens(self, text: str) -> int: - tokenizer = TokenizerManage.get_tokenizer() - return len(tokenizer.encode(text)) diff --git 
a/apps/setting/models_provider/impl/kimi_model_provider/model/llm.py b/apps/setting/models_provider/impl/kimi_model_provider/model/llm.py new file mode 100644 index 00000000000..c0ce2ec029a --- /dev/null +++ b/apps/setting/models_provider/impl/kimi_model_provider/model/llm.py @@ -0,0 +1,30 @@ +# coding=utf-8 +""" + @project: maxkb + @Author:虎 + @file: llm.py + @date:2023/11/10 17:45 + @desc: +""" +from typing import Dict + +from setting.models_provider.base_model_provider import MaxKBBaseModel +from setting.models_provider.impl.base_chat_open_ai import BaseChatOpenAI + + +class KimiChatModel(MaxKBBaseModel, BaseChatOpenAI): + + @staticmethod + def is_cache_model(): + return False + + @staticmethod + def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs): + optional_params = MaxKBBaseModel.filter_optional_params(model_kwargs) + kimi_chat_open_ai = KimiChatModel( + openai_api_base=model_credential['api_base'], + openai_api_key=model_credential['api_key'], + model_name=model_name, + extra_body=optional_params, + ) + return kimi_chat_open_ai diff --git a/apps/setting/models_provider/impl/local_model_provider/__init__.py b/apps/setting/models_provider/impl/local_model_provider/__init__.py new file mode 100644 index 00000000000..90a8d72c352 --- /dev/null +++ b/apps/setting/models_provider/impl/local_model_provider/__init__.py @@ -0,0 +1,8 @@ +# coding=utf-8 +""" + @project: MaxKB + @Author:虎 + @file: __init__.py + @date:2024/7/10 17:48 + @desc: +""" diff --git a/apps/setting/models_provider/impl/local_model_provider/credential/embedding.py b/apps/setting/models_provider/impl/local_model_provider/credential/embedding.py new file mode 100644 index 00000000000..bbb431a6b3f --- /dev/null +++ b/apps/setting/models_provider/impl/local_model_provider/credential/embedding.py @@ -0,0 +1,53 @@ +# coding=utf-8 +""" + @project: MaxKB + @Author:虎 + @file: embedding.py + @date:2024/7/11 11:06 + @desc: +""" +import traceback +from typing import 
Dict + +from django.utils.translation import gettext_lazy as _, gettext + +from common import forms +from common.exception.app_exception import AppApiException +from common.forms import BaseForm +from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode +from setting.models_provider.impl.local_model_provider.model.embedding import LocalEmbedding + + +class LocalEmbeddingCredential(BaseForm, BaseModelCredential): + + def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], model_params, provider, + raise_exception=False): + if not model_type == 'EMBEDDING': + raise AppApiException(ValidCode.valid_error.value, + gettext('{model_type} Model type is not supported').format(model_type=model_type)) + for key in ['cache_folder']: + if key not in model_credential: + if raise_exception: + raise AppApiException(ValidCode.valid_error.value, gettext('{key} is required').format(key=key)) + else: + return False + try: + model: LocalEmbedding = provider.get_model(model_type, model_name, model_credential) + model.embed_query(gettext('Hello')) + except Exception as e: + traceback.print_exc() + if isinstance(e, AppApiException): + raise e + if raise_exception: + raise AppApiException(ValidCode.valid_error.value, + gettext( + 'Verification failed, please check whether the parameters are correct: {error}').format( + error=str(e))) + else: + return False + return True + + def encryption_dict(self, model: Dict[str, object]): + return model + + cache_folder = forms.TextInputField(_('Model catalog'), required=True) diff --git a/apps/setting/models_provider/impl/local_model_provider/credential/reranker.py b/apps/setting/models_provider/impl/local_model_provider/credential/reranker.py new file mode 100644 index 00000000000..4c1715a72c9 --- /dev/null +++ b/apps/setting/models_provider/impl/local_model_provider/credential/reranker.py @@ -0,0 +1,54 @@ +# coding=utf-8 +""" + @project: MaxKB + @Author:虎 + @file: reranker.py + @date:2024/9/3 
14:33 + @desc: +""" +import traceback +from typing import Dict + +from langchain_core.documents import Document + +from common import forms +from common.exception.app_exception import AppApiException +from common.forms import BaseForm +from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode +from setting.models_provider.impl.local_model_provider.model.reranker import LocalBaseReranker +from django.utils.translation import gettext_lazy as _, gettext + + +class LocalRerankerCredential(BaseForm, BaseModelCredential): + + def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], model_params, provider, + raise_exception=False): + if not model_type == 'RERANKER': + raise AppApiException(ValidCode.valid_error.value, + gettext('{model_type} Model type is not supported').format(model_type=model_type)) + for key in ['cache_dir']: + if key not in model_credential: + if raise_exception: + raise AppApiException(ValidCode.valid_error.value, gettext('{key} is required').format(key=key)) + else: + return False + try: + model: LocalBaseReranker = provider.get_model(model_type, model_name, model_credential) + model.compress_documents([Document(page_content=gettext('Hello'))], gettext('Hello')) + except Exception as e: + traceback.print_exc() + if isinstance(e, AppApiException): + raise e + if raise_exception: + raise AppApiException(ValidCode.valid_error.value, + gettext( + 'Verification failed, please check whether the parameters are correct: {error}').format( + error=str(e))) + else: + return False + return True + + def encryption_dict(self, model: Dict[str, object]): + return model + + cache_dir = forms.TextInputField(_('Model catalog'), required=True) diff --git a/apps/setting/models_provider/impl/local_model_provider/icon/local_icon_svg b/apps/setting/models_provider/impl/local_model_provider/icon/local_icon_svg new file mode 100644 index 00000000000..62930faabd5 --- /dev/null +++ 
b/apps/setting/models_provider/impl/local_model_provider/icon/local_icon_svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/apps/setting/models_provider/impl/local_model_provider/local_model_provider.py b/apps/setting/models_provider/impl/local_model_provider/local_model_provider.py new file mode 100644 index 00000000000..b104e789c93 --- /dev/null +++ b/apps/setting/models_provider/impl/local_model_provider/local_model_provider.py @@ -0,0 +1,41 @@ +# coding=utf-8 +""" + @project: maxkb + @Author:虎 + @file: zhipu_model_provider.py + @date:2024/04/19 13:5 + @desc: +""" +import os + +from common.util.file_util import get_file_content +from setting.models_provider.base_model_provider import ModelProvideInfo, ModelTypeConst, ModelInfo, IModelProvider, \ + ModelInfoManage +from setting.models_provider.impl.local_model_provider.credential.embedding import LocalEmbeddingCredential +from setting.models_provider.impl.local_model_provider.credential.reranker import LocalRerankerCredential +from setting.models_provider.impl.local_model_provider.model.embedding import LocalEmbedding +from setting.models_provider.impl.local_model_provider.model.reranker import LocalReranker +from smartdoc.conf import PROJECT_DIR +from django.utils.translation import gettext as _ + +embedding_text2vec_base_chinese = ModelInfo('shibing624/text2vec-base-chinese', '', ModelTypeConst.EMBEDDING, + LocalEmbeddingCredential(), LocalEmbedding) +bge_reranker_v2_m3 = ModelInfo('BAAI/bge-reranker-v2-m3', '', ModelTypeConst.RERANKER, + LocalRerankerCredential(), LocalReranker) + +model_info_manage = (ModelInfoManage.builder().append_model_info(embedding_text2vec_base_chinese) + .append_default_model_info(embedding_text2vec_base_chinese) + .append_model_info(bge_reranker_v2_m3) + .append_default_model_info(bge_reranker_v2_m3) + .build()) + + +class LocalModelProvider(IModelProvider): + + def get_model_info_manage(self): + return model_info_manage + + def get_model_provide_info(self): + return 
ModelProvideInfo(provider='model_local_provider', name=_('local model'), icon=get_file_content( + os.path.join(PROJECT_DIR, "apps", "setting", 'models_provider', 'impl', 'local_model_provider', 'icon', + 'local_icon_svg'))) diff --git a/apps/setting/models_provider/impl/local_model_provider/model/embedding.py b/apps/setting/models_provider/impl/local_model_provider/model/embedding.py new file mode 100644 index 00000000000..4d6c65b9f68 --- /dev/null +++ b/apps/setting/models_provider/impl/local_model_provider/model/embedding.py @@ -0,0 +1,62 @@ +# coding=utf-8 +""" + @project: MaxKB + @Author:虎 + @file: embedding.py + @date:2024/7/11 14:06 + @desc: +""" +from typing import Dict, List + +import requests +from langchain_core.embeddings import Embeddings +from pydantic import BaseModel +from langchain_huggingface import HuggingFaceEmbeddings + +from setting.models_provider.base_model_provider import MaxKBBaseModel +from smartdoc.const import CONFIG + + +class WebLocalEmbedding(MaxKBBaseModel, BaseModel, Embeddings): + @staticmethod + def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs): + pass + + model_id: str = None + + def __init__(self, **kwargs): + super().__init__(**kwargs) + self.model_id = kwargs.get('model_id', None) + + def embed_query(self, text: str) -> List[float]: + bind = f'{CONFIG.get("LOCAL_MODEL_HOST")}:{CONFIG.get("LOCAL_MODEL_PORT")}' + res = requests.post(f'{CONFIG.get("LOCAL_MODEL_PROTOCOL")}://{bind}/api/model/{self.model_id}/embed_query', + {'text': text}) + result = res.json() + if result.get('code', 500) == 200: + return result.get('data') + raise Exception(result.get('message')) + + def embed_documents(self, texts: List[str]) -> List[List[float]]: + bind = f'{CONFIG.get("LOCAL_MODEL_HOST")}:{CONFIG.get("LOCAL_MODEL_PORT")}' + res = requests.post(f'{CONFIG.get("LOCAL_MODEL_PROTOCOL")}://{bind}/api/model/{self.model_id}/embed_documents', + {'texts': texts}) + result = res.json() + if result.get('code', 
500) == 200: + return result.get('data') + raise Exception(result.get('message')) + + +class LocalEmbedding(MaxKBBaseModel, HuggingFaceEmbeddings): + + @staticmethod + def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs): + if model_kwargs.get('use_local', True): + return LocalEmbedding(model_name=model_name, cache_folder=model_credential.get('cache_folder'), + model_kwargs={'device': model_credential.get('device')}, + encode_kwargs={'normalize_embeddings': True} + ) + return WebLocalEmbedding(model_name=model_name, cache_folder=model_credential.get('cache_folder'), + model_kwargs={'device': model_credential.get('device')}, + encode_kwargs={'normalize_embeddings': True}, + **model_kwargs) diff --git a/apps/setting/models_provider/impl/local_model_provider/model/reranker.py b/apps/setting/models_provider/impl/local_model_provider/model/reranker.py new file mode 100644 index 00000000000..b941625b0e6 --- /dev/null +++ b/apps/setting/models_provider/impl/local_model_provider/model/reranker.py @@ -0,0 +1,101 @@ +# coding=utf-8 +""" + @project: MaxKB + @Author:虎 + @file: reranker.py.py + @date:2024/9/2 16:42 + @desc: +""" +from typing import Sequence, Optional, Dict, Any, ClassVar + +import requests +import torch +from langchain_core.callbacks import Callbacks +from langchain_core.documents import BaseDocumentCompressor, Document +from transformers import AutoModelForSequenceClassification, AutoTokenizer + +from setting.models_provider.base_model_provider import MaxKBBaseModel +from smartdoc.const import CONFIG + + +class LocalReranker(MaxKBBaseModel): + def __init__(self, model_name, top_n=3, cache_dir=None): + super().__init__() + self.model_name = model_name + self.cache_dir = cache_dir + self.top_n = top_n + + @staticmethod + def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs): + if model_kwargs.get('use_local', True): + return LocalBaseReranker(model_name=model_name, 
cache_dir=model_credential.get('cache_dir'), + model_kwargs={'device': model_credential.get('device', 'cpu')} + + ) + return WebLocalBaseReranker(model_name=model_name, cache_dir=model_credential.get('cache_dir'), + model_kwargs={'device': model_credential.get('device')}, + **model_kwargs) + + +class WebLocalBaseReranker(MaxKBBaseModel, BaseDocumentCompressor): + @staticmethod + def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs): + pass + + model_id: str = None + + def __init__(self, **kwargs): + super().__init__(**kwargs) + self.model_id = kwargs.get('model_id', None) + + def compress_documents(self, documents: Sequence[Document], query: str, callbacks: Optional[Callbacks] = None) -> \ + Sequence[Document]: + if documents is None or len(documents) == 0: + return [] + bind = f'{CONFIG.get("LOCAL_MODEL_HOST")}:{CONFIG.get("LOCAL_MODEL_PORT")}' + res = requests.post( + f'{CONFIG.get("LOCAL_MODEL_PROTOCOL")}://{bind}/api/model/{self.model_id}/compress_documents', + json={'documents': [{'page_content': document.page_content, 'metadata': document.metadata} for document in + documents], 'query': query}, headers={'Content-Type': 'application/json'}) + result = res.json() + if result.get('code', 500) == 200: + return [Document(page_content=document.get('page_content'), metadata=document.get('metadata')) for document + in result.get('data')] + raise Exception(result.get('message')) + + +class LocalBaseReranker(MaxKBBaseModel, BaseDocumentCompressor): + client: Any = None + tokenizer: Any = None + model: Optional[str] = None + cache_dir: Optional[str] = None + model_kwargs: Any = {} + + def __init__(self, model_name, cache_dir=None, **model_kwargs): + super().__init__() + self.model = model_name + self.cache_dir = cache_dir + self.model_kwargs = model_kwargs + self.client = AutoModelForSequenceClassification.from_pretrained(self.model, cache_dir=self.cache_dir) + self.tokenizer = AutoTokenizer.from_pretrained(self.model, 
cache_dir=self.cache_dir) + self.client = self.client.to(self.model_kwargs.get('device', 'cpu')) + self.client.eval() + + @staticmethod + def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs): + return LocalBaseReranker(model_name, cache_dir=model_credential.get('cache_dir'), **model_kwargs) + + def compress_documents(self, documents: Sequence[Document], query: str, callbacks: Optional[Callbacks] = None) -> \ + Sequence[Document]: + if documents is None or len(documents) == 0: + return [] + with torch.no_grad(): + inputs = self.tokenizer([[query, document.page_content] for document in documents], padding=True, + truncation=True, return_tensors='pt', max_length=512) + scores = [torch.sigmoid(s).float().item() for s in + self.client(**inputs, return_dict=True).logits.view(-1, ).float()] + result = [Document(page_content=documents[index].page_content, metadata={'relevance_score': scores[index]}) + for index + in range(len(documents))] + result.sort(key=lambda row: row.metadata.get('relevance_score'), reverse=True) + return result diff --git a/apps/setting/models_provider/impl/ollama_model_provider/credential/embedding.py b/apps/setting/models_provider/impl/ollama_model_provider/credential/embedding.py new file mode 100644 index 00000000000..c422dba1c6e --- /dev/null +++ b/apps/setting/models_provider/impl/ollama_model_provider/credential/embedding.py @@ -0,0 +1,49 @@ +# coding=utf-8 +""" + @project: MaxKB + @Author:虎 + @file: embedding.py + @date:2024/7/12 15:10 + @desc: +""" +from typing import Dict + +from django.utils.translation import gettext as _ + +from common import forms +from common.exception.app_exception import AppApiException +from common.forms import BaseForm +from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode +from setting.models_provider.impl.local_model_provider.model.embedding import LocalEmbedding + + +class OllamaEmbeddingModelCredential(BaseForm, BaseModelCredential): + 
def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], model_params, provider, + raise_exception=False): + model_type_list = provider.get_model_type_list() + if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))): + raise AppApiException(ValidCode.valid_error.value, + _('{model_type} Model type is not supported').format(model_type=model_type)) + try: + model_list = provider.get_base_model_list(model_credential.get('api_base')) + except Exception as e: + raise AppApiException(ValidCode.valid_error.value, _('API domain name is invalid')) + exist = [model for model in (model_list.get('models') if model_list.get('models') is not None else []) if + model.get('model') == model_name or model.get('model').replace(":latest", "") == model_name] + if len(exist) == 0: + raise AppApiException(ValidCode.model_not_fount, + _('The model does not exist, please download the model first')) + model: LocalEmbedding = provider.get_model(model_type, model_name, model_credential) + model.embed_query(_('Hello')) + return True + + def encryption_dict(self, model_info: Dict[str, object]): + return model_info + + def build_model(self, model_info: Dict[str, object]): + for key in ['model']: + if key not in model_info: + raise AppApiException(500, _('{key} is required').format(key=key)) + return self + + api_base = forms.TextInputField('API URL', required=True) diff --git a/apps/setting/models_provider/impl/ollama_model_provider/credential/image.py b/apps/setting/models_provider/impl/ollama_model_provider/credential/image.py new file mode 100644 index 00000000000..2b50067b923 --- /dev/null +++ b/apps/setting/models_provider/impl/ollama_model_provider/credential/image.py @@ -0,0 +1,56 @@ +# coding=utf-8 +from typing import Dict + +from common import forms +from common.exception.app_exception import AppApiException +from common.forms import BaseForm, TooltipLabel +from setting.models_provider.base_model_provider import 
BaseModelCredential, ValidCode +from django.utils.translation import gettext_lazy as _, gettext + + +class OllamaImageModelParams(BaseForm): + temperature = forms.SliderField(TooltipLabel(_('Temperature'), + _('Higher values make the output more random, while lower values make it more focused and deterministic')), + required=True, default_value=0.7, + _min=0.1, + _max=1.0, + _step=0.01, + precision=2) + + max_tokens = forms.SliderField( + TooltipLabel(_('Output the maximum Tokens'), + _('Specify the maximum number of tokens that the model can generate')), + required=True, default_value=800, + _min=1, + _max=100000, + _step=1, + precision=0) + + +class OllamaImageModelCredential(BaseForm, BaseModelCredential): + api_base = forms.TextInputField('API URL', required=True) + api_key = forms.PasswordInputField('API Key', required=True) + + def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], model_params, provider, + raise_exception=False): + model_type_list = provider.get_model_type_list() + if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))): + raise AppApiException(ValidCode.valid_error.value, + gettext('{model_type} Model type is not supported').format(model_type=model_type)) + try: + model_list = provider.get_base_model_list(model_credential.get('api_base')) + except Exception as e: + raise AppApiException(ValidCode.valid_error.value, gettext('API domain name is invalid')) + exist = [model for model in (model_list.get('models') if model_list.get('models') is not None else []) if + model.get('model') == model_name or model.get('model').replace(":latest", "") == model_name] + if len(exist) == 0: + raise AppApiException(ValidCode.model_not_fount, + gettext('The model does not exist, please download the model first')) + + return True + + def encryption_dict(self, model: Dict[str, object]): + return {**model, 'api_key': super().encryption(model.get('api_key', ''))} + + def get_model_params_setting_form(self, 
model_name): + return OllamaImageModelParams() diff --git a/apps/setting/models_provider/impl/ollama_model_provider/credential/llm.py b/apps/setting/models_provider/impl/ollama_model_provider/credential/llm.py new file mode 100644 index 00000000000..add06621937 --- /dev/null +++ b/apps/setting/models_provider/impl/ollama_model_provider/credential/llm.py @@ -0,0 +1,70 @@ +# coding=utf-8 +""" + @project: MaxKB + @Author:虎 + @file: llm.py + @date:2024/7/11 18:19 + @desc: +""" +from typing import Dict + +from django.utils.translation import gettext_lazy as _, gettext + +from common import forms +from common.exception.app_exception import AppApiException +from common.forms import BaseForm, TooltipLabel +from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode + + +class OllamaLLMModelParams(BaseForm): + temperature = forms.SliderField(TooltipLabel(_('Temperature'), + _('Higher values make the output more random, while lower values make it more focused and deterministic')), + required=True, default_value=0.3, + _min=0.1, + _max=1.0, + _step=0.01, + precision=2) + + num_predict = forms.SliderField( + TooltipLabel(_('Output the maximum Tokens'), + _('Specify the maximum number of tokens that the model can generate')), + required=True, default_value=1024, + _min=1, + _max=100000, + _step=1, + precision=0) + + +class OllamaLLMModelCredential(BaseForm, BaseModelCredential): + def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], model_params, provider, + raise_exception=False): + model_type_list = provider.get_model_type_list() + if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))): + raise AppApiException(ValidCode.valid_error.value, + gettext('{model_type} Model type is not supported').format(model_type=model_type)) + try: + model_list = provider.get_base_model_list(model_credential.get('api_base')) + except Exception as e: + raise AppApiException(ValidCode.valid_error.value, 
gettext('API domain name is invalid')) + exist = [model for model in (model_list.get('models') if model_list.get('models') is not None else []) if + model.get('model') == model_name or model.get('model').replace(":latest", "") == model_name] + if len(exist) == 0: + raise AppApiException(ValidCode.model_not_fount, + gettext('The model does not exist, please download the model first')) + return True + + def encryption_dict(self, model_info: Dict[str, object]): + return {**model_info, 'api_key': super().encryption(model_info.get('api_key', ''))} + + def build_model(self, model_info: Dict[str, object]): + for key in ['api_key', 'model']: + if key not in model_info: + raise AppApiException(500, gettext('{key} is required').format(key=key)) + self.api_key = model_info.get('api_key') + return self + + api_base = forms.TextInputField('API URL', required=True) + api_key = forms.PasswordInputField('API Key', required=True) + + def get_model_params_setting_form(self, model_name): + return OllamaLLMModelParams() diff --git a/apps/setting/models_provider/impl/ollama_model_provider/credential/reranker.py b/apps/setting/models_provider/impl/ollama_model_provider/credential/reranker.py new file mode 100644 index 00000000000..c2825aacb42 --- /dev/null +++ b/apps/setting/models_provider/impl/ollama_model_provider/credential/reranker.py @@ -0,0 +1,66 @@ +# coding=utf-8 +""" + @project: MaxKB + @Author:虎 + @file: embedding.py + @date:2024/7/12 15:10 + @desc: +""" +from typing import Dict + +from django.utils.translation import gettext as _ + +from common import forms +from common.exception.app_exception import AppApiException +from common.forms import BaseForm +from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode +from setting.models_provider.impl.ollama_model_provider.model.reranker import OllamaReranker +from langchain_core.documents import BaseDocumentCompressor, Document +from django.utils.translation import gettext_lazy as _, gettext + + +class 
OllamaReRankModelCredential(BaseForm, BaseModelCredential): + def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], model_params, provider, + raise_exception=False): + if not model_type == 'RERANKER': + raise AppApiException(ValidCode.valid_error.value, + gettext('{model_type} Model type is not supported').format(model_type=model_type)) + model_type_list = provider.get_model_type_list() + if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))): + raise AppApiException(ValidCode.valid_error.value, + _('{model_type} Model type is not supported').format(model_type=model_type)) + try: + model_list = provider.get_base_model_list(model_credential.get('api_base')) + except Exception as e: + raise AppApiException(ValidCode.valid_error.value, _('API domain name is invalid')) + exist = [model for model in (model_list.get('models') if model_list.get('models') is not None else []) if + model.get('model') == model_name or model.get('model').replace(":latest", "") == model_name] + if len(exist) == 0: + raise AppApiException(ValidCode.model_not_fount, + _('The model does not exist, please download the model first')) + + try: + model: OllamaReranker = provider.get_model(model_type, model_name, model_credential) + model.compress_documents([Document(page_content=gettext('Hello'))], gettext('Hello')) + except Exception as e: + if isinstance(e, AppApiException): + raise e + if raise_exception: + raise AppApiException(ValidCode.valid_error.value, + gettext( + 'Verification failed, please check whether the parameters are correct: {error}').format( + error=str(e))) + else: + return False + return True + + def encryption_dict(self, model_info: Dict[str, object]): + return model_info + + def build_model(self, model_info: Dict[str, object]): + for key in ['model']: + if key not in model_info: + raise AppApiException(500, _('{key} is required').format(key=key)) + return self + + api_base = forms.TextInputField('API URL', 
required=True) diff --git a/apps/setting/models_provider/impl/ollama_model_provider/model/embedding.py b/apps/setting/models_provider/impl/ollama_model_provider/model/embedding.py new file mode 100644 index 00000000000..d1a68ebc7ae --- /dev/null +++ b/apps/setting/models_provider/impl/ollama_model_provider/model/embedding.py @@ -0,0 +1,48 @@ +# coding=utf-8 +""" + @project: MaxKB + @Author:虎 + @file: embedding.py + @date:2024/7/12 15:02 + @desc: +""" +from typing import Dict, List + +from langchain_community.embeddings import OllamaEmbeddings + +from setting.models_provider.base_model_provider import MaxKBBaseModel + + +class OllamaEmbedding(MaxKBBaseModel, OllamaEmbeddings): + @staticmethod + def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs): + return OllamaEmbedding( + model=model_name, + base_url=model_credential.get('api_base'), + ) + + def embed_documents(self, texts: List[str]) -> List[List[float]]: + """Embed documents using an Ollama deployed embedding model. + + Args: + texts: The list of texts to embed. + + Returns: + List of embeddings, one for each text. + """ + instruction_pairs = [f"{text}" for text in texts] + embeddings = self._embed(instruction_pairs) + return embeddings + + def embed_query(self, text: str) -> List[float]: + """Embed a query using a Ollama deployed embedding model. + + Args: + text: The text to embed. + + Returns: + Embeddings for the text. 
+ """ + instruction_pair = f"{text}" + embedding = self._embed([instruction_pair])[0] + return embedding diff --git a/apps/setting/models_provider/impl/ollama_model_provider/model/image.py b/apps/setting/models_provider/impl/ollama_model_provider/model/image.py new file mode 100644 index 00000000000..215ce0130d7 --- /dev/null +++ b/apps/setting/models_provider/impl/ollama_model_provider/model/image.py @@ -0,0 +1,32 @@ +from typing import Dict +from urllib.parse import urlparse, ParseResult + +from setting.models_provider.base_model_provider import MaxKBBaseModel +from setting.models_provider.impl.base_chat_open_ai import BaseChatOpenAI + + +def get_base_url(url: str): + parse = urlparse(url) + result_url = ParseResult(scheme=parse.scheme, netloc=parse.netloc, path=parse.path, params='', + query='', + fragment='').geturl() + return result_url[:-1] if result_url.endswith("/") else result_url + + +class OllamaImage(MaxKBBaseModel, BaseChatOpenAI): + + @staticmethod + def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs): + api_base = model_credential.get('api_base', '') + base_url = get_base_url(api_base) + base_url = base_url if base_url.endswith('/v1') else (base_url + '/v1') + optional_params = MaxKBBaseModel.filter_optional_params(model_kwargs) + return OllamaImage( + model_name=model_name, + openai_api_base=base_url, + openai_api_key=model_credential.get('api_key'), + # stream_options={"include_usage": True}, + streaming=True, + stream_usage=True, + extra_body=optional_params + ) diff --git a/apps/setting/models_provider/impl/ollama_model_provider/model/llm.py b/apps/setting/models_provider/impl/ollama_model_provider/model/llm.py new file mode 100644 index 00000000000..6cd291ff3cc --- /dev/null +++ b/apps/setting/models_provider/impl/ollama_model_provider/model/llm.py @@ -0,0 +1,48 @@ +# coding=utf-8 +""" + @project: maxkb + @Author:虎 + @file: llm.py + @date:2024/3/6 11:48 + @desc: +""" +from typing import List, Dict +from 
urllib.parse import urlparse, ParseResult + +from langchain_core.messages import BaseMessage, get_buffer_string +from langchain_ollama.chat_models import ChatOllama + + +from common.config.tokenizer_manage_config import TokenizerManage +from setting.models_provider.base_model_provider import MaxKBBaseModel + + +def get_base_url(url: str): + parse = urlparse(url) + result_url = ParseResult(scheme=parse.scheme, netloc=parse.netloc, path=parse.path, params='', + query='', + fragment='').geturl() + return result_url[:-1] if result_url.endswith("/") else result_url + + +class OllamaChatModel(MaxKBBaseModel, ChatOllama): + @staticmethod + def is_cache_model(): + return False + + @staticmethod + def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs): + api_base = model_credential.get('api_base', '') + base_url = get_base_url(api_base) + optional_params = MaxKBBaseModel.filter_optional_params(model_kwargs) + + return OllamaChatModel(model=model_name, base_url=base_url, + stream=True, **optional_params) + + def get_num_tokens_from_messages(self, messages: List[BaseMessage]) -> int: + tokenizer = TokenizerManage.get_tokenizer() + return sum([len(tokenizer.encode(get_buffer_string([m]))) for m in messages]) + + def get_num_tokens(self, text: str) -> int: + tokenizer = TokenizerManage.get_tokenizer() + return len(tokenizer.encode(text)) diff --git a/apps/setting/models_provider/impl/ollama_model_provider/model/ollama_chat_model.py b/apps/setting/models_provider/impl/ollama_model_provider/model/ollama_chat_model.py deleted file mode 100644 index 86c5219d4a0..00000000000 --- a/apps/setting/models_provider/impl/ollama_model_provider/model/ollama_chat_model.py +++ /dev/null @@ -1,24 +0,0 @@ -# coding=utf-8 -""" - @project: maxkb - @Author:虎 - @file: ollama_chat_model.py - @date:2024/3/6 11:48 - @desc: -""" -from typing import List - -from langchain_community.chat_models import ChatOpenAI -from langchain_core.messages import BaseMessage, 
get_buffer_string - -from common.config.tokenizer_manage_config import TokenizerManage - - -class OllamaChatModel(ChatOpenAI): - def get_num_tokens_from_messages(self, messages: List[BaseMessage]) -> int: - tokenizer = TokenizerManage.get_tokenizer() - return sum([len(tokenizer.encode(get_buffer_string([m]))) for m in messages]) - - def get_num_tokens(self, text: str) -> int: - tokenizer = TokenizerManage.get_tokenizer() - return len(tokenizer.encode(text)) diff --git a/apps/setting/models_provider/impl/ollama_model_provider/model/reranker.py b/apps/setting/models_provider/impl/ollama_model_provider/model/reranker.py new file mode 100644 index 00000000000..9704537a54a --- /dev/null +++ b/apps/setting/models_provider/impl/ollama_model_provider/model/reranker.py @@ -0,0 +1,49 @@ +from typing import Sequence, Optional, Any, Dict + +from langchain_community.embeddings import OllamaEmbeddings +from langchain_core.callbacks import Callbacks +from langchain_core.documents import Document +from setting.models_provider.base_model_provider import MaxKBBaseModel +from sklearn.metrics.pairwise import cosine_similarity +from pydantic import BaseModel, Field + + +class OllamaReranker(MaxKBBaseModel, OllamaEmbeddings, BaseModel): + top_n: Optional[int] = Field(3, description="Number of top documents to return") + + @staticmethod + def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs): + optional_params = MaxKBBaseModel.filter_optional_params(model_kwargs) + return OllamaReranker( + model=model_name, + base_url=model_credential.get('api_base'), + **optional_params + ) + + def compress_documents(self, documents: Sequence[Document], query: str, callbacks: Optional[Callbacks] = None) -> \ + Sequence[Document]: + """Rank documents based on their similarity to the query. + + Args: + query: The query text. + documents: The list of document texts to rank. + + Returns: + List of documents sorted by relevance to the query. 
+ """ + # 获取查询和文档的嵌入 + query_embedding = self.embed_query(query) + documents = [doc.page_content for doc in documents] + document_embeddings = self.embed_documents(documents) + # 计算相似度 + similarities = cosine_similarity([query_embedding], document_embeddings)[0] + ranked_docs = [(doc,_) for _, doc in sorted(zip(similarities, documents), reverse=True)][:self.top_n] + return [ + Document( + page_content=doc, # 第一个值是文档内容 + metadata={'relevance_score': score} # 第二个值是相似度分数 + ) + for doc, score in ranked_docs + ] + + diff --git a/apps/setting/models_provider/impl/ollama_model_provider/ollama_model_provider.py b/apps/setting/models_provider/impl/ollama_model_provider/ollama_model_provider.py index 73239921e45..f9de848e2fe 100644 --- a/apps/setting/models_provider/impl/ollama_model_provider/ollama_model_provider.py +++ b/apps/setting/models_provider/impl/ollama_model_provider/ollama_model_provider.py @@ -12,120 +12,204 @@ from urllib.parse import urlparse, ParseResult import requests -from langchain.chat_models.base import BaseChatModel - -from common import forms -from common.exception.app_exception import AppApiException -from common.forms import BaseForm from common.util.file_util import get_file_content from setting.models_provider.base_model_provider import IModelProvider, ModelProvideInfo, ModelInfo, ModelTypeConst, \ - BaseModelCredential, DownModelChunk, DownModelChunkStatus, ValidCode -from setting.models_provider.impl.ollama_model_provider.model.ollama_chat_model import OllamaChatModel + BaseModelCredential, DownModelChunk, DownModelChunkStatus, ValidCode, ModelInfoManage +from setting.models_provider.impl.ollama_model_provider.credential.embedding import OllamaEmbeddingModelCredential +from setting.models_provider.impl.ollama_model_provider.credential.image import OllamaImageModelCredential +from setting.models_provider.impl.ollama_model_provider.credential.llm import OllamaLLMModelCredential +from 
setting.models_provider.impl.ollama_model_provider.credential.reranker import OllamaReRankModelCredential +from setting.models_provider.impl.ollama_model_provider.model.embedding import OllamaEmbedding +from setting.models_provider.impl.ollama_model_provider.model.image import OllamaImage +from setting.models_provider.impl.ollama_model_provider.model.llm import OllamaChatModel +from setting.models_provider.impl.ollama_model_provider.model.reranker import OllamaReranker from smartdoc.conf import PROJECT_DIR +from django.utils.translation import gettext as _ "" - -class OllamaLLMModelCredential(BaseForm, BaseModelCredential): - def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], raise_exception=False): - model_type_list = OllamaModelProvider().get_model_type_list() - if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))): - raise AppApiException(ValidCode.valid_error.value, f'{model_type} 模型类型不支持') - try: - model_list = OllamaModelProvider.get_base_model_list(model_credential.get('api_base')) - except Exception as e: - raise AppApiException(ValidCode.valid_error.value, "API 域名无效") - exist = [model for model in (model_list.get('models') if model_list.get('models') is not None else []) if - model.get('model') == model_name or model.get('model').replace(":latest", "") == model_name] - if len(exist) == 0: - raise AppApiException(ValidCode.model_not_fount, "模型不存在,请先下载模型") - return True - - def encryption_dict(self, model_info: Dict[str, object]): - return {**model_info, 'api_key': super().encryption(model_info.get('api_key', ''))} - - def build_model(self, model_info: Dict[str, object]): - for key in ['api_key', 'model']: - if key not in model_info: - raise AppApiException(500, f'{key} 字段为必填字段') - self.api_key = model_info.get('api_key') - return self - - api_base = forms.TextInputField('API 域名', required=True) - api_key = forms.PasswordInputField('API Key', required=True) - - ollama_llm_model_credential = 
OllamaLLMModelCredential() +model_info_list = [ + ModelInfo( + 'deepseek-r1:1.5b', + '', + ModelTypeConst.LLM, ollama_llm_model_credential, OllamaChatModel), + ModelInfo( + 'deepseek-r1:7b', + '', + ModelTypeConst.LLM, ollama_llm_model_credential, OllamaChatModel), + ModelInfo( + 'deepseek-r1:8b', + '', + ModelTypeConst.LLM, ollama_llm_model_credential, OllamaChatModel), + ModelInfo( + 'deepseek-r1:14b', + '', + ModelTypeConst.LLM, ollama_llm_model_credential, OllamaChatModel), + ModelInfo( + 'deepseek-r1:32b', + '', + ModelTypeConst.LLM, ollama_llm_model_credential, OllamaChatModel), -model_dict = { - 'llama2': ModelInfo( + ModelInfo( 'llama2', - 'Llama 2 是一组经过预训练和微调的生成文本模型,其规模从 70 亿到 700 亿个不等。这是 7B 预训练模型的存储库。其他模型的链接可以在底部的索引中找到。', - ModelTypeConst.LLM, ollama_llm_model_credential), - 'llama2:13b': ModelInfo( + _('Llama 2 is a set of pretrained and fine-tuned generative text models ranging in size from 7 billion to 70 billion. This is a repository of 7B pretrained models. Links to other models can be found in the index at the bottom.'), + ModelTypeConst.LLM, ollama_llm_model_credential, OllamaChatModel), + ModelInfo( 'llama2:13b', - 'Llama 2 是一组经过预训练和微调的生成文本模型,其规模从 70 亿到 700 亿个不等。这是 13B 预训练模型的存储库。其他模型的链接可以在底部的索引中找到。', - ModelTypeConst.LLM, ollama_llm_model_credential), - 'llama2:70b': ModelInfo( + _('Llama 2 is a set of pretrained and fine-tuned generative text models ranging in size from 7 billion to 70 billion. This is a repository of 13B pretrained models. Links to other models can be found in the index at the bottom.'), + ModelTypeConst.LLM, ollama_llm_model_credential, OllamaChatModel), + ModelInfo( 'llama2:70b', - 'Llama 2 是一组经过预训练和微调的生成文本模型,其规模从 70 亿到 700 亿个不等。这是 70B 预训练模型的存储库。其他模型的链接可以在底部的索引中找到。', - ModelTypeConst.LLM, ollama_llm_model_credential), - 'llama2-chinese:13b': ModelInfo( + _('Llama 2 is a set of pretrained and fine-tuned generative text models ranging in size from 7 billion to 70 billion. This is a repository of 70B pretrained models. 
Links to other models can be found in the index at the bottom.'), + ModelTypeConst.LLM, ollama_llm_model_credential, OllamaChatModel), + ModelInfo( 'llama2-chinese:13b', - '由于Llama2本身的中文对齐较弱,我们采用中文指令集,对meta-llama/Llama-2-13b-chat-hf进行LoRA微调,使其具备较强的中文对话能力。', - ModelTypeConst.LLM, ollama_llm_model_credential), - 'llama3:8b': ModelInfo( + _('Since the Chinese alignment of Llama2 itself is weak, we use the Chinese instruction set to fine-tune meta-llama/Llama-2-13b-chat-hf with LoRA so that it has strong Chinese conversation capabilities.'), + ModelTypeConst.LLM, ollama_llm_model_credential, OllamaChatModel), + ModelInfo( 'llama3:8b', - 'Meta Llama 3:迄今为止最有能力的公开产品LLM。8亿参数。', - ModelTypeConst.LLM, ollama_llm_model_credential), - 'llama3:70b': ModelInfo( + _('Meta Llama 3: The most capable public product LLM to date. 8 billion parameters.'), + ModelTypeConst.LLM, ollama_llm_model_credential, OllamaChatModel), + ModelInfo( 'llama3:70b', - 'Meta Llama 3:迄今为止最有能力的公开产品LLM。70亿参数。', - ModelTypeConst.LLM, ollama_llm_model_credential), - 'qwen:0.5b': ModelInfo( + _('Meta Llama 3: The most capable public product LLM to date. 70 billion parameters.'), + ModelTypeConst.LLM, ollama_llm_model_credential, OllamaChatModel), + ModelInfo( 'qwen:0.5b', - 'qwen 1.5 0.5b 相较于以往版本,模型与人类偏好的对齐程度以及多语言处理能力上有显著增强。所有规模的模型都支持32768个tokens的上下文长度。0.5亿参数。', - ModelTypeConst.LLM, ollama_llm_model_credential), - 'qwen:1.8b': ModelInfo( + _("Compared with previous versions, qwen 1.5 0.5b has significantly enhanced the model's alignment with human preferences and its multi-language processing capabilities. Models of all sizes support a context length of 32768 tokens. 
500 million parameters."), + ModelTypeConst.LLM, ollama_llm_model_credential, OllamaChatModel), + ModelInfo( 'qwen:1.8b', - 'qwen 1.5 1.8b 相较于以往版本,模型与人类偏好的对齐程度以及多语言处理能力上有显著增强。所有规模的模型都支持32768个tokens的上下文长度。1.8亿参数。', - ModelTypeConst.LLM, ollama_llm_model_credential), - 'qwen:4b': ModelInfo( + _("Compared with previous versions, qwen 1.5 1.8b has significantly enhanced the model's alignment with human preferences and its multi-language processing capabilities. Models of all sizes support a context length of 32768 tokens. 1.8 billion parameters."), + ModelTypeConst.LLM, ollama_llm_model_credential, OllamaChatModel), + ModelInfo( 'qwen:4b', - 'qwen 1.5 4b 相较于以往版本,模型与人类偏好的对齐程度以及多语言处理能力上有显著增强。所有规模的模型都支持32768个tokens的上下文长度。4亿参数。', - ModelTypeConst.LLM, ollama_llm_model_credential), - 'qwen:7b': ModelInfo( + _("Compared with previous versions, qwen 1.5 4b has significantly enhanced the model's alignment with human preferences and its multi-language processing capabilities. Models of all sizes support a context length of 32768 tokens. 4 billion parameters."), + ModelTypeConst.LLM, ollama_llm_model_credential, OllamaChatModel), + + ModelInfo( 'qwen:7b', - 'qwen 1.5 7b 相较于以往版本,模型与人类偏好的对齐程度以及多语1言处理能力上有显著增强。所有规模的模型都支持32768个tokens的上下文长度。7亿参数。', - ModelTypeConst.LLM, ollama_llm_model_credential), - 'qwen:14b': ModelInfo( + _("Compared with previous versions, qwen 1.5 7b has significantly enhanced the model's alignment with human preferences and its multi-language processing capabilities. Models of all sizes support a context length of 32768 tokens. 
7 billion parameters."), + ModelTypeConst.LLM, ollama_llm_model_credential, OllamaChatModel), + ModelInfo( 'qwen:14b', - 'qwen 1.5 14b 相较于以往版本,模型与人类偏好的对齐程度以及多语言处理能力上有显著增强。所有规模的模型都支持32768个tokens的上下文长度。14亿参数。', - ModelTypeConst.LLM, ollama_llm_model_credential), - 'qwen:32b': ModelInfo( + _("Compared with previous versions, qwen 1.5 14b has significantly enhanced the model's alignment with human preferences and its multi-language processing capabilities. Models of all sizes support a context length of 32768 tokens. 14 billion parameters."), + ModelTypeConst.LLM, ollama_llm_model_credential, OllamaChatModel), + ModelInfo( 'qwen:32b', - 'qwen 1.5 32b 相较于以往版本,模型与人类偏好的对齐程度以及多语言处理能力上有显著增强。所有规模的模型都支持32768个tokens的上下文长度。32亿参数。', - ModelTypeConst.LLM, ollama_llm_model_credential), - 'qwen:72b': ModelInfo( + _("Compared with previous versions, qwen 1.5 32b has significantly enhanced the model's alignment with human preferences and its multi-language processing capabilities. Models of all sizes support a context length of 32768 tokens. 32 billion parameters."), + ModelTypeConst.LLM, ollama_llm_model_credential, OllamaChatModel), + ModelInfo( 'qwen:72b', - 'qwen 1.5 72b 相较于以往版本,模型与人类偏好的对齐程度以及多语言处理能力上有显著增强。所有规模的模型都支持32768个tokens的上下文长度。72亿参数。', - ModelTypeConst.LLM, ollama_llm_model_credential), - 'qwen:110b': ModelInfo( + _("Compared with previous versions, qwen 1.5 72b has significantly enhanced the model's alignment with human preferences and its multi-language processing capabilities. Models of all sizes support a context length of 32768 tokens. 
72 billion parameters."), + ModelTypeConst.LLM, ollama_llm_model_credential, OllamaChatModel), + ModelInfo( 'qwen:110b', - 'qwen 1.5 110b 相较于以往版本,模型与人类偏好的对齐程度以及多语言处理能力上有显著增强。所有规模的模型都支持32768个tokens的上下文长度。110亿参数。', - ModelTypeConst.LLM, ollama_llm_model_credential), - 'phi3': ModelInfo( + _("Compared with previous versions, qwen 1.5 110b has significantly enhanced the model's alignment with human preferences and its multi-language processing capabilities. Models of all sizes support a context length of 32768 tokens. 110 billion parameters."), + ModelTypeConst.LLM, ollama_llm_model_credential, OllamaChatModel), + ModelInfo( + 'qwen2:72b-instruct', + '', + ModelTypeConst.LLM, ollama_llm_model_credential, OllamaChatModel), + ModelInfo( + 'qwen2:57b-a14b-instruct', + '', + ModelTypeConst.LLM, ollama_llm_model_credential, OllamaChatModel), + ModelInfo( + 'qwen2:7b-instruct', + '', + ModelTypeConst.LLM, ollama_llm_model_credential, OllamaChatModel), + ModelInfo( + 'qwen2.5:72b-instruct', + '', + ModelTypeConst.LLM, ollama_llm_model_credential, OllamaChatModel), + ModelInfo( + 'qwen2.5:32b-instruct', + '', + ModelTypeConst.LLM, ollama_llm_model_credential, OllamaChatModel), + ModelInfo( + 'qwen2.5:14b-instruct', + '', + ModelTypeConst.LLM, ollama_llm_model_credential, OllamaChatModel), + ModelInfo( + 'qwen2.5:7b-instruct', + '', + ModelTypeConst.LLM, ollama_llm_model_credential, OllamaChatModel), + ModelInfo( + 'qwen2.5:1.5b-instruct', + '', + ModelTypeConst.LLM, ollama_llm_model_credential, OllamaChatModel), + ModelInfo( + 'qwen2.5:0.5b-instruct', + '', + ModelTypeConst.LLM, ollama_llm_model_credential, OllamaChatModel), + ModelInfo( + 'qwen2.5:3b-instruct', + '', + ModelTypeConst.LLM, ollama_llm_model_credential, OllamaChatModel), + ModelInfo( + 'phi3', + _("Phi-3 Mini is Microsoft's 3.8B parameter, lightweight, state-of-the-art open model."), + ModelTypeConst.LLM, ollama_llm_model_credential, OllamaChatModel), +] +ollama_embedding_model_credential = 
OllamaEmbeddingModelCredential() +ollama_image_model_credential = OllamaImageModelCredential() +ollama_reranker_model_credential = OllamaReRankModelCredential() +embedding_model_info = [ + ModelInfo( + 'nomic-embed-text', + _('A high-performance open embedding model with a large token context window.'), + ModelTypeConst.EMBEDDING, ollama_embedding_model_credential, OllamaEmbedding), +] +reranker_model_info = [ + ModelInfo( + 'linux6200/bge-reranker-v2-m3', + '', + ModelTypeConst.RERANKER, ollama_reranker_model_credential, OllamaReranker), +] + +image_model_info = [ + ModelInfo( + 'llava:7b', + '', + ModelTypeConst.IMAGE, ollama_image_model_credential, OllamaImage), + ModelInfo( + 'llava:13b', + '', + ModelTypeConst.IMAGE, ollama_image_model_credential, OllamaImage), + ModelInfo( + 'llava:34b', + '', + ModelTypeConst.IMAGE, ollama_image_model_credential, OllamaImage), +] + +model_info_manage = ( + ModelInfoManage.builder() + .append_model_info_list(model_info_list) + .append_model_info_list(embedding_model_info) + .append_default_model_info(ModelInfo( 'phi3', - 'Phi-3 Mini是Microsoft的3.8B参数,轻量级,最先进的开放模型。', - ModelTypeConst.LLM, ollama_llm_model_credential), -} + _('Phi-3 Mini is Microsoft\'s 3.8B parameter, lightweight, state-of-the-art open model.'), + ModelTypeConst.LLM, ollama_llm_model_credential, OllamaChatModel)) + .append_default_model_info(ModelInfo( + 'nomic-embed-text', + _('A high-performance open embedding model with a large token context window.'), + ModelTypeConst.EMBEDDING, ollama_embedding_model_credential, OllamaEmbedding), ) + .append_model_info_list(image_model_info) + .append_default_model_info(image_model_info[0]) + .append_model_info_list(reranker_model_info) + .append_default_model_info(reranker_model_info[0]) + .build() +) def get_base_url(url: str): parse = urlparse(url) - return ParseResult(scheme=parse.scheme, netloc=parse.netloc, path='', params='', - query='', - fragment='').geturl() + result_url = ParseResult(scheme=parse.scheme, 
netloc=parse.netloc, path=parse.path, params='', + query='', + fragment='').geturl() + return result_url[:-1] if result_url.endswith("/") else result_url def convert_to_down_model_chunk(row_str: str, chunk_index: int): @@ -162,42 +246,20 @@ def convert(response_stream) -> Iterator[DownModelChunk]: temp = "" if len(temp) > 0: - print(temp) rows = [t for t in temp.split("\n") if len(t) > 0] for row in rows: yield convert_to_down_model_chunk(row, index) class OllamaModelProvider(IModelProvider): + def get_model_info_manage(self): + return model_info_manage + def get_model_provide_info(self): return ModelProvideInfo(provider='model_ollama_provider', name='Ollama', icon=get_file_content( os.path.join(PROJECT_DIR, "apps", "setting", 'models_provider', 'impl', 'ollama_model_provider', 'icon', 'ollama_icon_svg'))) - def get_model_type_list(self): - return [{'key': "大语言模型", 'value': "LLM"}] - - def get_model_list(self, model_type): - if model_type is None: - raise AppApiException(500, '模型类型不能为空') - return [model_dict.get(key).to_dict() for key in - list(filter(lambda key: model_dict.get(key).model_type == model_type, model_dict.keys()))] - - def get_model_credential(self, model_type, model_name): - if model_name in model_dict: - return model_dict.get(model_name).model_credential - # 如果使用模型不在配置中,则使用默认认证 - return ollama_llm_model_credential - - def get_model(self, model_type, model_name, model_credential: Dict[str, object], **model_kwargs) -> BaseChatModel: - api_base = model_credential.get('api_base') - base_url = get_base_url(api_base) - return OllamaChatModel(model=model_name, openai_api_base=(base_url + '/v1'), - openai_api_key=model_credential.get('api_key')) - - def get_dialogue_number(self): - return 2 - @staticmethod def get_base_model_list(api_base): base_url = get_base_url(api_base) @@ -206,7 +268,7 @@ def get_base_model_list(api_base): return r.json() def down_model(self, model_type: str, model_name, model_credential: Dict[str, object]) -> Iterator[DownModelChunk]: 
- api_base = model_credential.get('api_base') + api_base = model_credential.get('api_base', '') base_url = get_base_url(api_base) r = requests.request( method="POST", diff --git a/apps/setting/models_provider/impl/openai_model_provider/credential/embedding.py b/apps/setting/models_provider/impl/openai_model_provider/credential/embedding.py new file mode 100644 index 00000000000..31f18451864 --- /dev/null +++ b/apps/setting/models_provider/impl/openai_model_provider/credential/embedding.py @@ -0,0 +1,53 @@ +# coding=utf-8 +""" + @project: MaxKB + @Author:虎 + @file: embedding.py + @date:2024/7/12 16:45 + @desc: +""" +import traceback +from typing import Dict + +from django.utils.translation import gettext as _ + +from common import forms +from common.exception.app_exception import AppApiException +from common.forms import BaseForm +from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode + + +class OpenAIEmbeddingCredential(BaseForm, BaseModelCredential): + def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], model_params, provider, + raise_exception=True): + model_type_list = provider.get_model_type_list() + if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))): + raise AppApiException(ValidCode.valid_error.value, + _('{model_type} Model type is not supported').format(model_type=model_type)) + + for key in ['api_base', 'api_key']: + if key not in model_credential: + if raise_exception: + raise AppApiException(ValidCode.valid_error.value, _('{key} is required').format(key=key)) + else: + return False + try: + model = provider.get_model(model_type, model_name, model_credential) + model.embed_query(_('Hello')) + except Exception as e: + traceback.print_exc() + if isinstance(e, AppApiException): + raise e + if raise_exception: + raise AppApiException(ValidCode.valid_error.value, + _('Verification failed, please check whether the parameters are correct: {error}').format( + 
error=str(e))) + else: + return False + return True + + def encryption_dict(self, model: Dict[str, object]): + return {**model, 'api_key': super().encryption(model.get('api_key', ''))} + + api_base = forms.TextInputField('API URL', required=True) + api_key = forms.PasswordInputField('API Key', required=True) diff --git a/apps/setting/models_provider/impl/openai_model_provider/credential/image.py b/apps/setting/models_provider/impl/openai_model_provider/credential/image.py new file mode 100644 index 00000000000..7cd7197f721 --- /dev/null +++ b/apps/setting/models_provider/impl/openai_model_provider/credential/image.py @@ -0,0 +1,74 @@ +# coding=utf-8 +import base64 +import os +import traceback +from typing import Dict + +from langchain_core.messages import HumanMessage + +from common import forms +from common.exception.app_exception import AppApiException +from common.forms import BaseForm, TooltipLabel +from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode +from django.utils.translation import gettext_lazy as _, gettext + + +class OpenAIImageModelParams(BaseForm): + temperature = forms.SliderField(TooltipLabel(_('Temperature'), + _('Higher values make the output more random, while lower values make it more focused and deterministic')), + required=True, default_value=0.7, + _min=0.1, + _max=1.0, + _step=0.01, + precision=2) + + max_tokens = forms.SliderField( + TooltipLabel(_('Output the maximum Tokens'), + _('Specify the maximum number of tokens that the model can generate')), + required=True, default_value=800, + _min=1, + _max=100000, + _step=1, + precision=0) + + +class OpenAIImageModelCredential(BaseForm, BaseModelCredential): + api_base = forms.TextInputField('API URL', required=True) + api_key = forms.PasswordInputField('API Key', required=True) + + def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], model_params, provider, + raise_exception=False): + model_type_list = 
provider.get_model_type_list() + if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))): + raise AppApiException(ValidCode.valid_error.value, + gettext('{model_type} Model type is not supported').format(model_type=model_type)) + + for key in ['api_base', 'api_key']: + if key not in model_credential: + if raise_exception: + raise AppApiException(ValidCode.valid_error.value, gettext('{key} is required').format(key=key)) + else: + return False + try: + model = provider.get_model(model_type, model_name, model_credential, **model_params) + res = model.stream([HumanMessage(content=[{"type": "text", "text": gettext('Hello')}])]) + for chunk in res: + print(chunk) + except Exception as e: + traceback.print_exc() + if isinstance(e, AppApiException): + raise e + if raise_exception: + raise AppApiException(ValidCode.valid_error.value, + gettext( + 'Verification failed, please check whether the parameters are correct: {error}').format( + error=str(e))) + else: + return False + return True + + def encryption_dict(self, model: Dict[str, object]): + return {**model, 'api_key': super().encryption(model.get('api_key', ''))} + + def get_model_params_setting_form(self, model_name): + return OpenAIImageModelParams() diff --git a/apps/setting/models_provider/impl/openai_model_provider/credential/llm.py b/apps/setting/models_provider/impl/openai_model_provider/credential/llm.py new file mode 100644 index 00000000000..eb862264899 --- /dev/null +++ b/apps/setting/models_provider/impl/openai_model_provider/credential/llm.py @@ -0,0 +1,80 @@ +# coding=utf-8 +""" + @project: MaxKB + @Author:虎 + @file: llm.py + @date:2024/7/11 18:32 + @desc: +""" +import traceback +from typing import Dict + +from django.utils.translation import gettext_lazy as _, gettext +from langchain_core.messages import HumanMessage +from openai import BadRequestError + +from common import forms +from common.exception.app_exception import AppApiException +from common.forms import BaseForm, 
TooltipLabel +from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode + + +class OpenAILLMModelParams(BaseForm): + temperature = forms.SliderField(TooltipLabel(_('Temperature'), + _('Higher values make the output more random, while lower values make it more focused and deterministic')), + required=True, default_value=0.7, + _min=0.1, + _max=1.0, + _step=0.01, + precision=2) + + max_tokens = forms.SliderField( + TooltipLabel(_('Output the maximum Tokens'), + _('Specify the maximum number of tokens that the model can generate')), + required=True, default_value=800, + _min=1, + _max=100000, + _step=1, + precision=0) + + +class OpenAILLMModelCredential(BaseForm, BaseModelCredential): + + def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], model_params, provider, + raise_exception=False): + model_type_list = provider.get_model_type_list() + if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))): + raise AppApiException(ValidCode.valid_error.value, + gettext('{model_type} Model type is not supported').format(model_type=model_type)) + + for key in ['api_base', 'api_key']: + if key not in model_credential: + if raise_exception: + raise AppApiException(ValidCode.valid_error.value, gettext('{key} is required').format(key=key)) + else: + return False + try: + + model = provider.get_model(model_type, model_name, model_credential, **model_params) + model.invoke([HumanMessage(content=gettext('Hello'))]) + except Exception as e: + traceback.print_exc() + if isinstance(e, AppApiException) or isinstance(e, BadRequestError): + raise e + if raise_exception: + raise AppApiException(ValidCode.valid_error.value, + gettext( + 'Verification failed, please check whether the parameters are correct: {error}').format( + error=str(e))) + else: + return False + return True + + def encryption_dict(self, model: Dict[str, object]): + return {**model, 'api_key': super().encryption(model.get('api_key', ''))} 
+ + api_base = forms.TextInputField('API URL', required=True) + api_key = forms.PasswordInputField('API Key', required=True) + + def get_model_params_setting_form(self, model_name): + return OpenAILLMModelParams() diff --git a/apps/setting/models_provider/impl/openai_model_provider/credential/stt.py b/apps/setting/models_provider/impl/openai_model_provider/credential/stt.py new file mode 100644 index 00000000000..e198238b123 --- /dev/null +++ b/apps/setting/models_provider/impl/openai_model_provider/credential/stt.py @@ -0,0 +1,49 @@ +# coding=utf-8 +import traceback +from typing import Dict + +from django.utils.translation import gettext as _ + +from common import forms +from common.exception.app_exception import AppApiException +from common.forms import BaseForm +from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode + + +class OpenAISTTModelCredential(BaseForm, BaseModelCredential): + api_base = forms.TextInputField('API URL', required=True) + api_key = forms.PasswordInputField('API Key', required=True) + + def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], model_params, provider, + raise_exception=False): + model_type_list = provider.get_model_type_list() + if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))): + raise AppApiException(ValidCode.valid_error.value, + _('{model_type} Model type is not supported').format(model_type=model_type)) + + for key in ['api_base', 'api_key']: + if key not in model_credential: + if raise_exception: + raise AppApiException(ValidCode.valid_error.value, _('{key} is required').format(key=key)) + else: + return False + try: + model = provider.get_model(model_type, model_name, model_credential) + model.check_auth() + except Exception as e: + traceback.print_exc() + if isinstance(e, AppApiException): + raise e + if raise_exception: + raise AppApiException(ValidCode.valid_error.value, + _('Verification failed, please check whether the 
parameters are correct: {error}').format( + error=str(e))) + else: + return False + return True + + def encryption_dict(self, model: Dict[str, object]): + return {**model, 'api_key': super().encryption(model.get('api_key', ''))} + + def get_model_params_setting_form(self, model_name): + pass diff --git a/apps/setting/models_provider/impl/openai_model_provider/credential/tti.py b/apps/setting/models_provider/impl/openai_model_provider/credential/tti.py new file mode 100644 index 00000000000..cd2e342cd22 --- /dev/null +++ b/apps/setting/models_provider/impl/openai_model_provider/credential/tti.py @@ -0,0 +1,90 @@ +# coding=utf-8 +import traceback +from typing import Dict + +from django.utils.translation import gettext_lazy as _, gettext + +from common import forms +from common.exception.app_exception import AppApiException +from common.forms import BaseForm, TooltipLabel +from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode + + +class OpenAITTIModelParams(BaseForm): + size = forms.SingleSelect( + TooltipLabel(_('Image size'), + _('The image generation endpoint allows you to create raw images based on text prompts. When using the DALL·E 3, the image size can be 1024x1024, 1024x1792 or 1792x1024 pixels.')), + required=True, + default_value='1024x1024', + option_list=[ + {'value': '1024x1024', 'label': '1024x1024'}, + {'value': '1024x1792', 'label': '1024x1792'}, + {'value': '1792x1024', 'label': '1792x1024'}, + ], + text_field='label', + value_field='value' + ) + + quality = forms.SingleSelect( + TooltipLabel(_('Picture quality'), _(''' +By default, images are produced in standard quality, but with DALL·E 3 you can set quality: "hd" to enhance detail. Square, standard quality images are generated fastest. 
+ ''')), + required=True, + default_value='standard', + option_list=[ + {'value': 'standard', 'label': 'standard'}, + {'value': 'hd', 'label': 'hd'}, + ], + text_field='label', + value_field='value' + ) + + n = forms.SliderField( + TooltipLabel(_('Number of pictures'), + _('You can use DALL·E 3 to request 1 image at a time (requesting more images by issuing parallel requests), or use DALL·E 2 with the n parameter to request up to 10 images at a time.')), + required=True, default_value=1, + _min=1, + _max=10, + _step=1, + precision=0) + + +class OpenAITextToImageModelCredential(BaseForm, BaseModelCredential): + api_base = forms.TextInputField('API URL', required=True) + api_key = forms.PasswordInputField('API Key', required=True) + + def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], model_params, provider, + raise_exception=False): + model_type_list = provider.get_model_type_list() + if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))): + raise AppApiException(ValidCode.valid_error.value, + gettext('{model_type} Model type is not supported').format(model_type=model_type)) + + for key in ['api_base', 'api_key']: + if key not in model_credential: + if raise_exception: + raise AppApiException(ValidCode.valid_error.value, gettext('{key} is required').format(key=key)) + else: + return False + try: + model = provider.get_model(model_type, model_name, model_credential, **model_params) + res = model.check_auth() + print(res) + except Exception as e: + traceback.print_exc() + if isinstance(e, AppApiException): + raise e + if raise_exception: + raise AppApiException(ValidCode.valid_error.value, + gettext( + 'Verification failed, please check whether the parameters are correct: {error}').format( + error=str(e))) + else: + return False + return True + + def encryption_dict(self, model: Dict[str, object]): + return {**model, 'api_key': super().encryption(model.get('api_key', ''))} + + def 
get_model_params_setting_form(self, model_name): + return OpenAITTIModelParams() diff --git a/apps/setting/models_provider/impl/openai_model_provider/credential/tts.py b/apps/setting/models_provider/impl/openai_model_provider/credential/tts.py new file mode 100644 index 00000000000..57059197501 --- /dev/null +++ b/apps/setting/models_provider/impl/openai_model_provider/credential/tts.py @@ -0,0 +1,68 @@ +# coding=utf-8 +import traceback +from typing import Dict + +from django.utils.translation import gettext_lazy as _, gettext + +from common import forms +from common.exception.app_exception import AppApiException +from common.forms import BaseForm, TooltipLabel +from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode + + +class OpenAITTSModelGeneralParams(BaseForm): + # alloy, echo, fable, onyx, nova, shimmer + voice = forms.SingleSelect( + TooltipLabel('Voice', + _('Try out the different sounds (Alloy, Echo, Fable, Onyx, Nova, and Sparkle) to find one that suits your desired tone and audience. 
The current voiceover is optimized for English.')), + required=True, default_value='alloy', + text_field='value', + value_field='value', + option_list=[ + {'text': 'alloy', 'value': 'alloy'}, + {'text': 'echo', 'value': 'echo'}, + {'text': 'fable', 'value': 'fable'}, + {'text': 'onyx', 'value': 'onyx'}, + {'text': 'nova', 'value': 'nova'}, + {'text': 'shimmer', 'value': 'shimmer'}, + ]) + + +class OpenAITTSModelCredential(BaseForm, BaseModelCredential): + api_base = forms.TextInputField('API URL', required=True) + api_key = forms.PasswordInputField('API Key', required=True) + + def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], model_params, provider, + raise_exception=False): + model_type_list = provider.get_model_type_list() + if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))): + raise AppApiException(ValidCode.valid_error.value, + gettext('{model_type} Model type is not supported').format(model_type=model_type)) + + for key in ['api_base', 'api_key']: + if key not in model_credential: + if raise_exception: + raise AppApiException(ValidCode.valid_error.value, gettext('{key} is required').format(key=key)) + else: + return False + try: + model = provider.get_model(model_type, model_name, model_credential, **model_params) + model.check_auth() + except Exception as e: + traceback.print_exc() + if isinstance(e, AppApiException): + raise e + if raise_exception: + raise AppApiException(ValidCode.valid_error.value, + gettext( + 'Verification failed, please check whether the parameters are correct: {error}').format( + error=str(e))) + else: + return False + return True + + def encryption_dict(self, model: Dict[str, object]): + return {**model, 'api_key': super().encryption(model.get('api_key', ''))} + + def get_model_params_setting_form(self, model_name): + return OpenAITTSModelGeneralParams() diff --git a/apps/setting/models_provider/impl/openai_model_provider/model/embedding.py 
b/apps/setting/models_provider/impl/openai_model_provider/model/embedding.py new file mode 100644 index 00000000000..f95e78188f0 --- /dev/null +++ b/apps/setting/models_provider/impl/openai_model_provider/model/embedding.py @@ -0,0 +1,39 @@ +# coding=utf-8 +""" + @project: MaxKB + @Author:虎 + @file: embedding.py + @date:2024/7/12 17:44 + @desc: +""" +from typing import Dict, List + +import openai + +from setting.models_provider.base_model_provider import MaxKBBaseModel + + +class OpenAIEmbeddingModel(MaxKBBaseModel): + model_name: str + + def __init__(self, api_key, base_url, model_name: str): + self.client = openai.OpenAI(api_key=api_key, base_url=base_url).embeddings + self.model_name = model_name + + @staticmethod + def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs): + return OpenAIEmbeddingModel( + api_key=model_credential.get('api_key'), + model_name=model_name, + base_url=model_credential.get('api_base'), + ) + + def embed_query(self, text: str): + res = self.embed_documents([text]) + return res[0] + + def embed_documents( + self, texts: List[str], chunk_size: int | None = None + ) -> List[List[float]]: + res = self.client.create(input=texts, model=self.model_name, encoding_format="float") + return [e.embedding for e in res.data] diff --git a/apps/setting/models_provider/impl/openai_model_provider/model/image.py b/apps/setting/models_provider/impl/openai_model_provider/model/image.py new file mode 100644 index 00000000000..7ac0906a786 --- /dev/null +++ b/apps/setting/models_provider/impl/openai_model_provider/model/image.py @@ -0,0 +1,20 @@ +from typing import Dict + +from setting.models_provider.base_model_provider import MaxKBBaseModel +from setting.models_provider.impl.base_chat_open_ai import BaseChatOpenAI + + +class OpenAIImage(MaxKBBaseModel, BaseChatOpenAI): + + @staticmethod + def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs): + optional_params = 
MaxKBBaseModel.filter_optional_params(model_kwargs) + return OpenAIImage( + model_name=model_name, + openai_api_base=model_credential.get('api_base'), + openai_api_key=model_credential.get('api_key'), + # stream_options={"include_usage": True}, + streaming=True, + stream_usage=True, + extra_body=optional_params + ) diff --git a/apps/setting/models_provider/impl/openai_model_provider/model/llm.py b/apps/setting/models_provider/impl/openai_model_provider/model/llm.py new file mode 100644 index 00000000000..1893852100b --- /dev/null +++ b/apps/setting/models_provider/impl/openai_model_provider/model/llm.py @@ -0,0 +1,57 @@ +# coding=utf-8 +""" + @project: maxkb + @Author:虎 + @file: llm.py + @date:2024/4/18 15:28 + @desc: +""" +from typing import List, Dict + +from langchain_core.messages import BaseMessage, get_buffer_string + +from common.config.tokenizer_manage_config import TokenizerManage +from setting.models_provider.base_model_provider import MaxKBBaseModel +from setting.models_provider.impl.base_chat_open_ai import BaseChatOpenAI + + +def custom_get_token_ids(text: str): + tokenizer = TokenizerManage.get_tokenizer() + return tokenizer.encode(text) + + +class OpenAIChatModel(MaxKBBaseModel, BaseChatOpenAI): + + @staticmethod + def is_cache_model(): + return False + + @staticmethod + def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs): + optional_params = MaxKBBaseModel.filter_optional_params(model_kwargs) + streaming = model_kwargs.get('streaming', True) + if 'o1' in model_name: + streaming = False + azure_chat_open_ai = OpenAIChatModel( + model=model_name, + base_url=model_credential.get('api_base'), + api_key=model_credential.get('api_key'), + extra_body=optional_params, + streaming=streaming, + custom_get_token_ids=custom_get_token_ids + ) + return azure_chat_open_ai + + def get_num_tokens_from_messages(self, messages: List[BaseMessage]) -> int: + try: + return super().get_num_tokens_from_messages(messages) + except 
Exception as e: + tokenizer = TokenizerManage.get_tokenizer() + return sum([len(tokenizer.encode(get_buffer_string([m]))) for m in messages]) + + def get_num_tokens(self, text: str) -> int: + try: + return super().get_num_tokens(text) + except Exception as e: + tokenizer = TokenizerManage.get_tokenizer() + return len(tokenizer.encode(text)) diff --git a/apps/setting/models_provider/impl/openai_model_provider/model/openai_chat_model.py b/apps/setting/models_provider/impl/openai_model_provider/model/openai_chat_model.py deleted file mode 100644 index 7271fe8adf9..00000000000 --- a/apps/setting/models_provider/impl/openai_model_provider/model/openai_chat_model.py +++ /dev/null @@ -1,30 +0,0 @@ -# coding=utf-8 -""" - @project: maxkb - @Author:虎 - @file: openai_chat_model.py - @date:2024/4/18 15:28 - @desc: -""" -from typing import List - -from langchain_core.messages import BaseMessage, get_buffer_string -from langchain_openai import ChatOpenAI - -from common.config.tokenizer_manage_config import TokenizerManage - - -class OpenAIChatModel(ChatOpenAI): - def get_num_tokens_from_messages(self, messages: List[BaseMessage]) -> int: - try: - return super().get_num_tokens_from_messages(messages) - except Exception as e: - tokenizer = TokenizerManage.get_tokenizer() - return sum([len(tokenizer.encode(get_buffer_string([m]))) for m in messages]) - - def get_num_tokens(self, text: str) -> int: - try: - return super().get_num_tokens(text) - except Exception as e: - tokenizer = TokenizerManage.get_tokenizer() - return len(tokenizer.encode(text)) diff --git a/apps/setting/models_provider/impl/openai_model_provider/model/stt.py b/apps/setting/models_provider/impl/openai_model_provider/model/stt.py new file mode 100644 index 00000000000..0b5f9a4b932 --- /dev/null +++ b/apps/setting/models_provider/impl/openai_model_provider/model/stt.py @@ -0,0 +1,59 @@ +import asyncio +import io +from typing import Dict + +from openai import OpenAI + +from common.config.tokenizer_manage_config 
import TokenizerManage +from setting.models_provider.base_model_provider import MaxKBBaseModel +from setting.models_provider.impl.base_stt import BaseSpeechToText + + +def custom_get_token_ids(text: str): + tokenizer = TokenizerManage.get_tokenizer() + return tokenizer.encode(text) + + +class OpenAISpeechToText(MaxKBBaseModel, BaseSpeechToText): + api_base: str + api_key: str + model: str + + def __init__(self, **kwargs): + super().__init__(**kwargs) + self.api_key = kwargs.get('api_key') + self.api_base = kwargs.get('api_base') + + @staticmethod + def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs): + optional_params = {} + if 'max_tokens' in model_kwargs and model_kwargs['max_tokens'] is not None: + optional_params['max_tokens'] = model_kwargs['max_tokens'] + if 'temperature' in model_kwargs and model_kwargs['temperature'] is not None: + optional_params['temperature'] = model_kwargs['temperature'] + return OpenAISpeechToText( + model=model_name, + api_base=model_credential.get('api_base'), + api_key=model_credential.get('api_key'), + **optional_params, + ) + + def check_auth(self): + client = OpenAI( + base_url=self.api_base, + api_key=self.api_key + ) + response_list = client.models.with_raw_response.list() + # print(response_list) + + def speech_to_text(self, audio_file): + client = OpenAI( + base_url=self.api_base, + api_key=self.api_key + ) + audio_data = audio_file.read() + buffer = io.BytesIO(audio_data) + buffer.name = "file.mp3" # this is the important line + res = client.audio.transcriptions.create(model=self.model, language="zh", file=buffer) + return res.text + diff --git a/apps/setting/models_provider/impl/openai_model_provider/model/tti.py b/apps/setting/models_provider/impl/openai_model_provider/model/tti.py new file mode 100644 index 00000000000..942afcf9f0d --- /dev/null +++ b/apps/setting/models_provider/impl/openai_model_provider/model/tti.py @@ -0,0 +1,58 @@ +from typing import Dict + +from openai 
import OpenAI + +from common.config.tokenizer_manage_config import TokenizerManage +from setting.models_provider.base_model_provider import MaxKBBaseModel +from setting.models_provider.impl.base_tti import BaseTextToImage + + +def custom_get_token_ids(text: str): + tokenizer = TokenizerManage.get_tokenizer() + return tokenizer.encode(text) + + +class OpenAITextToImage(MaxKBBaseModel, BaseTextToImage): + api_base: str + api_key: str + model: str + params: dict + + def __init__(self, **kwargs): + super().__init__(**kwargs) + self.api_key = kwargs.get('api_key') + self.api_base = kwargs.get('api_base') + self.model = kwargs.get('model') + self.params = kwargs.get('params') + + @staticmethod + def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs): + optional_params = {'params': {'size': '1024x1024', 'quality': 'standard', 'n': 1}} + for key, value in model_kwargs.items(): + if key not in ['model_id', 'use_local', 'streaming']: + optional_params['params'][key] = value + return OpenAITextToImage( + model=model_name, + api_base=model_credential.get('api_base'), + api_key=model_credential.get('api_key'), + **optional_params, + ) + + def is_cache_model(self): + return False + + def check_auth(self): + chat = OpenAI(api_key=self.api_key, base_url=self.api_base) + response_list = chat.models.with_raw_response.list() + + # self.generate_image('生成一个小猫图片') + + def generate_image(self, prompt: str, negative_prompt: str = None): + chat = OpenAI(api_key=self.api_key, base_url=self.api_base) + res = chat.images.generate(model=self.model, prompt=prompt, **self.params) + file_urls = [] + for content in res.data: + url = content.url + file_urls.append(url) + + return file_urls diff --git a/apps/setting/models_provider/impl/openai_model_provider/model/tts.py b/apps/setting/models_provider/impl/openai_model_provider/model/tts.py new file mode 100644 index 00000000000..0eeab1d7cce --- /dev/null +++ 
b/apps/setting/models_provider/impl/openai_model_provider/model/tts.py @@ -0,0 +1,64 @@ +from typing import Dict + +from openai import OpenAI + +from common.config.tokenizer_manage_config import TokenizerManage +from common.util.common import _remove_empty_lines +from setting.models_provider.base_model_provider import MaxKBBaseModel +from setting.models_provider.impl.base_tts import BaseTextToSpeech + + +def custom_get_token_ids(text: str): + tokenizer = TokenizerManage.get_tokenizer() + return tokenizer.encode(text) + + +class OpenAITextToSpeech(MaxKBBaseModel, BaseTextToSpeech): + api_base: str + api_key: str + model: str + params: dict + + def __init__(self, **kwargs): + super().__init__(**kwargs) + self.api_key = kwargs.get('api_key') + self.api_base = kwargs.get('api_base') + self.model = kwargs.get('model') + self.params = kwargs.get('params') + + @staticmethod + def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs): + optional_params = {'params': {'voice': 'alloy'}} + for key, value in model_kwargs.items(): + if key not in ['model_id', 'use_local', 'streaming']: + optional_params['params'][key] = value + return OpenAITextToSpeech( + model=model_name, + api_base=model_credential.get('api_base'), + api_key=model_credential.get('api_key'), + **optional_params, + ) + + def check_auth(self): + client = OpenAI( + base_url=self.api_base, + api_key=self.api_key + ) + response_list = client.models.with_raw_response.list() + # print(response_list) + + def text_to_speech(self, text): + client = OpenAI( + base_url=self.api_base, + api_key=self.api_key + ) + text = _remove_empty_lines(text) + with client.audio.speech.with_streaming_response.create( + model=self.model, + input=text, + **self.params + ) as response: + return response.read() + + def is_cache_model(self): + return False diff --git a/apps/setting/models_provider/impl/openai_model_provider/openai_model_provider.py 
b/apps/setting/models_provider/impl/openai_model_provider/openai_model_provider.py index 324e851cd19..a06d3b75f08 100644 --- a/apps/setting/models_provider/impl/openai_model_provider/openai_model_provider.py +++ b/apps/setting/models_provider/impl/openai_model_provider/openai_model_provider.py @@ -7,127 +7,141 @@ @desc: """ import os -from typing import Dict -from langchain.schema import HumanMessage - -from common import forms -from common.exception.app_exception import AppApiException -from common.forms import BaseForm from common.util.file_util import get_file_content -from setting.models_provider.base_model_provider import IModelProvider, ModelProvideInfo, BaseModelCredential, \ - ModelInfo, \ - ModelTypeConst, ValidCode -from setting.models_provider.impl.openai_model_provider.model.openai_chat_model import OpenAIChatModel +from setting.models_provider.base_model_provider import IModelProvider, ModelProvideInfo, ModelInfo, \ + ModelTypeConst, ModelInfoManage +from setting.models_provider.impl.openai_model_provider.credential.embedding import OpenAIEmbeddingCredential +from setting.models_provider.impl.openai_model_provider.credential.image import OpenAIImageModelCredential +from setting.models_provider.impl.openai_model_provider.credential.llm import OpenAILLMModelCredential +from setting.models_provider.impl.openai_model_provider.credential.stt import OpenAISTTModelCredential +from setting.models_provider.impl.openai_model_provider.credential.tti import OpenAITextToImageModelCredential +from setting.models_provider.impl.openai_model_provider.credential.tts import OpenAITTSModelCredential +from setting.models_provider.impl.openai_model_provider.model.embedding import OpenAIEmbeddingModel +from setting.models_provider.impl.openai_model_provider.model.image import OpenAIImage +from setting.models_provider.impl.openai_model_provider.model.llm import OpenAIChatModel +from setting.models_provider.impl.openai_model_provider.model.stt import OpenAISpeechToText +from 
setting.models_provider.impl.openai_model_provider.model.tti import OpenAITextToImage +from setting.models_provider.impl.openai_model_provider.model.tts import OpenAITextToSpeech from smartdoc.conf import PROJECT_DIR - - -class OpenAILLMModelCredential(BaseForm, BaseModelCredential): - - def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], raise_exception=False): - model_type_list = OpenAIModelProvider().get_model_type_list() - if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))): - raise AppApiException(ValidCode.valid_error.value, f'{model_type} 模型类型不支持') - - for key in ['api_base', 'api_key']: - if key not in model_credential: - if raise_exception: - raise AppApiException(ValidCode.valid_error.value, f'{key} 字段为必填字段') - else: - return False - try: - model = OpenAIModelProvider().get_model(model_type, model_name, model_credential) - model.invoke([HumanMessage(content='你好')]) - except Exception as e: - if isinstance(e, AppApiException): - raise e - if raise_exception: - raise AppApiException(ValidCode.valid_error.value, f'校验失败,请检查参数是否正确: {str(e)}') - else: - return False - return True - - def encryption_dict(self, model: Dict[str, object]): - return {**model, 'api_key': super().encryption(model.get('api_key', ''))} - - api_base = forms.TextInputField('API 域名', required=True) - api_key = forms.PasswordInputField('API Key', required=True) - +from django.utils.translation import gettext_lazy as _ openai_llm_model_credential = OpenAILLMModelCredential() - -model_dict = { - 'gpt-3.5-turbo': ModelInfo('gpt-3.5-turbo', '最新的gpt-3.5-turbo,随OpenAI调整而更新', ModelTypeConst.LLM, - openai_llm_model_credential, - ), - 'gpt-4': ModelInfo('gpt-4', '最新的gpt-4,随OpenAI调整而更新', ModelTypeConst.LLM, openai_llm_model_credential, - ), - 'gpt-4o': ModelInfo('gpt-4o', '最新的GPT-4o,比gpt-4-turbo更便宜、更快,随OpenAI调整而更新', - ModelTypeConst.LLM, openai_llm_model_credential, - ), - 'gpt-4-turbo': ModelInfo('gpt-4-turbo', 
'最新的gpt-4-turbo,随OpenAI调整而更新', ModelTypeConst.LLM, - openai_llm_model_credential, - ), - 'gpt-4-turbo-preview': ModelInfo('gpt-4-turbo-preview', '最新的gpt-4-turbo-preview,随OpenAI调整而更新', - ModelTypeConst.LLM, openai_llm_model_credential, - ), - 'gpt-3.5-turbo-0125': ModelInfo('gpt-3.5-turbo-0125', - '2024年1月25日的gpt-3.5-turbo快照,支持上下文长度16,385 tokens', ModelTypeConst.LLM, - openai_llm_model_credential, - ), - 'gpt-3.5-turbo-1106': ModelInfo('gpt-3.5-turbo-1106', - '2023年11月6日的gpt-3.5-turbo快照,支持上下文长度16,385 tokens', ModelTypeConst.LLM, - openai_llm_model_credential, - ), - 'gpt-3.5-turbo-0613': ModelInfo('gpt-3.5-turbo-0613', - '[Legacy] 2023年6月13日的gpt-3.5-turbo快照,将于2024年6月13日弃用', - ModelTypeConst.LLM, openai_llm_model_credential, - ), - 'gpt-4o-2024-05-13': ModelInfo('gpt-4o-2024-05-13', - '2024年5月13日的gpt-4o快照,支持上下文长度128,000 tokens', - ModelTypeConst.LLM, openai_llm_model_credential, - ), - 'gpt-4-turbo-2024-04-09': ModelInfo('gpt-4-turbo-2024-04-09', - '2024年4月9日的gpt-4-turbo快照,支持上下文长度128,000 tokens', - ModelTypeConst.LLM, openai_llm_model_credential, - ), - 'gpt-4-0125-preview': ModelInfo('gpt-4-0125-preview', '2024年1月25日的gpt-4-turbo快照,支持上下文长度128,000 tokens', - ModelTypeConst.LLM, openai_llm_model_credential, - ), - 'gpt-4-1106-preview': ModelInfo('gpt-4-1106-preview', '2023年11月6日的gpt-4-turbo快照,支持上下文长度128,000 tokens', - ModelTypeConst.LLM, openai_llm_model_credential, - ), -} +openai_stt_model_credential = OpenAISTTModelCredential() +openai_tts_model_credential = OpenAITTSModelCredential() +openai_image_model_credential = OpenAIImageModelCredential() +openai_tti_model_credential = OpenAITextToImageModelCredential() +model_info_list = [ + ModelInfo('gpt-3.5-turbo', _('The latest gpt-3.5-turbo, updated with OpenAI adjustments'), ModelTypeConst.LLM, + openai_llm_model_credential, OpenAIChatModel + ), + ModelInfo('gpt-4', _('Latest gpt-4, updated with OpenAI adjustments'), ModelTypeConst.LLM, openai_llm_model_credential, + OpenAIChatModel), + ModelInfo('gpt-4o', _('The 
latest GPT-4o, cheaper and faster than gpt-4-turbo, updated with OpenAI adjustments'), + ModelTypeConst.LLM, openai_llm_model_credential, + OpenAIChatModel), + ModelInfo('gpt-4o-mini', _('The latest gpt-4o-mini, cheaper and faster than gpt-4o, updated with OpenAI adjustments'), + ModelTypeConst.LLM, openai_llm_model_credential, + OpenAIChatModel), + ModelInfo('gpt-4-turbo', _('The latest gpt-4-turbo, updated with OpenAI adjustments'), ModelTypeConst.LLM, + openai_llm_model_credential, + OpenAIChatModel), + ModelInfo('gpt-4-turbo-preview', _('The latest gpt-4-turbo-preview, updated with OpenAI adjustments'), + ModelTypeConst.LLM, openai_llm_model_credential, + OpenAIChatModel), + ModelInfo('gpt-3.5-turbo-0125', + _('gpt-3.5-turbo snapshot on January 25, 2024, supporting context length 16,385 tokens'), ModelTypeConst.LLM, + openai_llm_model_credential, + OpenAIChatModel), + ModelInfo('gpt-3.5-turbo-1106', + _('gpt-3.5-turbo snapshot on November 6, 2023, supporting context length 16,385 tokens'), ModelTypeConst.LLM, + openai_llm_model_credential, + OpenAIChatModel), + ModelInfo('gpt-3.5-turbo-0613', + _('[Legacy] gpt-3.5-turbo snapshot on June 13, 2023, will be deprecated on June 13, 2024'), + ModelTypeConst.LLM, openai_llm_model_credential, + OpenAIChatModel), + ModelInfo('gpt-4o-2024-05-13', + _('gpt-4o snapshot on May 13, 2024, supporting context length 128,000 tokens'), + ModelTypeConst.LLM, openai_llm_model_credential, + OpenAIChatModel), + ModelInfo('gpt-4-turbo-2024-04-09', + _('gpt-4-turbo snapshot on April 9, 2024, supporting context length 128,000 tokens'), + ModelTypeConst.LLM, openai_llm_model_credential, + OpenAIChatModel), + ModelInfo('gpt-4-0125-preview', _('gpt-4-turbo snapshot on January 25, 2024, supporting context length 128,000 tokens'), + ModelTypeConst.LLM, openai_llm_model_credential, + OpenAIChatModel), + ModelInfo('gpt-4-1106-preview', _('gpt-4-turbo snapshot on November 6, 2023, supporting context length 128,000 tokens'), + 
ModelTypeConst.LLM, openai_llm_model_credential, + OpenAIChatModel), + ModelInfo('whisper-1', '', + ModelTypeConst.STT, openai_stt_model_credential, + OpenAISpeechToText), + ModelInfo('tts-1', '', + ModelTypeConst.TTS, openai_tts_model_credential, + OpenAITextToSpeech) +] +open_ai_embedding_credential = OpenAIEmbeddingCredential() +model_info_embedding_list = [ + ModelInfo('text-embedding-ada-002', '', + ModelTypeConst.EMBEDDING, open_ai_embedding_credential, + OpenAIEmbeddingModel), + ModelInfo('text-embedding-3-small', '', + ModelTypeConst.EMBEDDING, open_ai_embedding_credential, + OpenAIEmbeddingModel), + ModelInfo('text-embedding-3-large', '', + ModelTypeConst.EMBEDDING, open_ai_embedding_credential, + OpenAIEmbeddingModel) +] + +model_info_image_list = [ + ModelInfo('gpt-4o', _('The latest GPT-4o, cheaper and faster than gpt-4-turbo, updated with OpenAI adjustments'), + ModelTypeConst.IMAGE, openai_image_model_credential, + OpenAIImage), + ModelInfo('gpt-4o-mini', _('The latest gpt-4o-mini, cheaper and faster than gpt-4o, updated with OpenAI adjustments'), + ModelTypeConst.IMAGE, openai_image_model_credential, + OpenAIImage), +] + +model_info_tti_list = [ + ModelInfo('dall-e-2', '', + ModelTypeConst.TTI, openai_tti_model_credential, + OpenAITextToImage), + ModelInfo('dall-e-3', '', + ModelTypeConst.TTI, openai_tti_model_credential, + OpenAITextToImage), +] + +model_info_manage = ( + ModelInfoManage.builder() + .append_model_info_list(model_info_list) + .append_default_model_info(ModelInfo('gpt-3.5-turbo', _('The latest gpt-3.5-turbo, updated with OpenAI adjustments'), ModelTypeConst.LLM, + openai_llm_model_credential, OpenAIChatModel + )) + .append_model_info_list(model_info_embedding_list) + .append_default_model_info(model_info_embedding_list[0]) + .append_model_info_list(model_info_image_list) + .append_default_model_info(model_info_image_list[0]) + .append_model_info_list(model_info_tti_list) + .append_default_model_info(model_info_tti_list[0]) + 
.append_default_model_info(ModelInfo('whisper-1', '', + ModelTypeConst.STT, openai_stt_model_credential, + OpenAISpeechToText) + ) + .append_default_model_info(ModelInfo('tts-1', '', + ModelTypeConst.TTS, openai_tts_model_credential, + OpenAITextToSpeech)) + .build() +) class OpenAIModelProvider(IModelProvider): - def get_dialogue_number(self): - return 3 - - def get_model(self, model_type, model_name, model_credential: Dict[str, object], **model_kwargs) -> OpenAIChatModel: - azure_chat_open_ai = OpenAIChatModel( - model=model_name, - openai_api_base=model_credential.get('api_base'), - openai_api_key=model_credential.get('api_key') - ) - return azure_chat_open_ai - - def get_model_credential(self, model_type, model_name): - if model_name in model_dict: - return model_dict.get(model_name).model_credential - return openai_llm_model_credential + def get_model_info_manage(self): + return model_info_manage def get_model_provide_info(self): return ModelProvideInfo(provider='model_openai_provider', name='OpenAI', icon=get_file_content( os.path.join(PROJECT_DIR, "apps", "setting", 'models_provider', 'impl', 'openai_model_provider', 'icon', 'openai_icon_svg'))) - - def get_model_list(self, model_type: str): - if model_type is None: - raise AppApiException(500, '模型类型不能为空') - return [model_dict.get(key).to_dict() for key in - list(filter(lambda key: model_dict.get(key).model_type == model_type, model_dict.keys()))] - - def get_model_type_list(self): - return [{'key': "大语言模型", 'value': "LLM"}] diff --git a/apps/setting/models_provider/impl/qwen_model_provider/credential/image.py b/apps/setting/models_provider/impl/qwen_model_provider/credential/image.py new file mode 100644 index 00000000000..3f3caafa0fd --- /dev/null +++ b/apps/setting/models_provider/impl/qwen_model_provider/credential/image.py @@ -0,0 +1,78 @@ +# coding=utf-8 +""" + @project: MaxKB + @Author:虎 + @file: llm.py + @date:2024/7/11 18:41 + @desc: +""" +import traceback +from typing import Dict + +from 
django.utils.translation import gettext_lazy as _, gettext +from langchain_core.messages import HumanMessage + +from common import forms +from common.exception.app_exception import AppApiException +from common.forms import BaseForm, TooltipLabel +from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode + + +class QwenModelParams(BaseForm): + temperature = forms.SliderField(TooltipLabel(_('Temperature'), + _('Higher values make the output more random, while lower values make it more focused and deterministic')), + required=True, default_value=1.0, + _min=0.1, + _max=1.9, + _step=0.01, + precision=2) + + max_tokens = forms.SliderField( + TooltipLabel(_('Output the maximum Tokens'), + _('Specify the maximum number of tokens that the model can generate')), + required=True, default_value=800, + _min=1, + _max=100000, + _step=1, + precision=0) + + +class QwenVLModelCredential(BaseForm, BaseModelCredential): + + def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], model_params, provider, + raise_exception=False): + model_type_list = provider.get_model_type_list() + if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))): + raise AppApiException(ValidCode.valid_error.value, + gettext('{model_type} Model type is not supported').format(model_type=model_type)) + for key in ['api_key']: + if key not in model_credential: + if raise_exception: + raise AppApiException(ValidCode.valid_error.value, gettext('{key} is required').format(key=key)) + else: + return False + try: + model = provider.get_model(model_type, model_name, model_credential, **model_params) + res = model.stream([HumanMessage(content=[{"type": "text", "text": gettext('Hello')}])]) + for chunk in res: + print(chunk) + except Exception as e: + traceback.print_exc() + if isinstance(e, AppApiException): + raise e + if raise_exception: + raise AppApiException(ValidCode.valid_error.value, + gettext( + 'Verification failed, please 
check whether the parameters are correct: {error}').format( + error=str(e))) + else: + return False + return True + + def encryption_dict(self, model: Dict[str, object]): + return {**model, 'api_key': super().encryption(model.get('api_key', ''))} + + api_key = forms.PasswordInputField('API Key', required=True) + + def get_model_params_setting_form(self, model_name): + return QwenModelParams() diff --git a/apps/setting/models_provider/impl/qwen_model_provider/credential/llm.py b/apps/setting/models_provider/impl/qwen_model_provider/credential/llm.py new file mode 100644 index 00000000000..6aced3340a2 --- /dev/null +++ b/apps/setting/models_provider/impl/qwen_model_provider/credential/llm.py @@ -0,0 +1,76 @@ +# coding=utf-8 +""" + @project: MaxKB + @Author:虎 + @file: llm.py + @date:2024/7/11 18:41 + @desc: +""" +import traceback +from typing import Dict + +from django.utils.translation import gettext_lazy as _, gettext +from langchain_core.messages import HumanMessage + +from common import forms +from common.exception.app_exception import AppApiException +from common.forms import BaseForm, TooltipLabel +from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode + + +class QwenModelParams(BaseForm): + temperature = forms.SliderField(TooltipLabel(_('Temperature'), + _('Higher values make the output more random, while lower values make it more focused and deterministic')), + required=True, default_value=1.0, + _min=0.1, + _max=1.9, + _step=0.01, + precision=2) + + max_tokens = forms.SliderField( + TooltipLabel(_('Output the maximum Tokens'), + _('Specify the maximum number of tokens that the model can generate')), + required=True, default_value=800, + _min=1, + _max=100000, + _step=1, + precision=0) + + +class OpenAILLMModelCredential(BaseForm, BaseModelCredential): + + def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], model_params, provider, + raise_exception=False): + model_type_list = 
provider.get_model_type_list() + if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))): + raise AppApiException(ValidCode.valid_error.value, + gettext('{model_type} Model type is not supported').format(model_type=model_type)) + for key in ['api_key']: + if key not in model_credential: + if raise_exception: + raise AppApiException(ValidCode.valid_error.value, gettext('{key} is required').format(key=key)) + else: + return False + try: + model = provider.get_model(model_type, model_name, model_credential, **model_params) + model.invoke([HumanMessage(content=gettext('Hello'))]) + except Exception as e: + traceback.print_exc() + if isinstance(e, AppApiException): + raise e + if raise_exception: + raise AppApiException(ValidCode.valid_error.value, + gettext( + 'Verification failed, please check whether the parameters are correct: {error}').format( + error=str(e))) + else: + return False + return True + + def encryption_dict(self, model: Dict[str, object]): + return {**model, 'api_key': super().encryption(model.get('api_key', ''))} + + api_key = forms.PasswordInputField('API Key', required=True) + + def get_model_params_setting_form(self, model_name): + return QwenModelParams() diff --git a/apps/setting/models_provider/impl/qwen_model_provider/credential/tti.py b/apps/setting/models_provider/impl/qwen_model_provider/credential/tti.py new file mode 100644 index 00000000000..cc904fe226f --- /dev/null +++ b/apps/setting/models_provider/impl/qwen_model_provider/credential/tti.py @@ -0,0 +1,98 @@ +# coding=utf-8 +""" + @project: MaxKB + @Author:虎 + @file: llm.py + @date:2024/7/11 18:41 + @desc: +""" +import traceback +from typing import Dict + +from django.utils.translation import gettext_lazy as _, gettext + +from common import forms +from common.exception.app_exception import AppApiException +from common.forms import BaseForm, TooltipLabel +from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode + + +class 
QwenModelParams(BaseForm): + size = forms.SingleSelect( + TooltipLabel(_('Image size'), _('Specify the size of the generated image, such as: 1024x1024')), + required=True, + default_value='1024*1024', + option_list=[ + {'value': '1024*1024', 'label': '1024*1024'}, + {'value': '720*1280', 'label': '720*1280'}, + {'value': '768*1152', 'label': '768*1152'}, + {'value': '1280*720', 'label': '1280*720'}, + ], + text_field='label', + value_field='value') + n = forms.SliderField( + TooltipLabel(_('Number of pictures'), _('Specify the number of generated images')), + required=True, default_value=1, + _min=1, + _max=4, + _step=1, + precision=0) + style = forms.SingleSelect( + TooltipLabel(_('Style'), _('Specify the style of generated images')), + required=True, + default_value='<auto>', + option_list=[ + {'value': '<auto>', 'label': _('Default value, the image style is randomly output by the model')}, + {'value': '<photography>', 'label': _('photography')}, + {'value': '<portrait>', 'label': _('Portraits')}, + {'value': '<3d cartoon>', 'label': _('3D cartoon')}, + {'value': '<anime>', 'label': _('animation')}, + {'value': '<oil painting>', 'label': _('painting')}, + {'value': '<watercolor>', 'label': _('watercolor')}, + {'value': '<sketch>', 'label': _('sketch')}, + {'value': '<chinese painting>', 'label': _('Chinese painting')}, + {'value': '<flat illustration>', 'label': _('flat illustration')}, + ], + text_field='label', + value_field='value' + ) + + +class QwenTextToImageModelCredential(BaseForm, BaseModelCredential): + + def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], model_params, provider, + raise_exception=False): + model_type_list = provider.get_model_type_list() + if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))): + raise AppApiException(ValidCode.valid_error.value, + gettext('{model_type} Model type is not supported').format(model_type=model_type)) + for key in ['api_key']: + if key not in model_credential: + if raise_exception: + raise AppApiException(ValidCode.valid_error.value, gettext('{key} is
required').format(key=key)) + else: + return False + try: + model = provider.get_model(model_type, model_name, model_credential, **model_params) + res = model.check_auth() + print(res) + except Exception as e: + traceback.print_exc() + if isinstance(e, AppApiException): + raise e + if raise_exception: + raise AppApiException(ValidCode.valid_error.value, + gettext( + 'Verification failed, please check whether the parameters are correct: {error}').format( + error=str(e))) + else: + return False + return True + + def encryption_dict(self, model: Dict[str, object]): + return {**model, 'api_key': super().encryption(model.get('api_key', ''))} + + api_key = forms.PasswordInputField('API Key', required=True) + + def get_model_params_setting_form(self, model_name): + return QwenModelParams() diff --git a/apps/setting/models_provider/impl/qwen_model_provider/model/image.py b/apps/setting/models_provider/impl/qwen_model_provider/model/image.py new file mode 100644 index 00000000000..bf3af0e3484 --- /dev/null +++ b/apps/setting/models_provider/impl/qwen_model_provider/model/image.py @@ -0,0 +1,25 @@ +# coding=utf-8 + +from typing import Dict + +from setting.models_provider.base_model_provider import MaxKBBaseModel +from setting.models_provider.impl.base_chat_open_ai import BaseChatOpenAI + + +class QwenVLChatModel(MaxKBBaseModel, BaseChatOpenAI): + @staticmethod + def is_cache_model(): + return False + + @staticmethod + def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs): + optional_params = MaxKBBaseModel.filter_optional_params(model_kwargs) + chat_tong_yi = QwenVLChatModel( + model_name=model_name, + openai_api_key=model_credential.get('api_key'), + openai_api_base='https://dashscope.aliyuncs.com/compatible-mode/v1', + streaming=True, + stream_usage=True, + extra_body=optional_params + ) + return chat_tong_yi diff --git a/apps/setting/models_provider/impl/qwen_model_provider/model/llm.py 
b/apps/setting/models_provider/impl/qwen_model_provider/model/llm.py new file mode 100644 index 00000000000..c4df28af9bb --- /dev/null +++ b/apps/setting/models_provider/impl/qwen_model_provider/model/llm.py @@ -0,0 +1,31 @@ +# coding=utf-8 +""" + @project: maxkb + @Author:虎 + @file: llm.py + @date:2024/4/28 11:44 + @desc: +""" +from typing import Dict + +from setting.models_provider.base_model_provider import MaxKBBaseModel +from setting.models_provider.impl.base_chat_open_ai import BaseChatOpenAI + + +class QwenChatModel(MaxKBBaseModel, BaseChatOpenAI): + @staticmethod + def is_cache_model(): + return False + + @staticmethod + def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs): + optional_params = MaxKBBaseModel.filter_optional_params(model_kwargs) + chat_tong_yi = QwenChatModel( + model_name=model_name, + openai_api_key=model_credential.get('api_key'), + openai_api_base='https://dashscope.aliyuncs.com/compatible-mode/v1', + streaming=True, + stream_usage=True, + extra_body=optional_params + ) + return chat_tong_yi diff --git a/apps/setting/models_provider/impl/qwen_model_provider/model/qwen_chat_model.py b/apps/setting/models_provider/impl/qwen_model_provider/model/qwen_chat_model.py deleted file mode 100644 index d3894d1d0aa..00000000000 --- a/apps/setting/models_provider/impl/qwen_model_provider/model/qwen_chat_model.py +++ /dev/null @@ -1,24 +0,0 @@ -# coding=utf-8 -""" - @project: maxkb - @Author:虎 - @file: qwen_chat_model.py - @date:2024/4/28 11:44 - @desc: -""" -from typing import List - -from langchain_community.chat_models import ChatTongyi -from langchain_core.messages import BaseMessage, get_buffer_string - -from common.config.tokenizer_manage_config import TokenizerManage - - -class QwenChatModel(ChatTongyi): - def get_num_tokens_from_messages(self, messages: List[BaseMessage]) -> int: - tokenizer = TokenizerManage.get_tokenizer() - return sum([len(tokenizer.encode(get_buffer_string([m]))) for m in messages]) 
- - def get_num_tokens(self, text: str) -> int: - tokenizer = TokenizerManage.get_tokenizer() - return len(tokenizer.encode(text)) diff --git a/apps/setting/models_provider/impl/qwen_model_provider/model/tti.py b/apps/setting/models_provider/impl/qwen_model_provider/model/tti.py new file mode 100644 index 00000000000..c39e1b3a7fc --- /dev/null +++ b/apps/setting/models_provider/impl/qwen_model_provider/model/tti.py @@ -0,0 +1,59 @@ +# coding=utf-8 +from http import HTTPStatus +from typing import Dict + +from dashscope import ImageSynthesis +from django.utils.translation import gettext +from langchain_community.chat_models import ChatTongyi +from langchain_core.messages import HumanMessage + +from setting.models_provider.base_model_provider import MaxKBBaseModel +from setting.models_provider.impl.base_tti import BaseTextToImage + + +class QwenTextToImageModel(MaxKBBaseModel, BaseTextToImage): + api_key: str + model_name: str + params: dict + + def __init__(self, **kwargs): + super().__init__(**kwargs) + self.api_key = kwargs.get('api_key') + self.model_name = kwargs.get('model_name') + self.params = kwargs.get('params') + + @staticmethod + def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs): + optional_params = {'params': {'size': '1024*1024', 'style': '', 'n': 1}} + for key, value in model_kwargs.items(): + if key not in ['model_id', 'use_local', 'streaming']: + optional_params['params'][key] = value + chat_tong_yi = QwenTextToImageModel( + model_name=model_name, + api_key=model_credential.get('api_key'), + **optional_params, + ) + return chat_tong_yi + + def is_cache_model(self): + return False + + def check_auth(self): + chat = ChatTongyi(api_key=self.api_key, model_name='qwen-max') + chat.invoke([HumanMessage([{"type": "text", "text": gettext('Hello')}])]) + + def generate_image(self, prompt: str, negative_prompt: str = None): + # api_base='https://dashscope.aliyuncs.com/compatible-mode/v1', + rsp = 
ImageSynthesis.call(api_key=self.api_key, + model=self.model_name, + prompt=prompt, + negative_prompt=negative_prompt, + **self.params) + file_urls = [] + if rsp.status_code == HTTPStatus.OK: + for result in rsp.output.results: + file_urls.append(result.url) + else: + print('sync_call Failed, status_code: %s, code: %s, message: %s' % + (rsp.status_code, rsp.code, rsp.message)) + return file_urls diff --git a/apps/setting/models_provider/impl/qwen_model_provider/qwen_model_provider.py b/apps/setting/models_provider/impl/qwen_model_provider/qwen_model_provider.py index 179f90368b0..48328116356 100644 --- a/apps/setting/models_provider/impl/qwen_model_provider/qwen_model_provider.py +++ b/apps/setting/models_provider/impl/qwen_model_provider/qwen_model_provider.py @@ -7,87 +7,59 @@ @desc: """ import os -from typing import Dict -from langchain.schema import HumanMessage -from langchain_community.chat_models.tongyi import ChatTongyi - -from common import forms -from common.exception.app_exception import AppApiException -from common.forms import BaseForm from common.util.file_util import get_file_content -from setting.models_provider.base_model_provider import ModelProvideInfo, ModelTypeConst, BaseModelCredential, \ - ModelInfo, IModelProvider, ValidCode -from setting.models_provider.impl.qwen_model_provider.model.qwen_chat_model import QwenChatModel +from setting.models_provider.base_model_provider import ModelProvideInfo, ModelTypeConst, ModelInfo, IModelProvider, \ + ModelInfoManage +from setting.models_provider.impl.qwen_model_provider.credential.image import QwenVLModelCredential +from setting.models_provider.impl.qwen_model_provider.credential.llm import OpenAILLMModelCredential +from setting.models_provider.impl.qwen_model_provider.credential.tti import QwenTextToImageModelCredential +from setting.models_provider.impl.qwen_model_provider.model.image import QwenVLChatModel + +from setting.models_provider.impl.qwen_model_provider.model.llm import QwenChatModel +from 
setting.models_provider.impl.qwen_model_provider.model.tti import QwenTextToImageModel from smartdoc.conf import PROJECT_DIR - - -class OpenAILLMModelCredential(BaseForm, BaseModelCredential): - - def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], raise_exception=False): - model_type_list = QwenModelProvider().get_model_type_list() - if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))): - raise AppApiException(ValidCode.valid_error.value, f'{model_type} 模型类型不支持') - for key in ['api_key']: - if key not in model_credential: - if raise_exception: - raise AppApiException(ValidCode.valid_error.value, f'{key} 字段为必填字段') - else: - return False - try: - model = QwenModelProvider().get_model(model_type, model_name, model_credential) - model.invoke([HumanMessage(content='你好')]) - except Exception as e: - if isinstance(e, AppApiException): - raise e - if raise_exception: - raise AppApiException(ValidCode.valid_error.value, f'校验失败,请检查参数是否正确: {str(e)}') - else: - return False - return True - - def encryption_dict(self, model: Dict[str, object]): - return {**model, 'api_key': super().encryption(model.get('api_key', ''))} - - api_key = forms.PasswordInputField('API Key', required=True) - +from django.utils.translation import gettext as _ qwen_model_credential = OpenAILLMModelCredential() - -model_dict = { - 'qwen-turbo': ModelInfo('qwen-turbo', '', ModelTypeConst.LLM, qwen_model_credential), - 'qwen-plus': ModelInfo('qwen-plus', '', ModelTypeConst.LLM, qwen_model_credential), - 'qwen-max': ModelInfo('qwen-max', '', ModelTypeConst.LLM, qwen_model_credential) -} +qwenvl_model_credential = QwenVLModelCredential() +qwentti_model_credential = QwenTextToImageModelCredential() + +module_info_list = [ + ModelInfo('qwen-turbo', '', ModelTypeConst.LLM, qwen_model_credential, QwenChatModel), + ModelInfo('qwen-plus', '', ModelTypeConst.LLM, qwen_model_credential, QwenChatModel), + ModelInfo('qwen-max', '', ModelTypeConst.LLM, 
qwen_model_credential, QwenChatModel) +] +module_info_vl_list = [ + ModelInfo('qwen-vl-max', '', ModelTypeConst.IMAGE, qwenvl_model_credential, QwenVLChatModel), + ModelInfo('qwen-vl-max-0809', '', ModelTypeConst.IMAGE, qwenvl_model_credential, QwenVLChatModel), + ModelInfo('qwen-vl-plus-0809', '', ModelTypeConst.IMAGE, qwenvl_model_credential, QwenVLChatModel), +] +module_info_tti_list = [ + ModelInfo('wanx-v1', + _('Tongyi Wanxiang - a large image model for text generation, supports bilingual input in Chinese and English, and supports the input of reference pictures for reference content or reference style migration. Key styles include but are not limited to watercolor, oil painting, Chinese painting, sketch, flat illustration, two-dimensional, and 3D. Cartoon.'), + ModelTypeConst.TTI, qwentti_model_credential, QwenTextToImageModel), +] + +model_info_manage = ( + ModelInfoManage.builder() + .append_model_info_list(module_info_list) + .append_default_model_info( + ModelInfo('qwen-turbo', '', ModelTypeConst.LLM, qwen_model_credential, QwenChatModel)) + .append_model_info_list(module_info_vl_list) + .append_default_model_info(module_info_vl_list[0]) + .append_model_info_list(module_info_tti_list) + .append_default_model_info(module_info_tti_list[0]) + .build() +) class QwenModelProvider(IModelProvider): - def get_dialogue_number(self): - return 3 - - def get_model(self, model_type, model_name, model_credential: Dict[str, object], **model_kwargs) -> ChatTongyi: - chat_tong_yi = QwenChatModel( - model_name=model_name, - dashscope_api_key=model_credential.get('api_key') - ) - return chat_tong_yi - - def get_model_credential(self, model_type, model_name): - if model_name in model_dict: - return model_dict.get(model_name).model_credential - return qwen_model_credential + def get_model_info_manage(self): + return model_info_manage def get_model_provide_info(self): - return ModelProvideInfo(provider='model_qwen_provider', name='通义千问', icon=get_file_content( + return 
ModelProvideInfo(provider='model_qwen_provider', name=_('Tongyi Qianwen'), icon=get_file_content( os.path.join(PROJECT_DIR, "apps", "setting", 'models_provider', 'impl', 'qwen_model_provider', 'icon', 'qwen_icon_svg'))) - - def get_model_list(self, model_type: str): - if model_type is None: - raise AppApiException(500, '模型类型不能为空') - return [model_dict.get(key).to_dict() for key in - list(filter(lambda key: model_dict.get(key).model_type == model_type, model_dict.keys()))] - - def get_model_type_list(self): - return [{'key': "大语言模型", 'value': "LLM"}] diff --git a/apps/setting/models_provider/impl/regolo_model_provider/__init__.py b/apps/setting/models_provider/impl/regolo_model_provider/__init__.py new file mode 100644 index 00000000000..2dc4ab10db4 --- /dev/null +++ b/apps/setting/models_provider/impl/regolo_model_provider/__init__.py @@ -0,0 +1,8 @@ +# coding=utf-8 +""" + @project: maxkb + @Author:虎 + @file: __init__.py.py + @date:2024/3/28 16:25 + @desc: +""" diff --git a/apps/setting/models_provider/impl/regolo_model_provider/credential/embedding.py b/apps/setting/models_provider/impl/regolo_model_provider/credential/embedding.py new file mode 100644 index 00000000000..ddea7fed52d --- /dev/null +++ b/apps/setting/models_provider/impl/regolo_model_provider/credential/embedding.py @@ -0,0 +1,52 @@ +# coding=utf-8 +""" + @project: MaxKB + @Author:虎 + @file: embedding.py + @date:2024/7/12 16:45 + @desc: +""" +import traceback +from typing import Dict + +from django.utils.translation import gettext as _ + +from common import forms +from common.exception.app_exception import AppApiException +from common.forms import BaseForm +from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode + + +class RegoloEmbeddingCredential(BaseForm, BaseModelCredential): + def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], model_params, provider, + raise_exception=True): + model_type_list = provider.get_model_type_list() + if 
not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))): + raise AppApiException(ValidCode.valid_error.value, + _('{model_type} Model type is not supported').format(model_type=model_type)) + + for key in ['api_key']: + if key not in model_credential: + if raise_exception: + raise AppApiException(ValidCode.valid_error.value, _('{key} is required').format(key=key)) + else: + return False + try: + model = provider.get_model(model_type, model_name, model_credential) + model.embed_query(_('Hello')) + except Exception as e: + traceback.print_exc() + if isinstance(e, AppApiException): + raise e + if raise_exception: + raise AppApiException(ValidCode.valid_error.value, + _('Verification failed, please check whether the parameters are correct: {error}').format( + error=str(e))) + else: + return False + return True + + def encryption_dict(self, model: Dict[str, object]): + return {**model, 'api_key': super().encryption(model.get('api_key', ''))} + + api_key = forms.PasswordInputField('API Key', required=True) diff --git a/apps/setting/models_provider/impl/regolo_model_provider/credential/image.py b/apps/setting/models_provider/impl/regolo_model_provider/credential/image.py new file mode 100644 index 00000000000..5975c774806 --- /dev/null +++ b/apps/setting/models_provider/impl/regolo_model_provider/credential/image.py @@ -0,0 +1,74 @@ +# coding=utf-8 +import base64 +import os +import traceback +from typing import Dict + +from langchain_core.messages import HumanMessage + +from common import forms +from common.exception.app_exception import AppApiException +from common.forms import BaseForm, TooltipLabel +from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode +from django.utils.translation import gettext_lazy as _, gettext + + +class RegoloImageModelParams(BaseForm): + temperature = forms.SliderField(TooltipLabel(_('Temperature'), + _('Higher values make the output more random, while lower values make it more focused 
and deterministic')), + required=True, default_value=0.7, + _min=0.1, + _max=1.0, + _step=0.01, + precision=2) + + max_tokens = forms.SliderField( + TooltipLabel(_('Output the maximum Tokens'), + _('Specify the maximum number of tokens that the model can generate')), + required=True, default_value=800, + _min=1, + _max=100000, + _step=1, + precision=0) + + +class RegoloImageModelCredential(BaseForm, BaseModelCredential): + api_base = forms.TextInputField('API URL', required=True) + api_key = forms.PasswordInputField('API Key', required=True) + + def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], model_params, provider, + raise_exception=False): + model_type_list = provider.get_model_type_list() + if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))): + raise AppApiException(ValidCode.valid_error.value, + gettext('{model_type} Model type is not supported').format(model_type=model_type)) + + for key in ['api_key']: + if key not in model_credential: + if raise_exception: + raise AppApiException(ValidCode.valid_error.value, gettext('{key} is required').format(key=key)) + else: + return False + try: + model = provider.get_model(model_type, model_name, model_credential, **model_params) + res = model.stream([HumanMessage(content=[{"type": "text", "text": gettext('Hello')}])]) + for chunk in res: + print(chunk) + except Exception as e: + traceback.print_exc() + if isinstance(e, AppApiException): + raise e + if raise_exception: + raise AppApiException(ValidCode.valid_error.value, + gettext( + 'Verification failed, please check whether the parameters are correct: {error}').format( + error=str(e))) + else: + return False + return True + + def encryption_dict(self, model: Dict[str, object]): + return {**model, 'api_key': super().encryption(model.get('api_key', ''))} + + def get_model_params_setting_form(self, model_name): + return RegoloImageModelParams() diff --git 
a/apps/setting/models_provider/impl/regolo_model_provider/credential/llm.py b/apps/setting/models_provider/impl/regolo_model_provider/credential/llm.py new file mode 100644 index 00000000000..60eb4ff0abf --- /dev/null +++ b/apps/setting/models_provider/impl/regolo_model_provider/credential/llm.py @@ -0,0 +1,78 @@ +# coding=utf-8 +""" + @project: MaxKB + @Author:虎 + @file: llm.py + @date:2024/7/11 18:32 + @desc: +""" +import traceback +from typing import Dict + +from django.utils.translation import gettext_lazy as _, gettext +from langchain_core.messages import HumanMessage + +from common import forms +from common.exception.app_exception import AppApiException +from common.forms import BaseForm, TooltipLabel +from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode + + +class RegoloLLMModelParams(BaseForm): + temperature = forms.SliderField(TooltipLabel(_('Temperature'), + _('Higher values make the output more random, while lower values make it more focused and deterministic')), + required=True, default_value=0.7, + _min=0.1, + _max=1.0, + _step=0.01, + precision=2) + + max_tokens = forms.SliderField( + TooltipLabel(_('Output the maximum Tokens'), + _('Specify the maximum number of tokens that the model can generate')), + required=True, default_value=800, + _min=1, + _max=100000, + _step=1, + precision=0) + + +class RegoloLLMModelCredential(BaseForm, BaseModelCredential): + + def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], model_params, provider, + raise_exception=False): + model_type_list = provider.get_model_type_list() + if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))): + raise AppApiException(ValidCode.valid_error.value, + gettext('{model_type} Model type is not supported').format(model_type=model_type)) + + for key in ['api_key']: + if key not in model_credential: + if raise_exception: + raise AppApiException(ValidCode.valid_error.value, gettext('{key} is 
required').format(key=key)) + else: + return False + try: + + model = provider.get_model(model_type, model_name, model_credential, **model_params) + model.invoke([HumanMessage(content=gettext('Hello'))]) + except Exception as e: + traceback.print_exc() + if isinstance(e, AppApiException): + raise e + if raise_exception: + raise AppApiException(ValidCode.valid_error.value, + gettext( + 'Verification failed, please check whether the parameters are correct: {error}').format( + error=str(e))) + else: + return False + return True + + def encryption_dict(self, model: Dict[str, object]): + return {**model, 'api_key': super().encryption(model.get('api_key', ''))} + + api_key = forms.PasswordInputField('API Key', required=True) + + def get_model_params_setting_form(self, model_name): + return RegoloLLMModelParams() diff --git a/apps/setting/models_provider/impl/regolo_model_provider/credential/tti.py b/apps/setting/models_provider/impl/regolo_model_provider/credential/tti.py new file mode 100644 index 00000000000..88f46ce4143 --- /dev/null +++ b/apps/setting/models_provider/impl/regolo_model_provider/credential/tti.py @@ -0,0 +1,89 @@ +# coding=utf-8 +import traceback +from typing import Dict + +from django.utils.translation import gettext_lazy as _, gettext + +from common import forms +from common.exception.app_exception import AppApiException +from common.forms import BaseForm, TooltipLabel +from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode + + +class RegoloTTIModelParams(BaseForm): + size = forms.SingleSelect( + TooltipLabel(_('Image size'), + _('The image generation endpoint allows you to create raw images based on text prompts. 
')), + required=True, + default_value='1024x1024', + option_list=[ + {'value': '1024x1024', 'label': '1024x1024'}, + {'value': '1024x1792', 'label': '1024x1792'}, + {'value': '1792x1024', 'label': '1792x1024'}, + ], + text_field='label', + value_field='value' + ) + + quality = forms.SingleSelect( + TooltipLabel(_('Picture quality'), _(''' +By default, images are produced in standard quality. + ''')), + required=True, + default_value='standard', + option_list=[ + {'value': 'standard', 'label': 'standard'}, + {'value': 'hd', 'label': 'hd'}, + ], + text_field='label', + value_field='value' + ) + + n = forms.SliderField( + TooltipLabel(_('Number of pictures'), + _('1 as default')), + required=True, default_value=1, + _min=1, + _max=10, + _step=1, + precision=0) + + +class RegoloTextToImageModelCredential(BaseForm, BaseModelCredential): + api_key = forms.PasswordInputField('API Key', required=True) + + def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], model_params, provider, + raise_exception=False): + model_type_list = provider.get_model_type_list() + if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))): + raise AppApiException(ValidCode.valid_error.value, + gettext('{model_type} Model type is not supported').format(model_type=model_type)) + + for key in ['api_key']: + if key not in model_credential: + if raise_exception: + raise AppApiException(ValidCode.valid_error.value, gettext('{key} is required').format(key=key)) + else: + return False + try: + model = provider.get_model(model_type, model_name, model_credential, **model_params) + res = model.check_auth() + print(res) + except Exception as e: + traceback.print_exc() + if isinstance(e, AppApiException): + raise e + if raise_exception: + raise AppApiException(ValidCode.valid_error.value, + gettext( + 'Verification failed, please check whether the parameters are correct: {error}').format( + error=str(e))) + else: + return False + return True + + def 
encryption_dict(self, model: Dict[str, object]): + return {**model, 'api_key': super().encryption(model.get('api_key', ''))} + + def get_model_params_setting_form(self, model_name): + return RegoloTTIModelParams() diff --git a/apps/setting/models_provider/impl/regolo_model_provider/icon/regolo_icon_svg b/apps/setting/models_provider/impl/regolo_model_provider/icon/regolo_icon_svg new file mode 100644 index 00000000000..b69154451ad --- /dev/null +++ b/apps/setting/models_provider/impl/regolo_model_provider/icon/regolo_icon_svg @@ -0,0 +1,64 @@ + + + + + + + + + + + + + + diff --git a/apps/setting/models_provider/impl/regolo_model_provider/model/embedding.py b/apps/setting/models_provider/impl/regolo_model_provider/model/embedding.py new file mode 100644 index 00000000000..b067b8eff29 --- /dev/null +++ b/apps/setting/models_provider/impl/regolo_model_provider/model/embedding.py @@ -0,0 +1,23 @@ +# coding=utf-8 +""" + @project: MaxKB + @Author:虎 + @file: embedding.py + @date:2024/7/12 17:44 + @desc: +""" +from typing import Dict + +from langchain_community.embeddings import OpenAIEmbeddings + +from setting.models_provider.base_model_provider import MaxKBBaseModel + + +class RegoloEmbeddingModel(MaxKBBaseModel, OpenAIEmbeddings): + @staticmethod + def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs): + return RegoloEmbeddingModel( + api_key=model_credential.get('api_key'), + model=model_name, + openai_api_base="https://api.regolo.ai/v1", + ) diff --git a/apps/setting/models_provider/impl/regolo_model_provider/model/image.py b/apps/setting/models_provider/impl/regolo_model_provider/model/image.py new file mode 100644 index 00000000000..f16768fad1e --- /dev/null +++ b/apps/setting/models_provider/impl/regolo_model_provider/model/image.py @@ -0,0 +1,19 @@ +from typing import Dict + +from setting.models_provider.base_model_provider import MaxKBBaseModel +from setting.models_provider.impl.base_chat_open_ai import BaseChatOpenAI + + 
+class RegoloImage(MaxKBBaseModel, BaseChatOpenAI): + + @staticmethod + def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs): + optional_params = MaxKBBaseModel.filter_optional_params(model_kwargs) + return RegoloImage( + model_name=model_name, + openai_api_base="https://api.regolo.ai/v1", + openai_api_key=model_credential.get('api_key'), + streaming=True, + stream_usage=True, + extra_body=optional_params + ) diff --git a/apps/setting/models_provider/impl/regolo_model_provider/model/llm.py b/apps/setting/models_provider/impl/regolo_model_provider/model/llm.py new file mode 100644 index 00000000000..126a756a20d --- /dev/null +++ b/apps/setting/models_provider/impl/regolo_model_provider/model/llm.py @@ -0,0 +1,38 @@ +# coding=utf-8 +""" + @project: maxkb + @Author:虎 + @file: llm.py + @date:2024/4/18 15:28 + @desc: +""" +from typing import List, Dict + +from langchain_core.messages import BaseMessage, get_buffer_string +from langchain_openai.chat_models import ChatOpenAI + +from common.config.tokenizer_manage_config import TokenizerManage +from setting.models_provider.base_model_provider import MaxKBBaseModel +from setting.models_provider.impl.base_chat_open_ai import BaseChatOpenAI + + +def custom_get_token_ids(text: str): + tokenizer = TokenizerManage.get_tokenizer() + return tokenizer.encode(text) + + +class RegoloChatModel(MaxKBBaseModel, BaseChatOpenAI): + + @staticmethod + def is_cache_model(): + return False + + @staticmethod + def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs): + optional_params = MaxKBBaseModel.filter_optional_params(model_kwargs) + return RegoloChatModel( + model=model_name, + openai_api_base="https://api.regolo.ai/v1", + openai_api_key=model_credential.get('api_key'), + extra_body=optional_params + ) diff --git a/apps/setting/models_provider/impl/regolo_model_provider/model/tti.py b/apps/setting/models_provider/impl/regolo_model_provider/model/tti.py new 
file mode 100644 index 00000000000..a92527295ac --- /dev/null +++ b/apps/setting/models_provider/impl/regolo_model_provider/model/tti.py @@ -0,0 +1,58 @@ +from typing import Dict + +from openai import OpenAI + +from common.config.tokenizer_manage_config import TokenizerManage +from setting.models_provider.base_model_provider import MaxKBBaseModel +from setting.models_provider.impl.base_tti import BaseTextToImage + + +def custom_get_token_ids(text: str): + tokenizer = TokenizerManage.get_tokenizer() + return tokenizer.encode(text) + + +class RegoloTextToImage(MaxKBBaseModel, BaseTextToImage): + api_base: str + api_key: str + model: str + params: dict + + def __init__(self, **kwargs): + super().__init__(**kwargs) + self.api_key = kwargs.get('api_key') + self.api_base = "https://api.regolo.ai/v1" + self.model = kwargs.get('model') + self.params = kwargs.get('params') + + @staticmethod + def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs): + optional_params = {'params': {'size': '1024x1024', 'quality': 'standard', 'n': 1}} + for key, value in model_kwargs.items(): + if key not in ['model_id', 'use_local', 'streaming']: + optional_params['params'][key] = value + return RegoloTextToImage( + model=model_name, + api_base="https://api.regolo.ai/v1", + api_key=model_credential.get('api_key'), + **optional_params, + ) + + def is_cache_model(self): + return False + + def check_auth(self): + chat = OpenAI(api_key=self.api_key, base_url=self.api_base) + response_list = chat.models.with_raw_response.list() + + # self.generate_image('生成一个小猫图片') + + def generate_image(self, prompt: str, negative_prompt: str = None): + chat = OpenAI(api_key=self.api_key, base_url=self.api_base) + res = chat.images.generate(model=self.model, prompt=prompt, **self.params) + file_urls = [] + for content in res.data: + url = content.url + file_urls.append(url) + + return file_urls diff --git 
a/apps/setting/models_provider/impl/regolo_model_provider/regolo_model_provider.py b/apps/setting/models_provider/impl/regolo_model_provider/regolo_model_provider.py new file mode 100644 index 00000000000..a5e7dc36550 --- /dev/null +++ b/apps/setting/models_provider/impl/regolo_model_provider/regolo_model_provider.py @@ -0,0 +1,89 @@ +# coding=utf-8 +""" + @project: maxkb + @Author:虎 + @file: openai_model_provider.py + @date:2024/3/28 16:26 + @desc: +""" +import os + +from common.util.file_util import get_file_content +from setting.models_provider.base_model_provider import IModelProvider, ModelProvideInfo, ModelInfo, \ + ModelTypeConst, ModelInfoManage +from setting.models_provider.impl.regolo_model_provider.credential.embedding import \ + RegoloEmbeddingCredential +from setting.models_provider.impl.regolo_model_provider.credential.llm import RegoloLLMModelCredential +from setting.models_provider.impl.regolo_model_provider.credential.tti import \ + RegoloTextToImageModelCredential +from setting.models_provider.impl.regolo_model_provider.model.embedding import RegoloEmbeddingModel +from setting.models_provider.impl.regolo_model_provider.model.llm import RegoloChatModel +from setting.models_provider.impl.regolo_model_provider.model.tti import RegoloTextToImage +from smartdoc.conf import PROJECT_DIR +from django.utils.translation import gettext as _ + +openai_llm_model_credential = RegoloLLMModelCredential() +openai_tti_model_credential = RegoloTextToImageModelCredential() +model_info_list = [ + ModelInfo('Phi-4', '', ModelTypeConst.LLM, + openai_llm_model_credential, RegoloChatModel + ), + ModelInfo('DeepSeek-R1-Distill-Qwen-32B', '', ModelTypeConst.LLM, + openai_llm_model_credential, + RegoloChatModel), + ModelInfo('maestrale-chat-v0.4-beta', '', + ModelTypeConst.LLM, openai_llm_model_credential, + RegoloChatModel), + ModelInfo('Llama-3.3-70B-Instruct', + '', + ModelTypeConst.LLM, openai_llm_model_credential, + RegoloChatModel), + ModelInfo('Llama-3.1-8B-Instruct', 
+ '', + ModelTypeConst.LLM, openai_llm_model_credential, + RegoloChatModel), + ModelInfo('DeepSeek-Coder-6.7B-Instruct', '', + ModelTypeConst.LLM, openai_llm_model_credential, + RegoloChatModel) +] +open_ai_embedding_credential = RegoloEmbeddingCredential() +model_info_embedding_list = [ + ModelInfo('gte-Qwen2', '', + ModelTypeConst.EMBEDDING, open_ai_embedding_credential, + RegoloEmbeddingModel), +] + +model_info_tti_list = [ + ModelInfo('FLUX.1-dev', '', + ModelTypeConst.TTI, openai_tti_model_credential, + RegoloTextToImage), + ModelInfo('sdxl-turbo', '', + ModelTypeConst.TTI, openai_tti_model_credential, + RegoloTextToImage), +] +model_info_manage = ( + ModelInfoManage.builder() + .append_model_info_list(model_info_list) + .append_default_model_info( + ModelInfo('gpt-3.5-turbo', _('The latest gpt-3.5-turbo, updated with OpenAI adjustments'), ModelTypeConst.LLM, + openai_llm_model_credential, RegoloChatModel + )) + .append_model_info_list(model_info_embedding_list) + .append_default_model_info(model_info_embedding_list[0]) + .append_model_info_list(model_info_tti_list) + .append_default_model_info(model_info_tti_list[0]) + + .build() +) + + +class RegoloModelProvider(IModelProvider): + + def get_model_info_manage(self): + return model_info_manage + + def get_model_provide_info(self): + return ModelProvideInfo(provider='model_regolo_provider', name='Regolo', icon=get_file_content( + os.path.join(PROJECT_DIR, "apps", "setting", 'models_provider', 'impl', 'regolo_model_provider', + 'icon', + 'regolo_icon_svg'))) diff --git a/apps/setting/models_provider/impl/siliconCloud_model_provider/__init__.py b/apps/setting/models_provider/impl/siliconCloud_model_provider/__init__.py new file mode 100644 index 00000000000..2dc4ab10db4 --- /dev/null +++ b/apps/setting/models_provider/impl/siliconCloud_model_provider/__init__.py @@ -0,0 +1,8 @@ +# coding=utf-8 +""" + @project: maxkb + @Author:虎 + @file: __init__.py.py + @date:2024/3/28 16:25 + @desc: +""" diff --git 
a/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/embedding.py b/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/embedding.py new file mode 100644 index 00000000000..1a4f8d1d3ee --- /dev/null +++ b/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/embedding.py @@ -0,0 +1,53 @@ +# coding=utf-8 +""" + @project: MaxKB + @Author:虎 + @file: embedding.py + @date:2024/7/12 16:45 + @desc: +""" +import traceback +from typing import Dict + +from django.utils.translation import gettext as _ + +from common import forms +from common.exception.app_exception import AppApiException +from common.forms import BaseForm +from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode + + +class SiliconCloudEmbeddingCredential(BaseForm, BaseModelCredential): + def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], model_params, provider, + raise_exception=True): + model_type_list = provider.get_model_type_list() + if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))): + raise AppApiException(ValidCode.valid_error.value, + _('{model_type} Model type is not supported').format(model_type=model_type)) + + for key in ['api_base', 'api_key']: + if key not in model_credential: + if raise_exception: + raise AppApiException(ValidCode.valid_error.value, _('{key} is required').format(key=key)) + else: + return False + try: + model = provider.get_model(model_type, model_name, model_credential) + model.embed_query(_('Hello')) + except Exception as e: + traceback.print_exc() + if isinstance(e, AppApiException): + raise e + if raise_exception: + raise AppApiException(ValidCode.valid_error.value, + _('Verification failed, please check whether the parameters are correct: {error}').format( + error=str(e))) + else: + return False + return True + + def encryption_dict(self, model: Dict[str, object]): + return {**model, 'api_key': 
super().encryption(model.get('api_key', ''))} + + api_base = forms.TextInputField('API URL', required=True) + api_key = forms.PasswordInputField('API Key', required=True) diff --git a/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/image.py b/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/image.py new file mode 100644 index 00000000000..cb6c2ee9cd3 --- /dev/null +++ b/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/image.py @@ -0,0 +1,74 @@ +# coding=utf-8 +import base64 +import os +import traceback +from typing import Dict + +from langchain_core.messages import HumanMessage + +from common import forms +from common.exception.app_exception import AppApiException +from common.forms import BaseForm, TooltipLabel +from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode +from django.utils.translation import gettext_lazy as _, gettext + + +class SiliconCloudImageModelParams(BaseForm): + temperature = forms.SliderField(TooltipLabel(_('Temperature'), + _('Higher values make the output more random, while lower values make it more focused and deterministic')), + required=True, default_value=0.7, + _min=0.1, + _max=1.0, + _step=0.01, + precision=2) + + max_tokens = forms.SliderField( + TooltipLabel(_('Output the maximum Tokens'), + _('Specify the maximum number of tokens that the model can generate')), + required=True, default_value=800, + _min=1, + _max=100000, + _step=1, + precision=0) + + +class SiliconCloudImageModelCredential(BaseForm, BaseModelCredential): + api_base = forms.TextInputField('API URL', required=True) + api_key = forms.PasswordInputField('API Key', required=True) + + def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], model_params, provider, + raise_exception=False): + model_type_list = provider.get_model_type_list() + if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))): + raise 
AppApiException(ValidCode.valid_error.value, + gettext('{model_type} Model type is not supported').format(model_type=model_type)) + + for key in ['api_base', 'api_key']: + if key not in model_credential: + if raise_exception: + raise AppApiException(ValidCode.valid_error.value, gettext('{key} is required').format(key=key)) + else: + return False + try: + model = provider.get_model(model_type, model_name, model_credential, **model_params) + res = model.stream([HumanMessage(content=[{"type": "text", "text": gettext('Hello')}])]) + for chunk in res: + print(chunk) + except Exception as e: + traceback.print_exc() + if isinstance(e, AppApiException): + raise e + if raise_exception: + raise AppApiException(ValidCode.valid_error.value, + gettext( + 'Verification failed, please check whether the parameters are correct: {error}').format( + error=str(e))) + else: + return False + return True + + def encryption_dict(self, model: Dict[str, object]): + return {**model, 'api_key': super().encryption(model.get('api_key', ''))} + + def get_model_params_setting_form(self, model_name): + return SiliconCloudImageModelParams() diff --git a/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/llm.py b/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/llm.py new file mode 100644 index 00000000000..a7333eb4747 --- /dev/null +++ b/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/llm.py @@ -0,0 +1,79 @@ +# coding=utf-8 +""" + @project: MaxKB + @Author:虎 + @file: llm.py + @date:2024/7/11 18:32 + @desc: +""" +import traceback +from typing import Dict + +from django.utils.translation import gettext_lazy as _, gettext +from langchain_core.messages import HumanMessage + +from common import forms +from common.exception.app_exception import AppApiException +from common.forms import BaseForm, TooltipLabel +from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode + + +class 
SiliconCloudLLMModelParams(BaseForm): + temperature = forms.SliderField(TooltipLabel(_('Temperature'), + _('Higher values make the output more random, while lower values make it more focused and deterministic')), + required=True, default_value=0.7, + _min=0.1, + _max=1.0, + _step=0.01, + precision=2) + + max_tokens = forms.SliderField( + TooltipLabel(_('Output the maximum Tokens'), + _('Specify the maximum number of tokens that the model can generate')), + required=True, default_value=800, + _min=1, + _max=100000, + _step=1, + precision=0) + + +class SiliconCloudLLMModelCredential(BaseForm, BaseModelCredential): + + def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], model_params, provider, + raise_exception=False): + model_type_list = provider.get_model_type_list() + if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))): + raise AppApiException(ValidCode.valid_error.value, + gettext('{model_type} Model type is not supported').format(model_type=model_type)) + + for key in ['api_base', 'api_key']: + if key not in model_credential: + if raise_exception: + raise AppApiException(ValidCode.valid_error.value, gettext('{key} is required').format(key=key)) + else: + return False + try: + + model = provider.get_model(model_type, model_name, model_credential, **model_params) + model.invoke([HumanMessage(content=gettext('Hello'))]) + except Exception as e: + traceback.print_exc() + if isinstance(e, AppApiException): + raise e + if raise_exception: + raise AppApiException(ValidCode.valid_error.value, + gettext( + 'Verification failed, please check whether the parameters are correct: {error}').format( + error=str(e))) + else: + return False + return True + + def encryption_dict(self, model: Dict[str, object]): + return {**model, 'api_key': super().encryption(model.get('api_key', ''))} + + api_base = forms.TextInputField('API URL', required=True) + api_key = forms.PasswordInputField('API Key', required=True) + + def 
get_model_params_setting_form(self, model_name): + return SiliconCloudLLMModelParams() diff --git a/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/reranker.py b/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/reranker.py new file mode 100644 index 00000000000..834aaf12034 --- /dev/null +++ b/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/reranker.py @@ -0,0 +1,53 @@ +# coding=utf-8 +""" + @project: MaxKB + @Author:虎 + @file: reranker.py + @date:2024/9/9 17:51 + @desc: +""" +import traceback +from typing import Dict + +from django.utils.translation import gettext as _ +from langchain_core.documents import Document + +from common import forms +from common.exception.app_exception import AppApiException +from common.forms import BaseForm +from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode +from setting.models_provider.impl.siliconCloud_model_provider.model.reranker import SiliconCloudReranker + + +class SiliconCloudRerankerCredential(BaseForm, BaseModelCredential): + + def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], model_params, provider, + raise_exception=False): + if not model_type == 'RERANKER': + raise AppApiException(ValidCode.valid_error.value, + _('{model_type} Model type is not supported').format(model_type=model_type)) + for key in ['api_base', 'api_key']: + if key not in model_credential: + if raise_exception: + raise AppApiException(ValidCode.valid_error.value, _('{key} is required').format(key=key)) + else: + return False + try: + model: SiliconCloudReranker = provider.get_model(model_type, model_name, model_credential) + model.compress_documents([Document(page_content=_('Hello'))], _('Hello')) + except Exception as e: + traceback.print_exc() + if isinstance(e, AppApiException): + raise e + if raise_exception: + raise AppApiException(ValidCode.valid_error.value, + _('Verification failed, please check whether 
the parameters are correct: {error}').format( + error=str(e))) + else: + return False + return True + + def encryption_dict(self, model: Dict[str, object]): + return {**model, 'api_key': super().encryption(model.get('api_key', ''))} + api_base = forms.TextInputField('API URL', required=True) + api_key = forms.PasswordInputField('API Key', required=True) diff --git a/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/stt.py b/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/stt.py new file mode 100644 index 00000000000..cba2e5d8736 --- /dev/null +++ b/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/stt.py @@ -0,0 +1,49 @@ +# coding=utf-8 +import traceback +from typing import Dict + +from django.utils.translation import gettext as _ + +from common import forms +from common.exception.app_exception import AppApiException +from common.forms import BaseForm +from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode + + +class SiliconCloudSTTModelCredential(BaseForm, BaseModelCredential): + api_base = forms.TextInputField('API URL', required=True) + api_key = forms.PasswordInputField('API Key', required=True) + + def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], model_params, provider, + raise_exception=False): + model_type_list = provider.get_model_type_list() + if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))): + raise AppApiException(ValidCode.valid_error.value, + _('{model_type} Model type is not supported').format(model_type=model_type)) + + for key in ['api_base', 'api_key']: + if key not in model_credential: + if raise_exception: + raise AppApiException(ValidCode.valid_error.value, _('{key} is required').format(key=key)) + else: + return False + try: + model = provider.get_model(model_type, model_name, model_credential) + model.check_auth() + except Exception as e: + traceback.print_exc() + if 
isinstance(e, AppApiException): + raise e + if raise_exception: + raise AppApiException(ValidCode.valid_error.value, + _('Verification failed, please check whether the parameters are correct: {error}').format( + error=str(e))) + else: + return False + return True + + def encryption_dict(self, model: Dict[str, object]): + return {**model, 'api_key': super().encryption(model.get('api_key', ''))} + + def get_model_params_setting_form(self, model_name): + pass diff --git a/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/tti.py b/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/tti.py new file mode 100644 index 00000000000..860bad4b9e2 --- /dev/null +++ b/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/tti.py @@ -0,0 +1,90 @@ +# coding=utf-8 +import traceback +from typing import Dict + +from django.utils.translation import gettext_lazy as _, gettext + +from common import forms +from common.exception.app_exception import AppApiException +from common.forms import BaseForm, TooltipLabel +from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode + + +class SiliconCloudTTIModelParams(BaseForm): + size = forms.SingleSelect( + TooltipLabel(_('Image size'), + _('The image generation endpoint allows you to create raw images based on text prompts. When using the DALL·E 3, the image size can be 1024x1024, 1024x1792 or 1792x1024 pixels.')), + required=True, + default_value='1024x1024', + option_list=[ + {'value': '1024x1024', 'label': '1024x1024'}, + {'value': '1024x1792', 'label': '1024x1792'}, + {'value': '1792x1024', 'label': '1792x1024'}, + ], + text_field='label', + value_field='value' + ) + + quality = forms.SingleSelect( + TooltipLabel(_('Picture quality'), _(''' +By default, images are produced in standard quality, but with DALL·E 3 you can set quality: "hd" to enhance detail. Square, standard quality images are generated fastest. 
+ ''')), + required=True, + default_value='standard', + option_list=[ + {'value': 'standard', 'label': 'standard'}, + {'value': 'hd', 'label': 'hd'}, + ], + text_field='label', + value_field='value' + ) + + n = forms.SliderField( + TooltipLabel(_('Number of pictures'), + _('You can use DALL·E 3 to request 1 image at a time (requesting more images by issuing parallel requests), or use DALL·E 2 with the n parameter to request up to 10 images at a time.')), + required=True, default_value=1, + _min=1, + _max=10, + _step=1, + precision=0) + + +class SiliconCloudTextToImageModelCredential(BaseForm, BaseModelCredential): + api_base = forms.TextInputField('API URL', required=True) + api_key = forms.PasswordInputField('API Key', required=True) + + def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], model_params, provider, + raise_exception=False): + model_type_list = provider.get_model_type_list() + if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))): + raise AppApiException(ValidCode.valid_error.value, + gettext('{model_type} Model type is not supported').format(model_type=model_type)) + + for key in ['api_base', 'api_key']: + if key not in model_credential: + if raise_exception: + raise AppApiException(ValidCode.valid_error.value, gettext('{key} is required').format(key=key)) + else: + return False + try: + model = provider.get_model(model_type, model_name, model_credential, **model_params) + res = model.check_auth() + print(res) + except Exception as e: + traceback.print_exc() + if isinstance(e, AppApiException): + raise e + if raise_exception: + raise AppApiException(ValidCode.valid_error.value, + gettext( + 'Verification failed, please check whether the parameters are correct: {error}').format( + error=str(e))) + else: + return False + return True + + def encryption_dict(self, model: Dict[str, object]): + return {**model, 'api_key': super().encryption(model.get('api_key', ''))} + + def 
get_model_params_setting_form(self, model_name): + return SiliconCloudTTIModelParams() diff --git a/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/tts.py b/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/tts.py new file mode 100644 index 00000000000..ffe003e9a42 --- /dev/null +++ b/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/tts.py @@ -0,0 +1,68 @@ +# coding=utf-8 +import traceback +from typing import Dict + +from django.utils.translation import gettext_lazy as _, gettext + +from common import forms +from common.exception.app_exception import AppApiException +from common.forms import BaseForm, TooltipLabel +from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode + + +class SiliconCloudTTSModelGeneralParams(BaseForm): + # alloy, echo, fable, onyx, nova, shimmer + voice = forms.SingleSelect( + TooltipLabel('Voice', + _('Try out the different sounds (Alloy, Echo, Fable, Onyx, Nova, and Sparkle) to find one that suits your desired tone and audience. 
The current voiceover is optimized for English.')), + required=True, default_value='alloy', + text_field='value', + value_field='value', + option_list=[ + {'text': 'alloy', 'value': 'alloy'}, + {'text': 'echo', 'value': 'echo'}, + {'text': 'fable', 'value': 'fable'}, + {'text': 'onyx', 'value': 'onyx'}, + {'text': 'nova', 'value': 'nova'}, + {'text': 'shimmer', 'value': 'shimmer'}, + ]) + + +class SiliconCloudTTSModelCredential(BaseForm, BaseModelCredential): + api_base = forms.TextInputField('API URL', required=True) + api_key = forms.PasswordInputField('API Key', required=True) + + def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], model_params, provider, + raise_exception=False): + model_type_list = provider.get_model_type_list() + if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))): + raise AppApiException(ValidCode.valid_error.value, + gettext('{model_type} Model type is not supported').format(model_type=model_type)) + + for key in ['api_base', 'api_key']: + if key not in model_credential: + if raise_exception: + raise AppApiException(ValidCode.valid_error.value, gettext('{key} is required').format(key=key)) + else: + return False + try: + model = provider.get_model(model_type, model_name, model_credential, **model_params) + model.check_auth() + except Exception as e: + traceback.print_exc() + if isinstance(e, AppApiException): + raise e + if raise_exception: + raise AppApiException(ValidCode.valid_error.value, + gettext( + 'Verification failed, please check whether the parameters are correct: {error}').format( + error=str(e))) + else: + return False + return True + + def encryption_dict(self, model: Dict[str, object]): + return {**model, 'api_key': super().encryption(model.get('api_key', ''))} + + def get_model_params_setting_form(self, model_name): + return SiliconCloudTTSModelGeneralParams() diff --git a/apps/setting/models_provider/impl/siliconCloud_model_provider/icon/siliconCloud_icon_svg 
b/apps/setting/models_provider/impl/siliconCloud_model_provider/icon/siliconCloud_icon_svg new file mode 100644 index 00000000000..339fff751d2 --- /dev/null +++ b/apps/setting/models_provider/impl/siliconCloud_model_provider/icon/siliconCloud_icon_svg @@ -0,0 +1,5 @@ + + + + + diff --git a/apps/setting/models_provider/impl/siliconCloud_model_provider/model/embedding.py b/apps/setting/models_provider/impl/siliconCloud_model_provider/model/embedding.py new file mode 100644 index 00000000000..e8b6c0f5cf1 --- /dev/null +++ b/apps/setting/models_provider/impl/siliconCloud_model_provider/model/embedding.py @@ -0,0 +1,23 @@ +# coding=utf-8 +""" + @project: MaxKB + @Author:虎 + @file: embedding.py + @date:2024/7/12 17:44 + @desc: +""" +from typing import Dict + +from langchain_community.embeddings import OpenAIEmbeddings + +from setting.models_provider.base_model_provider import MaxKBBaseModel + + +class SiliconCloudEmbeddingModel(MaxKBBaseModel, OpenAIEmbeddings): + @staticmethod + def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs): + return SiliconCloudEmbeddingModel( + api_key=model_credential.get('api_key'), + model=model_name, + openai_api_base=model_credential.get('api_base'), + ) diff --git a/apps/setting/models_provider/impl/siliconCloud_model_provider/model/image.py b/apps/setting/models_provider/impl/siliconCloud_model_provider/model/image.py new file mode 100644 index 00000000000..2ec0689d4d2 --- /dev/null +++ b/apps/setting/models_provider/impl/siliconCloud_model_provider/model/image.py @@ -0,0 +1,20 @@ +from typing import Dict + +from setting.models_provider.base_model_provider import MaxKBBaseModel +from setting.models_provider.impl.base_chat_open_ai import BaseChatOpenAI + + +class SiliconCloudImage(MaxKBBaseModel, BaseChatOpenAI): + + @staticmethod + def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs): + optional_params = MaxKBBaseModel.filter_optional_params(model_kwargs) + 
return SiliconCloudImage( + model_name=model_name, + openai_api_base=model_credential.get('api_base'), + openai_api_key=model_credential.get('api_key'), + # stream_options={"include_usage": True}, + streaming=True, + stream_usage=True, + extra_body=optional_params + ) diff --git a/apps/setting/models_provider/impl/siliconCloud_model_provider/model/llm.py b/apps/setting/models_provider/impl/siliconCloud_model_provider/model/llm.py new file mode 100644 index 00000000000..6fb0c7816fa --- /dev/null +++ b/apps/setting/models_provider/impl/siliconCloud_model_provider/model/llm.py @@ -0,0 +1,38 @@ +# coding=utf-8 +""" + @project: maxkb + @Author:虎 + @file: llm.py + @date:2024/4/18 15:28 + @desc: +""" +from typing import List, Dict + +from langchain_core.messages import BaseMessage, get_buffer_string +from langchain_openai.chat_models import ChatOpenAI + +from common.config.tokenizer_manage_config import TokenizerManage +from setting.models_provider.base_model_provider import MaxKBBaseModel +from setting.models_provider.impl.base_chat_open_ai import BaseChatOpenAI + + +def custom_get_token_ids(text: str): + tokenizer = TokenizerManage.get_tokenizer() + return tokenizer.encode(text) + + +class SiliconCloudChatModel(MaxKBBaseModel, BaseChatOpenAI): + + @staticmethod + def is_cache_model(): + return False + + @staticmethod + def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs): + optional_params = MaxKBBaseModel.filter_optional_params(model_kwargs) + return SiliconCloudChatModel( + model=model_name, + openai_api_base=model_credential.get('api_base'), + openai_api_key=model_credential.get('api_key'), + extra_body=optional_params + ) diff --git a/apps/setting/models_provider/impl/siliconCloud_model_provider/model/reranker.py b/apps/setting/models_provider/impl/siliconCloud_model_provider/model/reranker.py new file mode 100644 index 00000000000..ef85cec5867 --- /dev/null +++ 
b/apps/setting/models_provider/impl/siliconCloud_model_provider/model/reranker.py @@ -0,0 +1,74 @@ +# coding=utf-8 +""" + @project: MaxKB + @Author:虎 + @file: siliconcloud_reranker.py + @date:2024/9/10 9:45 + @desc: SiliconCloud 文档重排封装 +""" + +from typing import Sequence, Optional, Any, Dict +import requests + +from langchain_core.callbacks import Callbacks +from langchain_core.documents import BaseDocumentCompressor, Document + +from setting.models_provider.base_model_provider import MaxKBBaseModel +from django.utils.translation import gettext as _ + + +class SiliconCloudReranker(MaxKBBaseModel, BaseDocumentCompressor): + api_base: Optional[str] + """SiliconCloud API URL""" + model: Optional[str] + """SiliconCloud 重排模型 ID""" + api_key: Optional[str] + """API Key""" + + top_n: Optional[int] = 3 # 取前 N 个最相关的结果 + + @staticmethod + def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs): + return SiliconCloudReranker( + api_base=model_credential.get('api_base'), + model=model_name, + api_key=model_credential.get('api_key'), + top_n=model_kwargs.get('top_n', 3) + ) + + def compress_documents(self, documents: Sequence[Document], query: str, callbacks: Optional[Callbacks] = None) -> \ + Sequence[Document]: + if not documents: + return [] + + # 预处理文本 + texts = [doc.page_content for doc in documents] + + # 发送请求到 SiliconCloud API + headers = { + "Authorization": f"Bearer {self.api_key}", + "Content-Type": "application/json" + } + payload = { + "model": self.model, + "query": query, + "documents": texts, + "top_n": self.top_n, + "return_documents": True, + } + + response = requests.post(f"{self.api_base}/rerank", json=payload, headers=headers) + + if response.status_code != 200: + raise RuntimeError(f"SiliconCloud API 请求失败: {response.text}") + + res = response.json() + + # 解析返回结果 + return [ + Document( + page_content=item.get('document', {}).get('text', ''), + metadata={'relevance_score': item.get('relevance_score')} + ) + for item in 
res.get('results', []) + ] diff --git a/apps/setting/models_provider/impl/siliconCloud_model_provider/model/stt.py b/apps/setting/models_provider/impl/siliconCloud_model_provider/model/stt.py new file mode 100644 index 00000000000..4bb07da2135 --- /dev/null +++ b/apps/setting/models_provider/impl/siliconCloud_model_provider/model/stt.py @@ -0,0 +1,59 @@ +import asyncio +import io +from typing import Dict + +from openai import OpenAI + +from common.config.tokenizer_manage_config import TokenizerManage +from setting.models_provider.base_model_provider import MaxKBBaseModel +from setting.models_provider.impl.base_stt import BaseSpeechToText + + +def custom_get_token_ids(text: str): + tokenizer = TokenizerManage.get_tokenizer() + return tokenizer.encode(text) + + +class SiliconCloudSpeechToText(MaxKBBaseModel, BaseSpeechToText): + api_base: str + api_key: str + model: str + + def __init__(self, **kwargs): + super().__init__(**kwargs) + self.api_key = kwargs.get('api_key') + self.api_base = kwargs.get('api_base') + + @staticmethod + def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs): + optional_params = {} + if 'max_tokens' in model_kwargs and model_kwargs['max_tokens'] is not None: + optional_params['max_tokens'] = model_kwargs['max_tokens'] + if 'temperature' in model_kwargs and model_kwargs['temperature'] is not None: + optional_params['temperature'] = model_kwargs['temperature'] + return SiliconCloudSpeechToText( + model=model_name, + api_base=model_credential.get('api_base'), + api_key=model_credential.get('api_key'), + **optional_params, + ) + + def check_auth(self): + client = OpenAI( + base_url=self.api_base, + api_key=self.api_key + ) + response_list = client.models.with_raw_response.list() + # print(response_list) + + def speech_to_text(self, audio_file): + client = OpenAI( + base_url=self.api_base, + api_key=self.api_key + ) + audio_data = audio_file.read() + buffer = io.BytesIO(audio_data) + buffer.name = "file.mp3" 
# this is the important line + res = client.audio.transcriptions.create(model=self.model, language="zh", file=buffer) + return res.text + diff --git a/apps/setting/models_provider/impl/siliconCloud_model_provider/model/tti.py b/apps/setting/models_provider/impl/siliconCloud_model_provider/model/tti.py new file mode 100644 index 00000000000..6849753c19d --- /dev/null +++ b/apps/setting/models_provider/impl/siliconCloud_model_provider/model/tti.py @@ -0,0 +1,58 @@ +from typing import Dict + +from openai import OpenAI + +from common.config.tokenizer_manage_config import TokenizerManage +from setting.models_provider.base_model_provider import MaxKBBaseModel +from setting.models_provider.impl.base_tti import BaseTextToImage + + +def custom_get_token_ids(text: str): + tokenizer = TokenizerManage.get_tokenizer() + return tokenizer.encode(text) + + +class SiliconCloudTextToImage(MaxKBBaseModel, BaseTextToImage): + api_base: str + api_key: str + model: str + params: dict + + def __init__(self, **kwargs): + super().__init__(**kwargs) + self.api_key = kwargs.get('api_key') + self.api_base = kwargs.get('api_base') + self.model = kwargs.get('model') + self.params = kwargs.get('params') + + @staticmethod + def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs): + optional_params = {'params': {'size': '1024x1024', 'quality': 'standard', 'n': 1}} + for key, value in model_kwargs.items(): + if key not in ['model_id', 'use_local', 'streaming']: + optional_params['params'][key] = value + return SiliconCloudTextToImage( + model=model_name, + api_base=model_credential.get('api_base'), + api_key=model_credential.get('api_key'), + **optional_params, + ) + + def is_cache_model(self): + return False + + def check_auth(self): + chat = OpenAI(api_key=self.api_key, base_url=self.api_base) + response_list = chat.models.with_raw_response.list() + + # self.generate_image('生成一个小猫图片') + + def generate_image(self, prompt: str, negative_prompt: str = None): + 
chat = OpenAI(api_key=self.api_key, base_url=self.api_base) + res = chat.images.generate(model=self.model, prompt=prompt, **self.params) + file_urls = [] + for content in res.data: + url = content.url + file_urls.append(url) + + return file_urls diff --git a/apps/setting/models_provider/impl/siliconCloud_model_provider/model/tts.py b/apps/setting/models_provider/impl/siliconCloud_model_provider/model/tts.py new file mode 100644 index 00000000000..1b17cbb825a --- /dev/null +++ b/apps/setting/models_provider/impl/siliconCloud_model_provider/model/tts.py @@ -0,0 +1,64 @@ +from typing import Dict + +from openai import OpenAI + +from common.config.tokenizer_manage_config import TokenizerManage +from common.util.common import _remove_empty_lines +from setting.models_provider.base_model_provider import MaxKBBaseModel +from setting.models_provider.impl.base_tts import BaseTextToSpeech + + +def custom_get_token_ids(text: str): + tokenizer = TokenizerManage.get_tokenizer() + return tokenizer.encode(text) + + +class SiliconCloudTextToSpeech(MaxKBBaseModel, BaseTextToSpeech): + api_base: str + api_key: str + model: str + params: dict + + def __init__(self, **kwargs): + super().__init__(**kwargs) + self.api_key = kwargs.get('api_key') + self.api_base = kwargs.get('api_base') + self.model = kwargs.get('model') + self.params = kwargs.get('params') + + @staticmethod + def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs): + optional_params = {'params': {'voice': 'alloy'}} + for key, value in model_kwargs.items(): + if key not in ['model_id', 'use_local', 'streaming']: + optional_params['params'][key] = value + return SiliconCloudTextToSpeech( + model=model_name, + api_base=model_credential.get('api_base'), + api_key=model_credential.get('api_key'), + **optional_params, + ) + + def check_auth(self): + client = OpenAI( + base_url=self.api_base, + api_key=self.api_key + ) + response_list = client.models.with_raw_response.list() + # 
print(response_list) + + def text_to_speech(self, text): + client = OpenAI( + base_url=self.api_base, + api_key=self.api_key + ) + text = _remove_empty_lines(text) + with client.audio.speech.with_streaming_response.create( + model=self.model, + input=text, + **self.params + ) as response: + return response.read() + + def is_cache_model(self): + return False diff --git a/apps/setting/models_provider/impl/siliconCloud_model_provider/siliconCloud_model_provider.py b/apps/setting/models_provider/impl/siliconCloud_model_provider/siliconCloud_model_provider.py new file mode 100644 index 00000000000..a8e005ddcab --- /dev/null +++ b/apps/setting/models_provider/impl/siliconCloud_model_provider/siliconCloud_model_provider.py @@ -0,0 +1,137 @@ +# coding=utf-8 +""" + @project: maxkb + @Author:虎 + @file: openai_model_provider.py + @date:2024/3/28 16:26 + @desc: +""" +import os + +from common.util.file_util import get_file_content +from setting.models_provider.base_model_provider import IModelProvider, ModelProvideInfo, ModelInfo, \ + ModelTypeConst, ModelInfoManage +from setting.models_provider.impl.siliconCloud_model_provider.credential.embedding import \ + SiliconCloudEmbeddingCredential +from setting.models_provider.impl.siliconCloud_model_provider.credential.llm import SiliconCloudLLMModelCredential +from setting.models_provider.impl.siliconCloud_model_provider.credential.reranker import SiliconCloudRerankerCredential +from setting.models_provider.impl.siliconCloud_model_provider.credential.stt import SiliconCloudSTTModelCredential +from setting.models_provider.impl.siliconCloud_model_provider.credential.tti import \ + SiliconCloudTextToImageModelCredential +from setting.models_provider.impl.siliconCloud_model_provider.model.embedding import SiliconCloudEmbeddingModel +from setting.models_provider.impl.siliconCloud_model_provider.model.llm import SiliconCloudChatModel +from setting.models_provider.impl.siliconCloud_model_provider.model.reranker import SiliconCloudReranker 
+from setting.models_provider.impl.siliconCloud_model_provider.model.stt import SiliconCloudSpeechToText +from setting.models_provider.impl.siliconCloud_model_provider.model.tti import SiliconCloudTextToImage +from smartdoc.conf import PROJECT_DIR +from django.utils.translation import gettext as _ + +openai_llm_model_credential = SiliconCloudLLMModelCredential() +openai_stt_model_credential = SiliconCloudSTTModelCredential() +openai_reranker_model_credential = SiliconCloudRerankerCredential() +openai_tti_model_credential = SiliconCloudTextToImageModelCredential() +model_info_list = [ + ModelInfo('deepseek-ai/DeepSeek-R1-Distill-Llama-8B', '', ModelTypeConst.LLM, + openai_llm_model_credential, SiliconCloudChatModel + ), + ModelInfo('deepseek-ai/DeepSeek-R1-Distill-Qwen-7B', '', ModelTypeConst.LLM, + openai_llm_model_credential, + SiliconCloudChatModel), + ModelInfo('deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B', '', + ModelTypeConst.LLM, openai_llm_model_credential, + SiliconCloudChatModel), + ModelInfo('Qwen/Qwen2.5-7B-Instruct', + '', + ModelTypeConst.LLM, openai_llm_model_credential, + SiliconCloudChatModel), + ModelInfo('Qwen/Qwen2.5-Coder-7B-Instruct', '', + ModelTypeConst.LLM, openai_llm_model_credential, + SiliconCloudChatModel), + ModelInfo('internlm/internlm2_5-7b-chat', '', + ModelTypeConst.LLM, openai_llm_model_credential, + SiliconCloudChatModel), + ModelInfo('Qwen/Qwen2-1.5B-Instruct', '', + ModelTypeConst.LLM, openai_llm_model_credential, + SiliconCloudChatModel), + ModelInfo('THUDM/glm-4-9b-chat', '', + ModelTypeConst.LLM, openai_llm_model_credential, + SiliconCloudChatModel), + ModelInfo('FunAudioLLM/SenseVoiceSmall', '', + ModelTypeConst.STT, openai_stt_model_credential, + SiliconCloudSpeechToText), +] +open_ai_embedding_credential = SiliconCloudEmbeddingCredential() +model_info_embedding_list = [ + ModelInfo('netease-youdao/bce-embedding-base_v1', '', + ModelTypeConst.EMBEDDING, open_ai_embedding_credential, + SiliconCloudEmbeddingModel), + 
ModelInfo('BAAI/bge-m3', '', + ModelTypeConst.EMBEDDING, open_ai_embedding_credential, + SiliconCloudEmbeddingModel), + ModelInfo('BAAI/bge-large-en-v1.5', '', + ModelTypeConst.EMBEDDING, open_ai_embedding_credential, + SiliconCloudEmbeddingModel), + ModelInfo('BAAI/bge-large-zh-v1.5', '', + ModelTypeConst.EMBEDDING, open_ai_embedding_credential, + SiliconCloudEmbeddingModel), +] + +model_info_tti_list = [ + ModelInfo('deepseek-ai/Janus-Pro-7B', '', + ModelTypeConst.TTI, openai_tti_model_credential, + SiliconCloudTextToImage), + ModelInfo('stabilityai/stable-diffusion-3-5-large', '', + ModelTypeConst.TTI, openai_tti_model_credential, + SiliconCloudTextToImage), + ModelInfo('black-forest-labs/FLUX.1-schnell', '', + ModelTypeConst.TTI, openai_tti_model_credential, + SiliconCloudTextToImage), + ModelInfo('stabilityai/stable-diffusion-3-medium', '', + ModelTypeConst.TTI, openai_tti_model_credential, + SiliconCloudTextToImage), + ModelInfo('stabilityai/stable-diffusion-xl-base-1.0', '', + ModelTypeConst.TTI, openai_tti_model_credential, + SiliconCloudTextToImage), + ModelInfo('stabilityai/stable-diffusion-2-1', '', + ModelTypeConst.TTI, openai_tti_model_credential, + SiliconCloudTextToImage), +] +model_rerank_list = [ + ModelInfo('netease-youdao/bce-reranker-base_v1', '', ModelTypeConst.RERANKER, + openai_reranker_model_credential, SiliconCloudReranker + ), + ModelInfo('BAAI/bge-reranker-v2-m3', '', ModelTypeConst.RERANKER, + openai_reranker_model_credential, SiliconCloudReranker + ), +] +model_info_manage = ( + ModelInfoManage.builder() + .append_model_info_list(model_info_list) + .append_default_model_info( + ModelInfo('gpt-3.5-turbo', _('The latest gpt-3.5-turbo, updated with OpenAI adjustments'), ModelTypeConst.LLM, + openai_llm_model_credential, SiliconCloudChatModel + )) + .append_model_info_list(model_info_embedding_list) + .append_default_model_info(model_info_embedding_list[0]) + .append_model_info_list(model_info_tti_list) + 
.append_default_model_info(model_info_tti_list[0]) + .append_default_model_info(ModelInfo('whisper-1', '', + ModelTypeConst.STT, openai_stt_model_credential, + SiliconCloudSpeechToText)) + .append_model_info_list(model_rerank_list) + .append_default_model_info(model_rerank_list[0]) + + .build() +) + + +class SiliconCloudModelProvider(IModelProvider): + + def get_model_info_manage(self): + return model_info_manage + + def get_model_provide_info(self): + return ModelProvideInfo(provider='model_siliconCloud_provider', name='SILICONFLOW', icon=get_file_content( + os.path.join(PROJECT_DIR, "apps", "setting", 'models_provider', 'impl', 'siliconCloud_model_provider', + 'icon', + 'siliconCloud_icon_svg'))) diff --git a/apps/setting/models_provider/impl/tencent_cloud_model_provider/__init__.py b/apps/setting/models_provider/impl/tencent_cloud_model_provider/__init__.py new file mode 100644 index 00000000000..2dc4ab10db4 --- /dev/null +++ b/apps/setting/models_provider/impl/tencent_cloud_model_provider/__init__.py @@ -0,0 +1,8 @@ +# coding=utf-8 +""" + @project: maxkb + @Author:虎 + @file: __init__.py.py + @date:2024/3/28 16:25 + @desc: +""" diff --git a/apps/setting/models_provider/impl/tencent_cloud_model_provider/credential/llm.py b/apps/setting/models_provider/impl/tencent_cloud_model_provider/credential/llm.py new file mode 100644 index 00000000000..8d8e52d27ee --- /dev/null +++ b/apps/setting/models_provider/impl/tencent_cloud_model_provider/credential/llm.py @@ -0,0 +1,79 @@ +# coding=utf-8 +""" + @project: MaxKB + @Author:虎 + @file: llm.py + @date:2024/7/11 18:32 + @desc: +""" +import traceback +from typing import Dict + +from django.utils.translation import gettext_lazy as _, gettext +from langchain_core.messages import HumanMessage + +from common import forms +from common.exception.app_exception import AppApiException +from common.forms import BaseForm, TooltipLabel +from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode + + +class 
TencentCloudLLMModelParams(BaseForm): + temperature = forms.SliderField(TooltipLabel(_('Temperature'), + _('Higher values make the output more random, while lower values make it more focused and deterministic')), + required=True, default_value=0.7, + _min=0.1, + _max=1.0, + _step=0.01, + precision=2) + + max_tokens = forms.SliderField( + TooltipLabel(_('Output the maximum Tokens'), + _('Specify the maximum number of tokens that the model can generate')), + required=True, default_value=800, + _min=1, + _max=100000, + _step=1, + precision=0) + + +class TencentCloudLLMModelCredential(BaseForm, BaseModelCredential): + + def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], model_params, provider, + raise_exception=False): + model_type_list = provider.get_model_type_list() + if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))): + raise AppApiException(ValidCode.valid_error.value, + gettext('{model_type} Model type is not supported').format(model_type=model_type)) + + for key in ['api_base', 'api_key']: + if key not in model_credential: + if raise_exception: + raise AppApiException(ValidCode.valid_error.value, gettext('{key} is required').format(key=key)) + else: + return False + try: + + model = provider.get_model(model_type, model_name, model_credential, **model_params) + model.invoke([HumanMessage(content=gettext('Hello'))]) + except Exception as e: + traceback.print_exc() + if isinstance(e, AppApiException): + raise e + if raise_exception: + raise AppApiException(ValidCode.valid_error.value, + gettext( + 'Verification failed, please check whether the parameters are correct: {error}').format( + error=str(e))) + else: + return False + return True + + def encryption_dict(self, model: Dict[str, object]): + return {**model, 'api_key': super().encryption(model.get('api_key', ''))} + + api_base = forms.TextInputField('API URL', required=True) + api_key = forms.PasswordInputField('API Key', required=True) + + def 
get_model_params_setting_form(self, model_name): + return TencentCloudLLMModelParams() diff --git a/apps/setting/models_provider/impl/tencent_cloud_model_provider/icon/tencent_cloud_icon_svg b/apps/setting/models_provider/impl/tencent_cloud_model_provider/icon/tencent_cloud_icon_svg new file mode 100644 index 00000000000..ff559eaff44 --- /dev/null +++ b/apps/setting/models_provider/impl/tencent_cloud_model_provider/icon/tencent_cloud_icon_svg @@ -0,0 +1,15 @@ + + + + + + + \ No newline at end of file diff --git a/apps/setting/models_provider/impl/tencent_cloud_model_provider/model/llm.py b/apps/setting/models_provider/impl/tencent_cloud_model_provider/model/llm.py new file mode 100644 index 00000000000..cfcdf7aca21 --- /dev/null +++ b/apps/setting/models_provider/impl/tencent_cloud_model_provider/model/llm.py @@ -0,0 +1,39 @@ +# coding=utf-8 +""" + @project: maxkb + @Author:虎 + @file: llm.py + @date:2024/4/18 15:28 + @desc: +""" +from typing import List, Dict + +from langchain_core.messages import BaseMessage, get_buffer_string + +from common.config.tokenizer_manage_config import TokenizerManage +from setting.models_provider.base_model_provider import MaxKBBaseModel +from setting.models_provider.impl.base_chat_open_ai import BaseChatOpenAI + + +def custom_get_token_ids(text: str): + tokenizer = TokenizerManage.get_tokenizer() + return tokenizer.encode(text) + + +class TencentCloudChatModel(MaxKBBaseModel, BaseChatOpenAI): + + @staticmethod + def is_cache_model(): + return False + + @staticmethod + def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs): + optional_params = MaxKBBaseModel.filter_optional_params(model_kwargs) + azure_chat_open_ai = TencentCloudChatModel( + model=model_name, + openai_api_base=model_credential.get('api_base'), + openai_api_key=model_credential.get('api_key'), + extra_body=optional_params, + custom_get_token_ids=custom_get_token_ids + ) + return azure_chat_open_ai diff --git 
a/apps/setting/models_provider/impl/tencent_cloud_model_provider/tencent_cloud_model_provider.py b/apps/setting/models_provider/impl/tencent_cloud_model_provider/tencent_cloud_model_provider.py new file mode 100644 index 00000000000..2781165b234 --- /dev/null +++ b/apps/setting/models_provider/impl/tencent_cloud_model_provider/tencent_cloud_model_provider.py @@ -0,0 +1,61 @@ +# coding=utf-8 +""" + @project: maxkb + @Author:虎 + @file: openai_model_provider.py + @date:2024/3/28 16:26 + @desc: +""" +import os + +from common.util.file_util import get_file_content +from setting.models_provider.base_model_provider import IModelProvider, ModelProvideInfo, ModelInfo, \ + ModelTypeConst, ModelInfoManage +from setting.models_provider.impl.openai_model_provider.credential.embedding import OpenAIEmbeddingCredential +from setting.models_provider.impl.openai_model_provider.credential.image import OpenAIImageModelCredential +from setting.models_provider.impl.openai_model_provider.credential.llm import OpenAILLMModelCredential +from setting.models_provider.impl.openai_model_provider.credential.stt import OpenAISTTModelCredential +from setting.models_provider.impl.openai_model_provider.credential.tti import OpenAITextToImageModelCredential +from setting.models_provider.impl.openai_model_provider.credential.tts import OpenAITTSModelCredential +from setting.models_provider.impl.openai_model_provider.model.embedding import OpenAIEmbeddingModel +from setting.models_provider.impl.openai_model_provider.model.image import OpenAIImage +from setting.models_provider.impl.openai_model_provider.model.llm import OpenAIChatModel +from setting.models_provider.impl.openai_model_provider.model.stt import OpenAISpeechToText +from setting.models_provider.impl.openai_model_provider.model.tti import OpenAITextToImage +from setting.models_provider.impl.openai_model_provider.model.tts import OpenAITextToSpeech +from setting.models_provider.impl.tencent_cloud_model_provider.credential.llm import 
TencentCloudLLMModelCredential +from setting.models_provider.impl.tencent_cloud_model_provider.model.llm import TencentCloudChatModel +from smartdoc.conf import PROJECT_DIR +from django.utils.translation import gettext_lazy as _ + +openai_llm_model_credential = TencentCloudLLMModelCredential() +model_info_list = [ + ModelInfo('deepseek-v3', '', ModelTypeConst.LLM, + openai_llm_model_credential, TencentCloudChatModel + ), + ModelInfo('deepseek-r1', '', ModelTypeConst.LLM, + openai_llm_model_credential, TencentCloudChatModel + ), +] + +model_info_manage = ( + ModelInfoManage.builder() + .append_model_info_list(model_info_list) + .append_default_model_info( + ModelInfo('deepseek-v3', '', ModelTypeConst.LLM, + openai_llm_model_credential, TencentCloudChatModel + )) + .build() +) + + +class TencentCloudModelProvider(IModelProvider): + + def get_model_info_manage(self): + return model_info_manage + + def get_model_provide_info(self): + return ModelProvideInfo(provider='model_tencent_cloud_provider', name=_('Tencent Cloud'), icon=get_file_content( + os.path.join(PROJECT_DIR, "apps", "setting", 'models_provider', 'impl', 'tencent_cloud_model_provider', + 'icon', + 'tencent_cloud_icon_svg'))) diff --git a/apps/setting/models_provider/impl/tencent_model_provider/__init__.py b/apps/setting/models_provider/impl/tencent_model_provider/__init__.py new file mode 100644 index 00000000000..8cb7f459eae --- /dev/null +++ b/apps/setting/models_provider/impl/tencent_model_provider/__init__.py @@ -0,0 +1,2 @@ +#!/usr/bin/env python +# -*- coding: UTF-8 -*- diff --git a/apps/setting/models_provider/impl/tencent_model_provider/credential/embedding.py b/apps/setting/models_provider/impl/tencent_model_provider/credential/embedding.py new file mode 100644 index 00000000000..4c500005b15 --- /dev/null +++ b/apps/setting/models_provider/impl/tencent_model_provider/credential/embedding.py @@ -0,0 +1,41 @@ +import traceback +from typing import Dict + +from django.utils.translation import gettext 
as _ + +from common import forms +from common.exception.app_exception import AppApiException +from common.forms import BaseForm +from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode + + +class TencentEmbeddingCredential(BaseForm, BaseModelCredential): + + def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], model_params, provider, + raise_exception=True) -> bool: + model_type_list = provider.get_model_type_list() + if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))): + raise AppApiException(ValidCode.valid_error.value, + _('{model_type} Model type is not supported').format(model_type=model_type)) + self.valid_form(model_credential) + try: + model = provider.get_model(model_type, model_name, model_credential) + model.embed_query(_('Hello')) + except Exception as e: + traceback.print_exc() + if isinstance(e, AppApiException): + raise e + if raise_exception: + raise AppApiException(ValidCode.valid_error.value, + _('Verification failed, please check whether the parameters are correct: {error}').format( + error=str(e))) + else: + return False + return True + + def encryption_dict(self, model: Dict[str, object]) -> Dict[str, object]: + encrypted_secret_key = super().encryption(model.get('SecretKey', '')) + return {**model, 'SecretKey': encrypted_secret_key} + + SecretId = forms.PasswordInputField('SecretId', required=True) + SecretKey = forms.PasswordInputField('SecretKey', required=True) diff --git a/apps/setting/models_provider/impl/tencent_model_provider/credential/image.py b/apps/setting/models_provider/impl/tencent_model_provider/credential/image.py new file mode 100644 index 00000000000..257be9f67bf --- /dev/null +++ b/apps/setting/models_provider/impl/tencent_model_provider/credential/image.py @@ -0,0 +1,78 @@ +# coding=utf-8 +""" + @project: MaxKB + @Author:虎 + @file: llm.py + @date:2024/7/11 18:41 + @desc: +""" +import traceback +from typing import Dict + +from 
django.utils.translation import gettext_lazy as _, gettext +from langchain_core.messages import HumanMessage + +from common import forms +from common.exception.app_exception import AppApiException +from common.forms import BaseForm, TooltipLabel +from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode + + +class QwenModelParams(BaseForm): + temperature = forms.SliderField(TooltipLabel(_('Temperature'), + _('Higher values make the output more random, while lower values make it more focused and deterministic')), + required=True, default_value=1.0, + _min=0.1, + _max=1.9, + _step=0.01, + precision=2) + + max_tokens = forms.SliderField( + TooltipLabel(_('Output the maximum Tokens'), + _('Specify the maximum number of tokens that the model can generate')), + required=True, default_value=800, + _min=1, + _max=100000, + _step=1, + precision=0) + + +class TencentVisionModelCredential(BaseForm, BaseModelCredential): + + def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], model_params, provider, + raise_exception=False): + model_type_list = provider.get_model_type_list() + if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))): + raise AppApiException(ValidCode.valid_error.value, + gettext('{model_type} Model type is not supported').format(model_type=model_type)) + for key in ['api_key']: + if key not in model_credential: + if raise_exception: + raise AppApiException(ValidCode.valid_error.value, gettext('{key} is required').format(key=key)) + else: + return False + try: + model = provider.get_model(model_type, model_name, model_credential, **model_params) + res = model.stream([HumanMessage(content=[{"type": "text", "text": gettext('Hello')}])]) + for chunk in res: + print(chunk) + except Exception as e: + traceback.print_exc() + if isinstance(e, AppApiException): + raise e + if raise_exception: + raise AppApiException(ValidCode.valid_error.value, + gettext( + 'Verification failed, 
please check whether the parameters are correct: {error}').format( + error=str(e))) + else: + return False + return True + + def encryption_dict(self, model: Dict[str, object]): + return {**model, 'api_key': super().encryption(model.get('api_key', ''))} + + api_key = forms.PasswordInputField('API Key', required=True) + + def get_model_params_setting_form(self, model_name): + return QwenModelParams() diff --git a/apps/setting/models_provider/impl/tencent_model_provider/credential/llm.py b/apps/setting/models_provider/impl/tencent_model_provider/credential/llm.py new file mode 100644 index 00000000000..4fa28ba3023 --- /dev/null +++ b/apps/setting/models_provider/impl/tencent_model_provider/credential/llm.py @@ -0,0 +1,70 @@ +# coding=utf-8 +import traceback + +from django.utils.translation import gettext_lazy as _, gettext +from langchain_core.messages import HumanMessage + +from common import forms +from common.exception.app_exception import AppApiException +from common.forms import BaseForm, TooltipLabel +from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode + + +class TencentLLMModelParams(BaseForm): + temperature = forms.SliderField(TooltipLabel(_('Temperature'), + _('Higher values make the output more random, while lower values make it more focused and deterministic')), + required=True, default_value=0.5, + _min=0.1, + _max=2.0, + _step=0.01, + precision=2) + + +class TencentLLMModelCredential(BaseForm, BaseModelCredential): + REQUIRED_FIELDS = ['hunyuan_app_id', 'hunyuan_secret_id', 'hunyuan_secret_key'] + + @classmethod + def _validate_model_type(cls, model_type, provider, raise_exception=False): + if not any(mt['value'] == model_type for mt in provider.get_model_type_list()): + if raise_exception: + raise AppApiException(ValidCode.valid_error.value, + gettext('{model_type} Model type is not supported').format(model_type=model_type)) + return False + return True + + @classmethod + def _validate_credential_fields(cls, 
model_credential, raise_exception=False): + missing_keys = [key for key in cls.REQUIRED_FIELDS if key not in model_credential] + if missing_keys: + if raise_exception: + raise AppApiException(ValidCode.valid_error.value, + gettext('{keys} is required').format(keys=", ".join(missing_keys))) + return False + return True + + def is_valid(self, model_type, model_name, model_credential, model_params, provider, raise_exception=False): + if not (self._validate_model_type(model_type, provider, raise_exception) and + self._validate_credential_fields(model_credential, raise_exception)): + return False + try: + model = provider.get_model(model_type, model_name, model_credential, **model_params) + model.invoke([HumanMessage(content=gettext('Hello'))]) + except Exception as e: + traceback.print_exc() + if raise_exception: + raise AppApiException(ValidCode.valid_error.value, + gettext( + 'Verification failed, please check whether the parameters are correct: {error}').format( + error=str(e))) + return False + return True + + def encryption_dict(self, model): + return {**model, 'hunyuan_secret_key': super().encryption(model.get('hunyuan_secret_key', ''))} + + hunyuan_app_id = forms.TextInputField('APP ID', required=True) + hunyuan_secret_id = forms.PasswordInputField('SecretId', required=True) + hunyuan_secret_key = forms.PasswordInputField('SecretKey', required=True) + + def get_model_params_setting_form(self, model_name): + return TencentLLMModelParams() diff --git a/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py b/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py new file mode 100644 index 00000000000..60fcfbfc9b0 --- /dev/null +++ b/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py @@ -0,0 +1,116 @@ +# coding=utf-8 +import traceback + +from django.utils.translation import gettext_lazy as _, gettext + +from common import forms +from common.exception.app_exception import AppApiException +from 
common.forms import BaseForm, TooltipLabel +from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode + + +class TencentTTIModelParams(BaseForm): + Style = forms.SingleSelect( + TooltipLabel(_('painting style'), _('If not passed, the default value is 201 (Japanese anime style)')), + required=True, + default_value='201', + option_list=[ + {'value': '000', 'label': _('Not limited to style')}, + {'value': '101', 'label': _('ink painting')}, + {'value': '102', 'label': _('concept art')}, + {'value': '103', 'label': _('Oil painting 1')}, + {'value': '118', 'label': _('Oil Painting 2 (Van Gogh)')}, + {'value': '104', 'label': _('watercolor painting')}, + {'value': '105', 'label': _('pixel art')}, + {'value': '106', 'label': _('impasto style')}, + {'value': '107', 'label': _('illustration')}, + {'value': '108', 'label': _('paper cut style')}, + {'value': '109', 'label': _('Impressionism 1 (Monet)')}, + {'value': '119', 'label': _('Impressionism 2')}, + {'value': '110', 'label': '2.5D'}, + {'value': '111', 'label': _('classical portraiture')}, + {'value': '112', 'label': _('black and white sketch')}, + {'value': '113', 'label': _('cyberpunk')}, + {'value': '114', 'label': _('science fiction style')}, + {'value': '115', 'label': _('dark style')}, + {'value': '116', 'label': '3D'}, + {'value': '117', 'label': _('vaporwave')}, + {'value': '201', 'label': _('Japanese animation')}, + {'value': '202', 'label': _('monster style')}, + {'value': '203', 'label': _('Beautiful ancient style')}, + {'value': '204', 'label': _('retro anime')}, + {'value': '301', 'label': _('Game cartoon hand drawing')}, + {'value': '401', 'label': _('Universal realistic style')}, + ], + value_field='value', + text_field='label' + ) + + Resolution = forms.SingleSelect( + TooltipLabel(_('Generate image resolution'), _('If not transmitted, the default value is 768:768.')), + required=True, + default_value='768:768', + option_list=[ + {'value': '768:768', 'label': 
'768:768(1:1)'}, + {'value': '768:1024', 'label': '768:1024(3:4)'}, + {'value': '1024:768', 'label': '1024:768(4:3)'}, + {'value': '1024:1024', 'label': '1024:1024(1:1)'}, + {'value': '720:1280', 'label': '720:1280(9:16)'}, + {'value': '1280:720', 'label': '1280:720(16:9)'}, + {'value': '768:1280', 'label': '768:1280(3:5)'}, + {'value': '1280:768', 'label': '1280:768(5:3)'}, + {'value': '1080:1920', 'label': '1080:1920(9:16)'}, + {'value': '1920:1080', 'label': '1920:1080(16:9)'}, + ], + value_field='value', + text_field='label' + ) + + +class TencentTTIModelCredential(BaseForm, BaseModelCredential): + REQUIRED_FIELDS = ['hunyuan_secret_id', 'hunyuan_secret_key'] + + @classmethod + def _validate_model_type(cls, model_type, provider, raise_exception=False): + if not any(mt['value'] == model_type for mt in provider.get_model_type_list()): + if raise_exception: + raise AppApiException(ValidCode.valid_error.value, + gettext('{model_type} Model type is not supported').format(model_type=model_type)) + return False + return True + + @classmethod + def _validate_credential_fields(cls, model_credential, raise_exception=False): + missing_keys = [key for key in cls.REQUIRED_FIELDS if key not in model_credential] + if missing_keys: + if raise_exception: + raise AppApiException(ValidCode.valid_error.value, + gettext('{keys} is required').format(keys=", ".join(missing_keys))) + return False + return True + + def is_valid(self, model_type, model_name, model_credential, model_params, provider, raise_exception=False): + if not (self._validate_model_type(model_type, provider, raise_exception) and + self._validate_credential_fields(model_credential, raise_exception)): + return False + try: + model = provider.get_model(model_type, model_name, model_credential, **model_params) + model.check_auth() + except Exception as e: + traceback.print_exc() + if raise_exception: + raise AppApiException(ValidCode.valid_error.value, + gettext( + 'Verification failed, please check whether the 
parameters are correct: {error}').format( + error=str(e))) + return False + return True + + def encryption_dict(self, model): + return {**model, 'hunyuan_secret_key': super().encryption(model.get('hunyuan_secret_key', ''))} + + hunyuan_secret_id = forms.PasswordInputField('SecretId', required=True) + hunyuan_secret_key = forms.PasswordInputField('SecretKey', required=True) + + def get_model_params_setting_form(self, model_name): + return TencentTTIModelParams() diff --git a/apps/setting/models_provider/impl/tencent_model_provider/icon/tencent_icon_svg b/apps/setting/models_provider/impl/tencent_model_provider/icon/tencent_icon_svg new file mode 100644 index 00000000000..6cec08b74c2 --- /dev/null +++ b/apps/setting/models_provider/impl/tencent_model_provider/icon/tencent_icon_svg @@ -0,0 +1,5 @@ + + + + diff --git a/apps/setting/models_provider/impl/tencent_model_provider/model/embedding.py b/apps/setting/models_provider/impl/tencent_model_provider/model/embedding.py new file mode 100644 index 00000000000..659a5ac12b4 --- /dev/null +++ b/apps/setting/models_provider/impl/tencent_model_provider/model/embedding.py @@ -0,0 +1,41 @@ + +from typing import Dict, List + +from langchain_core.embeddings import Embeddings +from tencentcloud.common import credential +from tencentcloud.hunyuan.v20230901.hunyuan_client import HunyuanClient +from tencentcloud.hunyuan.v20230901.models import GetEmbeddingRequest + +from setting.models_provider.base_model_provider import MaxKBBaseModel + + +class TencentEmbeddingModel(MaxKBBaseModel, Embeddings): + def embed_documents(self, texts: List[str]) -> List[List[float]]: + return [self.embed_query(text) for text in texts] + + def embed_query(self, text: str) -> List[float]: + request = GetEmbeddingRequest() + request.Input = text + res = self.client.GetEmbedding(request) + return res.Data[0].Embedding + + def __init__(self, secret_id: str, secret_key: str, model_name: str): + self.secret_id = secret_id + self.secret_key = secret_key + 
self.model_name = model_name + cred = credential.Credential( + secret_id, secret_key + ) + self.client = HunyuanClient(cred, "") + + @staticmethod + def new_instance(model_type: str, model_name: str, model_credential: Dict[str, str], **model_kwargs): + return TencentEmbeddingModel( + secret_id=model_credential.get('SecretId'), + secret_key=model_credential.get('SecretKey'), + model_name=model_name, + ) + + def _generate_auth_token(self): + # Example method to generate an authentication token for the model API + return f"{self.secret_id}:{self.secret_key}" diff --git a/apps/setting/models_provider/impl/tencent_model_provider/model/hunyuan.py b/apps/setting/models_provider/impl/tencent_model_provider/model/hunyuan.py new file mode 100644 index 00000000000..9055c4cb1be --- /dev/null +++ b/apps/setting/models_provider/impl/tencent_model_provider/model/hunyuan.py @@ -0,0 +1,280 @@ +import json +import logging +from typing import Any, Dict, Iterator, List, Mapping, Optional, Type + +from langchain_core.callbacks import CallbackManagerForLLMRun +from langchain_core.language_models.chat_models import ( + BaseChatModel, + generate_from_stream, +) +from langchain_core.messages import ( + AIMessage, + AIMessageChunk, + BaseMessage, + BaseMessageChunk, + ChatMessage, + ChatMessageChunk, + HumanMessage, + HumanMessageChunk, SystemMessage, +) +from langchain_core.outputs import ChatGeneration, ChatGenerationChunk, ChatResult +from pydantic import Field, SecretStr, root_validator +from langchain_core.utils import ( + convert_to_secret_str, + get_from_dict_or_env, + get_pydantic_field_names, + pre_init, +) + +logger = logging.getLogger(__name__) + + +def _convert_message_to_dict(message: BaseMessage) -> dict: + message_dict: Dict[str, Any] + if isinstance(message, ChatMessage): + message_dict = {"Role": message.role, "Content": message.content} + elif isinstance(message, HumanMessage): + message_dict = {"Role": "user", "Content": message.content} + elif isinstance(message, 
AIMessage): + message_dict = {"Role": "assistant", "Content": message.content} + elif isinstance(message, SystemMessage): + message_dict = {"Role": "system", "Content": message.content} + else: + raise TypeError(f"Got unknown type {message}") + + return message_dict + + +def _convert_dict_to_message(_dict: Mapping[str, Any]) -> BaseMessage: + role = _dict["Role"] + if role == "user": + return HumanMessage(content=_dict["Content"]) + elif role == "assistant": + return AIMessage(content=_dict.get("Content", "") or "") + else: + return ChatMessage(content=_dict["Content"], role=role) + + +def _convert_delta_to_message_chunk( + _dict: Mapping[str, Any], default_class: Type[BaseMessageChunk] +) -> BaseMessageChunk: + role = _dict.get("Role") + content = _dict.get("Content") or "" + + if role == "user" or default_class == HumanMessageChunk: + return HumanMessageChunk(content=content) + elif role == "assistant" or default_class == AIMessageChunk: + return AIMessageChunk(content=content) + elif role or default_class == ChatMessageChunk: + return ChatMessageChunk(content=content, role=role) # type: ignore[arg-type] + else: + return default_class(content=content) # type: ignore[call-arg] + + +def _create_chat_result(response: Mapping[str, Any]) -> ChatResult: + generations = [] + for choice in response["Choices"]: + message = _convert_dict_to_message(choice["Message"]) + generations.append(ChatGeneration(message=message)) + + token_usage = response["Usage"] + llm_output = {"token_usage": token_usage} + return ChatResult(generations=generations, llm_output=llm_output) + + +class ChatHunyuan(BaseChatModel): + """Tencent Hunyuan chat models API by Tencent. 
+ + For more information, see https://cloud.tencent.com/document/product/1729 + """ + + @property + def lc_secrets(self) -> Dict[str, str]: + return { + "hunyuan_app_id": "HUNYUAN_APP_ID", + "hunyuan_secret_id": "HUNYUAN_SECRET_ID", + "hunyuan_secret_key": "HUNYUAN_SECRET_KEY", + } + + @property + def lc_serializable(self) -> bool: + return True + + hunyuan_app_id: Optional[int] = None + """Hunyuan App ID""" + hunyuan_secret_id: Optional[str] = None + """Hunyuan Secret ID""" + hunyuan_secret_key: Optional[SecretStr] = None + """Hunyuan Secret Key""" + streaming: bool = False + """Whether to stream the results or not.""" + request_timeout: int = 60 + """Timeout for requests to Hunyuan API. Default is 60 seconds.""" + temperature: float = 1.0 + """What sampling temperature to use.""" + top_p: float = 1.0 + """What probability mass to use.""" + model: str = "hunyuan-lite" + """What Model to use. + Optional model: + - hunyuan-lite、 + - hunyuan-standard + - hunyuan-standard-256K + - hunyuan-pro + - hunyuan-code + - hunyuan-role + - hunyuan-functioncall + - hunyuan-vision + """ + stream_moderation: bool = False + """Whether to review the results or not when streaming is true.""" + enable_enhancement: bool = True + """Whether to enhancement the results or not.""" + + model_kwargs: Dict[str, Any] = Field(default_factory=dict) + """Holds any model parameters valid for API call not explicitly specified.""" + + class Config: + """Configuration for this pydantic object.""" + + validate_by_name = True + + @root_validator(pre=True) + def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]: + """Build extra kwargs from additional params that were passed in.""" + all_required_field_names = get_pydantic_field_names(cls) + extra = values.get("model_kwargs", {}) + for field_name in list(values): + if field_name in extra: + raise ValueError(f"Found {field_name} supplied twice.") + if field_name not in all_required_field_names: + logger.warning( + f"""WARNING! 
{field_name} is not default parameter. + {field_name} was transferred to model_kwargs. + Please confirm that {field_name} is what you intended.""" + ) + extra[field_name] = values.pop(field_name) + + invalid_model_kwargs = all_required_field_names.intersection(extra.keys()) + if invalid_model_kwargs: + raise ValueError( + f"Parameters {invalid_model_kwargs} should be specified explicitly. " + f"Instead they were passed in as part of `model_kwargs` parameter." + ) + + values["model_kwargs"] = extra + return values + + @pre_init + def validate_environment(cls, values: Dict) -> Dict: + values["hunyuan_app_id"] = get_from_dict_or_env( + values, + "hunyuan_app_id", + "HUNYUAN_APP_ID", + ) + values["hunyuan_secret_id"] = get_from_dict_or_env( + values, + "hunyuan_secret_id", + "HUNYUAN_SECRET_ID", + ) + values["hunyuan_secret_key"] = convert_to_secret_str( + get_from_dict_or_env( + values, + "hunyuan_secret_key", + "HUNYUAN_SECRET_KEY", + ) + ) + return values + + @property + def _default_params(self) -> Dict[str, Any]: + """Get the default parameters for calling Hunyuan API.""" + normal_params = { + "Temperature": self.temperature, + "TopP": self.top_p, + "Model": self.model, + "Stream": self.streaming, + "StreamModeration": self.stream_moderation, + "EnableEnhancement": self.enable_enhancement, + } + return {**normal_params, **self.model_kwargs} + + def _generate( + self, + messages: List[BaseMessage], + stop: Optional[List[str]] = None, + run_manager: Optional[CallbackManagerForLLMRun] = None, + **kwargs: Any, + ) -> ChatResult: + if self.streaming: + stream_iter = self._stream( + messages=messages, stop=stop, run_manager=run_manager, **kwargs + ) + return generate_from_stream(stream_iter) + + res = self._chat(messages, **kwargs) + return _create_chat_result(json.loads(res.to_json_string())) + + usage_metadata: dict = {} + + def _stream( + self, + messages: List[BaseMessage], + stop: Optional[List[str]] = None, + run_manager: Optional[CallbackManagerForLLMRun] = None, 
+ **kwargs: Any, + ) -> Iterator[ChatGenerationChunk]: + res = self._chat(messages, **kwargs) + + default_chunk_class = AIMessageChunk + for chunk in res: + chunk = chunk.get("data", "") + if len(chunk) == 0: + continue + response = json.loads(chunk) + if "error" in response: + raise ValueError(f"Error from Hunyuan api response: {response}") + + for choice in response["Choices"]: + chunk = _convert_delta_to_message_chunk( + choice["Delta"], default_chunk_class + ) + default_chunk_class = chunk.__class__ + # FinishReason === stop + if choice.get("FinishReason") == "stop": + self.usage_metadata = response.get("Usage", {}) + cg_chunk = ChatGenerationChunk(message=chunk) + if run_manager: + run_manager.on_llm_new_token(chunk.content, chunk=cg_chunk) + yield cg_chunk + + def _chat(self, messages: List[BaseMessage], **kwargs: Any) -> Any: + if self.hunyuan_secret_key is None: + raise ValueError("Hunyuan secret key is not set.") + + try: + from tencentcloud.common import credential + from tencentcloud.hunyuan.v20230901 import hunyuan_client, models + except ImportError: + raise ImportError( + "Could not import tencentcloud python package. " + "Please install it with `pip install tencentcloud-sdk-python`." 
+ ) + + parameters = {**self._default_params, **kwargs} + cred = credential.Credential( + self.hunyuan_secret_id, str(self.hunyuan_secret_key.get_secret_value()) + ) + client = hunyuan_client.HunyuanClient(cred, "") + req = models.ChatCompletionsRequest() + params = { + "Messages": [_convert_message_to_dict(m) for m in messages], + **parameters, + } + req.from_json_string(json.dumps(params)) + resp = client.ChatCompletions(req) + return resp + + @property + def _llm_type(self) -> str: + return "hunyuan-chat" diff --git a/apps/setting/models_provider/impl/tencent_model_provider/model/image.py b/apps/setting/models_provider/impl/tencent_model_provider/model/image.py new file mode 100644 index 00000000000..6800cdd567c --- /dev/null +++ b/apps/setting/models_provider/impl/tencent_model_provider/model/image.py @@ -0,0 +1,20 @@ +from typing import Dict + +from setting.models_provider.base_model_provider import MaxKBBaseModel +from setting.models_provider.impl.base_chat_open_ai import BaseChatOpenAI + + +class TencentVision(MaxKBBaseModel, BaseChatOpenAI): + + @staticmethod + def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs): + optional_params = MaxKBBaseModel.filter_optional_params(model_kwargs) + return TencentVision( + model_name=model_name, + openai_api_base='https://api.hunyuan.cloud.tencent.com/v1', + openai_api_key=model_credential.get('api_key'), + # stream_options={"include_usage": True}, + streaming=True, + stream_usage=True, + extra_body=optional_params + ) diff --git a/apps/setting/models_provider/impl/tencent_model_provider/model/llm.py b/apps/setting/models_provider/impl/tencent_model_provider/model/llm.py new file mode 100644 index 00000000000..d462cb7af19 --- /dev/null +++ b/apps/setting/models_provider/impl/tencent_model_provider/model/llm.py @@ -0,0 +1,45 @@ +# coding=utf-8 + +from typing import List, Dict, Optional, Any + +from langchain_core.messages import BaseMessage + +from 
setting.models_provider.base_model_provider import MaxKBBaseModel +from setting.models_provider.impl.tencent_model_provider.model.hunyuan import ChatHunyuan + + +class TencentModel(MaxKBBaseModel, ChatHunyuan): + @staticmethod + def is_cache_model(): + return False + + def __init__(self, model_name: str, credentials: Dict[str, str], streaming: bool = False, **kwargs): + hunyuan_app_id = credentials.get('hunyuan_app_id') + hunyuan_secret_id = credentials.get('hunyuan_secret_id') + hunyuan_secret_key = credentials.get('hunyuan_secret_key') + + optional_params = MaxKBBaseModel.filter_optional_params(kwargs) + + if not all([hunyuan_app_id, hunyuan_secret_id, hunyuan_secret_key]): + raise ValueError( + "All of 'hunyuan_app_id', 'hunyuan_secret_id', and 'hunyuan_secret_key' must be provided in credentials.") + + super().__init__(model=model_name, hunyuan_app_id=hunyuan_app_id, hunyuan_secret_id=hunyuan_secret_id, + hunyuan_secret_key=hunyuan_secret_key, streaming=streaming, + temperature=optional_params.get('temperature', 1.0) + ) + + @staticmethod + def new_instance(model_type: str, model_name: str, model_credential: Dict[str, object], + **model_kwargs) -> 'TencentModel': + streaming = model_kwargs.pop('streaming', False) + return TencentModel(model_name=model_name, credentials=model_credential, streaming=streaming, **model_kwargs) + + def get_last_generation_info(self) -> Optional[Dict[str, Any]]: + return self.usage_metadata + + def get_num_tokens_from_messages(self, messages: List[BaseMessage]) -> int: + return self.usage_metadata.get('PromptTokens', 0) + + def get_num_tokens(self, text: str) -> int: + return self.usage_metadata.get('CompletionTokens', 0) diff --git a/apps/setting/models_provider/impl/tencent_model_provider/model/tti.py b/apps/setting/models_provider/impl/tencent_model_provider/model/tti.py new file mode 100644 index 00000000000..c2e671e4930 --- /dev/null +++ b/apps/setting/models_provider/impl/tencent_model_provider/model/tti.py @@ -0,0 +1,92 @@ +# 
coding=utf-8 + +import json +from typing import Dict + +from django.utils.translation import gettext as _ +from tencentcloud.common import credential +from tencentcloud.common.exception.tencent_cloud_sdk_exception import TencentCloudSDKException +from tencentcloud.common.profile.client_profile import ClientProfile +from tencentcloud.common.profile.http_profile import HttpProfile +from tencentcloud.hunyuan.v20230901 import hunyuan_client, models + +from setting.models_provider.base_model_provider import MaxKBBaseModel +from setting.models_provider.impl.base_tti import BaseTextToImage +from setting.models_provider.impl.tencent_model_provider.model.hunyuan import ChatHunyuan + + +class TencentTextToImageModel(MaxKBBaseModel, BaseTextToImage): + hunyuan_secret_id: str + hunyuan_secret_key: str + model: str + params: dict + + @staticmethod + def is_cache_model(): + return False + + def __init__(self, **kwargs): + super().__init__(**kwargs) + self.hunyuan_secret_id = kwargs.get('hunyuan_secret_id') + self.hunyuan_secret_key = kwargs.get('hunyuan_secret_key') + self.model = kwargs.get('model_name') + self.params = kwargs.get('params') + + @staticmethod + def new_instance(model_type: str, model_name: str, model_credential: Dict[str, object], + **model_kwargs) -> 'TencentTextToImageModel': + optional_params = {'params': {'Style': '201', 'Resolution': '768:768'}} + for key, value in model_kwargs.items(): + if key not in ['model_id', 'use_local', 'streaming']: + optional_params['params'][key] = value + return TencentTextToImageModel( + model=model_name, + hunyuan_secret_id=model_credential.get('hunyuan_secret_id'), + hunyuan_secret_key=model_credential.get('hunyuan_secret_key'), + **optional_params + ) + + def check_auth(self): + chat = ChatHunyuan(hunyuan_app_id='111111', + hunyuan_secret_id=self.hunyuan_secret_id, + hunyuan_secret_key=self.hunyuan_secret_key, + model="hunyuan-standard") + res = chat.invoke(_('Hello')) + # print(res) + + def generate_image(self, prompt: str, 
negative_prompt: str = None): + try: + # 实例化一个认证对象,入参需要传入腾讯云账户 SecretId 和 SecretKey,此处还需注意密钥对的保密 + # 代码泄露可能会导致 SecretId 和 SecretKey 泄露,并威胁账号下所有资源的安全性。以下代码示例仅供参考,建议采用更安全的方式来使用密钥,请参见:https://cloud.tencent.com/document/product/1278/85305 + # 密钥可前往官网控制台 https://console.cloud.tencent.com/cam/capi 进行获取 + cred = credential.Credential(self.hunyuan_secret_id, self.hunyuan_secret_key) + # 实例化一个http选项,可选的,没有特殊需求可以跳过 + httpProfile = HttpProfile() + httpProfile.endpoint = "hunyuan.tencentcloudapi.com" + + # 实例化一个client选项,可选的,没有特殊需求可以跳过 + clientProfile = ClientProfile() + clientProfile.httpProfile = httpProfile + # 实例化要请求产品的client对象,clientProfile是可选的 + client = hunyuan_client.HunyuanClient(cred, "ap-guangzhou", clientProfile) + + # 实例化一个请求对象,每个接口都会对应一个request对象 + req = models.TextToImageLiteRequest() + params = { + "Prompt": prompt, + "NegativePrompt": negative_prompt, + "RspImgType": "url", + **self.params + } + req.from_json_string(json.dumps(params)) + + # 返回的resp是一个TextToImageLiteResponse的实例,与请求对象对应 + resp = client.TextToImageLite(req) + # 输出json格式的字符串回包 + print(resp.to_json_string()) + file_urls = [] + + file_urls.append(resp.ResultImage) + return file_urls + except TencentCloudSDKException as err: + print(err) diff --git a/apps/setting/models_provider/impl/tencent_model_provider/tencent_model_provider.py b/apps/setting/models_provider/impl/tencent_model_provider/tencent_model_provider.py new file mode 100644 index 00000000000..e6a1c16fd4c --- /dev/null +++ b/apps/setting/models_provider/impl/tencent_model_provider/tencent_model_provider.py @@ -0,0 +1,127 @@ +#!/usr/bin/env python +# -*- coding: UTF-8 -*- + +import os +from common.util.file_util import get_file_content +from setting.models_provider.base_model_provider import ( + IModelProvider, ModelProvideInfo, ModelInfo, ModelTypeConst, ModelInfoManage +) +from setting.models_provider.impl.tencent_model_provider.credential.embedding import TencentEmbeddingCredential +from 
setting.models_provider.impl.tencent_model_provider.credential.image import TencentVisionModelCredential +from setting.models_provider.impl.tencent_model_provider.credential.llm import TencentLLMModelCredential +from setting.models_provider.impl.tencent_model_provider.credential.tti import TencentTTIModelCredential +from setting.models_provider.impl.tencent_model_provider.model.embedding import TencentEmbeddingModel +from setting.models_provider.impl.tencent_model_provider.model.image import TencentVision +from setting.models_provider.impl.tencent_model_provider.model.llm import TencentModel +from setting.models_provider.impl.tencent_model_provider.model.tti import TencentTextToImageModel +from smartdoc.conf import PROJECT_DIR +from django.utils.translation import gettext as _ + +def _create_model_info(model_name, description, model_type, credential_class, model_class): + return ModelInfo( + name=model_name, + desc=description, + model_type=model_type, + model_credential=credential_class(), + model_class=model_class + ) + + +def _get_tencent_icon_path(): + return os.path.join(PROJECT_DIR, "apps", "setting", 'models_provider', 'impl', 'tencent_model_provider', + 'icon', 'tencent_icon_svg') + + +def _initialize_model_info(): + model_info_list = [_create_model_info( + 'hunyuan-pro', + _('The most effective version of the current hybrid model, the trillion-level parameter scale MOE-32K long article model. Reaching the absolute leading level on various benchmarks, with complex instructions and reasoning, complex mathematical capabilities, support for function call, and application focus optimization in fields such as multi-language translation, finance, law, and medical care'), + ModelTypeConst.LLM, + TencentLLMModelCredential, + TencentModel + ), + _create_model_info( + 'hunyuan-standard', + _('A better routing strategy is adopted to simultaneously alleviate the problems of load balancing and expert convergence. 
For long articles, the needle-in-a-haystack index reaches 99.9%'), + ModelTypeConst.LLM, + TencentLLMModelCredential, + TencentModel), + _create_model_info( + 'hunyuan-lite', + _('Upgraded to MOE structure, the context window is 256k, leading many open source models in multiple evaluation sets such as NLP, code, mathematics, industry, etc.'), + ModelTypeConst.LLM, + TencentLLMModelCredential, + TencentModel), + _create_model_info( + 'hunyuan-role', + _("Hunyuan's latest version of the role-playing model, a role-playing model launched by Hunyuan's official fine-tuning training, is based on the Hunyuan model combined with the role-playing scene data set for additional training, and has better basic effects in role-playing scenes."), + ModelTypeConst.LLM, + TencentLLMModelCredential, + TencentModel), + _create_model_info( + 'hunyuan-functioncall', + _("Hunyuan's latest MOE architecture FunctionCall model has been trained with high-quality FunctionCall data and has a context window of 32K, leading in multiple dimensions of evaluation indicators."), + ModelTypeConst.LLM, + TencentLLMModelCredential, + TencentModel), + _create_model_info( + 'hunyuan-code', + _("Hunyuan's latest code generation model, after training the base model with 200B high-quality code data, and iterating on high-quality SFT data for half a year, the context long window length has been increased to 8K, and it ranks among the top in the automatic evaluation indicators of code generation in the five major languages; the five major languages In the manual high-quality evaluation of 10 comprehensive code tasks that consider all aspects, the performance is in the first echelon."), + ModelTypeConst.LLM, + TencentLLMModelCredential, + TencentModel), + ] + + tencent_embedding_model_info = _create_model_info( + 'hunyuan-embedding', + _("Tencent's Hunyuan Embedding interface can convert text into high-quality vector data. 
The vector dimension is 1024 dimensions."),
        ModelTypeConst.EMBEDDING,
        TencentEmbeddingCredential,
        TencentEmbeddingModel
    )

    model_info_embedding_list = [tencent_embedding_model_info]

    # Vision (image understanding) models.
    # NOTE(review): 'Mixed element visual model' reads like a literal machine
    # translation of 混元 ("Hunyuan") — presumably "Hunyuan vision model".
    # Confirm before changing: existing .po translations key on this msgid.
    model_info_vision_list = [_create_model_info(
        'hunyuan-vision',
        _('Mixed element visual model'),
        ModelTypeConst.IMAGE,
        TencentVisionModelCredential,
        TencentVision)]

    # Text-to-image models.
    # NOTE(review): 'Hunyuan graph model' likely means "Hunyuan text-to-image
    # model" — confirm before renaming the msgid (same translation caveat).
    model_info_tti_list = [_create_model_info(
        'hunyuan-dit',
        _('Hunyuan graph model'),
        ModelTypeConst.TTI,
        TencentTTIModelCredential,
        TencentTextToImageModel)]

    # Register every model list and mark one default entry per model type.
    model_info_manage = ModelInfoManage.builder() \
        .append_model_info_list(model_info_list) \
        .append_model_info_list(model_info_embedding_list) \
        .append_model_info_list(model_info_vision_list) \
        .append_default_model_info(model_info_vision_list[0]) \
        .append_model_info_list(model_info_tti_list) \
        .append_default_model_info(model_info_tti_list[0]) \
        .append_default_model_info(model_info_list[0]) \
        .append_default_model_info(tencent_embedding_model_info) \
        .build()

    return model_info_manage


class TencentModelProvider(IModelProvider):
    # Provider entry point wiring the Tencent Hunyuan catalogue into MaxKB.
    def __init__(self):
        # Catalogue is built once at construction time.
        self._model_info_manage = _initialize_model_info()

    def get_model_info_manage(self):
        return self._model_info_manage

    def get_model_provide_info(self):
        # The icon is a raw SVG file stored next to this module.
        icon_path = _get_tencent_icon_path()
        icon_data = get_file_content(icon_path)
        return ModelProvideInfo(
            provider='model_tencent_provider',
            name=_('Tencent Hunyuan'),
            icon=icon_data
        )
diff --git a/apps/setting/models_provider/impl/vllm_model_provider/__init__.py b/apps/setting/models_provider/impl/vllm_model_provider/__init__.py
new file mode 100644
index 00000000000..9bad5790a57
--- /dev/null
+++ b/apps/setting/models_provider/impl/vllm_model_provider/__init__.py
@@ -0,0 +1 @@
# coding=utf-8
diff --git a/apps/setting/models_provider/impl/vllm_model_provider/credential/embedding.py b/apps/setting/models_provider/impl/vllm_model_provider/credential/embedding.py
new file mode 100644
index 00000000000..3ebccd20d53 --- /dev/null +++ b/apps/setting/models_provider/impl/vllm_model_provider/credential/embedding.py @@ -0,0 +1,53 @@ +# coding=utf-8 +""" + @project: MaxKB + @Author:虎 + @file: embedding.py + @date:2024/7/12 16:45 + @desc: +""" +import traceback +from typing import Dict + +from django.utils.translation import gettext as _ + +from common import forms +from common.exception.app_exception import AppApiException +from common.forms import BaseForm +from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode + + +class VllmEmbeddingCredential(BaseForm, BaseModelCredential): + def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], model_params, provider, + raise_exception=True): + model_type_list = provider.get_model_type_list() + if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))): + raise AppApiException(ValidCode.valid_error.value, + _('{model_type} Model type is not supported').format(model_type=model_type)) + + for key in ['api_base', 'api_key']: + if key not in model_credential: + if raise_exception: + raise AppApiException(ValidCode.valid_error.value, _('{key} is required').format(key=key)) + else: + return False + try: + model = provider.get_model(model_type, model_name, model_credential) + model.embed_query(_('Hello')) + except Exception as e: + traceback.print_exc() + if isinstance(e, AppApiException): + raise e + if raise_exception: + raise AppApiException(ValidCode.valid_error.value, + _('Verification failed, please check whether the parameters are correct: {error}').format( + error=str(e))) + else: + return False + return True + + def encryption_dict(self, model: Dict[str, object]): + return {**model, 'api_key': super().encryption(model.get('api_key', ''))} + + api_base = forms.TextInputField('API URL', required=True) + api_key = forms.PasswordInputField('API Key', required=True) diff --git 
a/apps/setting/models_provider/impl/vllm_model_provider/credential/image.py b/apps/setting/models_provider/impl/vllm_model_provider/credential/image.py new file mode 100644 index 00000000000..68674fc0b5a --- /dev/null +++ b/apps/setting/models_provider/impl/vllm_model_provider/credential/image.py @@ -0,0 +1,72 @@ +# coding=utf-8 +import traceback +from typing import Dict + +from django.utils.translation import gettext_lazy as _, gettext +from langchain_core.messages import HumanMessage + +from common import forms +from common.exception.app_exception import AppApiException +from common.forms import BaseForm, TooltipLabel +from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode + + +class VllmImageModelParams(BaseForm): + temperature = forms.SliderField(TooltipLabel(_('Temperature'), + _('Higher values make the output more random, while lower values make it more focused and deterministic')), + required=True, default_value=0.7, + _min=0.1, + _max=1.0, + _step=0.01, + precision=2) + + max_tokens = forms.SliderField( + TooltipLabel(_('Output the maximum Tokens'), + _('Specify the maximum number of tokens that the model can generate')), + required=True, default_value=800, + _min=1, + _max=100000, + _step=1, + precision=0) + + +class VllmImageModelCredential(BaseForm, BaseModelCredential): + api_base = forms.TextInputField('API URL', required=True) + api_key = forms.PasswordInputField('API Key', required=True) + + def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], model_params, provider, + raise_exception=False): + model_type_list = provider.get_model_type_list() + if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))): + raise AppApiException(ValidCode.valid_error.value, + gettext('{model_type} Model type is not supported').format(model_type=model_type)) + + for key in ['api_base', 'api_key']: + if key not in model_credential: + if raise_exception: + raise 
AppApiException(ValidCode.valid_error.value, gettext('{key} is required').format(key=key)) + else: + return False + try: + model = provider.get_model(model_type, model_name, model_credential, **model_params) + res = model.stream([HumanMessage(content=[{"type": "text", "text": "你好"}])]) + for chunk in res: + print(chunk) + except Exception as e: + traceback.print_exc() + if isinstance(e, AppApiException): + raise e + if raise_exception: + raise AppApiException(ValidCode.valid_error.value, + gettext( + 'Verification failed, please check whether the parameters are correct: {error}').format( + error=str(e))) + else: + return False + return True + + def encryption_dict(self, model: Dict[str, object]): + return {**model, 'api_key': super().encryption(model.get('api_key', ''))} + + def get_model_params_setting_form(self, model_name): + return VllmImageModelParams() diff --git a/apps/setting/models_provider/impl/vllm_model_provider/credential/llm.py b/apps/setting/models_provider/impl/vllm_model_provider/credential/llm.py new file mode 100644 index 00000000000..6700b756d90 --- /dev/null +++ b/apps/setting/models_provider/impl/vllm_model_provider/credential/llm.py @@ -0,0 +1,73 @@ +# coding=utf-8 +import traceback +from typing import Dict + +from django.utils.translation import gettext_lazy as _, gettext +from langchain_core.messages import HumanMessage + +from common import forms +from common.exception.app_exception import AppApiException +from common.forms import BaseForm, TooltipLabel +from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode + + +class VLLMModelParams(BaseForm): + temperature = forms.SliderField(TooltipLabel(_('Temperature'), + _('Higher values make the output more random, while lower values make it more focused and deterministic')), + required=True, default_value=0.7, + _min=0.1, + _max=1.0, + _step=0.01, + precision=2) + + max_tokens = forms.SliderField( + TooltipLabel(_('Output the maximum Tokens'), + _('Specify the 
maximum number of tokens that the model can generate')), + required=True, default_value=800, + _min=1, + _max=100000, + _step=1, + precision=0) + + +class VLLMModelCredential(BaseForm, BaseModelCredential): + def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], model_params, provider, + raise_exception=False): + model_type_list = provider.get_model_type_list() + if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))): + raise AppApiException(ValidCode.valid_error.value, + gettext('{model_type} Model type is not supported').format(model_type=model_type)) + try: + model_list = provider.get_base_model_list(model_credential.get('api_base'), model_credential.get('api_key')) + except Exception as e: + raise AppApiException(ValidCode.valid_error.value, gettext('API domain name is invalid')) + exist = provider.get_model_info_by_name(model_list, model_name) + if len(exist) == 0: + raise AppApiException(ValidCode.valid_error.value, + gettext('The model does not exist, please download the model first')) + model = provider.get_model(model_type, model_name, model_credential, **model_params) + try: + res = model.invoke([HumanMessage(content=gettext('Hello'))]) + except Exception as e: + traceback.print_exc() + raise AppApiException(ValidCode.valid_error.value, + gettext( + 'Verification failed, please check whether the parameters are correct: {error}').format( + error=str(e))) + return True + + def encryption_dict(self, model_info: Dict[str, object]): + return {**model_info, 'api_key': super().encryption(model_info.get('api_key', ''))} + + def build_model(self, model_info: Dict[str, object]): + for key in ['api_key', 'model']: + if key not in model_info: + raise AppApiException(500, gettext('{key} is required').format(key=key)) + self.api_key = model_info.get('api_key') + return self + + api_base = forms.TextInputField('API URL', required=True) + api_key = forms.PasswordInputField('API Key', required=True) + + def 
get_model_params_setting_form(self, model_name): + return VLLMModelParams() diff --git a/apps/setting/models_provider/impl/vllm_model_provider/icon/vllm_icon_svg b/apps/setting/models_provider/impl/vllm_model_provider/icon/vllm_icon_svg new file mode 100644 index 00000000000..1ad7d0a6db1 --- /dev/null +++ b/apps/setting/models_provider/impl/vllm_model_provider/icon/vllm_icon_svg @@ -0,0 +1,5 @@ + + + + diff --git a/apps/setting/models_provider/impl/vllm_model_provider/model/embedding.py b/apps/setting/models_provider/impl/vllm_model_provider/model/embedding.py new file mode 100644 index 00000000000..616d9d9eee3 --- /dev/null +++ b/apps/setting/models_provider/impl/vllm_model_provider/model/embedding.py @@ -0,0 +1,23 @@ +# coding=utf-8 +""" + @project: MaxKB + @Author:虎 + @file: embedding.py + @date:2024/7/12 17:44 + @desc: +""" +from typing import Dict + +from langchain_community.embeddings import OpenAIEmbeddings + +from setting.models_provider.base_model_provider import MaxKBBaseModel + + +class VllmEmbeddingModel(MaxKBBaseModel, OpenAIEmbeddings): + @staticmethod + def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs): + return VllmEmbeddingModel( + model=model_name, + openai_api_key=model_credential.get('api_key'), + openai_api_base=model_credential.get('api_base'), + ) diff --git a/apps/setting/models_provider/impl/vllm_model_provider/model/image.py b/apps/setting/models_provider/impl/vllm_model_provider/model/image.py new file mode 100644 index 00000000000..c8cb0a84db9 --- /dev/null +++ b/apps/setting/models_provider/impl/vllm_model_provider/model/image.py @@ -0,0 +1,38 @@ +from typing import Dict, List + +from langchain_core.messages import get_buffer_string, BaseMessage + +from common.config.tokenizer_manage_config import TokenizerManage +from setting.models_provider.base_model_provider import MaxKBBaseModel +from setting.models_provider.impl.base_chat_open_ai import BaseChatOpenAI + + +class VllmImage(MaxKBBaseModel, 
BaseChatOpenAI): + + @staticmethod + def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs): + optional_params = MaxKBBaseModel.filter_optional_params(model_kwargs) + return VllmImage( + model_name=model_name, + openai_api_base=model_credential.get('api_base'), + openai_api_key=model_credential.get('api_key'), + # stream_options={"include_usage": True}, + streaming=True, + stream_usage=True, + extra_body=optional_params + ) + + def is_cache_model(self): + return False + + def get_num_tokens_from_messages(self, messages: List[BaseMessage]) -> int: + if self.usage_metadata is None or self.usage_metadata == {}: + tokenizer = TokenizerManage.get_tokenizer() + return sum([len(tokenizer.encode(get_buffer_string([m]))) for m in messages]) + return self.usage_metadata.get('input_tokens', 0) + + def get_num_tokens(self, text: str) -> int: + if self.usage_metadata is None or self.usage_metadata == {}: + tokenizer = TokenizerManage.get_tokenizer() + return len(tokenizer.encode(text)) + return self.get_last_generation_info().get('output_tokens', 0) diff --git a/apps/setting/models_provider/impl/vllm_model_provider/model/llm.py b/apps/setting/models_provider/impl/vllm_model_provider/model/llm.py new file mode 100644 index 00000000000..4662a616965 --- /dev/null +++ b/apps/setting/models_provider/impl/vllm_model_provider/model/llm.py @@ -0,0 +1,57 @@ +# coding=utf-8 + +from typing import Dict, Optional, Sequence, Union, Any, Callable +from urllib.parse import urlparse, ParseResult + +from langchain_core.messages import BaseMessage, get_buffer_string +from langchain_core.tools import BaseTool + +from common.config.tokenizer_manage_config import TokenizerManage +from setting.models_provider.base_model_provider import MaxKBBaseModel +from setting.models_provider.impl.base_chat_open_ai import BaseChatOpenAI + + +def get_base_url(url: str): + parse = urlparse(url) + result_url = ParseResult(scheme=parse.scheme, netloc=parse.netloc, 
path=parse.path, params='', + query='', + fragment='').geturl() + return result_url[:-1] if result_url.endswith("/") else result_url + + +class VllmChatModel(MaxKBBaseModel, BaseChatOpenAI): + + @staticmethod + def is_cache_model(): + return False + + @staticmethod + def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs): + optional_params = MaxKBBaseModel.filter_optional_params(model_kwargs) + vllm_chat_open_ai = VllmChatModel( + model=model_name, + openai_api_base=model_credential.get('api_base'), + openai_api_key=model_credential.get('api_key'), + streaming=True, + stream_usage=True, + extra_body=optional_params + ) + return vllm_chat_open_ai + + def get_num_tokens_from_messages( + self, + messages: list[BaseMessage], + tools: Optional[ + Sequence[Union[dict[str, Any], type, Callable, BaseTool]] + ] = None, + ) -> int: + if self.usage_metadata is None or self.usage_metadata == {}: + tokenizer = TokenizerManage.get_tokenizer() + return sum([len(tokenizer.encode(get_buffer_string([m]))) for m in messages]) + return self.usage_metadata.get('input_tokens', 0) + + def get_num_tokens(self, text: str) -> int: + if self.usage_metadata is None or self.usage_metadata == {}: + tokenizer = TokenizerManage.get_tokenizer() + return len(tokenizer.encode(text)) + return self.get_last_generation_info().get('output_tokens', 0) diff --git a/apps/setting/models_provider/impl/vllm_model_provider/vllm_model_provider.py b/apps/setting/models_provider/impl/vllm_model_provider/vllm_model_provider.py new file mode 100644 index 00000000000..7dc6664a088 --- /dev/null +++ b/apps/setting/models_provider/impl/vllm_model_provider/vllm_model_provider.py @@ -0,0 +1,84 @@ +# coding=utf-8 +import os +from urllib.parse import urlparse, ParseResult + +import requests + +from common.util.file_util import get_file_content +from setting.models_provider.base_model_provider import IModelProvider, ModelProvideInfo, ModelInfo, ModelTypeConst, \ + ModelInfoManage +from 
setting.models_provider.impl.vllm_model_provider.credential.embedding import VllmEmbeddingCredential +from setting.models_provider.impl.vllm_model_provider.credential.image import VllmImageModelCredential +from setting.models_provider.impl.vllm_model_provider.credential.llm import VLLMModelCredential +from setting.models_provider.impl.vllm_model_provider.model.embedding import VllmEmbeddingModel +from setting.models_provider.impl.vllm_model_provider.model.image import VllmImage +from setting.models_provider.impl.vllm_model_provider.model.llm import VllmChatModel +from smartdoc.conf import PROJECT_DIR +from django.utils.translation import gettext as _ + +v_llm_model_credential = VLLMModelCredential() +image_model_credential = VllmImageModelCredential() +embedding_model_credential = VllmEmbeddingCredential() + +model_info_list = [ + ModelInfo('facebook/opt-125m', _('Facebook’s 125M parameter model'), ModelTypeConst.LLM, v_llm_model_credential, VllmChatModel), + ModelInfo('BAAI/Aquila-7B', _('BAAI’s 7B parameter model'), ModelTypeConst.LLM, v_llm_model_credential, VllmChatModel), + ModelInfo('BAAI/AquilaChat-7B', _('BAAI’s 13B parameter mode'), ModelTypeConst.LLM, v_llm_model_credential, VllmChatModel), + +] + +image_model_info_list = [ + ModelInfo('Qwen/Qwen2-VL-2B-Instruct', '', ModelTypeConst.IMAGE, image_model_credential, VllmImage), +] + +embedding_model_info_list = [ + ModelInfo('HIT-TMG/KaLM-embedding-multilingual-mini-instruct-v1.5', '', ModelTypeConst.EMBEDDING, embedding_model_credential, VllmEmbeddingModel), +] + +model_info_manage = ( + ModelInfoManage.builder() + .append_model_info_list(model_info_list) + .append_default_model_info(ModelInfo('facebook/opt-125m', + _('Facebook’s 125M parameter model'), + ModelTypeConst.LLM, v_llm_model_credential, VllmChatModel)) + .append_model_info_list(image_model_info_list) + .append_default_model_info(image_model_info_list[0]) + .append_model_info_list(embedding_model_info_list) + 
.append_default_model_info(embedding_model_info_list[0]) + .build() +) + + +def get_base_url(url: str): + parse = urlparse(url) + result_url = ParseResult(scheme=parse.scheme, netloc=parse.netloc, path=parse.path, params='', + query='', + fragment='').geturl() + return result_url[:-1] if result_url.endswith("/") else result_url + + +class VllmModelProvider(IModelProvider): + def get_model_info_manage(self): + return model_info_manage + + def get_model_provide_info(self): + return ModelProvideInfo(provider='model_vllm_provider', name='vLLM', icon=get_file_content( + os.path.join(PROJECT_DIR, "apps", "setting", 'models_provider', 'impl', 'vllm_model_provider', 'icon', + 'vllm_icon_svg'))) + + @staticmethod + def get_base_model_list(api_base, api_key): + base_url = get_base_url(api_base) + base_url = base_url if base_url.endswith('/v1') else (base_url + '/v1') + headers = {} + if api_key: + headers['Authorization'] = f"Bearer {api_key}" + r = requests.request(method="GET", url=f"{base_url}/models", headers=headers, timeout=5) + r.raise_for_status() + return r.json().get('data') + + @staticmethod + def get_model_info_by_name(model_list, model_name): + if model_list is None: + return [] + return [model for model in model_list if model.get('id') == model_name] diff --git a/apps/setting/models_provider/impl/volcanic_engine_model_provider/__init__.py b/apps/setting/models_provider/impl/volcanic_engine_model_provider/__init__.py new file mode 100644 index 00000000000..8cb7f459eae --- /dev/null +++ b/apps/setting/models_provider/impl/volcanic_engine_model_provider/__init__.py @@ -0,0 +1,2 @@ +#!/usr/bin/env python +# -*- coding: UTF-8 -*- diff --git a/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/embedding.py b/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/embedding.py new file mode 100644 index 00000000000..40cb8baee38 --- /dev/null +++ 
b/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/embedding.py @@ -0,0 +1,53 @@ +# coding=utf-8 +""" + @project: MaxKB + @Author:虎 + @file: embedding.py + @date:2024/7/12 16:45 + @desc: +""" +import traceback +from typing import Dict + +from django.utils.translation import gettext as _ + +from common import forms +from common.exception.app_exception import AppApiException +from common.forms import BaseForm +from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode + + +class VolcanicEmbeddingCredential(BaseForm, BaseModelCredential): + def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], model_params, provider, + raise_exception=True): + model_type_list = provider.get_model_type_list() + if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))): + raise AppApiException(ValidCode.valid_error.value, + _('{model_type} Model type is not supported').format(model_type=model_type)) + + for key in ['api_base', 'api_key']: + if key not in model_credential: + if raise_exception: + raise AppApiException(ValidCode.valid_error.value, _('{key} is required').format(key=key)) + else: + return False + try: + model = provider.get_model(model_type, model_name, model_credential) + model.embed_query(_('Hello')) + except Exception as e: + traceback.print_exc() + if isinstance(e, AppApiException): + raise e + if raise_exception: + raise AppApiException(ValidCode.valid_error.value, + _('Verification failed, please check whether the parameters are correct: {error}').format( + error=str(e))) + else: + return False + return True + + def encryption_dict(self, model: Dict[str, object]): + return {**model, 'api_key': super().encryption(model.get('api_key', ''))} + + api_base = forms.TextInputField('API URL', required=True) + api_key = forms.PasswordInputField('API Key', required=True) diff --git a/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/image.py 
b/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/image.py new file mode 100644 index 00000000000..23d22cf932c --- /dev/null +++ b/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/image.py @@ -0,0 +1,72 @@ +# coding=utf-8 +import traceback +from typing import Dict + +from django.utils.translation import gettext_lazy as _, gettext +from langchain_core.messages import HumanMessage + +from common import forms +from common.exception.app_exception import AppApiException +from common.forms import BaseForm, TooltipLabel +from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode + + +class VolcanicEngineImageModelParams(BaseForm): + temperature = forms.SliderField(TooltipLabel(_('Temperature'), + _('Higher values make the output more random, while lower values make it more focused and deterministic')), + required=True, default_value=0.95, + _min=0.1, + _max=1.0, + _step=0.01, + precision=2) + + max_tokens = forms.SliderField( + TooltipLabel(_('Output the maximum Tokens'), + _('Specify the maximum number of tokens that the model can generate')), + required=True, default_value=1024, + _min=1, + _max=100000, + _step=1, + precision=0) + + +class VolcanicEngineImageModelCredential(BaseForm, BaseModelCredential): + api_key = forms.PasswordInputField('API Key', required=True) + api_base = forms.TextInputField('API URL', required=True) + + def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], model_params, provider, + raise_exception=False): + model_type_list = provider.get_model_type_list() + if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))): + raise AppApiException(ValidCode.valid_error.value, + gettext('{model_type} Model type is not supported').format(model_type=model_type)) + + for key in ['api_key', 'api_base']: + if key not in model_credential: + if raise_exception: + raise AppApiException(ValidCode.valid_error.value, 
gettext('{key} is required').format(key=key)) + else: + return False + try: + model = provider.get_model(model_type, model_name, model_credential, **model_params) + res = model.stream([HumanMessage(content=[{"type": "text", "text": gettext('Hello')}])]) + for chunk in res: + print(chunk) + except Exception as e: + traceback.print_exc() + if isinstance(e, AppApiException): + raise e + if raise_exception: + raise AppApiException(ValidCode.valid_error.value, + gettext( + 'Verification failed, please check whether the parameters are correct: {error}').format( + error=str(e))) + else: + return False + return True + + def encryption_dict(self, model: Dict[str, object]): + return {**model, 'api_key': super().encryption(model.get('api_key', ''))} + + def get_model_params_setting_form(self, model_name): + return VolcanicEngineImageModelParams() diff --git a/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/llm.py b/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/llm.py new file mode 100644 index 00000000000..3b7734da5b0 --- /dev/null +++ b/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/llm.py @@ -0,0 +1,79 @@ +# coding=utf-8 +""" + @project: MaxKB + @Author:虎 + @file: llm.py + @date:2024/7/11 17:57 + @desc: +""" +import traceback +from typing import Dict + +from django.utils.translation import gettext_lazy as _, gettext +from langchain_core.messages import HumanMessage + +from common import forms +from common.exception.app_exception import AppApiException +from common.forms import BaseForm, TooltipLabel +from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode + + +class VolcanicEngineLLMModelParams(BaseForm): + temperature = forms.SliderField(TooltipLabel(_('Temperature'), + _('Higher values make the output more random, while lower values make it more focused and deterministic')), + required=True, default_value=0.3, + _min=0.1, + _max=1.0, + _step=0.01, + 
precision=2) + + max_tokens = forms.SliderField( + TooltipLabel(_('Output the maximum Tokens'), + _('Specify the maximum number of tokens that the model can generate')), + required=True, default_value=1024, + _min=1, + _max=100000, + _step=1, + precision=0) + + +class VolcanicEngineLLMModelCredential(BaseForm, BaseModelCredential): + + def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], model_params, provider, + raise_exception=False): + model_type_list = provider.get_model_type_list() + if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))): + raise AppApiException(ValidCode.valid_error.value, + gettext('{model_type} Model type is not supported').format(model_type=model_type)) + + for key in ['access_key_id', 'secret_access_key']: + if key not in model_credential: + if raise_exception: + raise AppApiException(ValidCode.valid_error.value, gettext('{key} is required').format(key=key)) + else: + return False + try: + model = provider.get_model(model_type, model_name, model_credential, **model_params) + res = model.invoke([HumanMessage(content=gettext('Hello'))]) + print(res) + except Exception as e: + traceback.print_exc() + if isinstance(e, AppApiException): + raise e + if raise_exception: + raise AppApiException(ValidCode.valid_error.value, + gettext( + 'Verification failed, please check whether the parameters are correct: {error}').format( + error=str(e))) + else: + return False + return True + + def encryption_dict(self, model: Dict[str, object]): + return {**model, 'access_key_id': super().encryption(model.get('access_key_id', ''))} + + access_key_id = forms.PasswordInputField('Access Key ID', required=True) + secret_access_key = forms.PasswordInputField('Secret Access Key', required=True) + + def get_model_params_setting_form(self, model_name): + return VolcanicEngineLLMModelParams() diff --git a/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/stt.py 
b/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/stt.py new file mode 100644 index 00000000000..6aae433c373 --- /dev/null +++ b/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/stt.py @@ -0,0 +1,52 @@ +# coding=utf-8 +import traceback +from typing import Dict + +from django.utils.translation import gettext as _ + +from common import forms +from common.exception.app_exception import AppApiException +from common.forms import BaseForm +from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode + + +class VolcanicEngineSTTModelCredential(BaseForm, BaseModelCredential): + volcanic_api_url = forms.TextInputField('API URL', required=True, + default_value='wss://openspeech.bytedance.com/api/v2/asr') + volcanic_app_id = forms.TextInputField('App ID', required=True) + volcanic_token = forms.PasswordInputField('Access Token', required=True) + volcanic_cluster = forms.TextInputField('Cluster ID', required=True) + + def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], model_params, provider, + raise_exception=False): + model_type_list = provider.get_model_type_list() + if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))): + raise AppApiException(ValidCode.valid_error.value, + _('{model_type} Model type is not supported').format(model_type=model_type)) + + for key in ['volcanic_api_url', 'volcanic_app_id', 'volcanic_token', 'volcanic_cluster']: + if key not in model_credential: + if raise_exception: + raise AppApiException(ValidCode.valid_error.value, _('{key} is required').format(key=key)) + else: + return False + try: + model = provider.get_model(model_type, model_name, model_credential) + model.check_auth() + except Exception as e: + traceback.print_exc() + if isinstance(e, AppApiException): + raise e + if raise_exception: + raise AppApiException(ValidCode.valid_error.value, + _('Verification failed, please check whether the 
parameters are correct: {error}').format( + error=str(e))) + else: + return False + return True + + def encryption_dict(self, model: Dict[str, object]): + return {**model, 'volcanic_token': super().encryption(model.get('volcanic_token', ''))} + + def get_model_params_setting_form(self, model_name): + pass diff --git a/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/tti.py b/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/tti.py new file mode 100644 index 00000000000..98c119e21cb --- /dev/null +++ b/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/tti.py @@ -0,0 +1,68 @@ +# coding=utf-8 +import traceback +from typing import Dict + +from django.utils.translation import gettext_lazy as _, gettext + +from common import forms +from common.exception.app_exception import AppApiException +from common.forms import BaseForm, TooltipLabel +from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode + + +class VolcanicEngineTTIModelGeneralParams(BaseForm): + size = forms.SingleSelect( + TooltipLabel(_('Image size'), + _('If the gap between width, height and 512 is too large, the picture rendering effect will be poor and the probability of excessive delay will increase significantly. 
Recommended ratio and corresponding width and height before super score: width*height')), + required=True, + default_value='512*512', + option_list=[ + {'value': '512*512', 'label': '512*512'}, + {'value': '512*384', 'label': '512*384'}, + {'value': '384*512', 'label': '384*512'}, + {'value': '512*341', 'label': '512*341'}, + {'value': '341*512', 'label': '341*512'}, + {'value': '512*288', 'label': '512*288'}, + {'value': '288*512', 'label': '288*512'}, + ], + text_field='label', + value_field='value') + + +class VolcanicEngineTTIModelCredential(BaseForm, BaseModelCredential): + access_key = forms.PasswordInputField('Access Key ID', required=True) + secret_key = forms.PasswordInputField('Secret Access Key', required=True) + + def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], model_params, provider, + raise_exception=False): + model_type_list = provider.get_model_type_list() + if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))): + raise AppApiException(ValidCode.valid_error.value, + gettext('{model_type} Model type is not supported').format(model_type=model_type)) + + for key in ['access_key', 'secret_key']: + if key not in model_credential: + if raise_exception: + raise AppApiException(ValidCode.valid_error.value, gettext('{key} is required').format(key=key)) + else: + return False + try: + model = provider.get_model(model_type, model_name, model_credential, **model_params) + model.check_auth() + except Exception as e: + traceback.print_exc() + if isinstance(e, AppApiException): + raise e + if raise_exception: + raise AppApiException(ValidCode.valid_error.value, gettext( + 'Verification failed, please check whether the parameters are correct: {error}').format( + error=str(e))) + else: + return False + return True + + def encryption_dict(self, model: Dict[str, object]): + return {**model, 'secret_key': super().encryption(model.get('secret_key', ''))} + + def get_model_params_setting_form(self, 
model_name): + return VolcanicEngineTTIModelGeneralParams() diff --git a/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/tts.py b/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/tts.py new file mode 100644 index 00000000000..4d0b68363ff --- /dev/null +++ b/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/tts.py @@ -0,0 +1,78 @@ +# coding=utf-8 +import traceback +from typing import Dict + +from django.utils.translation import gettext_lazy as _, gettext + +from common import forms +from common.exception.app_exception import AppApiException +from common.forms import BaseForm, TooltipLabel +from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode + + +class VolcanicEngineTTSModelGeneralParams(BaseForm): + voice_type = forms.SingleSelect( + TooltipLabel(_('timbre'), _('Chinese sounds can support mixed scenes of Chinese and English')), + required=True, default_value='zh_female_cancan_mars_bigtts', + text_field='value', + value_field='value', + option_list=[ + {'text': '灿灿/Shiny', 'value': 'zh_female_cancan_mars_bigtts'}, + {'text': '清新女声', 'value': 'zh_female_qingxinnvsheng_mars_bigtts'}, + {'text': '爽快思思/Skye', 'value': 'zh_female_shuangkuaisisi_moon_bigtts'}, + {'text': '湾区大叔', 'value': 'zh_female_wanqudashu_moon_bigtts' }, + {'text': '呆萌川妹', 'value': 'zh_female_daimengchuanmei_moon_bigtts'}, + {'text': '广州德哥', 'value': 'zh_male_guozhoudege_moon_bigtts'}, + {'text': '北京小爷', 'value': 'zh_male_beijingxiaoye_moon_bigtts'}, + {'text': '少年梓辛/Brayan', 'value': 'zh_male_shaonianzixin_moon_bigtts'}, + {'text': '魅力女友', 'value': 'zh_female_meilinvyou_moon_bigtts'}, + ]) + speed_ratio = forms.SliderField( + TooltipLabel(_('speaking speed'), _('[0.2,3], the default is 1, usually one decimal place is enough')), + required=True, default_value=1, + _min=0.2, + _max=3, + _step=0.1, + precision=1) + + +class VolcanicEngineTTSModelCredential(BaseForm, BaseModelCredential): + 
volcanic_api_url = forms.TextInputField('API URL', required=True, + default_value='wss://openspeech.bytedance.com/api/v1/tts/ws_binary') + volcanic_app_id = forms.TextInputField('App ID', required=True) + volcanic_token = forms.PasswordInputField('Access Token', required=True) + volcanic_cluster = forms.TextInputField('Cluster ID', required=True) + + def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], model_params, provider, + raise_exception=False): + model_type_list = provider.get_model_type_list() + if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))): + raise AppApiException(ValidCode.valid_error.value, + gettext('{model_type} Model type is not supported').format(model_type=model_type)) + + for key in ['volcanic_api_url', 'volcanic_app_id', 'volcanic_token', 'volcanic_cluster']: + if key not in model_credential: + if raise_exception: + raise AppApiException(ValidCode.valid_error.value, gettext('{key} is required').format(key=key)) + else: + return False + try: + model = provider.get_model(model_type, model_name, model_credential, **model_params) + model.check_auth() + except Exception as e: + traceback.print_exc() + if isinstance(e, AppApiException): + raise e + if raise_exception: + raise AppApiException(ValidCode.valid_error.value, gettext( + 'Verification failed, please check whether the parameters are correct: {error}').format( + error=str(e))) + else: + return False + return True + + def encryption_dict(self, model: Dict[str, object]): + return {**model, 'volcanic_token': super().encryption(model.get('volcanic_token', ''))} + + def get_model_params_setting_form(self, model_name): + return VolcanicEngineTTSModelGeneralParams() diff --git a/apps/setting/models_provider/impl/volcanic_engine_model_provider/icon/volcanic_engine_icon_svg b/apps/setting/models_provider/impl/volcanic_engine_model_provider/icon/volcanic_engine_icon_svg new file mode 100644 index 00000000000..05a1279ef4d --- /dev/null 
+++ b/apps/setting/models_provider/impl/volcanic_engine_model_provider/icon/volcanic_engine_icon_svg @@ -0,0 +1,5 @@ + + + + diff --git a/apps/setting/models_provider/impl/volcanic_engine_model_provider/model/embedding.py b/apps/setting/models_provider/impl/volcanic_engine_model_provider/model/embedding.py new file mode 100644 index 00000000000..b950beacf34 --- /dev/null +++ b/apps/setting/models_provider/impl/volcanic_engine_model_provider/model/embedding.py @@ -0,0 +1,16 @@ +from typing import Dict + +from langchain_openai import OpenAIEmbeddings + +from setting.models_provider.base_model_provider import MaxKBBaseModel + + +class VolcanicEngineEmbeddingModel(MaxKBBaseModel, OpenAIEmbeddings): + @staticmethod + def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs): + return VolcanicEngineEmbeddingModel( + openai_api_key=model_credential.get('api_key'), + model=model_name, + openai_api_base=model_credential.get('api_base'), + check_embedding_ctx_length=False, + ) diff --git a/apps/setting/models_provider/impl/volcanic_engine_model_provider/model/iat_mp3_16k.mp3 b/apps/setting/models_provider/impl/volcanic_engine_model_provider/model/iat_mp3_16k.mp3 new file mode 100644 index 00000000000..75e744c8ff5 Binary files /dev/null and b/apps/setting/models_provider/impl/volcanic_engine_model_provider/model/iat_mp3_16k.mp3 differ diff --git a/apps/setting/models_provider/impl/volcanic_engine_model_provider/model/image.py b/apps/setting/models_provider/impl/volcanic_engine_model_provider/model/image.py new file mode 100644 index 00000000000..6e2517bd4ad --- /dev/null +++ b/apps/setting/models_provider/impl/volcanic_engine_model_provider/model/image.py @@ -0,0 +1,20 @@ +from typing import Dict + +from setting.models_provider.base_model_provider import MaxKBBaseModel +from setting.models_provider.impl.base_chat_open_ai import BaseChatOpenAI + + +class VolcanicEngineImage(MaxKBBaseModel, BaseChatOpenAI): + + @staticmethod + def 
new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs): + optional_params = MaxKBBaseModel.filter_optional_params(model_kwargs) + return VolcanicEngineImage( + model_name=model_name, + openai_api_key=model_credential.get('api_key'), + openai_api_base=model_credential.get('api_base'), + # stream_options={"include_usage": True}, + streaming=True, + stream_usage=True, + extra_body=optional_params + ) diff --git a/apps/setting/models_provider/impl/volcanic_engine_model_provider/model/llm.py b/apps/setting/models_provider/impl/volcanic_engine_model_provider/model/llm.py new file mode 100644 index 00000000000..8f089f26988 --- /dev/null +++ b/apps/setting/models_provider/impl/volcanic_engine_model_provider/model/llm.py @@ -0,0 +1,21 @@ +from typing import List, Dict + +from setting.models_provider.base_model_provider import MaxKBBaseModel + +from setting.models_provider.impl.base_chat_open_ai import BaseChatOpenAI + + +class VolcanicEngineChatModel(MaxKBBaseModel, BaseChatOpenAI): + @staticmethod + def is_cache_model(): + return False + + @staticmethod + def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs): + optional_params = MaxKBBaseModel.filter_optional_params(model_kwargs) + return VolcanicEngineChatModel( + model=model_name, + openai_api_base=model_credential.get('api_base'), + openai_api_key=model_credential.get('api_key'), + extra_body=optional_params + ) diff --git a/apps/setting/models_provider/impl/volcanic_engine_model_provider/model/stt.py b/apps/setting/models_provider/impl/volcanic_engine_model_provider/model/stt.py new file mode 100644 index 00000000000..c441bee8ed3 --- /dev/null +++ b/apps/setting/models_provider/impl/volcanic_engine_model_provider/model/stt.py @@ -0,0 +1,343 @@ +# coding=utf-8 + +""" +requires Python 3.6 or later + +pip install asyncio +pip install websockets +""" +import asyncio +import base64 +import gzip +import hmac +import json +import os +import ssl +import 
uuid +import wave +from hashlib import sha256 +from io import BytesIO +from typing import Dict +from urllib.parse import urlparse + +import websockets + +from setting.models_provider.base_model_provider import MaxKBBaseModel +from setting.models_provider.impl.base_stt import BaseSpeechToText + +audio_format = "mp3" # wav 或者 mp3,根据实际音频格式设置 + +PROTOCOL_VERSION = 0b0001 +DEFAULT_HEADER_SIZE = 0b0001 + +PROTOCOL_VERSION_BITS = 4 +HEADER_BITS = 4 +MESSAGE_TYPE_BITS = 4 +MESSAGE_TYPE_SPECIFIC_FLAGS_BITS = 4 +MESSAGE_SERIALIZATION_BITS = 4 +MESSAGE_COMPRESSION_BITS = 4 +RESERVED_BITS = 8 + +# Message Type: +CLIENT_FULL_REQUEST = 0b0001 +CLIENT_AUDIO_ONLY_REQUEST = 0b0010 +SERVER_FULL_RESPONSE = 0b1001 +SERVER_ACK = 0b1011 +SERVER_ERROR_RESPONSE = 0b1111 + +# Message Type Specific Flags +NO_SEQUENCE = 0b0000 # no check sequence +POS_SEQUENCE = 0b0001 +NEG_SEQUENCE = 0b0010 +NEG_SEQUENCE_1 = 0b0011 + +# Message Serialization +NO_SERIALIZATION = 0b0000 +JSON = 0b0001 +THRIFT = 0b0011 +CUSTOM_TYPE = 0b1111 + +# Message Compression +NO_COMPRESSION = 0b0000 +GZIP = 0b0001 +CUSTOM_COMPRESSION = 0b1111 + +ssl_context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT) +ssl_context.check_hostname = False +ssl_context.verify_mode = ssl.CERT_NONE + + +def generate_header( + version=PROTOCOL_VERSION, + message_type=CLIENT_FULL_REQUEST, + message_type_specific_flags=NO_SEQUENCE, + serial_method=JSON, + compression_type=GZIP, + reserved_data=0x00, + extension_header=bytes() +): + """ + protocol_version(4 bits), header_size(4 bits), + message_type(4 bits), message_type_specific_flags(4 bits) + serialization_method(4 bits) message_compression(4 bits) + reserved (8bits) 保留字段 + header_extensions 扩展头(大小等于 8 * 4 * (header_size - 1) ) + """ + header = bytearray() + header_size = int(len(extension_header) / 4) + 1 + header.append((version << 4) | header_size) + header.append((message_type << 4) | message_type_specific_flags) + header.append((serial_method << 4) | compression_type) + 
header.append(reserved_data) + header.extend(extension_header) + return header + + +def generate_full_default_header(): + return generate_header() + + +def generate_audio_default_header(): + return generate_header( + message_type=CLIENT_AUDIO_ONLY_REQUEST + ) + + +def generate_last_audio_default_header(): + return generate_header( + message_type=CLIENT_AUDIO_ONLY_REQUEST, + message_type_specific_flags=NEG_SEQUENCE + ) + + +def parse_response(res): + """ + protocol_version(4 bits), header_size(4 bits), + message_type(4 bits), message_type_specific_flags(4 bits) + serialization_method(4 bits) message_compression(4 bits) + reserved (8bits) 保留字段 + header_extensions 扩展头(大小等于 8 * 4 * (header_size - 1) ) + payload 类似与http 请求体 + """ + protocol_version = res[0] >> 4 + header_size = res[0] & 0x0f + message_type = res[1] >> 4 + message_type_specific_flags = res[1] & 0x0f + serialization_method = res[2] >> 4 + message_compression = res[2] & 0x0f + reserved = res[3] + header_extensions = res[4:header_size * 4] + payload = res[header_size * 4:] + result = {} + payload_msg = None + payload_size = 0 + if message_type == SERVER_FULL_RESPONSE: + payload_size = int.from_bytes(payload[:4], "big", signed=True) + payload_msg = payload[4:] + elif message_type == SERVER_ACK: + seq = int.from_bytes(payload[:4], "big", signed=True) + result['seq'] = seq + if len(payload) >= 8: + payload_size = int.from_bytes(payload[4:8], "big", signed=False) + payload_msg = payload[8:] + elif message_type == SERVER_ERROR_RESPONSE: + code = int.from_bytes(payload[:4], "big", signed=False) + result['code'] = code + payload_size = int.from_bytes(payload[4:8], "big", signed=False) + payload_msg = payload[8:] + print(f"Error code: {code}, message: {payload_msg}") + if payload_msg is None: + return result + if message_compression == GZIP: + payload_msg = gzip.decompress(payload_msg) + if serialization_method == JSON: + payload_msg = json.loads(str(payload_msg, "utf-8")) + elif serialization_method != 
NO_SERIALIZATION: + payload_msg = str(payload_msg, "utf-8") + result['payload_msg'] = payload_msg + result['payload_size'] = payload_size + return result + + +def read_wav_info(data: bytes = None) -> (int, int, int, int, int): + with BytesIO(data) as _f: + wave_fp = wave.open(_f, 'rb') + nchannels, sampwidth, framerate, nframes = wave_fp.getparams()[:4] + wave_bytes = wave_fp.readframes(nframes) + return nchannels, sampwidth, framerate, nframes, len(wave_bytes) + + +class VolcanicEngineSpeechToText(MaxKBBaseModel, BaseSpeechToText): + workflow: str = "audio_in,resample,partition,vad,fe,decode,itn,nlu_punctuate" + show_language: bool = False + show_utterances: bool = False + result_type: str = "full" + format: str = "mp3" + rate: int = 16000 + language: str = "zh-CN" + bits: int = 16 + channel: int = 1 + codec: str = "raw" + audio_type: int = 1 + secret: str = "access_secret" + auth_method: str = "token" + mp3_seg_size: int = 10000 + success_code: int = 1000 # success code, default is 1000 + seg_duration: int = 15000 + nbest: int = 1 + + volcanic_app_id: str + volcanic_cluster: str + volcanic_api_url: str + volcanic_token: str + + def __init__(self, **kwargs): + super().__init__(**kwargs) + self.volcanic_api_url = kwargs.get('volcanic_api_url') + self.volcanic_token = kwargs.get('volcanic_token') + self.volcanic_app_id = kwargs.get('volcanic_app_id') + self.volcanic_cluster = kwargs.get('volcanic_cluster') + + @staticmethod + def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs): + optional_params = {} + if 'max_tokens' in model_kwargs and model_kwargs['max_tokens'] is not None: + optional_params['max_tokens'] = model_kwargs['max_tokens'] + if 'temperature' in model_kwargs and model_kwargs['temperature'] is not None: + optional_params['temperature'] = model_kwargs['temperature'] + return VolcanicEngineSpeechToText( + volcanic_api_url=model_credential.get('volcanic_api_url'), + 
volcanic_token=model_credential.get('volcanic_token'), + volcanic_app_id=model_credential.get('volcanic_app_id'), + volcanic_cluster=model_credential.get('volcanic_cluster'), + **optional_params + ) + + def construct_request(self, reqid): + req = { + 'app': { + 'appid': self.volcanic_app_id, + 'cluster': self.volcanic_cluster, + 'token': self.volcanic_token, + }, + 'user': { + 'uid': 'uid' + }, + 'request': { + 'reqid': reqid, + 'nbest': self.nbest, + 'workflow': self.workflow, + 'show_language': self.show_language, + 'show_utterances': self.show_utterances, + 'result_type': self.result_type, + "sequence": 1 + }, + 'audio': { + 'format': self.format, + 'rate': self.rate, + 'language': self.language, + 'bits': self.bits, + 'channel': self.channel, + 'codec': self.codec + } + } + return req + + @staticmethod + def slice_data(data: bytes, chunk_size: int) -> (list, bool): + """ + slice data + :param data: wav data + :param chunk_size: the segment size in one request + :return: segment data, last flag + """ + data_len = len(data) + offset = 0 + while offset + chunk_size < data_len: + yield data[offset: offset + chunk_size], False + offset += chunk_size + else: + yield data[offset: data_len], True + + def _real_processor(self, request_params: dict) -> dict: + pass + + def token_auth(self): + return {'Authorization': 'Bearer; {}'.format(self.volcanic_token)} + + def signature_auth(self, data): + header_dicts = { + 'Custom': 'auth_custom', + } + + url_parse = urlparse(self.volcanic_api_url) + input_str = 'GET {} HTTP/1.1\n'.format(url_parse.path) + auth_headers = 'Custom' + for header in auth_headers.split(','): + input_str += '{}\n'.format(header_dicts[header]) + input_data = bytearray(input_str, 'utf-8') + input_data += data + mac = base64.urlsafe_b64encode( + hmac.new(self.secret.encode('utf-8'), input_data, digestmod=sha256).digest()) + header_dicts['Authorization'] = 'HMAC256; access_token="{}"; mac="{}"; h="{}"'.format(self.volcanic_token, + str(mac, 'utf-8'), + 
auth_headers) + return header_dicts + + async def segment_data_processor(self, wav_data: bytes, segment_size: int): + reqid = str(uuid.uuid4()) + # 构建 full client request,并序列化压缩 + request_params = self.construct_request(reqid) + payload_bytes = str.encode(json.dumps(request_params)) + payload_bytes = gzip.compress(payload_bytes) + full_client_request = bytearray(generate_full_default_header()) + full_client_request.extend((len(payload_bytes)).to_bytes(4, 'big')) # payload size(4 bytes) + full_client_request.extend(payload_bytes) # payload + header = None + if self.auth_method == "token": + header = self.token_auth() + elif self.auth_method == "signature": + header = self.signature_auth(full_client_request) + async with websockets.connect(self.volcanic_api_url, extra_headers=header, max_size=1000000000, + ssl=ssl_context) as ws: + # 发送 full client request + await ws.send(full_client_request) + res = await ws.recv() + result = parse_response(res) + if 'payload_msg' in result and result['payload_msg']['code'] != self.success_code: + raise Exception( + f"Error code: {result['payload_msg']['code']}, message: {result['payload_msg']['message']}") + for seq, (chunk, last) in enumerate(VolcanicEngineSpeechToText.slice_data(wav_data, segment_size), 1): + # if no compression, comment this line + payload_bytes = gzip.compress(chunk) + audio_only_request = bytearray(generate_audio_default_header()) + if last: + audio_only_request = bytearray(generate_last_audio_default_header()) + audio_only_request.extend((len(payload_bytes)).to_bytes(4, 'big')) # payload size(4 bytes) + audio_only_request.extend(payload_bytes) # payload + # 发送 audio-only client request + await ws.send(audio_only_request) + res = await ws.recv() + result = parse_response(res) + if 'payload_msg' in result and result['payload_msg']['code'] != self.success_code: + return result + return result['payload_msg']['result'][0]['text'] + + def check_auth(self): + cwd = os.path.dirname(os.path.abspath(__file__)) + with 
open(f'{cwd}/iat_mp3_16k.mp3', 'rb') as f: + self.speech_to_text(f) + + def speech_to_text(self, file): + data = file.read() + audio_data = bytes(data) + if self.format == "mp3": + segment_size = self.mp3_seg_size + return asyncio.run(self.segment_data_processor(audio_data, segment_size)) + if self.format != "wav": + raise Exception("format should in wav or mp3") + nchannels, sampwidth, framerate, nframes, wav_len = read_wav_info( + audio_data) + size_per_sec = nchannels * sampwidth * framerate + segment_size = int(size_per_sec * self.seg_duration / 1000) + return asyncio.run(self.segment_data_processor(audio_data, segment_size)) diff --git a/apps/setting/models_provider/impl/volcanic_engine_model_provider/model/tti.py b/apps/setting/models_provider/impl/volcanic_engine_model_provider/model/tti.py new file mode 100644 index 00000000000..dd021c64320 --- /dev/null +++ b/apps/setting/models_provider/impl/volcanic_engine_model_provider/model/tti.py @@ -0,0 +1,172 @@ +# coding=utf-8 + +''' +requires Python 3.6 or later + +pip install asyncio +pip install websockets + +''' + +import datetime +import hashlib +import hmac +import json +import sys +from typing import Dict + +import requests + +from setting.models_provider.base_model_provider import MaxKBBaseModel +from setting.models_provider.impl.base_tti import BaseTextToImage + +method = 'POST' +host = 'visual.volcengineapi.com' +region = 'cn-north-1' +endpoint = 'https://visual.volcengineapi.com' +service = 'cv' + +req_key_dict = { + 'general_v1.4': 'high_aes_general_v14', + 'general_v2.0': 'high_aes_general_v20', + 'general_v2.0_L': 'high_aes_general_v20_L', + 'anime_v1.3': 'high_aes', + 'anime_v1.3.1': 'high_aes', +} + + +def sign(key, msg): + return hmac.new(key, msg.encode('utf-8'), hashlib.sha256).digest() + + +def getSignatureKey(key, dateStamp, regionName, serviceName): + kDate = sign(key.encode('utf-8'), dateStamp) + kRegion = sign(kDate, regionName) + kService = sign(kRegion, serviceName) + kSigning = 
sign(kService, 'request') + return kSigning + + +def formatQuery(parameters): + request_parameters_init = '' + for key in sorted(parameters): + request_parameters_init += key + '=' + parameters[key] + '&' + request_parameters = request_parameters_init[:-1] + return request_parameters + + +def signV4Request(access_key, secret_key, service, req_query, req_body): + if access_key is None or secret_key is None: + print('No access key is available.') + sys.exit() + + t = datetime.datetime.utcnow() + current_date = t.strftime('%Y%m%dT%H%M%SZ') + # current_date = '20210818T095729Z' + datestamp = t.strftime('%Y%m%d') # Date w/o time, used in credential scope + canonical_uri = '/' + canonical_querystring = req_query + signed_headers = 'content-type;host;x-content-sha256;x-date' + payload_hash = hashlib.sha256(req_body.encode('utf-8')).hexdigest() + content_type = 'application/json' + canonical_headers = 'content-type:' + content_type + '\n' + 'host:' + host + \ + '\n' + 'x-content-sha256:' + payload_hash + \ + '\n' + 'x-date:' + current_date + '\n' + canonical_request = method + '\n' + canonical_uri + '\n' + canonical_querystring + \ + '\n' + canonical_headers + '\n' + signed_headers + '\n' + payload_hash + # print(canonical_request) + algorithm = 'HMAC-SHA256' + credential_scope = datestamp + '/' + region + '/' + service + '/' + 'request' + string_to_sign = algorithm + '\n' + current_date + '\n' + credential_scope + '\n' + hashlib.sha256( + canonical_request.encode('utf-8')).hexdigest() + # print(string_to_sign) + signing_key = getSignatureKey(secret_key, datestamp, region, service) + # print(signing_key) + signature = hmac.new(signing_key, (string_to_sign).encode( + 'utf-8'), hashlib.sha256).hexdigest() + # print(signature) + + authorization_header = algorithm + ' ' + 'Credential=' + access_key + '/' + \ + credential_scope + ', ' + 'SignedHeaders=' + \ + signed_headers + ', ' + 'Signature=' + signature + # print(authorization_header) + headers = {'X-Date': current_date, + 
'Authorization': authorization_header, + 'X-Content-Sha256': payload_hash, + 'Content-Type': content_type + } + # print(headers) + + # ************* SEND THE REQUEST ************* + request_url = endpoint + '?' + canonical_querystring + + print('\nBEGIN REQUEST++++++++++++++++++++++++++++++++++++') + print('Request URL = ' + request_url) + try: + r = requests.post(request_url, headers=headers, data=req_body) + except Exception as err: + print(f'error occurred: {err}') + raise + else: + print('\nRESPONSE++++++++++++++++++++++++++++++++++++') + print(f'Response code: {r.status_code}\n') + # 使用 replace 方法将 \u0026 替换为 & + resp_str = r.text.replace("\\u0026", "&") + if r.status_code != 200: + raise Exception(f'Error: {resp_str}') + print(f'Response body: {resp_str}\n') + return json.loads(resp_str)['data']['image_urls'] + + +class VolcanicEngineTextToImage(MaxKBBaseModel, BaseTextToImage): + access_key: str + secret_key: str + model_version: str + params: dict + + def __init__(self, **kwargs): + super().__init__(**kwargs) + self.access_key = kwargs.get('access_key') + self.secret_key = kwargs.get('secret_key') + self.model_version = kwargs.get('model_version') + self.params = kwargs.get('params') + + @staticmethod + def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs): + optional_params = {'params': {}} + for key, value in model_kwargs.items(): + if key not in ['model_id', 'use_local', 'streaming']: + optional_params['params'][key] = value + return VolcanicEngineTextToImage( + model_version=model_name, + access_key=model_credential.get('access_key'), + secret_key=model_credential.get('secret_key'), + **optional_params + ) + + def check_auth(self): + res = self.generate_image('生成一张小猫图片') + print(res) + + def generate_image(self, prompt: str, negative_prompt: str = None): + # 请求Query,按照接口文档中填入即可 + query_params = { + 'Action': 'CVProcess', + 'Version': '2022-08-31', + } + formatted_query = formatQuery(query_params) + size = 
self.params.pop('size', '512*512').split('*') + body_params = { + "req_key": req_key_dict[self.model_version], + "prompt": prompt, + "model_version": self.model_version, + "return_url": True, + "width": int(size[0]), + "height": int(size[1]), + **self.params + } + formatted_body = json.dumps(body_params) + return signV4Request(self.access_key, self.secret_key, service, formatted_query, formatted_body) + + def is_cache_model(self): + return False diff --git a/apps/setting/models_provider/impl/volcanic_engine_model_provider/model/tts.py b/apps/setting/models_provider/impl/volcanic_engine_model_provider/model/tts.py new file mode 100644 index 00000000000..4c1bfcc6f33 --- /dev/null +++ b/apps/setting/models_provider/impl/volcanic_engine_model_provider/model/tts.py @@ -0,0 +1,182 @@ +# coding=utf-8 + +''' +requires Python 3.6 or later + +pip install asyncio +pip install websockets + +''' + +import asyncio +import copy +import gzip +import json +import re +import ssl +import uuid +from typing import Dict + +import websockets +from django.utils.translation import gettext as _ + +from common.util.common import _remove_empty_lines +from setting.models_provider.base_model_provider import MaxKBBaseModel +from setting.models_provider.impl.base_tts import BaseTextToSpeech + +MESSAGE_TYPES = {11: "audio-only server response", 12: "frontend server response", 15: "error message from server"} +MESSAGE_TYPE_SPECIFIC_FLAGS = {0: "no sequence number", 1: "sequence number > 0", + 2: "last message from server (seq < 0)", 3: "sequence number < 0"} +MESSAGE_SERIALIZATION_METHODS = {0: "no serialization", 1: "JSON", 15: "custom type"} +MESSAGE_COMPRESSIONS = {0: "no compression", 1: "gzip", 15: "custom compression method"} + +# version: b0001 (4 bits) +# header size: b0001 (4 bits) +# message type: b0001 (Full client request) (4bits) +# message type specific flags: b0000 (none) (4bits) +# message serialization method: b0001 (JSON) (4 bits) +# message compression: b0001 (gzip) (4bits) +# 
reserved data: 0x00 (1 byte) +default_header = bytearray(b'\x11\x10\x11\x00') + +ssl_context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT) +ssl_context.check_hostname = False +ssl_context.verify_mode = ssl.CERT_NONE + + +class VolcanicEngineTextToSpeech(MaxKBBaseModel, BaseTextToSpeech): + volcanic_app_id: str + volcanic_cluster: str + volcanic_api_url: str + volcanic_token: str + params: dict + + def __init__(self, **kwargs): + super().__init__(**kwargs) + self.volcanic_api_url = kwargs.get('volcanic_api_url') + self.volcanic_token = kwargs.get('volcanic_token') + self.volcanic_app_id = kwargs.get('volcanic_app_id') + self.volcanic_cluster = kwargs.get('volcanic_cluster') + self.params = kwargs.get('params') + + @staticmethod + def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs): + optional_params = {'params': {'voice_type': 'zh_female_cancan_mars_bigtts', 'speed_ratio': 1.0}} + for key, value in model_kwargs.items(): + if key not in ['model_id', 'use_local', 'streaming']: + optional_params['params'][key] = value + return VolcanicEngineTextToSpeech( + volcanic_api_url=model_credential.get('volcanic_api_url'), + volcanic_token=model_credential.get('volcanic_token'), + volcanic_app_id=model_credential.get('volcanic_app_id'), + volcanic_cluster=model_credential.get('volcanic_cluster'), + **optional_params + ) + + def check_auth(self): + self.text_to_speech(_('Hello')) + + def text_to_speech(self, text): + request_json = { + "app": { + "appid": self.volcanic_app_id, + "token": "access_token", + "cluster": self.volcanic_cluster + }, + "user": { + "uid": "uid" + }, + "audio": { + "encoding": "mp3", + "volume_ratio": 1.0, + "pitch_ratio": 1.0, + } | self.params, + "request": { + "reqid": str(uuid.uuid4()), + "text": '', + "text_type": "plain", + "operation": "xxx" + } + } + text = _remove_empty_lines(text) + + return asyncio.run(self.submit(request_json, text)) + + def is_cache_model(self): + return False + + def token_auth(self): + 
return {'Authorization': 'Bearer; {}'.format(self.volcanic_token)} + + async def submit(self, request_json, text): + submit_request_json = copy.deepcopy(request_json) + submit_request_json["request"]["operation"] = "submit" + header = {"Authorization": f"Bearer; {self.volcanic_token}"} + result = b'' + async with websockets.connect(self.volcanic_api_url, extra_headers=header, ping_interval=None, + ssl=ssl_context) as ws: + lines = [text[i:i + 200] for i in range(0, len(text), 200)] + for line in lines: + if self.is_table_format_chars_only(line): + continue + submit_request_json["request"]["reqid"] = str(uuid.uuid4()) + submit_request_json["request"]["text"] = line + payload_bytes = str.encode(json.dumps(submit_request_json)) + payload_bytes = gzip.compress(payload_bytes) # if no compression, comment this line + full_client_request = bytearray(default_header) + full_client_request.extend((len(payload_bytes)).to_bytes(4, 'big')) # payload size(4 bytes) + full_client_request.extend(payload_bytes) # payload + await ws.send(full_client_request) + result += await self.parse_response(ws) + return result + + @staticmethod + def is_table_format_chars_only(s): + # 检查是否仅包含 "|", "-", 和空格字符 + return bool(s) and re.fullmatch(r'[|\-\s]+', s) + + @staticmethod + async def parse_response(ws): + result = b'' + while True: + res = await ws.recv() + protocol_version = res[0] >> 4 + header_size = res[0] & 0x0f + message_type = res[1] >> 4 + message_type_specific_flags = res[1] & 0x0f + serialization_method = res[2] >> 4 + message_compression = res[2] & 0x0f + reserved = res[3] + header_extensions = res[4:header_size * 4] + payload = res[header_size * 4:] + if header_size != 1: + # print(f" Header extensions: {header_extensions}") + pass + if message_type == 0xb: # audio-only server response + if message_type_specific_flags == 0: # no sequence number as ACK + continue + else: + sequence_number = int.from_bytes(payload[:4], "big", signed=True) + payload_size = 
int.from_bytes(payload[4:8], "big", signed=False) + payload = payload[8:] + result += payload + if sequence_number < 0: + break + else: + continue + elif message_type == 0xf: + code = int.from_bytes(payload[:4], "big", signed=False) + msg_size = int.from_bytes(payload[4:8], "big", signed=False) + error_msg = payload[8:] + if message_compression == 1: + error_msg = gzip.decompress(error_msg) + error_msg = str(error_msg, "utf-8") + raise Exception(f"Error code: {code}, message: {error_msg}") + elif message_type == 0xc: + msg_size = int.from_bytes(payload[:4], "big", signed=False) + payload = payload[4:] + if message_compression == 1: + payload = gzip.decompress(payload) + else: + break + return result diff --git a/apps/setting/models_provider/impl/volcanic_engine_model_provider/volcanic_engine_model_provider.py b/apps/setting/models_provider/impl/volcanic_engine_model_provider/volcanic_engine_model_provider.py new file mode 100644 index 00000000000..d963a144625 --- /dev/null +++ b/apps/setting/models_provider/impl/volcanic_engine_model_provider/volcanic_engine_model_provider.py @@ -0,0 +1,116 @@ +#!/usr/bin/env python +# -*- coding: UTF-8 -*- +""" +@Project :MaxKB +@File :gemini_model_provider.py +@Author :Brian Yang +@Date :5/13/24 7:47 AM +""" +import os + +from common.util.file_util import get_file_content +from setting.models_provider.base_model_provider import IModelProvider, ModelProvideInfo, ModelInfo, ModelTypeConst, \ + ModelInfoManage +from setting.models_provider.impl.openai_model_provider.credential.llm import OpenAILLMModelCredential +from setting.models_provider.impl.volcanic_engine_model_provider.credential.embedding import VolcanicEmbeddingCredential +from setting.models_provider.impl.volcanic_engine_model_provider.credential.image import \ + VolcanicEngineImageModelCredential +from setting.models_provider.impl.volcanic_engine_model_provider.credential.tti import VolcanicEngineTTIModelCredential +from 
setting.models_provider.impl.volcanic_engine_model_provider.credential.tts import VolcanicEngineTTSModelCredential +from setting.models_provider.impl.volcanic_engine_model_provider.model.embedding import VolcanicEngineEmbeddingModel +from setting.models_provider.impl.volcanic_engine_model_provider.model.image import VolcanicEngineImage +from setting.models_provider.impl.volcanic_engine_model_provider.model.llm import VolcanicEngineChatModel +from setting.models_provider.impl.volcanic_engine_model_provider.credential.stt import VolcanicEngineSTTModelCredential +from setting.models_provider.impl.volcanic_engine_model_provider.model.stt import VolcanicEngineSpeechToText +from setting.models_provider.impl.volcanic_engine_model_provider.model.tti import VolcanicEngineTextToImage +from setting.models_provider.impl.volcanic_engine_model_provider.model.tts import VolcanicEngineTextToSpeech + +from smartdoc.conf import PROJECT_DIR +from django.utils.translation import gettext as _ + +volcanic_engine_llm_model_credential = OpenAILLMModelCredential() +volcanic_engine_stt_model_credential = VolcanicEngineSTTModelCredential() +volcanic_engine_tts_model_credential = VolcanicEngineTTSModelCredential() +volcanic_engine_image_model_credential = VolcanicEngineImageModelCredential() +volcanic_engine_tti_model_credential = VolcanicEngineTTIModelCredential() + +model_info_list = [ + ModelInfo('ep-xxxxxxxxxx-yyyy', + _('The user goes to the model inference page of Volcano Ark to create an inference access point. Here, you need to enter ep-xxxxxxxxxx-yyyy to call it.'), + ModelTypeConst.LLM, + volcanic_engine_llm_model_credential, VolcanicEngineChatModel + ), + ModelInfo('ep-xxxxxxxxxx-yyyy', + _('The user goes to the model inference page of Volcano Ark to create an inference access point. 
Here, you need to enter ep-xxxxxxxxxx-yyyy to call it.'), + ModelTypeConst.IMAGE, + volcanic_engine_image_model_credential, VolcanicEngineImage + ), + ModelInfo('asr', + '', + ModelTypeConst.STT, + volcanic_engine_stt_model_credential, VolcanicEngineSpeechToText + ), + ModelInfo('tts', + '', + ModelTypeConst.TTS, + volcanic_engine_tts_model_credential, VolcanicEngineTextToSpeech + ), + ModelInfo('general_v2.0', + _('Universal 2.0-Vincent Diagram'), + ModelTypeConst.TTI, + volcanic_engine_tti_model_credential, VolcanicEngineTextToImage + ), + ModelInfo('general_v2.0_L', + _('Universal 2.0Pro-Vincent Chart'), + ModelTypeConst.TTI, + volcanic_engine_tti_model_credential, VolcanicEngineTextToImage + ), + ModelInfo('general_v1.4', + _('Universal 1.4-Vincent Chart'), + ModelTypeConst.TTI, + volcanic_engine_tti_model_credential, VolcanicEngineTextToImage + ), + ModelInfo('anime_v1.3', + _('Animation 1.3.0-Vincent Picture'), + ModelTypeConst.TTI, + volcanic_engine_tti_model_credential, VolcanicEngineTextToImage + ), + ModelInfo('anime_v1.3.1', + _('Animation 1.3.1-Vincent Picture'), + ModelTypeConst.TTI, + volcanic_engine_tti_model_credential, VolcanicEngineTextToImage + ), +] + +open_ai_embedding_credential = VolcanicEmbeddingCredential() +model_info_embedding_list = [ + ModelInfo('ep-xxxxxxxxxx-yyyy', + _('The user goes to the model inference page of Volcano Ark to create an inference access point. 
Here, you need to enter ep-xxxxxxxxxx-yyyy to call it.'), + ModelTypeConst.EMBEDDING, open_ai_embedding_credential, + VolcanicEngineEmbeddingModel) +] + +model_info_manage = ( + ModelInfoManage.builder() + .append_model_info_list(model_info_list) + .append_default_model_info(model_info_list[0]) + .append_default_model_info(model_info_list[1]) + .append_default_model_info(model_info_list[2]) + .append_default_model_info(model_info_list[3]) + .append_default_model_info(model_info_list[4]) + .append_model_info_list(model_info_embedding_list) + .append_default_model_info(model_info_embedding_list[0]) + .build() +) + + +class VolcanicEngineModelProvider(IModelProvider): + + def get_model_info_manage(self): + return model_info_manage + + def get_model_provide_info(self): + return ModelProvideInfo(provider='model_volcanic_engine_provider', name=_('volcano engine'), icon=get_file_content( + os.path.join(PROJECT_DIR, "apps", "setting", 'models_provider', 'impl', 'volcanic_engine_model_provider', + 'icon', + 'volcanic_engine_icon_svg'))) diff --git a/apps/setting/models_provider/impl/wenxin_model_provider/credential/embedding.py b/apps/setting/models_provider/impl/wenxin_model_provider/credential/embedding.py new file mode 100644 index 00000000000..85359511d33 --- /dev/null +++ b/apps/setting/models_provider/impl/wenxin_model_provider/credential/embedding.py @@ -0,0 +1,49 @@ +# coding=utf-8 +""" + @project: MaxKB + @Author:虎 + @file: embedding.py + @date:2024/10/17 15:40 + @desc: +""" +import traceback +from typing import Dict + +from django.utils.translation import gettext as _ + +from common import forms +from common.exception.app_exception import AppApiException +from common.forms import BaseForm +from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode + + +class QianfanEmbeddingCredential(BaseForm, BaseModelCredential): + + def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], model_params, provider, + 
raise_exception=False): + model_type_list = provider.get_model_type_list() + if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))): + raise AppApiException(ValidCode.valid_error.value, + _('{model_type} Model type is not supported').format(model_type=model_type)) + self.valid_form(model_credential) + try: + model = provider.get_model(model_type, model_name, model_credential) + model.embed_query(_('Hello')) + except Exception as e: + traceback.print_exc() + if isinstance(e, AppApiException): + raise e + if raise_exception: + raise AppApiException(ValidCode.valid_error.value, + _('Verification failed, please check whether the parameters are correct: {error}').format( + error=str(e))) + else: + return False + return True + + def encryption_dict(self, model: Dict[str, object]): + return {**model, 'qianfan_sk': super().encryption(model.get('qianfan_sk', ''))} + + qianfan_ak = forms.PasswordInputField('API Key', required=True) + + qianfan_sk = forms.PasswordInputField("Secret Key", required=True) diff --git a/apps/setting/models_provider/impl/wenxin_model_provider/credential/llm.py b/apps/setting/models_provider/impl/wenxin_model_provider/credential/llm.py new file mode 100644 index 00000000000..d4d379db3d5 --- /dev/null +++ b/apps/setting/models_provider/impl/wenxin_model_provider/credential/llm.py @@ -0,0 +1,82 @@ +# coding=utf-8 +""" + @project: MaxKB + @Author:虎 + @file: llm.py + @date:2024/7/12 10:19 + @desc: +""" +import traceback +from typing import Dict + +from django.utils.translation import gettext_lazy as _, gettext +from langchain_core.messages import HumanMessage + +from common import forms +from common.exception.app_exception import AppApiException +from common.forms import BaseForm, TooltipLabel +from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode + + +class WenxinLLMModelParams(BaseForm): + temperature = forms.SliderField(TooltipLabel(_('Temperature'), + _('Higher values make the output more 
random, while lower values make it more focused and deterministic')), + required=True, default_value=0.95, + _min=0.1, + _max=1.0, + _step=0.01, + precision=2) + + max_output_tokens = forms.SliderField( + TooltipLabel(_('Output the maximum Tokens'), + _('Specify the maximum number of tokens that the model can generate')), + required=True, default_value=1024, + _min=2, + _max=100000, + _step=1, + precision=0) + + +class WenxinLLMModelCredential(BaseForm, BaseModelCredential): + def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], model_params, provider, + raise_exception=False): + model_type_list = provider.get_model_type_list() + if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))): + raise AppApiException(ValidCode.valid_error.value, + gettext('{model_type} Model type is not supported').format(model_type=model_type)) + model = provider.get_model(model_type, model_name, model_credential, **model_params) + model_info = [model.lower() for model in model.client.models()] + if not model_info.__contains__(model_name.lower()): + raise AppApiException(ValidCode.valid_error.value, + gettext('{model_name} The model does not support').format(model_name=model_name)) + for key in ['api_key', 'secret_key']: + if key not in model_credential: + if raise_exception: + raise AppApiException(ValidCode.valid_error.value, gettext('{key} is required').format(key=key)) + else: + return False + try: + model.invoke( + [HumanMessage(content=gettext('Hello'))]) + except Exception as e: + traceback.print_exc() + raise e + return True + + def encryption_dict(self, model_info: Dict[str, object]): + return {**model_info, 'secret_key': super().encryption(model_info.get('secret_key', ''))} + + def build_model(self, model_info: Dict[str, object]): + for key in ['api_key', 'secret_key', 'model']: + if key not in model_info: + raise AppApiException(500, gettext('{key} is required').format(key=key)) + self.api_key = 
model_info.get('api_key') + self.secret_key = model_info.get('secret_key') + return self + + api_key = forms.PasswordInputField('API Key', required=True) + + secret_key = forms.PasswordInputField("Secret Key", required=True) + + def get_model_params_setting_form(self, model_name): + return WenxinLLMModelParams() diff --git a/apps/setting/models_provider/impl/wenxin_model_provider/model/embedding.py b/apps/setting/models_provider/impl/wenxin_model_provider/model/embedding.py new file mode 100644 index 00000000000..d46ac51eaab --- /dev/null +++ b/apps/setting/models_provider/impl/wenxin_model_provider/model/embedding.py @@ -0,0 +1,23 @@ +# coding=utf-8 +""" + @project: MaxKB + @Author:虎 + @file: embedding.py + @date:2024/10/17 16:48 + @desc: +""" +from typing import Dict + +from langchain_community.embeddings import QianfanEmbeddingsEndpoint + +from setting.models_provider.base_model_provider import MaxKBBaseModel + + +class QianfanEmbeddings(MaxKBBaseModel, QianfanEmbeddingsEndpoint): + @staticmethod + def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs): + return QianfanEmbeddings( + model=model_name, + qianfan_ak=model_credential.get('qianfan_ak'), + qianfan_sk=model_credential.get('qianfan_sk'), + ) diff --git a/apps/setting/models_provider/impl/wenxin_model_provider/model/llm.py b/apps/setting/models_provider/impl/wenxin_model_provider/model/llm.py new file mode 100644 index 00000000000..1f23e5a185a --- /dev/null +++ b/apps/setting/models_provider/impl/wenxin_model_provider/model/llm.py @@ -0,0 +1,76 @@ +# coding=utf-8 +""" + @project: maxkb + @Author:虎 + @file: llm.py + @date:2023/11/10 17:45 + @desc: +""" +from typing import List, Dict, Optional, Any, Iterator + +from langchain_community.chat_models.baidu_qianfan_endpoint import _convert_dict_to_message, QianfanChatEndpoint +from langchain_core.callbacks import CallbackManagerForLLMRun +from langchain_core.messages import ( + AIMessageChunk, + BaseMessage, +) +from 
langchain_core.outputs import ChatGenerationChunk + +from setting.models_provider.base_model_provider import MaxKBBaseModel + + +class QianfanChatModel(MaxKBBaseModel, QianfanChatEndpoint): + @staticmethod + def is_cache_model(): + return False + + @staticmethod + def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs): + optional_params = MaxKBBaseModel.filter_optional_params(model_kwargs) + return QianfanChatModel(model=model_name, + qianfan_ak=model_credential.get('api_key'), + qianfan_sk=model_credential.get('secret_key'), + streaming=model_kwargs.get('streaming', False), + init_kwargs=optional_params) + + usage_metadata: dict = {} + + def get_last_generation_info(self) -> Optional[Dict[str, Any]]: + return self.usage_metadata + + def get_num_tokens_from_messages(self, messages: List[BaseMessage]) -> int: + return self.usage_metadata.get('prompt_tokens', 0) + + def get_num_tokens(self, text: str) -> int: + return self.usage_metadata.get('completion_tokens', 0) + + def _stream( + self, + messages: List[BaseMessage], + stop: Optional[List[str]] = None, + run_manager: Optional[CallbackManagerForLLMRun] = None, + **kwargs: Any, + ) -> Iterator[ChatGenerationChunk]: + kwargs = {**self.init_kwargs, **kwargs} + params = self._convert_prompt_msg_params(messages, **kwargs) + params["stop"] = stop + params["stream"] = True + for res in self.client.do(**params): + if res: + msg = _convert_dict_to_message(res) + additional_kwargs = msg.additional_kwargs.get("function_call", {}) + if msg.content == "" or res.get("body").get("is_end"): + token_usage = res.get("body").get("usage") + self.usage_metadata = token_usage + chunk = ChatGenerationChunk( + text=res["result"], + message=AIMessageChunk( # type: ignore[call-arg] + content=msg.content, + role="assistant", + additional_kwargs=additional_kwargs, + ), + generation_info=msg.additional_kwargs, + ) + if run_manager: + run_manager.on_llm_new_token(chunk.text, chunk=chunk) + yield chunk diff 
--git a/apps/setting/models_provider/impl/wenxin_model_provider/model/qian_fan_chat_model.py b/apps/setting/models_provider/impl/wenxin_model_provider/model/qian_fan_chat_model.py deleted file mode 100644 index b07e8a01ba1..00000000000 --- a/apps/setting/models_provider/impl/wenxin_model_provider/model/qian_fan_chat_model.py +++ /dev/null @@ -1,88 +0,0 @@ -# coding=utf-8 -""" - @project: maxkb - @Author:虎 - @file: qian_fan_chat_model.py - @date:2023/11/10 17:45 - @desc: -""" -from typing import Optional, List, Any, Iterator, cast - -from langchain.callbacks.manager import CallbackManager -from langchain.chat_models.base import BaseChatModel -from langchain.load import dumpd -from langchain.schema import LLMResult -from langchain.schema.language_model import LanguageModelInput -from langchain.schema.messages import BaseMessageChunk, BaseMessage, HumanMessage, AIMessage, get_buffer_string -from langchain.schema.output import ChatGenerationChunk -from langchain.schema.runnable import RunnableConfig -from langchain_community.chat_models import QianfanChatEndpoint - -from common.config.tokenizer_manage_config import TokenizerManage - - -class QianfanChatModel(QianfanChatEndpoint): - - def get_num_tokens_from_messages(self, messages: List[BaseMessage]) -> int: - tokenizer = TokenizerManage.get_tokenizer() - return sum([len(tokenizer.encode(get_buffer_string([m]))) for m in messages]) - - def get_num_tokens(self, text: str) -> int: - tokenizer = TokenizerManage.get_tokenizer() - return len(tokenizer.encode(text)) - - def stream( - self, - input: LanguageModelInput, - config: Optional[RunnableConfig] = None, - *, - stop: Optional[List[str]] = None, - **kwargs: Any, - ) -> Iterator[BaseMessageChunk]: - if len(input) % 2 == 0: - input = [HumanMessage(content='padding'), *input] - input = [ - HumanMessage(content=input[index].content) if index % 2 == 0 else AIMessage(content=input[index].content) - for index in range(0, len(input))] - if type(self)._stream == 
BaseChatModel._stream: - # model doesn't implement streaming, so use default implementation - yield cast( - BaseMessageChunk, self.invoke(input, config=config, stop=stop, **kwargs) - ) - else: - config = config or {} - messages = self._convert_input(input).to_messages() - params = self._get_invocation_params(stop=stop, **kwargs) - options = {"stop": stop, **kwargs} - callback_manager = CallbackManager.configure( - config.get("callbacks"), - self.callbacks, - self.verbose, - config.get("tags"), - self.tags, - config.get("metadata"), - self.metadata, - ) - (run_manager,) = callback_manager.on_chat_model_start( - dumpd(self), - [messages], - invocation_params=params, - options=options, - name=config.get("run_name"), - ) - try: - generation: Optional[ChatGenerationChunk] = None - for chunk in self._stream( - messages, stop=stop, run_manager=run_manager, **kwargs - ): - yield chunk.message - if generation is None: - generation = chunk - assert generation is not None - except BaseException as e: - run_manager.on_llm_error(e) - raise e - else: - run_manager.on_llm_end( - LLMResult(generations=[[generation]]), - ) diff --git a/apps/setting/models_provider/impl/wenxin_model_provider/wenxin_model_provider.py b/apps/setting/models_provider/impl/wenxin_model_provider/wenxin_model_provider.py index 3d7c9a7d9ed..b07f90223f7 100644 --- a/apps/setting/models_provider/impl/wenxin_model_provider/wenxin_model_provider.py +++ b/apps/setting/models_provider/impl/wenxin_model_provider/wenxin_model_provider.py @@ -7,122 +7,62 @@ @desc: """ import os -from typing import Dict -from langchain.schema import HumanMessage -from langchain_community.chat_models import QianfanChatEndpoint -from qianfan import ChatCompletion - -from common import forms -from common.exception.app_exception import AppApiException -from common.forms import BaseForm from common.util.file_util import get_file_content -from setting.models_provider.base_model_provider import ModelProvideInfo, ModelTypeConst, 
BaseModelCredential, \ - ModelInfo, IModelProvider, ValidCode -from setting.models_provider.impl.wenxin_model_provider.model.qian_fan_chat_model import QianfanChatModel +from setting.models_provider.base_model_provider import ModelProvideInfo, ModelTypeConst, ModelInfo, IModelProvider, \ + ModelInfoManage +from setting.models_provider.impl.wenxin_model_provider.credential.embedding import QianfanEmbeddingCredential +from setting.models_provider.impl.wenxin_model_provider.credential.llm import WenxinLLMModelCredential +from setting.models_provider.impl.wenxin_model_provider.model.embedding import QianfanEmbeddings +from setting.models_provider.impl.wenxin_model_provider.model.llm import QianfanChatModel from smartdoc.conf import PROJECT_DIR - - -class WenxinLLMModelCredential(BaseForm, BaseModelCredential): - def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], raise_exception=False): - model_type_list = WenxinModelProvider().get_model_type_list() - if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))): - raise AppApiException(ValidCode.valid_error.value, f'{model_type} 模型类型不支持') - model_info = [model.lower() for model in ChatCompletion.models()] - if not model_info.__contains__(model_name.lower()): - raise AppApiException(ValidCode.valid_error.value, f'{model_name} 模型不支持') - for key in ['api_key', 'secret_key']: - if key not in model_credential: - if raise_exception: - raise AppApiException(ValidCode.valid_error.value, f'{key} 字段为必填字段') - else: - return False - try: - WenxinModelProvider().get_model(model_type, model_name, model_credential).invoke( - [HumanMessage(content='你好')]) - except Exception as e: - raise e - return True - - def encryption_dict(self, model_info: Dict[str, object]): - return {**model_info, 'secret_key': super().encryption(model_info.get('secret_key', ''))} - - def build_model(self, model_info: Dict[str, object]): - for key in ['api_key', 'secret_key', 'model']: - if key not in 
model_info: - raise AppApiException(500, f'{key} 字段为必填字段') - self.api_key = model_info.get('api_key') - self.secret_key = model_info.get('secret_key') - return self - - api_key = forms.PasswordInputField('API Key', required=True) - - secret_key = forms.PasswordInputField("Secret Key", required=True) - +from django.utils.translation import gettext as _ win_xin_llm_model_credential = WenxinLLMModelCredential() -model_dict = { - 'ERNIE-Bot-4': ModelInfo('ERNIE-Bot-4', - 'ERNIE-Bot-4是百度自行研发的大语言模型,覆盖海量中文数据,具有更强的对话问答、内容创作生成等能力。', - ModelTypeConst.LLM, win_xin_llm_model_credential), - - 'ERNIE-Bot': ModelInfo('ERNIE-Bot', - 'ERNIE-Bot是百度自行研发的大语言模型,覆盖海量中文数据,具有更强的对话问答、内容创作生成等能力。', - ModelTypeConst.LLM, win_xin_llm_model_credential), - - 'ERNIE-Bot-turbo': ModelInfo('ERNIE-Bot-turbo', - 'ERNIE-Bot-turbo是百度自行研发的大语言模型,覆盖海量中文数据,具有更强的对话问答、内容创作生成等能力,响应速度更快。', - ModelTypeConst.LLM, win_xin_llm_model_credential), - - 'BLOOMZ-7B': ModelInfo('BLOOMZ-7B', - 'BLOOMZ-7B是业内知名的大语言模型,由BigScience研发并开源,能够以46种语言和13种编程语言输出文本。', - ModelTypeConst.LLM, win_xin_llm_model_credential), - - 'Llama-2-7b-chat': ModelInfo('Llama-2-7b-chat', - 'Llama-2-7b-chat由Meta AI研发并开源,在编码、推理及知识应用等场景表现优秀,Llama-2-7b-chat是高性能原生开源版本,适用于对话场景。', - ModelTypeConst.LLM, win_xin_llm_model_credential), - - 'Llama-2-13b-chat': ModelInfo('Llama-2-13b-chat', - 'Llama-2-13b-chat由Meta AI研发并开源,在编码、推理及知识应用等场景表现优秀,Llama-2-13b-chat是性能与效果均衡的原生开源版本,适用于对话场景。', - ModelTypeConst.LLM, win_xin_llm_model_credential), - - 'Llama-2-70b-chat': ModelInfo('Llama-2-70b-chat', - 'Llama-2-70b-chat由Meta AI研发并开源,在编码、推理及知识应用等场景表现优秀,Llama-2-70b-chat是高精度效果的原生开源版本。', - ModelTypeConst.LLM, win_xin_llm_model_credential), - - 'Qianfan-Chinese-Llama-2-7B': ModelInfo('Qianfan-Chinese-Llama-2-7B', - '千帆团队在Llama-2-7b基础上的中文增强版本,在CMMLU、C-EVAL等中文知识库上表现优异。', - ModelTypeConst.LLM, win_xin_llm_model_credential) -} +qianfan_embedding_credential = QianfanEmbeddingCredential() +model_info_list = [ModelInfo('ERNIE-Bot-4', + _('ERNIE-Bot-4 is a large language model 
independently developed by Baidu. It covers massive Chinese data and has stronger capabilities in dialogue Q&A, content creation and generation.'), + ModelTypeConst.LLM, win_xin_llm_model_credential, QianfanChatModel), + ModelInfo('ERNIE-Bot', + _('ERNIE-Bot is a large language model independently developed by Baidu. It covers massive Chinese data and has stronger capabilities in dialogue Q&A, content creation and generation.'), + ModelTypeConst.LLM, win_xin_llm_model_credential, QianfanChatModel), + ModelInfo('ERNIE-Bot-turbo', + _('ERNIE-Bot-turbo is a large language model independently developed by Baidu. It covers massive Chinese data, has stronger capabilities in dialogue Q&A, content creation and generation, and has a faster response speed.'), + ModelTypeConst.LLM, win_xin_llm_model_credential, QianfanChatModel), + ModelInfo('BLOOMZ-7B', + _('BLOOMZ-7B is a well-known large language model in the industry. It was developed and open sourced by BigScience and can output text in 46 languages and 13 programming languages.'), + ModelTypeConst.LLM, win_xin_llm_model_credential, QianfanChatModel), + ModelInfo('Llama-2-7b-chat', + 'Llama-2-7b-chat was developed by Meta AI and is open source. It performs well in scenarios such as coding, reasoning and knowledge application. Llama-2-7b-chat is a high-performance native open source version suitable for conversation scenarios.', + ModelTypeConst.LLM, win_xin_llm_model_credential, QianfanChatModel), + ModelInfo('Llama-2-13b-chat', + _('Llama-2-13b-chat was developed by Meta AI and is open source. It performs well in scenarios such as coding, reasoning and knowledge application. Llama-2-13b-chat is a native open source version with balanced performance and effect, suitable for conversation scenarios.'), + ModelTypeConst.LLM, win_xin_llm_model_credential, QianfanChatModel), + ModelInfo('Llama-2-70b-chat', + _('Llama-2-70b-chat was developed by Meta AI and is open source. 
It performs well in scenarios such as coding, reasoning, and knowledge application. Llama-2-70b-chat is a native open source version with high-precision effects.'), + ModelTypeConst.LLM, win_xin_llm_model_credential, QianfanChatModel), + ModelInfo('Qianfan-Chinese-Llama-2-7B', + _('The Chinese enhanced version developed by the Qianfan team based on Llama-2-7b has performed well on Chinese knowledge bases such as CMMLU and C-EVAL.'), + ModelTypeConst.LLM, win_xin_llm_model_credential, QianfanChatModel) + ] +embedding_model_info = ModelInfo('Embedding-V1', + _('Embedding-V1 is a text representation model based on Baidu Wenxin large model technology. It can convert text into a vector form represented by numerical values and can be used in text retrieval, information recommendation, knowledge mining and other scenarios. Embedding-V1 provides the Embeddings interface, which can generate corresponding vector representations based on input content. You can call this interface to input text into the model and obtain the corresponding vector representation for subsequent text processing and analysis.'), + ModelTypeConst.EMBEDDING, qianfan_embedding_credential, QianfanEmbeddings) +model_info_manage = ModelInfoManage.builder().append_model_info_list(model_info_list).append_default_model_info( + ModelInfo('ERNIE-Bot-4', + _('ERNIE-Bot-4 is a large language model independently developed by Baidu. 
It covers massive Chinese data and has stronger capabilities in dialogue Q&A, content creation and generation.'), + ModelTypeConst.LLM, + win_xin_llm_model_credential, + QianfanChatModel)).append_model_info(embedding_model_info).append_default_model_info( + embedding_model_info).build() class WenxinModelProvider(IModelProvider): - def get_dialogue_number(self): - return 2 - - def get_model(self, model_type, model_name, model_credential: Dict[str, object], - **model_kwargs) -> QianfanChatEndpoint: - return QianfanChatModel(model=model_name, - qianfan_ak=model_credential.get('api_key'), - qianfan_sk=model_credential.get('secret_key'), - streaming=model_kwargs.get('streaming', False)) - - def get_model_type_list(self): - return [{'key': "大语言模型", 'value': "LLM"}] - - def get_model_list(self, model_type): - if model_type is None: - raise AppApiException(500, '模型类型不能为空') - return [model_dict.get(key).to_dict() for key in - list(filter(lambda key: model_dict.get(key).model_type == model_type, model_dict.keys()))] - - def get_model_credential(self, model_type, model_name): - if model_name in model_dict: - return model_dict.get(model_name).model_credential - return win_xin_llm_model_credential + def get_model_info_manage(self): + return model_info_manage def get_model_provide_info(self): - return ModelProvideInfo(provider='model_wenxin_provider', name='千帆大模型', icon=get_file_content( + return ModelProvideInfo(provider='model_wenxin_provider', name=_('Thousand sails large model'), icon=get_file_content( os.path.join(PROJECT_DIR, "apps", "setting", 'models_provider', 'impl', 'wenxin_model_provider', 'icon', 'azure_icon_svg'))) diff --git a/apps/setting/models_provider/impl/xf_model_provider/credential/embedding.py b/apps/setting/models_provider/impl/xf_model_provider/credential/embedding.py new file mode 100644 index 00000000000..57f66d3ac88 --- /dev/null +++ b/apps/setting/models_provider/impl/xf_model_provider/credential/embedding.py @@ -0,0 +1,50 @@ +# coding=utf-8 +""" + 
@project: MaxKB + @Author:虎 + @file: embedding.py + @date:2024/10/17 15:40 + @desc: +""" +import traceback +from typing import Dict + +from django.utils.translation import gettext as _ + +from common import forms +from common.exception.app_exception import AppApiException +from common.forms import BaseForm +from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode + + +class XFEmbeddingCredential(BaseForm, BaseModelCredential): + + def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], model_params, provider, + raise_exception=False): + model_type_list = provider.get_model_type_list() + if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))): + raise AppApiException(ValidCode.valid_error.value, + _('{model_type} Model type is not supported').format(model_type=model_type)) + self.valid_form(model_credential) + try: + model = provider.get_model(model_type, model_name, model_credential) + model.embed_query(_('Hello')) + except Exception as e: + traceback.print_exc() + if isinstance(e, AppApiException): + raise e + if raise_exception: + raise AppApiException(ValidCode.valid_error.value, + _('Verification failed, please check whether the parameters are correct: {error}').format( + error=str(e))) + else: + return False + return True + + def encryption_dict(self, model: Dict[str, object]): + return {**model, 'spark_api_secret': super().encryption(model.get('spark_api_secret', ''))} + + base_url = forms.TextInputField('API URL', required=True, default_value="https://emb-cn-huabei-1.xf-yun.com/") + spark_app_id = forms.TextInputField('APP ID', required=True) + spark_api_key = forms.PasswordInputField("API Key", required=True) + spark_api_secret = forms.PasswordInputField('API Secret', required=True) diff --git a/apps/setting/models_provider/impl/xf_model_provider/credential/image.py b/apps/setting/models_provider/impl/xf_model_provider/credential/image.py new file mode 100644 index 
00000000000..b68b84149be --- /dev/null +++ b/apps/setting/models_provider/impl/xf_model_provider/credential/image.py @@ -0,0 +1,60 @@ +# coding=utf-8 +import base64 +import os +import traceback +from typing import Dict + +from django.utils.translation import gettext as _ +from langchain_core.messages import HumanMessage + +from common import forms +from common.exception.app_exception import AppApiException +from common.forms import BaseForm +from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode +from setting.models_provider.impl.xf_model_provider.model.image import ImageMessage + + +class XunFeiImageModelCredential(BaseForm, BaseModelCredential): + spark_api_url = forms.TextInputField('API URL', required=True, + default_value='wss://spark-api.cn-huabei-1.xf-yun.com/v2.1/image') + spark_app_id = forms.TextInputField('APP ID', required=True) + spark_api_key = forms.PasswordInputField("API Key", required=True) + spark_api_secret = forms.PasswordInputField('API Secret', required=True) + + def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], model_params, provider, + raise_exception=False): + model_type_list = provider.get_model_type_list() + if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))): + raise AppApiException(ValidCode.valid_error.value, + _('{model_type} Model type is not supported').format(model_type=model_type)) + + for key in ['spark_api_url', 'spark_app_id', 'spark_api_key', 'spark_api_secret']: + if key not in model_credential: + if raise_exception: + raise AppApiException(ValidCode.valid_error.value, _('{key} is required').format(key=key)) + else: + return False + try: + model = provider.get_model(model_type, model_name, model_credential, **model_params) + cwd = os.path.dirname(os.path.abspath(__file__)) + with open(f'{cwd}/img_1.png', 'rb') as f: + message_list = [ImageMessage(str(base64.b64encode(f.read()), 'utf-8')), + HumanMessage(_('Please outline this 
picture'))] + model.stream(message_list) + except Exception as e: + traceback.print_exc() + if isinstance(e, AppApiException): + raise e + if raise_exception: + raise AppApiException(ValidCode.valid_error.value, + _('Verification failed, please check whether the parameters are correct: {error}').format( + error=str(e))) + else: + return False + return True + + def encryption_dict(self, model: Dict[str, object]): + return {**model, 'spark_api_secret': super().encryption(model.get('spark_api_secret', ''))} + + def get_model_params_setting_form(self, model_name): + pass diff --git a/apps/setting/models_provider/impl/xf_model_provider/credential/img_1.png b/apps/setting/models_provider/impl/xf_model_provider/credential/img_1.png new file mode 100644 index 00000000000..ccb9d3b2035 Binary files /dev/null and b/apps/setting/models_provider/impl/xf_model_provider/credential/img_1.png differ diff --git a/apps/setting/models_provider/impl/xf_model_provider/credential/llm.py b/apps/setting/models_provider/impl/xf_model_provider/credential/llm.py new file mode 100644 index 00000000000..f62a7164ced --- /dev/null +++ b/apps/setting/models_provider/impl/xf_model_provider/credential/llm.py @@ -0,0 +1,101 @@ +# coding=utf-8 +""" + @project: MaxKB + @Author:虎 + @file: llm.py + @date:2024/7/12 10:29 + @desc: +""" +import traceback +from typing import Dict + +from django.utils.translation import gettext_lazy as _, gettext +from langchain_core.messages import HumanMessage + +from common import forms +from common.exception.app_exception import AppApiException +from common.forms import BaseForm, TooltipLabel +from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode + + +class XunFeiLLMModelGeneralParams(BaseForm): + temperature = forms.SliderField(TooltipLabel(_('Temperature'), + _('Higher values make the output more random, while lower values make it more focused and deterministic')), + required=True, default_value=0.5, + _min=0.1, + _max=1.0, + _step=0.01, 
+ precision=2) + + max_tokens = forms.SliderField( + TooltipLabel(_('Output the maximum Tokens'), + _('Specify the maximum number of tokens that the model can generate')), + required=True, default_value=4096, + _min=1, + _max=100000, + _step=1, + precision=0) + + +class XunFeiLLMModelProParams(BaseForm): + temperature = forms.SliderField(TooltipLabel(_('Temperature'), + _('Higher values make the output more random, while lower values make it more focused and deterministic')), + required=True, default_value=0.5, + _min=0.1, + _max=1.0, + _step=0.01, + precision=2) + + max_tokens = forms.SliderField( + TooltipLabel(_('Output the maximum Tokens'), + _('Specify the maximum number of tokens that the model can generate')), + required=True, default_value=4096, + _min=1, + _max=100000, + _step=1, + precision=0) + + +class XunFeiLLMModelCredential(BaseForm, BaseModelCredential): + + def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], model_params, provider, + raise_exception=False): + model_type_list = provider.get_model_type_list() + if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))): + raise AppApiException(ValidCode.valid_error.value, + gettext('{model_type} Model type is not supported').format(model_type=model_type)) + + for key in ['spark_api_url', 'spark_app_id', 'spark_api_key', 'spark_api_secret']: + if key not in model_credential: + if raise_exception: + raise AppApiException(ValidCode.valid_error.value, gettext('{key} is required').format(key=key)) + else: + return False + try: + model = provider.get_model(model_type, model_name, model_credential, **model_params) + model.invoke([HumanMessage(content=gettext('Hello'))]) + except Exception as e: + traceback.print_exc() + if isinstance(e, AppApiException): + raise e + if raise_exception: + raise AppApiException(ValidCode.valid_error.value, + gettext( + 'Verification failed, please check whether the parameters are correct: {error}').format( + 
error=str(e))) + else: + return False + return True + + def encryption_dict(self, model: Dict[str, object]): + return {**model, 'spark_api_secret': super().encryption(model.get('spark_api_secret', ''))} + + spark_api_url = forms.TextInputField('API URL', required=True) + spark_app_id = forms.TextInputField('APP ID', required=True) + spark_api_key = forms.PasswordInputField("API Key", required=True) + spark_api_secret = forms.PasswordInputField('API Secret', required=True) + + def get_model_params_setting_form(self, model_name): + if model_name == 'general' or model_name == 'pro-128k': + return XunFeiLLMModelGeneralParams() + return XunFeiLLMModelProParams() diff --git a/apps/setting/models_provider/impl/xf_model_provider/credential/stt.py b/apps/setting/models_provider/impl/xf_model_provider/credential/stt.py new file mode 100644 index 00000000000..44db3b15272 --- /dev/null +++ b/apps/setting/models_provider/impl/xf_model_provider/credential/stt.py @@ -0,0 +1,51 @@ +# coding=utf-8 +import traceback +from typing import Dict + +from django.utils.translation import gettext as _ + +from common import forms +from common.exception.app_exception import AppApiException +from common.forms import BaseForm +from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode + + +class XunFeiSTTModelCredential(BaseForm, BaseModelCredential): + spark_api_url = forms.TextInputField('API URL', required=True, default_value='wss://iat-api.xfyun.cn/v2/iat') + spark_app_id = forms.TextInputField('APP ID', required=True) + spark_api_key = forms.PasswordInputField("API Key", required=True) + spark_api_secret = forms.PasswordInputField('API Secret', required=True) + + def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], model_params, provider, + raise_exception=False): + model_type_list = provider.get_model_type_list() + if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))): + raise 
AppApiException(ValidCode.valid_error.value, + _('{model_type} Model type is not supported').format(model_type=model_type)) + + for key in ['spark_api_url', 'spark_app_id', 'spark_api_key', 'spark_api_secret']: + if key not in model_credential: + if raise_exception: + raise AppApiException(ValidCode.valid_error.value, _('{key} is required').format(key=key)) + else: + return False + try: + model = provider.get_model(model_type, model_name, model_credential) + model.check_auth() + except Exception as e: + traceback.print_exc() + if isinstance(e, AppApiException): + raise e + if raise_exception: + raise AppApiException(ValidCode.valid_error.value, + _('Verification failed, please check whether the parameters are correct: {error}').format( + error=str(e))) + else: + return False + return True + + def encryption_dict(self, model: Dict[str, object]): + return {**model, 'spark_api_secret': super().encryption(model.get('spark_api_secret', ''))} + + def get_model_params_setting_form(self, model_name): + pass diff --git a/apps/setting/models_provider/impl/xf_model_provider/credential/tts.py b/apps/setting/models_provider/impl/xf_model_provider/credential/tts.py new file mode 100644 index 00000000000..39463886264 --- /dev/null +++ b/apps/setting/models_provider/impl/xf_model_provider/credential/tts.py @@ -0,0 +1,75 @@ +# coding=utf-8 +import traceback +from typing import Dict + +from django.utils.translation import gettext_lazy as _, gettext + +from common import forms +from common.exception.app_exception import AppApiException +from common.forms import BaseForm, TooltipLabel +from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode + + +class XunFeiTTSModelGeneralParams(BaseForm): + vcn = forms.SingleSelect( + TooltipLabel(_('Speaker'), + _('Speaker, optional value: Please go to the console to add a trial or purchase speaker. 
After adding, the speaker parameter value will be displayed.')), + required=True, default_value='xiaoyan', + text_field='value', + value_field='value', + option_list=[ + {'text': _('iFlytek Xiaoyan'), 'value': 'xiaoyan'}, + {'text': _('iFlytek Xujiu'), 'value': 'aisjiuxu'}, + {'text': _('iFlytek Xiaoping'), 'value': 'aisxping'}, + {'text': _('iFlytek Xiaojing'), 'value': 'aisjinger'}, + {'text': _('iFlytek Xuxiaobao'), 'value': 'aisbabyxu'}, + ]) + speed = forms.SliderField( + TooltipLabel(_('speaking speed'), _('Speech speed, optional value: [0-100], default is 50')), + required=True, default_value=50, + _min=1, + _max=100, + _step=5, + precision=1) + + +class XunFeiTTSModelCredential(BaseForm, BaseModelCredential): + spark_api_url = forms.TextInputField('API URL', required=True, default_value='wss://tts-api.xfyun.cn/v2/tts') + spark_app_id = forms.TextInputField('APP ID', required=True) + spark_api_key = forms.PasswordInputField("API Key", required=True) + spark_api_secret = forms.PasswordInputField('API Secret', required=True) + + def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], model_params, provider, + raise_exception=False): + model_type_list = provider.get_model_type_list() + if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))): + raise AppApiException(ValidCode.valid_error.value, + gettext('{model_type} Model type is not supported').format(model_type=model_type)) + + for key in ['spark_api_url', 'spark_app_id', 'spark_api_key', 'spark_api_secret']: + if key not in model_credential: + if raise_exception: + raise AppApiException(ValidCode.valid_error.value, gettext('{key} is required').format(key=key)) + else: + return False + try: + model = provider.get_model(model_type, model_name, model_credential, **model_params) + model.check_auth() + except Exception as e: + traceback.print_exc() + if isinstance(e, AppApiException): + raise e + if raise_exception: + raise 
AppApiException(ValidCode.valid_error.value, + gettext( + 'Verification failed, please check whether the parameters are correct: {error}').format( + error=str(e))) + else: + return False + return True + + def encryption_dict(self, model: Dict[str, object]): + return {**model, 'spark_api_secret': super().encryption(model.get('spark_api_secret', ''))} + + def get_model_params_setting_form(self, model_name): + return XunFeiTTSModelGeneralParams() diff --git a/apps/setting/models_provider/impl/xf_model_provider/model/embedding.py b/apps/setting/models_provider/impl/xf_model_provider/model/embedding.py new file mode 100644 index 00000000000..78cc04ceb4b --- /dev/null +++ b/apps/setting/models_provider/impl/xf_model_provider/model/embedding.py @@ -0,0 +1,49 @@ +# coding=utf-8 +""" + @project: MaxKB + @Author:虎 + @file: embedding.py + @date:2024/10/17 15:29 + @desc: +""" + +import base64 +import json +from typing import Dict, Optional + +import numpy as np +from langchain_community.embeddings import SparkLLMTextEmbeddings +from numpy import ndarray + +from setting.models_provider.base_model_provider import MaxKBBaseModel + + +class XFEmbedding(MaxKBBaseModel, SparkLLMTextEmbeddings): + @staticmethod + def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs): + return XFEmbedding( + spark_app_id=model_credential.get('spark_app_id'), + spark_api_key=model_credential.get('spark_api_key'), + spark_api_secret=model_credential.get('spark_api_secret') + ) + + @staticmethod + def _parser_message( + message: str, + ) -> Optional[ndarray]: + data = json.loads(message) + code = data["header"]["code"] + if code != 0: + # 这里是讯飞的QPS限制会报错,所以不建议用讯飞的向量模型 + raise Exception(f"Request error: {code}, {data}") + else: + text_base = data["payload"]["feature"]["text"] + text_data = base64.b64decode(text_base) + dt = np.dtype(np.float32) + dt = dt.newbyteorder("<") + text = np.frombuffer(text_data, dtype=dt) + if len(text) > 2560: + array = text[:2560] + else: 
+ array = text + return array diff --git a/apps/setting/models_provider/impl/xf_model_provider/model/iat_mp3_16k.mp3 b/apps/setting/models_provider/impl/xf_model_provider/model/iat_mp3_16k.mp3 new file mode 100644 index 00000000000..75e744c8ff5 Binary files /dev/null and b/apps/setting/models_provider/impl/xf_model_provider/model/iat_mp3_16k.mp3 differ diff --git a/apps/setting/models_provider/impl/xf_model_provider/model/image.py b/apps/setting/models_provider/impl/xf_model_provider/model/image.py new file mode 100644 index 00000000000..b7813287de2 --- /dev/null +++ b/apps/setting/models_provider/impl/xf_model_provider/model/image.py @@ -0,0 +1,96 @@ +# coding=utf-8 +import base64 +import os +from typing import Dict, Any, List, Optional, Iterator + +from docutils.utils import SystemMessage +from langchain_community.chat_models.sparkllm import ChatSparkLLM, _convert_delta_to_message_chunk +from langchain_core.callbacks import CallbackManagerForLLMRun +from langchain_core.messages import BaseMessage, ChatMessage, HumanMessage, AIMessage, AIMessageChunk +from langchain_core.outputs import ChatGenerationChunk + +from setting.models_provider.base_model_provider import MaxKBBaseModel + + +class ImageMessage(HumanMessage): + content: str + + +def convert_message_to_dict(message: BaseMessage) -> dict: + message_dict: Dict[str, Any] + if isinstance(message, ChatMessage): + message_dict = {"role": "user", "content": message.content} + elif isinstance(message, ImageMessage): + message_dict = {"role": "user", "content": message.content, "content_type": "image"} + elif isinstance(message, HumanMessage): + message_dict = {"role": "user", "content": message.content} + elif isinstance(message, AIMessage): + message_dict = {"role": "assistant", "content": message.content} + if "function_call" in message.additional_kwargs: + message_dict["function_call"] = message.additional_kwargs["function_call"] + # If function call only, content is None not empty string + if 
message_dict["content"] == "": + message_dict["content"] = None + if "tool_calls" in message.additional_kwargs: + message_dict["tool_calls"] = message.additional_kwargs["tool_calls"] + # If tool calls only, content is None not empty string + if message_dict["content"] == "": + message_dict["content"] = None + elif isinstance(message, SystemMessage): + message_dict = {"role": "system", "content": message.content} + else: + raise ValueError(f"Got unknown type {message}") + + return message_dict + + +class XFSparkImage(MaxKBBaseModel, ChatSparkLLM): + spark_app_id: str + spark_api_key: str + spark_api_secret: str + spark_api_url: str + + @staticmethod + def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs): + optional_params = MaxKBBaseModel.filter_optional_params(model_kwargs) + return XFSparkImage( + spark_app_id=model_credential.get('spark_app_id'), + spark_api_key=model_credential.get('spark_api_key'), + spark_api_secret=model_credential.get('spark_api_secret'), + spark_api_url=model_credential.get('spark_api_url'), + **optional_params + ) + + @staticmethod + def generate_message(prompt: str, image) -> list[BaseMessage]: + if image is None: + cwd = os.path.dirname(os.path.abspath(__file__)) + with open(f'{cwd}/img_1.png', 'rb') as f: + base64_image = base64.b64encode(f.read()).decode("utf-8") + return [ImageMessage(f'data:image/jpeg;base64,{base64_image}'), HumanMessage(prompt)] + return [HumanMessage(prompt)] + + def _stream( + self, + messages: List[BaseMessage], + stop: Optional[List[str]] = None, + run_manager: Optional[CallbackManagerForLLMRun] = None, + **kwargs: Any, + ) -> Iterator[ChatGenerationChunk]: + default_chunk_class = AIMessageChunk + + self.client.arun( + [convert_message_to_dict(m) for m in messages], + self.spark_user_id, + self.model_kwargs, + streaming=True, + ) + for content in self.client.subscribe(timeout=self.request_timeout): + if "data" not in content: + continue + delta = content["data"] + chunk 
= _convert_delta_to_message_chunk(delta, default_chunk_class) + cg_chunk = ChatGenerationChunk(message=chunk) + if run_manager: + run_manager.on_llm_new_token(str(chunk.content), chunk=cg_chunk) + yield cg_chunk diff --git a/apps/setting/models_provider/impl/xf_model_provider/model/img_1.png b/apps/setting/models_provider/impl/xf_model_provider/model/img_1.png new file mode 100644 index 00000000000..ccb9d3b2035 Binary files /dev/null and b/apps/setting/models_provider/impl/xf_model_provider/model/img_1.png differ diff --git a/apps/setting/models_provider/impl/xf_model_provider/model/llm.py b/apps/setting/models_provider/impl/xf_model_provider/model/llm.py new file mode 100644 index 00000000000..6380f752f61 --- /dev/null +++ b/apps/setting/models_provider/impl/xf_model_provider/model/llm.py @@ -0,0 +1,78 @@ +# coding=utf-8 +""" + @project: maxkb + @Author:虎 + @file: __init__.py.py + @date:2024/04/19 15:55 + @desc: +""" +from typing import List, Optional, Any, Iterator, Dict + +from langchain_community.chat_models.sparkllm import \ + ChatSparkLLM, convert_message_to_dict, _convert_delta_to_message_chunk +from langchain_core.callbacks import CallbackManagerForLLMRun +from langchain_core.messages import BaseMessage, AIMessageChunk +from langchain_core.outputs import ChatGenerationChunk + +from setting.models_provider.base_model_provider import MaxKBBaseModel + + +class XFChatSparkLLM(MaxKBBaseModel, ChatSparkLLM): + @staticmethod + def is_cache_model(): + return False + + @staticmethod + def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs): + optional_params = MaxKBBaseModel.filter_optional_params(model_kwargs) + return XFChatSparkLLM( + spark_app_id=model_credential.get('spark_app_id'), + spark_api_key=model_credential.get('spark_api_key'), + spark_api_secret=model_credential.get('spark_api_secret'), + spark_api_url=model_credential.get('spark_api_url'), + spark_llm_domain=model_name, + streaming=model_kwargs.get('streaming', 
False), + **optional_params + ) + + usage_metadata: dict = {} + + def get_last_generation_info(self) -> Optional[Dict[str, Any]]: + return self.usage_metadata + + def get_num_tokens_from_messages(self, messages: List[BaseMessage]) -> int: + return self.usage_metadata.get('prompt_tokens', 0) + + def get_num_tokens(self, text: str) -> int: + return self.usage_metadata.get('completion_tokens', 0) + + def _stream( + self, + messages: List[BaseMessage], + stop: Optional[List[str]] = None, + run_manager: Optional[CallbackManagerForLLMRun] = None, + **kwargs: Any, + ) -> Iterator[ChatGenerationChunk]: + default_chunk_class = AIMessageChunk + + self.client.arun( + [convert_message_to_dict(m) for m in messages], + self.spark_user_id, + self.model_kwargs, + True, + ) + for content in self.client.subscribe(timeout=self.request_timeout): + if "data" in content: + delta = content["data"] + chunk = _convert_delta_to_message_chunk(delta, default_chunk_class) + cg_chunk = ChatGenerationChunk(message=chunk) + elif "usage" in content: + generation_info = content["usage"] + self.usage_metadata = generation_info + continue + else: + continue + if cg_chunk is not None: + if run_manager: + run_manager.on_llm_new_token(str(cg_chunk.message.content), chunk=cg_chunk) + yield cg_chunk diff --git a/apps/setting/models_provider/impl/xf_model_provider/model/stt.py b/apps/setting/models_provider/impl/xf_model_provider/model/stt.py new file mode 100644 index 00000000000..a1150bc83f2 --- /dev/null +++ b/apps/setting/models_provider/impl/xf_model_provider/model/stt.py @@ -0,0 +1,171 @@ +# -*- coding:utf-8 -*- +# +# 错误码链接:https://www.xfyun.cn/document/error-code (code返回错误码时必看) +# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # +import asyncio +import base64 +import datetime +import hashlib +import hmac +import json +import logging +import os +import ssl +from datetime import datetime, UTC +from typing import Dict +from 
urllib.parse import urlencode, urlparse + +import websockets + +from setting.models_provider.base_model_provider import MaxKBBaseModel +from setting.models_provider.impl.base_stt import BaseSpeechToText + +STATUS_FIRST_FRAME = 0 # 第一帧的标识 +STATUS_CONTINUE_FRAME = 1 # 中间帧标识 +STATUS_LAST_FRAME = 2 # 最后一帧的标识 + +ssl_context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT) +ssl_context.check_hostname = False +ssl_context.verify_mode = ssl.CERT_NONE + +max_kb = logging.getLogger("max_kb") + + +class XFSparkSpeechToText(MaxKBBaseModel, BaseSpeechToText): + spark_app_id: str + spark_api_key: str + spark_api_secret: str + spark_api_url: str + + def __init__(self, **kwargs): + super().__init__(**kwargs) + self.spark_api_url = kwargs.get('spark_api_url') + self.spark_app_id = kwargs.get('spark_app_id') + self.spark_api_key = kwargs.get('spark_api_key') + self.spark_api_secret = kwargs.get('spark_api_secret') + + @staticmethod + def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs): + optional_params = {} + if 'max_tokens' in model_kwargs and model_kwargs['max_tokens'] is not None: + optional_params['max_tokens'] = model_kwargs['max_tokens'] + if 'temperature' in model_kwargs and model_kwargs['temperature'] is not None: + optional_params['temperature'] = model_kwargs['temperature'] + return XFSparkSpeechToText( + spark_app_id=model_credential.get('spark_app_id'), + spark_api_key=model_credential.get('spark_api_key'), + spark_api_secret=model_credential.get('spark_api_secret'), + spark_api_url=model_credential.get('spark_api_url'), + **optional_params + ) + + # 生成url + def create_url(self): + url = self.spark_api_url + host = urlparse(url).hostname + # 生成RFC1123格式的时间戳 + gmt_format = '%a, %d %b %Y %H:%M:%S GMT' + date = datetime.now(UTC).strftime(gmt_format) + + # 拼接字符串 + signature_origin = "host: " + host + "\n" + signature_origin += "date: " + date + "\n" + signature_origin += "GET " + "/v2/iat " + "HTTP/1.1" + # 进行hmac-sha256进行加密 + 
signature_sha = hmac.new(self.spark_api_secret.encode('utf-8'), signature_origin.encode('utf-8'), + digestmod=hashlib.sha256).digest() + signature_sha = base64.b64encode(signature_sha).decode(encoding='utf-8') + + authorization_origin = "api_key=\"%s\", algorithm=\"%s\", headers=\"%s\", signature=\"%s\"" % ( + self.spark_api_key, "hmac-sha256", "host date request-line", signature_sha) + authorization = base64.b64encode(authorization_origin.encode('utf-8')).decode(encoding='utf-8') + # 将请求的鉴权参数组合为字典 + v = { + "authorization": authorization, + "date": date, + "host": host + } + # 拼接鉴权参数,生成url + url = url + '?' + urlencode(v) + # print("date: ",date) + # print("v: ",v) + # 此处打印出建立连接时候的url,参考本demo的时候可取消上方打印的注释,比对相同参数时生成的url与自己代码生成的url是否一致 + # print('websocket url :', url) + return url + + def check_auth(self): + cwd = os.path.dirname(os.path.abspath(__file__)) + with open(f'{cwd}/iat_mp3_16k.mp3', 'rb') as f: + self.speech_to_text(f) + + def speech_to_text(self, file): + async def handle(): + async with websockets.connect(self.create_url(), max_size=1000000000, ssl=ssl_context) as ws: + # 发送 full client request + await self.send(ws, file) + return await self.handle_message(ws) + + return asyncio.run(handle()) + + @staticmethod + async def handle_message(ws): + res = await ws.recv() + message = json.loads(res) + code = message["code"] + sid = message["sid"] + if code != 0: + errMsg = message["message"] + raise Exception(f"sid: {sid} call error: {errMsg} code is: {code}") + else: + data = message["data"]["result"]["ws"] + result = "" + for i in data: + for w in i["cw"]: + result += w["w"] + # print("sid:%s call success!,data is:%s" % (sid, json.dumps(data, ensure_ascii=False))) + return result + + # 收到websocket连接建立的处理 + async def send(self, ws, file): + frameSize = 8000 # 每一帧的音频大小 + status = STATUS_FIRST_FRAME # 音频的状态信息,标识音频是第一帧,还是中间帧、最后一帧 + + while True: + buf = file.read(frameSize) + # 文件结束 + if not buf: + status = STATUS_LAST_FRAME + # 第一帧处理 + # 发送第一帧音频,带business 参数 + 
# appid 必须带上,只需第一帧发送 + if status == STATUS_FIRST_FRAME: + d = { + "common": {"app_id": self.spark_app_id}, + "business": { + "domain": "iat", + "language": "zh_cn", + "accent": "mandarin", + "vinfo": 1, + "vad_eos": 10000 + }, + "data": { + "status": 0, "format": "audio/L16;rate=16000", + "audio": str(base64.b64encode(buf), 'utf-8'), + "encoding": "lame"} + } + d = json.dumps(d) + await ws.send(d) + status = STATUS_CONTINUE_FRAME + # 中间帧处理 + elif status == STATUS_CONTINUE_FRAME: + d = {"data": {"status": 1, "format": "audio/L16;rate=16000", + "audio": str(base64.b64encode(buf), 'utf-8'), + "encoding": "lame"}} + await ws.send(json.dumps(d)) + # 最后一帧处理 + elif status == STATUS_LAST_FRAME: + d = {"data": {"status": 2, "format": "audio/L16;rate=16000", + "audio": str(base64.b64encode(buf), 'utf-8'), + "encoding": "lame"}} + await ws.send(json.dumps(d)) + break diff --git a/apps/setting/models_provider/impl/xf_model_provider/model/tts.py b/apps/setting/models_provider/impl/xf_model_provider/model/tts.py new file mode 100644 index 00000000000..1db2b83b31b --- /dev/null +++ b/apps/setting/models_provider/impl/xf_model_provider/model/tts.py @@ -0,0 +1,150 @@ +# -*- coding:utf-8 -*- +# +# author: iflytek +# +# 错误码链接:https://www.xfyun.cn/document/error-code (code返回错误码时必看) +# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # +import asyncio +import base64 +import datetime +import hashlib +import hmac +import json +import logging +import ssl +from datetime import datetime, UTC +from typing import Dict +from urllib.parse import urlencode, urlparse + +import websockets +from django.utils.translation import gettext as _ + +from common.util.common import _remove_empty_lines +from setting.models_provider.base_model_provider import MaxKBBaseModel +from setting.models_provider.impl.base_tts import BaseTextToSpeech + +max_kb = logging.getLogger("max_kb") + +STATUS_FIRST_FRAME = 0 # 第一帧的标识 +STATUS_CONTINUE_FRAME = 1 # 
中间帧标识 +STATUS_LAST_FRAME = 2 # 最后一帧的标识 + +ssl_context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT) +ssl_context.check_hostname = False +ssl_context.verify_mode = ssl.CERT_NONE + + +class XFSparkTextToSpeech(MaxKBBaseModel, BaseTextToSpeech): + spark_app_id: str + spark_api_key: str + spark_api_secret: str + spark_api_url: str + params: dict + + def __init__(self, **kwargs): + super().__init__(**kwargs) + self.spark_api_url = kwargs.get('spark_api_url') + self.spark_app_id = kwargs.get('spark_app_id') + self.spark_api_key = kwargs.get('spark_api_key') + self.spark_api_secret = kwargs.get('spark_api_secret') + self.params = kwargs.get('params') + + @staticmethod + def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs): + optional_params = {'params': {'vcn': 'xiaoyan', 'speed': 50}} + for key, value in model_kwargs.items(): + if key not in ['model_id', 'use_local', 'streaming']: + optional_params['params'][key] = value + return XFSparkTextToSpeech( + spark_app_id=model_credential.get('spark_app_id'), + spark_api_key=model_credential.get('spark_api_key'), + spark_api_secret=model_credential.get('spark_api_secret'), + spark_api_url=model_credential.get('spark_api_url'), + **optional_params + ) + + # 生成url + def create_url(self): + url = self.spark_api_url + host = urlparse(url).hostname + # 生成RFC1123格式的时间戳 + gmt_format = '%a, %d %b %Y %H:%M:%S GMT' + date = datetime.now(UTC).strftime(gmt_format) + + # 拼接字符串 + signature_origin = "host: " + host + "\n" + signature_origin += "date: " + date + "\n" + signature_origin += "GET " + "/v2/tts " + "HTTP/1.1" + # 进行hmac-sha256进行加密 + signature_sha = hmac.new(self.spark_api_secret.encode('utf-8'), signature_origin.encode('utf-8'), + digestmod=hashlib.sha256).digest() + signature_sha = base64.b64encode(signature_sha).decode(encoding='utf-8') + + authorization_origin = "api_key=\"%s\", algorithm=\"%s\", headers=\"%s\", signature=\"%s\"" % ( + self.spark_api_key, "hmac-sha256", "host date 
request-line", signature_sha) + authorization = base64.b64encode(authorization_origin.encode('utf-8')).decode(encoding='utf-8') + # 将请求的鉴权参数组合为字典 + v = { + "authorization": authorization, + "date": date, + "host": host + } + # 拼接鉴权参数,生成url + url = url + '?' + urlencode(v) + # print("date: ",date) + # print("v: ",v) + # 此处打印出建立连接时候的url,参考本demo的时候可取消上方打印的注释,比对相同参数时生成的url与自己代码生成的url是否一致 + # print('websocket url :', url) + return url + + def check_auth(self): + self.text_to_speech(_('Hello')) + + def text_to_speech(self, text): + + # 使用小语种须使用以下方式,此处的unicode指的是 utf16小端的编码方式,即"UTF-16LE"” + # self.Data = {"status": 2, "text": str(base64.b64encode(self.Text.encode('utf-16')), "UTF8")} + text = _remove_empty_lines(text) + + async def handle(): + async with websockets.connect(self.create_url(), max_size=1000000000, ssl=ssl_context) as ws: + # 发送 full client request + await self.send(ws, text) + return await self.handle_message(ws) + + return asyncio.run(handle()) + + def is_cache_model(self): + return False + + @staticmethod + async def handle_message(ws): + audio_bytes: bytes = b'' + while True: + res = await ws.recv() + message = json.loads(res) + # print(message) + code = message["code"] + sid = message["sid"] + + if code != 0: + errMsg = message["message"] + raise Exception(f"sid: {sid} call error: {errMsg} code is: {code}") + else: + audio = message["data"]["audio"] + audio = base64.b64decode(audio) + audio_bytes += audio + # 退出 + if message["data"]["status"] == 2: + break + return audio_bytes + + async def send(self, ws, text): + business = {"aue": "lame", "sfl": 1, "auf": "audio/L16;rate=16000", "tte": "utf8"} + d = { + "common": {"app_id": self.spark_app_id}, + "business": business | self.params, + "data": {"status": 2, "text": str(base64.b64encode(text.encode('utf-8')), "UTF8")}, + } + d = json.dumps(d) + await ws.send(d) diff --git a/apps/setting/models_provider/impl/xf_model_provider/model/xf_chat_model.py 
b/apps/setting/models_provider/impl/xf_model_provider/model/xf_chat_model.py deleted file mode 100644 index 3b6a22c4747..00000000000 --- a/apps/setting/models_provider/impl/xf_model_provider/model/xf_chat_model.py +++ /dev/null @@ -1,53 +0,0 @@ -# coding=utf-8 -""" - @project: maxkb - @Author:虎 - @file: __init__.py.py - @date:2024/04/19 15:55 - @desc: -""" - -from typing import List, Optional, Any, Iterator - -from langchain_community.chat_models import ChatSparkLLM -from langchain_community.chat_models.sparkllm import _convert_message_to_dict, _convert_delta_to_message_chunk -from langchain_core.callbacks import CallbackManagerForLLMRun -from langchain_core.messages import BaseMessage, AIMessageChunk, get_buffer_string -from langchain_core.outputs import ChatGenerationChunk - -from common.config.tokenizer_manage_config import TokenizerManage - - -class XFChatSparkLLM(ChatSparkLLM): - def get_num_tokens_from_messages(self, messages: List[BaseMessage]) -> int: - tokenizer = TokenizerManage.get_tokenizer() - return sum([len(tokenizer.encode(get_buffer_string([m]))) for m in messages]) - - def get_num_tokens(self, text: str) -> int: - tokenizer = TokenizerManage.get_tokenizer() - return len(tokenizer.encode(text)) - - def _stream( - self, - messages: List[BaseMessage], - stop: Optional[List[str]] = None, - run_manager: Optional[CallbackManagerForLLMRun] = None, - **kwargs: Any, - ) -> Iterator[ChatGenerationChunk]: - default_chunk_class = AIMessageChunk - - self.client.arun( - [_convert_message_to_dict(m) for m in messages], - self.spark_user_id, - self.model_kwargs, - True, - ) - for content in self.client.subscribe(timeout=self.request_timeout): - if "data" not in content: - continue - delta = content["data"] - chunk = _convert_delta_to_message_chunk(delta, default_chunk_class) - cg_chunk = ChatGenerationChunk(message=chunk) - if run_manager: - run_manager.on_llm_new_token(str(chunk.content), chunk=cg_chunk) - yield cg_chunk diff --git 
a/apps/setting/models_provider/impl/xf_model_provider/xf_model_provider.py b/apps/setting/models_provider/impl/xf_model_provider/xf_model_provider.py index 28059c5c69b..faf21144dc4 100644 --- a/apps/setting/models_provider/impl/xf_model_provider/xf_model_provider.py +++ b/apps/setting/models_provider/impl/xf_model_provider/xf_model_provider.py @@ -7,97 +7,62 @@ @desc: """ import os -from typing import Dict - -from langchain.schema import HumanMessage -from langchain_community.chat_models import ChatSparkLLM +import ssl -from common import forms -from common.exception.app_exception import AppApiException -from common.forms import BaseForm from common.util.file_util import get_file_content -from setting.models_provider.base_model_provider import ModelProvideInfo, ModelTypeConst, BaseModelCredential, \ - ModelInfo, IModelProvider, ValidCode -from setting.models_provider.impl.xf_model_provider.model.xf_chat_model import XFChatSparkLLM +from setting.models_provider.base_model_provider import ModelProvideInfo, ModelTypeConst, ModelInfo, IModelProvider, \ + ModelInfoManage +from setting.models_provider.impl.xf_model_provider.credential.embedding import XFEmbeddingCredential +from setting.models_provider.impl.xf_model_provider.credential.image import XunFeiImageModelCredential +from setting.models_provider.impl.xf_model_provider.credential.llm import XunFeiLLMModelCredential +from setting.models_provider.impl.xf_model_provider.credential.stt import XunFeiSTTModelCredential +from setting.models_provider.impl.xf_model_provider.credential.tts import XunFeiTTSModelCredential +from setting.models_provider.impl.xf_model_provider.model.embedding import XFEmbedding +from setting.models_provider.impl.xf_model_provider.model.image import XFSparkImage +from setting.models_provider.impl.xf_model_provider.model.llm import XFChatSparkLLM +from setting.models_provider.impl.xf_model_provider.model.stt import XFSparkSpeechToText +from 
setting.models_provider.impl.xf_model_provider.model.tts import XFSparkTextToSpeech from smartdoc.conf import PROJECT_DIR -import ssl +from django.utils.translation import gettext as _ ssl._create_default_https_context = ssl.create_default_context() - -class XunFeiLLMModelCredential(BaseForm, BaseModelCredential): - - def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], raise_exception=False): - model_type_list = XunFeiModelProvider().get_model_type_list() - if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))): - raise AppApiException(ValidCode.valid_error.value, f'{model_type} 模型类型不支持') - - for key in ['spark_api_url', 'spark_app_id', 'spark_api_key', 'spark_api_secret']: - if key not in model_credential: - if raise_exception: - raise AppApiException(ValidCode.valid_error.value, f'{key} 字段为必填字段') - else: - return False - try: - model = XunFeiModelProvider().get_model(model_type, model_name, model_credential) - model.invoke([HumanMessage(content='你好')]) - except Exception as e: - if isinstance(e, AppApiException): - raise e - if raise_exception: - raise AppApiException(ValidCode.valid_error.value, f'校验失败,请检查参数是否正确: {str(e)}') - else: - return False - return True - - def encryption_dict(self, model: Dict[str, object]): - return {**model, 'spark_api_secret': super().encryption(model.get('spark_api_secret', ''))} - - spark_api_url = forms.TextInputField('API 域名', required=True) - spark_app_id = forms.TextInputField('APP ID', required=True) - spark_api_key = forms.PasswordInputField("API Key", required=True) - spark_api_secret = forms.PasswordInputField('API Secret', required=True) - - qwen_model_credential = XunFeiLLMModelCredential() - -model_dict = { - 'generalv3.5': ModelInfo('generalv3.5', '', ModelTypeConst.LLM, qwen_model_credential), - 'generalv3': ModelInfo('generalv3', '', ModelTypeConst.LLM, qwen_model_credential), - 'generalv2': ModelInfo('generalv2', '', ModelTypeConst.LLM, 
qwen_model_credential) -} +stt_model_credential = XunFeiSTTModelCredential() +image_model_credential = XunFeiImageModelCredential() +tts_model_credential = XunFeiTTSModelCredential() +embedding_model_credential = XFEmbeddingCredential() +model_info_list = [ + ModelInfo('generalv3.5', '', ModelTypeConst.LLM, qwen_model_credential, XFChatSparkLLM), + ModelInfo('generalv3', '', ModelTypeConst.LLM, qwen_model_credential, XFChatSparkLLM), + ModelInfo('generalv2', '', ModelTypeConst.LLM, qwen_model_credential, XFChatSparkLLM), + ModelInfo('iat', _('Chinese and English recognition'), ModelTypeConst.STT, stt_model_credential, XFSparkSpeechToText), + ModelInfo('tts', '', ModelTypeConst.TTS, tts_model_credential, XFSparkTextToSpeech), + ModelInfo('embedding', '', ModelTypeConst.EMBEDDING, embedding_model_credential, XFEmbedding) +] + +model_info_manage = ( + ModelInfoManage.builder() + .append_model_info_list(model_info_list) + .append_default_model_info( + ModelInfo('generalv3.5', '', ModelTypeConst.LLM, qwen_model_credential, XFChatSparkLLM)) + .append_default_model_info( + ModelInfo('iat', _('Chinese and English recognition'), ModelTypeConst.STT, stt_model_credential, XFSparkSpeechToText), + ) + .append_default_model_info( + ModelInfo('tts', '', ModelTypeConst.TTS, tts_model_credential, XFSparkTextToSpeech)) + .append_default_model_info( + ModelInfo('embedding', '', ModelTypeConst.EMBEDDING, embedding_model_credential, XFEmbedding)) + .build() +) class XunFeiModelProvider(IModelProvider): - def get_dialogue_number(self): - return 3 - - def get_model(self, model_type, model_name, model_credential: Dict[str, object], **model_kwargs) -> XFChatSparkLLM: - zhipuai_chat = XFChatSparkLLM( - spark_app_id=model_credential.get('spark_app_id'), - spark_api_key=model_credential.get('spark_api_key'), - spark_api_secret=model_credential.get('spark_api_secret'), - spark_api_url=model_credential.get('spark_api_url'), - spark_llm_domain=model_name - ) - return zhipuai_chat - - def 
get_model_credential(self, model_type, model_name): - if model_name in model_dict: - return model_dict.get(model_name).model_credential - return qwen_model_credential + def get_model_info_manage(self): + return model_info_manage def get_model_provide_info(self): - return ModelProvideInfo(provider='model_xf_provider', name='讯飞星火', icon=get_file_content( + return ModelProvideInfo(provider='model_xf_provider', name=_('iFlytek Spark'), icon=get_file_content( os.path.join(PROJECT_DIR, "apps", "setting", 'models_provider', 'impl', 'xf_model_provider', 'icon', 'xf_icon_svg'))) - - def get_model_list(self, model_type: str): - if model_type is None: - raise AppApiException(500, '模型类型不能为空') - return [model_dict.get(key).to_dict() for key in - list(filter(lambda key: model_dict.get(key).model_type == model_type, model_dict.keys()))] - - def get_model_type_list(self): - return [{'key': "大语言模型", 'value': "LLM"}] diff --git a/apps/setting/models_provider/impl/xinference_model_provider/__init__.py b/apps/setting/models_provider/impl/xinference_model_provider/__init__.py new file mode 100644 index 00000000000..9bad5790a57 --- /dev/null +++ b/apps/setting/models_provider/impl/xinference_model_provider/__init__.py @@ -0,0 +1 @@ +# coding=utf-8 diff --git a/apps/setting/models_provider/impl/xinference_model_provider/credential/embedding.py b/apps/setting/models_provider/impl/xinference_model_provider/credential/embedding.py new file mode 100644 index 00000000000..e16319e7627 --- /dev/null +++ b/apps/setting/models_provider/impl/xinference_model_provider/credential/embedding.py @@ -0,0 +1,44 @@ +# coding=utf-8 +from typing import Dict + +from django.utils.translation import gettext as _ + +from common import forms +from common.exception.app_exception import AppApiException +from common.forms import BaseForm +from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode +from setting.models_provider.impl.local_model_provider.model.embedding import 
LocalEmbedding + + +class XinferenceEmbeddingModelCredential(BaseForm, BaseModelCredential): + def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], model_params, provider, + raise_exception=False): + model_type_list = provider.get_model_type_list() + if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))): + raise AppApiException(ValidCode.valid_error.value, + _('{model_type} Model type is not supported').format(model_type=model_type)) + try: + model_list = provider.get_base_model_list(model_credential.get('api_base'), model_credential.get('api_key'), + 'embedding') + except Exception as e: + raise AppApiException(ValidCode.valid_error.value, _('API domain name is invalid')) + exist = provider.get_model_info_by_name(model_list, model_name) + model: LocalEmbedding = provider.get_model(model_type, model_name, model_credential) + if len(exist) == 0: + model.start_down_model_thread() + raise AppApiException(ValidCode.model_not_fount, + _('The model does not exist, please download the model first')) + model.embed_query(_('Hello')) + return True + + def encryption_dict(self, model_info: Dict[str, object]): + return model_info + + def build_model(self, model_info: Dict[str, object]): + for key in ['model']: + if key not in model_info: + raise AppApiException(500, _('{key} is required').format(key=key)) + return self + + api_base = forms.TextInputField('API URL', required=True) + api_key = forms.PasswordInputField('API Key', required=True) diff --git a/apps/setting/models_provider/impl/xinference_model_provider/credential/image.py b/apps/setting/models_provider/impl/xinference_model_provider/credential/image.py new file mode 100644 index 00000000000..2cd59a6c47e --- /dev/null +++ b/apps/setting/models_provider/impl/xinference_model_provider/credential/image.py @@ -0,0 +1,70 @@ +# coding=utf-8 +from typing import Dict + +from django.utils.translation import gettext_lazy as _, gettext +from langchain_core.messages 
import HumanMessage + +from common import forms +from common.exception.app_exception import AppApiException +from common.forms import BaseForm, TooltipLabel +from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode + + +class XinferenceImageModelParams(BaseForm): + temperature = forms.SliderField(TooltipLabel(_('Temperature'), + _('Higher values make the output more random, while lower values make it more focused and deterministic')), + required=True, default_value=0.7, + _min=0.1, + _max=1.0, + _step=0.01, + precision=2) + + max_tokens = forms.SliderField( + TooltipLabel(_('Output the maximum Tokens'), + _('Specify the maximum number of tokens that the model can generate')), + required=True, default_value=800, + _min=1, + _max=100000, + _step=1, + precision=0) + + +class XinferenceImageModelCredential(BaseForm, BaseModelCredential): + api_base = forms.TextInputField('API URL', required=True) + api_key = forms.PasswordInputField('API Key', required=True) + + def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], model_params, provider, + raise_exception=False): + model_type_list = provider.get_model_type_list() + if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))): + raise AppApiException(ValidCode.valid_error.value, + gettext('{model_type} Model type is not supported').format(model_type=model_type)) + + for key in ['api_base', 'api_key']: + if key not in model_credential: + if raise_exception: + raise AppApiException(ValidCode.valid_error.value, gettext('{key} is required').format(key=key)) + else: + return False + try: + model = provider.get_model(model_type, model_name, model_credential, **model_params) + res = model.stream([HumanMessage(content=[{"type": "text", "text": gettext('Hello')}])]) + for chunk in res: + print(chunk) + except Exception as e: + if isinstance(e, AppApiException): + raise e + if raise_exception: + raise AppApiException(ValidCode.valid_error.value, 
+ gettext( + 'Verification failed, please check whether the parameters are correct: {error}').format( + error=str(e))) + else: + return False + return True + + def encryption_dict(self, model: Dict[str, object]): + return {**model, 'api_key': super().encryption(model.get('api_key', ''))} + + def get_model_params_setting_form(self, model_name): + return XinferenceImageModelParams() diff --git a/apps/setting/models_provider/impl/xinference_model_provider/credential/llm.py b/apps/setting/models_provider/impl/xinference_model_provider/credential/llm.py new file mode 100644 index 00000000000..cfc28b42a05 --- /dev/null +++ b/apps/setting/models_provider/impl/xinference_model_provider/credential/llm.py @@ -0,0 +1,67 @@ +# coding=utf-8 + +from typing import Dict + +from django.utils.translation import gettext_lazy as _, gettext +from langchain_core.messages import HumanMessage + +from common import forms +from common.exception.app_exception import AppApiException +from common.forms import BaseForm, TooltipLabel +from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode + + +class XinferenceLLMModelParams(BaseForm): + temperature = forms.SliderField(TooltipLabel(_('Temperature'), + _('Higher values make the output more random, while lower values make it more focused and deterministic')), + required=True, default_value=0.7, + _min=0.1, + _max=1.0, + _step=0.01, + precision=2) + + max_tokens = forms.SliderField( + TooltipLabel(_('Output the maximum Tokens'), + _('Specify the maximum number of tokens that the model can generate')), + required=True, default_value=800, + _min=1, + _max=100000, + _step=1, + precision=0) + + +class XinferenceLLMModelCredential(BaseForm, BaseModelCredential): + def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], model_params, provider, + raise_exception=False): + model_type_list = provider.get_model_type_list() + if not any(list(filter(lambda mt: mt.get('value') == model_type, 
model_type_list))): + raise AppApiException(ValidCode.valid_error.value, + gettext('{model_type} Model type is not supported').format(model_type=model_type)) + try: + model_list = provider.get_base_model_list(model_credential.get('api_base'), model_credential.get('api_key'), + model_type) + except Exception as e: + raise AppApiException(ValidCode.valid_error.value, gettext('API domain name is invalid')) + exist = provider.get_model_info_by_name(model_list, model_name) + if len(exist) == 0: + raise AppApiException(ValidCode.valid_error.value, + gettext('The model does not exist, please download the model first')) + model = provider.get_model(model_type, model_name, model_credential, **model_params) + model.invoke([HumanMessage(content=gettext('Hello'))]) + return True + + def encryption_dict(self, model_info: Dict[str, object]): + return {**model_info, 'api_key': super().encryption(model_info.get('api_key', ''))} + + def build_model(self, model_info: Dict[str, object]): + for key in ['api_key', 'model']: + if key not in model_info: + raise AppApiException(500, gettext('{key} is required').format(key=key)) + self.api_key = model_info.get('api_key') + return self + + api_base = forms.TextInputField('API URL', required=True) + api_key = forms.PasswordInputField('API Key', required=True) + + def get_model_params_setting_form(self, model_name): + return XinferenceLLMModelParams() diff --git a/apps/setting/models_provider/impl/xinference_model_provider/credential/reranker.py b/apps/setting/models_provider/impl/xinference_model_provider/credential/reranker.py new file mode 100644 index 00000000000..f1f3aca32d8 --- /dev/null +++ b/apps/setting/models_provider/impl/xinference_model_provider/credential/reranker.py @@ -0,0 +1,51 @@ +# coding=utf-8 +""" + @project: MaxKB + @Author:虎 + @file: reranker.py + @date:2024/9/10 9:46 + @desc: +""" +from typing import Dict + +from django.utils.translation import gettext as _ +from langchain_core.documents import Document + +from common 
import forms +from common.exception.app_exception import AppApiException +from common.forms import BaseForm +from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode + + +class XInferenceRerankerModelCredential(BaseForm, BaseModelCredential): + def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], model_params, provider, + raise_exception=True): + if not model_type == 'RERANKER': + raise AppApiException(ValidCode.valid_error.value, + _('{model_type} Model type is not supported').format(model_type=model_type)) + for key in ['server_url']: + if key not in model_credential: + if raise_exception: + raise AppApiException(ValidCode.valid_error.value, _('{key} is required').format(key=key)) + else: + return False + try: + model = provider.get_model(model_type, model_name, model_credential) + model.compress_documents([Document(page_content=_('Hello'))], _('Hello')) + except Exception as e: + if isinstance(e, AppApiException): + raise e + if raise_exception: + raise AppApiException(ValidCode.valid_error.value, + _('Verification failed, please check whether the parameters are correct: {error}').format( + error=str(e))) + else: + return False + return True + + def encryption_dict(self, model_info: Dict[str, object]): + return model_info + + server_url = forms.TextInputField('API URL', required=True) + + api_key = forms.PasswordInputField('API Key', required=False) diff --git a/apps/setting/models_provider/impl/xinference_model_provider/credential/stt.py b/apps/setting/models_provider/impl/xinference_model_provider/credential/stt.py new file mode 100644 index 00000000000..39a4b675bd6 --- /dev/null +++ b/apps/setting/models_provider/impl/xinference_model_provider/credential/stt.py @@ -0,0 +1,47 @@ +# coding=utf-8 +from typing import Dict + +from django.utils.translation import gettext as _ + +from common import forms +from common.exception.app_exception import AppApiException +from common.forms import BaseForm +from 
setting.models_provider.base_model_provider import BaseModelCredential, ValidCode + + +class XInferenceSTTModelCredential(BaseForm, BaseModelCredential): + api_base = forms.TextInputField('API URL', required=True) + api_key = forms.PasswordInputField('API Key', required=True) + + def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], model_params, provider, + raise_exception=False): + model_type_list = provider.get_model_type_list() + if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))): + raise AppApiException(ValidCode.valid_error.value, + _('{model_type} Model type is not supported').format(model_type=model_type)) + + for key in ['api_base', 'api_key']: + if key not in model_credential: + if raise_exception: + raise AppApiException(ValidCode.valid_error.value, _('{key} is required').format(key=key)) + else: + return False + try: + model = provider.get_model(model_type, model_name, model_credential) + model.check_auth() + except Exception as e: + if isinstance(e, AppApiException): + raise e + if raise_exception: + raise AppApiException(ValidCode.valid_error.value, + _('Verification failed, please check whether the parameters are correct: {error}').format( + error=str(e))) + else: + return False + return True + + def encryption_dict(self, model: Dict[str, object]): + return {**model, 'api_key': super().encryption(model.get('api_key', ''))} + + def get_model_params_setting_form(self, model_name): + pass diff --git a/apps/setting/models_provider/impl/xinference_model_provider/credential/tti.py b/apps/setting/models_provider/impl/xinference_model_provider/credential/tti.py new file mode 100644 index 00000000000..b1b2e6c2e45 --- /dev/null +++ b/apps/setting/models_provider/impl/xinference_model_provider/credential/tti.py @@ -0,0 +1,87 @@ +# coding=utf-8 +from typing import Dict + +from django.utils.translation import gettext_lazy as _, gettext + +from common import forms +from common.exception.app_exception 
import AppApiException +from common.forms import BaseForm, TooltipLabel +from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode + + +class XinferenceTTIModelParams(BaseForm): + size = forms.SingleSelect( + TooltipLabel(_('Image size'), + _('The image generation endpoint allows you to create raw images based on text prompts. The dimensions of the image can be 1024x1024, 1024x1792, or 1792x1024 pixels.')), + required=True, + default_value='1024x1024', + option_list=[ + {'value': '1024x1024', 'label': '1024x1024'}, + {'value': '1024x1792', 'label': '1024x1792'}, + {'value': '1792x1024', 'label': '1792x1024'}, + ], + text_field='label', + value_field='value' + ) + + quality = forms.SingleSelect( + TooltipLabel(_('Picture quality'), + _('By default, images are generated in standard quality, you can set quality: "hd" to enhance detail. Square, standard quality images are generated fastest.')), + required=True, + default_value='standard', + option_list=[ + {'value': 'standard', 'label': 'standard'}, + {'value': 'hd', 'label': 'hd'}, + ], + text_field='label', + value_field='value' + ) + + n = forms.SliderField( + TooltipLabel(_('Number of pictures'), + _('You can request 1 image at a time (requesting more images by making parallel requests), or up to 10 images at a time using the n parameter.')), + required=True, default_value=1, + _min=1, + _max=10, + _step=1, + precision=0) + + +class XinferenceTextToImageModelCredential(BaseForm, BaseModelCredential): + api_base = forms.TextInputField('API URL', required=True) + api_key = forms.PasswordInputField('API Key', required=True) + + def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], model_params, provider, + raise_exception=False): + model_type_list = provider.get_model_type_list() + if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))): + raise AppApiException(ValidCode.valid_error.value, + gettext('{model_type} Model type is not 
supported').format(model_type=model_type)) + + for key in ['api_base', 'api_key']: + if key not in model_credential: + if raise_exception: + raise AppApiException(ValidCode.valid_error.value, gettext('{key} is required').format(key=key)) + else: + return False + try: + model = provider.get_model(model_type, model_name, model_credential, **model_params) + res = model.check_auth() + print(res) + except Exception as e: + if isinstance(e, AppApiException): + raise e + if raise_exception: + raise AppApiException(ValidCode.valid_error.value, + gettext( + 'Verification failed, please check whether the parameters are correct: {error}').format( + error=str(e))) + else: + return False + return True + + def encryption_dict(self, model: Dict[str, object]): + return {**model, 'api_key': super().encryption(model.get('api_key', ''))} + + def get_model_params_setting_form(self, model_name): + return XinferenceTTIModelParams() diff --git a/apps/setting/models_provider/impl/xinference_model_provider/credential/tts.py b/apps/setting/models_provider/impl/xinference_model_provider/credential/tts.py new file mode 100644 index 00000000000..13bb9678015 --- /dev/null +++ b/apps/setting/models_provider/impl/xinference_model_provider/credential/tts.py @@ -0,0 +1,66 @@ +# coding=utf-8 +from typing import Dict + +from django.utils.translation import gettext_lazy as _, gettext + +from common import forms +from common.exception.app_exception import AppApiException +from common.forms import BaseForm, TooltipLabel +from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode + + +class XInferenceTTSModelGeneralParams(BaseForm): + # ['中文女', '中文男', '日语男', '粤语女', '英文女', '英文男', '韩语女'] + voice = forms.SingleSelect( + TooltipLabel(_('timbre'), ''), + required=True, default_value='中文女', + text_field='value', + value_field='value', + option_list=[ + {'text': _('Chinese female'), 'value': '中文女'}, + {'text': _('Chinese male'), 'value': '中文男'}, + {'text': _('Japanese male'), 
'value': '日语男'}, + {'text': _('Cantonese female'), 'value': '粤语女'}, + {'text': _('English female'), 'value': '英文女'}, + {'text': _('English male'), 'value': '英文男'}, + {'text': _('Korean female'), 'value': '韩语女'}, + ]) + + +class XInferenceTTSModelCredential(BaseForm, BaseModelCredential): + api_base = forms.TextInputField('API URL', required=True) + api_key = forms.PasswordInputField('API Key', required=True) + + def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], model_params, provider, + raise_exception=False): + model_type_list = provider.get_model_type_list() + if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))): + raise AppApiException(ValidCode.valid_error.value, + gettext('{model_type} Model type is not supported').format(model_type=model_type)) + + for key in ['api_base', 'api_key']: + if key not in model_credential: + if raise_exception: + raise AppApiException(ValidCode.valid_error.value, gettext('{key} is required').format(key=key)) + else: + return False + try: + model = provider.get_model(model_type, model_name, model_credential, **model_params) + model.check_auth() + except Exception as e: + if isinstance(e, AppApiException): + raise e + if raise_exception: + raise AppApiException(ValidCode.valid_error.value, + gettext( + 'Verification failed, please check whether the parameters are correct: {error}').format( + error=str(e))) + else: + return False + return True + + def encryption_dict(self, model: Dict[str, object]): + return {**model, 'api_key': super().encryption(model.get('api_key', ''))} + + def get_model_params_setting_form(self, model_name): + return XInferenceTTSModelGeneralParams() diff --git a/apps/setting/models_provider/impl/xinference_model_provider/icon/xinference_icon_svg b/apps/setting/models_provider/impl/xinference_model_provider/icon/xinference_icon_svg new file mode 100644 index 00000000000..fc553ee3ccd --- /dev/null +++ 
b/apps/setting/models_provider/impl/xinference_model_provider/icon/xinference_icon_svg @@ -0,0 +1,5 @@ + + + + diff --git a/apps/setting/models_provider/impl/xinference_model_provider/model/embedding.py b/apps/setting/models_provider/impl/xinference_model_provider/model/embedding.py new file mode 100644 index 00000000000..73abc14cd54 --- /dev/null +++ b/apps/setting/models_provider/impl/xinference_model_provider/model/embedding.py @@ -0,0 +1,91 @@ +# coding=utf-8 +import threading +from typing import Dict, Optional, List, Any + +from langchain_core.embeddings import Embeddings + +from setting.models_provider.base_model_provider import MaxKBBaseModel + + +class XinferenceEmbedding(MaxKBBaseModel, Embeddings): + client: Any + server_url: Optional[str] + """URL of the xinference server""" + model_uid: Optional[str] + """UID of the launched model""" + + @staticmethod + def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs): + return XinferenceEmbedding( + model_uid=model_name, + server_url=model_credential.get('api_base'), + api_key=model_credential.get('api_key'), + ) + + def down_model(self): + self.client.launch_model(model_name=self.model_uid, model_type="embedding") + + def start_down_model_thread(self): + thread = threading.Thread(target=self.down_model) + thread.daemon = True + thread.start() + + def __init__( + self, server_url: Optional[str] = None, model_uid: Optional[str] = None, + api_key: Optional[str] = None + ): + try: + from xinference.client import RESTfulClient + except ImportError: + try: + from xinference_client import RESTfulClient + except ImportError as e: + raise ImportError( + "Could not import RESTfulClient from xinference. Please install it" + " with `pip install xinference` or `pip install xinference_client`." 
+ ) from e + + if server_url is None: + raise ValueError("Please provide server URL") + + if model_uid is None: + raise ValueError("Please provide the model UID") + + self.server_url = server_url + + self.model_uid = model_uid + + self.api_key = api_key + + self.client = RESTfulClient(server_url, api_key) + + def embed_documents(self, texts: List[str]) -> List[List[float]]: + """Embed a list of documents using Xinference. + Args: + texts: The list of texts to embed. + Returns: + List of embeddings, one for each text. + """ + + model = self.client.get_model(self.model_uid) + + embeddings = [ + model.create_embedding(text)["data"][0]["embedding"] for text in texts + ] + return [list(map(float, e)) for e in embeddings] + + def embed_query(self, text: str) -> List[float]: + """Embed a query of documents using Xinference. + Args: + text: The text to embed. + Returns: + Embeddings for the text. + """ + + model = self.client.get_model(self.model_uid) + + embedding_res = model.create_embedding(text) + + embedding = embedding_res["data"][0]["embedding"] + + return list(map(float, embedding)) diff --git a/apps/setting/models_provider/impl/xinference_model_provider/model/image.py b/apps/setting/models_provider/impl/xinference_model_provider/model/image.py new file mode 100644 index 00000000000..66a766ba8c0 --- /dev/null +++ b/apps/setting/models_provider/impl/xinference_model_provider/model/image.py @@ -0,0 +1,35 @@ +from typing import Dict, List + +from langchain_core.messages import BaseMessage, get_buffer_string + +from common.config.tokenizer_manage_config import TokenizerManage +from setting.models_provider.base_model_provider import MaxKBBaseModel +from setting.models_provider.impl.base_chat_open_ai import BaseChatOpenAI + + +class XinferenceImage(MaxKBBaseModel, BaseChatOpenAI): + + @staticmethod + def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs): + optional_params = MaxKBBaseModel.filter_optional_params(model_kwargs) + 
return XinferenceImage( + model_name=model_name, + openai_api_base=model_credential.get('api_base'), + openai_api_key=model_credential.get('api_key'), + # stream_options={"include_usage": True}, + streaming=True, + stream_usage=True, + extra_body=optional_params + ) + + def get_num_tokens_from_messages(self, messages: List[BaseMessage]) -> int: + if self.usage_metadata is None or self.usage_metadata == {}: + tokenizer = TokenizerManage.get_tokenizer() + return sum([len(tokenizer.encode(get_buffer_string([m]))) for m in messages]) + return self.usage_metadata.get('input_tokens', 0) + + def get_num_tokens(self, text: str) -> int: + if self.usage_metadata is None or self.usage_metadata == {}: + tokenizer = TokenizerManage.get_tokenizer() + return len(tokenizer.encode(text)) + return self.get_last_generation_info().get('output_tokens', 0) diff --git a/apps/setting/models_provider/impl/xinference_model_provider/model/llm.py b/apps/setting/models_provider/impl/xinference_model_provider/model/llm.py new file mode 100644 index 00000000000..9c0316ad20a --- /dev/null +++ b/apps/setting/models_provider/impl/xinference_model_provider/model/llm.py @@ -0,0 +1,50 @@ +# coding=utf-8 + +from typing import Dict, List +from urllib.parse import urlparse, ParseResult + +from langchain_core.messages import BaseMessage, get_buffer_string + +from common.config.tokenizer_manage_config import TokenizerManage +from setting.models_provider.base_model_provider import MaxKBBaseModel +from setting.models_provider.impl.base_chat_open_ai import BaseChatOpenAI + + +def get_base_url(url: str): + parse = urlparse(url) + result_url = ParseResult(scheme=parse.scheme, netloc=parse.netloc, path=parse.path, params='', + query='', + fragment='').geturl() + return result_url[:-1] if result_url.endswith("/") else result_url + + +class XinferenceChatModel(MaxKBBaseModel, BaseChatOpenAI): + + @staticmethod + def is_cache_model(): + return False + + @staticmethod + def new_instance(model_type, model_name, 
model_credential: Dict[str, object], **model_kwargs): + api_base = model_credential.get('api_base', '') + base_url = get_base_url(api_base) + base_url = base_url if base_url.endswith('/v1') else (base_url + '/v1') + optional_params = MaxKBBaseModel.filter_optional_params(model_kwargs) + return XinferenceChatModel( + model=model_name, + openai_api_base=base_url, + openai_api_key=model_credential.get('api_key'), + extra_body=optional_params + ) + + def get_num_tokens_from_messages(self, messages: List[BaseMessage]) -> int: + if self.usage_metadata is None or self.usage_metadata == {}: + tokenizer = TokenizerManage.get_tokenizer() + return sum([len(tokenizer.encode(get_buffer_string([m]))) for m in messages]) + return self.usage_metadata.get('input_tokens', 0) + + def get_num_tokens(self, text: str) -> int: + if self.usage_metadata is None or self.usage_metadata == {}: + tokenizer = TokenizerManage.get_tokenizer() + return len(tokenizer.encode(text)) + return self.get_last_generation_info().get('output_tokens', 0) diff --git a/apps/setting/models_provider/impl/xinference_model_provider/model/reranker.py b/apps/setting/models_provider/impl/xinference_model_provider/model/reranker.py new file mode 100644 index 00000000000..28c8d267839 --- /dev/null +++ b/apps/setting/models_provider/impl/xinference_model_provider/model/reranker.py @@ -0,0 +1,57 @@ +# coding=utf-8 +""" + @project: MaxKB + @Author:虎 + @file: reranker.py + @date:2024/9/10 9:45 + @desc: +""" +from typing import Sequence, Optional, Any, Dict + +from langchain_core.callbacks import Callbacks +from langchain_core.documents import BaseDocumentCompressor, Document +from xinference_client.client.restful.restful_client import RESTfulRerankModelHandle + +from setting.models_provider.base_model_provider import MaxKBBaseModel + + +class XInferenceReranker(MaxKBBaseModel, BaseDocumentCompressor): + server_url: Optional[str] + """URL of the xinference server""" + model_uid: Optional[str] + """UID of the launched 
model""" + api_key: Optional[str] + + @staticmethod + def is_cache_model(): + return False + @staticmethod + def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs): + return XInferenceReranker(server_url=model_credential.get('server_url'), model_uid=model_name, + api_key=model_credential.get('api_key'), top_n=model_kwargs.get('top_n', 3)) + + top_n: Optional[int] = 3 + + def compress_documents(self, documents: Sequence[Document], query: str, callbacks: Optional[Callbacks] = None) -> \ + Sequence[Document]: + if documents is None or len(documents) == 0: + return [] + client: Any + if documents is None or len(documents) == 0: + return [] + try: + from xinference.client import RESTfulClient + except ImportError: + try: + from xinference_client import RESTfulClient + except ImportError as e: + raise ImportError( + "Could not import RESTfulClient from xinference. Please install it" + " with `pip install xinference` or `pip install xinference_client`." + ) from e + + client = RESTfulClient(self.server_url, self.api_key) + model: RESTfulRerankModelHandle = client.get_model(self.model_uid) + res = model.rerank([document.page_content for document in documents], query, self.top_n, return_documents=True) + return [Document(page_content=d.get('document', {}).get('text'), + metadata={'relevance_score': d.get('relevance_score')}) for d in res.get('results', [])] diff --git a/apps/setting/models_provider/impl/xinference_model_provider/model/stt.py b/apps/setting/models_provider/impl/xinference_model_provider/model/stt.py new file mode 100644 index 00000000000..ed0e98c3e1b --- /dev/null +++ b/apps/setting/models_provider/impl/xinference_model_provider/model/stt.py @@ -0,0 +1,57 @@ +import io +from typing import Dict + +from openai import OpenAI + +from common.config.tokenizer_manage_config import TokenizerManage +from setting.models_provider.base_model_provider import MaxKBBaseModel +from setting.models_provider.impl.base_stt import 
BaseSpeechToText + + +def custom_get_token_ids(text: str): + tokenizer = TokenizerManage.get_tokenizer() + return tokenizer.encode(text) + + +class XInferenceSpeechToText(MaxKBBaseModel, BaseSpeechToText): + api_base: str + api_key: str + model: str + + def __init__(self, **kwargs): + super().__init__(**kwargs) + self.api_key = kwargs.get('api_key') + self.api_base = kwargs.get('api_base') + + @staticmethod + def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs): + optional_params = {} + if 'max_tokens' in model_kwargs and model_kwargs['max_tokens'] is not None: + optional_params['max_tokens'] = model_kwargs['max_tokens'] + if 'temperature' in model_kwargs and model_kwargs['temperature'] is not None: + optional_params['temperature'] = model_kwargs['temperature'] + return XInferenceSpeechToText( + model=model_name, + api_base=model_credential.get('api_base'), + api_key=model_credential.get('api_key'), + **optional_params, + ) + + def check_auth(self): + client = OpenAI( + base_url=self.api_base, + api_key=self.api_key + ) + response_list = client.models.with_raw_response.list() + # print(response_list) + + def speech_to_text(self, audio_file): + client = OpenAI( + base_url=self.api_base, + api_key=self.api_key + ) + audio_data = audio_file.read() + buffer = io.BytesIO(audio_data) + buffer.name = "file.mp3" # this is the important line + res = client.audio.transcriptions.create(model=self.model, language="zh", file=buffer) + return res.text diff --git a/apps/setting/models_provider/impl/xinference_model_provider/model/tti.py b/apps/setting/models_provider/impl/xinference_model_provider/model/tti.py new file mode 100644 index 00000000000..e050c6bf79b --- /dev/null +++ b/apps/setting/models_provider/impl/xinference_model_provider/model/tti.py @@ -0,0 +1,63 @@ +import base64 +from typing import Dict + +from openai import OpenAI + +from common.config.tokenizer_manage_config import TokenizerManage +from common.util.common import 
bytes_to_uploaded_file +from dataset.serializers.file_serializers import FileSerializer +from setting.models_provider.base_model_provider import MaxKBBaseModel +from setting.models_provider.impl.base_tti import BaseTextToImage + + +def custom_get_token_ids(text: str): + tokenizer = TokenizerManage.get_tokenizer() + return tokenizer.encode(text) + + +class XinferenceTextToImage(MaxKBBaseModel, BaseTextToImage): + api_base: str + api_key: str + model: str + params: dict + + def __init__(self, **kwargs): + super().__init__(**kwargs) + self.api_key = kwargs.get('api_key') + self.api_base = kwargs.get('api_base') + self.model = kwargs.get('model') + self.params = kwargs.get('params') + + @staticmethod + def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs): + optional_params = {'params': {'size': '1024x1024', 'quality': 'standard', 'n': 1}} + for key, value in model_kwargs.items(): + if key not in ['model_id', 'use_local', 'streaming']: + optional_params['params'][key] = value + return XinferenceTextToImage( + model=model_name, + api_base=model_credential.get('api_base'), + api_key=model_credential.get('api_key'), + **optional_params, + ) + + def is_cache_model(self): + return False + + def check_auth(self): + self.generate_image('生成一个小猫图片') + + def generate_image(self, prompt: str, negative_prompt: str = None): + chat = OpenAI(api_key=self.api_key, base_url=self.api_base) + res = chat.images.generate(model=self.model, prompt=prompt, response_format='b64_json', **self.params) + file_urls = [] + # 临时文件 + for img in res.data: + file = bytes_to_uploaded_file(base64.b64decode(img.b64_json), 'file_name.jpg') + meta = { + 'debug': True, + } + file_url = FileSerializer(data={'file': file, 'meta': meta}).upload() + file_urls.append(f'http://localhost:8080{file_url}') + + return file_urls diff --git a/apps/setting/models_provider/impl/xinference_model_provider/model/tts.py 
b/apps/setting/models_provider/impl/xinference_model_provider/model/tts.py new file mode 100644 index 00000000000..416e6fab9fb --- /dev/null +++ b/apps/setting/models_provider/impl/xinference_model_provider/model/tts.py @@ -0,0 +1,61 @@ +from typing import Dict + +from openai import OpenAI + +from common.config.tokenizer_manage_config import TokenizerManage +from common.util.common import _remove_empty_lines +from setting.models_provider.base_model_provider import MaxKBBaseModel +from setting.models_provider.impl.base_tts import BaseTextToSpeech +from django.utils.translation import gettext as _ + + +def custom_get_token_ids(text: str): + tokenizer = TokenizerManage.get_tokenizer() + return tokenizer.encode(text) + + +class XInferenceTextToSpeech(MaxKBBaseModel, BaseTextToSpeech): + api_base: str + api_key: str + model: str + params: dict + + def __init__(self, **kwargs): + super().__init__(**kwargs) + self.api_key = kwargs.get('api_key') + self.api_base = kwargs.get('api_base') + self.model = kwargs.get('model') + self.params = kwargs.get('params') + + @staticmethod + def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs): + optional_params = {'params': {'voice': '中文女'}} + for key, value in model_kwargs.items(): + if key not in ['model_id', 'use_local', 'streaming']: + optional_params['params'][key] = value + return XInferenceTextToSpeech( + model=model_name, + api_base=model_credential.get('api_base'), + api_key=model_credential.get('api_key'), + **optional_params, + ) + + def check_auth(self): + self.text_to_speech(_('Hello')) + + def text_to_speech(self, text): + client = OpenAI( + base_url=self.api_base, + api_key=self.api_key + ) + # ['中文女', '中文男', '日语男', '粤语女', '英文女', '英文男', '韩语女'] + text = _remove_empty_lines(text) + with client.audio.speech.with_streaming_response.create( + model=self.model, + input=text, + **self.params + ) as response: + return response.read() + + def is_cache_model(self): + return False diff --git 
a/apps/setting/models_provider/impl/xinference_model_provider/xinference_model_provider.py b/apps/setting/models_provider/impl/xinference_model_provider/xinference_model_provider.py new file mode 100644 index 00000000000..e2da1e66a68 --- /dev/null +++ b/apps/setting/models_provider/impl/xinference_model_provider/xinference_model_provider.py @@ -0,0 +1,585 @@ +# coding=utf-8 +import os +from urllib.parse import urlparse, ParseResult + +import requests + +from common.util.file_util import get_file_content +from setting.models_provider.base_model_provider import IModelProvider, ModelProvideInfo, ModelInfo, ModelTypeConst, \ + ModelInfoManage +from setting.models_provider.impl.xinference_model_provider.credential.embedding import \ + XinferenceEmbeddingModelCredential +from setting.models_provider.impl.xinference_model_provider.credential.image import XinferenceImageModelCredential +from setting.models_provider.impl.xinference_model_provider.credential.llm import XinferenceLLMModelCredential +from setting.models_provider.impl.xinference_model_provider.credential.reranker import XInferenceRerankerModelCredential +from setting.models_provider.impl.xinference_model_provider.credential.stt import XInferenceSTTModelCredential +from setting.models_provider.impl.xinference_model_provider.credential.tti import XinferenceTextToImageModelCredential +from setting.models_provider.impl.xinference_model_provider.credential.tts import XInferenceTTSModelCredential +from setting.models_provider.impl.xinference_model_provider.model.embedding import XinferenceEmbedding +from setting.models_provider.impl.xinference_model_provider.model.image import XinferenceImage +from setting.models_provider.impl.xinference_model_provider.model.llm import XinferenceChatModel +from setting.models_provider.impl.xinference_model_provider.model.reranker import XInferenceReranker +from setting.models_provider.impl.xinference_model_provider.model.stt import XInferenceSpeechToText +from 
setting.models_provider.impl.xinference_model_provider.model.tti import XinferenceTextToImage +from setting.models_provider.impl.xinference_model_provider.model.tts import XInferenceTextToSpeech +from smartdoc.conf import PROJECT_DIR +from django.utils.translation import gettext as _ + +xinference_llm_model_credential = XinferenceLLMModelCredential() +xinference_stt_model_credential = XInferenceSTTModelCredential() +xinference_tts_model_credential = XInferenceTTSModelCredential() +xinference_image_model_credential = XinferenceImageModelCredential() +xinference_tti_model_credential = XinferenceTextToImageModelCredential() + +model_info_list = [ + ModelInfo( + 'code-llama', + _('Code Llama is a language model specifically designed for code generation.'), + ModelTypeConst.LLM, + xinference_llm_model_credential, + XinferenceChatModel + ), + ModelInfo( + 'code-llama-instruct', + _(''' +Code Llama Instruct is a fine-tuned version of Code Llama's instructions, designed to perform specific tasks. 
+ '''), + ModelTypeConst.LLM, + xinference_llm_model_credential, + XinferenceChatModel + ), + ModelInfo( + 'code-llama-python', + _('Code Llama Python is a language model specifically designed for Python code generation.'), + ModelTypeConst.LLM, + xinference_llm_model_credential, + XinferenceChatModel + ), + ModelInfo( + 'codeqwen1.5', + _('CodeQwen 1.5 is a language model for code generation with high performance.'), + ModelTypeConst.LLM, + xinference_llm_model_credential, + XinferenceChatModel + ), + ModelInfo( + 'codeqwen1.5-chat', + _('CodeQwen 1.5 Chat is a chat model version of CodeQwen 1.5.'), + ModelTypeConst.LLM, + xinference_llm_model_credential, + XinferenceChatModel + ), + ModelInfo( + 'deepseek', + _('Deepseek is a large-scale language model with 13 billion parameters.'), + ModelTypeConst.LLM, + xinference_llm_model_credential, + XinferenceChatModel + ), + ModelInfo( + 'deepseek-chat', + '', + ModelTypeConst.LLM, + xinference_llm_model_credential, + XinferenceChatModel + ), + ModelInfo( + 'deepseek-coder', + '', + ModelTypeConst.LLM, + xinference_llm_model_credential, + XinferenceChatModel + ), + ModelInfo( + 'deepseek-coder-instruct', + '', + ModelTypeConst.LLM, + xinference_llm_model_credential, + XinferenceChatModel + ), + ModelInfo( + 'deepseek-vl-chat', + '', + ModelTypeConst.LLM, + xinference_llm_model_credential, + XinferenceChatModel + ), + ModelInfo( + 'gpt-3.5-turbo', + '', + ModelTypeConst.LLM, + xinference_llm_model_credential, + XinferenceChatModel + ), + ModelInfo( + 'gpt-4', + '', + ModelTypeConst.LLM, + xinference_llm_model_credential, + XinferenceChatModel + ), + ModelInfo( + 'gpt-4-vision-preview', + '', + ModelTypeConst.LLM, + xinference_llm_model_credential, + XinferenceChatModel + ), + ModelInfo( + 'gpt4all', + '', + ModelTypeConst.LLM, + xinference_llm_model_credential, + XinferenceChatModel + ), + ModelInfo( + 'llama2', + '', + ModelTypeConst.LLM, + xinference_llm_model_credential, + XinferenceChatModel + ), + ModelInfo( + 
'llama2-chat', + '', + ModelTypeConst.LLM, + xinference_llm_model_credential, + XinferenceChatModel + ), + ModelInfo( + 'llama2-chat-32k', + '', + ModelTypeConst.LLM, + xinference_llm_model_credential, + XinferenceChatModel + ), + ModelInfo( + 'qwen', + '', + ModelTypeConst.LLM, + xinference_llm_model_credential, + XinferenceChatModel + ), + ModelInfo( + 'qwen-chat', + '', + ModelTypeConst.LLM, + xinference_llm_model_credential, + XinferenceChatModel + ), + ModelInfo( + 'qwen-chat-32k', + '', + ModelTypeConst.LLM, + xinference_llm_model_credential, + XinferenceChatModel + ), + ModelInfo( + 'qwen-code', + '', + ModelTypeConst.LLM, + xinference_llm_model_credential, + XinferenceChatModel + ), + ModelInfo( + 'qwen-code-chat', + '', + ModelTypeConst.LLM, + xinference_llm_model_credential, + XinferenceChatModel + ), + ModelInfo( + 'qwen-vl', + '', + ModelTypeConst.LLM, + xinference_llm_model_credential, + XinferenceChatModel + ), + ModelInfo( + 'qwen-vl-chat', + '', + ModelTypeConst.LLM, + xinference_llm_model_credential, + XinferenceChatModel + ), + ModelInfo( + 'qwen2-instruct', + '', + ModelTypeConst.LLM, + xinference_llm_model_credential, + XinferenceChatModel + ), + ModelInfo( + 'qwen2-72b-instruct', + '', + ModelTypeConst.LLM, + xinference_llm_model_credential, + XinferenceChatModel + ), + ModelInfo( + 'qwen2-57b-a14b-instruct', + '', + ModelTypeConst.LLM, + xinference_llm_model_credential, + XinferenceChatModel + ), + ModelInfo( + 'qwen2-7b-instruct', + '', + ModelTypeConst.LLM, + xinference_llm_model_credential, + XinferenceChatModel + ), + ModelInfo( + 'qwen2.5-72b-instruct', + '', + ModelTypeConst.LLM, + xinference_llm_model_credential, + XinferenceChatModel + ), + ModelInfo( + 'qwen2.5-32b-instruct', + '', + ModelTypeConst.LLM, + xinference_llm_model_credential, + XinferenceChatModel + ), + ModelInfo( + 'qwen2.5-14b-instruct', + '', + ModelTypeConst.LLM, + xinference_llm_model_credential, + XinferenceChatModel + ), + ModelInfo( + 'qwen2.5-7b-instruct', + '', 
+ ModelTypeConst.LLM, + xinference_llm_model_credential, + XinferenceChatModel + ), + ModelInfo( + 'qwen2.5-1.5b-instruct', + '', + ModelTypeConst.LLM, + xinference_llm_model_credential, + XinferenceChatModel + ), + ModelInfo( + 'qwen2.5-0.5b-instruct', + '', + ModelTypeConst.LLM, + xinference_llm_model_credential, + XinferenceChatModel + ), + ModelInfo( + 'qwen2.5-3b-instruct', + '', + ModelTypeConst.LLM, + xinference_llm_model_credential, + XinferenceChatModel + ), + ModelInfo( + 'minicpm-llama3-v-2_5', + '', + ModelTypeConst.LLM, + xinference_llm_model_credential, + XinferenceChatModel + ), +] + +voice_model_info = [ + ModelInfo( + 'CosyVoice-300M-SFT', + '', + ModelTypeConst.TTS, + xinference_tts_model_credential, + XInferenceTextToSpeech + ), + ModelInfo( + 'Belle-whisper-large-v3-zh', + '', + ModelTypeConst.STT, + xinference_stt_model_credential, + XInferenceSpeechToText + ), +] + +image_model_info = [ + ModelInfo( + 'qwen-vl-chat', + '', + ModelTypeConst.IMAGE, + xinference_image_model_credential, + XinferenceImage + ), + ModelInfo( + 'deepseek-vl-chat', + '', + ModelTypeConst.IMAGE, + xinference_image_model_credential, + XinferenceImage + ), + ModelInfo( + 'yi-vl-chat', + '', + ModelTypeConst.IMAGE, + xinference_image_model_credential, + XinferenceImage + ), + ModelInfo( + 'omnilmm', + '', + ModelTypeConst.IMAGE, + xinference_image_model_credential, + XinferenceImage + ), + ModelInfo( + 'internvl-chat', + '', + ModelTypeConst.IMAGE, + xinference_image_model_credential, + XinferenceImage + ), + ModelInfo( + 'cogvlm2', + '', + ModelTypeConst.IMAGE, + xinference_image_model_credential, + XinferenceImage + ), + ModelInfo( + 'MiniCPM-Llama3-V-2_5', + '', + ModelTypeConst.IMAGE, + xinference_image_model_credential, + XinferenceImage + ), + ModelInfo( + 'GLM-4V', + '', + ModelTypeConst.IMAGE, + xinference_image_model_credential, + XinferenceImage + ), + ModelInfo( + 'MiniCPM-V-2.6', + '', + ModelTypeConst.IMAGE, + xinference_image_model_credential, + 
XinferenceImage + ), + ModelInfo( + 'internvl2', + '', + ModelTypeConst.IMAGE, + xinference_image_model_credential, + XinferenceImage + ), + ModelInfo( + 'qwen2-vl-instruct', + '', + ModelTypeConst.IMAGE, + xinference_image_model_credential, + XinferenceImage + ), + ModelInfo( + 'llama-3.2-vision', + '', + ModelTypeConst.IMAGE, + xinference_image_model_credential, + XinferenceImage + ), + ModelInfo( + 'llama-3.2-vision-instruct', + '', + ModelTypeConst.IMAGE, + xinference_image_model_credential, + XinferenceImage + ), + ModelInfo( + 'glm-edge-v', + '', + ModelTypeConst.IMAGE, + xinference_image_model_credential, + XinferenceImage + ), +] + +tti_model_info = [ + ModelInfo( + 'sd-turbo', + '', + ModelTypeConst.TTI, + xinference_tti_model_credential, + XinferenceTextToImage + ), + ModelInfo( + 'sdxl-turbo', + '', + ModelTypeConst.TTI, + xinference_tti_model_credential, + XinferenceTextToImage + ), + ModelInfo( + 'stable-diffusion-v1.5', + '', + ModelTypeConst.TTI, + xinference_tti_model_credential, + XinferenceTextToImage + ), + ModelInfo( + 'stable-diffusion-xl-base-1.0', + '', + ModelTypeConst.TTI, + xinference_tti_model_credential, + XinferenceTextToImage + ), + ModelInfo( + 'sd3-medium', + '', + ModelTypeConst.TTI, + xinference_tti_model_credential, + XinferenceTextToImage + ), + ModelInfo( + 'FLUX.1-schnell', + '', + ModelTypeConst.TTI, + xinference_tti_model_credential, + XinferenceTextToImage + ), + ModelInfo( + 'FLUX.1-dev', + '', + ModelTypeConst.TTI, + xinference_tti_model_credential, + XinferenceTextToImage + ), +] + +xinference_embedding_model_credential = XinferenceEmbeddingModelCredential() + +# 生成embedding_model_info列表 +embedding_model_info = [ + ModelInfo('bce-embedding-base_v1', '', ModelTypeConst.EMBEDDING, + xinference_embedding_model_credential, XinferenceEmbedding), + ModelInfo('bge-base-en', '', ModelTypeConst.EMBEDDING, + xinference_embedding_model_credential, XinferenceEmbedding), + ModelInfo('bge-base-en-v1.5', '', ModelTypeConst.EMBEDDING, + 
xinference_embedding_model_credential, XinferenceEmbedding), + ModelInfo('bge-base-zh', '', ModelTypeConst.EMBEDDING, + xinference_embedding_model_credential, XinferenceEmbedding), + ModelInfo('bge-base-zh-v1.5', '', ModelTypeConst.EMBEDDING, + xinference_embedding_model_credential, XinferenceEmbedding), + ModelInfo('bge-large-en', '', ModelTypeConst.EMBEDDING, + xinference_embedding_model_credential, XinferenceEmbedding), + ModelInfo('bge-large-en-v1.5', '', ModelTypeConst.EMBEDDING, + xinference_embedding_model_credential, XinferenceEmbedding), + ModelInfo('bge-large-zh', '', ModelTypeConst.EMBEDDING, + xinference_embedding_model_credential, XinferenceEmbedding), + ModelInfo('bge-large-zh-noinstruct', '', ModelTypeConst.EMBEDDING, + xinference_embedding_model_credential, XinferenceEmbedding), + ModelInfo('bge-large-zh-v1.5', '', ModelTypeConst.EMBEDDING, + xinference_embedding_model_credential, XinferenceEmbedding), + ModelInfo('bge-m3', '', ModelTypeConst.EMBEDDING, xinference_embedding_model_credential, + XinferenceEmbedding), + ModelInfo('bge-small-en-v1.5', '', ModelTypeConst.EMBEDDING, + xinference_embedding_model_credential, XinferenceEmbedding), + ModelInfo('bge-small-zh', '', ModelTypeConst.EMBEDDING, + xinference_embedding_model_credential, XinferenceEmbedding), + ModelInfo('bge-small-zh-v1.5', '', ModelTypeConst.EMBEDDING, + xinference_embedding_model_credential, XinferenceEmbedding), + ModelInfo('e5-large-v2', '', ModelTypeConst.EMBEDDING, + xinference_embedding_model_credential, XinferenceEmbedding), + ModelInfo('gte-base', '', ModelTypeConst.EMBEDDING, xinference_embedding_model_credential, + XinferenceEmbedding), + ModelInfo('gte-large', '', ModelTypeConst.EMBEDDING, xinference_embedding_model_credential, + XinferenceEmbedding), + ModelInfo('jina-embeddings-v2-base-en', '', ModelTypeConst.EMBEDDING, + xinference_embedding_model_credential, XinferenceEmbedding), + ModelInfo('jina-embeddings-v2-base-zh', '', ModelTypeConst.EMBEDDING, + 
xinference_embedding_model_credential, XinferenceEmbedding), + ModelInfo('jina-embeddings-v2-small-en', '', ModelTypeConst.EMBEDDING, + xinference_embedding_model_credential, XinferenceEmbedding), + ModelInfo('m3e-base', '', ModelTypeConst.EMBEDDING, xinference_embedding_model_credential, + XinferenceEmbedding), + ModelInfo('m3e-large', '', ModelTypeConst.EMBEDDING, xinference_embedding_model_credential, + XinferenceEmbedding), + ModelInfo('m3e-small', '', ModelTypeConst.EMBEDDING, xinference_embedding_model_credential, + XinferenceEmbedding), + ModelInfo('multilingual-e5-large', '', ModelTypeConst.EMBEDDING, + xinference_embedding_model_credential, XinferenceEmbedding), + ModelInfo('text2vec-base-chinese', '', ModelTypeConst.EMBEDDING, + xinference_embedding_model_credential, XinferenceEmbedding), + ModelInfo('text2vec-base-chinese-paraphrase', '', ModelTypeConst.EMBEDDING, + xinference_embedding_model_credential, XinferenceEmbedding), + ModelInfo('text2vec-base-chinese-sentence', '', ModelTypeConst.EMBEDDING, + xinference_embedding_model_credential, XinferenceEmbedding), + ModelInfo('text2vec-base-multilingual', '', ModelTypeConst.EMBEDDING, + xinference_embedding_model_credential, XinferenceEmbedding), + ModelInfo('text2vec-large-chinese', '', ModelTypeConst.EMBEDDING, + xinference_embedding_model_credential, XinferenceEmbedding), +] +rerank_list = [ModelInfo('bce-reranker-base_v1', + '', + ModelTypeConst.RERANKER, XInferenceRerankerModelCredential(), XInferenceReranker)] +model_info_manage = ( + ModelInfoManage.builder() + .append_model_info_list(model_info_list) + .append_model_info_list(voice_model_info) + .append_default_model_info(voice_model_info[0]) + .append_default_model_info(voice_model_info[1]) + .append_default_model_info(ModelInfo('phi3', + '', + ModelTypeConst.LLM, xinference_llm_model_credential, + XinferenceChatModel)) + .append_model_info_list(embedding_model_info) + .append_default_model_info(ModelInfo('', + '', + ModelTypeConst.EMBEDDING, + 
xinference_embedding_model_credential, XinferenceEmbedding)) + .append_model_info_list(rerank_list) + .append_model_info_list(image_model_info) + .append_default_model_info(image_model_info[0]) + .append_model_info_list(tti_model_info) + .append_default_model_info(tti_model_info[0]) + .append_default_model_info(rerank_list[0]) + .build() +) + + +def get_base_url(url: str): + parse = urlparse(url) + result_url = ParseResult(scheme=parse.scheme, netloc=parse.netloc, path=parse.path, params='', + query='', + fragment='').geturl() + return result_url[:-1] if result_url.endswith("/") else result_url + + +class XinferenceModelProvider(IModelProvider): + def get_model_info_manage(self): + return model_info_manage + + def get_model_provide_info(self): + return ModelProvideInfo(provider='model_xinference_provider', name='Xorbits Inference', icon=get_file_content( + os.path.join(PROJECT_DIR, "apps", "setting", 'models_provider', 'impl', 'xinference_model_provider', 'icon', + 'xinference_icon_svg'))) + + @staticmethod + def get_base_model_list(api_base, api_key, model_type): + base_url = get_base_url(api_base) + base_url = base_url if base_url.endswith('/v1') else (base_url + '/v1') + headers = {} + if api_key: + headers['Authorization'] = f"Bearer {api_key}" + r = requests.request(method="GET", url=f"{base_url}/models", headers=headers, timeout=5) + r.raise_for_status() + model_list = r.json().get('data') + return [model for model in model_list if model.get('model_type') == model_type] + + @staticmethod + def get_model_info_by_name(model_list, model_name): + if model_list is None: + return [] + return [model for model in model_list if model.get('model_name') == model_name or model.get('id') == model_name] diff --git a/apps/setting/models_provider/impl/zhipu_model_provider/credential/image.py b/apps/setting/models_provider/impl/zhipu_model_provider/credential/image.py new file mode 100644 index 00000000000..599526df7eb --- /dev/null +++ 
b/apps/setting/models_provider/impl/zhipu_model_provider/credential/image.py @@ -0,0 +1,71 @@ +# coding=utf-8 +import traceback +from typing import Dict + +from django.utils.translation import gettext_lazy as _, gettext +from langchain_core.messages import HumanMessage + +from common import forms +from common.exception.app_exception import AppApiException +from common.forms import BaseForm, TooltipLabel +from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode + + +class ZhiPuImageModelParams(BaseForm): + temperature = forms.SliderField(TooltipLabel(_('Temperature'), + _('Higher values make the output more random, while lower values make it more focused and deterministic')), + required=True, default_value=0.95, + _min=0.1, + _max=1.0, + _step=0.01, + precision=2) + + max_tokens = forms.SliderField( + TooltipLabel(_('Output the maximum Tokens'), + _('Specify the maximum number of tokens that the model can generate')), + required=True, default_value=1024, + _min=1, + _max=100000, + _step=1, + precision=0) + + +class ZhiPuImageModelCredential(BaseForm, BaseModelCredential): + api_key = forms.PasswordInputField('API Key', required=True) + + def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], model_params, provider, + raise_exception=False): + model_type_list = provider.get_model_type_list() + if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))): + raise AppApiException(ValidCode.valid_error.value, + gettext('{model_type} Model type is not supported').format(model_type=model_type)) + + for key in ['api_key']: + if key not in model_credential: + if raise_exception: + raise AppApiException(ValidCode.valid_error.value, gettext('{key} is required').format(key=key)) + else: + return False + try: + model = provider.get_model(model_type, model_name, model_credential, **model_params) + res = model.stream([HumanMessage(content=[{"type": "text", "text": gettext('Hello')}])]) + for chunk 
in res: + print(chunk) + except Exception as e: + traceback.print_exc() + if isinstance(e, AppApiException): + raise e + if raise_exception: + raise AppApiException(ValidCode.valid_error.value, + gettext( + 'Verification failed, please check whether the parameters are correct: {error}').format( + error=str(e))) + else: + return False + return True + + def encryption_dict(self, model: Dict[str, object]): + return {**model, 'api_key': super().encryption(model.get('api_key', ''))} + + def get_model_params_setting_form(self, model_name): + return ZhiPuImageModelParams() diff --git a/apps/setting/models_provider/impl/zhipu_model_provider/credential/llm.py b/apps/setting/models_provider/impl/zhipu_model_provider/credential/llm.py new file mode 100644 index 00000000000..e6dc74d6260 --- /dev/null +++ b/apps/setting/models_provider/impl/zhipu_model_provider/credential/llm.py @@ -0,0 +1,76 @@ +# coding=utf-8 +""" + @project: MaxKB + @Author:虎 + @file: llm.py + @date:2024/7/12 10:46 + @desc: +""" +import traceback +from typing import Dict + +from django.utils.translation import gettext_lazy as _, gettext +from langchain_core.messages import HumanMessage + +from common import forms +from common.exception.app_exception import AppApiException +from common.forms import BaseForm, TooltipLabel +from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode + + +class ZhiPuLLMModelParams(BaseForm): + temperature = forms.SliderField(TooltipLabel(_('Temperature'), + _('Higher values make the output more random, while lower values make it more focused and deterministic')), + required=True, default_value=0.95, + _min=0.1, + _max=1.0, + _step=0.01, + precision=2) + + max_tokens = forms.SliderField( + TooltipLabel(_('Output the maximum Tokens'), + _('Specify the maximum number of tokens that the model can generate')), + required=True, default_value=1024, + _min=1, + _max=100000, + _step=1, + precision=0) + + +class ZhiPuLLMModelCredential(BaseForm, 
BaseModelCredential): + + def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], model_params, provider, + raise_exception=False): + model_type_list = provider.get_model_type_list() + if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))): + raise AppApiException(ValidCode.valid_error.value, + gettext('{model_type} Model type is not supported').format(model_type=model_type)) + for key in ['api_key']: + if key not in model_credential: + if raise_exception: + raise AppApiException(ValidCode.valid_error.value, gettext('{key} is required').format(key=key)) + else: + return False + try: + model = provider.get_model(model_type, model_name, model_credential, **model_params) + model.invoke([HumanMessage(content=gettext('Hello'))]) + except Exception as e: + traceback.print_exc() + if isinstance(e, AppApiException): + raise e + if raise_exception: + raise AppApiException(ValidCode.valid_error.value, + gettext( + 'Verification failed, please check whether the parameters are correct: {error}').format( + error=str(e))) + else: + return False + return True + + def encryption_dict(self, model: Dict[str, object]): + return {**model, 'api_key': super().encryption(model.get('api_key', ''))} + + api_key = forms.PasswordInputField('API Key', required=True) + + def get_model_params_setting_form(self, model_name): + return ZhiPuLLMModelParams() diff --git a/apps/setting/models_provider/impl/zhipu_model_provider/credential/tti.py b/apps/setting/models_provider/impl/zhipu_model_provider/credential/tti.py new file mode 100644 index 00000000000..38a88f26743 --- /dev/null +++ b/apps/setting/models_provider/impl/zhipu_model_provider/credential/tti.py @@ -0,0 +1,69 @@ +# coding=utf-8 +import traceback +from typing import Dict + +from django.utils.translation import gettext_lazy as _, gettext + +from common import forms +from common.exception.app_exception import AppApiException +from common.forms import BaseForm, TooltipLabel +from 
setting.models_provider.base_model_provider import BaseModelCredential, ValidCode + + +class ZhiPuTTIModelParams(BaseForm): + size = forms.SingleSelect( + TooltipLabel(_('Image size'), + _('Image size, only cogview-3-plus supports this parameter. Optional range: [1024x1024,768x1344,864x1152,1344x768,1152x864,1440x720,720x1440], the default is 1024x1024.')), + required=True, + default_value='1024x1024', + option_list=[ + {'value': '1024x1024', 'label': '1024x1024'}, + {'value': '768x1344', 'label': '768x1344'}, + {'value': '864x1152', 'label': '864x1152'}, + {'value': '1344x768', 'label': '1344x768'}, + {'value': '1152x864', 'label': '1152x864'}, + {'value': '1440x720', 'label': '1440x720'}, + {'value': '720x1440', 'label': '720x1440'}, + ], + text_field='label', + value_field='value') + + +class ZhiPuTextToImageModelCredential(BaseForm, BaseModelCredential): + api_key = forms.PasswordInputField('API Key', required=True) + + def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], model_params, provider, + raise_exception=False): + model_type_list = provider.get_model_type_list() + if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))): + raise AppApiException(ValidCode.valid_error.value, + gettext('{model_type} Model type is not supported').format(model_type=model_type)) + + for key in ['api_key']: + if key not in model_credential: + if raise_exception: + raise AppApiException(ValidCode.valid_error.value, gettext('{key} is required').format(key=key)) + else: + return False + try: + model = provider.get_model(model_type, model_name, model_credential, **model_params) + res = model.check_auth() + print(res) + except Exception as e: + traceback.print_exc() + if isinstance(e, AppApiException): + raise e + if raise_exception: + raise AppApiException(ValidCode.valid_error.value, + gettext( + 'Verification failed, please check whether the parameters are correct: {error}').format( + error=str(e))) + else: + return 
False + return True + + def encryption_dict(self, model: Dict[str, object]): + return {**model, 'api_key': super().encryption(model.get('api_key', ''))} + + def get_model_params_setting_form(self, model_name): + return ZhiPuTTIModelParams() diff --git a/apps/setting/models_provider/impl/zhipu_model_provider/model/image.py b/apps/setting/models_provider/impl/zhipu_model_provider/model/image.py new file mode 100644 index 00000000000..6ac7830d8ff --- /dev/null +++ b/apps/setting/models_provider/impl/zhipu_model_provider/model/image.py @@ -0,0 +1,20 @@ +from typing import Dict + +from setting.models_provider.base_model_provider import MaxKBBaseModel +from setting.models_provider.impl.base_chat_open_ai import BaseChatOpenAI + + +class ZhiPuImage(MaxKBBaseModel, BaseChatOpenAI): + + @staticmethod + def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs): + optional_params = MaxKBBaseModel.filter_optional_params(model_kwargs) + return ZhiPuImage( + model_name=model_name, + openai_api_key=model_credential.get('api_key'), + openai_api_base='https://open.bigmodel.cn/api/paas/v4', + # stream_options={"include_usage": True}, + streaming=True, + stream_usage=True, + extra_body=optional_params + ) diff --git a/apps/setting/models_provider/impl/zhipu_model_provider/model/llm.py b/apps/setting/models_provider/impl/zhipu_model_provider/model/llm.py new file mode 100644 index 00000000000..03699321c82 --- /dev/null +++ b/apps/setting/models_provider/impl/zhipu_model_provider/model/llm.py @@ -0,0 +1,107 @@ +# coding=utf-8 +""" + @project: maxkb + @Author:虎 + @file: llm.py + @date:2024/4/28 11:42 + @desc: +""" + +import json +from collections.abc import Iterator +from typing import Any, Dict, List, Optional + +from langchain_community.chat_models import ChatZhipuAI +from langchain_community.chat_models.zhipuai import _truncate_params, _get_jwt_token, connect_sse, \ + _convert_delta_to_message_chunk +from langchain_core.callbacks import ( + 
CallbackManagerForLLMRun, +) +from langchain_core.messages import ( + AIMessageChunk, + BaseMessage +) +from langchain_core.outputs import ChatGenerationChunk + +from setting.models_provider.base_model_provider import MaxKBBaseModel + + +class ZhipuChatModel(MaxKBBaseModel, ChatZhipuAI): + optional_params: dict + + @staticmethod + def is_cache_model(): + return False + + @staticmethod + def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs): + optional_params = MaxKBBaseModel.filter_optional_params(model_kwargs) + zhipuai_chat = ZhipuChatModel( + api_key=model_credential.get('api_key'), + model=model_name, + streaming=model_kwargs.get('streaming', False), + optional_params=optional_params, + **optional_params, + ) + return zhipuai_chat + + usage_metadata: dict = {} + + def get_last_generation_info(self) -> Optional[Dict[str, Any]]: + return self.usage_metadata + + def get_num_tokens_from_messages(self, messages: List[BaseMessage]) -> int: + return self.usage_metadata.get('prompt_tokens', 0) + + def get_num_tokens(self, text: str) -> int: + return self.usage_metadata.get('completion_tokens', 0) + + def _stream( + self, + messages: List[BaseMessage], + stop: Optional[List[str]] = None, + run_manager: Optional[CallbackManagerForLLMRun] = None, + **kwargs: Any, + ) -> Iterator[ChatGenerationChunk]: + """Stream the chat response in chunks.""" + if self.zhipuai_api_key is None: + raise ValueError("Did not find zhipuai_api_key.") + if self.zhipuai_api_base is None: + raise ValueError("Did not find zhipu_api_base.") + message_dicts, params = self._create_message_dicts(messages, stop) + payload = {**params, **kwargs, **self.optional_params, "messages": message_dicts, "stream": True} + _truncate_params(payload) + headers = { + "Authorization": _get_jwt_token(self.zhipuai_api_key), + "Accept": "application/json", + } + + default_chunk_class = AIMessageChunk + import httpx + + with httpx.Client(headers=headers, timeout=60) as client: + 
with connect_sse( + client, "POST", self.zhipuai_api_base, json=payload + ) as event_source: + for sse in event_source.iter_sse(): + chunk = json.loads(sse.data) + if len(chunk["choices"]) == 0: + continue + choice = chunk["choices"][0] + generation_info = {} + if "usage" in chunk: + generation_info = chunk["usage"] + self.usage_metadata = generation_info + chunk = _convert_delta_to_message_chunk( + choice["delta"], default_chunk_class + ) + finish_reason = choice.get("finish_reason", None) + + chunk = ChatGenerationChunk( + message=chunk, generation_info=generation_info + ) + yield chunk + if run_manager: + run_manager.on_llm_new_token(chunk.text, chunk=chunk) + if finish_reason is not None: + break diff --git a/apps/setting/models_provider/impl/zhipu_model_provider/model/tti.py b/apps/setting/models_provider/impl/zhipu_model_provider/model/tti.py new file mode 100644 index 00000000000..a3137a50e3e --- /dev/null +++ b/apps/setting/models_provider/impl/zhipu_model_provider/model/tti.py @@ -0,0 +1,69 @@ +from typing import Dict + +from django.utils.translation import gettext +from langchain_community.chat_models import ChatZhipuAI +from langchain_core.messages import HumanMessage +from zhipuai import ZhipuAI + +from common.config.tokenizer_manage_config import TokenizerManage +from setting.models_provider.base_model_provider import MaxKBBaseModel +from setting.models_provider.impl.base_tti import BaseTextToImage + + +def custom_get_token_ids(text: str): + tokenizer = TokenizerManage.get_tokenizer() + return tokenizer.encode(text) + + +class ZhiPuTextToImage(MaxKBBaseModel, BaseTextToImage): + api_key: str + model: str + params: dict + + def __init__(self, **kwargs): + super().__init__(**kwargs) + self.api_key = kwargs.get('api_key') + self.model = kwargs.get('model') + self.params = kwargs.get('params') + + @staticmethod + def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs): + optional_params = {'params': {'size': 
'1024x1024'}} + for key, value in model_kwargs.items(): + if key not in ['model_id', 'use_local', 'streaming']: + optional_params['params'][key] = value + return ZhiPuTextToImage( + model=model_name, + api_key=model_credential.get('api_key'), + **optional_params, + ) + + def is_cache_model(self): + return False + + def check_auth(self): + chat = ChatZhipuAI( + zhipuai_api_key=self.api_key, + model_name=self.model, + ) + chat.invoke([HumanMessage([{"type": "text", "text": gettext('Hello')}])]) + + # self.generate_image('生成一个小猫图片') + + def generate_image(self, prompt: str, negative_prompt: str = None): + # chat = ChatZhipuAI( + # zhipuai_api_key=self.api_key, + # model_name=self.model, + # ) + chat = ZhipuAI(api_key=self.api_key) + response = chat.images.generations( + model=self.model, # 填写需要调用的模型编码 + prompt=prompt, # 填写需要生成图片的文本 + **self.params # 填写额外参数 + ) + file_urls = [] + for content in response.data: + url = content.url + file_urls.append(url) + + return file_urls diff --git a/apps/setting/models_provider/impl/zhipu_model_provider/model/zhipu_chat_model.py b/apps/setting/models_provider/impl/zhipu_model_provider/model/zhipu_chat_model.py deleted file mode 100644 index ceab8988d92..00000000000 --- a/apps/setting/models_provider/impl/zhipu_model_provider/model/zhipu_chat_model.py +++ /dev/null @@ -1,24 +0,0 @@ -# coding=utf-8 -""" - @project: maxkb - @Author:虎 - @file: zhipu_chat_model.py - @date:2024/4/28 11:42 - @desc: -""" -from typing import List - -from langchain_community.chat_models import ChatZhipuAI -from langchain_core.messages import BaseMessage, get_buffer_string - -from common.config.tokenizer_manage_config import TokenizerManage - - -class ZhipuChatModel(ChatZhipuAI): - def get_num_tokens_from_messages(self, messages: List[BaseMessage]) -> int: - tokenizer = TokenizerManage.get_tokenizer() - return sum([len(tokenizer.encode(get_buffer_string([m]))) for m in messages]) - - def get_num_tokens(self, text: str) -> int: - tokenizer = 
TokenizerManage.get_tokenizer() - return len(tokenizer.encode(text)) diff --git a/apps/setting/models_provider/impl/zhipu_model_provider/zhipu_model_provider.py b/apps/setting/models_provider/impl/zhipu_model_provider/zhipu_model_provider.py index ebbb3b46934..fcaa5447bfb 100644 --- a/apps/setting/models_provider/impl/zhipu_model_provider/zhipu_model_provider.py +++ b/apps/setting/models_provider/impl/zhipu_model_provider/zhipu_model_provider.py @@ -7,88 +7,71 @@ @desc: """ import os -from typing import Dict -from langchain.schema import HumanMessage -from langchain_community.chat_models import ChatZhipuAI - -from common import forms -from common.exception.app_exception import AppApiException -from common.forms import BaseForm from common.util.file_util import get_file_content -from setting.models_provider.base_model_provider import ModelProvideInfo, ModelTypeConst, BaseModelCredential, \ - ModelInfo, IModelProvider, ValidCode -from setting.models_provider.impl.zhipu_model_provider.model.zhipu_chat_model import ZhipuChatModel +from setting.models_provider.base_model_provider import ModelProvideInfo, ModelTypeConst, ModelInfo, IModelProvider, \ + ModelInfoManage +from setting.models_provider.impl.zhipu_model_provider.credential.image import ZhiPuImageModelCredential +from setting.models_provider.impl.zhipu_model_provider.credential.llm import ZhiPuLLMModelCredential +from setting.models_provider.impl.zhipu_model_provider.credential.tti import ZhiPuTextToImageModelCredential +from setting.models_provider.impl.zhipu_model_provider.model.image import ZhiPuImage +from setting.models_provider.impl.zhipu_model_provider.model.llm import ZhipuChatModel +from setting.models_provider.impl.zhipu_model_provider.model.tti import ZhiPuTextToImage from smartdoc.conf import PROJECT_DIR - - -class ZhiPuLLMModelCredential(BaseForm, BaseModelCredential): - - def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], raise_exception=False): - model_type_list = 
ZhiPuModelProvider().get_model_type_list() - if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))): - raise AppApiException(ValidCode.valid_error.value, f'{model_type} 模型类型不支持') - for key in ['api_key']: - if key not in model_credential: - if raise_exception: - raise AppApiException(ValidCode.valid_error.value, f'{key} 字段为必填字段') - else: - return False - try: - model = ZhiPuModelProvider().get_model(model_type, model_name, model_credential) - model.invoke([HumanMessage(content='你好')]) - except Exception as e: - if isinstance(e, AppApiException): - raise e - if raise_exception: - raise AppApiException(ValidCode.valid_error.value, f'校验失败,请检查参数是否正确: {str(e)}') - else: - return False - return True - - def encryption_dict(self, model: Dict[str, object]): - return {**model, 'api_key': super().encryption(model.get('api_key', ''))} - - api_key = forms.PasswordInputField('API Key', required=True) - +from django.utils.translation import gettext as _ qwen_model_credential = ZhiPuLLMModelCredential() - -model_dict = { - 'glm-4': ModelInfo('glm-4', '', ModelTypeConst.LLM, qwen_model_credential), - 'glm-4v': ModelInfo('glm-4v', '', ModelTypeConst.LLM, qwen_model_credential), - 'glm-3-turbo': ModelInfo('glm-3-turbo', '', ModelTypeConst.LLM, qwen_model_credential) -} +zhipu_image_model_credential = ZhiPuImageModelCredential() +zhipu_tti_model_credential = ZhiPuTextToImageModelCredential() + +model_info_list = [ + ModelInfo('glm-4', '', ModelTypeConst.LLM, qwen_model_credential, ZhipuChatModel), + ModelInfo('glm-4v', '', ModelTypeConst.LLM, qwen_model_credential, ZhipuChatModel), + ModelInfo('glm-3-turbo', '', ModelTypeConst.LLM, qwen_model_credential, ZhipuChatModel) +] + +model_info_image_list = [ + ModelInfo('glm-4v-plus', _('Have strong multi-modal understanding capabilities. 
Able to understand up to five images simultaneously and supports video content understanding'), + ModelTypeConst.IMAGE, zhipu_image_model_credential, + ZhiPuImage), + ModelInfo('glm-4v', _('Focus on single picture understanding. Suitable for scenarios requiring efficient image analysis'), + ModelTypeConst.IMAGE, zhipu_image_model_credential, + ZhiPuImage), + ModelInfo('glm-4v-flash', _('Focus on single picture understanding. Suitable for scenarios requiring efficient image analysis (free)'), + ModelTypeConst.IMAGE, zhipu_image_model_credential, + ZhiPuImage), +] + +model_info_tti_list = [ + ModelInfo('cogview-3', _('Quickly and accurately generate images based on user text descriptions. Resolution supports 1024x1024'), + ModelTypeConst.TTI, zhipu_tti_model_credential, + ZhiPuTextToImage), + ModelInfo('cogview-3-plus', _('Generate high-quality images based on user text descriptions, supporting multiple image sizes'), + ModelTypeConst.TTI, zhipu_tti_model_credential, + ZhiPuTextToImage), + ModelInfo('cogview-3-flash', _('Generate high-quality images based on user text descriptions, supporting multiple image sizes (free)'), + ModelTypeConst.TTI, zhipu_tti_model_credential, + ZhiPuTextToImage), +] + +model_info_manage = ( + ModelInfoManage.builder() + .append_model_info_list(model_info_list) + .append_default_model_info(ModelInfo('glm-4', '', ModelTypeConst.LLM, qwen_model_credential, ZhipuChatModel)) + .append_model_info_list(model_info_image_list) + .append_default_model_info(model_info_image_list[0]) + .append_model_info_list(model_info_tti_list) + .append_default_model_info(model_info_tti_list[0]) + .build() +) class ZhiPuModelProvider(IModelProvider): - def get_dialogue_number(self): - return 3 - - def get_model(self, model_type, model_name, model_credential: Dict[str, object], **model_kwargs) -> ChatZhipuAI: - zhipuai_chat = ZhipuChatModel( - temperature=0.5, - api_key=model_credential.get('api_key'), - model=model_name - ) - return zhipuai_chat - - def 
get_model_credential(self, model_type, model_name): - if model_name in model_dict: - return model_dict.get(model_name).model_credential - return qwen_model_credential + def get_model_info_manage(self): + return model_info_manage def get_model_provide_info(self): - return ModelProvideInfo(provider='model_zhipu_provider', name='智谱AI', icon=get_file_content( + return ModelProvideInfo(provider='model_zhipu_provider', name=_('zhipu AI'), icon=get_file_content( os.path.join(PROJECT_DIR, "apps", "setting", 'models_provider', 'impl', 'zhipu_model_provider', 'icon', 'zhipuai_icon_svg'))) - - def get_model_list(self, model_type: str): - if model_type is None: - raise AppApiException(500, '模型类型不能为空') - return [model_dict.get(key).to_dict() for key in - list(filter(lambda key: model_dict.get(key).model_type == model_type, model_dict.keys()))] - - def get_model_type_list(self): - return [{'key': "大语言模型", 'value': "LLM"}] diff --git a/apps/setting/models_provider/tools.py b/apps/setting/models_provider/tools.py new file mode 100644 index 00000000000..150e3d40018 --- /dev/null +++ b/apps/setting/models_provider/tools.py @@ -0,0 +1,37 @@ +# coding=utf-8 +""" + @project: MaxKB + @Author:虎 + @file: tools.py + @date:2024/7/22 11:18 + @desc: +""" +from django.db import connection +from django.db.models import QuerySet + +from common.config.embedding_config import ModelManage +from setting.models import Model +from setting.models_provider import get_model +from django.utils.translation import gettext_lazy as _ + + +def get_model_by_id(_id, user_id): + model = QuerySet(Model).filter(id=_id).first() + # 手动关闭数据库连接 + connection.close() + if model is None: + raise Exception(_('Model does not exist')) + if model.permission_type == 'PRIVATE' and str(model.user_id) != str(user_id): + raise Exception(_('No permission to use this model') + f"{model.name}") + return model + + +def get_model_instance_by_model_user_id(model_id, user_id, **kwargs): + """ + 获取模型实例,根据模型相关数据 + @param model_id: 模型id + 
@param user_id: 用户id + @return: 模型实例 + """ + model = get_model_by_id(model_id, user_id) + return ModelManage.get_model(model_id, lambda _id: get_model(model, **kwargs)) diff --git a/apps/setting/serializers/model_apply_serializers.py b/apps/setting/serializers/model_apply_serializers.py new file mode 100644 index 00000000000..88609a18fd6 --- /dev/null +++ b/apps/setting/serializers/model_apply_serializers.py @@ -0,0 +1,76 @@ +# coding=utf-8 +""" + @project: MaxKB + @Author:虎 + @file: model_apply_serializers.py + @date:2024/8/20 20:39 + @desc: +""" +from django.db import connection +from django.db.models import QuerySet +from langchain_core.documents import Document +from rest_framework import serializers + +from common.config.embedding_config import ModelManage +from common.util.field_message import ErrMessage +from setting.models import Model +from setting.models_provider import get_model +from django.utils.translation import gettext_lazy as _ + +def get_embedding_model(model_id): + model = QuerySet(Model).filter(id=model_id).first() + # 手动关闭数据库连接 + connection.close() + embedding_model = ModelManage.get_model(model_id, + lambda _id: get_model(model, use_local=True)) + return embedding_model + + +class EmbedDocuments(serializers.Serializer): + texts = serializers.ListField(required=True, child=serializers.CharField(required=True, + error_messages=ErrMessage.char( + _('vector text'))), + error_messages=ErrMessage.list(_('vector text list'))) + + +class EmbedQuery(serializers.Serializer): + text = serializers.CharField(required=True, error_messages=ErrMessage.char(_('vector text'))) + + +class CompressDocument(serializers.Serializer): + page_content = serializers.CharField(required=True, error_messages=ErrMessage.char(_('text'))) + metadata = serializers.DictField(required=False, error_messages=ErrMessage.dict(_('metadata'))) + + +class CompressDocuments(serializers.Serializer): + documents = CompressDocument(required=True, many=True) + query = 
serializers.CharField(required=True, error_messages=ErrMessage.char(_('query'))) + + +class ModelApplySerializers(serializers.Serializer): + model_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid(_('model id'))) + + def embed_documents(self, instance, with_valid=True): + if with_valid: + self.is_valid(raise_exception=True) + EmbedDocuments(data=instance).is_valid(raise_exception=True) + + model = get_embedding_model(self.data.get('model_id')) + return model.embed_documents(instance.getlist('texts')) + + def embed_query(self, instance, with_valid=True): + if with_valid: + self.is_valid(raise_exception=True) + EmbedQuery(data=instance).is_valid(raise_exception=True) + + model = get_embedding_model(self.data.get('model_id')) + return model.embed_query(instance.get('text')) + + def compress_documents(self, instance, with_valid=True): + if with_valid: + self.is_valid(raise_exception=True) + CompressDocuments(data=instance).is_valid(raise_exception=True) + model = get_embedding_model(self.data.get('model_id')) + return [{'page_content': d.page_content, 'metadata': d.metadata} for d in model.compress_documents( + [Document(page_content=document.get('page_content'), metadata=document.get('metadata')) for document in + instance.get('documents')], instance.get('query'))] diff --git a/apps/setting/serializers/provider_serializers.py b/apps/setting/serializers/provider_serializers.py index 351a98c8a80..9f16901627e 100644 --- a/apps/setting/serializers/provider_serializers.py +++ b/apps/setting/serializers/provider_serializers.py @@ -7,21 +7,35 @@ @desc: """ import json +import re import threading import time import uuid from typing import Dict -from django.db.models import QuerySet +from django.core import validators +from django.db.models import QuerySet, Q from rest_framework import serializers from application.models import Application +from common.config.embedding_config import ModelManage from common.exception.app_exception import AppApiException 
from common.util.field_message import ErrMessage from common.util.rsa_util import rsa_long_decrypt, rsa_long_encrypt -from setting.models.model_management import Model, Status +from dataset.models import DataSet +from setting.models.model_management import Model, Status, PermissionType +from setting.models_provider import get_model, get_model_credential from setting.models_provider.base_model_provider import ValidCode, DownModelChunkStatus from setting.models_provider.constants.model_provider_constants import ModelProvideConstants +from django.utils.translation import gettext_lazy as _ + + +def get_default_model_params_setting(provider, model_type, model_name): + credential = get_model_credential(provider, model_type, model_name) + setting_form = credential.get_model_params_setting_form(model_name) + if setting_form is not None: + return setting_form.to_form_list() + return [] class ModelPullManage: @@ -36,6 +50,9 @@ def pull(model: Model, credential: Dict): for chunk in response: down_model_chunk[chunk.digest] = chunk.to_dict() if time.time() - timestamp > 5: + model_new = QuerySet(Model).filter(id=model.id).first() + if model_new.status == Status.PAUSE_DOWNLOAD: + return QuerySet(Model).filter(id=model.id).update( meta={"down_model_chunk": list(down_model_chunk.values())}) timestamp = time.time() @@ -56,49 +73,76 @@ def pull(model: Model, credential: Dict): class ModelSerializer(serializers.Serializer): class Query(serializers.Serializer): - user_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid("用户id")) + user_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid(_('user id'))) + + name = serializers.CharField(required=False, max_length=64, + error_messages=ErrMessage.char(_('model name'))) - name = serializers.CharField(required=False, max_length=20, - error_messages=ErrMessage.char("模型名称")) + model_type = serializers.CharField(required=False, error_messages=ErrMessage.char(_('model type'))) - model_type = 
serializers.CharField(required=False, error_messages=ErrMessage.char("模型类型")) + model_name = serializers.CharField(required=False, error_messages=ErrMessage.char(_('model name'))) - model_name = serializers.CharField(required=False, error_messages=ErrMessage.char("基础模型")) + provider = serializers.CharField(required=False, error_messages=ErrMessage.char(_('provider'))) - provider = serializers.CharField(required=False, error_messages=ErrMessage.char("供应商")) + permission_type = serializers.CharField(required=False, error_messages=ErrMessage.char(_('permission type'))) + + create_user = serializers.CharField(required=False, error_messages=ErrMessage.char(_('create user'))) def list(self, with_valid): if with_valid: self.is_valid(raise_exception=True) user_id = self.data.get('user_id') name = self.data.get('name') - model_query_set = QuerySet(Model).filter(user_id=user_id) + create_user = self.data.get('create_user') + if create_user is not None: + # 当前用户能查看自己的模型,包括公开和私有的 + if create_user == user_id: + model_query_set = QuerySet(Model).filter(Q(user_id=create_user)) + # 当前用户能查看其他人的模型,只能查看公开的 + else: + model_query_set = QuerySet(Model).filter( + (Q(user_id=self.data.get('create_user')) & Q(permission_type='PUBLIC'))) + else: + model_query_set = QuerySet(Model).filter((Q(user_id=user_id) | Q(permission_type='PUBLIC'))) query_params = {} if name is not None: - query_params['name__contains'] = name + query_params['name__icontains'] = name if self.data.get('model_type') is not None: query_params['model_type'] = self.data.get('model_type') if self.data.get('model_name') is not None: query_params['model_name'] = self.data.get('model_name') if self.data.get('provider') is not None: query_params['provider'] = self.data.get('provider') + if self.data.get('permission_type') is not None: + query_params['permission_type'] = self.data.get('permission_type') return [ {'id': str(model.id), 'provider': model.provider, 'name': model.name, 'model_type': model.model_type, - 'model_name': 
model.model_name, 'status': model.status, 'meta': model.meta} for model in + 'model_name': model.model_name, 'status': model.status, 'meta': model.meta, + 'permission_type': model.permission_type, 'user_id': model.user_id, 'username': model.user.username} + for model in model_query_set.filter(**query_params).order_by("-create_time")] class Edit(serializers.Serializer): - user_id = serializers.CharField(required=False, error_messages=ErrMessage.uuid("用户id")) + user_id = serializers.CharField(required=False, error_messages=ErrMessage.uuid(_('user id'))) + + name = serializers.CharField(required=False, max_length=64, + error_messages=ErrMessage.char(_("model name"))) - name = serializers.CharField(required=False, max_length=20, - error_messages=ErrMessage.char("模型名称")) + model_type = serializers.CharField(required=False, error_messages=ErrMessage.char(_("model type"))) - model_type = serializers.CharField(required=False, error_messages=ErrMessage.char("模型类型")) + permission_type = serializers.CharField(required=False, error_messages=ErrMessage.char(_("permission type")), + validators=[ + validators.RegexValidator(regex=re.compile("^PUBLIC|PRIVATE$"), + message=_( + "permissions only supportPUBLIC|PRIVATE"), + code=500) + ]) - model_name = serializers.CharField(required=False, error_messages=ErrMessage.char("模型类型")) + model_name = serializers.CharField(required=False, error_messages=ErrMessage.char(_("model type"))) - credential = serializers.DictField(required=False, error_messages=ErrMessage.dict("认证信息")) + credential = serializers.DictField(required=False, + error_messages=ErrMessage.dict(_("certification information"))) def is_valid(self, model=None, raise_exception=False): super().is_valid(raise_exception=True) @@ -115,43 +159,55 @@ def is_valid(self, model=None, raise_exception=False): model_name = self.data.get( 'model_name') credential = self.data.get('credential') - + provider_handler = ModelProvideConstants[provider].value model_credential = 
ModelProvideConstants[provider].value.get_model_credential(model_type, model_name) source_model_credential = json.loads(rsa_long_decrypt(model.credential)) source_encryption_model_credential = model_credential.encryption_dict(source_model_credential) if credential is not None: for k in source_encryption_model_credential.keys(): - if credential[k] == source_encryption_model_credential[k]: + if k in credential and credential[k] == source_encryption_model_credential[k]: credential[k] = source_model_credential[k] - return credential, model_credential + return credential, model_credential, provider_handler class Create(serializers.Serializer): - user_id = serializers.CharField(required=True, error_messages=ErrMessage.uuid("用户id")) + user_id = serializers.CharField(required=True, error_messages=ErrMessage.uuid(_("user id"))) + + name = serializers.CharField(required=True, max_length=64, error_messages=ErrMessage.char(_("model name"))) + + provider = serializers.CharField(required=True, error_messages=ErrMessage.char(_("provider"))) - name = serializers.CharField(required=True, max_length=20, error_messages=ErrMessage.char("模型名称")) + model_type = serializers.CharField(required=True, error_messages=ErrMessage.char(_("model type"))) - provider = serializers.CharField(required=True, error_messages=ErrMessage.char("供应商")) + permission_type = serializers.CharField(required=True, error_messages=ErrMessage.char(_("permission type")), + validators=[ + validators.RegexValidator(regex=re.compile("^PUBLIC|PRIVATE$"), + message=_( + "permissions only supportPUBLIC|PRIVATE"), + code=500) + ]) - model_type = serializers.CharField(required=True, error_messages=ErrMessage.char("模型类型")) + model_name = serializers.CharField(required=True, error_messages=ErrMessage.char(_("model name"))) - model_name = serializers.CharField(required=True, error_messages=ErrMessage.char("基础模型")) + model_params_form = serializers.ListField(required=False, default=list, + 
error_messages=ErrMessage.char(_("parameter configuration"))) - credential = serializers.DictField(required=True, error_messages=ErrMessage.dict("认证信息")) + credential = serializers.DictField(required=True, + error_messages=ErrMessage.dict(_("certification information"))) def is_valid(self, *, raise_exception=False): super().is_valid(raise_exception=True) if QuerySet(Model).filter(user_id=self.data.get('user_id'), name=self.data.get('name')).exists(): - raise AppApiException(500, f'模型名称【{self.data.get("name")}】已存在') - # 校验模型认证数据 - ModelProvideConstants[self.data.get('provider')].value.get_model_credential(self.data.get('model_type'), - self.data.get( - 'model_name')).is_valid( - self.data.get('model_type'), - self.data.get('model_name'), - self.data.get('credential'), - raise_exception=True) + raise AppApiException(500, _('Model name【{model_name}】already exists').format( + model_name=self.data.get("name"))) + default_params = {item['field']: item['default_value'] for item in self.data.get('model_params_form')} + ModelProvideConstants[self.data.get('provider')].value.is_valid_credential(self.data.get('model_type'), + self.data.get('model_name'), + self.data.get('credential'), + default_params, + raise_exception=True + ) def insert(self, user_id, with_valid=False): status = Status.SUCCESS @@ -168,10 +224,14 @@ def insert(self, user_id, with_valid=False): provider = self.data.get('provider') model_type = self.data.get('model_type') model_name = self.data.get('model_name') + permission_type = self.data.get('permission_type') + model_params_form = self.data.get('model_params_form') model_credential_str = json.dumps(credential) model = Model(id=uuid.uuid1(), status=status, user_id=user_id, name=name, credential=rsa_long_encrypt(model_credential_str), - provider=provider, model_type=model_type, model_name=model_name) + provider=provider, model_type=model_type, model_name=model_name, + model_params_form=model_params_form, + permission_type=permission_type) model.save() if 
status == Status.DOWNLOAD: thread = threading.Thread(target=ModelPullManage.pull, args=(model, credential)) @@ -187,12 +247,58 @@ def model_to_dict(model: Model): 'meta': model.meta, 'credential': ModelProvideConstants[model.provider].value.get_model_credential(model.model_type, model.model_name).encryption_dict( - credential)} + credential), + 'permission_type': model.permission_type} + + class ModelParams(serializers.Serializer): + id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid("模型id")) + + user_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid(_("user id"))) + + def is_valid(self, *, raise_exception=False): + super().is_valid(raise_exception=True) + model = QuerySet(Model).filter(id=self.data.get("id")).first() + if model is None: + raise AppApiException(500, '模型不存在') + if model.permission_type == PermissionType.PRIVATE and self.data.get('user_id') != str(model.user_id): + raise AppApiException(500, '没有权限访问到此模型') + + def get_model_params(self, with_valid=True): + if with_valid: + self.is_valid(raise_exception=True) + model_id = self.data.get('id') + model = QuerySet(Model).filter(id=model_id).first() + # 已经保存过的模型参数表单 + return model.model_params_form + + class ModelParamsForm(serializers.Serializer): + id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid("模型id")) + + user_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid(_("user id"))) + + def is_valid(self, *, raise_exception=False): + super().is_valid(raise_exception=True) + model = QuerySet(Model).filter(id=self.data.get("id")).first() + if model is None: + raise AppApiException(500, '模型不存在') + if model.permission_type == PermissionType.PRIVATE and self.data.get('user_id') != str(model.user_id): + raise AppApiException(500, '没有权限访问到此模型') + + def save_model_params_form(self, model_params_form, with_valid=True): + if with_valid: + self.is_valid(raise_exception=True) + if model_params_form is None: + model_params_form = [] 
+ model_id = self.data.get('id') + model = QuerySet(Model).filter(id=model_id).first() + model.model_params_form = model_params_form + model.save() + return True class Operate(serializers.Serializer): id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid("模型id")) - user_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid("用户id")) + user_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid(_("user id"))) def is_valid(self, *, raise_exception=False): super().is_valid(raise_exception=True) @@ -207,21 +313,53 @@ def one(self, with_valid=False): return ModelSerializer.model_to_dict(model) def one_meta(self, with_valid=False): + model = None if with_valid: - self.is_valid(raise_exception=True) - model = QuerySet(Model).get(id=self.data.get('id'), user_id=self.data.get('user_id')) + super().is_valid(raise_exception=True) + model = QuerySet(Model).filter(id=self.data.get("id")).first() + if model is None: + raise AppApiException(500, _('Model does not exist')) + if model.permission_type == 'PRIVATE' and str(model.user_id) != str(self.data.get("user_id")): + raise Exception(_('No permission to use this model') + f"{model.name}") + if model is None: + model = QuerySet(Model).get(id=self.data.get('id')) return {'id': str(model.id), 'provider': model.provider, 'name': model.name, 'model_type': model.model_type, 'model_name': model.model_name, 'status': model.status, - 'meta': model.meta, } + 'meta': model.meta + } def delete(self, with_valid=True): if with_valid: self.is_valid(raise_exception=True) - application_list = QuerySet(Application).filter(model_id=self.data.get('id')).all() - if len(application_list) > 0: - raise AppApiException(500, f"该模型关联了{len(application_list)} 个应用,无法删除该模型。") - QuerySet(Model).filter(id=self.data.get('id')).delete() + model_id = self.data.get('id') + model = Model.objects.filter(id=model_id).first() + if not model: + # 模型不存在,直接返回或抛出异常 + raise AppApiException(500, "模型不存在") + if 
model.model_type == 'LLM': + application_count = Application.objects.filter(model_id=model_id).count() + if application_count > 0: + raise AppApiException(500, f"该模型关联了{application_count} 个应用,无法删除该模型。") + elif model.model_type == 'EMBEDDING': + dataset_count = DataSet.objects.filter(embedding_mode_id=model_id).count() + if dataset_count > 0: + raise AppApiException(500, f"该模型关联了{dataset_count} 个知识库,无法删除该模型。") + elif model.model_type == 'TTS': + dataset_count = Application.objects.filter(tts_model_id=model_id).count() + if dataset_count > 0: + raise AppApiException(500, f"该模型关联了{dataset_count} 个应用,无法删除该模型。") + elif model.model_type == 'STT': + dataset_count = Application.objects.filter(stt_model_id=model_id).count() + if dataset_count > 0: + raise AppApiException(500, f"该模型关联了{dataset_count} 个应用,无法删除该模型。") + model.delete() + return True + + def pause_download(self, with_valid=True): + if with_valid: + self.is_valid(raise_exception=True) + QuerySet(Model).filter(id=self.data.get('id')).update(status=Status.PAUSE_DOWNLOAD) return True def edit(self, instance: Dict, user_id: str, with_valid=True): @@ -232,22 +370,25 @@ def edit(self, instance: Dict, user_id: str, with_valid=True): if model is None: raise AppApiException(500, '不存在的id') else: - credential, model_credential = ModelSerializer.Edit(data={**instance, 'user_id': user_id}).is_valid( + credential, model_credential, provider_handler = ModelSerializer.Edit( + data={**instance, 'user_id': user_id}).is_valid( model=model) try: model.status = Status.SUCCESS + default_params = {item['field']: item['default_value'] for item in model.model_params_form} # 校验模型认证数据 - model_credential.is_valid( - model.model_type, - instance.get("model_name"), - credential, - raise_exception=True) + provider_handler.is_valid_credential(model.model_type, + instance.get("model_name"), + credential, + default_params, + raise_exception=True) + except AppApiException as e: if e.code == ValidCode.model_not_fount: model.status = Status.DOWNLOAD 
else: raise e - update_keys = ['credential', 'name', 'model_type', 'model_name'] + update_keys = ['credential', 'name', 'model_type', 'model_name', 'permission_type'] for update_key in update_keys: if update_key in instance and instance.get(update_key) is not None: if update_key == 'credential': @@ -255,6 +396,8 @@ def edit(self, instance: Dict, user_id: str, with_valid=True): model.__setattr__(update_key, rsa_long_encrypt(model_credential_str)) else: model.__setattr__(update_key, instance.get(update_key)) + # 修改模型时候删除缓存 + ModelManage.delete_key(str(model.id)) model.save() if model.status == Status.DOWNLOAD: thread = threading.Thread(target=ModelPullManage.pull, args=(model, credential)) @@ -263,7 +406,7 @@ def edit(self, instance: Dict, user_id: str, with_valid=True): class ProviderSerializer(serializers.Serializer): - provider = serializers.CharField(required=True, error_messages=ErrMessage.char("供应商")) + provider = serializers.CharField(required=True, error_messages=ErrMessage.char(_("provider"))) method = serializers.CharField(required=True, error_messages=ErrMessage.char("执行函数名称")) diff --git a/apps/setting/serializers/system_setting.py b/apps/setting/serializers/system_setting.py index a66b15805f2..9f1525bbff3 100644 --- a/apps/setting/serializers/system_setting.py +++ b/apps/setting/serializers/system_setting.py @@ -13,6 +13,7 @@ from common.exception.app_exception import AppApiException from common.util.field_message import ErrMessage from setting.models.system_management import SystemSetting, SettingType +from django.utils.translation import gettext_lazy as _ class SystemSettingSerializer(serializers.Serializer): @@ -25,13 +26,13 @@ def one(): return system_setting.meta class Create(serializers.Serializer): - email_host = serializers.CharField(required=True, error_messages=ErrMessage.char("SMTP 主机")) - email_port = serializers.IntegerField(required=True, error_messages=ErrMessage.char("SMTP 端口")) - email_host_user = serializers.CharField(required=True, 
error_messages=ErrMessage.char("发件人邮箱")) - email_host_password = serializers.CharField(required=True, error_messages=ErrMessage.char("密码")) - email_use_tls = serializers.BooleanField(required=True, error_messages=ErrMessage.char("是否开启TLS")) - email_use_ssl = serializers.BooleanField(required=True, error_messages=ErrMessage.char("是否开启SSL")) - from_email = serializers.EmailField(required=True, error_messages=ErrMessage.char("发送人邮箱")) + email_host = serializers.CharField(required=True, error_messages=ErrMessage.char(_('SMTP host'))) + email_port = serializers.IntegerField(required=True, error_messages=ErrMessage.char(_('SMTP port'))) + email_host_user = serializers.CharField(required=True, error_messages=ErrMessage.char(_('Sender\'s email'))) + email_host_password = serializers.CharField(required=True, error_messages=ErrMessage.char(_('Password'))) + email_use_tls = serializers.BooleanField(required=True, error_messages=ErrMessage.char(_('Whether to enable TLS'))) + email_use_ssl = serializers.BooleanField(required=True, error_messages=ErrMessage.char(_('Whether to enable SSL'))) + from_email = serializers.EmailField(required=True, error_messages=ErrMessage.char(_('Sender\'s email'))) def is_valid(self, *, raise_exception=False): super().is_valid(raise_exception=True) @@ -45,7 +46,7 @@ def is_valid(self, *, raise_exception=False): self.data.get("email_use_ssl") ).open() except Exception as e: - raise AppApiException(1004, "邮箱校验失败") + raise AppApiException(1004, _('Email verification failed')) def update_or_save(self): self.is_valid(raise_exception=True) diff --git a/apps/setting/serializers/team_serializers.py b/apps/setting/serializers/team_serializers.py index 46266bb35db..0b264e88b29 100644 --- a/apps/setting/serializers/team_serializers.py +++ b/apps/setting/serializers/team_serializers.py @@ -29,6 +29,7 @@ from smartdoc.conf import PROJECT_DIR from users.models.user import User from users.serializers.user_serializers import UserSerializer +from 
django.utils.translation import gettext_lazy as _ user_cache = cache.caches['user_cache'] @@ -38,39 +39,39 @@ def get_response_body_api(): type=openapi.TYPE_OBJECT, required=['id', 'username', 'email', 'role', 'is_active', 'team_id', 'member_id'], properties={ - 'id': openapi.Schema(type=openapi.TYPE_STRING, title="用户id", description="用户id"), - 'username': openapi.Schema(type=openapi.TYPE_STRING, title="用户名", description="用户名"), - 'email': openapi.Schema(type=openapi.TYPE_STRING, title="邮箱", description="邮箱地址"), - 'role': openapi.Schema(type=openapi.TYPE_STRING, title="角色", description="角色"), - 'is_active': openapi.Schema(type=openapi.TYPE_STRING, title="是否可用", description="是否可用"), - 'team_id': openapi.Schema(type=openapi.TYPE_STRING, title="团队id", description="团队id"), - 'member_id': openapi.Schema(type=openapi.TYPE_STRING, title="成员id", description="成员id"), + 'id': openapi.Schema(type=openapi.TYPE_STRING, title=_('user id'), description=_('user id')), + 'username': openapi.Schema(type=openapi.TYPE_STRING, title=_('Username'), description=_('Username')), + 'email': openapi.Schema(type=openapi.TYPE_STRING, title=_('Email'), description=_('Email')), + 'role': openapi.Schema(type=openapi.TYPE_STRING, title=_('Role'), description=_('Role')), + 'is_active': openapi.Schema(type=openapi.TYPE_STRING, title=_('Is active'), description=_('Is active')), + 'team_id': openapi.Schema(type=openapi.TYPE_STRING, title=_('team id'), description=_('team id')), + 'member_id': openapi.Schema(type=openapi.TYPE_STRING, title=_('member id'), description=_('member id')), } ) class TeamMemberPermissionOperate(ApiMixin, serializers.Serializer): - USE = serializers.BooleanField(required=True, error_messages=ErrMessage.boolean("使用")) - MANAGE = serializers.BooleanField(required=True, error_messages=ErrMessage.boolean("管理")) + USE = serializers.BooleanField(required=True, error_messages=ErrMessage.boolean(_('use'))) + MANAGE = serializers.BooleanField(required=True, 
error_messages=ErrMessage.boolean(_('manage'))) def get_request_body_api(self): return openapi.Schema(type=openapi.TYPE_OBJECT, - title="类型", - description="操作权限USE,MANAGE权限", + title=_('type'), + description=_('Operation permissions USE, MANAGE permissions'), properties={ 'USE': openapi.Schema(type=openapi.TYPE_BOOLEAN, - title="使用权限", - description="使用权限 True|False"), + title=_('use permission'), + description=_('use permission True|False')), 'MANAGE': openapi.Schema(type=openapi.TYPE_BOOLEAN, - title="管理权限", - description="管理权限 True|False") + title=_('manage permission'), + description=_('manage permission True|False')) } ) class UpdateTeamMemberItemPermissionSerializer(ApiMixin, serializers.Serializer): - target_id = serializers.CharField(required=True, error_messages=ErrMessage.char("目标id")) - type = serializers.CharField(required=True, error_messages=ErrMessage.char("目标类型")) + target_id = serializers.CharField(required=True, error_messages=ErrMessage.char(_('target id'))) + type = serializers.CharField(required=True, error_messages=ErrMessage.char(_('type'))) operate = TeamMemberPermissionOperate(required=True, many=False) def get_request_body_api(self): @@ -78,10 +79,10 @@ def get_request_body_api(self): type=openapi.TYPE_OBJECT, required=['id', 'type', 'operate'], properties={ - 'target_id': openapi.Schema(type=openapi.TYPE_STRING, title="知识库/应用id", - description="知识库或者应用的id"), + 'target_id': openapi.Schema(type=openapi.TYPE_STRING, title=_('dataset id/application id'), + description=_('dataset id/application id')), 'type': openapi.Schema(type=openapi.TYPE_STRING, - title="类型", + title=_('type'), description="DATASET|APPLICATION", ), 'operate': TeamMemberPermissionOperate().get_request_body_api() @@ -100,7 +101,8 @@ def is_valid(self, *, user_id=None): os.path.join(PROJECT_DIR, "apps", "setting", 'sql', 'check_member_permission_target_exists.sql')), [json.dumps(permission_list), user_id, user_id]) if illegal_target_id_list is not None and 
len(illegal_target_id_list) > 0: - raise AppApiException(500, '不存在的 应用|知识库id[' + str(illegal_target_id_list) + ']') + raise AppApiException(500, + _('Non-existent application|knowledge base id[') + str(illegal_target_id_list) + ']') def update_or_save(self, member_id: str): team_member_permission_list = self.data.get("team_member_permission_list") @@ -134,8 +136,8 @@ def get_request_body_api(self): required=['id'], properties={ 'team_member_permission_list': - openapi.Schema(type=openapi.TYPE_ARRAY, title="权限数据", - description="权限数据", + openapi.Schema(type=openapi.TYPE_ARRAY, title=_('Permission data'), + description=_('Permission data'), items=UpdateTeamMemberItemPermissionSerializer().get_request_body_api() ), } @@ -143,7 +145,7 @@ def get_request_body_api(self): class TeamMemberSerializer(ApiMixin, serializers.Serializer): - team_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid("团队id")) + team_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid(_('team id'))) def is_valid(self, *, raise_exception=False): super().is_valid(raise_exception=True) @@ -152,8 +154,8 @@ def is_valid(self, *, raise_exception=False): def get_bach_request_body_api(): return openapi.Schema( type=openapi.TYPE_ARRAY, - title="用户id列表", - description="用户id列表", + title=_('user id list'), + description=_('user id list'), items=openapi.Schema(type=openapi.TYPE_STRING) ) @@ -163,12 +165,30 @@ def get_request_body_api(): type=openapi.TYPE_OBJECT, required=['username_or_email'], properties={ - 'username_or_email': openapi.Schema(type=openapi.TYPE_STRING, title="用户名或者邮箱", - description="用户名或者邮箱"), + 'username_or_email': openapi.Schema(type=openapi.TYPE_STRING, title=_('Username or email'), + description=_('Username or email')), } ) + @staticmethod + def get_response_body_api(): + return openapi.Schema( + type=openapi.TYPE_OBJECT, + properties={ + 'id': openapi.Schema(type=openapi.TYPE_STRING, title=_('user id'), description=_('user id')), + 'username': 
openapi.Schema(type=openapi.TYPE_STRING, title=_('Username'), description=_('Username')), + 'email': openapi.Schema(type=openapi.TYPE_STRING, title=_('Email'), description=_('Email')), + 'role': openapi.Schema(type=openapi.TYPE_STRING, title=_('Role'), description=_('Role')), + 'is_active': openapi.Schema(type=openapi.TYPE_STRING, title=_('Is active'), + description=_('Is active')), + 'team_id': openapi.Schema(type=openapi.TYPE_STRING, title=_('team id'), description=_('team id')), + 'user_id': openapi.Schema(type=openapi.TYPE_STRING, title=_('user id'), description=_('user id')), + 'type': openapi.Schema(type=openapi.TYPE_STRING, title=_('member type'), + description=_('member type manage|member')), + } + ) + @transaction.atomic def batch_add_member(self, user_id_list: List[str], with_valid=True): """ @@ -187,18 +207,20 @@ def batch_add_member(self, user_id_list: List[str], with_valid=True): create_team_member_list = [ self.to_member_model(add_user_id, team_member_user_id_list, use_user_id_list, team_id) for add_user_id in user_id_list] - QuerySet(TeamMember).bulk_create(create_team_member_list) if len(create_team_member_list) > 0 else None + QuerySet(TeamMember).bulk_create( + [team_member for team_member in create_team_member_list if team_member is not None]) if len( + create_team_member_list) > 0 else None return TeamMemberSerializer( data={'team_id': self.data.get("team_id")}).list_member() def to_member_model(self, add_user_id, team_member_user_id_list, use_user_id_list, user_id): if use_user_id_list.__contains__(add_user_id): if team_member_user_id_list.__contains__(add_user_id) or user_id == add_user_id: - raise AppApiException(500, "团队中已存在当前成员,不要重复添加") + return None else: return TeamMember(team_id=self.data.get("team_id"), user_id=add_user_id) else: - raise AppApiException(500, "不存在的用户") + return None def add_member(self, username_or_email: str, with_valid=True): """ @@ -210,14 +232,14 @@ def add_member(self, username_or_email: str, with_valid=True): if 
with_valid: self.is_valid(raise_exception=True) if username_or_email is None: - raise AppApiException(500, "用户名或者邮箱必填") + raise AppApiException(500, _('Username or email is required')) user = QuerySet(User).filter( Q(username=username_or_email) | Q(email=username_or_email)).first() if user is None: - raise AppApiException(500, "不存在的用户") + raise AppApiException(500, _('User does not exist')) if QuerySet(TeamMember).filter(Q(team_id=self.data.get('team_id')) & Q(user=user)).exists() or self.data.get( "team_id") == str(user.id): - raise AppApiException(500, "团队中已存在当前成员,不要重复添加") + raise AppApiException(500, _('The current members already exist in the team, do not add them again.')) TeamMember(team_id=self.data.get("team_id"), user=user).save() return self.list_member(with_valid=False) @@ -241,22 +263,22 @@ def list_member(self, with_valid=True): def get_response_body_api(self): return get_api_response(openapi.Schema( - type=openapi.TYPE_ARRAY, title="成员列表", description="成员列表", + type=openapi.TYPE_ARRAY, title=_('member list'), description=_('member list'), items=UserSerializer().get_response_body_api() )) class Operate(ApiMixin, serializers.Serializer): # 团队 成员id - member_id = serializers.CharField(required=True, error_messages=ErrMessage.char("成员id")) + member_id = serializers.CharField(required=True, error_messages=ErrMessage.char(_('member id'))) # 团队id - team_id = serializers.CharField(required=True, error_messages=ErrMessage.char("团队id")) + team_id = serializers.CharField(required=True, error_messages=ErrMessage.char(_('team id'))) def is_valid(self, *, raise_exception=True): super().is_valid(raise_exception=True) if self.data.get('member_id') != 'root' and not QuerySet(TeamMember).filter( team_id=self.data.get('team_id'), id=self.data.get('member_id')).exists(): - raise AppApiException(500, "不存在的成员,请先添加成员") + raise AppApiException(500, _('The member does not exist, please add a member first')) return True @@ -290,7 +312,7 @@ def edit(self, member_permission: 
Dict): self.is_valid(raise_exception=True) member_id = self.data.get("member_id") if member_id == 'root': - raise AppApiException(500, "管理员权限不允许修改") + raise AppApiException(500, _('Administrator rights do not allow modification')) s = UpdateTeamMemberPermissionSerializer(data=member_permission) s.is_valid(user_id=self.data.get("team_id")) s.update_or_save(member_id) @@ -304,7 +326,7 @@ def delete(self): self.is_valid(raise_exception=True) member_id = self.data.get("member_id") if member_id == 'root': - raise AppApiException(500, "无法移除团队管理员") + raise AppApiException(500, _('Unable to remove team admin')) # 删除成员权限 QuerySet(TeamMemberPermission).filter(member_id=member_id).delete() # 删除成员 @@ -317,4 +339,4 @@ def get_request_params_api(): in_=openapi.IN_PATH, type=openapi.TYPE_STRING, required=True, - description='团队成员id')] + description=_('member id')), ] diff --git a/apps/setting/serializers/valid_serializers.py b/apps/setting/serializers/valid_serializers.py new file mode 100644 index 00000000000..1ddd393b51e --- /dev/null +++ b/apps/setting/serializers/valid_serializers.py @@ -0,0 +1,55 @@ +# coding=utf-8 +""" + @project: MaxKB + @Author:虎 + @file: valid_serializers.py + @date:2024/7/8 18:00 + @desc: +""" +import re + +from django.core import validators +from django.db.models import QuerySet +from rest_framework import serializers + +from application.models import Application +from common.exception.app_exception import AppApiException +from common.models.db_model_manage import DBModelManage +from common.util.field_message import ErrMessage +from dataset.models import DataSet +from users.models import User +from django.utils.translation import gettext_lazy as _ + +model_message_dict = { + 'dataset': {'model': DataSet, 'count': 50, + 'message': _( + 'The community version supports up to 50 knowledge bases. 
If you need more knowledge bases, please contact us (https://fit2cloud.com/).')}, + 'application': {'model': Application, 'count': 5, + 'message': _( + 'The community version supports up to 5 applications. If you need more applications, please contact us (https://fit2cloud.com/).')}, + 'user': {'model': User, 'count': 2, + 'message': _( + 'The community version supports up to 2 users. If you need more users, please contact us (https://fit2cloud.com/).')} +} + + +class ValidSerializer(serializers.Serializer): + valid_type = serializers.CharField(required=True, error_messages=ErrMessage.char(_('type')), validators=[ + validators.RegexValidator(regex=re.compile("^application|dataset|user$"), + message="类型只支持:application|dataset|user", code=500) + ]) + valid_count = serializers.IntegerField(required=True, error_messages=ErrMessage.integer(_('check quantity'))) + + def valid(self, is_valid=True): + if is_valid: + self.is_valid(raise_exception=True) + model_value = model_message_dict.get(self.data.get('valid_type')) + xpack_cache = DBModelManage.get_model('xpack_cache') + is_license_valid = xpack_cache.get('XPACK_LICENSE_IS_VALID', False) if xpack_cache is not None else False + if not is_license_valid: + if self.data.get('valid_count') != model_value.get('count'): + raise AppApiException(400, model_value.get('message')) + if QuerySet( + model_value.get('model')).count() >= model_value.get('count'): + raise AppApiException(400, model_value.get('message')) + return True diff --git a/apps/setting/sql/get_member_permission.sql b/apps/setting/sql/get_member_permission.sql index f6b2d953fcc..17edd80ad7f 100644 --- a/apps/setting/sql/get_member_permission.sql +++ b/apps/setting/sql/get_member_permission.sql @@ -8,7 +8,8 @@ FROM "id", "name", 'DATASET' AS "type", - user_id + user_id, + "type" AS "icon" FROM dataset WHERE @@ -17,7 +18,8 @@ FROM "id", "name", 'APPLICATION' AS "type", - user_id + user_id, + "icon" AS "icon" FROM application WHERE diff --git 
a/apps/setting/swagger_api/provide_api.py b/apps/setting/swagger_api/provide_api.py index f68ac5be4d1..263b6c245c7 100644 --- a/apps/setting/swagger_api/provide_api.py +++ b/apps/setting/swagger_api/provide_api.py @@ -9,6 +9,7 @@ from drf_yasg import openapi from common.mixins.api_mixin import ApiMixin +from django.utils.translation import gettext_lazy as _ class ModelQueryApi(ApiMixin): @@ -18,20 +19,20 @@ def get_request_params_api(): in_=openapi.IN_QUERY, type=openapi.TYPE_STRING, required=False, - description='模型名称'), + description=_('name')), openapi.Parameter(name='model_type', in_=openapi.IN_QUERY, type=openapi.TYPE_STRING, required=False, - description='模型类型'), + description=_('model type')), openapi.Parameter(name='model_name', in_=openapi.IN_QUERY, type=openapi.TYPE_STRING, required=False, - description='基础模型名称'), + description=_('model name')), openapi.Parameter(name='provider', in_=openapi.IN_QUERY, type=openapi.TYPE_STRING, required=False, - description='供应名称') + description=_('provider')), ] @@ -39,22 +40,25 @@ class ModelEditApi(ApiMixin): @staticmethod def get_request_body_api(): return openapi.Schema(type=openapi.TYPE_OBJECT, - title="调用函数所需要的参数", - description="调用函数所需要的参数", + title=_('parameters required to call the function'), + description=_('parameters required to call the function'), required=['provide', 'model_info'], properties={ 'name': openapi.Schema(type=openapi.TYPE_STRING, - title="模型名称", - description="模型名称"), + title=_('name'), + description=_('name')), 'model_type': openapi.Schema(type=openapi.TYPE_STRING, - title="供应商", - description="供应商"), + title=_('model type'), + description=_('model type')), 'model_name': openapi.Schema(type=openapi.TYPE_STRING, - title="供应商", - description="供应商"), + title=_('model name'), + description=_('model name')), + 'provider': openapi.Schema(type=openapi.TYPE_STRING, + title=_('provider'), + description=_('provider')), 'credential': openapi.Schema(type=openapi.TYPE_OBJECT, - title="模型证书信息", - 
description="模型证书信息") + title=_('model certificate information'), + description=_('model certificate information')) } ) @@ -64,25 +68,28 @@ class ModelCreateApi(ApiMixin): @staticmethod def get_request_body_api(): return openapi.Schema(type=openapi.TYPE_OBJECT, - title="调用函数所需要的参数", - description="调用函数所需要的参数", + title=_('parameters required to call the function'), + description=_('parameters required to call the function'), required=['provide', 'model_info'], properties={ 'name': openapi.Schema(type=openapi.TYPE_STRING, - title="模型名称", - description="模型名称"), + title=_('name'), + description=_('name')), 'provider': openapi.Schema(type=openapi.TYPE_STRING, - title="供应商", - description="供应商"), + title=_('provider'), + description=_('provider')), + 'permission_type': openapi.Schema(type=openapi.TYPE_STRING, title=_('permission'), + description="PUBLIC|PRIVATE"), 'model_type': openapi.Schema(type=openapi.TYPE_STRING, - title="供应商", - description="供应商"), + title=_('model type'), + description=_('model type')), 'model_name': openapi.Schema(type=openapi.TYPE_STRING, - title="供应商", - description="供应商"), + title=_('model name'), + description=_('model name')), 'credential': openapi.Schema(type=openapi.TYPE_OBJECT, - title="模型证书信息", - description="模型证书信息") + title=_('model certificate information'), + description=_('model certificate information')), + } ) @@ -95,7 +102,7 @@ def get_request_params_api(): in_=openapi.IN_QUERY, type=openapi.TYPE_STRING, required=True, - description='供应名称'), + description=_('provider')), ] @staticmethod @@ -104,10 +111,10 @@ def get_response_body_api(): type=openapi.TYPE_OBJECT, required=['key', 'value'], properties={ - 'key': openapi.Schema(type=openapi.TYPE_STRING, title="模型类型描述", - description="模型类型描述", default="大语言模型"), - 'value': openapi.Schema(type=openapi.TYPE_STRING, title="模型类型值", - description="模型类型值", default="LLM"), + 'key': openapi.Schema(type=openapi.TYPE_STRING, title=_('model type description'), + description=_('model type 
description'), default=_('large language model')), + 'value': openapi.Schema(type=openapi.TYPE_STRING, title=_('model type value'), + description=_('model type value'), default="LLM"), } ) @@ -119,12 +126,12 @@ def get_request_params_api(): in_=openapi.IN_QUERY, type=openapi.TYPE_STRING, required=True, - description='供应名称'), + description=_('provider')), openapi.Parameter(name='model_type', in_=openapi.IN_QUERY, type=openapi.TYPE_STRING, required=True, - description='模型类型'), + description=_('model type')), ] @staticmethod @@ -133,12 +140,12 @@ def get_response_body_api(): type=openapi.TYPE_OBJECT, required=['name', 'desc', 'model_type'], properties={ - 'name': openapi.Schema(type=openapi.TYPE_STRING, title="模型名称", - description="模型名称", default="模型名称"), - 'desc': openapi.Schema(type=openapi.TYPE_STRING, title="模型描述", - description="模型描述", default="xxx模型"), - 'model_type': openapi.Schema(type=openapi.TYPE_STRING, title="模型类型值", - description="模型类型值", default="LLM"), + 'name': openapi.Schema(type=openapi.TYPE_STRING, title=_('name'), + description=_('name'), default=_('name')), + 'desc': openapi.Schema(type=openapi.TYPE_STRING, title=_('model description'), + description=_('model description')), + 'model_type': openapi.Schema(type=openapi.TYPE_STRING, title=_('model type value'), + description=_('model type value'), default="LLM"), } ) @@ -150,17 +157,17 @@ def get_request_params_api(): in_=openapi.IN_QUERY, type=openapi.TYPE_STRING, required=True, - description='供应名称'), + description=_('provider')), openapi.Parameter(name='model_type', in_=openapi.IN_QUERY, type=openapi.TYPE_STRING, required=True, - description='模型类型'), + description=_('model type')), openapi.Parameter(name='model_name', in_=openapi.IN_QUERY, type=openapi.TYPE_STRING, required=True, - description='模型名称'), + description=_('model name')), ] @staticmethod @@ -169,17 +176,17 @@ def get_request_params_api(): in_=openapi.IN_PATH, type=openapi.TYPE_STRING, required=True, - description='供应商'), + 
description=_('provider')), openapi.Parameter(name='method', in_=openapi.IN_PATH, type=openapi.TYPE_STRING, required=True, - description='需要执行的函数'), + description=_('function that needs to be executed')), ] @staticmethod def get_request_body_api(): return openapi.Schema(type=openapi.TYPE_OBJECT, - title="调用函数所需要的参数", - description="调用函数所需要的参数", + title=_('parameters required to call the function'), + description=_('parameters required to call the function'), ) diff --git a/apps/setting/swagger_api/system_setting.py b/apps/setting/swagger_api/system_setting.py index 1246ff27d3a..282c20d206f 100644 --- a/apps/setting/swagger_api/system_setting.py +++ b/apps/setting/swagger_api/system_setting.py @@ -9,69 +9,70 @@ from drf_yasg import openapi from common.mixins.api_mixin import ApiMixin +from django.utils.translation import gettext_lazy as _ class SystemSettingEmailApi(ApiMixin): @staticmethod def get_request_body_api(): return openapi.Schema(type=openapi.TYPE_OBJECT, - title="邮箱相关参数", - description="邮箱相关参数", + title=_('Email related parameters'), + description=_('Email related parameters'), required=['email_host', 'email_port', 'email_host_user', 'email_host_password', 'email_use_tls', 'email_use_ssl', 'from_email'], properties={ 'email_host': openapi.Schema(type=openapi.TYPE_STRING, - title="SMTP 主机", - description="SMTP 主机"), + title=_('SMTP host'), + description=_('SMTP host')), 'email_port': openapi.Schema(type=openapi.TYPE_NUMBER, - title="SMTP 端口", - description="SMTP 端口"), + title=_('SMTP port'), + description=_('SMTP port')), 'email_host_user': openapi.Schema(type=openapi.TYPE_STRING, - title="发件人邮箱", - description="发件人邮箱"), + title=_('Sender\'s email'), + description=_('Sender\'s email')), 'email_host_password': openapi.Schema(type=openapi.TYPE_STRING, - title="密码", - description="密码"), + title=_('Password'), + description=_('Password')), 'email_use_tls': openapi.Schema(type=openapi.TYPE_BOOLEAN, - title="是否开启TLS", - description="是否开启TLS"), + title=_('Whether 
to enable TLS'), + description=_('Whether to enable TLS')), 'email_use_ssl': openapi.Schema(type=openapi.TYPE_BOOLEAN, - title="是否开启SSL", - description="是否开启SSL"), + title=_('Whether to enable SSL'), + description=_('Whether to enable SSL')), 'from_email': openapi.Schema(type=openapi.TYPE_STRING, - title="发送人邮箱", - description="发送人邮箱") + title=_('Sender\'s email'), + description=_('Sender\'s email')) } ) @staticmethod def get_response_body_api(): return openapi.Schema(type=openapi.TYPE_OBJECT, - title="邮箱相关参数", - description="邮箱相关参数", + title=_('Email related parameters'), + description=_('Email related parameters'), required=['email_host', 'email_port', 'email_host_user', 'email_host_password', 'email_use_tls', 'email_use_ssl', 'from_email'], properties={ 'email_host': openapi.Schema(type=openapi.TYPE_STRING, - title="SMTP 主机", - description="SMTP 主机"), + title=_('SMTP host'), + description=_('SMTP host')), 'email_port': openapi.Schema(type=openapi.TYPE_NUMBER, - title="SMTP 端口", - description="SMTP 端口"), + title=_('SMTP port'), + description=_('SMTP port')), 'email_host_user': openapi.Schema(type=openapi.TYPE_STRING, - title="发件人邮箱", - description="发件人邮箱"), + title=_('Sender\'s email'), + description=_('Sender\'s email')), 'email_host_password': openapi.Schema(type=openapi.TYPE_STRING, - title="密码", - description="密码"), + title=_('Password'), + description=_('Password')), 'email_use_tls': openapi.Schema(type=openapi.TYPE_BOOLEAN, - title="是否开启TLS", - description="是否开启TLS"), + title=_('Whether to enable TLS'), + description=_('Whether to enable TLS')), 'email_use_ssl': openapi.Schema(type=openapi.TYPE_BOOLEAN, - title="是否开启SSL", - description="是否开启SSL"), + title=_('Whether to enable SSL'), + description=_('Whether to enable SSL')), 'from_email': openapi.Schema(type=openapi.TYPE_STRING, - title="发送人邮箱", - description="发送人邮箱") + title=_('Sender\'s email'), + description=_('Sender\'s email')) } ) diff --git a/apps/setting/swagger_api/valid_api.py 
b/apps/setting/swagger_api/valid_api.py new file mode 100644 index 00000000000..f5bc5c9a294 --- /dev/null +++ b/apps/setting/swagger_api/valid_api.py @@ -0,0 +1,28 @@ +# coding=utf-8 +""" + @project: MaxKB + @Author:虎 + @file: valid_api.py + @date:2024/7/8 17:52 + @desc: +""" +from drf_yasg import openapi + +from common.mixins.api_mixin import ApiMixin +from django.utils.translation import gettext_lazy as _ + + +class ValidApi(ApiMixin): + @staticmethod + def get_request_params_api(): + return [openapi.Parameter(name='valid_type', + in_=openapi.IN_PATH, + type=openapi.TYPE_STRING, + required=True, + description=_('Verification type: application|dataset|user')), + openapi.Parameter(name='valid_count', + in_=openapi.IN_PATH, + type=openapi.TYPE_STRING, + required=True, + description=_('check quantity')) + ] diff --git a/apps/setting/urls.py b/apps/setting/urls.py index 42fea74ec7d..73fe9ba12db 100644 --- a/apps/setting/urls.py +++ b/apps/setting/urls.py @@ -1,3 +1,5 @@ +import os + from django.urls import path from . 
import views @@ -12,11 +14,26 @@ path('provider/model_type_list', views.Provide.ModelTypeList.as_view(), name="provider/model_type_list"), path('provider/model_list', views.Provide.ModelList.as_view(), name="provider/model_name_list"), + path('provider/model_params_form', views.Provide.ModelParamsForm.as_view(), + name="provider/model_params_form"), path('provider/model_form', views.Provide.ModelForm.as_view(), name="provider/model_form"), path('model', views.Model.as_view(), name='model'), + path('model//model_params_form', views.Model.ModelParamsForm.as_view(), + name='model/model_params_form'), path('model/', views.Model.Operate.as_view(), name='model/operate'), + path('model//pause_download', views.Model.PauseDownload.as_view(), name='model/operate'), path('model//meta', views.Model.ModelMeta.as_view(), name='model/operate/meta'), - path('email_setting', views.SystemSetting.Email.as_view(), name='email_setting') + path('email_setting', views.SystemSetting.Email.as_view(), name='email_setting'), + path('valid//', views.Valid.as_view()) ] +if os.environ.get('SERVER_NAME', 'web') == 'local_model': + urlpatterns += [ + path('model//embed_documents', views.ModelApply.EmbedDocuments.as_view(), + name='model/embed_documents'), + path('model//embed_query', views.ModelApply.EmbedQuery.as_view(), + name='model/embed_query'), + path('model//compress_documents', views.ModelApply.CompressDocuments.as_view(), + name='model/embed_query'), + ] diff --git a/apps/setting/views/Team.py b/apps/setting/views/Team.py index 71710e3d67b..9c2ade72fbc 100644 --- a/apps/setting/views/Team.py +++ b/apps/setting/views/Team.py @@ -13,29 +13,36 @@ from common.auth import TokenAuth, has_permissions from common.constants.permission_constants import PermissionConstants +from common.log.log import log from common.response import result from setting.serializers.team_serializers import TeamMemberSerializer, get_response_body_api, \ UpdateTeamMemberPermissionSerializer +from 
django.utils.translation import gettext_lazy as _ + +from setting.views.common import get_member_operation_object, get_member_operation_object_batch class TeamMember(APIView): authentication_classes = [TokenAuth] @action(methods=['GET'], detail=False) - @swagger_auto_schema(operation_summary="获取团队成员列表", - operation_id="获取团员成员列表", + @swagger_auto_schema(operation_summary=_('Get a list of team members'), + operation_id=_('Get a list of team members'), responses=result.get_api_response(get_response_body_api()), - tags=["团队"]) + tags=[_('Team')]) @has_permissions(PermissionConstants.TEAM_READ) def get(self, request: Request): return result.success(TeamMemberSerializer(data={'team_id': str(request.user.id)}).list_member()) @action(methods=['POST'], detail=False) - @swagger_auto_schema(operation_summary="添加成员", - operation_id="添加成员", + @swagger_auto_schema(operation_summary=_('Add member'), + operation_id=_('Add member'), request_body=TeamMemberSerializer().get_request_body_api(), - tags=["团队"]) + responses=result.get_default_response(), + tags=[_('Team')]) @has_permissions(PermissionConstants.TEAM_CREATE) + @log(menu='Team', operate='Add member', + get_operation_object=lambda r, k: {'name': r.data.get('username_or_email')}) def post(self, request: Request): team = TeamMemberSerializer(data={'team_id': str(request.user.id)}) return result.success((team.add_member(**request.data))) @@ -44,11 +51,14 @@ class Batch(APIView): authentication_classes = [TokenAuth] @action(methods=['POST'], detail=False) - @swagger_auto_schema(operation_summary="批量添加成员", - operation_id="批量添加成员", + @swagger_auto_schema(operation_summary=_('Add members in batches'), + operation_id=_('Add members in batches'), request_body=TeamMemberSerializer.get_bach_request_body_api(), - tags=["团队"]) + responses=result.get_api_array_response(TeamMemberSerializer.get_response_body_api()), + tags=[_('Team')]) @has_permissions(PermissionConstants.TEAM_CREATE) + @log(menu='Team', operate='Add members in batches', + 
get_operation_object=lambda r, k: get_member_operation_object_batch(r.data)) def post(self, request: Request): return result.success( TeamMemberSerializer(data={'team_id': request.user.id}).batch_add_member(request.data)) @@ -57,34 +67,41 @@ class Operate(APIView): authentication_classes = [TokenAuth] @action(methods=['GET'], detail=False) - @swagger_auto_schema(operation_summary="获取团队成员权限", - operation_id="获取团队成员权限", + @swagger_auto_schema(operation_summary=_('Get team member permissions'), + operation_id=_('Get team member permissions'), manual_parameters=TeamMemberSerializer.Operate.get_request_params_api(), - tags=["团队"]) + tags=[_('Team')]) @has_permissions(PermissionConstants.TEAM_READ) def get(self, request: Request, member_id: str): return result.success(TeamMemberSerializer.Operate( data={'member_id': member_id, 'team_id': str(request.user.id)}).list_member_permission()) @action(methods=['PUT'], detail=False) - @swagger_auto_schema(operation_summary="修改团队成员权限", - operation_id="修改团队成员权限", + @swagger_auto_schema(operation_summary=_('Update team member permissions'), + operation_id=_('Update team member permissions'), request_body=UpdateTeamMemberPermissionSerializer().get_request_body_api(), + responses=result.get_default_response(), manual_parameters=TeamMemberSerializer.Operate.get_request_params_api(), - tags=["团队"] + tags=[_('Team')] ) @has_permissions(PermissionConstants.TEAM_EDIT) + @log(menu='Team', operate='Update team member permissions', + get_operation_object=lambda r, k: get_member_operation_object(k.get('member_id')) + ) def put(self, request: Request, member_id: str): return result.success(TeamMemberSerializer.Operate( data={'member_id': member_id, 'team_id': str(request.user.id)}).edit(request.data)) @action(methods=['DELETE'], detail=False) - @swagger_auto_schema(operation_summary="移除成员", - operation_id="移除成员", + @swagger_auto_schema(operation_summary=_('Remove member'), + operation_id=_('Remove member'), 
manual_parameters=TeamMemberSerializer.Operate.get_request_params_api(), - tags=["团队"] + responses=result.get_default_response(), + tags=[_('Team')] ) @has_permissions(PermissionConstants.TEAM_DELETE) + @log(menu='Team', operate='Remove member', + get_operation_object=lambda r, k: get_member_operation_object(k.get('member_id'))) def delete(self, request: Request, member_id: str): return result.success(TeamMemberSerializer.Operate( data={'member_id': member_id, 'team_id': str(request.user.id)}).delete()) diff --git a/apps/setting/views/__init__.py b/apps/setting/views/__init__.py index 0885ef978f8..4fe505635c6 100644 --- a/apps/setting/views/__init__.py +++ b/apps/setting/views/__init__.py @@ -9,3 +9,5 @@ from .Team import * from .model import * from .system_setting import * +from .valid import * +from .model_apply import * diff --git a/apps/setting/views/common.py b/apps/setting/views/common.py new file mode 100644 index 00000000000..ccc68e8ffa6 --- /dev/null +++ b/apps/setting/views/common.py @@ -0,0 +1,78 @@ +# coding=utf-8 +""" + @project: MaxKB + @Author:虎 + @file: common.py + @date:2025/3/25 16:26 + @desc: +""" +from django.db.models import QuerySet + +from common.util.common import encryption +from setting.models import Model +from users.models import User + + +def get_model_operation_object(model_id): + model_model = QuerySet(model=Model).filter(id=model_id).first() + if model_model is not None: + return { + "name": model_model.name + } + return {} + + +def get_member_operation_object(member_id): + user_model = QuerySet(model=User).filter(id=member_id).first() + if user_model is not None: + return { + "name": user_model.username + } + return {} + + +def get_member_operation_object_batch(member_id_list): + user_model_list = QuerySet(model=User).filter(id__in=member_id_list) + if user_model_list is not None: + return { + "name": f'[{",".join([user.username for user in user_model_list])}]', + "user_list": [{'name': user.username} for user in user_model_list] + 
} + return {} + + +def encryption_str(_value): + if isinstance(_value, str): + return encryption(_value) + return _value + + +def encryption_credential(credential): + if isinstance(credential, dict): + return {key: encryption_str(credential.get(key)) for key in credential} + return credential + + +def get_edit_model_details(request): + path = request.path + body = request.data + query = request.query_params + credential = body.get('credential', {}) + credential_encryption_ed = encryption_credential(credential) + return { + 'path': path, + 'body': {**body, 'credential': credential_encryption_ed}, + 'query': query + } + + +def get_email_details(request): + path = request.path + body = request.data + query = request.query_params + email_host_password = body.get('email_host_password', '') + return { + 'path': path, + 'body': {**body, 'email_host_password': encryption_str(email_host_password)}, + 'query': query + } diff --git a/apps/setting/views/model.py b/apps/setting/views/model.py index 7ba0304fcc0..4fe13d1d95f 100644 --- a/apps/setting/views/model.py +++ b/apps/setting/views/model.py @@ -13,32 +13,42 @@ from common.auth import TokenAuth, has_permissions from common.constants.permission_constants import PermissionConstants +from common.log.log import log from common.response import result from common.util.common import query_params_to_single_dict from setting.models_provider.constants.model_provider_constants import ModelProvideConstants -from setting.serializers.provider_serializers import ProviderSerializer, ModelSerializer +from setting.serializers.provider_serializers import ProviderSerializer, ModelSerializer, \ + get_default_model_params_setting from setting.swagger_api.provide_api import ProvideApi, ModelCreateApi, ModelQueryApi, ModelEditApi +from django.utils.translation import gettext_lazy as _ + +from setting.views.common import get_model_operation_object, get_edit_model_details class Model(APIView): authentication_classes = [TokenAuth] 
@action(methods=['POST'], detail=False) - @swagger_auto_schema(operation_summary="创建模型", - operation_id="创建模型", - request_body=ModelCreateApi.get_request_body_api() - , tags=["模型"]) + @swagger_auto_schema(operation_summary=_('Create model'), + operation_id=_('Create model'), + request_body=ModelCreateApi.get_request_body_api(), + manual_parameters=result.get_api_response(ModelCreateApi.get_request_body_api()) + , tags=[_('model')]) @has_permissions(PermissionConstants.MODEL_CREATE) + @log(menu='model', operate='Create model', + get_operation_object=lambda r, k: {'name': r.data.get('name')}, + get_details=get_edit_model_details) def post(self, request: Request): return result.success( ModelSerializer.Create(data={**request.data, 'user_id': str(request.user.id)}).insert(request.user.id, with_valid=True)) @action(methods=['PUT'], detail=False) - @swagger_auto_schema(operation_summary="下载模型,只试用与Ollama平台", - operation_id="下载模型,只试用与Ollama平台", - request_body=ModelCreateApi.get_request_body_api() - , tags=["模型"]) + @swagger_auto_schema(operation_summary=_('Download model, trial only with Ollama platform'), + operation_id=_('Download model, trial only with Ollama platform'), + request_body=ModelCreateApi.get_request_body_api(), + responses=result.get_api_response(ModelCreateApi.get_request_body_api()) + , tags=[_('model')]) @has_permissions(PermissionConstants.MODEL_CREATE) def put(self, request: Request): return result.success( @@ -46,10 +56,10 @@ def put(self, request: Request): with_valid=True)) @action(methods=['GET'], detail=False) - @swagger_auto_schema(operation_summary="获取模型列表", - operation_id="获取模型列表", + @swagger_auto_schema(operation_summary=_('Get model list'), + operation_id=_('Get model list'), manual_parameters=ModelQueryApi.get_request_params_api() - , tags=["模型"]) + , tags=[_('model')]) @has_permissions(PermissionConstants.MODEL_READ) def get(self, request: Request): return result.success( @@ -61,42 +71,88 @@ class ModelMeta(APIView): authentication_classes 
= [TokenAuth] @action(methods=['GET'], detail=False) - @swagger_auto_schema(operation_summary="查询模型meta信息,该接口不携带认证信息", - operation_id="查询模型meta信息,该接口不携带认证信息", - tags=["模型"]) + @swagger_auto_schema(operation_summary=_( + 'Query model meta information, this interface does not carry authentication information'), + operation_id=_( + 'Query model meta information, this interface does not carry authentication information'), + tags=[_('model')]) @has_permissions(PermissionConstants.MODEL_READ) def get(self, request: Request, model_id: str): return result.success( ModelSerializer.Operate(data={'id': model_id, 'user_id': request.user.id}).one_meta(with_valid=True)) + class PauseDownload(APIView): + authentication_classes = [TokenAuth] + + @action(methods=['PUT'], detail=False) + @swagger_auto_schema(operation_summary=_('Pause model download'), + operation_id=_('Pause model download'), + tags=[_('model')]) + @has_permissions(PermissionConstants.MODEL_CREATE) + def put(self, request: Request, model_id: str): + return result.success( + ModelSerializer.Operate(data={'id': model_id, 'user_id': request.user.id}).pause_download()) + + class ModelParamsForm(APIView): + authentication_classes = [TokenAuth] + + @action(methods=['GET'], detail=False) + @swagger_auto_schema(operation_summary=_('Get model parameter form'), + operation_id=_('Get model parameter form'), + manual_parameters=ProvideApi.ModelForm.get_request_params_api(), + tags=[_('model')]) + @has_permissions(PermissionConstants.MODEL_READ) + def get(self, request: Request, model_id: str): + return result.success( + ModelSerializer.ModelParams(data={'id': model_id, 'user_id': request.user.id}).get_model_params()) + + @action(methods=['PUT'], detail=False) + @swagger_auto_schema(operation_summary=_('Save model parameter form'), + operation_id=_('Save model parameter form'), + manual_parameters=ProvideApi.ModelForm.get_request_params_api(), + tags=[_('model')]) + @has_permissions(PermissionConstants.MODEL_READ) + 
@log(menu='model', operate='Save model parameter form', + get_operation_object=lambda r, k: get_model_operation_object(k.get('model_id'))) + def put(self, request: Request, model_id: str): + return result.success( + ModelSerializer.ModelParamsForm(data={'id': model_id, 'user_id': request.user.id}) + .save_model_params_form(request.data)) + class Operate(APIView): authentication_classes = [TokenAuth] @action(methods=['PUT'], detail=False) - @swagger_auto_schema(operation_summary="修改模型", - operation_id="修改模型", - request_body=ModelEditApi.get_request_body_api() - , tags=["模型"]) + @swagger_auto_schema(operation_summary=_('Update model'), + operation_id=_('Update model'), + request_body=ModelEditApi.get_request_body_api(), + responses=result.get_api_response(ModelEditApi.get_request_body_api()) + , tags=[_('model')]) @has_permissions(PermissionConstants.MODEL_CREATE) + @log(menu='model', operate='Update model', + get_operation_object=lambda r, k: get_model_operation_object(k.get('model_id')), + get_details=get_edit_model_details) def put(self, request: Request, model_id: str): return result.success( ModelSerializer.Operate(data={'id': model_id, 'user_id': request.user.id}).edit(request.data, str(request.user.id))) @action(methods=['DELETE'], detail=False) - @swagger_auto_schema(operation_summary="删除模型", - operation_id="删除模型", + @swagger_auto_schema(operation_summary=_('Delete model'), + operation_id=_('Delete model'), responses=result.get_default_response() - , tags=["模型"]) + , tags=[_('model')]) @has_permissions(PermissionConstants.MODEL_DELETE) + @log(menu='model', operate='Delete model', + get_operation_object=lambda r, k: get_model_operation_object(k.get('model_id'))) def delete(self, request: Request, model_id: str): return result.success( ModelSerializer.Operate(data={'id': model_id, 'user_id': request.user.id}).delete()) @action(methods=['GET'], detail=False) - @swagger_auto_schema(operation_summary="查询模型详细信息", - operation_id="查询模型详细信息", - tags=["模型"]) + 
@swagger_auto_schema(operation_summary=_('Query model details'), + operation_id=_('Query model details'), + tags=[_('model')]) @has_permissions(PermissionConstants.MODEL_READ) def get(self, request: Request, model_id: str): return result.success( @@ -110,22 +166,32 @@ class Exec(APIView): authentication_classes = [TokenAuth] @action(methods=['POST'], detail=False) - @swagger_auto_schema(operation_summary="调用供应商函数,获取表单数据", - operation_id="调用供应商函数,获取表单数据", + @swagger_auto_schema(operation_summary=_('Call the supplier function to obtain form data'), + operation_id=_('Call the supplier function to obtain form data'), manual_parameters=ProvideApi.get_request_params_api(), - request_body=ProvideApi.get_request_body_api() - , tags=["模型"]) + request_body=ProvideApi.get_request_body_api(), + responses=result.get_api_response(ProvideApi.get_request_body_api()) + , tags=[_('model')]) @has_permissions(PermissionConstants.MODEL_READ) + @log(menu='model', operate='Call the supplier function to obtain form data') def post(self, request: Request, provider: str, method: str): return result.success( ProviderSerializer(data={'provider': provider, 'method': method}).exec(request.data, with_valid=True)) @action(methods=['GET'], detail=False) - @swagger_auto_schema(operation_summary="获取模型供应商数据", - operation_id="获取模型供应商列表" - , tags=["模型"]) + @swagger_auto_schema(operation_summary=_('Get a list of model suppliers'), + operation_id=_('Get a list of model suppliers') + , tags=[_('model')]) @has_permissions(PermissionConstants.MODEL_READ) def get(self, request: Request): + model_type = request.query_params.get('model_type') + if model_type: + providers = [] + for key in ModelProvideConstants.__members__: + if len([item for item in ModelProvideConstants[key].value.get_model_type_list() if + item['value'] == model_type]) > 0: + providers.append(ModelProvideConstants[key].value.get_model_provide_info().to_dict()) + return result.success(providers) return result.success( 
[ModelProvideConstants[key].value.get_model_provide_info().to_dict() for key in ModelProvideConstants.__members__]) @@ -134,11 +200,11 @@ class ModelTypeList(APIView): authentication_classes = [TokenAuth] @action(methods=['GET'], detail=False) - @swagger_auto_schema(operation_summary="获取模型类型列表", - operation_id="获取模型类型类型列表", + @swagger_auto_schema(operation_summary=_('Get a list of model types'), + operation_id=_('Get a list of model types'), manual_parameters=ProvideApi.ModelTypeList.get_request_params_api(), responses=result.get_api_array_response(ProvideApi.ModelTypeList.get_response_body_api()) - , tags=["模型"]) + , tags=[_('model')]) @has_permissions(PermissionConstants.MODEL_READ) def get(self, request: Request): provider = request.query_params.get('provider') @@ -148,11 +214,11 @@ class ModelList(APIView): authentication_classes = [TokenAuth] @action(methods=['GET'], detail=False) - @swagger_auto_schema(operation_summary="获取模型列表", - operation_id="获取模型创建表单", + @swagger_auto_schema(operation_summary=_('Get the model creation form'), + operation_id=_('Get the model creation form'), manual_parameters=ProvideApi.ModelList.get_request_params_api(), responses=result.get_api_array_response(ProvideApi.ModelList.get_response_body_api()) - , tags=["模型"] + , tags=[_('model')] ) @has_permissions(PermissionConstants.MODEL_READ) def get(self, request: Request): @@ -163,14 +229,32 @@ def get(self, request: Request): ModelProvideConstants[provider].value.get_model_list( model_type)) + class ModelParamsForm(APIView): + authentication_classes = [TokenAuth] + + @action(methods=['GET'], detail=False) + @swagger_auto_schema(operation_summary=_('Get model default parameters'), + operation_id=_('Get the model creation form'), + manual_parameters=ProvideApi.ModelList.get_request_params_api(), + responses=result.get_api_array_response(ProvideApi.ModelList.get_response_body_api()) + , tags=[_('model')] + ) + @has_permissions(PermissionConstants.MODEL_READ) + def get(self, request: 
Request): + provider = request.query_params.get('provider') + model_type = request.query_params.get('model_type') + model_name = request.query_params.get('model_name') + + return result.success(get_default_model_params_setting(provider, model_type, model_name)) + class ModelForm(APIView): authentication_classes = [TokenAuth] @action(methods=['GET'], detail=False) - @swagger_auto_schema(operation_summary="获取模型创建表单", - operation_id="获取模型创建表单", + @swagger_auto_schema(operation_summary=_('Get the model creation form'), + operation_id=_('Get the model creation form'), manual_parameters=ProvideApi.ModelForm.get_request_params_api(), - tags=["模型"]) + tags=[_('model')]) @has_permissions(PermissionConstants.MODEL_READ) def get(self, request: Request): provider = request.query_params.get('provider') diff --git a/apps/setting/views/model_apply.py b/apps/setting/views/model_apply.py new file mode 100644 index 00000000000..73fb699f039 --- /dev/null +++ b/apps/setting/views/model_apply.py @@ -0,0 +1,49 @@ +# coding=utf-8 +""" + @project: MaxKB + @Author:虎 + @file: model_apply.py + @date:2024/8/20 20:38 + @desc: +""" +from urllib.request import Request + +from drf_yasg.utils import swagger_auto_schema +from rest_framework.decorators import action +from rest_framework.views import APIView + +from common.response import result +from setting.serializers.model_apply_serializers import ModelApplySerializers +from django.utils.translation import gettext_lazy as _ + + +class ModelApply(APIView): + class EmbedDocuments(APIView): + @action(methods=['POST'], detail=False) + @swagger_auto_schema(operation_summary=_('Vectorization documentation'), + operation_id=_('Vectorization documentation'), + responses=result.get_default_response(), + tags=[_('model')]) + def post(self, request: Request, model_id): + return result.success( + ModelApplySerializers(data={'model_id': model_id}).embed_documents(request.data)) + + class EmbedQuery(APIView): + @action(methods=['POST'], detail=False) + 
@swagger_auto_schema(operation_summary=_('Vectorization documentation'), + operation_id=_('Vectorization documentation'), + responses=result.get_default_response(), + tags=[_('model')]) + def post(self, request: Request, model_id): + return result.success( + ModelApplySerializers(data={'model_id': model_id}).embed_query(request.data)) + + class CompressDocuments(APIView): + @action(methods=['POST'], detail=False) + @swagger_auto_schema(operation_summary=_('Reorder documents'), + operation_id=_('Reorder documents'), + responses=result.get_default_response(), + tags=[_('model')]) + def post(self, request: Request, model_id): + return result.success( + ModelApplySerializers(data={'model_id': model_id}).compress_documents(request.data)) diff --git a/apps/setting/views/system_setting.py b/apps/setting/views/system_setting.py index e08a4702e3a..64dda262e08 100644 --- a/apps/setting/views/system_setting.py +++ b/apps/setting/views/system_setting.py @@ -14,9 +14,13 @@ from common.auth import TokenAuth, has_permissions from common.constants.permission_constants import RoleConstants +from common.log.log import log from common.response import result from setting.serializers.system_setting import SystemSettingSerializer from setting.swagger_api.system_setting import SystemSettingEmailApi +from django.utils.translation import gettext_lazy as _ + +from setting.views.common import get_email_details class SystemSetting(APIView): @@ -24,33 +28,39 @@ class Email(APIView): authentication_classes = [TokenAuth] @action(methods=['PUT'], detail=False) - @swagger_auto_schema(operation_summary="创建或者修改邮箱设置", - operation_id="创建或者修改邮箱设置", - request_body=SystemSettingEmailApi.get_request_body_api(), tags=["邮箱设置"], + @swagger_auto_schema(operation_summary=_('Create or update email settings'), + operation_id=_('Create or update email settings'), + request_body=SystemSettingEmailApi.get_request_body_api(), tags=[_('Email settings')], 
responses=result.get_api_response(SystemSettingEmailApi.get_response_body_api())) @has_permissions(RoleConstants.ADMIN) + @log(menu='Email settings', operate='Create or update email settings', + get_details=get_email_details + ) def put(self, request: Request): return result.success( SystemSettingSerializer.EmailSerializer.Create( data=request.data).update_or_save()) @action(methods=['POST'], detail=False) - @swagger_auto_schema(operation_summary="测试邮箱设置", - operation_id="测试邮箱设置", + @swagger_auto_schema(operation_summary=_('Test email settings'), + operation_id=_('Test email settings'), request_body=SystemSettingEmailApi.get_request_body_api(), responses=result.get_default_response(), - tags=["邮箱设置"]) + tags=[_('Email settings')]) @has_permissions(RoleConstants.ADMIN) + @log(menu='Email settings', operate='Test email settings', + get_details=get_email_details + ) def post(self, request: Request): return result.success( SystemSettingSerializer.EmailSerializer.Create( data=request.data).is_valid()) @action(methods=['GET'], detail=False) - @swagger_auto_schema(operation_summary="获取邮箱设置", - operation_id="获取邮箱设置", + @swagger_auto_schema(operation_summary=_('Get email settings'), + operation_id=_('Get email settings'), responses=result.get_api_response(SystemSettingEmailApi.get_response_body_api()), - tags=["邮箱设置"]) + tags=[_('Email settings')]) @has_permissions(RoleConstants.ADMIN) def get(self, request: Request): return result.success( diff --git a/apps/setting/views/valid.py b/apps/setting/views/valid.py new file mode 100644 index 00000000000..c52b8905ef2 --- /dev/null +++ b/apps/setting/views/valid.py @@ -0,0 +1,33 @@ +# coding=utf-8 +""" + @project: MaxKB + @Author:虎 + @file: valid.py + @date:2024/7/8 17:50 + @desc: +""" +from drf_yasg.utils import swagger_auto_schema +from rest_framework.decorators import action +from rest_framework.request import Request +from rest_framework.views import APIView + +from common.auth import TokenAuth, has_permissions +from 
common.constants.permission_constants import RoleConstants +from common.response import result +from setting.serializers.valid_serializers import ValidSerializer +from setting.swagger_api.valid_api import ValidApi +from django.utils.translation import gettext_lazy as _ + + +class Valid(APIView): + authentication_classes = [TokenAuth] + + @action(methods=['GET'], detail=False) + @swagger_auto_schema(operation_summary=_('Get verification results'), + operation_id=_('Get verification results'), + manual_parameters=ValidApi.get_request_params_api(), + responses=result.get_default_response() + , tags=["校验"]) + @has_permissions(RoleConstants.ADMIN, RoleConstants.USER) + def get(self, request: Request, valid_type: str, valid_count: int): + return result.success(ValidSerializer(data={'valid_type': valid_type, 'valid_count': valid_count}).valid()) diff --git a/apps/smartdoc/conf.py b/apps/smartdoc/conf.py index 27e1e8b08fa..8da97883ca9 100644 --- a/apps/smartdoc/conf.py +++ b/apps/smartdoc/conf.py @@ -13,6 +13,7 @@ import re from importlib import import_module from urllib.parse import urljoin, urlparse + import yaml BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) @@ -75,25 +76,25 @@ class DoesNotExist(Exception): class Config(dict): defaults = { # 数据库相关配置 - "DB_HOST": "", - "DB_PORT": "", - "DB_USER": "", - "DB_PASSWORD": "", - "DB_ENGINE": "django.db.backends.postgresql_psycopg2", - # 邮件相关配置 - "EMAIL_ADDRESS": "", - "EMAIL_USE_TLS": False, - "EMAIL_USE_SSL": True, - "EMAIL_HOST": "", - "EMAIL_PORT": 465, - "EMAIL_HOST_USER": "", - "EMAIL_HOST_PASSWORD": "", + "DB_HOST": "127.0.0.1", + "DB_PORT": 5432, + "DB_USER": "root", + "DB_PASSWORD": "Password123@postgres", + "DB_ENGINE": "dj_db_conn_pool.backends.postgresql", + "DB_MAX_OVERFLOW": 80, + 'LANGUAGE_CODE': 'zh-CN', # 向量模型 "EMBEDDING_MODEL_NAME": "shibing624/text2vec-base-chinese", "EMBEDDING_DEVICE": "cpu", "EMBEDDING_MODEL_PATH": os.path.join(PROJECT_DIR, 'models'), # 向量库配置 - "VECTOR_STORE_NAME": 
'pg_vector' + "VECTOR_STORE_NAME": 'pg_vector', + "DEBUG": False, + 'SANDBOX': False, + 'LOCAL_MODEL_HOST': '127.0.0.1', + 'LOCAL_MODEL_PORT': '11636', + 'LOCAL_MODEL_PROTOCOL': "http", + 'LOCAL_MODEL_HOST_WORKER': 1 } @@ -110,9 +111,17 @@ def get_db_setting(self) -> dict: "PORT": self.get('DB_PORT'), "USER": self.get('DB_USER'), "PASSWORD": self.get('DB_PASSWORD'), - "ENGINE": self.get('DB_ENGINE') + "ENGINE": self.get('DB_ENGINE'), + "POOL_OPTIONS": { + "POOL_SIZE": 20, + "MAX_OVERFLOW": int(self.get('DB_MAX_OVERFLOW')), + 'RECYCLE': 30 * 60 + } } + def get_language_code(self): + return self.get('LANGUAGE_CODE', 'zh-CN') + def __init__(self, *args): super().__init__(*args) @@ -180,8 +189,36 @@ def load_from_yml(self): loaded = self.from_yaml(i) if loaded: return True + msg = f""" - return False + Error: No config file found. + + You can run `cp config_example.yml {self.root_path}/config.yml`, and edit it. + + """ + raise ImportError(msg) + + def load_from_env(self): + keys = os.environ.keys() + config = {key.replace('MAXKB_', ''): os.environ.get(key) for key in keys if key.startswith('MAXKB_')} + if len(config.keys()) <= 0: + msg = f""" + + Error: No config env found. 
+ + Please set environment variables + MAXKB_CONFIG_TYPE: 配置文件读取方式 FILE: 使用配置文件配置 ENV: 使用ENV配置 + MAXKB_DB_NAME: 数据库名称 + MAXKB_DB_HOST: 数据库主机 + MAXKB_DB_PORT: 数据库端口 + MAXKB_DB_USER: 数据库用户名 + MAXKB_DB_PASSWORD: 数据库密码 + MAXKB_EMBEDDING_MODEL_PATH: 向量模型目录 + MAXKB_EMBEDDING_MODEL_NAME: 向量模型名称 + """ + raise ImportError(msg) + self.from_mapping(config) + return True @classmethod def load_user_config(cls, root_path=None, config_class=None): @@ -190,15 +227,10 @@ def load_user_config(cls, root_path=None, config_class=None): if not root_path: root_path = PROJECT_DIR manager = cls(root_path=root_path) - if manager.load_from_yml(): - config = manager.config + config_type = os.environ.get('MAXKB_CONFIG_TYPE') + if config_type is None or config_type != 'ENV': + manager.load_from_yml() else: - msg = f""" - - Error: No config file found. - - You can run `cp config_example.yml {root_path}/config.yml`, and edit it. - - """ - raise ImportError(msg) + manager.load_from_env() + config = manager.config return config diff --git a/apps/smartdoc/settings/__init__.py b/apps/smartdoc/settings/__init__.py index 2908253e386..4e7ea78e3b3 100644 --- a/apps/smartdoc/settings/__init__.py +++ b/apps/smartdoc/settings/__init__.py @@ -8,3 +8,5 @@ """ from .base import * from .logging import * +from .auth import * +from .lib import * diff --git a/apps/smartdoc/settings/auth.py b/apps/smartdoc/settings/auth.py new file mode 100644 index 00000000000..077f98b3abb --- /dev/null +++ b/apps/smartdoc/settings/auth.py @@ -0,0 +1,19 @@ +# coding=utf-8 +""" + @project: MaxKB + @Author:虎 + @file: auth.py + @date:2024/7/9 18:47 + @desc: +""" +USER_TOKEN_AUTH = 'common.auth.handle.impl.user_token.UserToken' + +PUBLIC_ACCESS_TOKEN_AUTH = 'common.auth.handle.impl.public_access_token.PublicAccessToken' + +APPLICATION_KEY_AUTH = 'common.auth.handle.impl.application_key.ApplicationKey' + +AUTH_HANDLES = [ + USER_TOKEN_AUTH, + PUBLIC_ACCESS_TOKEN_AUTH, + APPLICATION_KEY_AUTH +] diff --git 
a/apps/smartdoc/settings/base.py b/apps/smartdoc/settings/base.py index 04e8810e5ee..de81420798a 100644 --- a/apps/smartdoc/settings/base.py +++ b/apps/smartdoc/settings/base.py @@ -3,13 +3,15 @@ import os from pathlib import Path +from PIL import Image + from ..const import CONFIG, PROJECT_DIR mimetypes.add_type("text/css", ".css", True) mimetypes.add_type("text/javascript", ".js", True) # Build paths inside the project like this: BASE_DIR / 'subdir'. BASE_DIR = Path(__file__).resolve().parent.parent - +Image.MAX_IMAGE_PIXELS = 20000000000 # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/4.2/howto/deployment/checklist/ @@ -24,6 +26,8 @@ 'default': CONFIG.get_db_setting() } +SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https') + # Application definition INSTALLED_APPS = [ @@ -38,24 +42,30 @@ 'rest_framework', "drf_yasg", # swagger 接口 'django_filters', # 条件过滤 - 'django_apscheduler' + 'django_apscheduler', + 'common', + 'function_lib', + 'django_celery_beat' ] MIDDLEWARE = [ + 'django.middleware.locale.LocaleMiddleware', 'django.middleware.security.SecurityMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', + 'common.middleware.gzip.GZipMiddleware', 'common.middleware.static_headers_middleware.StaticHeadersMiddleware', - 'common.middleware.cross_domain_middleware.CrossDomainMiddleware' - + 'common.middleware.cross_domain_middleware.CrossDomainMiddleware', + 'common.middleware.doc_headers_middleware.DocHeadersMiddleware' ] JWT_AUTH = { 'JWT_EXPIRATION_DELTA': datetime.timedelta(seconds=60 * 60 * 2) # <-- 设置token有效时间 } +APPS_DIR = os.path.join(PROJECT_DIR, 'apps') ROOT_URLCONF = 'smartdoc.urls' # FORCE_SCRIPT_NAME TEMPLATES = [ @@ -76,6 +86,7 @@ SWAGGER_SETTINGS = { 'DEFAULT_AUTO_SCHEMA_CLASS': 'common.config.swagger_conf.CustomSwaggerAutoSchema', + 'DEFAULT_GENERATOR_CLASS': 
'common.config.swagger_conf.CustomOpenAPISchemaGenerator', "DEFAULT_MODEL_RENDERING": "example", 'USE_SESSION_AUTH': False, 'SECURITY_DEFINITIONS': { @@ -91,9 +102,20 @@ CACHES = { "default": { 'BACKEND': 'django.core.cache.backends.locmem.LocMemCache', + 'LOCATION': 'unique-snowflake', + 'TIMEOUT': 60 * 30, + 'OPTIONS': { + 'MAX_ENTRIES': 150, + 'CULL_FREQUENCY': 5, + } + }, + 'default_file': { + 'BACKEND': 'common.cache.file_cache.FileCache', + 'LOCATION': os.path.join(PROJECT_DIR, 'data', 'cache', "default_file_cache") # 文件夹路径 }, - 'model_cache': { - 'BACKEND': 'common.cache.mem_cache.MemCache' + 'chat_cache': { + 'BACKEND': 'common.cache.file_cache.FileCache', + 'LOCATION': os.path.join(PROJECT_DIR, 'data', 'cache', "chat_cache") # 文件夹路径 }, # 存储用户信息 'user_cache': { @@ -105,8 +127,9 @@ 'BACKEND': 'common.cache.file_cache.FileCache', 'LOCATION': os.path.join(PROJECT_DIR, 'data', 'cache', "token_cache") # 文件夹路径 }, - "chat_cache": { - 'BACKEND': 'django.core.cache.backends.locmem.LocMemCache', + 'captcha_cache': { + 'BACKEND': 'common.cache.file_cache.FileCache', + 'LOCATION': os.path.join(PROJECT_DIR, 'data', 'cache', "captcha_cache") # 文件夹路径 } } @@ -156,13 +179,31 @@ # Internationalization # https://docs.djangoproject.com/en/4.2/topics/i18n/ -LANGUAGE_CODE = 'en-us' - TIME_ZONE = CONFIG.get_time_zone() +# 启用国际化 USE_I18N = True -USE_TZ = False +# 启用本地化 +USE_L10N = True + +# 启用时区 +USE_TZ = True + +# 默认语言 +LANGUAGE_CODE = CONFIG.get("LANGUAGE_CODE") + +# 支持的语言 +LANGUAGES = [ + ('en', 'English'), + ('zh', '中文简体'), + ('zh-hant', '中文繁体') +] + +# 翻译文件路径 +LOCALE_PATHS = [ + os.path.join(BASE_DIR.parent, 'locales') +] # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/4.2/howto/static-files/ diff --git a/apps/smartdoc/settings/lib.py b/apps/smartdoc/settings/lib.py new file mode 100644 index 00000000000..fc1d3244f47 --- /dev/null +++ b/apps/smartdoc/settings/lib.py @@ -0,0 +1,48 @@ +# coding=utf-8 +""" + @project: MaxKB + @Author:虎 + @file: 
lib.py + @date:2024/8/16 17:12 + @desc: +""" +import os +import shutil + +from smartdoc.const import CONFIG, PROJECT_DIR + +# celery相关配置 +celery_data_dir = os.path.join(PROJECT_DIR, 'data', 'celery_task') +if not os.path.exists(celery_data_dir) or not os.path.isdir(celery_data_dir): + os.makedirs(celery_data_dir) +broker_path = os.path.join(celery_data_dir, "celery_db.sqlite3") +backend_path = os.path.join(celery_data_dir, "celery_results.sqlite3") +# 使用sql_lite 当做broker 和 响应接收 +CELERY_BROKER_URL = f'sqla+sqlite:///{broker_path}' +CELERY_result_backend = f'db+sqlite:///{backend_path}' +CELERY_timezone = CONFIG.TIME_ZONE +CELERY_ENABLE_UTC = False +CELERY_task_serializer = 'pickle' +CELERY_result_serializer = 'pickle' +CELERY_accept_content = ['json', 'pickle'] +CELERY_RESULT_EXPIRES = 600 +CELERY_WORKER_TASK_LOG_FORMAT = '%(asctime).19s %(message)s' +CELERY_WORKER_LOG_FORMAT = '%(asctime).19s %(message)s' +CELERY_TASK_EAGER_PROPAGATES = True +CELERY_WORKER_REDIRECT_STDOUTS = True +CELERY_WORKER_REDIRECT_STDOUTS_LEVEL = "INFO" +CELERY_TASK_SOFT_TIME_LIMIT = 3600 +CELERY_WORKER_CANCEL_LONG_RUNNING_TASKS_ON_CONNECTION_LOSS = True +CELERY_ACKS_LATE = True +celery_once_path = os.path.join(celery_data_dir, "celery_once") +try: + if os.path.exists(celery_once_path) and os.path.isdir(celery_once_path): + shutil.rmtree(celery_once_path) +except Exception as e: + pass +CELERY_ONCE = { + 'backend': 'celery_once.backends.File', + 'settings': {'location': celery_once_path} +} +CELERY_BROKER_CONNECTION_RETRY_ON_STARTUP = True +CELERY_LOG_DIR = os.path.join(PROJECT_DIR, 'logs', 'celery') diff --git a/apps/smartdoc/settings/logging.py b/apps/smartdoc/settings/logging.py index 2627f12014b..9c3df8c159a 100644 --- a/apps/smartdoc/settings/logging.py +++ b/apps/smartdoc/settings/logging.py @@ -91,7 +91,7 @@ }, 'sqlalchemy': { 'handlers': ['console', 'file', 'syslog'], - 'level': LOG_LEVEL, + 'level': "ERROR", 'propagate': False, }, 'django.db.backends': { @@ -114,6 +114,11 @@ 'level': 
LOG_LEVEL, 'propagate': False, }, + 'common.event': { + 'handlers': ['console', 'file'], + 'level': "DEBUG", + 'propagate': False, + }, } } diff --git a/apps/smartdoc/urls.py b/apps/smartdoc/urls.py index 9e85a1874a5..b243809cc77 100644 --- a/apps/smartdoc/urls.py +++ b/apps/smartdoc/urls.py @@ -5,13 +5,13 @@ https://docs.djangoproject.com/en/4.2/topics/http/urls/ Examples: Function views - 1. Add an import: forms my_app import views + 1. Add an import: from my_app import views 2. Add a URL to urlpatterns: path('', views.home, name='home') Class-based views - 1. Add an import: forms other_app.views import Home + 1. Add an import: from other_app.views import Home 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home') Including another URLconf - 1. Import the include() function: forms django.urls import include, path + 1. Import the include() function_lib: from django.urls import include, path 2. Add a URL to urlpatterns: path('blog/', include('blog.urls')) """ import os @@ -19,32 +19,23 @@ from django.http import HttpResponse from django.urls import path, re_path, include from django.views import static -from drf_yasg import openapi -from drf_yasg.views import get_schema_view -from rest_framework import permissions, status +from rest_framework import status -from common.auth import AnonymousAuthentication +from application.urls import urlpatterns as application_urlpatterns +from common.cache_data.static_resource_cache import get_index_html +from common.constants.cache_code_constants import CacheCodeConstants +from common.init.init_doc import init_doc from common.response.result import Result +from common.util.cache_util import get_cache from smartdoc import settings from smartdoc.conf import PROJECT_DIR -schema_view = get_schema_view( - - openapi.Info( - title="Python API", - default_version='v1', - description="智能客服平台", - ), - public=True, - permission_classes=[permissions.AllowAny], - authentication_classes=[AnonymousAuthentication] -) - urlpatterns = 
[ path("api/", include("users.urls")), path("api/", include("dataset.urls")), path("api/", include("setting.urls")), - path("api/", include("application.urls")) + path("api/", include("application.urls")), + path("api/", include("function_lib.urls")) ] @@ -70,23 +61,14 @@ def page_not_found(request, exception): """ if request.path.startswith("/api/"): return Result(response_status=status.HTTP_404_NOT_FOUND, code=404, message="找不到接口") - else: - index_path = os.path.join(PROJECT_DIR, 'apps', "static", 'ui', 'index.html') - if not os.path.exists(index_path): - return HttpResponse("页面不存在", status=404) - file = open(index_path, "r", encoding='utf-8') - content = file.read() - file.close() - if request.path.startswith('/ui/chat/'): - return HttpResponse(content, status=200) - return HttpResponse(content, status=200, headers={'X-Frame-Options': 'DENY'}) + index_path = os.path.join(PROJECT_DIR, 'apps', "static", 'ui', 'index.html') + if not os.path.exists(index_path): + return HttpResponse("页面不存在", status=404) + content = get_index_html(index_path) + if request.path.startswith('/ui/chat/'): + return HttpResponse(content, status=200) + return HttpResponse(content, status=200, headers={'X-Frame-Options': 'DENY'}) handler404 = page_not_found - -urlpatterns += [ - re_path(r'^doc(?P\.json|\.yaml)$', schema_view.without_ui(cache_timeout=0), - name='schema-json'), # 导出 - path('doc/', schema_view.with_ui('swagger', cache_timeout=0), name='schema-swagger-ui'), - path('redoc/', schema_view.with_ui('redoc', cache_timeout=0), name='schema-redoc'), -] +init_doc(urlpatterns, application_urlpatterns) diff --git a/apps/smartdoc/wsgi.py b/apps/smartdoc/wsgi.py index e04ec52744d..6c7c6811587 100644 --- a/apps/smartdoc/wsgi.py +++ b/apps/smartdoc/wsgi.py @@ -19,9 +19,10 @@ def post_handler(): from common import event from common import job + from common.models.db_model_manage import DBModelManage event.run() - event.ListenerManagement.init_embedding_model_signal.send() job.run() + 
DBModelManage.init() post_handler() diff --git a/apps/users/apps.py b/apps/users/apps.py index 1ea7bf62ffe..8e08561521a 100644 --- a/apps/users/apps.py +++ b/apps/users/apps.py @@ -5,3 +5,5 @@ class UsersConfig(AppConfig): default_auto_field = 'django.db.models.BigAutoField' name = 'users' + def ready(self): + from ops.celery import signal_handler diff --git a/apps/users/migrations/0003_user_source.py b/apps/users/migrations/0003_user_source.py new file mode 100644 index 00000000000..7292cc1b595 --- /dev/null +++ b/apps/users/migrations/0003_user_source.py @@ -0,0 +1,18 @@ +# Generated by Django 4.2.13 on 2024-07-11 19:16 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('users', '0002_user_create_time_user_update_time'), + ] + + operations = [ + migrations.AddField( + model_name='user', + name='source', + field=models.CharField(default='LOCAL', max_length=10, verbose_name='来源'), + ), + ] diff --git a/apps/users/migrations/0004_alter_user_email.py b/apps/users/migrations/0004_alter_user_email.py new file mode 100644 index 00000000000..c77416ba1d3 --- /dev/null +++ b/apps/users/migrations/0004_alter_user_email.py @@ -0,0 +1,18 @@ +# Generated by Django 4.2.13 on 2024-07-16 17:03 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('users', '0003_user_source'), + ] + + operations = [ + migrations.AlterField( + model_name='user', + name='email', + field=models.EmailField(blank=True, max_length=254, null=True, unique=True, verbose_name='邮箱'), + ), + ] diff --git a/apps/users/migrations/0005_user_language.py b/apps/users/migrations/0005_user_language.py new file mode 100644 index 00000000000..d2d3092d7a9 --- /dev/null +++ b/apps/users/migrations/0005_user_language.py @@ -0,0 +1,17 @@ +# Generated by Django 4.2.15 on 2025-01-20 06:59 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + dependencies = [ + ('users', 
'0004_alter_user_email'), + ] + + operations = [ + migrations.AddField( + model_name='user', + name='language', + field=models.CharField(default=None, null=True, max_length=10, verbose_name='语言'), + ), + ] diff --git a/apps/users/models/user.py b/apps/users/models/user.py index 08b5d8f2c03..4e3fd35adf7 100644 --- a/apps/users/models/user.py +++ b/apps/users/models/user.py @@ -20,6 +20,12 @@ __all__ = ["User", "password_encrypt", 'get_user_dynamics_permission'] +from smartdoc.const import CONFIG + + +def get_language(): + return CONFIG.get_language_code() + def password_encrypt(raw_password): """ @@ -63,13 +69,15 @@ def get_user_dynamics_permission(user_id: str): class User(AppModelMixin): id = models.UUIDField(primary_key=True, max_length=128, default=uuid.uuid1, editable=False, verbose_name="主键id") - email = models.EmailField(unique=True, verbose_name="邮箱") + email = models.EmailField(unique=True, null=True, blank=True, verbose_name="邮箱") phone = models.CharField(max_length=20, verbose_name="电话", default="") nick_name = models.CharField(max_length=150, verbose_name="昵称", default="") username = models.CharField(max_length=150, unique=True, verbose_name="用户名") password = models.CharField(max_length=150, verbose_name="密码") role = models.CharField(max_length=150, verbose_name="角色") + source = models.CharField(max_length=10, verbose_name="来源", default="LOCAL") is_active = models.BooleanField(default=True) + language = models.CharField(max_length=10, verbose_name="语言", null=True, default=None) create_time = models.DateTimeField(verbose_name="创建时间", auto_now_add=True, null=True) update_time = models.DateTimeField(verbose_name="修改时间", auto_now=True, null=True) diff --git a/apps/users/serializers/user_serializers.py b/apps/users/serializers/user_serializers.py index 2d0da7f5651..96a4bb390a0 100644 --- a/apps/users/serializers/user_serializers.py +++ b/apps/users/serializers/user_serializers.py @@ -6,17 +6,22 @@ @date:2023/9/5 16:32 @desc: """ +import base64 import datetime 
import os import random import re import uuid +from captcha.image import ImageCaptcha +from django.conf import settings from django.core import validators, signing, cache from django.core.mail import send_mail from django.core.mail.backends.smtp import EmailBackend from django.db import transaction -from django.db.models import Q, QuerySet +from django.db.models import Q, QuerySet, Prefetch +from django.utils.translation import get_language +from django.utils.translation import gettext_lazy as _, to_locale from drf_yasg import openapi from rest_framework import serializers @@ -25,25 +30,52 @@ from common.constants.exception_code_constants import ExceptionCodeConstants from common.constants.permission_constants import RoleConstants, get_permission_list_by_role from common.db.search import page_search -from common.event import ListenerManagement from common.exception.app_exception import AppApiException from common.mixins.api_mixin import ApiMixin +from common.models.db_model_manage import DBModelManage from common.response.result import get_api_response +from common.util.common import valid_license, get_random_chars from common.util.field_message import ErrMessage from common.util.lock import lock from dataset.models import DataSet, Document, Paragraph, Problem, ProblemParagraphMapping +from embedding.task import delete_embedding_by_dataset_id_list +from function_lib.models.function import FunctionLib from setting.models import Team, SystemSetting, SettingType, Model, TeamMember, TeamMemberPermission from smartdoc.conf import PROJECT_DIR from users.models.user import User, password_encrypt, get_user_dynamics_permission user_cache = cache.caches['user_cache'] +captcha_cache = cache.caches['captcha_cache'] + + +class CaptchaSerializer(ApiMixin, serializers.Serializer): + @staticmethod + def get_response_body_api(): + return get_api_response(openapi.Schema( + type=openapi.TYPE_STRING, + title="captcha", + default="xxxx", + description="captcha" + )) + + @staticmethod + 
def generate(): + chars = get_random_chars() + image = ImageCaptcha() + data = image.generate(chars) + captcha = base64.b64encode(data.getbuffer()) + captcha_cache.set(f"LOGIN:{chars.lower()}", chars, timeout=5 * 60) + return 'data:image/png;base64,' + captcha.decode() class SystemSerializer(ApiMixin, serializers.Serializer): @staticmethod def get_profile(): version = os.environ.get('MAXKB_VERSION') - return {'version': version} + xpack_cache = DBModelManage.get_model('xpack_cache') + return {'version': version, 'IS_XPACK': hasattr(settings, 'IS_XPACK'), + 'XPACK_LICENSE_IS_VALID': False if xpack_cache is None else xpack_cache.get('XPACK_LICENSE_IS_VALID', + False)} @staticmethod def get_response_body_api(): @@ -51,24 +83,31 @@ def get_response_body_api(): type=openapi.TYPE_OBJECT, required=[], properties={ - 'version': openapi.Schema(type=openapi.TYPE_STRING, title="系统版本号", description="系统版本号"), + 'version': openapi.Schema(type=openapi.TYPE_STRING, title=_("System version number"), + description=_("System version number")), } ) class LoginSerializer(ApiMixin, serializers.Serializer): username = serializers.CharField(required=True, - error_messages=ErrMessage.char("用户名")) + error_messages=ErrMessage.char(_("Username"))) + + password = serializers.CharField(required=True, error_messages=ErrMessage.char(_("Password"))) - password = serializers.CharField(required=True, error_messages=ErrMessage.char("密码")) + captcha = serializers.CharField(required=True, error_messages=ErrMessage.char(_("captcha"))) def is_valid(self, *, raise_exception=False): """ 校验参数 - :param raise_exception: 是否抛出异常 只能是True - :return: 用户信息 + :param raise_exception: Whether to throw an exception can only be True + :return: User information """ super().is_valid(raise_exception=True) + captcha = self.data.get('captcha') + captcha_value = captcha_cache.get(f"LOGIN:{captcha.lower()}") + if captcha_value is None: + raise AppApiException(1005, _("Captcha code error or expiration")) username = 
self.data.get("username") password = password_encrypt(self.data.get("password")) user = QuerySet(User).filter(Q(username=username, @@ -77,13 +116,13 @@ def is_valid(self, *, raise_exception=False): if user is None: raise ExceptionCodeConstants.INCORRECT_USERNAME_AND_PASSWORD.value.to_app_api_exception() if not user.is_active: - raise AppApiException(1005, "用户已被禁用,请联系管理员!") + raise AppApiException(1005, _("The user has been disabled, please contact the administrator!")) return user def get_user_token(self): """ - 获取用户Token - :return: 用户Token(认证信息) + Get user token + :return: User Token (authentication information) """ user = self.is_valid() token = signing.dumps({'username': user.username, 'id': str(user.id), 'email': user.email, @@ -99,8 +138,9 @@ def get_request_body_api(self): type=openapi.TYPE_OBJECT, required=['username', 'password'], properties={ - 'username': openapi.Schema(type=openapi.TYPE_STRING, title="用户名", description="用户名"), - 'password': openapi.Schema(type=openapi.TYPE_STRING, title="密码", description="密码") + 'username': openapi.Schema(type=openapi.TYPE_STRING, title=_("Username"), description=_("Username")), + 'password': openapi.Schema(type=openapi.TYPE_STRING, title=_("Password"), description=_("Password")), + 'captcha': openapi.Schema(type=openapi.TYPE_STRING, title=_("captcha"), description=_("captcha")) } ) @@ -115,36 +155,38 @@ def get_response_body_api(self): class RegisterSerializer(ApiMixin, serializers.Serializer): """ - 注册请求对象 + Register request object """ email = serializers.EmailField( required=True, - error_messages=ErrMessage.char("邮箱"), + error_messages=ErrMessage.char(_("Email")), validators=[validators.EmailValidator(message=ExceptionCodeConstants.EMAIL_FORMAT_ERROR.value.message, code=ExceptionCodeConstants.EMAIL_FORMAT_ERROR.value.code)]) username = serializers.CharField(required=True, - error_messages=ErrMessage.char("用户名"), + error_messages=ErrMessage.char(_("Username")), max_length=20, min_length=6, validators=[ - 
validators.RegexValidator(regex=re.compile("^[a-zA-Z][a-zA-Z0-9_]{5,20}$"), - message="用户名字符数为 6-20 个字符,必须以字母开头,可使用字母、数字、下划线等") + validators.RegexValidator(regex=re.compile("^.{6,20}$"), + message=_("Username must be 6-20 characters long")) ]) - password = serializers.CharField(required=True, error_messages=ErrMessage.char("密码"), + password = serializers.CharField(required=True, error_messages=ErrMessage.char(_("Password")), validators=[validators.RegexValidator(regex=re.compile( "^(?![a-zA-Z]+$)(?![A-Z0-9]+$)(?![A-Z_!@#$%^&*`~.()-+=]+$)(?![a-z0-9]+$)(?![a-z_!@#$%^&*`~()-+=]+$)" "(?![0-9_!@#$%^&*`~()-+=]+$)[a-zA-Z0-9_!@#$%^&*`~.()-+=]{6,20}$") - , message="密码长度6-20个字符,必须字母、数字、特殊字符组合")]) + , message=_( + "The password must be 6-20 characters long and must be a combination of letters, numbers, and special characters."))]) re_password = serializers.CharField(required=True, - error_messages=ErrMessage.char("确认密码"), + error_messages=ErrMessage.char(_("Confirm Password")), validators=[validators.RegexValidator(regex=re.compile( "^(?![a-zA-Z]+$)(?![A-Z0-9]+$)(?![A-Z_!@#$%^&*`~.()-+=]+$)(?![a-z0-9]+$)(?![a-z_!@#$%^&*`~()-+=]+$)" "(?![0-9_!@#$%^&*`~()-+=]+$)[a-zA-Z0-9_!@#$%^&*`~.()-+=]{6,20}$") - , message="确认密码长度6-20个字符,必须字母、数字、特殊字符组合")]) + , message=_( + "The password must be 6-20 characters long and must be a combination of letters, numbers, and special characters."))]) - code = serializers.CharField(required=True, error_messages=ErrMessage.char("验证码")) + code = serializers.CharField(required=True, error_messages=ErrMessage.char(_("Verification code"))) class Meta: model = User @@ -174,19 +216,19 @@ def is_valid(self, *, raise_exception=False): return True + @valid_license(model=User, count=2, + message=_( + "The community version supports up to 2 users. 
If you need more users, please contact us (https://fit2cloud.com/).")) @transaction.atomic def save(self, **kwargs): m = User( **{'id': uuid.uuid1(), 'email': self.data.get("email"), 'username': self.data.get("username"), 'role': RoleConstants.USER.name}) m.set_password(self.data.get("password")) - # 插入用户 m.save() - # 初始化用户团队 - Team(**{'user': m, 'name': m.username + '的团队'}).save() + Team(**{'user': m, 'name': m.username + _("team")}).save() email = self.data.get("email") code_cache_key = email + ":register" - # 删除验证码缓存 user_cache.delete(code_cache_key) @staticmethod @@ -195,11 +237,13 @@ def get_request_body_api(): type=openapi.TYPE_OBJECT, required=['username', 'email', 'password', 're_password', 'code'], properties={ - 'username': openapi.Schema(type=openapi.TYPE_STRING, title="用户名", description="用户名"), - 'email': openapi.Schema(type=openapi.TYPE_STRING, title="邮箱", description="邮箱地址"), - 'password': openapi.Schema(type=openapi.TYPE_STRING, title="密码", description="密码"), - 're_password': openapi.Schema(type=openapi.TYPE_STRING, title="确认密码", description="确认密码"), - 'code': openapi.Schema(type=openapi.TYPE_STRING, title="验证码", description="验证码") + 'username': openapi.Schema(type=openapi.TYPE_STRING, title=_("Username"), description=_("Username")), + 'email': openapi.Schema(type=openapi.TYPE_STRING, title=_("Email"), description=_("Email")), + 'password': openapi.Schema(type=openapi.TYPE_STRING, title=_("Password"), description=_("Password")), + 're_password': openapi.Schema(type=openapi.TYPE_STRING, title=_("Confirm Password"), + description=_("Confirm Password")), + 'code': openapi.Schema(type=openapi.TYPE_STRING, title=_("Verification code"), + description=_("Verification code")) } ) @@ -210,16 +254,18 @@ class CheckCodeSerializer(ApiMixin, serializers.Serializer): """ email = serializers.EmailField( required=True, - error_messages=ErrMessage.char("邮箱"), + error_messages=ErrMessage.char(_("Email")), 
validators=[validators.EmailValidator(message=ExceptionCodeConstants.EMAIL_FORMAT_ERROR.value.message, code=ExceptionCodeConstants.EMAIL_FORMAT_ERROR.value.code)]) - code = serializers.CharField(required=True, error_messages=ErrMessage.char("验证码")) + code = serializers.CharField(required=True, error_messages=ErrMessage.char(_("Verification code"))) type = serializers.CharField(required=True, - error_messages=ErrMessage.char("类型"), + error_messages=ErrMessage.char(_("Type")), validators=[ validators.RegexValidator(regex=re.compile("^register|reset_password$"), - message="类型只支持register|reset_password", code=500) + message=_( + "The type only supports register|reset_password"), + code=500) ]) def is_valid(self, *, raise_exception=False): @@ -238,40 +284,56 @@ def get_request_body_api(self): type=openapi.TYPE_OBJECT, required=['email', 'code', 'type'], properties={ - 'email': openapi.Schema(type=openapi.TYPE_STRING, title="邮箱", description="邮箱地址"), - 'code': openapi.Schema(type=openapi.TYPE_STRING, title="验证码", description="验证码"), - 'type': openapi.Schema(type=openapi.TYPE_STRING, title="类型", description="register|reset_password") + 'email': openapi.Schema(type=openapi.TYPE_STRING, title=_("Email"), description=_("Email")), + 'code': openapi.Schema(type=openapi.TYPE_STRING, title=_("Verification code"), + description=_("Verification code")), + 'type': openapi.Schema(type=openapi.TYPE_STRING, title=_("Type"), description="register|reset_password") } ) def get_response_body_api(self): return get_api_response(openapi.Schema( type=openapi.TYPE_BOOLEAN, - title="是否成功", + title=_('Is it successful'), default=True, - description="错误提示")) + description=_('Error message'))) + + +class SwitchLanguageSerializer(serializers.Serializer): + user_id = serializers.UUIDField(required=True, error_messages=ErrMessage.char(_('user id')), ) + language = serializers.CharField(required=True, error_messages=ErrMessage.char(_('language'))) + + def switch(self): + 
self.is_valid(raise_exception=True) + language = self.data.get('language') + support_language_list = ['zh-CN', 'zh-Hant', 'en-US'] + if not support_language_list.__contains__(language): + raise AppApiException(500, _('language only support:') + ','.join(support_language_list)) + QuerySet(User).filter(id=self.data.get('user_id')).update(language=language) class RePasswordSerializer(ApiMixin, serializers.Serializer): email = serializers.EmailField( required=True, - error_messages=ErrMessage.char("邮箱"), + error_messages=ErrMessage.char(_("Email")), validators=[validators.EmailValidator(message=ExceptionCodeConstants.EMAIL_FORMAT_ERROR.value.message, code=ExceptionCodeConstants.EMAIL_FORMAT_ERROR.value.code)]) - code = serializers.CharField(required=True, error_messages=ErrMessage.char("验证码")) + code = serializers.CharField(required=True, error_messages=ErrMessage.char(_("Verification code"))) - password = serializers.CharField(required=True, error_messages=ErrMessage.char("密码"), + password = serializers.CharField(required=True, error_messages=ErrMessage.char(_("Password")), validators=[validators.RegexValidator(regex=re.compile( "^(?![a-zA-Z]+$)(?![A-Z0-9]+$)(?![A-Z_!@#$%^&*`~.()-+=]+$)(?![a-z0-9]+$)(?![a-z_!@#$%^&*`~()-+=]+$)" "(?![0-9_!@#$%^&*`~()-+=]+$)[a-zA-Z0-9_!@#$%^&*`~.()-+=]{6,20}$") - , message="确认密码长度6-20个字符,必须字母、数字、特殊字符组合")]) + , message=_( + "The confirmation password must be 6-20 characters long and must be a combination of letters, numbers, and special characters."))]) - re_password = serializers.CharField(required=True, error_messages=ErrMessage.char("确认密码"), + re_password = serializers.CharField(required=True, error_messages=ErrMessage.char(_("Confirm Password")), validators=[validators.RegexValidator(regex=re.compile( "^(?![a-zA-Z]+$)(?![A-Z0-9]+$)(?![A-Z_!@#$%^&*`~.()-+=]+$)(?![a-z0-9]+$)(?![a-z_!@#$%^&*`~()-+=]+$)" "(?![0-9_!@#$%^&*`~()-+=]+$)[a-zA-Z0-9_!@#$%^&*`~.()-+=]{6,20}$") - , message="确认密码长度6-20个字符,必须字母、数字、特殊字符组合")] + , message=_( + "The 
confirmation password must be 6-20 characters long and must be a combination of letters, numbers, and special characters."))] ) class Meta: @@ -309,10 +371,12 @@ def get_request_body_api(self): type=openapi.TYPE_OBJECT, required=['email', 'code', "password", 're_password'], properties={ - 'email': openapi.Schema(type=openapi.TYPE_STRING, title="邮箱", description="邮箱地址"), - 'code': openapi.Schema(type=openapi.TYPE_STRING, title="验证码", description="验证码"), - 'password': openapi.Schema(type=openapi.TYPE_STRING, title="密码", description="密码"), - 're_password': openapi.Schema(type=openapi.TYPE_STRING, title="确认密码", description="确认密码") + 'email': openapi.Schema(type=openapi.TYPE_STRING, title=_("Email"), description=_("Email")), + 'code': openapi.Schema(type=openapi.TYPE_STRING, title=_("Verification code"), + description=_("Verification code")), + 'password': openapi.Schema(type=openapi.TYPE_STRING, title=_("Password"), description=_("Password")), + 're_password': openapi.Schema(type=openapi.TYPE_STRING, title=_("Confirm Password"), + description=_("Confirm Password")) } ) @@ -320,13 +384,13 @@ def get_request_body_api(self): class SendEmailSerializer(ApiMixin, serializers.Serializer): email = serializers.EmailField( required=True - , error_messages=ErrMessage.char("邮箱"), + , error_messages=ErrMessage.char(_("Email")), validators=[validators.EmailValidator(message=ExceptionCodeConstants.EMAIL_FORMAT_ERROR.value.message, code=ExceptionCodeConstants.EMAIL_FORMAT_ERROR.value.code)]) - type = serializers.CharField(required=True, error_messages=ErrMessage.char("类型"), validators=[ + type = serializers.CharField(required=True, error_messages=ErrMessage.char(_("Type")), validators=[ validators.RegexValidator(regex=re.compile("^register|reset_password$"), - message="类型只支持register|reset_password", code=500) + message=_("The type only supports register|reset_password"), code=500) ]) class Meta: @@ -344,7 +408,8 @@ def is_valid(self, *, raise_exception=False): code_cache_key_lock = 
code_cache_key + "_lock" ttl = user_cache.ttl(code_cache_key_lock) if ttl is not None: - raise AppApiException(500, f"{ttl.total_seconds()}秒内请勿重复发送邮件") + raise AppApiException(500, _("Do not send emails again within {seconds} seconds").format( + seconds=int(ttl.total_seconds()))) return True def send(self): @@ -359,8 +424,10 @@ def send(self): code = "".join(list(map(lambda i: random.choice(['1', '2', '3', '4', '5', '6', '7', '8', '9', '0' ]), range(6)))) # 获取邮件模板 - file = open(os.path.join(PROJECT_DIR, "apps", "common", 'template', 'email_template.html'), "r", - encoding='utf-8') + language = get_language() + file = open( + os.path.join(PROJECT_DIR, "apps", "common", 'template', f'email_template_{to_locale(language)}.html'), "r", + encoding='utf-8') content = file.read() file.close() code_cache_key = email + ":" + state @@ -370,7 +437,8 @@ def send(self): system_setting = QuerySet(SystemSetting).filter(type=SettingType.EMAIL.value).first() if system_setting is None: user_cache.delete(code_cache_key_lock) - raise AppApiException(1004, "邮箱未设置,请联系管理员设置") + raise AppApiException(1004, + _("The email service has not been set up. 
Please contact the administrator to set up the email service in [Email Settings].")) try: connection = EmailBackend(system_setting.meta.get("email_host"), system_setting.meta.get('email_port'), @@ -381,14 +449,15 @@ def send(self): system_setting.meta.get('email_use_ssl') ) # 发送邮件 - send_mail(f'【MaxKB 智能知识库-{"用户注册" if state == "register" else "修改密码"}】', - '', - html_message=f'{content.replace("${code}", code)}', - from_email=system_setting.meta.get('from_email'), - recipient_list=[email], fail_silently=False, connection=connection) + send_mail(_('【Intelligent knowledge base question and answer system-{action}】').format( + action=_('User registration') if state == 'register' else _('Change password')), + '', + html_message=f'{content.replace("${code}", code)}', + from_email=system_setting.meta.get('from_email'), + recipient_list=[email], fail_silently=False, connection=connection) except Exception as e: user_cache.delete(code_cache_key_lock) - raise AppApiException(500, f"{str(e)}邮件发送失败") + raise AppApiException(500, f"{str(e)}" + _("Email sending failed")) user_cache.set(code_cache_key, code, timeout=datetime.timedelta(minutes=30)) return True @@ -397,8 +466,8 @@ def get_request_body_api(self): type=openapi.TYPE_OBJECT, required=['email', 'type'], properties={ - 'email': openapi.Schema(type=openapi.TYPE_STRING, title="邮箱", description="邮箱地址"), - 'type': openapi.Schema(type=openapi.TYPE_STRING, title="类型", description="register|reset_password") + 'email': openapi.Schema(type=openapi.TYPE_STRING, title=_("Email"), description=_('Email')), + 'type': openapi.Schema(type=openapi.TYPE_STRING, title=_('Type'), description="register|reset_password") } ) @@ -419,7 +488,8 @@ def get_user_profile(user: User): permission_list += [p.value for p in get_permission_list_by_role(RoleConstants[user.role])] return {'id': user.id, 'username': user.username, 'email': user.email, 'role': user.role, 'permissions': [str(p) for p in permission_list], - 'is_edit_password': user.password == 
'd880e722c47a34d8e9fce789fc62389d' if user.role == 'ADMIN' else False} + 'is_edit_password': user.password == 'd880e722c47a34d8e9fce789fc62389d' if user.role == 'ADMIN' else False, + 'language': user.language} @staticmethod def get_response_body_api(): @@ -427,12 +497,13 @@ def get_response_body_api(): type=openapi.TYPE_OBJECT, required=['id', 'username', 'email', 'role', 'is_active'], properties={ - 'id': openapi.Schema(type=openapi.TYPE_STRING, title="用户id", description="用户id"), - 'username': openapi.Schema(type=openapi.TYPE_STRING, title="用户名", description="用户名"), - 'email': openapi.Schema(type=openapi.TYPE_STRING, title="邮箱", description="邮箱地址"), - 'role': openapi.Schema(type=openapi.TYPE_STRING, title="角色", description="角色"), - 'is_active': openapi.Schema(type=openapi.TYPE_STRING, title="是否可用", description="是否可用"), - "permissions": openapi.Schema(type=openapi.TYPE_ARRAY, title="权限列表", description="权限列表", + 'id': openapi.Schema(type=openapi.TYPE_STRING, title="ID", description="ID"), + 'username': openapi.Schema(type=openapi.TYPE_STRING, title=_("Username"), description=_("Username")), + 'email': openapi.Schema(type=openapi.TYPE_STRING, title=_("Email"), description=_("Email")), + 'role': openapi.Schema(type=openapi.TYPE_STRING, title=_("Role"), description=_("Role")), + 'is_active': openapi.Schema(type=openapi.TYPE_STRING, title=_("Is active"), description=_("Is active")), + "permissions": openapi.Schema(type=openapi.TYPE_ARRAY, title=_("Permissions"), + description=_("Permissions"), items=openapi.Schema(type=openapi.TYPE_STRING)) } ) @@ -449,11 +520,11 @@ def get_response_body_api(self): type=openapi.TYPE_OBJECT, required=['id', 'username', 'email', 'role', 'is_active'], properties={ - 'id': openapi.Schema(type=openapi.TYPE_STRING, title="用户id", description="用户id"), - 'username': openapi.Schema(type=openapi.TYPE_STRING, title="用户名", description="用户名"), - 'email': openapi.Schema(type=openapi.TYPE_STRING, title="邮箱", description="邮箱地址"), - 'role': 
openapi.Schema(type=openapi.TYPE_STRING, title="角色", description="角色"), - 'is_active': openapi.Schema(type=openapi.TYPE_STRING, title="是否可用", description="是否可用") + 'id': openapi.Schema(type=openapi.TYPE_STRING, title="ID", description="ID"), + 'username': openapi.Schema(type=openapi.TYPE_STRING, title=_("Username"), description=_("Username")), + 'email': openapi.Schema(type=openapi.TYPE_STRING, title=_("Email"), description=_("Email")), + 'role': openapi.Schema(type=openapi.TYPE_STRING, title=_("Role"), description=_("Role")), + 'is_active': openapi.Schema(type=openapi.TYPE_STRING, title=_("Is active"), description=_("Is active")) } ) @@ -466,7 +537,7 @@ def get_request_params_api(): in_=openapi.IN_QUERY, type=openapi.TYPE_STRING, required=True, - description='邮箱或者用户名')] + description=_("Email or username"))] @staticmethod def get_response_body_api(): @@ -474,9 +545,10 @@ def get_response_body_api(): type=openapi.TYPE_OBJECT, required=['username', 'email', 'id'], properties={ - 'id': openapi.Schema(type=openapi.TYPE_STRING, title='用户主键id', description="用户主键id"), - 'username': openapi.Schema(type=openapi.TYPE_STRING, title="用户名", description="用户名"), - 'email': openapi.Schema(type=openapi.TYPE_STRING, title="邮箱", description="邮箱地址") + 'id': openapi.Schema(type=openapi.TYPE_STRING, title='ID', description="ID"), + 'username': openapi.Schema(type=openapi.TYPE_STRING, title=_("Username"), + description=_("Username")), + 'email': openapi.Schema(type=openapi.TYPE_STRING, title=_("Email"), description=_("Email")) } ) @@ -484,14 +556,50 @@ def list(self, with_valid=True): if with_valid: self.is_valid(raise_exception=True) email_or_username = self.data.get('email_or_username') - return [{'id': user_model.id, 'username': user_model.username, 'email': user_model.email} for user_model in + return [{'id': user_model.id, 'username': user_model.username, 'email': user_model.email} for user_model + in QuerySet(User).filter(Q(username=email_or_username) | 
Q(email=email_or_username))] + def listByType(self, type, user_id): + teamIds = TeamMember.objects.filter(user_id=user_id).values_list('id', flat=True) + targets = TeamMemberPermission.objects.filter( + member_id__in=teamIds, + auth_target_type=type, + operate__contains=['USE'] + ).values_list('target', flat=True) + prefetch_users = Prefetch('user', queryset=User.objects.only('id', 'username')) + + user_list = [] + if type == 'DATASET': + user_list = DataSet.objects.filter( + Q(id__in=targets) | Q(user_id=user_id) + ).prefetch_related(prefetch_users).distinct('user_id') + elif type == 'APPLICATION': + user_list = Application.objects.filter( + Q(id__in=targets) | Q(user_id=user_id) + ).prefetch_related(prefetch_users).distinct('user_id') + elif type == 'FUNCTION': + user_list = FunctionLib.objects.filter( + Q(permission_type='PUBLIC') | Q(user_id=user_id) + ).prefetch_related(prefetch_users).distinct('user_id') + + other_users = [ + {'id': app.user.id, 'username': app.user.username} + for app in user_list if app.user.id != user_id + ] + users = [ + {'id': 'all', 'username': _('All')}, + {'id': user_id, 'username': _('Me')} + ] + users.extend(other_users) + return users + class UserInstanceSerializer(ApiMixin, serializers.ModelSerializer): class Meta: model = User - fields = ['id', 'username', 'email', 'phone', 'is_active', 'role', 'nick_name', 'create_time', 'update_time'] + fields = ['id', 'username', 'email', 'phone', 'is_active', 'role', 'nick_name', 'create_time', 'update_time', + 'source'] @staticmethod def get_response_body_api(): @@ -500,15 +608,19 @@ def get_response_body_api(): required=['id', 'username', 'email', 'phone', 'is_active', 'role', 'nick_name', 'create_time', 'update_time'], properties={ - 'id': openapi.Schema(type=openapi.TYPE_STRING, title="用户id", description="用户id"), - 'username': openapi.Schema(type=openapi.TYPE_STRING, title="用户名", description="用户名"), - 'email': openapi.Schema(type=openapi.TYPE_STRING, title="邮箱", description="邮箱地址"), - 
'phone': openapi.Schema(type=openapi.TYPE_STRING, title="手机号", description="手机号"), - 'is_active': openapi.Schema(type=openapi.TYPE_BOOLEAN, title="是否激活", description="是否激活"), - 'role': openapi.Schema(type=openapi.TYPE_STRING, title="角色", description="角色"), - 'nick_name': openapi.Schema(type=openapi.TYPE_STRING, title="姓名", description="姓名"), - 'create_time': openapi.Schema(type=openapi.TYPE_STRING, title="创建时间", description="修改时间"), - 'update_time': openapi.Schema(type=openapi.TYPE_STRING, title="修改时间", description="修改时间") + 'id': openapi.Schema(type=openapi.TYPE_STRING, title="ID", description="ID"), + 'username': openapi.Schema(type=openapi.TYPE_STRING, title=_("Username"), description=_("Username")), + 'email': openapi.Schema(type=openapi.TYPE_STRING, title=_("Email"), description=_("Email")), + 'phone': openapi.Schema(type=openapi.TYPE_STRING, title=_("Phone"), description=_("Phone")), + 'is_active': openapi.Schema(type=openapi.TYPE_BOOLEAN, title=_("Is active"), + description=_("Is active")), + 'role': openapi.Schema(type=openapi.TYPE_STRING, title=_("Role"), description=_("Role")), + 'source': openapi.Schema(type=openapi.TYPE_STRING, title=_("Source"), description=_("Source")), + 'nick_name': openapi.Schema(type=openapi.TYPE_STRING, title=_("Name"), description=_("Name")), + 'create_time': openapi.Schema(type=openapi.TYPE_STRING, title=_("Create time"), + description=_("Create time")), + 'update_time': openapi.Schema(type=openapi.TYPE_STRING, title=_("Update time"), + description=_("Update time")) } ) @@ -518,7 +630,7 @@ def get_request_params_api(): in_=openapi.IN_PATH, type=openapi.TYPE_STRING, required=True, - description='用户名id') + description='ID') ] @@ -526,7 +638,7 @@ def get_request_params_api(): class UserManageSerializer(serializers.Serializer): class Query(ApiMixin, serializers.Serializer): email_or_username = serializers.CharField(required=False, allow_null=True, - error_messages=ErrMessage.char("邮箱或者用户名")) + 
error_messages=ErrMessage.char(_('Email or username'))) @staticmethod def get_request_params_api(): @@ -534,7 +646,7 @@ def get_request_params_api(): in_=openapi.IN_QUERY, type=openapi.TYPE_STRING, required=False, - description='邮箱或者用户名')] + description=_("Email or username"))] @staticmethod def get_response_body_api(): @@ -542,9 +654,10 @@ def get_response_body_api(): type=openapi.TYPE_OBJECT, required=['username', 'email', 'id'], properties={ - 'id': openapi.Schema(type=openapi.TYPE_STRING, title='用户主键id', description="用户主键id"), - 'username': openapi.Schema(type=openapi.TYPE_STRING, title="用户名", description="用户名"), - 'email': openapi.Schema(type=openapi.TYPE_STRING, title="邮箱", description="邮箱地址") + 'id': openapi.Schema(type=openapi.TYPE_STRING, title='ID', description="ID"), + 'username': openapi.Schema(type=openapi.TYPE_STRING, title=_("Username"), + description=_("Username")), + 'email': openapi.Schema(type=openapi.TYPE_STRING, title=_("Email"), description=_("Email")) } ) @@ -573,27 +686,29 @@ def page(self, current_page: int, page_size: int, with_valid=True): class UserInstance(ApiMixin, serializers.Serializer): email = serializers.EmailField( required=True, - error_messages=ErrMessage.char("邮箱"), + error_messages=ErrMessage.char(_("Email")), validators=[validators.EmailValidator(message=ExceptionCodeConstants.EMAIL_FORMAT_ERROR.value.message, code=ExceptionCodeConstants.EMAIL_FORMAT_ERROR.value.code)]) username = serializers.CharField(required=True, - error_messages=ErrMessage.char("用户名"), + error_messages=ErrMessage.char(_("Username")), max_length=20, min_length=6, validators=[ - validators.RegexValidator(regex=re.compile("^[a-zA-Z][a-zA-Z0-9_]{5,20}$"), - message="用户名字符数为 6-20 个字符,必须以字母开头,可使用字母、数字、下划线等") + validators.RegexValidator(regex=re.compile("^.{6,20}$"), + message=_( + 'Username must be 6-20 characters long')) ]) - password = serializers.CharField(required=True, error_messages=ErrMessage.char("密码"), + password = 
serializers.CharField(required=True, error_messages=ErrMessage.char(_("Password")), validators=[validators.RegexValidator(regex=re.compile( "^(?![a-zA-Z]+$)(?![A-Z0-9]+$)(?![A-Z_!@#$%^&*`~.()-+=]+$)(?![a-z0-9]+$)(?![a-z_!@#$%^&*`~()-+=]+$)" "(?![0-9_!@#$%^&*`~()-+=]+$)[a-zA-Z0-9_!@#$%^&*`~.()-+=]{6,20}$") - , message="密码长度6-20个字符,必须字母、数字、特殊字符组合")]) + , message=_( + "The password must be 6-20 characters long and must be a combination of letters, numbers, and special characters."))]) - nick_name = serializers.CharField(required=False, error_messages=ErrMessage.char("姓名"), max_length=64, + nick_name = serializers.CharField(required=False, error_messages=ErrMessage.char(_("Name")), max_length=64, allow_null=True, allow_blank=True) - phone = serializers.CharField(required=False, error_messages=ErrMessage.char("手机号"), max_length=20, + phone = serializers.CharField(required=False, error_messages=ErrMessage.char(_("Phone")), max_length=20, allow_null=True, allow_blank=True) def is_valid(self, *, raise_exception=True): @@ -613,55 +728,61 @@ def get_request_body_api(): type=openapi.TYPE_OBJECT, required=['username', 'email', 'password'], properties={ - 'username': openapi.Schema(type=openapi.TYPE_STRING, title="用户名", description="用户名"), - 'email': openapi.Schema(type=openapi.TYPE_STRING, title="邮箱", description="邮箱地址"), - 'password': openapi.Schema(type=openapi.TYPE_STRING, title="密码", description="密码"), - 'phone': openapi.Schema(type=openapi.TYPE_STRING, title="手机号", description="手机号"), - 'nick_name': openapi.Schema(type=openapi.TYPE_STRING, title="姓名", description="姓名") + 'username': openapi.Schema(type=openapi.TYPE_STRING, title=_("Username"), + description=_("Username")), + 'email': openapi.Schema(type=openapi.TYPE_STRING, title=_("Email"), description=_("Email")), + 'password': openapi.Schema(type=openapi.TYPE_STRING, title=_("Password"), + description=_("Password")), + 'phone': openapi.Schema(type=openapi.TYPE_STRING, title=_("Phone"), description=_("Phone")), + 
'nick_name': openapi.Schema(type=openapi.TYPE_STRING, title=_("Name"), description=_("Name")) } ) class UserEditInstance(ApiMixin, serializers.Serializer): email = serializers.EmailField( required=False, - error_messages=ErrMessage.char("邮箱"), + error_messages=ErrMessage.char(_("Email")), validators=[validators.EmailValidator(message=ExceptionCodeConstants.EMAIL_FORMAT_ERROR.value.message, code=ExceptionCodeConstants.EMAIL_FORMAT_ERROR.value.code)]) - nick_name = serializers.CharField(required=False, error_messages=ErrMessage.char("姓名"), max_length=64, + nick_name = serializers.CharField(required=False, error_messages=ErrMessage.char(_("Name")), max_length=64, allow_null=True, allow_blank=True) - phone = serializers.CharField(required=False, error_messages=ErrMessage.char("手机号"), max_length=20, + phone = serializers.CharField(required=False, error_messages=ErrMessage.char(_("Phone")), max_length=20, allow_null=True, allow_blank=True) - is_active = serializers.BooleanField(required=False, error_messages=ErrMessage.char("是否可用")) + is_active = serializers.BooleanField(required=False, error_messages=ErrMessage.char(_("Is active"))) def is_valid(self, *, user_id=None, raise_exception=False): super().is_valid(raise_exception=True) - if QuerySet(User).filter(email=self.data.get('email')).exclude(id=user_id).exists(): - raise AppApiException(1004, "邮箱已经被使用") + if self.data.get('email') is not None and QuerySet(User).filter(email=self.data.get('email')).exclude( + id=user_id).exists(): + raise AppApiException(1004, _('Email is already in use')) @staticmethod def get_request_body_api(): return openapi.Schema( type=openapi.TYPE_OBJECT, properties={ - 'email': openapi.Schema(type=openapi.TYPE_STRING, title="邮箱", description="邮箱"), - 'nick_name': openapi.Schema(type=openapi.TYPE_STRING, title="姓名", description="姓名"), - 'phone': openapi.Schema(type=openapi.TYPE_STRING, title="手机号", description="手机号"), - 'is_active': openapi.Schema(type=openapi.TYPE_BOOLEAN, title="是否可用", 
description="是否可用"), + 'email': openapi.Schema(type=openapi.TYPE_STRING, title=_("Email"), description=_("Email")), + 'nick_name': openapi.Schema(type=openapi.TYPE_STRING, title=_("Name"), description=_("Name")), + 'phone': openapi.Schema(type=openapi.TYPE_STRING, title=_("Phone"), description=_("Phone")), + 'is_active': openapi.Schema(type=openapi.TYPE_BOOLEAN, title=_("Is active"), + description=_("Is active")), } ) class RePasswordInstance(ApiMixin, serializers.Serializer): - password = serializers.CharField(required=True, error_messages=ErrMessage.char("密码"), + password = serializers.CharField(required=True, error_messages=ErrMessage.char(_("Password")), validators=[validators.RegexValidator(regex=re.compile( "^(?![a-zA-Z]+$)(?![A-Z0-9]+$)(?![A-Z_!@#$%^&*`~.()-+=]+$)(?![a-z0-9]+$)(?![a-z_!@#$%^&*`~()-+=]+$)" "(?![0-9_!@#$%^&*`~()-+=]+$)[a-zA-Z0-9_!@#$%^&*`~.()-+=]{6,20}$") - , message="密码长度6-20个字符,必须字母、数字、特殊字符组合")]) - re_password = serializers.CharField(required=True, error_messages=ErrMessage.char("确认密码"), + , message=_( + "The password must be 6-20 characters long and must be a combination of letters, numbers, and special characters."))]) + re_password = serializers.CharField(required=True, error_messages=ErrMessage.char(_("Confirm Password")), validators=[validators.RegexValidator(regex=re.compile( "^(?![a-zA-Z]+$)(?![A-Z0-9]+$)(?![A-Z_!@#$%^&*`~.()-+=]+$)(?![a-z0-9]+$)(?![a-z_!@#$%^&*`~()-+=]+$)" "(?![0-9_!@#$%^&*`~()-+=]+$)[a-zA-Z0-9_!@#$%^&*`~.()-+=]{6,20}$") - , message="确认密码长度6-20个字符,必须字母、数字、特殊字符组合")] + , message=_( + "The confirmation password must be 6-20 characters long and must be a combination of letters, numbers, and special characters."))] ) @staticmethod @@ -670,9 +791,10 @@ def get_request_body_api(): type=openapi.TYPE_OBJECT, required=['password', 're_password'], properties={ - 'password': openapi.Schema(type=openapi.TYPE_STRING, title="密码", description="密码"), - 're_password': openapi.Schema(type=openapi.TYPE_STRING, title="确认密码", - 
description="确认密码"), + 'password': openapi.Schema(type=openapi.TYPE_STRING, title=_("Password"), + description=_("Password")), + 're_password': openapi.Schema(type=openapi.TYPE_STRING, title=_("Confirm Password"), + description=_("Confirm Password")), } ) @@ -681,6 +803,9 @@ def is_valid(self, *, raise_exception=False): if self.data.get('password') != self.data.get('re_password'): raise ExceptionCodeConstants.PASSWORD_NOT_EQ_RE_PASSWORD.value.to_app_api_exception() + @valid_license(model=User, count=2, + message=_( + 'The community version supports up to 2 users. If you need more users, please contact us (https://fit2cloud.com/).')) @transaction.atomic def save(self, instance, with_valid=True): if with_valid: @@ -690,20 +815,20 @@ def save(self, instance, with_valid=True): phone="" if instance.get('phone') is None else instance.get('phone'), nick_name="" if instance.get('nick_name') is None else instance.get('nick_name') , username=instance.get('username'), password=password_encrypt(instance.get('password')), - role=RoleConstants.USER.name, + role=RoleConstants.USER.name, source="LOCAL", is_active=True) user.save() # 初始化用户团队 - Team(**{'user': user, 'name': user.username + '的团队'}).save() + Team(**{'user': user, 'name': user.username + _('team')}).save() return UserInstanceSerializer(user).data class Operate(serializers.Serializer): - id = serializers.UUIDField(required=True, error_messages=ErrMessage.char("用户id")) + id = serializers.UUIDField(required=True, error_messages=ErrMessage.char("ID")) def is_valid(self, *, raise_exception=False): super().is_valid(raise_exception=True) if not QuerySet(User).filter(id=self.data.get('id')).exists(): - raise AppApiException(1004, "用户不存在") + raise AppApiException(1004, _('User does not exist')) @transaction.atomic def delete(self, with_valid=True): @@ -711,7 +836,7 @@ def delete(self, with_valid=True): self.is_valid(raise_exception=True) user = QuerySet(User).filter(id=self.data.get('id')).first() if user.role == 
RoleConstants.ADMIN.name: - raise AppApiException(1004, "无法删除管理员") + raise AppApiException(1004, _('Unable to delete administrator')) user_id = self.data.get('id') team_member_list = QuerySet(TeamMember).filter(Q(user_id=user_id) | Q(team_id=user_id)) @@ -729,7 +854,7 @@ def delete(self, with_valid=True): QuerySet(Paragraph).filter(dataset_id__in=dataset_id_list).delete() QuerySet(ProblemParagraphMapping).filter(dataset_id__in=dataset_id_list).delete() QuerySet(Problem).filter(dataset_id__in=dataset_id_list).delete() - ListenerManagement.delete_embedding_by_dataset_id_list_signal.send(dataset_id_list) + delete_embedding_by_dataset_id_list(dataset_id_list) dataset_list.delete() # 删除团队 QuerySet(Team).filter(user_id=self.data.get('id')).delete() @@ -748,7 +873,7 @@ def edit(self, instance, with_valid=True): user = QuerySet(User).filter(id=self.data.get('id')).first() if user.role == RoleConstants.ADMIN.name and 'is_active' in instance and instance.get( 'is_active') is not None: - raise AppApiException(1004, "不能修改管理员状态") + raise AppApiException(1004, _('Cannot modify administrator status')) update_keys = ['email', 'nick_name', 'phone', 'is_active'] for update_key in update_keys: if update_key in instance and instance.get(update_key) is not None: diff --git a/apps/users/task/__init__.py b/apps/users/task/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/apps/users/urls.py b/apps/users/urls.py index 55388d894c1..a9d1e134c90 100644 --- a/apps/users/urls.py +++ b/apps/users/urls.py @@ -6,6 +6,8 @@ urlpatterns = [ path('profile', views.Profile.as_view()), path('user', views.User.as_view(), name="profile"), + path('user/captcha', views.CaptchaView.as_view(), name='captcha'), + path('user/language', views.SwitchUserLanguageView.as_view(), name='language'), path('user/list', views.User.Query.as_view()), path('user/login', views.Login.as_view(), name='login'), path('user/logout', views.Logout.as_view(), name='logout'), @@ -21,4 +23,5 @@ 
name="user_manage_re_password"), path("user_manage//", views.UserManage.Page.as_view(), name="user_manage_re_password"), + path('user/list/', views.UserListView.as_view()), ] diff --git a/apps/users/views/common.py b/apps/users/views/common.py new file mode 100644 index 00000000000..c3f086b7b1c --- /dev/null +++ b/apps/users/views/common.py @@ -0,0 +1,32 @@ +# coding=utf-8 +""" + @project: MaxKB + @Author:虎 + @file: common.py + @date:2025/3/25 16:46 + @desc: +""" +from common.util.common import encryption +from users.models import User +from django.db.models import QuerySet + + +def get_user_operation_object(user_id): + user_model = QuerySet(model=User).filter(id=user_id).first() + if user_model is not None: + return { + "name": user_model.username + } + return {} + + +def get_re_password_details(request): + path = request.path + body = request.data + query = request.query_params + return { + 'path': path, + 'body': {**body, 'password': encryption(body.get('password', '')), + 're_password': encryption(body.get('re_password', ''))}, + 'query': query + } diff --git a/apps/users/views/user.py b/apps/users/views/user.py index e691ff4b989..d4b9f8a6a41 100644 --- a/apps/users/views/user.py +++ b/apps/users/views/user.py @@ -7,6 +7,7 @@ @desc: """ from django.core import cache +from django.utils.translation import gettext_lazy as _ from drf_yasg import openapi from drf_yasg.utils import swagger_auto_schema from rest_framework.decorators import action @@ -18,11 +19,15 @@ from common.auth.authenticate import TokenAuth from common.auth.authentication import has_permissions from common.constants.permission_constants import PermissionConstants, CompareConstants, ViewPermission, RoleConstants +from common.log.log import log from common.response import result +from common.util.common import encryption from smartdoc.settings import JWT_AUTH from users.serializers.user_serializers import RegisterSerializer, LoginSerializer, CheckCodeSerializer, \ RePasswordSerializer, \ - 
SendEmailSerializer, UserProfile, UserSerializer, UserManageSerializer, UserInstanceSerializer, SystemSerializer + SendEmailSerializer, UserProfile, UserSerializer, UserManageSerializer, UserInstanceSerializer, SystemSerializer, \ + SwitchLanguageSerializer, CaptchaSerializer +from users.views.common import get_user_operation_object, get_re_password_details user_cache = cache.caches['user_cache'] token_cache = cache.caches['token_cache'] @@ -30,10 +35,10 @@ class Profile(APIView): @action(methods=['GET'], detail=False) - @swagger_auto_schema(operation_summary="获取MaxKB相关信息", - operation_id="获取MaxKB相关信息", + @swagger_auto_schema(operation_summary=_("Get MaxKB related information"), + operation_id=_("Get MaxKB related information"), responses=result.get_api_response(SystemSerializer.get_response_body_api()), - tags=['系统参数']) + tags=[_('System parameters')]) def get(self, request: Request): return result.success(SystemSerializer.get_profile()) @@ -42,10 +47,10 @@ class User(APIView): authentication_classes = [TokenAuth] @action(methods=['GET'], detail=False) - @swagger_auto_schema(operation_summary="获取当前用户信息", - operation_id="获取当前用户信息", + @swagger_auto_schema(operation_summary=_("Get current user information"), + operation_id=_("Get current user information"), responses=result.get_api_response(UserProfile.get_response_body_api()), - tags=['用户']) + tags=[]) @has_permissions(PermissionConstants.USER_READ) def get(self, request: Request): return result.success(UserProfile.get_user_profile(request.user)) @@ -54,35 +59,63 @@ class Query(APIView): authentication_classes = [TokenAuth] @action(methods=['GET'], detail=False) - @swagger_auto_schema(operation_summary="获取用户列表", - operation_id="获取用户列表", + @swagger_auto_schema(operation_summary=_("Get user list"), + operation_id=_("Get user list"), manual_parameters=UserSerializer.Query.get_request_params_api(), responses=result.get_api_array_response(UserSerializer.Query.get_response_body_api()), - tags=['用户']) + tags=[_("User 
management")]) @has_permissions(PermissionConstants.USER_READ) def get(self, request: Request): return result.success( UserSerializer.Query(data={'email_or_username': request.query_params.get('email_or_username')}).list()) +class SwitchUserLanguageView(APIView): + authentication_classes = [TokenAuth] + + @action(methods=['POST'], detail=False) + @swagger_auto_schema(operation_summary=_("Switch Language"), + operation_id=_("Switch Language"), + request_body=openapi.Schema( + type=openapi.TYPE_OBJECT, + required=['language'], + properties={ + 'language': openapi.Schema(type=openapi.TYPE_STRING, title=_("language"), + description=_("language")), + } + ), + responses=result.get_default_response(), + tags=[_("User management")]) + @log(menu='User management', operate='Switch Language', + get_operation_object=lambda r, k: {'name': r.user.username}) + def post(self, request: Request): + data = {**request.data, 'user_id': request.user.id} + return result.success(SwitchLanguageSerializer(data=data).switch()) + + class ResetCurrentUserPasswordView(APIView): authentication_classes = [TokenAuth] @action(methods=['POST'], detail=False) - @swagger_auto_schema(operation_summary="修改当前用户密码", - operation_id="修改当前用户密码", + @swagger_auto_schema(operation_summary=_("Modify current user password"), + operation_id=_("Modify current user password"), request_body=openapi.Schema( type=openapi.TYPE_OBJECT, required=['email', 'code', "password", 're_password'], properties={ - 'code': openapi.Schema(type=openapi.TYPE_STRING, title="验证码", description="验证码"), - 'password': openapi.Schema(type=openapi.TYPE_STRING, title="密码", description="密码"), - 're_password': openapi.Schema(type=openapi.TYPE_STRING, title="密码", - description="密码") + 'code': openapi.Schema(type=openapi.TYPE_STRING, title=_("Verification code"), + description=_("Verification code")), + 'password': openapi.Schema(type=openapi.TYPE_STRING, title=_("Password"), + description=_("Password")), + 're_password': 
openapi.Schema(type=openapi.TYPE_STRING, title=_("Password"), + description=_("Password")) } ), - responses=RePasswordSerializer().get_response_body_api(), - tags=['用户']) + responses=result.get_default_response(), + tags=[_("User management")]) + @log(menu='User management', operate='Modify current user password', + get_operation_object=lambda r, k: {'name': r.user.username}, + get_details=get_re_password_details) def post(self, request: Request): data = {'email': request.user.email} data.update(request.data) @@ -90,7 +123,7 @@ def post(self, request: Request): if serializer_obj.reset_password(): token_cache.delete(request.META.get('HTTP_AUTHORIZATION')) return result.success(True) - return result.error("修改密码失败") + return result.error(_("Failed to change password")) class SendEmailToCurrentUserView(APIView): @@ -98,10 +131,12 @@ class SendEmailToCurrentUserView(APIView): @action(methods=['POST'], detail=False) @permission_classes((AllowAny,)) - @swagger_auto_schema(operation_summary="发送邮件到当前用户", - operation_id="发送邮件到当前用户", + @swagger_auto_schema(operation_summary=_("Send email to current user"), + operation_id=_("Send email to current user"), responses=SendEmailSerializer().get_response_body_api(), - tags=['用户']) + tags=[_("User management")]) + @log(menu='User management', operate='Send email to current user', + get_operation_object=lambda r, k: {'name': r.user.username}) def post(self, request: Request): serializer_obj = SendEmailSerializer(data={'email': request.user.email, 'type': "reset_password"}) if serializer_obj.is_valid(raise_exception=True): @@ -113,24 +148,52 @@ class Logout(APIView): @action(methods=['POST'], detail=False) @permission_classes((AllowAny,)) - @swagger_auto_schema(operation_summary="登出", - operation_id="登出", + @swagger_auto_schema(operation_summary=_("Sign out"), + operation_id=_("Sign out"), responses=SendEmailSerializer().get_response_body_api(), - tags=['用户']) + tags=[_("User management")]) + @log(menu='User management', operate='Sign 
out', + get_operation_object=lambda r, k: {'name': r.user.username}) def post(self, request: Request): token_cache.delete(request.META.get('HTTP_AUTHORIZATION')) return result.success(True) +def _get_details(request): + path = request.path + body = request.data + query = request.query_params + return { + 'path': path, + 'body': {**body, 'password': encryption(body.get('password', ''))}, + 'query': query + } + + +class CaptchaView(APIView): + + @action(methods=['GET'], detail=False) + @swagger_auto_schema(operation_summary=_("Obtain graphical captcha"), + operation_id=_("Obtain graphical captcha"), + responses=CaptchaSerializer().get_response_body_api(), + security=[], + tags=[_("User management")]) + def get(self, request: Request): + return result.success(CaptchaSerializer().generate()) + + class Login(APIView): @action(methods=['POST'], detail=False) - @swagger_auto_schema(operation_summary="登录", - operation_id="登录", + @swagger_auto_schema(operation_summary=_("Log in"), + operation_id=_("Log in"), request_body=LoginSerializer().get_request_body_api(), responses=LoginSerializer().get_response_body_api(), security=[], - tags=['用户']) + tags=[_("User management")]) + @log(menu='User management', operate='Log in', get_user=lambda r: {'username': r.data.get('username', None)}, + get_details=_get_details, + get_operation_object=lambda r, k: {'name': r.data.get('username')}) def post(self, request: Request): login_request = LoginSerializer(data=request.data) # 校验请求参数 @@ -144,29 +207,36 @@ class Register(APIView): @action(methods=['POST'], detail=False) @permission_classes((AllowAny,)) - @swagger_auto_schema(operation_summary="用户注册", - operation_id="用户注册", + @swagger_auto_schema(operation_summary=_("User registration"), + operation_id=_("User registration"), request_body=RegisterSerializer().get_request_body_api(), responses=RegisterSerializer().get_response_body_api(), security=[], - tags=['用户']) + tags=[_("User management")]) + @log(menu='User management', operate='User 
registration', + get_operation_object=lambda r, k: {'name': r.data.get('username', None)}, + get_user=lambda r: {'user_name': r.data.get('username', None)}) def post(self, request: Request): serializer_obj = RegisterSerializer(data=request.data) if serializer_obj.is_valid(raise_exception=True): serializer_obj.save() - return result.success("注册成功") + return result.success(_("Registration successful")) class RePasswordView(APIView): @action(methods=['POST'], detail=False) @permission_classes((AllowAny,)) - @swagger_auto_schema(operation_summary="修改密码", - operation_id="修改密码", + @swagger_auto_schema(operation_summary=_("Change password"), + operation_id=_("Change password"), request_body=RePasswordSerializer().get_request_body_api(), responses=RePasswordSerializer().get_response_body_api(), security=[], - tags=['用户']) + tags=[_("User management")]) + @log(menu='User management', operate='Change password', + get_operation_object=lambda r, k: {'name': r.data.get('email', None)}, + get_user=lambda r: {'user_name': None, 'email': r.data.get('email', None)}, + get_details=get_re_password_details) def post(self, request: Request): serializer_obj = RePasswordSerializer(data=request.data) return result.success(serializer_obj.reset_password()) @@ -176,12 +246,15 @@ class CheckCode(APIView): @action(methods=['POST'], detail=False) @permission_classes((AllowAny,)) - @swagger_auto_schema(operation_summary="校验验证码是否正确", - operation_id="校验验证码是否正确", + @swagger_auto_schema(operation_summary=_("Check whether the verification code is correct"), + operation_id=_("Check whether the verification code is correct"), request_body=CheckCodeSerializer().get_request_body_api(), responses=CheckCodeSerializer().get_response_body_api(), security=[], - tags=['用户']) + tags=[_("User management")]) + @log(menu='User management', operate='Check whether the verification code is correct', + get_operation_object=lambda r, k: {'name': r.data.get('email', None)}, + get_user=lambda r: {'user_name': None, 
'email': r.data.get('email', None)}) def post(self, request: Request): return result.success(CheckCodeSerializer(data=request.data).is_valid(raise_exception=True)) @@ -189,12 +262,15 @@ def post(self, request: Request): class SendEmail(APIView): @action(methods=['POST'], detail=False) - @swagger_auto_schema(operation_summary="发送邮件", - operation_id="发送邮件", + @swagger_auto_schema(operation_summary=_("Send email"), + operation_id=_("Send email"), request_body=SendEmailSerializer().get_request_body_api(), responses=SendEmailSerializer().get_response_body_api(), security=[], - tags=['用户']) + tags=[_("User management")]) + @log(menu='User management', operate='Send email', + get_operation_object=lambda r, k: {'name': r.data.get('email', None)}, + get_user=lambda r: {'user_name': None, 'email': r.data.get('email', None)}) def post(self, request: Request): serializer_obj = SendEmailSerializer(data=request.data) if serializer_obj.is_valid(raise_exception=True): @@ -205,16 +281,18 @@ class UserManage(APIView): authentication_classes = [TokenAuth] @action(methods=['POST'], detail=False) - @swagger_auto_schema(operation_summary="添加用户", - operation_id="添加用户", + @swagger_auto_schema(operation_summary=_("Add user"), + operation_id=_("Add user"), request_body=UserManageSerializer.UserInstance.get_request_body_api(), responses=result.get_api_response(UserInstanceSerializer.get_response_body_api()), - tags=["用户管理"] + tags=[_("User management")] ) @has_permissions(ViewPermission( [RoleConstants.ADMIN], [PermissionConstants.USER_READ], compare=CompareConstants.AND)) + @log(menu='User management', operate='Add user', + get_operation_object=lambda r, k: {'name': r.data.get('username', None)}) def post(self, request: Request): return result.success(UserManageSerializer().save(request.data)) @@ -222,9 +300,9 @@ class Page(APIView): authentication_classes = [TokenAuth] @action(methods=['GET'], detail=False) - @swagger_auto_schema(operation_summary="获取用户分页列表", - operation_id="获取用户分页列表", - 
tags=["用户管理"], + @swagger_auto_schema(operation_summary=_("Get user paginated list"), + operation_id=_("Get user paginated list"), + tags=[_("User management")], manual_parameters=UserManageSerializer.Query.get_request_params_api(), responses=result.get_page_api_response(UserInstanceSerializer.get_response_body_api()), ) @@ -242,16 +320,19 @@ class RePassword(APIView): authentication_classes = [TokenAuth] @action(methods=['PUT'], detail=False) - @swagger_auto_schema(operation_summary="修改密码", - operation_id="修改密码", + @swagger_auto_schema(operation_summary=_("Change password"), + operation_id=_("Change password"), manual_parameters=UserInstanceSerializer.get_request_params_api(), request_body=UserManageSerializer.RePasswordInstance.get_request_body_api(), responses=result.get_default_response(), - tags=["用户管理"]) + tags=[_("User management")]) @has_permissions(ViewPermission( [RoleConstants.ADMIN], [PermissionConstants.USER_READ], compare=CompareConstants.AND)) + @log(menu='User management', operate='Change password', + get_operation_object=lambda r, k: get_user_operation_object(k.get('user_id')), + get_details=get_re_password_details) def put(self, request: Request, user_id): return result.success( UserManageSerializer.Operate(data={'id': user_id}).re_password(request.data, with_valid=True)) @@ -260,24 +341,26 @@ class Operate(APIView): authentication_classes = [TokenAuth] @action(methods=['DELETE'], detail=False) - @swagger_auto_schema(operation_summary="删除用户", - operation_id="删除用户", + @swagger_auto_schema(operation_summary=_("Delete user"), + operation_id=_("Delete user"), manual_parameters=UserInstanceSerializer.get_request_params_api(), responses=result.get_default_response(), - tags=["用户管理"]) + tags=[_("User management")]) @has_permissions(ViewPermission( [RoleConstants.ADMIN], [PermissionConstants.USER_READ], compare=CompareConstants.AND)) + @log(menu='User management', operate='Delete user', + get_operation_object=lambda r, k: 
get_user_operation_object(k.get('user_id'))) def delete(self, request: Request, user_id): return result.success(UserManageSerializer.Operate(data={'id': user_id}).delete(with_valid=True)) @action(methods=['GET'], detail=False) - @swagger_auto_schema(operation_summary="获取用户信息", - operation_id="获取用户信息", + @swagger_auto_schema(operation_summary=_("Get user information"), + operation_id=_("Get user information"), manual_parameters=UserInstanceSerializer.get_request_params_api(), responses=result.get_api_response(UserInstanceSerializer.get_response_body_api()), - tags=["用户管理"] + tags=[_("User management")] ) @has_permissions(ViewPermission( [RoleConstants.ADMIN], @@ -287,17 +370,32 @@ def get(self, request: Request, user_id): return result.success(UserManageSerializer.Operate(data={'id': user_id}).one(with_valid=True)) @action(methods=['PUT'], detail=False) - @swagger_auto_schema(operation_summary="修改用户信息", - operation_id="修改用户信息", + @swagger_auto_schema(operation_summary=_("Update user information"), + operation_id=_("Update user information"), manual_parameters=UserInstanceSerializer.get_request_params_api(), request_body=UserManageSerializer.UserEditInstance.get_request_body_api(), responses=result.get_api_response(UserInstanceSerializer.get_response_body_api()), - tags=["用户管理"] + tags=[_("User management")] ) @has_permissions(ViewPermission( [RoleConstants.ADMIN], [PermissionConstants.USER_READ], compare=CompareConstants.AND)) + @log(menu='User management', operate='Update user information', + get_operation_object=lambda r, k: get_user_operation_object(k.get('user_id'))) def put(self, request: Request, user_id): return result.success( UserManageSerializer.Operate(data={'id': user_id}).edit(request.data, with_valid=True)) + + +class UserListView(APIView): + authentication_classes = [TokenAuth] + + @swagger_auto_schema(operation_summary=_("Get user list by type"), + operation_id=_("Get user list by type"), + 
manual_parameters=UserSerializer.Query.get_request_params_api(), + responses=result.get_api_array_response(UserSerializer.Query.get_response_body_api()), + tags=[_("User management")]) + @has_permissions(PermissionConstants.USER_READ) + def get(self, request: Request, type): + return result.success(UserSerializer().listByType(type, request.user.id)) diff --git a/config_example.yml b/config_example.yml index b4310642751..824de3aa44b 100644 --- a/config_example.yml +++ b/config_example.yml @@ -1,12 +1,3 @@ -# 邮箱配置 -EMAIL_ADDRESS: -EMAIL_USE_TLS: False -EMAIL_USE_SSL: True -EMAIL_HOST: smtp.qq.com -EMAIL_PORT: 465 -EMAIL_HOST_USER: -EMAIL_HOST_PASSWORD: - # 数据库链接信息 DB_NAME: maxkb DB_HOST: localhost @@ -18,3 +9,4 @@ DB_ENGINE: django.db.backends.postgresql_psycopg2 DEBUG: false TIME_ZONE: Asia/Shanghai + diff --git a/installer/Dockerfile b/installer/Dockerfile index 6462a8f5d53..796e8535ef3 100644 --- a/installer/Dockerfile +++ b/installer/Dockerfile @@ -1,11 +1,11 @@ -FROM ghcr.io/1panel-dev/maxkb-vector-model:v1.0.1 as vector-model -FROM node:18-alpine3.18 as web-build +FROM ghcr.io/1panel-dev/maxkb-vector-model:v1.0.1 AS vector-model +FROM node:18-alpine3.18 AS web-build COPY ui ui RUN cd ui && \ npm install && \ npm run build && \ rm -rf ./node_modules -FROM ghcr.io/1panel-dev/maxkb-python-pg:python3.11-pg15.6 as stage-build +FROM ghcr.io/1panel-dev/maxkb-python-pg:python3.11-pg15.8 AS stage-build ARG DEPENDENCIES=" \ python3-pip" @@ -17,40 +17,63 @@ RUN apt-get update && \ COPY . /opt/maxkb/app RUN mkdir -p /opt/maxkb/app /opt/maxkb/model /opt/maxkb/conf && \ - cp -f /opt/maxkb/app/installer/config.yaml /opt/maxkb/conf && \ rm -rf /opt/maxkb/app/ui + COPY --from=web-build ui /opt/maxkb/app/ui WORKDIR /opt/maxkb/app RUN python3 -m venv /opt/py3 && \ - pip install poetry --break-system-packages && \ + pip install poetry==1.8.5 --break-system-packages && \ poetry config virtualenvs.create false && \ . 
/opt/py3/bin/activate && \ - if [ "$(uname -m)" = "x86_64" ]; then sed -i 's/^torch.*/torch = {version = "^2.2.1+cpu", source = "pytorch"}/g' pyproject.toml; fi && \ - poetry install + if [ "$(uname -m)" = "x86_64" ]; then sed -i 's/^torch.*/torch = {version = "2.6.0+cpu", source = "pytorch"}/g' pyproject.toml; fi && \ + poetry install && \ + export MAXKB_CONFIG_TYPE=ENV && python3 /opt/maxkb/app/apps/manage.py compilemessages -FROM ghcr.io/1panel-dev/maxkb-python-pg:python3.11-pg15.6 +FROM ghcr.io/1panel-dev/maxkb-python-pg:python3.11-pg15.8 ARG DOCKER_IMAGE_TAG=dev \ BUILD_AT \ GITHUB_COMMIT -ENV MAXKB_VERSION ${DOCKER_IMAGE_TAG} (build at ${BUILD_AT}, commit: ${GITHUB_COMMIT}) +ENV MAXKB_VERSION="${DOCKER_IMAGE_TAG} (build at ${BUILD_AT}, commit: ${GITHUB_COMMIT})" \ + MAXKB_CONFIG_TYPE=ENV \ + MAXKB_DB_NAME=maxkb \ + MAXKB_DB_HOST=127.0.0.1 \ + MAXKB_DB_PORT=5432 \ + MAXKB_DB_USER=root \ + MAXKB_DB_PASSWORD=Password123@postgres \ + MAXKB_DB_MAX_OVERFLOW=80 \ + MAXKB_EMBEDDING_MODEL_NAME=/opt/maxkb/model/embedding/shibing624_text2vec-base-chinese \ + MAXKB_EMBEDDING_MODEL_PATH=/opt/maxkb/model/embedding \ + MAXKB_SANDBOX=1 \ + LANG=en_US.UTF-8 \ + PATH=/opt/py3/bin:$PATH \ + POSTGRES_USER=root \ + POSTGRES_PASSWORD=Password123@postgres \ + POSTGRES_MAX_CONNECTIONS=1000 \ + PIP_TARGET=/opt/maxkb/app/sandbox/python-packages \ + PYTHONPATH=/opt/maxkb/app/sandbox/python-packages \ + PYTHONUNBUFFERED=1 + WORKDIR /opt/maxkb/app COPY --from=stage-build /opt/maxkb /opt/maxkb COPY --from=stage-build /opt/py3 /opt/py3 COPY --from=vector-model /opt/maxkb/app/model /opt/maxkb/model -ENV LANG=en_US.UTF-8 \ - PATH=/opt/py3/bin:$PATH - -ENV POSTGRES_USER root -ENV POSTGRES_PASSWORD Password123@postgres - RUN chmod 755 /opt/maxkb/app/installer/run-maxkb.sh && \ cp -r /opt/maxkb/model/base/hub /opt/maxkb/model/tokenizer && \ cp -f /opt/maxkb/app/installer/run-maxkb.sh /usr/bin/run-maxkb.sh && \ - cp -f /opt/maxkb/app/installer/init.sql /docker-entrypoint-initdb.d + cp -f 
/opt/maxkb/app/installer/init.sql /docker-entrypoint-initdb.d && \ + curl -L --connect-timeout 120 -m 1800 https://resource.fit2cloud.com/maxkb/ffmpeg/get-ffmpeg-linux | sh && \ + mkdir -p /opt/maxkb/app/sandbox/python-packages && \ + find /opt/maxkb/app -mindepth 1 -not -name 'sandbox' -exec chmod 700 {} + && \ + chmod 755 /tmp && \ + useradd --no-create-home --home /opt/maxkb/app/sandbox sandbox -g root && \ + chown -R sandbox:root /opt/maxkb/app/sandbox && \ + chmod g-x /usr/local/bin/* /usr/bin/* /bin/* /usr/sbin/* /sbin/* /usr/lib/postgresql/15/bin/* && \ + chmod g+x /usr/local/bin/python* && \ + find /etc/ -type f ! -path '/etc/resolv.conf' ! -path '/etc/hosts' | xargs chmod g-rx EXPOSE 8080 ENTRYPOINT ["bash", "-c"] -CMD [ "/usr/bin/run-maxkb.sh" ] \ No newline at end of file +CMD [ "/usr/bin/run-maxkb.sh" ] diff --git a/installer/Dockerfile-python-pg b/installer/Dockerfile-python-pg index eb250146881..f871ac4ef4f 100644 --- a/installer/Dockerfile-python-pg +++ b/installer/Dockerfile-python-pg @@ -1,14 +1,19 @@ -FROM postgres:15.6-bookworm +FROM python:3.11-slim-bullseye AS python-stage +FROM postgres:15.8-bullseye ARG DEPENDENCIES=" \ + libexpat1-dev \ + libffi-dev \ curl \ + ca-certificates \ vim \ - python3.11-mini \ - python3.11-venv \ + gettext \ postgresql-15-pgvector" RUN ln -sf /usr/share/zoneinfo/Asia/Shanghai /etc/localtime && \ echo "Asia/Shanghai" > /etc/timezone && \ apt-get update && apt-get install -y --no-install-recommends $DEPENDENCIES && \ apt-get clean all && \ - rm -rf /var/lib/apt/lists/* \ No newline at end of file + rm -rf /var/lib/apt/lists/* + +COPY --from=python-stage /usr/local /usr/local \ No newline at end of file diff --git a/installer/Dockerfile-vector-model b/installer/Dockerfile-vector-model index b44cadc454c..a7326614bc8 100644 --- a/installer/Dockerfile-vector-model +++ b/installer/Dockerfile-vector-model @@ -1,4 +1,4 @@ -FROM python:3.11-slim-bookworm as vector-model +FROM python:3.11-slim-bookworm AS vector-model COPY 
installer/install_model.py install_model.py RUN pip3 install --upgrade pip setuptools && \ diff --git a/installer/config.yaml b/installer/config.yaml index c9f45db869f..8127fc9ab67 100644 --- a/installer/config.yaml +++ b/installer/config.yaml @@ -13,7 +13,7 @@ DB_HOST: 127.0.0.1 DB_PORT: 5432 DB_USER: root DB_PASSWORD: Password123@postgres -DB_ENGINE: django.db.backends.postgresql_psycopg2 +DB_ENGINE: dj_db_conn_pool.backends.postgresql EMBEDDING_MODEL_PATH: /opt/maxkb/model/embedding EMBEDDING_MODEL_NAME: /opt/maxkb/model/embedding/shibing624_text2vec-base-chinese diff --git a/installer/run-maxkb.sh b/installer/run-maxkb.sh index 597da7f025c..238875e06b1 100644 --- a/installer/run-maxkb.sh +++ b/installer/run-maxkb.sh @@ -1,7 +1,7 @@ #!/bin/bash - +rm -f /opt/maxkb/app/tmp/*.pid # Start postgresql -docker-entrypoint.sh postgres & +docker-entrypoint.sh postgres -c max_connections=${POSTGRES_MAX_CONNECTIONS} & sleep 10 # Wait postgresql until pg_isready --host=127.0.0.1; do sleep 1 && echo "waiting for postgres"; done diff --git a/installer/start-maxkb.sh b/installer/start-maxkb.sh new file mode 100644 index 00000000000..4e88eff52b6 --- /dev/null +++ b/installer/start-maxkb.sh @@ -0,0 +1,3 @@ +#!/bin/bash +rm -f /opt/maxkb/app/tmp/*.pid +python /opt/maxkb/app/main.py start \ No newline at end of file diff --git a/main.py b/main.py index dbe48e7e32d..5c6413ee4a4 100644 --- a/main.py +++ b/main.py @@ -2,6 +2,7 @@ import logging import os import sys +import time import django from django.core import management @@ -43,8 +44,40 @@ def perform_db_migrate(): def start_services(): - management.call_command('migrate') - management.call_command('runserver', "0.0.0.0:8080") + services = args.services if isinstance(args.services, list) else [args.services] + start_args = [] + if args.daemon: + start_args.append('--daemon') + if args.force: + start_args.append('--force') + if args.worker: + start_args.extend(['--worker', str(args.worker)]) + else: + worker = 
os.environ.get('CORE_WORKER') + if isinstance(worker, str) and worker.isdigit(): + start_args.extend(['--worker', worker]) + + try: + management.call_command(action, *services, *start_args) + except KeyboardInterrupt: + logging.info('Cancel ...') + time.sleep(2) + except Exception as exc: + logging.error("Start service error {}: {}".format(services, exc)) + time.sleep(2) + + +def dev(): + services = args.services if isinstance(args.services, list) else args.services + if services.__contains__('web'): + management.call_command('runserver', "0.0.0.0:8080") + elif services.__contains__('celery'): + management.call_command('celery', 'celery') + elif services.__contains__('local_model'): + os.environ.setdefault('SERVER_NAME', 'local_model') + from smartdoc.const import CONFIG + bind = f'{CONFIG.get("LOCAL_MODEL_HOST")}:{CONFIG.get("LOCAL_MODEL_PORT")}' + management.call_command('runserver', bind) if __name__ == '__main__': @@ -60,16 +93,31 @@ def start_services(): ) parser.add_argument( 'action', type=str, - choices=("start", "upgrade_db", "collect_static"), + choices=("start", "dev", "upgrade_db", "collect_static"), help="Action to run" ) - args = parser.parse_args() + args, e = parser.parse_known_args() + parser.add_argument( + "services", type=str, default='all' if args.action == 'start' else 'web', nargs="*", + choices=("all", "web", "task") if args.action == 'start' else ("web", "celery", 'local_model'), + help="The service to start", + ) + parser.add_argument('-d', '--daemon', nargs="?", const=True) + parser.add_argument('-w', '--worker', type=int, nargs="?") + parser.add_argument('-f', '--force', nargs="?", const=True) + args = parser.parse_args() action = args.action if action == "upgrade_db": perform_db_migrate() elif action == "collect_static": collect_static() + elif action == 'dev': + collect_static() + perform_db_migrate() + dev() else: collect_static() + perform_db_migrate() start_services() + diff --git a/package-lock.json b/package-lock.json new file 
mode 100644 index 00000000000..b9a9b8c63c4 --- /dev/null +++ b/package-lock.json @@ -0,0 +1,7 @@ +{ + "name": "MaxKB", + "lockfileVersion": 3, + "requires": true, + "packages": {} +} + diff --git a/pyproject.toml b/pyproject.toml index 3eddbace0c8..ea87b807d8a 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,44 +1,74 @@ [tool.poetry] name = "maxkb" version = "0.1.0" -description = "智能知识库" +description = "智能知识库问答系统" authors = ["shaohuzhang1 "] readme = "README.md" +package-mode = false [tool.poetry.dependencies] -python = "^3.11" -django = "4.1.13" -djangorestframework = "3.14.0" +python = ">=3.11,<3.12" +django = "4.2.20" +djangorestframework = "3.16.0" drf-yasg = "1.21.7" django-filter = "23.2" -langchain = "^0.1.11" -psycopg2-binary = "2.9.7" -jieba = "^0.42.1" -diskcache = "^5.6.3" -pillow = "^10.2.0" -filetype = "^1.2.0" -torch = "^2.2.1" -sentence-transformers = "^2.2.2" -blinker = "^1.6.3" -openai = "^1.13.3" -tiktoken = "^0.5.1" -qianfan = "^0.3.6.1" -pycryptodome = "^3.19.0" -beautifulsoup4 = "^4.12.2" -html2text = "^2024.2.26" -langchain-openai = "^0.0.8" -django-ipware = "^6.0.4" -django-apscheduler = "^0.6.2" -pymupdf = "1.24.1" -python-docx = "^1.1.0" -xlwt = "^1.3.0" -dashscope = "^1.17.0" -zhipuai = "^2.0.1" -httpx = "^0.27.0" -httpx-sse = "^0.4.0" -websocket-client = "^1.7.0" -langchain-google-genai = "^1.0.3" - +langchain = "0.3.23" +langchain-openai = "0.3.12" +langchain-anthropic = "0.3.12" +langchain-community = "0.3.21" +langchain-deepseek = "0.1.3" +langchain-google-genai = "2.1.2" +langchain-mcp-adapters = "0.0.11" +langchain-huggingface = "0.1.2" +langchain-ollama = "0.3.2" +langgraph = "0.3.27" +mcp = "1.8.0" +psycopg2-binary = "2.9.10" +jieba = "0.42.1" +diskcache = "5.6.3" +pillow = "10.4.0" +filetype = "1.2.0" +torch = "2.6.0" +sentence-transformers = "4.0.2" +openai = "1.72.0" +tiktoken = "0.7.0" +qianfan = "0.3.18" +pycryptodome = "3.22.0" +beautifulsoup4 = "4.13.3" +html2text = "2024.2.26" +django-ipware = "6.0.5" 
+django-apscheduler = "0.6.2" +pymupdf = "1.24.9" +pypdf = "4.3.1" +rapidocr-onnxruntime = "1.3.24" +python-docx = "1.1.2" +xlwt = "1.3.0" +dashscope = "1.23.1" +zhipuai = "2.1.5.20250410" +httpx = "0.27.2" +httpx-sse = "0.4.0" +websockets = "13.1" +openpyxl = "3.1.5" +xlrd = "2.0.1" +gunicorn = "23.0.0" +python-daemon = "3.0.1" +boto3 = "1.37.31" +tencentcloud-sdk-python = "3.0.1357" +xinference-client = "1.4.1" +psutil = "6.1.1" +celery = { extras = ["sqlalchemy"], version = "5.5.1" } +django-celery-beat = "2.7.0" +celery-once = "3.0.1" +anthropic = "0.49.0" +pylint = "3.3.6" +pydub = "0.25.1" +cffi = "1.17.1" +pysilk = "0.0.1" +django-db-connection-pool = "1.2.5" +opencv-python-headless = "4.11.0.86" +pymysql = "1.1.1" +accelerate = "1.6.0" +captcha = "0.7.1" [build-system] requires = ["poetry-core"] build-backend = "poetry.core.masonry.api" diff --git a/ui/env.d.ts b/ui/env.d.ts index 52f54527078..08bb5e826b0 100644 --- a/ui/env.d.ts +++ b/ui/env.d.ts @@ -1,5 +1,7 @@ /// declare module 'element-plus/dist/locale/zh-cn.mjs' +declare module 'element-plus/dist/locale/en.mjs' +declare module 'element-plus/dist/locale/zh-tw.mjs' declare module 'markdown-it-task-lists' declare module 'markdown-it-abbr' declare module 'markdown-it-anchor' @@ -8,7 +10,10 @@ declare module 'markdown-it-sub' declare module 'markdown-it-sup' declare module 'markdown-it-toc-done-right' declare module 'katex' +interface Window { + sendMessage: ?((message: string, other_params_data: any) => void) +} interface ImportMeta { readonly env: ImportMetaEnv } -declare type Recordable = Record; +declare type Recordable = Record diff --git a/ui/index.html b/ui/index.html index 1f9ca13a8b2..09bec9ae48c 100644 --- a/ui/index.html +++ b/ui/index.html @@ -3,8 +3,12 @@ - - + + %VITE_APP_TITLE% diff --git a/ui/package.json b/ui/package.json index 229b9d3a701..cee7a41c8fd 100644 --- a/ui/package.json +++ b/ui/package.json @@ -4,7 +4,7 @@ "private": true, "scripts": { "dev": "vite", - "build": "run-p 
type-check build-only", + "build": "set NODE_OPTIONS=--max_old_space_size=4096 && run-p type-check build-only", "preview": "vite preview", "test:unit": "vitest", "build-only": "vite build", @@ -13,26 +13,28 @@ "format": "prettier --write src/" }, "dependencies": { + "@antv/layout": "^0.3.1", + "@codemirror/theme-one-dark": "^6.1.2", "@ctrl/tinycolor": "^4.1.0", + "@logicflow/core": "^1.2.27", + "@logicflow/extension": "^1.2.27", + "@types/sortablejs": "^1.15.8", + "@vavt/cm-extension": "^1.6.0", "@vueuse/core": "^10.9.0", - "axios": "^0.28.0", + "@wecom/jssdk": "^2.1.0", + "axios": "^1.8.3", + "codemirror": "^6.0.1", "cropperjs": "^1.6.2", + "dingtalk-jsapi": "^2.15.6", "echarts": "^5.5.0", - "element-plus": "^2.5.6", + "element-plus": "^2.9.1", + "file-saver": "^2.0.5", + "highlight.js": "^11.9.0", "install": "^0.13.0", "katex": "^0.16.10", "lodash": "^4.17.21", - "markdown-it": "^13.0.2", - "markdown-it-abbr": "^1.0.4", - "markdown-it-anchor": "^8.6.7", - "markdown-it-footnote": "^3.0.3", - "markdown-it-highlightjs": "^4.0.1", - "markdown-it-sub": "^1.0.0", - "markdown-it-sup": "^1.0.0", - "markdown-it-task-lists": "^2.1.1", - "markdown-it-toc-done-right": "^4.2.0", - "md-editor-v3": "4.12.1", - "medium-zoom": "^1.1.0", + "marked": "^12.0.2", + "md-editor-v3": "^4.16.7", "mermaid": "^10.9.0", "mitt": "^3.0.0", "moment": "^2.30.1", @@ -40,18 +42,25 @@ "nprogress": "^0.2.0", "pinia": "^2.1.6", "pinyin-pro": "^3.18.2", + "recorder-core": "^1.3.24040900", "screenfull": "^6.0.2", + "sortablejs": "^1.15.6", + "use-element-plus-theme": "^0.0.5", "vue": "^3.3.4", "vue-clipboard3": "^2.0.0", + "vue-codemirror": "^6.1.1", + "vue-demi": "latest", + "vue-draggable-plus": "^0.6.0", "vue-i18n": "^9.13.1", - "vue-router": "^4.2.4" + "vue-router": "^4.2.4", + "vue3-menus": "^1.1.2", + "vuedraggable": "^4.1.0" }, "devDependencies": { "@rushstack/eslint-patch": "^1.3.2", "@tsconfig/node18": "^18.2.0", + "@types/file-saver": "^2.0.7", "@types/jsdom": "^21.1.1", - 
"@types/markdown-it": "^13.0.7", - "@types/markdown-it-highlightjs": "^3.3.4", "@types/node": "^18.17.5", "@types/nprogress": "^0.2.0", "@vitejs/plugin-vue": "^4.3.1", @@ -64,7 +73,7 @@ "jsdom": "^22.1.0", "npm-run-all": "^4.1.5", "prettier": "^3.0.0", - "sass": "^1.66.1", + "sass": "1.66.1", "typescript": "~5.1.6", "unplugin-vue-define-options": "^1.3.18", "vite": "^4.4.9", diff --git a/ui/public/MaxKB.gif b/ui/public/MaxKB.gif new file mode 100644 index 00000000000..055d49a6a11 Binary files /dev/null and b/ui/public/MaxKB.gif differ diff --git a/ui/public/embeb.js b/ui/public/embeb.js deleted file mode 100644 index 23517cf59d4..00000000000 --- a/ui/public/embeb.js +++ /dev/null @@ -1,307 +0,0 @@ -function auth(token, protocol, host) { - const XML = new XMLHttpRequest() - XML.open('POST', `${protocol}//${host}/api/application/authentication`, false) - XML.setRequestHeader('Content-Type', 'application/json') - res = XML.send(JSON.stringify({ access_token: token })) - return XML.status == 200 -} - -const guideHtml=` -
-
-
-
-
- - - -
- -
🌟 遇见问题,不再有障碍!
-

你好,我是你的智能小助手。
- 点我,开启高效解答模式,让问题变成过去式。

-
- -
- -
-` -const chatButtonHtml= -`
- - - - - - - - - - - - - - - - - - - - - - - -
` - - - -const getChatContainerHtml=(protocol,host,token)=>{ - return `
- -
-
- -
-
- - -
` -} -/** - * 初始化引导 - * @param {*} root - */ -const initGuide=(root)=>{ - root.insertAdjacentHTML("beforeend",guideHtml) - const button=root.querySelector(".maxkb-button") - const close_icon=root.querySelector('.maxkb-close') - const close_func=()=>{ - root.removeChild(root.querySelector('.maxkb-tips')) - root.removeChild(root.querySelector('.maxkb-mask')) - localStorage.setItem('maxkbMaskTip',true) - } - button.onclick=close_func - close_icon.onclick=close_func -} -const initChat=(root)=>{ - // 添加对话icon - root.insertAdjacentHTML("beforeend",chatButtonHtml) - // 添加对话框 - root.insertAdjacentHTML('beforeend',getChatContainerHtml(window.maxkbChatConfig.protocol,window.maxkbChatConfig.host,window.maxkbChatConfig.token)) - // 按钮元素 - const chat_button=root.querySelector('.maxkb-chat-button') - // 对话框元素 - const chat_container=root.querySelector('#maxkb-chat-container') - - const viewport=root.querySelector('.maxkb-openviewport') - const closeviewport=root.querySelector('.maxkb-closeviewport') - const close_func=()=>{ - chat_container.style['display']=chat_container.style['display']=='block'?'none':'block' - } - close_icon=chat_container.querySelector('.maxkb-close') - chat_button.onclick = close_func - close_icon.onclick=close_func - const viewport_func=()=>{ - if(chat_container.classList.contains('maxkb-enlarge')){ - chat_container.classList.remove("maxkb-enlarge"); - viewport.classList.remove('maxkb-viewportnone') - closeviewport.classList.add('maxkb-viewportnone') - }else{ - chat_container.classList.add("maxkb-enlarge"); - viewport.classList.add('maxkb-viewportnone') - closeviewport.classList.remove('maxkb-viewportnone') - } - } - viewport.onclick=viewport_func - closeviewport.onclick=viewport_func -} -/** - * 第一次进来的引导提示 - */ -function initMaxkb(){ - const maxkb=document.createElement('div') - const root=document.createElement('div') - root.id="maxkb" - initMaxkbStyle(maxkb) - maxkb.appendChild(root) - document.body.appendChild(maxkb) - const 
maxkbMaskTip=localStorage.getItem('maxkbMaskTip') - if(maxkbMaskTip==null){ - initGuide(root) - } - initChat(root) -} - - -// 初始化全局样式 -function initMaxkbStyle(root){ - style=document.createElement('style') - style.type='text/css' - style.innerText= ` - /* 放大 */ - #maxkb .maxkb-enlarge { - width: 50%!important; - height: 100%!important; - bottom: 0!important; - right: 0 !important; - } - @media only screen and (max-width: 768px){ - #maxkb .maxkb-enlarge { - width: 100%!important; - height: 100%!important; - right: 0 !important; - bottom: 0!important; - } - } - - /* 引导 */ - - #maxkb .maxkb-mask { - position: fixed; - z-index: 999; - background-color: transparent; - height: 100%; - width: 100%; - top: 0; - left: 0; - } - #maxkb .maxkb-mask .maxkb-content { - width: 45px; - height: 50px; - box-shadow: 1px 1px 1px 2000px rgba(0,0,0,.6); - border-radius: 50% 0 0 50%; - position: absolute; - right: 0; - bottom: 42px; - z-index: 1000; - } - #maxkb .maxkb-tips { - position: fixed; - bottom: 30px; - right: 60px; - padding: 22px 24px 24px; - border-radius: 6px; - color: #ffffff; - font-size: 14px; - background: #3370FF; - z-index: 1000; - } - #maxkb .maxkb-tips .maxkb-arrow { - position: absolute; - background: #3370FF; - width: 10px; - height: 10px; - pointer-events: none; - transform: rotate(45deg); - box-sizing: border-box; - /* left */ - right: -5px; - bottom: 33px; - border-left-color: transparent; - border-bottom-color: transparent - } - #maxkb .maxkb-tips .maxkb-title { - font-size: 20px; - font-weight: 500; - margin-bottom: 8px; - } - #maxkb .maxkb-tips .maxkb-button { - text-align: right; - margin-top: 24px; - } - #maxkb .maxkb-tips .maxkb-button button { - border-radius: 4px; - background: #FFF; - padding: 3px 12px; - color: #3370FF; - cursor: pointer; - outline: none; - border: none; - } - #maxkb .maxkb-tips .maxkb-button button::after{ - border: none; - } - #maxkb .maxkb-tips .maxkb-close { - position: absolute; - right: 20px; - top: 20px; - cursor: pointer; - - } 
- #maxkb-chat-container { - width: 420px; - height: 600px; - display:none; - } - @media only screen and (max-width: 768px) { - #maxkb-chat-container { - width: 100%; - height: 70%; - right: 0 !important; - } - } - - #maxkb .maxkb-chat-button{ - position: fixed; - bottom: 30px; - right: 0; - cursor: pointer; - } - #maxkb #maxkb-chat-container{ - z-index:10000;position: relative; - border-radius: 8px; - border: 1px solid var(--N300, #DEE0E3); - background: linear-gradient(188deg, rgba(235, 241, 255, 0.20) 39.6%, rgba(231, 249, 255, 0.20) 94.3%), #EFF0F1; - box-shadow: 0px 4px 8px 0px rgba(31, 35, 41, 0.10); - position: fixed;bottom: 20px;right: 45px;overflow: hidden; - } - #maxkb #maxkb-chat-container .maxkb-chat-close{ - position: absolute; - top: 15px; - right: 10px; - cursor: pointer; - } - #maxkb #maxkb-chat-container .maxkb-openviewport{ - position: absolute; - top: 15px; - right: 50px; - cursor: pointer; - } - #maxkb #maxkb-chat-container .maxkb-closeviewport{ - position: absolute; - top: 15px; - right: 50px; - cursor: pointer; - } - #maxkb #maxkb-chat-container .maxkb-viewportnone{ - display:none; - } - #maxkb #maxkb-chat-container #maxkb-chat{ - height:100%; - width:100%; - border: none; -} - #maxkb #maxkb-chat-container { - animation: appear .4s ease-in-out; - } - @keyframes appear { - from { - height: 0;; - } - - to { - height: 600px; - } - }` - root.appendChild(style) -} - -function embedChatbot() { - const t = window.maxkbChatConfig - check = auth(t.token, t.protocol, t.host) - if (t && t.token && t.protocol && t.host && check) { - // 初始化maxkb智能小助手 - initMaxkb() - } else console.error('invalid parameter') -} -window.onload = embedChatbot diff --git a/ui/public/fx/bochaai/detail.md b/ui/public/fx/bochaai/detail.md new file mode 100644 index 00000000000..35481d2658c --- /dev/null +++ b/ui/public/fx/bochaai/detail.md @@ -0,0 +1,16 @@ +## 概述 + +博查工具是一个支持自然语言搜索的 Web Search API,从近百亿网页和生态内容源中搜索高质量世界知识,包括新闻、图片、视频、百科、机酒、学术等。 + + +## 配置 + +1. 
获取API Key  +在[博查开放平台](https://open.bochaai.com/overview) 上申请 API 密钥。 +![API Key](/ui/fx/img/bocha_APIKey.jpg) +2. 在函数库中配置 +在函数库的博查函数面板中,点击 … > 启用参数,填写 API 密钥,并启用该函数。 +![启动参数](/ui/fx/img/bocha_setting.jpg) +3. 在应用中使用 +在高级编排应用中,点击添加组件->函数库->博查,设置使用参数。 +![应用中使用](/ui/fx/img/bocha_app_used.jpg) diff --git a/ui/public/fx/bochaai/icon.png b/ui/public/fx/bochaai/icon.png new file mode 100644 index 00000000000..530a086ed20 Binary files /dev/null and b/ui/public/fx/bochaai/icon.png differ diff --git a/ui/public/fx/google_search/detail.md b/ui/public/fx/google_search/detail.md new file mode 100644 index 00000000000..ab68b6ab588 --- /dev/null +++ b/ui/public/fx/google_search/detail.md @@ -0,0 +1,21 @@ +## 概述 + +Google 搜索工具是一个实时 API,可提取搜索引擎结果,提供来自 Google 的结构化数据。它支持各种搜索类型,包括 Web、图像、新闻和地图。 + +## 配置 + +1. 创建 Google Custom Search Engine +在[Programmable Search Engine](https://programmablesearchengine.google.com/)中 添加 Search Engine +![google 创建引擎](/ui/fx/img/google_AddSearchEngine.jpg) +2. 获取cx参数 +进入添加的引擎详情中,在【基本】菜单中获取搜索引擎的ID,即cx。 +![google cx ](/ui/fx/img/google_cx.jpg) +3. 获取 API Key +打开 https://developers.google.com/custom-search/v1/overview?hl=zh-cn 获取API Key。 +![google API Key](/ui/fx/img/google_APIKey.jpg) +4. 配置启动参数 +在Google 搜索函数的启动参数中填写配置以上参数,并启用该函数。 +![启动参数](/ui/fx/img/google_setting.jpg) +5. 
在应用中使用 +在高级编排应用中,点击添加组件->函数库->Google搜索,设置使用参数。 +![应用中使用](/ui/fx/img/google_app_used.jpg) diff --git a/ui/public/fx/google_search/icon.png b/ui/public/fx/google_search/icon.png new file mode 100644 index 00000000000..7b903159b0c Binary files /dev/null and b/ui/public/fx/google_search/icon.png differ diff --git a/ui/public/fx/img/MySQL_app_used.jpg b/ui/public/fx/img/MySQL_app_used.jpg new file mode 100644 index 00000000000..42db47f9c4c Binary files /dev/null and b/ui/public/fx/img/MySQL_app_used.jpg differ diff --git a/ui/public/fx/img/MySQL_setting.jpg b/ui/public/fx/img/MySQL_setting.jpg new file mode 100644 index 00000000000..206c35a8b23 Binary files /dev/null and b/ui/public/fx/img/MySQL_setting.jpg differ diff --git a/ui/public/fx/img/PostgreSQL_app_used.jpg b/ui/public/fx/img/PostgreSQL_app_used.jpg new file mode 100644 index 00000000000..7fee014da03 Binary files /dev/null and b/ui/public/fx/img/PostgreSQL_app_used.jpg differ diff --git a/ui/public/fx/img/PostgreSQL_setting.jpg b/ui/public/fx/img/PostgreSQL_setting.jpg new file mode 100644 index 00000000000..e279a26c907 Binary files /dev/null and b/ui/public/fx/img/PostgreSQL_setting.jpg differ diff --git a/ui/public/fx/img/bocha_APIKey.jpg b/ui/public/fx/img/bocha_APIKey.jpg new file mode 100644 index 00000000000..998aa06f0f3 Binary files /dev/null and b/ui/public/fx/img/bocha_APIKey.jpg differ diff --git a/ui/public/fx/img/bocha_app_used.jpg b/ui/public/fx/img/bocha_app_used.jpg new file mode 100644 index 00000000000..71fece501d0 Binary files /dev/null and b/ui/public/fx/img/bocha_app_used.jpg differ diff --git a/ui/public/fx/img/bocha_setting.jpg b/ui/public/fx/img/bocha_setting.jpg new file mode 100644 index 00000000000..86daddd31d3 Binary files /dev/null and b/ui/public/fx/img/bocha_setting.jpg differ diff --git a/ui/public/fx/img/google_APIKey.jpg b/ui/public/fx/img/google_APIKey.jpg new file mode 100644 index 00000000000..4b6e069e0e6 Binary files /dev/null and b/ui/public/fx/img/google_APIKey.jpg differ 
diff --git a/ui/public/fx/img/google_AddSearchEngine.jpg b/ui/public/fx/img/google_AddSearchEngine.jpg new file mode 100644 index 00000000000..c0182b406d1 Binary files /dev/null and b/ui/public/fx/img/google_AddSearchEngine.jpg differ diff --git a/ui/public/fx/img/google_app_used.jpg b/ui/public/fx/img/google_app_used.jpg new file mode 100644 index 00000000000..7b4b492db90 Binary files /dev/null and b/ui/public/fx/img/google_app_used.jpg differ diff --git a/ui/public/fx/img/google_cx.jpg b/ui/public/fx/img/google_cx.jpg new file mode 100644 index 00000000000..ce13b5c4a28 Binary files /dev/null and b/ui/public/fx/img/google_cx.jpg differ diff --git a/ui/public/fx/img/google_setting.jpg b/ui/public/fx/img/google_setting.jpg new file mode 100644 index 00000000000..501c580f6ba Binary files /dev/null and b/ui/public/fx/img/google_setting.jpg differ diff --git a/ui/public/fx/img/langsearch_APIKey.jpg b/ui/public/fx/img/langsearch_APIKey.jpg new file mode 100644 index 00000000000..0705d54a309 Binary files /dev/null and b/ui/public/fx/img/langsearch_APIKey.jpg differ diff --git a/ui/public/fx/img/langsearch_app_used.jpg b/ui/public/fx/img/langsearch_app_used.jpg new file mode 100644 index 00000000000..85db6755f17 Binary files /dev/null and b/ui/public/fx/img/langsearch_app_used.jpg differ diff --git a/ui/public/fx/img/langsearch_setting.jpg b/ui/public/fx/img/langsearch_setting.jpg new file mode 100644 index 00000000000..967cd304084 Binary files /dev/null and b/ui/public/fx/img/langsearch_setting.jpg differ diff --git a/ui/public/fx/langsearch/detail.md b/ui/public/fx/langsearch/detail.md new file mode 100644 index 00000000000..49f7fa79168 --- /dev/null +++ b/ui/public/fx/langsearch/detail.md @@ -0,0 +1,17 @@ +## 概述 + +LangSearch 是一个提供免费Web Search API和Rerank API的服务,支持新闻、图像、视频等内容。它结合了关键词和向量进行混合搜索,以提高准确性。 + + +## 配置 + +1. 获取API Key  +在[LangSearch](https://langsearch.com/overview) 上申请 API 密钥。 +![API Key](/ui/fx/img/langsearch_APIKey.jpg) +2. 
在函数库中配置 +在函数库的LangSearch函数面板中,点击 … > 启用参数,填写 API 密钥,并启用该函数。 +![启动参数](/ui/fx/img/langsearch_setting.jpg) +3. 在应用中使用 +在高级编排应用中,点击添加组件->函数库->LangSearch,设置使用参数。 +![应用中使用](/ui/fx/img/langsearch_app_used.jpg) + \ No newline at end of file diff --git a/ui/public/fx/langsearch/icon.png b/ui/public/fx/langsearch/icon.png new file mode 100644 index 00000000000..72ca125f366 Binary files /dev/null and b/ui/public/fx/langsearch/icon.png differ diff --git a/ui/public/fx/mysql/detail.md b/ui/public/fx/mysql/detail.md new file mode 100644 index 00000000000..6900b32ad81 --- /dev/null +++ b/ui/public/fx/mysql/detail.md @@ -0,0 +1,14 @@ +## 概述 + +MySQL查询是一个连接MySQL数据库执行SQL查询的工具。 + + +## 配置 +  +1. 在函数库中配置启动参数 +在函数库的MySQL函数面板中,点击 … > 启用参数,填写数据库连接参数,并启用该函数。 +![启动参数](/ui/fx/img/MySQL_setting.jpg) +2. 在应用中使用 +在高级编排应用中,点击添加组件->函数库->MySQL查询,设置查询内容。 +![应用中使用](/ui/fx/img/MySQL_app_used.jpg) + \ No newline at end of file diff --git a/ui/public/fx/mysql/icon.png b/ui/public/fx/mysql/icon.png new file mode 100644 index 00000000000..7367c5e2e02 Binary files /dev/null and b/ui/public/fx/mysql/icon.png differ diff --git a/ui/public/fx/postgresql/detail.md b/ui/public/fx/postgresql/detail.md new file mode 100644 index 00000000000..f11a4201131 --- /dev/null +++ b/ui/public/fx/postgresql/detail.md @@ -0,0 +1,14 @@ +## 概述 + +PostgreSQL查询是一个连接PostgreSQL数据库执行SQL查询的工具。 + + +## 配置 +  +1. 在函数库中配置启动参数 +在函数库的PostgreSQL函数面板中,点击 … > 启用参数,填写数据库连接参数,并启用该函数。 +![启动参数](/ui/fx/img/PostgreSQL_setting.jpg) +2. 
在应用中使用 +在高级编排应用中,点击添加组件->函数库->PostgreSQL查询,设置查询内容。 +![应用中使用](/ui/fx/img/PostgreSQL_app_used.jpg) + \ No newline at end of file diff --git a/ui/public/fx/postgresql/icon.png b/ui/public/fx/postgresql/icon.png new file mode 100644 index 00000000000..7893e74eaf9 Binary files /dev/null and b/ui/public/fx/postgresql/icon.png differ diff --git a/ui/src/App.vue b/ui/src/App.vue index d59d59725b4..86643068a13 100644 --- a/ui/src/App.vue +++ b/ui/src/App.vue @@ -1,9 +1,7 @@ - + diff --git a/ui/src/api/application-xpack.ts b/ui/src/api/application-xpack.ts new file mode 100644 index 00000000000..25e973f5717 --- /dev/null +++ b/ui/src/api/application-xpack.ts @@ -0,0 +1,41 @@ +import { Result } from '@/request/Result' +import { get, put } from '@/request/index' +import { type Ref } from 'vue' + +const prefix = '/application' + +/** + * 替换社区版-获取AccessToken + * @param 参数 application_id + */ +const getAccessToken: (application_id: string, loading?: Ref) => Promise> = ( + application_id, + loading +) => { + return get(`${prefix}/${application_id}/setting`, undefined, loading) +} + +/** + * 替换社区版-修改AccessToken + * @param 参数 application_id + * data { + * "show_source": boolean, + * "show_history": boolean, + * "draggable": boolean, + * "show_guide": boolean, + * "avatar": file, + * "float_icon": file, + * } + */ +const putAccessToken: ( + application_id: string, + data: any, + loading?: Ref +) => Promise> = (application_id, data, loading) => { + return put(`${prefix}/${application_id}/setting`, data, undefined, loading) +} + +export default { + getAccessToken, + putAccessToken +} diff --git a/ui/src/api/application.ts b/ui/src/api/application.ts index c83ff3c8e26..efd4a4985a8 100644 --- a/ui/src/api/application.ts +++ b/ui/src/api/application.ts @@ -1,8 +1,9 @@ import { Result } from '@/request/Result' -import { get, post, postStream, del, put } from '@/request/index' +import { get, post, postStream, del, put, request, download, exportFile } from '@/request/index' import type { 
pageRequest } from '@/api/type/common' import type { ApplicationFormType } from '@/api/type/application' import { type Ref } from 'vue' +import type { FormField } from '@/components/dynamics-form/type' const prefix = '/application' @@ -17,12 +18,12 @@ const getAllAppilcation: () => Promise> = () => { /** * 获取分页应用 * page { - "current_page": "string", - "page_size": "string", - } + "current_page": "string", + "page_size": "string", + } * param { - "name": "string", - } + "name": "string", + } */ const getApplication: ( page: pageRequest, @@ -45,8 +46,7 @@ const postApplication: ( /** * 修改应用 - * @param 参数 - + * @param 参数 */ const putApplication: ( application_id: String, @@ -117,49 +117,65 @@ const putAccessToken: ( /** * 应用认证 - * @param 参数 + * @param 参数 { - "access_token": "string" -} + "access_token": "string" + } */ -const postAppAuthentication: (access_token: string, loading?: Ref) => Promise = ( - access_token, - loading -) => { - return post(`${prefix}/authentication`, { access_token }, undefined, loading) +const postAppAuthentication: ( + access_token: string, + loading?: Ref, + authentication_value?: any +) => Promise = (access_token, loading, authentication_value) => { + return post( + `${prefix}/authentication`, + { access_token: access_token, authentication_value }, + undefined, + loading + ) } /** * 对话获取应用相关信息 - * @param 参数 + * @param 参数 { - "access_token": "string" -} + "access_token": "string" + } */ -const getProfile: (loading?: Ref) => Promise = (loading) => { +const getAppProfile: (loading?: Ref) => Promise = (loading) => { return get(`${prefix}/profile`, undefined, loading) } /** * 获得临时回话Id - * @param 参数 + * @param 参数 -} + } */ const postChatOpen: (data: ApplicationFormType) => Promise> = (data) => { return post(`${prefix}/chat/open`, data) } +/** + * 获得工作流临时回话Id + * @param 参数 + + } + */ +const postWorkflowChatOpen: (data: ApplicationFormType) => Promise> = (data) => { + return post(`${prefix}/chat_workflow/open`, data) +} + /** * 正式回话Id - * @param 
参数 + * @param 参数 * { - "model_id": "string", - "multiple_rounds_dialogue": true, - "dataset_id_list": [ - "string" - ] -} + "model_id": "string", + "multiple_rounds_dialogue": true, + "dataset_id_list": [ + "string" + ] + } */ const getChatOpen: (application_id: String) => Promise> = (application_id) => { return get(`${prefix}/${application_id}/chat/open`) @@ -176,11 +192,11 @@ const postChatMessage: (chat_id: string, data: any) => Promise = (chat_id, /** * 点赞、点踩 - * @param 参数 + * @param 参数 * application_id : string; chat_id : string; chat_record_id : string * { - "vote_status": "string", // -1 0 1 - } + "vote_status": "string", // -1 0 1 + } */ const putChatVote: ( application_id: string, @@ -228,6 +244,301 @@ const getApplicationModel: ( return get(`${prefix}/${application_id}/model`, loading) } +/** + * 获取当前用户可使用的模型列表 + * @param application_id + * @param loading + * @query { query_text: string, top_number: number, similarity: number } + * @returns + */ +const getApplicationRerankerModel: ( + application_id: string, + loading?: Ref +) => Promise>> = (application_id, loading) => { + return get(`${prefix}/${application_id}/model`, { model_type: 'RERANKER' }, loading) +} + +/** + * 获取当前用户可使用的模型列表 + * @param application_id + * @param loading + * @query { query_text: string, top_number: number, similarity: number } + * @returns + */ +const getApplicationSTTModel: ( + application_id: string, + loading?: Ref +) => Promise>> = (application_id, loading) => { + return get(`${prefix}/${application_id}/model`, { model_type: 'STT' }, loading) +} + +/** + * 获取当前用户可使用的模型列表 + * @param application_id + * @param loading + * @query { query_text: string, top_number: number, similarity: number } + * @returns + */ +const getApplicationTTSModel: ( + application_id: string, + loading?: Ref +) => Promise>> = (application_id, loading) => { + return get(`${prefix}/${application_id}/model`, { model_type: 'TTS' }, loading) +} + +const getApplicationImageModel: ( + application_id: string, + 
loading?: Ref +) => Promise>> = (application_id, loading) => { + return get(`${prefix}/${application_id}/model`, { model_type: 'IMAGE' }, loading) +} + +const getApplicationTTIModel: ( + application_id: string, + loading?: Ref +) => Promise>> = (application_id, loading) => { + return get(`${prefix}/${application_id}/model`, { model_type: 'TTI' }, loading) +} + +/** + * 发布应用 + * @param 参数 + */ +const putPublishApplication: ( + application_id: String, + data: ApplicationFormType, + loading?: Ref +) => Promise> = (application_id, data, loading) => { + return put(`${prefix}/${application_id}/publish`, data, undefined, loading) +} +/** + * 获取应用所属的函数库列表 + * @param application_id 应用id + * @param loading + * @returns + */ +const listFunctionLib: (application_id: String, loading?: Ref) => Promise> = ( + application_id, + loading +) => { + return get(`${prefix}/${application_id}/function_lib`, undefined, loading) +} +/** + * 获取当前人的所有应用列表 + * @param application_id 应用id + * @param loading + * @returns + */ +export const getApplicationList: ( + application_id: string, + loading?: Ref +) => Promise> = (application_id, loading) => { + return get(`${prefix}/${application_id}/application`, undefined, loading) +} +/** + * 获取应用所属的函数库 + * @param application_id + * @param function_lib_id + * @param loading + * @returns + */ +const getFunctionLib: ( + application_id: String, + function_lib_id: String, + loading?: Ref +) => Promise> = (application_id, function_lib_id, loading) => { + return get(`${prefix}/${application_id}/function_lib/${function_lib_id}`, undefined, loading) +} + +const getMcpTools: ( + data: any, + loading?: Ref +) => Promise> = (data, loading) => { + return get(`${prefix}/mcp_servers`, data, loading) +} + +const getApplicationById: ( + application_id: String, + app_id: String, + loading?: Ref +) => Promise> = (application_id, app_id, loading) => { + return get(`${prefix}/${application_id}/application/${app_id}`, undefined, loading) +} +/** + * 获取模型参数表单 + * @param 
application_id 应用id + * @param model_id 模型id + * @param loading + * @returns + */ +const getModelParamsForm: ( + application_id: String, + model_id: String, + loading?: Ref +) => Promise>> = (application_id, model_id, loading) => { + return get(`${prefix}/${application_id}/model_params_form/${model_id}`, undefined, loading) +} + +/** + * 上传文档图片附件 + */ +const uploadFile: ( + application_id: String, + chat_id: String, + data: any, + loading?: Ref +) => Promise> = (application_id, chat_id, data, loading) => { + return post(`${prefix}/${application_id}/chat/${chat_id}/upload_file`, data, undefined, loading) +} + +/** + * 语音转文本 + */ +const postSpeechToText: ( + application_id: String, + data: any, + loading?: Ref +) => Promise> = (application_id, data, loading) => { + return post(`${prefix}/${application_id}/speech_to_text`, data, undefined, loading) +} + +/** + * 文本转语音 + */ +const postTextToSpeech: ( + application_id: String, + data: any, + loading?: Ref +) => Promise> = (application_id, data, loading) => { + return download(`${prefix}/${application_id}/text_to_speech`, 'post', data, undefined, loading) +} + +/** + * 播放测试文本 + */ +const playDemoText: ( + application_id: String, + data: any, + loading?: Ref +) => Promise> = (application_id, data, loading) => { + return download(`${prefix}/${application_id}/play_demo_text`, 'post', data, undefined, loading) +} +/** + * 获取平台状态 + */ +const getPlatformStatus: (application_id: string) => Promise> = (application_id) => { + return get(`/platform/${application_id}/status`) +} +/** + * 获取平台配置 + */ +const getPlatformConfig: (application_id: string, type: string) => Promise> = ( + application_id, + type +) => { + return get(`/platform/${application_id}/${type}`) +} +/** + * 更新平台配置 + */ +const updatePlatformConfig: ( + application_id: string, + type: string, + data: any, + loading?: Ref +) => Promise> = (application_id, type, data, loading) => { + return post(`/platform/${application_id}/${type}`, data, undefined, loading) +} +/** + 
* 更新平台状态 + */ +const updatePlatformStatus: (application_id: string, data: any) => Promise> = ( + application_id, + data +) => { + return post(`/platform/${application_id}/status`, data) +} +/** + * 验证密码 + */ +const validatePassword: ( + application_id: string, + password: string, + loading?: Ref +) => Promise> = (application_id, password, loading) => { + return get(`/application/${application_id}/auth/${password}`, undefined, loading) +} + +/** + * workflow历史版本 + */ +const getWorkFlowVersion: ( + application_id: string, + loading?: Ref +) => Promise> = (application_id, loading) => { + return get(`/application/${application_id}/work_flow_version`, undefined, loading) +} + +/** + * workflow历史版本详情 + */ +const getWorkFlowVersionDetail: ( + application_id: string, + application_version_id: string, + loading?: Ref +) => Promise> = (application_id, application_version_id, loading) => { + return get( + `/application/${application_id}/work_flow_version/${application_version_id}`, + undefined, + loading + ) +} +/** + * 修改workflow历史版本 + */ +const putWorkFlowVersion: ( + application_id: string, + application_version_id: string, + data: any, + loading?: Ref +) => Promise> = (application_id, application_version_id, data, loading) => { + return put( + `/application/${application_id}/work_flow_version/${application_version_id}`, + data, + undefined, + loading + ) +} + +const getUserList: (type: string, loading?: Ref) => Promise> = ( + type, + loading +) => { + return get(`/user/list/${type}`, undefined, loading) +} + +const exportApplication = ( + application_id: string, + application_name: string, + loading?: Ref +) => { + return exportFile( + application_name + '.mk', + `/application/${application_id}/export`, + undefined, + loading + ) +} + +/** + * 导入应用 + */ +const importApplication: (data: any, loading?: Ref) => Promise> = ( + data, + loading +) => { + return post(`${prefix}/import`, data, undefined, loading) +} export default { getAllAppilcation, getApplication, @@ -242,8 
+553,36 @@ export default { getAccessToken, putAccessToken, postAppAuthentication, - getProfile, + getAppProfile, putChatVote, getApplicationHitTest, - getApplicationModel + getApplicationModel, + putPublishApplication, + postWorkflowChatOpen, + listFunctionLib, + getFunctionLib, + getModelParamsForm, + getApplicationRerankerModel, + getApplicationSTTModel, + getApplicationTTSModel, + getApplicationImageModel, + getApplicationTTIModel, + postSpeechToText, + postTextToSpeech, + getPlatformStatus, + getPlatformConfig, + updatePlatformConfig, + updatePlatformStatus, + validatePassword, + getWorkFlowVersion, + getWorkFlowVersionDetail, + putWorkFlowVersion, + playDemoText, + getUserList, + getApplicationList, + uploadFile, + exportApplication, + importApplication, + getApplicationById, + getMcpTools } diff --git a/ui/src/api/auth-setting.ts b/ui/src/api/auth-setting.ts new file mode 100644 index 00000000000..e1d239ba2e7 --- /dev/null +++ b/ui/src/api/auth-setting.ts @@ -0,0 +1,39 @@ +import {Result} from '@/request/Result' +import {get, post, del, put} from '@/request/index' +import type {pageRequest} from '@/api/type/common' +import {type Ref} from 'vue' + +const prefix = '/auth' +/** + * 获取认证设置 + */ +const getAuthSetting: (auth_type: string, loading?: Ref) => Promise> = (auth_type, loading) => { + return get(`${prefix}/${auth_type}/detail`, undefined, loading) +} + +/** + * 邮箱测试 + */ +const postAuthSetting: (data: any, loading?: Ref) => Promise> = ( + data, + loading +) => { + return post(`${prefix}/connection`, data, undefined, loading) +} + +/** + * 修改邮箱设置 + */ +const putAuthSetting: (auth_type: string, data: any, loading?: Ref) => Promise> = ( + auth_type, + data, + loading +) => { + return put(`${prefix}/${auth_type}/info`, data, undefined, loading) +} + +export default { + getAuthSetting, + postAuthSetting, + putAuthSetting +} diff --git a/ui/src/api/dataset.ts b/ui/src/api/dataset.ts index 702731a267b..a5a663b03c7 100644 --- a/ui/src/api/dataset.ts +++ 
b/ui/src/api/dataset.ts @@ -1,21 +1,22 @@ import { Result } from '@/request/Result' -import { get, post, del, put } from '@/request/index' +import { get, post, del, put, exportExcel, exportFile } from '@/request/index' import type { datasetData } from '@/api/type/dataset' import type { pageRequest } from '@/api/type/common' import type { ApplicationFormType } from '@/api/type/application' import { type Ref } from 'vue' + const prefix = '/dataset' /** * 获取分页知识库 - * @param 参数 + * @param 参数 * page { - "current_page": "string", - "page_size": "string", - } + "current_page": "string", + "page_size": "string", + } * param { - "name": "string", - } + "name": "string", + } */ const getDataset: ( page: pageRequest, @@ -46,28 +47,28 @@ const delDataset: (dataset_id: String, loading?: Ref) => Promise) => Promise> = ( data, @@ -78,13 +79,13 @@ const postDataset: (data: datasetData, loading?: Ref) => Promise) => Promise> = ( data, @@ -92,6 +93,39 @@ const postWebDataset: (data: any, loading?: Ref) => Promise ) => { return post(`${prefix}/web`, data, undefined, loading) } +/** + * 创建Lark知识库 + * @param 参数 + * { + "name": "string", + "desc": "string", + "app_id": "string", + "app_secret": "string", + "folder_token": "string", + } + */ +const postLarkDataset: (data: any, loading?: Ref) => Promise> = ( + data, + loading +) => { + return post(`${prefix}/lark/save`, data, undefined, loading) +} + +/** + * 创建QA知识库 + * @param 参数 formData + * { + "file": "file", + "name": "string", + "desc": "string", + } + */ +const postQADataset: (data: any, loading?: Ref) => Promise> = ( + data, + loading +) => { + return post(`${prefix}/qa`, data, undefined, loading) +} /** * 知识库详情 @@ -106,18 +140,26 @@ const getDatasetDetail: (dataset_id: string, loading?: Ref) => Promise< /** * 修改知识库信息 - * @param 参数 + * @param 参数 * dataset_id * { - "name": "string", - "desc": true - } + "name": "string", + "desc": true + } */ -const putDataset: (dataset_id: string, data: any) => Promise> = ( - dataset_id, - data: 
any -) => { - return put(`${prefix}/${dataset_id}`, data) +const putDataset: ( + dataset_id: string, + data: any, + loading?: Ref +) => Promise> = (dataset_id, data, loading) => { + return put(`${prefix}/${dataset_id}`, data, undefined, loading) +} +const putLarkDataset: ( + dataset_id: string, + data: any, + loading?: Ref +) => Promise> = (dataset_id, data, loading) => { + return put(`${prefix}/lark/${dataset_id}`, data, undefined, loading) } /** * 获取知识库 可关联的应用列表 @@ -160,6 +202,96 @@ const putSyncWebDataset: ( return put(`${prefix}/${dataset_id}/sync_web`, undefined, { sync_type }, loading) } +/** + * 向量化知识库 + * @param 参数 dataset_id + */ +const putReEmbeddingDataset: ( + dataset_id: string, + loading?: Ref +) => Promise> = (dataset_id, loading) => { + return put(`${prefix}/${dataset_id}/re_embedding`, undefined, undefined, loading) +} + +/** + * 导出知识库 + * @param dataset_name 知识库名称 + * @param dataset_id 知识库id + * @returns + */ +const exportDataset: ( + dataset_name: string, + dataset_id: string, + loading?: Ref +) => Promise = (dataset_name, dataset_id, loading) => { + return exportExcel(dataset_name + '.xlsx', `dataset/${dataset_id}/export`, undefined, loading) +} +/** + *导出Zip知识库 + * @param dataset_name 知识库名称 + * @param dataset_id 知识库id + * @param loading 加载器 + * @returns + */ +const exportZipDataset: ( + dataset_name: string, + dataset_id: string, + loading?: Ref +) => Promise = (dataset_name, dataset_id, loading) => { + return exportFile(dataset_name + '.zip', `dataset/${dataset_id}/export_zip`, undefined, loading) +} + +/** + * 获取当前用户可使用的模型列表 + * @param application_id + * @param loading + * @query { query_text: string, top_number: number, similarity: number } + * @returns + */ +const getDatasetModel: ( + dataset_id: string, + loading?: Ref +) => Promise>> = (dataset_id, loading) => { + return get(`${prefix}/${dataset_id}/model`, loading) +} +/** + * 获取飞书文档列表 + * @param dataset_id + * @param folder_token + * @param loading + * @returns + */ +const 
getLarkDocumentList: ( + dataset_id: string, + folder_token: string, + data: any, + loading?: Ref +) => Promise>> = (dataset_id, folder_token, data, loading) => { + return post(`${prefix}/lark/${dataset_id}/${folder_token}/doc_list`, data, null, loading) +} + +const importLarkDocument: ( + dataset_id: string, + data: any, + loading?: Ref +) => Promise>> = (dataset_id, data, loading) => { + return post(`${prefix}/lark/${dataset_id}/import`, data, null, loading) +} +/** + * 生成关联问题 + * @param dataset_id 知识库id + * @param data + * @param loading + * @returns + */ +const generateRelated: ( + dataset_id: string, + data: any, + loading?: Ref +) => Promise>> = (dataset_id, data, loading) => { + return put(`${prefix}/${dataset_id}/generate_related`, data, null, loading) +} + export default { getDataset, getAllDataset, @@ -170,5 +302,15 @@ export default { listUsableApplication, getDatasetHitTest, postWebDataset, - putSyncWebDataset + putSyncWebDataset, + putReEmbeddingDataset, + postQADataset, + exportDataset, + getDatasetModel, + exportZipDataset, + postLarkDataset, + getLarkDocumentList, + importLarkDocument, + putLarkDataset, + generateRelated } diff --git a/ui/src/api/document.ts b/ui/src/api/document.ts index 2d2fc1f65f1..ad4792af7f4 100644 --- a/ui/src/api/document.ts +++ b/ui/src/api/document.ts @@ -1,8 +1,9 @@ import { Result } from '@/request/Result' -import { get, post, del, put } from '@/request/index' +import { get, post, del, put, exportExcel, exportFile } from '@/request/index' import type { Ref } from 'vue' import type { KeyValue } from '@/api/type/common' import type { pageRequest } from '@/api/type/common' + const prefix = '/dataset' /** @@ -26,14 +27,14 @@ const listSplitPattern: ( /** * 文档分页列表 - * @param 参数 dataset_id, + * @param 参数 dataset_id, * page { - "current_page": "string", - "page_size": "string", - } -* param { - "name": "string", - } + "current_page": "string", + "page_size": "string", + } + * param { + "name": "string", + } */ const getDocument: 
( @@ -58,22 +59,22 @@ const getAllDocument: (dataset_id: string, loading?: Ref) => Promise Promise> = (dataset_id, data, loading) => { return del(`${prefix}/${dataset_id}/document/_bach`, undefined, { id_list: data }, loading) } + +const batchRefresh: ( + dataset_id: string, + data: any, + stateList: Array, + loading?: Ref +) => Promise> = (dataset_id, data, stateList, loading) => { + return put( + `${prefix}/${dataset_id}/document/batch_refresh`, + { id_list: data, state_list: stateList }, + undefined, + loading + ) +} /** * 文档详情 * @param 参数 dataset_id @@ -137,20 +152,42 @@ const getDocumentDetail: (dataset_id: string, document_id: string) => Promise, loading?: Ref -) => Promise> = (dataset_id, document_id, loading) => { +) => Promise> = (dataset_id, document_id, state_list, loading) => { return put( `${prefix}/${dataset_id}/document/${document_id}/refresh`, + { state_list }, + undefined, + loading + ) +} + +/** + * 同步web站点类型 + * @param 参数 + * dataset_id, document_id, + */ +const putDocumentSync: ( + dataset_id: string, + document_id: string, + loading?: Ref +) => Promise> = (dataset_id, document_id, loading) => { + return put(`${prefix}/${dataset_id}/document/${document_id}/sync`, undefined, undefined, loading) +} +const putLarkDocumentSync: ( + dataset_id: string, + document_id: string, + loading?: Ref +) => Promise> = (dataset_id, document_id, loading) => { + return put( + `${prefix}/lark/${dataset_id}/document/${document_id}/sync`, undefined, undefined, loading @@ -168,17 +205,24 @@ const delMulSyncDocument: ( ) => Promise> = (dataset_id, data, loading) => { return put(`${prefix}/${dataset_id}/document/_bach`, { id_list: data }, undefined, loading) } +const delMulLarkSyncDocument: ( + dataset_id: string, + data: any, + loading?: Ref +) => Promise> = (dataset_id, data, loading) => { + return put(`${prefix}/lark/${dataset_id}/_batch`, { id_list: data }, undefined, loading) +} /** * 创建Web站点文档 - * @param 参数 + * @param 参数 * { - "source_url_list": [ - "string" - ], 
- "selector": "string" + "source_url_list": [ + "string" + ], + "selector": "string" + } } -} */ const postWebDocument: ( dataset_id: string, @@ -188,6 +232,33 @@ const postWebDocument: ( return post(`${prefix}/${dataset_id}/document/web`, data, undefined, loading) } +/** + * 导入QA文档 + * @param 参数 + * file + } + */ +const postQADocument: ( + dataset_id: string, + data: any, + loading?: Ref +) => Promise> = (dataset_id, data, loading) => { + return post(`${prefix}/${dataset_id}/document/qa`, data, undefined, loading) +} + +/** + * 导入表格 + * @param 参数 + * file + */ +const postTableDocument: ( + dataset_id: string, + data: any, + loading?: Ref +) => Promise> = (dataset_id, data, loading) => { + return post(`${prefix}/${dataset_id}/document/table`, data, undefined, loading) +} + /** * 批量迁移文档 * @param 参数 dataset_id,target_dataset_id, @@ -220,6 +291,103 @@ const batchEditHitHandling: ( ) => Promise> = (dataset_id, data, loading) => { return put(`${prefix}/${dataset_id}/document/batch_hit_handling`, data, undefined, loading) } + +/** + * 获得QA模版 + * @param 参数 fileName,type, + */ +const exportQATemplate: (fileName: string, type: string, loading?: Ref) => void = ( + fileName, + type, + loading +) => { + return exportExcel(fileName, `${prefix}/document/template/export`, { type }, loading) +} + +/** + * 获得table模版 + * @param 参数 fileName,type, + */ +const exportTableTemplate: (fileName: string, type: string, loading?: Ref) => void = ( + fileName, + type, + loading +) => { + return exportExcel(fileName, `${prefix}/document/table_template/export`, { type }, loading) +} + +/** + * 导出文档 + * @param document_name 文档名称 + * @param dataset_id 数据集id + * @param document_id 文档id + * @param loading 加载器 + * @returns + */ +const exportDocument: ( + document_name: string, + dataset_id: string, + document_id: string, + loading?: Ref +) => Promise = (document_name, dataset_id, document_id, loading) => { + return exportExcel( + document_name + '.xlsx', + 
`${prefix}/${dataset_id}/document/${document_id}/export`, + {}, + loading + ) +} +/** + * 导出文档 + * @param document_name 文档名称 + * @param dataset_id 数据集id + * @param document_id 文档id + * @param loading 加载器 + * @returns + */ +const exportDocumentZip: ( + document_name: string, + dataset_id: string, + document_id: string, + loading?: Ref +) => Promise = (document_name, dataset_id, document_id, loading) => { + return exportFile( + document_name + '.zip', + `${prefix}/${dataset_id}/document/${document_id}/export_zip`, + {}, + loading + ) +} +const batchGenerateRelated: ( + dataset_id: string, + data: any, + loading?: Ref +) => Promise> = (dataset_id, data, loading) => { + return put(`${prefix}/${dataset_id}/document/batch_generate_related`, data, undefined, loading) +} + +const cancelTask: ( + dataset_id: string, + document_id: string, + data: any, + loading?: Ref +) => Promise> = (dataset_id, document_id, data, loading) => { + return put( + `${prefix}/${dataset_id}/document/${document_id}/cancel_task`, + data, + undefined, + loading + ) +} + +const batchCancelTask: ( + dataset_id: string, + data: any, + loading?: Ref +) => Promise> = (dataset_id, data, loading) => { + return put(`${prefix}/${dataset_id}/document/cancel_task/_batch`, data, undefined, loading) +} + export default { postSplitDocument, getDocument, @@ -231,8 +399,21 @@ export default { getDocumentDetail, listSplitPattern, putDocumentRefresh, + putDocumentSync, delMulSyncDocument, postWebDocument, putMigrateMulDocument, - batchEditHitHandling + batchEditHitHandling, + exportQATemplate, + exportTableTemplate, + postQADocument, + postTableDocument, + exportDocument, + batchRefresh, + batchGenerateRelated, + cancelTask, + exportDocumentZip, + batchCancelTask, + putLarkDocumentSync, + delMulLarkSyncDocument } diff --git a/ui/src/api/function-lib.ts b/ui/src/api/function-lib.ts new file mode 100644 index 00000000000..4fcf64cff7b --- /dev/null +++ b/ui/src/api/function-lib.ts @@ -0,0 +1,150 @@ +import { Result } 
from '@/request/Result' +import { get, post, del, put, exportFile } from '@/request/index' +import type { pageRequest } from '@/api/type/common' +import type { functionLibData } from '@/api/type/function-lib' +import { type Ref } from 'vue' + +const prefix = '/function_lib' + +/** + * 获取函数列表 + * param { + "name": "string", + } + */ +const getAllFunctionLib: (param?: any, loading?: Ref) => Promise> = ( + param, + loading +) => { + return get(`${prefix}`, param || {}, loading) +} + +/** + * 获取分页函数列表 + * page { + "current_page": "string", + "page_size": "string", + } + * param { + "name": "string", + } + */ +const getFunctionLib: ( + page: pageRequest, + param: any, + loading?: Ref +) => Promise> = (page, param, loading) => { + return get(`${prefix}/${page.current_page}/${page.page_size}`, param, loading) +} + +/** + * 创建函数 + * @param 参数 + */ +const postFunctionLib: (data: functionLibData, loading?: Ref) => Promise> = ( + data, + loading +) => { + return post(`${prefix}`, data, undefined, loading) +} + +/** + * 修改函数 + * @param 参数 + + */ +const putFunctionLib: ( + function_lib_id: string, + data: functionLibData, + loading?: Ref +) => Promise> = (function_lib_id, data, loading) => { + return put(`${prefix}/${function_lib_id}`, data, undefined, loading) +} + +/** + * 调试函数 + * @param 参数 + + */ +const postFunctionLibDebug: (data: any, loading?: Ref) => Promise> = ( + data: any, + loading +) => { + return post(`${prefix}/debug`, data, undefined, loading) +} + +/** + * 删除函数 + * @param 参数 function_lib_id + */ +const delFunctionLib: ( + function_lib_id: String, + loading?: Ref +) => Promise> = (function_lib_id, loading) => { + return del(`${prefix}/${function_lib_id}`, undefined, {}, loading) +} +/** + * 获取函数详情 + * @param function_lib_id 函数id + * @param loading 加载器 + * @returns 函数详情 + */ +const getFunctionLibById: ( + function_lib_id: String, + loading?: Ref +) => Promise> = (function_lib_id, loading) => { + return get(`${prefix}/${function_lib_id}`, undefined, loading) +} 
+const pylint: (code: string, loading?: Ref) => Promise> = (code, loading) => { + return post(`${prefix}/pylint`, { code }, {}, loading) +} + +const exportFunctionLib = ( + id: string, + name: string, + loading?: Ref +) => { + return exportFile( + name + '.fx', + `${prefix}/${id}/export`, + undefined, + loading + ) +} + +const putFunctionLibIcon: ( + id: string, + data: any, + loading?: Ref +) => Promise> = (id, data, loading) => { + return put(`${prefix}/${id}/edit_icon`, data, undefined, loading) +} + +const addInternalFunction: ( + id: string, + data: any, + loading?: Ref +) => Promise> = (id, data, loading) => { + return post(`${prefix}/${id}/add_internal_fun`, data, undefined, loading) +} + +const importFunctionLib: (data: any, loading?: Ref) => Promise> = ( + data, + loading +) => { + return post(`${prefix}/import`, data, undefined, loading) +} +export default { + getFunctionLib, + postFunctionLib, + putFunctionLib, + postFunctionLibDebug, + getAllFunctionLib, + delFunctionLib, + getFunctionLibById, + exportFunctionLib, + importFunctionLib, + pylint, + putFunctionLibIcon, + addInternalFunction +} diff --git a/ui/src/api/license.ts b/ui/src/api/license.ts new file mode 100644 index 00000000000..16e5acdf6aa --- /dev/null +++ b/ui/src/api/license.ts @@ -0,0 +1,24 @@ +import { Result } from '@/request/Result' +import { get, post, del, put } from '@/request/index' +import { type Ref } from 'vue' + +const prefix = '/license' + +/** + * 获得license信息 + */ +const getLicense: (loading?: Ref) => Promise> = (loading) => { + return get(`${prefix}/profile`, undefined, loading) +} +/** + * 更新license信息 + * @param 参数 license_file:file + */ +const putLicense: (data: any, loading?: Ref) => Promise> = (data, loading) => { + return put(`${prefix}/profile`, data, undefined, loading) +} + +export default { + getLicense, + putLicense +} diff --git a/ui/src/api/log.ts b/ui/src/api/log.ts index 1904e96b0a1..edcd4d93e33 100644 --- a/ui/src/api/log.ts +++ b/ui/src/api/log.ts @@ -1,5 +1,5 
@@ import { Result } from '@/request/Result' -import { get, post, del, put, exportExcel } from '@/request/index' +import { get, del, put, exportExcel, exportExcelPost, post } from '@/request/index' import type { pageRequest } from '@/api/type/common' import { type Ref } from 'vue' @@ -34,9 +34,16 @@ const exportChatLog: ( application_id: string, application_name: string, param: any, + data: any, loading?: Ref -) => void = (application_id, application_name, param, loading) => { - exportExcel(application_name, `${prefix}/${application_id}/chat/export`, param, loading) +) => void = (application_id, application_name, param, data, loading) => { + exportExcelPost( + application_name + '.xlsx', + `${prefix}/${application_id}/chat/export`, + param, + data, + loading + ) } /** @@ -64,11 +71,12 @@ const getChatRecordLog: ( application_id: String, chart_id: String, page: pageRequest, - loading?: Ref -) => Promise> = (application_id, chart_id, page, loading) => { + loading?: Ref, + order_asc?: boolean +) => Promise> = (application_id, chart_id, page, loading, order_asc) => { return get( `${prefix}/${application_id}/chat/${chart_id}/chat_record/${page.current_page}/${page.page_size}`, - undefined, + { order_asc: order_asc !== undefined ? 
order_asc : true }, loading ) } @@ -106,7 +114,22 @@ const putChatRecordLog: ( loading ) } +/** + * 对话记录提交至知识库 + * @param data + * @param loading + * @param application_id + * @param dataset_id + */ +const postChatRecordLog: ( + application_id: string, + dataset_id: string, + data: any, + loading?: Ref +) => Promise> = (application_id, dataset_id, data, loading) => { + return post(`${prefix}/${application_id}/dataset/${dataset_id}/improve`, data, undefined, loading) +} /** * 获取标注段落列表信息 * @param 参数 @@ -173,6 +196,53 @@ const getRecordDetail: ( ) } +const getChatLogClient: ( + application_id: String, + page: pageRequest, + loading?: Ref +) => Promise> = (application_id, page, loading) => { + return get( + `${prefix}/${application_id}/chat/client/${page.current_page}/${page.page_size}`, + null, + loading + ) +} + +/** + * 客户端删除日志 + * @param 参数 application_id, chat_id, + */ +const delChatClientLog: ( + application_id: string, + chat_id: string, + loading?: Ref +) => Promise> = (application_id, chat_id, loading) => { + return del(`${prefix}/${application_id}/chat/client/${chat_id}`, undefined, {}, loading) +} + +/** + * 修改历史日志abstract + * @param 参数 + * application_id, chat_id, + * data { + "abstract": "string", + } + */ + +const putChatClientLog: ( + application_id: string, + chat_id: string, + data: any, + loading?: Ref +) => Promise> = (application_id, chat_id, data, loading) => { + return put( + `${prefix}/${application_id}/chat/client/${chat_id}`, + data, + undefined, + loading + ) +} + export default { getChatLog, delChatLog, @@ -181,5 +251,9 @@ export default { getMarkRecord, getRecordDetail, delMarkRecord, - exportChatLog + exportChatLog, + getChatLogClient, + delChatClientLog, + postChatRecordLog, + putChatClientLog } diff --git a/ui/src/api/model.ts b/ui/src/api/model.ts index bb98984f83d..5129dd05572 100644 --- a/ui/src/api/model.ts +++ b/ui/src/api/model.ts @@ -34,6 +34,13 @@ const getProvider: (loading?: Ref) => Promise>> return get(`${prefix_provider}`, {}, 
loading) } +/** + * 获得供应商列表 + */ +const getProviderByModelType: (model_type: string, loading?: Ref) => Promise>> = (model_type, loading) => { + return get(`${prefix_provider}`, {model_type}, loading) +} + /** * 获取模型创建表单 * @param provider @@ -51,6 +58,18 @@ const getModelCreateForm: ( return get(`${prefix_provider}/model_form`, { provider, model_type, model_name }, loading) } +/** + * 获取模型参数表单 + * @param model_id 模型id + * @param loading + * @returns + */ +const getModelParamsForm: ( + model_id: string, + loading?: Ref +) => Promise>> = (model_id, loading) => { + return get(`model/${model_id}/model_params_form`, {}, loading) +} /** * 获取模型类型列表 * @param provider 供应商 @@ -79,6 +98,15 @@ const listBaseModel: ( return get(`${prefix_provider}/model_list`, { provider, model_type }, loading) } +const listBaseModelParamsForm: ( + provider: string, + model_type: string, + model_name: string, + loading?: Ref +) => Promise>> = (provider, model_type, model_name, loading) => { + return get(`${prefix_provider}/model_params_form`, { provider, model_type, model_name}, loading) +} + /** * 创建模型 * @param request 请求对象 @@ -106,6 +134,20 @@ const updateModel: ( return put(`${prefix}/${model_id}`, request, {}, loading) } +/** + * 修改模型参数配置 + * @param request 請求對象 + * @param loading 加載器 + * @returns + */ +const updateModelParamsForm: ( + model_id: string, + request: any[], + loading?: Ref +) => Promise> = (model_id, request, loading) => { + return put(`${prefix}/${model_id}/model_params_form`, request, {}, loading) +} + /** * 获取模型详情根据模型id 包括认证信息 * @param model_id 模型id @@ -130,7 +172,18 @@ const getModelMetaById: (model_id: string, loading?: Ref) => Promise { return get(`${prefix}/${model_id}/meta`, {}, loading) } - +/** + * 暂停下载 + * @param model_id 模型id + * @param loading 加载器 + * @returns + */ +const pauseDownload: (model_id: string, loading?: Ref) => Promise> = ( + model_id, + loading +) => { + return put(`${prefix}/${model_id}/pause_download`, undefined, {}, loading) +} const deleteModel: 
(model_id: string, loading?: Ref) => Promise> = ( model_id, loading @@ -143,9 +196,14 @@ export default { getModelCreateForm, listModelType, listBaseModel, + listBaseModelParamsForm, createModel, updateModel, deleteModel, getModelById, - getModelMetaById + getModelMetaById, + pauseDownload, + getModelParamsForm, + updateModelParamsForm, + getProviderByModelType } diff --git a/ui/src/api/operate-log.ts b/ui/src/api/operate-log.ts new file mode 100644 index 00000000000..1dfc2d4ae56 --- /dev/null +++ b/ui/src/api/operate-log.ts @@ -0,0 +1,46 @@ +import { Result } from '@/request/Result' +import { get, exportExcelPost } from '@/request/index' +import type { pageRequest } from '@/api/type/common' +import { type Ref } from 'vue' + +const prefix = '/operate_log' +/** + * 日志分页列表 + * @param 参数 + * page { + "current_page": "string", + "page_size": "string", + } + * @query 参数 + param: any + */ +const getOperateLog: ( + page: pageRequest, + param: any, + loading?: Ref +) => Promise> = (page, param, loading) => { + return get(`${prefix}/${page.current_page}/${page.page_size}`, param, loading) +} + +const getMenuList: () => Promise> = () => { + return get(`${prefix}/menu_operate_option/`, undefined, undefined) +} + +const exportOperateLog: ( + param: any, + loading?: Ref +) => void = (param, loading) => { + exportExcelPost( + 'log.xlsx', + `${prefix}/export/`, + param, + undefined, + loading + ) +} + +export default { + getOperateLog, + getMenuList, + exportOperateLog +} diff --git a/ui/src/api/paragraph.ts b/ui/src/api/paragraph.ts index 675fa6efab9..4a7d29b8aff 100644 --- a/ui/src/api/paragraph.ts +++ b/ui/src/api/paragraph.ts @@ -226,6 +226,21 @@ const disassociationProblem: ( ) } +const batchGenerateRelated: ( + dataset_id: string, + document_id: string, + data: any, + loading?: Ref +) => Promise> = (dataset_id, document_id, data, loading) => { + return put( + `${prefix}/${dataset_id}/document/${document_id}/paragraph/batch_generate_related`, + data, + undefined, + loading + 
) +} + + export default { getParagraph, delParagraph, @@ -236,5 +251,6 @@ export default { disassociationProblem, associationProblem, delMulParagraph, - putMigrateMulParagraph + putMigrateMulParagraph, + batchGenerateRelated } diff --git a/ui/src/api/platform-source.ts b/ui/src/api/platform-source.ts new file mode 100644 index 00000000000..defcc840fec --- /dev/null +++ b/ui/src/api/platform-source.ts @@ -0,0 +1,28 @@ +import { Result } from '@/request/Result' +import { get, post, del, put } from '@/request/index' +import type { pageRequest } from '@/api/type/common' +import { type Ref } from 'vue' + +const prefix = '/platform' +const getPlatformInfo: (loading?: Ref) => Promise> = (loading) => { + return get(`${prefix}/source`, undefined, loading) +} + +const updateConfig: (data: any, loading?: Ref) => Promise> = ( + data, + loading +) => { + return post(`${prefix}/source`, data, undefined, loading) +} + +const validateConnection: (data: any, loading?: Ref) => Promise> = ( + data, + loading +) => { + return put(`${prefix}/source`, data, undefined, loading) +} +export default { + getPlatformInfo, + updateConfig, + validateConnection +} diff --git a/ui/src/api/problem.ts b/ui/src/api/problem.ts index 7d8d16226a2..4625d6de63a 100644 --- a/ui/src/api/problem.ts +++ b/ui/src/api/problem.ts @@ -97,11 +97,28 @@ const getDetailProblems: ( return get(`${prefix}/${dataset_id}/problem/${problem_id}/paragraph`, undefined, loading) } +/** + * 批量关联段落 + * @param 参数 dataset_id, + * { + "problem_id_list": "Array", + "paragraph_list": "Array", + } + */ +const postMulAssociationProblem: ( + dataset_id: string, + data: any, + loading?: Ref +) => Promise> = (dataset_id, data, loading) => { + return post(`${prefix}/${dataset_id}/problem/_batch`, data, undefined, loading) +} + export default { getProblems, postProblems, delProblems, putProblems, getDetailProblems, - delMulProblem + delMulProblem, + postMulAssociationProblem } diff --git a/ui/src/api/system-api-key.ts 
b/ui/src/api/system-api-key.ts new file mode 100644 index 00000000000..9d66bc7cb34 --- /dev/null +++ b/ui/src/api/system-api-key.ts @@ -0,0 +1,58 @@ +import {Result} from '@/request/Result' +import {get, post, del, put} from '@/request/index' + +import {type Ref} from 'vue' + +const prefix = '/system/api_key' + +/** + * API_KEY列表 + */ +const getAPIKey: (loading?: Ref) => Promise> = () => { + return get(`${prefix}/`) +} + +/** + * 新增API_KEY + */ +const postAPIKey: (loading?: Ref) => Promise> = ( + loading +) => { + return post(`${prefix}/`, {}, undefined, loading) +} + +/** + * 删除API_KEY + * @param 参数 application_id api_key_id + */ +const delAPIKey: ( + api_key_id: String, + loading?: Ref +) => Promise> = (api_key_id, loading) => { + return del(`${prefix}/${api_key_id}/`, undefined, undefined, loading) +} + +/** + * 修改API_KEY + * data { + * is_active: boolean + * } + * @param api_key_id + * @param data + * @param loading + */ +const putAPIKey: ( + api_key_id: String, + data: any, + loading?: Ref +) => Promise> = (api_key_id, data, loading) => { + return put(`${prefix}/${api_key_id}/`, data, undefined, loading) +} + + +export default { + getAPIKey, + postAPIKey, + delAPIKey, + putAPIKey +} diff --git a/ui/src/api/team.ts b/ui/src/api/team.ts index 82e8f986e46..462534b0eba 100644 --- a/ui/src/api/team.ts +++ b/ui/src/api/team.ts @@ -36,7 +36,7 @@ const getMemberPermissions: (member_id: String) => Promise> = (membe } /** - * 获取成员权限 + * 修改成员权限 * @param 参数 member_id * @param 参数 { "team_member_permission_list": [ diff --git a/ui/src/api/theme.ts b/ui/src/api/theme.ts new file mode 100644 index 00000000000..6e696e5fd4c --- /dev/null +++ b/ui/src/api/theme.ts @@ -0,0 +1,35 @@ +import { Result } from '@/request/Result' +import { get, post, del, put } from '@/request/index' +import type { Ref } from 'vue' +const prefix = '/display' + +/** + * 查看外观设置 + */ +const getThemeInfo: (loading?: Ref) => Promise> = (loading) => { + return get(`${prefix}/info`, undefined, loading) +} + 
+/** + * 更新外观设置 + * @param 参数 + * * formData { + * theme + * icon + * loginLogo + * loginImage + * title + * slogan + * } + */ +const postThemeInfo: (data: any, loading?: Ref) => Promise> = ( + data, + loading +) => { + return post(`${prefix}/update`, data, undefined, loading) +} + +export default { + getThemeInfo, + postThemeInfo +} diff --git a/ui/src/api/type/application.ts b/ui/src/api/type/application.ts index 6da9dd84ae2..c423f11105a 100644 --- a/ui/src/api/type/application.ts +++ b/ui/src/api/type/application.ts @@ -1,22 +1,60 @@ import { type Dict } from '@/api/type/common' import { type Ref } from 'vue' +import bus from '@/bus' interface ApplicationFormType { name?: string desc?: string model_id?: string - multiple_rounds_dialogue?: boolean + dialogue_number?: number prologue?: string dataset_id_list?: string[] dataset_setting?: any model_setting?: any problem_optimization?: boolean + problem_optimization_prompt?: string icon?: string | undefined + type?: string + work_flow?: any + model_params_setting?: any + tts_model_params_setting?: any + stt_model_id?: string + tts_model_id?: string + stt_model_enable?: boolean + tts_model_enable?: boolean + tts_type?: string + tts_autoplay?: boolean + stt_autosend?: boolean +} +interface Chunk { + real_node_id: string + chat_id: string + chat_record_id: string + content: string + reasoning_content: string + node_id: string + up_node_id: string + is_end: boolean + node_is_end: boolean + node_type: string + view_type: string + runtime_node_id: string + child_node: any } interface chatType { id: string problem_text: string answer_text: string buffer: Array + answer_text_list: Array< + Array<{ + content: string + reasoning_content: string + chat_record_id?: string + runtime_node_id?: string + child_node?: any + real_node_id?: string + }> + > /** * 是否写入结束 */ @@ -26,9 +64,34 @@ interface chatType { */ is_stop?: boolean record_id: string + chat_id: string vote_status: string + status?: number + execution_details: any[] + 
upload_meta?: { + document_list: Array + image_list: Array + audio_list: Array + other_list: Array + } } +interface Node { + buffer: Array + node_id: string + up_node_id: string + node_type: string + view_type: string + index: number + is_end: boolean +} +interface WriteNodeInfo { + current_node: any + answer_text_list_index: number + current_up_node?: any + divider_content?: Array + divider_reasoning_content?: Array +} export class ChatRecordManage { id?: any ms: number @@ -37,6 +100,8 @@ export class ChatRecordManage { write_ed?: boolean is_stop?: boolean loading?: Ref + node_list: Array + write_node_info?: WriteNodeInfo constructor(chat: chatType, ms?: number, loading?: Ref) { this.ms = ms ? ms : 10 this.chat = chat @@ -44,31 +109,237 @@ export class ChatRecordManage { this.is_stop = false this.is_close = false this.write_ed = false + this.node_list = [] + } + append_answer( + chunk_answer: string, + reasoning_content: string, + index?: number, + chat_record_id?: string, + runtime_node_id?: string, + child_node?: any, + real_node_id?: string + ) { + if (chunk_answer || reasoning_content) { + const set_index = index != undefined ? index : this.chat.answer_text_list.length - 1 + let card_list = this.chat.answer_text_list[set_index] + if (!card_list) { + card_list = [] + this.chat.answer_text_list[set_index] = card_list + } + const answer_value = card_list.find((item) => item.real_node_id == real_node_id) + const content = answer_value ? answer_value.content + chunk_answer : chunk_answer + const _reasoning_content = answer_value + ? 
answer_value.reasoning_content + reasoning_content + : reasoning_content + if (answer_value) { + answer_value.content = content + answer_value.reasoning_content = _reasoning_content + } else { + card_list.push({ + content: content, + reasoning_content: _reasoning_content, + chat_record_id, + runtime_node_id, + child_node, + real_node_id + }) + } + } + this.chat.answer_text = this.chat.answer_text + chunk_answer + bus.emit('change:answer', { record_id: this.chat.record_id, is_end: false }) + } + get_current_up_node(run_node: any) { + const index = this.node_list.findIndex((item) => item == run_node) + if (index > 0) { + const n = this.node_list[index - 1] + return n + } + return undefined + } + get_run_node() { + if ( + this.write_node_info && + (this.write_node_info.current_node.reasoning_content_buffer.length > 0 || + this.write_node_info.current_node.buffer.length > 0 || + !this.write_node_info.current_node.is_end) + ) { + return this.write_node_info + } + const run_node = this.node_list.filter( + (item) => item.reasoning_content_buffer.length > 0 || item.buffer.length > 0 || !item.is_end + )[0] + + if (run_node) { + const index = this.node_list.indexOf(run_node) + let current_up_node = undefined + if (index > 0) { + current_up_node = this.get_current_up_node(run_node) + } + let answer_text_list_index = 0 + if ( + current_up_node == undefined || + run_node.view_type == 'single_view' || + current_up_node.view_type == 'single_view' + ) { + const none_index = this.findIndex( + this.chat.answer_text_list, + (item) => (item.length == 1 && item[0].content == '') || item.length == 0, + 'index' + ) + if (none_index > -1) { + answer_text_list_index = none_index + } else { + answer_text_list_index = this.chat.answer_text_list.length + } + } else { + const none_index = this.findIndex( + this.chat.answer_text_list, + (item) => (item.length == 1 && item[0].content == '') || item.length == 0, + 'index' + ) + if (none_index > -1) { + answer_text_list_index = none_index + } else 
{ + answer_text_list_index = this.chat.answer_text_list.length - 1 + } + } + + this.write_node_info = { + current_node: run_node, + current_up_node: current_up_node, + answer_text_list_index: answer_text_list_index + } + + return this.write_node_info + } + return undefined + } + findIndex(array: Array, find: (item: T) => boolean, type: 'last' | 'index') { + let set_index = -1 + for (let index = 0; index < array.length; index++) { + const element = array[index] + if (find(element)) { + set_index = index + if (type == 'index') { + break + } + } + } + return set_index + } + closeInterval() { + this.chat.write_ed = true + this.write_ed = true + if (this.loading) { + this.loading.value = false + } + bus.emit('change:answer', { record_id: this.chat.record_id, is_end: true }) + if (this.id) { + clearInterval(this.id) + } + const last_index = this.findIndex( + this.chat.answer_text_list, + (item) => (item.length == 1 && item[0].content == '') || item.length == 0, + 'last' + ) + if (last_index > 0) { + this.chat.answer_text_list.splice(last_index, 1) + } } write() { this.chat.is_stop = false this.is_stop = false + if (!this.is_close) { + this.is_close = false + } + + this.write_ed = false + this.chat.write_ed = false if (this.loading) { this.loading.value = true } this.id = setInterval(() => { - if (this.chat.buffer.length > 20) { - this.chat.answer_text = - this.chat.answer_text + this.chat.buffer.splice(0, this.chat.buffer.length - 20).join('') - } else if (this.is_close) { - this.chat.answer_text = this.chat.answer_text + this.chat.buffer.splice(0).join('') - this.chat.write_ed = true - this.write_ed = true - if (this.loading) { - this.loading.value = false + const node_info = this.get_run_node() + if (node_info == undefined) { + if (this.is_close) { + this.closeInterval() } - if (this.id) { - clearInterval(this.id) + return + } + const { current_node, answer_text_list_index } = node_info + + if (current_node.buffer.length > 20) { + const context = current_node.is_end + 
? current_node.buffer.splice(0) + : current_node.buffer.splice( + 0, + current_node.is_end ? undefined : current_node.buffer.length - 20 + ) + const reasoning_content = current_node.is_end + ? current_node.reasoning_content_buffer.splice(0) + : current_node.reasoning_content_buffer.splice( + 0, + current_node.is_end ? undefined : current_node.reasoning_content_buffer.length - 20 + ) + this.append_answer( + context.join(''), + reasoning_content.join(''), + answer_text_list_index, + current_node.chat_record_id, + current_node.runtime_node_id, + current_node.child_node, + current_node.real_node_id + ) + } else if (this.is_close) { + while (true) { + const node_info = this.get_run_node() + + if (node_info == undefined) { + break + } + this.append_answer( + node_info.current_node.buffer.splice(0).join(''), + node_info.current_node.reasoning_content_buffer.splice(0).join(''), + node_info.answer_text_list_index, + node_info.current_node.chat_record_id, + node_info.current_node.runtime_node_id, + node_info.current_node.child_node, + node_info.current_node.real_node_id + ) + + if ( + node_info.current_node.buffer.length == 0 && + node_info.current_node.reasoning_content_buffer.length == 0 + ) { + node_info.current_node.is_end = true + } } + this.closeInterval() } else { - const s = this.chat.buffer.shift() + const s = current_node.buffer.shift() + const reasoning_content = current_node.reasoning_content_buffer.shift() if (s !== undefined) { - this.chat.answer_text = this.chat.answer_text + s + this.append_answer( + s, + '', + answer_text_list_index, + current_node.chat_record_id, + current_node.runtime_node_id, + current_node.child_node, + current_node.real_node_id + ) + } + if (reasoning_content !== undefined) { + this.append_answer( + '', + reasoning_content, + answer_text_list_index, + current_node.chat_record_id, + current_node.runtime_node_id, + current_node.child_node, + current_node.real_node_id + ) } } }, this.ms) @@ -84,10 +355,57 @@ export class ChatRecordManage { 
close() { this.is_close = true } - append(answer_text_block: string) { - for (let index = 0; index < answer_text_block.length; index++) { - this.chat.buffer.push(answer_text_block[index]) + open() { + this.is_close = false + this.is_stop = false + } + appendChunk(chunk: Chunk) { + let n = this.node_list.find((item) => item.real_node_id == chunk.real_node_id) + if (n) { + n.buffer.push(...chunk.content) + n.content += chunk.content + if (chunk.reasoning_content) { + n.reasoning_content_buffer.push(...chunk.reasoning_content) + n.reasoning_content += chunk.reasoning_content + } + } else { + n = { + buffer: [...chunk.content], + reasoning_content_buffer: chunk.reasoning_content ? [...chunk.reasoning_content] : [], + reasoning_content: chunk.reasoning_content ? chunk.reasoning_content : '', + content: chunk.content, + real_node_id: chunk.real_node_id, + node_id: chunk.node_id, + chat_record_id: chunk.chat_record_id, + up_node_id: chunk.up_node_id, + runtime_node_id: chunk.runtime_node_id, + child_node: chunk.child_node, + node_type: chunk.node_type, + index: this.node_list.length, + view_type: chunk.view_type, + is_end: false + } + this.node_list.push(n) + } + if (chunk.node_is_end) { + n['is_end'] = true + } + } + append(answer_text_block: string, reasoning_content?: string) { + let set_index = this.findIndex( + this.chat.answer_text_list, + (item) => item.length == 1 && item[0].content == '', + 'index' + ) + if (set_index <= -1) { + set_index = 0 } + this.chat.answer_text_list[set_index] = [ + { + content: answer_text_block, + reasoning_content: reasoning_content ? 
reasoning_content : '' + } + ] } } @@ -97,10 +415,22 @@ export class ChatManagement { static addChatRecord(chat: chatType, ms: number, loading?: Ref) { this.chatMessageContainer[chat.id] = new ChatRecordManage(chat, ms, loading) } - static append(chatRecordId: string, content: string) { + static appendChunk(chatRecordId: string, chunk: Chunk) { + const chatRecord = this.chatMessageContainer[chatRecordId] + if (chatRecord) { + chatRecord.appendChunk(chunk) + } + } + static append(chatRecordId: string, content: string, reasoning_content?: string) { const chatRecord = this.chatMessageContainer[chatRecordId] if (chatRecord) { - chatRecord.append(content) + chatRecord.append(content, reasoning_content) + } + } + static updateStatus(chatRecordId: string, code: number) { + const chatRecord = this.chatMessageContainer[chatRecordId] + if (chatRecord) { + chatRecord.chat.status = code } } /** @@ -113,6 +443,12 @@ export class ChatManagement { chatRecord.write() } } + static open(chatRecordId: string) { + const chatRecord = this.chatMessageContainer[chatRecordId] + if (chatRecord) { + chatRecord.open() + } + } /** * 等待所有数据输出完毕后 才会关闭流 * @param chatRecordId 对话记录id diff --git a/ui/src/api/type/dataset.ts b/ui/src/api/type/dataset.ts index 6ec73c323c9..a30c5c98e60 100644 --- a/ui/src/api/type/dataset.ts +++ b/ui/src/api/type/dataset.ts @@ -3,6 +3,7 @@ interface datasetData { desc: String documents?: Array type?: String + embedding_mode_id?: String } export type { datasetData } diff --git a/ui/src/api/type/function-lib.ts b/ui/src/api/type/function-lib.ts new file mode 100644 index 00000000000..0f51764e09d --- /dev/null +++ b/ui/src/api/type/function-lib.ts @@ -0,0 +1,13 @@ +interface functionLibData { + id?: String + name?: String + icon?: String + desc?: String + code?: String + permission_type?: 'PRIVATE' | 'PUBLIC' + input_field_list?: Array + init_field_list?: Array + is_active?: Boolean +} + +export type { functionLibData } diff --git a/ui/src/api/type/model.ts 
b/ui/src/api/type/model.ts index e07b3615921..667292055bd 100644 --- a/ui/src/api/type/model.ts +++ b/ui/src/api/type/model.ts @@ -53,6 +53,9 @@ interface Model { * 模型类型 */ model_type: string + user_id: string + username: string + permission_type: 'PUBLIC' | 'PRIVATE' /** * 基础模型 */ @@ -68,11 +71,15 @@ interface Model { /** * 状态 */ - status: 'SUCCESS' | 'DOWNLOAD' | 'ERROR' + status: 'SUCCESS' | 'DOWNLOAD' | 'ERROR' | 'PAUSE_DOWNLOAD' /** * 元数据 */ meta: Dict + /** + * 模型参数配置 + */ + model_params_form: Dict[] } interface CreateModelRequest { /** diff --git a/ui/src/api/type/user.ts b/ui/src/api/type/user.ts index 6724252c959..ef22b55d982 100644 --- a/ui/src/api/type/user.ts +++ b/ui/src/api/type/user.ts @@ -23,6 +23,9 @@ interface User { * 是否需要修改密码 */ is_edit_password?: boolean + IS_XPACK?: boolean + XPACK_LICENSE_IS_VALID?: boolean + language: string } interface LoginRequest { @@ -34,6 +37,10 @@ interface LoginRequest { * 密码 */ password: string + /** + * 验证码 + */ + captcha: string } interface RegisterRequest { diff --git a/ui/src/api/user.ts b/ui/src/api/user.ts index e11d70168cc..d14a65e2f07 100644 --- a/ui/src/api/user.ts +++ b/ui/src/api/user.ts @@ -12,16 +12,28 @@ import type { Ref } from 'vue' /** * 登录 + * @param auth_type * @param request 登录接口请求表单 * @param loading 接口加载器 * @returns 认证数据 */ -const login: (request: LoginRequest, loading?: Ref) => Promise> = ( - request, - loading -) => { +const login: ( + auth_type: string, + request: LoginRequest, + loading?: Ref +) => Promise> = (auth_type, request, loading) => { + if (auth_type !== '') { + return post(`/${auth_type}/login`, request, undefined, loading) + } return post('/user/login', request, undefined, loading) } +/** + * 获取图形验证码 + * @returns + */ +const getCaptcha: () => Promise> = () => { + return get('user/captcha') +} /** * 登出 * @param loading 接口加载器 @@ -125,12 +137,82 @@ const getUserList: (email_or_username: string, loading?: Ref) => Promis } /** - * 获取version + * 获取profile */ -const getVersion: (loading?: 
Ref) => Promise> = (loading) => { +const getProfile: (loading?: Ref) => Promise> = (loading) => { return get('/profile', undefined, loading) } +/** + * 获取校验 + * @param valid_type 校验类型: application|dataset|user + * @param valid_count 校验数量: 5 | 50 | 2 + */ +const getValid: ( + valid_type: string, + valid_count: number, + loading?: Ref +) => Promise> = (valid_type, valid_count, loading) => { + return get(`/valid/${valid_type}/${valid_count}`, undefined, loading) +} +/** + * 获取登录方式 + */ +const getAuthType: (loading?: Ref) => Promise> = (loading) => { + return get('auth/types', undefined, loading) +} + +/** + * 获取二维码类型 + */ +const getQrType: (loading?: Ref) => Promise> = (loading) => { + return get('qr_type', undefined, loading) +} + +const getQrSource: (loading?: Ref) => Promise> = (loading) => { + return get('qr_type/source', undefined, loading) +} + +const getDingCallback: (code: string, loading?: Ref) => Promise> = ( + code, + loading +) => { + return get('dingtalk', { code }, loading) +} + +const getDingOauth2Callback: (code: string, loading?: Ref) => Promise> = ( + code, + loading +) => { + return get('dingtalk/oauth2', { code }, loading) +} + +const getWecomCallback: (code: string, loading?: Ref) => Promise> = ( + code, + loading +) => { + return get('wecom', { code }, loading) +} +const getlarkCallback: (code: string, loading?: Ref) => Promise> = ( + code, + loading +) => { + return get('feishu/oauth2', { code }, loading) +} + +/** + * 设置语言 + * data: { + * "language": "string" + * } + */ +const postLanguage: (data: any, loading?: Ref) => Promise> = ( + data, + loading +) => { + return post('/user/language', data, undefined, loading) +} + export default { login, register, @@ -142,5 +224,15 @@ export default { resetCurrentUserPassword, logout, getUserList, - getVersion + getProfile, + getValid, + getAuthType, + getDingCallback, + getQrType, + getWecomCallback, + postLanguage, + getDingOauth2Callback, + getlarkCallback, + getQrSource, + getCaptcha } diff --git 
a/ui/src/assets/acoustic-color.svg b/ui/src/assets/acoustic-color.svg new file mode 100644 index 00000000000..d9cfa1498c6 --- /dev/null +++ b/ui/src/assets/acoustic-color.svg @@ -0,0 +1,29 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/ui/src/assets/acoustic.svg b/ui/src/assets/acoustic.svg new file mode 100644 index 00000000000..a400eff9be4 --- /dev/null +++ b/ui/src/assets/acoustic.svg @@ -0,0 +1,29 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/ui/src/assets/display-bg1.png b/ui/src/assets/display-bg1.png new file mode 100644 index 00000000000..dbf63be2b69 Binary files /dev/null and b/ui/src/assets/display-bg1.png differ diff --git a/ui/src/assets/display-bg2.png b/ui/src/assets/display-bg2.png new file mode 100644 index 00000000000..606a5d918c5 Binary files /dev/null and b/ui/src/assets/display-bg2.png differ diff --git a/ui/src/assets/display-bg3.png b/ui/src/assets/display-bg3.png new file mode 100644 index 00000000000..52d0f92599e Binary files /dev/null and b/ui/src/assets/display-bg3.png differ diff --git a/ui/src/assets/csv-icon.svg b/ui/src/assets/fileType/csv-icon.svg similarity index 100% rename from ui/src/assets/csv-icon.svg rename to ui/src/assets/fileType/csv-icon.svg diff --git a/ui/src/assets/doc-icon.svg b/ui/src/assets/fileType/doc-icon.svg similarity index 100% rename from ui/src/assets/doc-icon.svg rename to ui/src/assets/fileType/doc-icon.svg diff --git a/ui/src/assets/docx-icon.svg b/ui/src/assets/fileType/docx-icon.svg similarity index 100% rename from ui/src/assets/docx-icon.svg rename to ui/src/assets/fileType/docx-icon.svg diff --git a/ui/src/assets/fileType/file-icon.svg b/ui/src/assets/fileType/file-icon.svg new file mode 100644 index 00000000000..59b1958879c --- /dev/null +++ b/ui/src/assets/fileType/file-icon.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/ui/src/assets/fileType/html-icon.svg b/ui/src/assets/fileType/html-icon.svg new file mode 100644 index 
00000000000..b59a48826e4 --- /dev/null +++ b/ui/src/assets/fileType/html-icon.svg @@ -0,0 +1,6 @@ + + + + + + diff --git a/ui/src/assets/md-icon.svg b/ui/src/assets/fileType/md-icon.svg similarity index 100% rename from ui/src/assets/md-icon.svg rename to ui/src/assets/fileType/md-icon.svg diff --git a/ui/src/assets/pdf-icon.svg b/ui/src/assets/fileType/pdf-icon.svg similarity index 100% rename from ui/src/assets/pdf-icon.svg rename to ui/src/assets/fileType/pdf-icon.svg diff --git a/ui/src/assets/txt-icon.svg b/ui/src/assets/fileType/txt-icon.svg similarity index 100% rename from ui/src/assets/txt-icon.svg rename to ui/src/assets/fileType/txt-icon.svg diff --git a/ui/src/assets/unknow-icon.svg b/ui/src/assets/fileType/unknown-icon.svg similarity index 100% rename from ui/src/assets/unknow-icon.svg rename to ui/src/assets/fileType/unknown-icon.svg diff --git a/ui/src/assets/fileType/xls-icon.svg b/ui/src/assets/fileType/xls-icon.svg new file mode 100644 index 00000000000..22cb869537f --- /dev/null +++ b/ui/src/assets/fileType/xls-icon.svg @@ -0,0 +1,5 @@ + + + + + diff --git a/ui/src/assets/fileType/xlsx-icon.svg b/ui/src/assets/fileType/xlsx-icon.svg new file mode 100644 index 00000000000..22cb869537f --- /dev/null +++ b/ui/src/assets/fileType/xlsx-icon.svg @@ -0,0 +1,5 @@ + + + + + diff --git a/ui/src/assets/fileType/zip-icon.svg b/ui/src/assets/fileType/zip-icon.svg new file mode 100644 index 00000000000..ad5d625fb63 --- /dev/null +++ b/ui/src/assets/fileType/zip-icon.svg @@ -0,0 +1,6 @@ + + + + + + diff --git a/ui/src/assets/icon_and.svg b/ui/src/assets/icon_and.svg new file mode 100644 index 00000000000..9c4842bff13 --- /dev/null +++ b/ui/src/assets/icon_and.svg @@ -0,0 +1,6 @@ + + + + + + diff --git a/ui/src/assets/icon_assigner.svg b/ui/src/assets/icon_assigner.svg new file mode 100644 index 00000000000..269f075c381 --- /dev/null +++ b/ui/src/assets/icon_assigner.svg @@ -0,0 +1,5 @@ + + + + + diff --git a/ui/src/assets/icon_condition.svg 
b/ui/src/assets/icon_condition.svg new file mode 100644 index 00000000000..2bc80a2125e --- /dev/null +++ b/ui/src/assets/icon_condition.svg @@ -0,0 +1,3 @@ + + + diff --git a/ui/src/assets/icon_docs.svg b/ui/src/assets/icon_docs.svg new file mode 100644 index 00000000000..9bec0be4a8f --- /dev/null +++ b/ui/src/assets/icon_docs.svg @@ -0,0 +1,4 @@ + + + + diff --git a/ui/src/assets/icon_file-audio.svg b/ui/src/assets/icon_file-audio.svg new file mode 100644 index 00000000000..13f1f72160a --- /dev/null +++ b/ui/src/assets/icon_file-audio.svg @@ -0,0 +1,5 @@ + + + + + diff --git a/ui/src/assets/icon_file-doc.svg b/ui/src/assets/icon_file-doc.svg new file mode 100644 index 00000000000..86ac2d3f174 --- /dev/null +++ b/ui/src/assets/icon_file-doc.svg @@ -0,0 +1,5 @@ + + + + + diff --git a/ui/src/assets/icon_file-folder_colorful.svg b/ui/src/assets/icon_file-folder_colorful.svg new file mode 100644 index 00000000000..7aa4703d7ed --- /dev/null +++ b/ui/src/assets/icon_file-folder_colorful.svg @@ -0,0 +1,4 @@ + + + + diff --git a/ui/src/assets/icon_file-image.svg b/ui/src/assets/icon_file-image.svg new file mode 100644 index 00000000000..4511c36f5d8 --- /dev/null +++ b/ui/src/assets/icon_file-image.svg @@ -0,0 +1,6 @@ + + + + + + diff --git a/ui/src/assets/icon_form.svg b/ui/src/assets/icon_form.svg new file mode 100644 index 00000000000..22a10210da3 --- /dev/null +++ b/ui/src/assets/icon_form.svg @@ -0,0 +1,7 @@ + + + + + + + diff --git a/ui/src/assets/icon_function_outlined.svg b/ui/src/assets/icon_function_outlined.svg new file mode 100644 index 00000000000..dbdef4c24ce --- /dev/null +++ b/ui/src/assets/icon_function_outlined.svg @@ -0,0 +1,12 @@ + + + + + + + + + + + + diff --git a/ui/src/assets/icon_globe_color.svg b/ui/src/assets/icon_globe_color.svg new file mode 100644 index 00000000000..7ede591d590 --- /dev/null +++ b/ui/src/assets/icon_globe_color.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/ui/src/assets/icon_hi.svg b/ui/src/assets/icon_hi.svg new 
file mode 100644 index 00000000000..84bb36ac2b2 --- /dev/null +++ b/ui/src/assets/icon_hi.svg @@ -0,0 +1,5 @@ + + + + + diff --git a/ui/src/assets/icon_image.svg b/ui/src/assets/icon_image.svg new file mode 100644 index 00000000000..f6ee5f5519a --- /dev/null +++ b/ui/src/assets/icon_image.svg @@ -0,0 +1,4 @@ + + + + diff --git a/ui/src/assets/icon_mcp.svg b/ui/src/assets/icon_mcp.svg new file mode 100644 index 00000000000..1544370a29e --- /dev/null +++ b/ui/src/assets/icon_mcp.svg @@ -0,0 +1,4 @@ + + + + diff --git a/ui/src/assets/icon_or.svg b/ui/src/assets/icon_or.svg new file mode 100644 index 00000000000..d38f014720d --- /dev/null +++ b/ui/src/assets/icon_or.svg @@ -0,0 +1,6 @@ + + + + + + diff --git a/ui/src/assets/icon_qr_outlined.svg b/ui/src/assets/icon_qr_outlined.svg new file mode 100644 index 00000000000..1d3cf43437d --- /dev/null +++ b/ui/src/assets/icon_qr_outlined.svg @@ -0,0 +1,3 @@ + + + diff --git a/ui/src/assets/icon_reply.svg b/ui/src/assets/icon_reply.svg new file mode 100644 index 00000000000..430fc7fc1f1 --- /dev/null +++ b/ui/src/assets/icon_reply.svg @@ -0,0 +1,3 @@ + + + diff --git a/ui/src/assets/icon_reranker.svg b/ui/src/assets/icon_reranker.svg new file mode 100644 index 00000000000..e56112278dd --- /dev/null +++ b/ui/src/assets/icon_reranker.svg @@ -0,0 +1,21 @@ + + + + + + + + + \ No newline at end of file diff --git a/ui/src/assets/icon_setting.svg b/ui/src/assets/icon_setting.svg new file mode 100644 index 00000000000..afa97360f1e --- /dev/null +++ b/ui/src/assets/icon_setting.svg @@ -0,0 +1,3 @@ + + + diff --git a/ui/src/assets/icon_speech_to_text.svg b/ui/src/assets/icon_speech_to_text.svg new file mode 100644 index 00000000000..a81d0aac2a4 --- /dev/null +++ b/ui/src/assets/icon_speech_to_text.svg @@ -0,0 +1,11 @@ + + + + + + + + + + + \ No newline at end of file diff --git a/ui/src/assets/icon_start.svg b/ui/src/assets/icon_start.svg new file mode 100644 index 00000000000..0b8d730643d --- /dev/null +++ 
b/ui/src/assets/icon_start.svg @@ -0,0 +1,4 @@ + + + + diff --git a/ui/src/assets/icon_text-image.svg b/ui/src/assets/icon_text-image.svg new file mode 100644 index 00000000000..e59aa52c419 --- /dev/null +++ b/ui/src/assets/icon_text-image.svg @@ -0,0 +1,5 @@ + + + + + diff --git a/ui/src/assets/icon_text_to_speech.svg b/ui/src/assets/icon_text_to_speech.svg new file mode 100644 index 00000000000..6c0235744c9 --- /dev/null +++ b/ui/src/assets/icon_text_to_speech.svg @@ -0,0 +1,11 @@ + + + + + + + + + + + diff --git a/ui/src/assets/load_error.png b/ui/src/assets/load_error.png new file mode 100644 index 00000000000..695abfd885e Binary files /dev/null and b/ui/src/assets/load_error.png differ diff --git a/ui/src/assets/logo.png b/ui/src/assets/logo.png deleted file mode 100644 index 7d9781edbb6..00000000000 Binary files a/ui/src/assets/logo.png and /dev/null differ diff --git a/ui/src/assets/logo/MaxKB-logo-currentColor.svg b/ui/src/assets/logo/MaxKB-logo-currentColor.svg new file mode 100644 index 00000000000..94281645f97 --- /dev/null +++ b/ui/src/assets/logo/MaxKB-logo-currentColor.svg @@ -0,0 +1,20 @@ + + + + + + + + + + + + + + + + + + + + diff --git a/ui/src/assets/logo/MaxKB-logo.svg b/ui/src/assets/logo/MaxKB-logo.svg new file mode 100644 index 00000000000..beb86aa5197 --- /dev/null +++ b/ui/src/assets/logo/MaxKB-logo.svg @@ -0,0 +1,64 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/ui/src/assets/logo/logo-currentColor.svg b/ui/src/assets/logo/logo-currentColor.svg new file mode 100644 index 00000000000..5f50e4cf31f --- /dev/null +++ b/ui/src/assets/logo/logo-currentColor.svg @@ -0,0 +1 @@ +MaxKB \ No newline at end of file diff --git a/ui/src/assets/logo/logo.svg b/ui/src/assets/logo/logo.svg new file mode 100644 index 00000000000..2e601bb46b2 --- /dev/null +++ b/ui/src/assets/logo/logo.svg @@ -0,0 +1 @@ +MaxKB \ No newline at end of file diff --git 
a/ui/src/assets/logo_dingtalk.svg b/ui/src/assets/logo_dingtalk.svg new file mode 100644 index 00000000000..64d957d4f5a --- /dev/null +++ b/ui/src/assets/logo_dingtalk.svg @@ -0,0 +1,3 @@ + + + diff --git a/ui/src/assets/logo_lark.svg b/ui/src/assets/logo_lark.svg new file mode 100644 index 00000000000..938c5055db6 --- /dev/null +++ b/ui/src/assets/logo_lark.svg @@ -0,0 +1,12 @@ + + + + + + + + + + + + diff --git a/ui/src/assets/logo_slack.svg b/ui/src/assets/logo_slack.svg new file mode 100644 index 00000000000..972c87fcef0 --- /dev/null +++ b/ui/src/assets/logo_slack.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/ui/src/assets/logo_wechat-work.svg b/ui/src/assets/logo_wechat-work.svg new file mode 100644 index 00000000000..ea860124957 --- /dev/null +++ b/ui/src/assets/logo_wechat-work.svg @@ -0,0 +1,7 @@ + + + + + + + diff --git a/ui/src/assets/logo_wechat.svg b/ui/src/assets/logo_wechat.svg new file mode 100644 index 00000000000..6c0e78de852 --- /dev/null +++ b/ui/src/assets/logo_wechat.svg @@ -0,0 +1,3 @@ + + + diff --git a/ui/src/assets/sort.svg b/ui/src/assets/sort.svg new file mode 100644 index 00000000000..e24e0450aec --- /dev/null +++ b/ui/src/assets/sort.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/ui/src/assets/login.jpg b/ui/src/assets/theme/default.jpg similarity index 100% rename from ui/src/assets/login.jpg rename to ui/src/assets/theme/default.jpg diff --git a/ui/src/assets/theme/green.jpg b/ui/src/assets/theme/green.jpg new file mode 100644 index 00000000000..937e8a03c8f Binary files /dev/null and b/ui/src/assets/theme/green.jpg differ diff --git a/ui/src/assets/theme/orange.jpg b/ui/src/assets/theme/orange.jpg new file mode 100644 index 00000000000..64b4c6abeb8 Binary files /dev/null and b/ui/src/assets/theme/orange.jpg differ diff --git a/ui/src/assets/theme/purple.jpg b/ui/src/assets/theme/purple.jpg new file mode 100644 index 00000000000..843a425855f Binary files /dev/null and b/ui/src/assets/theme/purple.jpg 
differ diff --git a/ui/src/assets/theme/red.jpg b/ui/src/assets/theme/red.jpg new file mode 100644 index 00000000000..cabf84f6fd3 Binary files /dev/null and b/ui/src/assets/theme/red.jpg differ diff --git a/ui/src/assets/tipIMG.jpg b/ui/src/assets/tipIMG.jpg new file mode 100644 index 00000000000..9f6955db530 Binary files /dev/null and b/ui/src/assets/tipIMG.jpg differ diff --git a/ui/src/assets/window3.png b/ui/src/assets/window3.png new file mode 100644 index 00000000000..4c2a3a563f2 Binary files /dev/null and b/ui/src/assets/window3.png differ diff --git a/ui/src/components/ai-chat/ExecutionDetailDialog.vue b/ui/src/components/ai-chat/ExecutionDetailDialog.vue new file mode 100644 index 00000000000..98ec1e6fa3e --- /dev/null +++ b/ui/src/components/ai-chat/ExecutionDetailDialog.vue @@ -0,0 +1,760 @@ + + + diff --git a/ui/src/components/ai-chat/KnowledgeSource.vue b/ui/src/components/ai-chat/KnowledgeSource.vue new file mode 100644 index 00000000000..729a6317b62 --- /dev/null +++ b/ui/src/components/ai-chat/KnowledgeSource.vue @@ -0,0 +1,122 @@ + + + diff --git a/ui/src/components/ai-chat/LogOperationButton.vue b/ui/src/components/ai-chat/LogOperationButton.vue deleted file mode 100644 index 59cc486345f..00000000000 --- a/ui/src/components/ai-chat/LogOperationButton.vue +++ /dev/null @@ -1,87 +0,0 @@ - - - diff --git a/ui/src/components/ai-chat/OperationButton.vue b/ui/src/components/ai-chat/OperationButton.vue deleted file mode 100644 index 35b289735fb..00000000000 --- a/ui/src/components/ai-chat/OperationButton.vue +++ /dev/null @@ -1,96 +0,0 @@ - - - diff --git a/ui/src/components/ai-chat/ParagraphSourceDialog.vue b/ui/src/components/ai-chat/ParagraphSourceDialog.vue index 2c8749dc3a7..32195a5c4b0 100644 --- a/ui/src/components/ai-chat/ParagraphSourceDialog.vue +++ b/ui/src/components/ai-chat/ParagraphSourceDialog.vue @@ -1,61 +1,31 @@ + diff --git a/ui/src/components/dynamics-form/DemoConstructor.vue b/ui/src/components/dynamics-form/DemoConstructor.vue new 
file mode 100644 index 00000000000..b8e0b60824d --- /dev/null +++ b/ui/src/components/dynamics-form/DemoConstructor.vue @@ -0,0 +1,55 @@ + + + diff --git a/ui/src/components/dynamics-form/FormItem.vue b/ui/src/components/dynamics-form/FormItem.vue index 89a64d9b447..83297ccffb8 100644 --- a/ui/src/components/dynamics-form/FormItem.vue +++ b/ui/src/components/dynamics-form/FormItem.vue @@ -2,13 +2,18 @@ (false) +const isString = (value: any) => { + return typeof value === 'string' +} const itemValue = computed({ get: () => { return props.modelValue @@ -72,7 +81,13 @@ const itemValue = computed({ } }) const componentFormRef = ref() - +const label_attrs = computed(() => { + return props.formfield.label && + typeof props.formfield.label !== 'string' && + props.formfield.label.attrs + ? props.formfield.label.attrs + : {} +}) const props_info = computed(() => { return props.formfield.props_info ? props.formfield.props_info : {} }) @@ -87,18 +102,34 @@ const formItemStyle = computed(() => { * 表单错误Msg */ const errMsg = computed(() => { - return props_info.value.err_msg ? props_info.value.err_msg : props.formfield.label + '不能为空' + return props_info.value.err_msg + ? props_info.value.err_msg + : isString(props.formfield.label) + ? props.formfield.label + ' ' + t('dynamicsForm.tip.requiredMessage') + : props.formfield.label.label + ' ' + t('dynamicsForm.tip.requiredMessage') }) +/** + * 反序列化 + * @param rule + */ +const to_rule = (rule: any) => { + if (rule.validator) { + let validator = (rule: any, value: string, callback: any) => {} + eval(rule.validator) + return { ...rule, validator } + } + return rule +} /** * 校验 */ const rules = computed(() => { return props_info.value.rules - ? props_info.value.rules + ? props_info.value.rules.map(to_rule) : { message: errMsg.value, - trigger: 'blur', + trigger: props.formfield.input_type === 'Slider' ? 'blur' : ['blur', 'change'], required: props.formfield.required === false ? 
false : true } }) diff --git a/ui/src/components/dynamics-form/constructor/data.ts b/ui/src/components/dynamics-form/constructor/data.ts new file mode 100644 index 00000000000..98ee4b0f6e9 --- /dev/null +++ b/ui/src/components/dynamics-form/constructor/data.ts @@ -0,0 +1,44 @@ +import { t } from '@/locales' +const input_type_list = [ + { + label: t('dynamicsForm.input_type_list.TextInput'), + value: 'TextInput' + }, + { + label: t('dynamicsForm.input_type_list.PasswordInput'), + value: 'PasswordInput' + }, + { + label: t('dynamicsForm.input_type_list.Slider'), + value: 'Slider' + }, + { + label: t('dynamicsForm.input_type_list.SwitchInput'), + value: 'SwitchInput' + }, + { + label: t('dynamicsForm.input_type_list.SingleSelect'), + value: 'SingleSelect' + }, + { + label: t('dynamicsForm.input_type_list.MultiSelect'), + value: 'MultiSelect' + }, + { + label: t('dynamicsForm.input_type_list.DatePicker'), + value: 'DatePicker' + }, + { + label: t('dynamicsForm.input_type_list.JsonInput'), + value: 'JsonInput' + }, + { + label: t('dynamicsForm.input_type_list.RadioCard'), + value: 'RadioCard' + }, + { + label: t('dynamicsForm.input_type_list.RadioRow'), + value: 'RadioRow' + } +] +export { input_type_list } diff --git a/ui/src/components/dynamics-form/constructor/index.vue b/ui/src/components/dynamics-form/constructor/index.vue new file mode 100644 index 00000000000..09127ee6d4c --- /dev/null +++ b/ui/src/components/dynamics-form/constructor/index.vue @@ -0,0 +1,144 @@ + + + diff --git a/ui/src/components/dynamics-form/constructor/items/DatePickerConstructor.vue b/ui/src/components/dynamics-form/constructor/items/DatePickerConstructor.vue new file mode 100644 index 00000000000..366885fce54 --- /dev/null +++ b/ui/src/components/dynamics-form/constructor/items/DatePickerConstructor.vue @@ -0,0 +1,138 @@ + + + diff --git a/ui/src/components/dynamics-form/constructor/items/JsonInputConstructor.vue b/ui/src/components/dynamics-form/constructor/items/JsonInputConstructor.vue 
new file mode 100644 index 00000000000..ba4ea896345 --- /dev/null +++ b/ui/src/components/dynamics-form/constructor/items/JsonInputConstructor.vue @@ -0,0 +1,84 @@ + + + diff --git a/ui/src/components/dynamics-form/constructor/items/MultiSelectConstructor.vue b/ui/src/components/dynamics-form/constructor/items/MultiSelectConstructor.vue new file mode 100644 index 00000000000..8dc7a186de6 --- /dev/null +++ b/ui/src/components/dynamics-form/constructor/items/MultiSelectConstructor.vue @@ -0,0 +1,156 @@ + + + diff --git a/ui/src/components/dynamics-form/constructor/items/PasswordInputConstructor.vue b/ui/src/components/dynamics-form/constructor/items/PasswordInputConstructor.vue new file mode 100644 index 00000000000..fd533607c63 --- /dev/null +++ b/ui/src/components/dynamics-form/constructor/items/PasswordInputConstructor.vue @@ -0,0 +1,194 @@ + + + diff --git a/ui/src/components/dynamics-form/constructor/items/RadioCardConstructor.vue b/ui/src/components/dynamics-form/constructor/items/RadioCardConstructor.vue new file mode 100644 index 00000000000..51ca7ca29b3 --- /dev/null +++ b/ui/src/components/dynamics-form/constructor/items/RadioCardConstructor.vue @@ -0,0 +1,149 @@ + + + diff --git a/ui/src/components/dynamics-form/constructor/items/RadioRowConstructor.vue b/ui/src/components/dynamics-form/constructor/items/RadioRowConstructor.vue new file mode 100644 index 00000000000..0b91d6acd49 --- /dev/null +++ b/ui/src/components/dynamics-form/constructor/items/RadioRowConstructor.vue @@ -0,0 +1,151 @@ + + + diff --git a/ui/src/components/dynamics-form/constructor/items/SingleSelectConstructor.vue b/ui/src/components/dynamics-form/constructor/items/SingleSelectConstructor.vue new file mode 100644 index 00000000000..eb671ee84fc --- /dev/null +++ b/ui/src/components/dynamics-form/constructor/items/SingleSelectConstructor.vue @@ -0,0 +1,150 @@ + + + diff --git a/ui/src/components/dynamics-form/constructor/items/SliderConstructor.vue 
b/ui/src/components/dynamics-form/constructor/items/SliderConstructor.vue new file mode 100644 index 00000000000..942e11445a5 --- /dev/null +++ b/ui/src/components/dynamics-form/constructor/items/SliderConstructor.vue @@ -0,0 +1,162 @@ + + + diff --git a/ui/src/components/dynamics-form/constructor/items/SwitchInputConstructor.vue b/ui/src/components/dynamics-form/constructor/items/SwitchInputConstructor.vue new file mode 100644 index 00000000000..96cdea6abca --- /dev/null +++ b/ui/src/components/dynamics-form/constructor/items/SwitchInputConstructor.vue @@ -0,0 +1,48 @@ + + + diff --git a/ui/src/components/dynamics-form/constructor/items/TextInputConstructor.vue b/ui/src/components/dynamics-form/constructor/items/TextInputConstructor.vue new file mode 100644 index 00000000000..c557a9e0701 --- /dev/null +++ b/ui/src/components/dynamics-form/constructor/items/TextInputConstructor.vue @@ -0,0 +1,183 @@ + + + diff --git a/ui/src/components/dynamics-form/index.ts b/ui/src/components/dynamics-form/index.ts index ba8cbfb092c..f4d69a0c130 100644 --- a/ui/src/components/dynamics-form/index.ts +++ b/ui/src/components/dynamics-form/index.ts @@ -16,7 +16,9 @@ const install = (app: App) => { const commentName: string = key .substring(key.lastIndexOf('/') + 1, key.length) .replace('.vue', '') - app.component(commentName, components[key].default) + if (key !== '/src/components/dynamics-form/constructor/index.vue') { + app.component(commentName, components[key].default) + } }) app.component('DynamicsForm', DynamicsForm) } diff --git a/ui/src/components/dynamics-form/index.vue b/ui/src/components/dynamics-form/index.vue index d9d9b17742d..061f8a8c52d 100644 --- a/ui/src/components/dynamics-form/index.vue +++ b/ui/src/components/dynamics-form/index.vue @@ -33,7 +33,7 @@ import type { Dict } from '@/api/type/common' import FormItem from '@/components/dynamics-form/FormItem.vue' import type { FormField } from '@/components/dynamics-form/type' -import { ref, onMounted, watch, type Ref 
} from 'vue' +import { ref, onBeforeMount, watch, type Ref } from 'vue' import type { FormInstance } from 'element-plus' import triggerApi from '@/api/provider' import type Result from '@/request/Result' @@ -146,14 +146,17 @@ const initDefaultData = (formField: FormField) => { formField.default_value && (formValue.value[formField.field] === undefined || formValue.value[formField.field] === null || - !formValue.value[formField.field]) + !formValue.value[formField.field]) && + formValue.value[formField.field] != false ) { - formValue.value[formField.field] = formField.default_value + if (formField.show_default_value === true) { + formValue.value[formField.field] = formField.default_value + } } } -onMounted(() => { - render(props.render_data, {}) +onBeforeMount(() => { + render(props.render_data, props.modelValue) }) const render = ( @@ -171,8 +174,37 @@ const render = ( formFieldList.value = ok.data }) } - if (data) { - formValue.value = data + const form_data = data ? data : {} + if (form_data) { + const value = formFieldList.value + .map((item) => { + if (form_data[item.field] !== undefined) { + if (item.value_field && item.option_list && item.option_list.length > 0) { + const value_field = item.value_field + const find = item.option_list?.find((i) => { + if (typeof form_data[item.field] === 'string') { + return i[value_field] === form_data[item.field] + } else { + return form_data[item.field].indexOf([value_field]) === -1 + } + }) + if (find) { + return { [item.field]: form_data[item.field] } + } + if (item.show_default_value === true || item.show_default_value === undefined) { + return { [item.field]: item.default_value } + } + } else { + return { [item.field]: form_data[item.field] } + } + } + if (item.show_default_value === true || item.show_default_value === undefined) { + return { [item.field]: item.default_value } + } + return {} + }) + .reduce((x, y) => ({ ...x, ...y }), {}) + formValue.value = _.cloneDeep(value) } } /** @@ -193,4 +225,4 @@ defineExpose({ 
ruleFormRef }) - + diff --git a/ui/src/components/dynamics-form/items/DatePicker.vue b/ui/src/components/dynamics-form/items/DatePicker.vue new file mode 100644 index 00000000000..291978c9c52 --- /dev/null +++ b/ui/src/components/dynamics-form/items/DatePicker.vue @@ -0,0 +1,5 @@ + + + diff --git a/ui/src/components/dynamics-form/items/JsonInput.vue b/ui/src/components/dynamics-form/items/JsonInput.vue new file mode 100644 index 00000000000..19b68a901d3 --- /dev/null +++ b/ui/src/components/dynamics-form/items/JsonInput.vue @@ -0,0 +1,143 @@ + + + diff --git a/ui/src/components/dynamics-form/items/complex/ArrayObjectCard.vue b/ui/src/components/dynamics-form/items/complex/ArrayObjectCard.vue index 8fca1d45d0f..eae016df4d1 100644 --- a/ui/src/components/dynamics-form/items/complex/ArrayObjectCard.vue +++ b/ui/src/components/dynamics-form/items/complex/ArrayObjectCard.vue @@ -8,12 +8,13 @@ require-asterisk-position="right" ref="ceFormRef" v-model="_data[index]" + :model="_data[index]" :other-params="other" :render_data="render_data()" v-bind="attr" :parent_field="formField.field + '.' + index" > - + @@ -21,7 +22,7 @@
- + {{ add_msg }}
@@ -128,14 +129,12 @@ defineExpose({ cursor: pointer; min-height: var(--card-min-height); border: 1px dashed var(--el-color-primary); - background: #eff0f1; + background: var(--el-disabled-bg-color);; padding-bottom: 20px; .add-icon { font-size: 14px; - border-radius: 4px; border: 1px solid var(--app-border-color-dark); - background: var(--app-layout-bg-color); margin-right: 12px; } &:hover { diff --git a/ui/src/components/dynamics-form/items/complex/TabCard.vue b/ui/src/components/dynamics-form/items/complex/TabCard.vue index c27ac7185a4..ff5ba793afb 100644 --- a/ui/src/components/dynamics-form/items/complex/TabCard.vue +++ b/ui/src/components/dynamics-form/items/complex/TabCard.vue @@ -16,6 +16,7 @@ require-asterisk-position="right" ref="ceFormRef" v-model="_data[index]" + :model="_data[index]" :other-params="other" :render_data="render_data()" v-bind="attr" diff --git a/ui/src/components/dynamics-form/items/label/TooltipLabel.vue b/ui/src/components/dynamics-form/items/label/TooltipLabel.vue new file mode 100644 index 00000000000..035b6bd6f3a --- /dev/null +++ b/ui/src/components/dynamics-form/items/label/TooltipLabel.vue @@ -0,0 +1,20 @@ + + + diff --git a/ui/src/components/dynamics-form/items/radio/RadioCard.vue b/ui/src/components/dynamics-form/items/radio/RadioCard.vue index 492cff40851..5aabdd9f456 100644 --- a/ui/src/components/dynamics-form/items/radio/RadioCard.vue +++ b/ui/src/components/dynamics-form/items/radio/RadioCard.vue @@ -1,19 +1,33 @@ diff --git a/ui/src/components/dynamics-form/items/radio/RadioRow.vue b/ui/src/components/dynamics-form/items/radio/RadioRow.vue new file mode 100644 index 00000000000..007fab18c37 --- /dev/null +++ b/ui/src/components/dynamics-form/items/radio/RadioRow.vue @@ -0,0 +1,92 @@ + + + diff --git a/ui/src/components/dynamics-form/items/select/MultiSelect.vue b/ui/src/components/dynamics-form/items/select/MultiSelect.vue index f78f898c52e..f397b37cbfd 100644 --- 
a/ui/src/components/dynamics-form/items/select/MultiSelect.vue +++ b/ui/src/components/dynamics-form/items/select/MultiSelect.vue @@ -2,7 +2,6 @@ - + @@ -60,4 +68,10 @@ const label = (option: any) => { return option[textField.value] } - + diff --git a/ui/src/components/dynamics-form/items/slider/Slider.vue b/ui/src/components/dynamics-form/items/slider/Slider.vue new file mode 100644 index 00000000000..3892f156386 --- /dev/null +++ b/ui/src/components/dynamics-form/items/slider/Slider.vue @@ -0,0 +1,11 @@ + + + diff --git a/ui/src/components/dynamics-form/items/switch/SwitchInput.vue b/ui/src/components/dynamics-form/items/switch/SwitchInput.vue new file mode 100644 index 00000000000..c787945f35a --- /dev/null +++ b/ui/src/components/dynamics-form/items/switch/SwitchInput.vue @@ -0,0 +1,7 @@ + + + + \ No newline at end of file diff --git a/ui/src/components/dynamics-form/items/table/ProgressTableItem.vue b/ui/src/components/dynamics-form/items/table/ProgressTableItem.vue index edccac235b1..baf9e3e142e 100644 --- a/ui/src/components/dynamics-form/items/table/ProgressTableItem.vue +++ b/ui/src/components/dynamics-form/items/table/ProgressTableItem.vue @@ -5,10 +5,10 @@
- {{ item.title }} + + {{ item.title }} + +
diff --git a/ui/src/components/dynamics-form/items/table/TableCheckbox.vue b/ui/src/components/dynamics-form/items/table/TableCheckbox.vue index 12db31a4900..8285689f149 100644 --- a/ui/src/components/dynamics-form/items/table/TableCheckbox.vue +++ b/ui/src/components/dynamics-form/items/table/TableCheckbox.vue @@ -6,7 +6,7 @@ }>() const rowTemp = ref() -const evalF = (text: string, row: any) => { +const evalF: (text: string, row: any) => string = (text: string, row: any) => { rowTemp.value = row return eval(text) } @@ -167,8 +167,8 @@ watch( const activeText = computed(() => { if (props.modelValue) { - const rows = option_list.value.filter( - (f: any) => props.modelValue?.includes(f[valueField.value]) + const rows = option_list.value.filter((f: any) => + props.modelValue?.includes(f[valueField.value]) ) if (rows) { if (rows.length > 3) { diff --git a/ui/src/components/dynamics-form/items/table/TableRadio.vue b/ui/src/components/dynamics-form/items/table/TableRadio.vue index 61c0078a76f..1bc055b3b0e 100644 --- a/ui/src/components/dynamics-form/items/table/TableRadio.vue +++ b/ui/src/components/dynamics-form/items/table/TableRadio.vue @@ -6,7 +6,7 @@ + +
+ +
+
+ +
+
+

{{ $t('views.document.generateQuestion.tip1', { data: '{data}' }) }}

+

+ {{ $t('views.document.generateQuestion.tip2')+ '' + + $t('views.document.generateQuestion.tip3') }} +

+

{{ $t('views.document.generateQuestion.tip4') }}

+
+
+ + + + + + + + + {{ + $t('components.selectParagraph.error') + }} + {{ $t('components.selectParagraph.all') }} + + +
+
+ +
+ + + diff --git a/ui/src/components/icons/index.ts b/ui/src/components/icons/index.ts index 88dd8fd3cdb..81d471b2a5f 100644 --- a/ui/src/components/icons/index.ts +++ b/ui/src/components/icons/index.ts @@ -378,13 +378,13 @@ export const iconMap: any = { 'svg', { style: { height: '100%', width: '100%' }, - viewBox: '0 0 16 16', + viewBox: '0 0 1024 1024', version: '1.1', xmlns: 'http://www.w3.org/2000/svg' }, [ h('path', { - d: 'M3.33333 5.3335V13.3335H10V5.3335H3.33333ZM11.3333 4.66683V14.0742C11.3333 14.4015 11.0548 14.6668 10.7111 14.6668H2.62222C2.27858 14.6668 2 14.4015 2 14.0742V4.59276C2 4.26548 2.27858 4.00016 2.62222 4.00016H10.6667C11.0349 4.00016 11.3333 4.29864 11.3333 4.66683ZM13.8047 1.52876C13.9254 1.6494 14 1.81607 14 2.00016V10.3335C14 10.5176 13.8508 10.6668 13.6667 10.6668H13C12.8159 10.6668 12.6667 10.5176 12.6667 10.3335V2.66683H6.33333C6.14924 2.66683 6 2.51759 6 2.3335V1.66683C6 1.48273 6.14924 1.3335 6.33333 1.3335H13.3333C13.5174 1.3335 13.6841 1.40812 13.8047 1.52876Z', + d: 'M213.333333 341.333333v512h426.666667V341.333333H213.333333z m512-42.666666v602.069333c0 20.949333-17.834667 37.930667-39.808 37.930667H167.808C145.834667 938.666667 128 921.685333 128 900.736V293.973333C128 272.981333 145.834667 256 167.808 256H682.666667a42.666667 42.666667 0 0 1 42.666666 42.666667z m158.165334-200.832A42.538667 42.538667 0 0 1 896 128v533.333333a21.333333 21.333333 0 0 1-21.333333 21.333334h-42.666667a21.333333 21.333333 0 0 1-21.333333-21.333334V170.666667H405.333333a21.333333 21.333333 0 0 1-21.333333-21.333334v-42.666666a21.333333 21.333333 0 0 1 21.333333-21.333334H853.333333c11.776 0 22.442667 4.778667 30.165334 12.501334z', fill: 'currentColor' }) ] @@ -533,24 +533,39 @@ export const iconMap: any = { 'svg', { style: { height: '100%', width: '100%' }, - viewBox: '0 0 16 16', + viewBox: '0 0 1024 1024', version: '1.1', xmlns: 'http://www.w3.org/2000/svg' }, [ h('path', { - d: 'M7.99984 3.66667C8.46007 3.66667 8.83317 4.03977 8.83317 
4.5C8.83317 4.96023 8.46007 5.33333 7.99984 5.33333C7.5396 5.33333 7.1665 4.96023 7.1665 4.5C7.1665 4.03977 7.5396 3.66667 7.99984 3.66667Z', + d: 'M512 234.666667A53.333333 53.333333 0 1 1 512 341.333333a53.333333 53.333333 0 0 1 0-106.666666zM522.666667 384h-64a21.333333 21.333333 0 0 0-21.333334 21.333333v42.666667a21.333333 21.333333 0 0 0 21.333334 21.333333h21.333333v213.333334H426.666667a21.333333 21.333333 0 0 0-21.333334 21.333333v42.666667a21.333333 21.333333 0 0 0 21.333334 21.333333h192a21.333333 21.333333 0 0 0 21.333333-21.333333v-42.666667a21.333333 21.333333 0 0 0-21.333333-21.333333h-53.333334v-256a42.666667 42.666667 0 0 0-42.666666-42.666667z', fill: 'currentColor' }), h('path', { - d: 'M8.1665 6H7.33317C7.05703 6 6.83317 6.22386 6.83317 6.5V6.83333C6.83317 7.10948 7.05703 7.33333 7.33317 7.33333H7.49984V10.6667H6.83317C6.55703 10.6667 6.33317 10.8905 6.33317 11.1667V11.5C6.33317 11.7761 6.55703 12 6.83317 12H9.49984C9.77598 12 9.99984 11.7761 9.99984 11.5V11.1667C9.99984 10.8905 9.77598 10.6667 9.49984 10.6667H8.83317V6.66667C8.83317 6.29848 8.53469 6 8.1665 6Z', + d: 'M512 981.333333C252.8 981.333333 42.666667 771.2 42.666667 512S252.8 42.666667 512 42.666667s469.333333 210.133333 469.333333 469.333333-210.133333 469.333333-469.333333 469.333333z m0-85.333333a384 384 0 1 0 0-768 384 384 0 0 0 0 768z', fill: 'currentColor' - }), + }) + ] + ) + ]) + } + }, + 'app-warning-colorful': { + iconReader: () => { + return h('i', [ + h( + 'svg', + { + style: { height: '100%', width: '100%' }, + viewBox: '0 0 1024 1024', + version: '1.1', + xmlns: 'http://www.w3.org/2000/svg' + }, + [ h('path', { - d: 'M7.99984 15.3332C3.94984 15.3332 0.666504 12.0498 0.666504 7.99984C0.666504 3.94984 3.94984 0.666504 7.99984 0.666504C12.0498 0.666504 15.3332 3.94984 15.3332 7.99984C15.3332 12.0498 12.0498 15.3332 7.99984 15.3332ZM7.99984 13.9998C11.3135 13.9998 13.9998 11.3135 13.9998 7.99984C13.9998 4.68617 11.3135 1.99984 7.99984 1.99984C4.68617 1.99984 1.99984 4.68617 
1.99984 7.99984C1.99984 11.3135 4.68617 13.9998 7.99984 13.9998Z', - fill: 'currentColor', - fillRule: 'evenodd', - clipRule: 'evenodd' + d: 'M42.666667 512c0 259.2 210.133333 469.333333 469.333333 469.333333s469.333333-210.133333 469.333333-469.333333S771.2 42.666667 512 42.666667 42.666667 252.8 42.666667 512z m469.333333-277.333333A53.333333 53.333333 0 1 1 512 341.333333a53.333333 53.333333 0 0 1 0-106.666666zM458.666667 384h64a42.666667 42.666667 0 0 1 42.666666 42.666667v256h53.333334a21.333333 21.333333 0 0 1 21.333333 21.333333v42.666667a21.333333 21.333333 0 0 1-21.333333 21.333333H426.666667a21.333333 21.333333 0 0 1-21.333334-21.333333v-42.666667a21.333333 21.333333 0 0 1 21.333334-21.333333h53.333333v-213.333334h-21.333333a21.333333 21.333333 0 0 1-21.333334-21.333333v-42.666667a21.333333 21.333333 0 0 1 21.333334-21.333333z', + fill: '#3370FF' }) ] ) @@ -591,7 +606,11 @@ export const iconMap: any = { }, [ h('path', { - d: 'M401.066667 378.88H175.786667c-8.533333 0-13.653333 6.826667-13.653334 13.653333 0 8.533333 6.826667 13.653333 13.653334 13.653334h225.28c8.533333 0 13.653333-6.826667 13.653333-13.653334s-5.12-13.653333-13.653333-13.653333z m0 110.933333H175.786667c-8.533333 0-13.653333 6.826667-13.653334 13.653334 0 8.533333 6.826667 13.653333 13.653334 13.653333h225.28c8.533333 0 13.653333-6.826667 13.653333-13.653333s-5.12-13.653333-13.653333-13.653334z m0 109.226667H175.786667c-8.533333 0-13.653333 6.826667-13.653334 13.653333 0 8.533333 6.826667 13.653333 13.653334 13.653334h225.28c8.533333 0 13.653333-6.826667 13.653333-13.653334s-5.12-13.653333-13.653333-13.653333zM612.693333 392.533333c0 8.533333 6.826667 13.653333 13.653334 13.653334h225.28c8.533333 0 13.653333-6.826667 13.653333-13.653334 0-8.533333-6.826667-13.653333-13.653333-13.653333H626.346667c-6.826667 0-13.653333 6.826667-13.653334 13.653333z m237.226667 97.28H626.346667c-8.533333 0-13.653333 6.826667-13.653334 13.653334 0 8.533333 6.826667 13.653333 13.653334 
13.653333h225.28c8.533333 0 13.653333-6.826667 13.653333-13.653333 0-8.533333-6.826667-13.653333-15.36-13.653334z m0 109.226667H626.346667c-8.533333 0-13.653333 6.826667-13.653334 13.653333 0 8.533333 6.826667 13.653333 13.653334 13.653334h225.28c8.533333 0 13.653333-6.826667 13.653333-13.653334s-6.826667-13.653333-15.36-13.653333zM737.28 153.6c-81.92 0-174.08 13.653333-223.573333 46.08-47.786667-32.426667-139.946667-46.08-221.866667-46.08C172.373333 153.6 34.133333 180.906667 34.133333 252.586667v581.973333c0 10.24 5.12 18.773333 11.946667 25.6 8.533333 6.826667 17.066667 10.24 27.306667 8.533333 66.56-11.946667 139.946667-17.066667 216.746666-17.066666s151.893333 6.826667 216.746667 17.066666h11.946667c66.56-11.946667 139.946667-17.066667 216.746666-17.066666s151.893333 6.826667 216.746667 17.066666h6.826667c8.533333 0 15.36-3.413333 20.48-8.533333 8.533333-6.826667 11.946667-15.36 11.946666-25.6V252.586667c1.706667-71.68-136.533333-98.986667-254.293333-98.986667zM102.4 797.013333V256c13.653333-13.653333 80.213333-35.84 189.44-35.84 109.226667 0 175.786667 22.186667 189.44 35.84v539.306667c-59.733333-8.533333-124.586667-13.653333-189.44-13.653334-66.56 1.706667-131.413333 6.826667-189.44 15.36z m824.32 0c-117.76-15.36-261.12-15.36-380.586667 0V256c13.653333-13.653333 80.213333-35.84 189.44-35.84 109.226667 0 175.786667 22.186667 189.44 35.84l1.706667 541.013333z', + d: 'M768 128H256a85.333333 85.333333 0 0 0-85.333333 85.333333v426.666667h512V64h85.333333v640a21.333333 21.333333 0 0 1-21.333333 21.333333H256a85.333333 85.333333 0 0 0-0.128 170.666667H832a21.333333 21.333333 0 0 0 21.333333-21.333333V341.333333h85.333334v597.333334a42.666667 42.666667 0 0 1-42.666667 42.666666H256c-94.293333 0-170.666667-76.16-170.666667-170.410666V213.248C85.333333 119.04 161.706667 42.666667 256 42.666667h469.333333a42.666667 42.666667 0 0 1 42.666667 42.666666v42.666667z', + fill: 'currentColor' + }), + h('path', { + d: 'M277.333333 768a21.333333 21.333333 0 0 0-21.333333 
21.333333v42.666667a21.333333 21.333333 0 0 0 21.333333 21.333333h469.333334a21.333333 21.333333 0 0 0 21.333333-21.333333v-42.666667a21.333333 21.333333 0 0 0-21.333333-21.333333h-469.333334z', fill: 'currentColor' }) ] @@ -633,7 +652,28 @@ export const iconMap: any = { }, [ h('path', { - d: 'M615.312 720H832V192H192v528h216.688a64 64 0 0 1 49.168 23.04l41.856 50.208a16 16 0 0 0 24.576 0l41.856-50.224A64 64 0 0 1 615.312 720z m19.088 64l-88.544 88.128a48 48 0 0 1-67.712 0L389.6 784H192a64 64 0 0 1-64-64V192a64 64 0 0 1 64-64h640a64 64 0 0 1 64 64v528a64 64 0 0 1-64 64H634.4z m-124.32-528c34.384 0 62.048 9.392 82.96 28.192 21.28 18.432 31.92 43.792 31.92 76.064 0 25.888-6.736 47.52-20.208 64.896-4.608 5.68-19.52 19.504-44.688 41.488-9.92 8.16-17.376 17.376-22.336 27.664-5.68 10.288-8.512 21.984-8.512 35.104v9.04h-56.912v-9.04c0-20.576 3.536-37.408 10.64-50.528 6.72-14.192 26.768-36.176 60.096-65.968l9.04-10.096c9.936-12.768 14.896-25.712 14.896-38.832 0-18.08-5.136-32.64-15.424-43.616-10.64-10.64-25.712-15.968-45.216-15.968-24.112 0-41.488 7.632-52.128 22.88-9.568 12.768-14.352 30.848-14.352 54.24H384c0-38.64 11.168-69.136 33.504-91.472C439.856 267.344 470.72 256 510.08 256z m-9.6 311.168c11.712 0 21.12 3.552 28.208 10.64 7.44 6.736 11.168 15.792 11.168 27.136 0 10.64-3.904 19.84-11.712 27.648a39.76 39.76 0 0 1-27.648 10.64c-10.992 0-20.224-3.712-27.68-11.168a36.928 36.928 0 0 1-11.152-27.12c0-11.36 3.712-20.4 11.168-27.136 7.088-7.088 16.32-10.64 27.664-10.64z', + d: 'M512 896a384 384 0 1 0 0-768 384 384 0 0 0 0 768z m0 85.333333C252.8 981.333333 42.666667 771.2 42.666667 512S252.8 42.666667 512 42.666667s469.333333 210.133333 469.333333 469.333333-210.133333 469.333333-469.333333 469.333333z m-21.333333-298.666666h42.666666a21.333333 21.333333 0 0 1 21.333334 21.333333v42.666667a21.333333 21.333333 0 0 1-21.333334 21.333333h-42.666666a21.333333 21.333333 0 0 1-21.333334-21.333333v-42.666667a21.333333 21.333333 0 0 1 21.333334-21.333333zM343.466667 
396.032c0.554667-4.778667 1.109333-8.746667 1.664-11.946667 8.32-46.293333 29.397333-80.341333 63.189333-102.144 26.453333-17.28 59.008-25.941333 97.621333-25.941333 50.730667 0 92.842667 12.288 126.378667 36.864 33.578667 24.533333 50.346667 60.928 50.346667 109.141333 0 29.568-7.253333 54.485333-21.888 74.752-8.533333 12.245333-24.917333 27.946667-49.152 47.061334l-23.893334 18.773333c-13.013333 10.24-21.632 22.186667-25.898666 35.84-1.152 3.712-2.176 10.624-3.072 20.736a21.333333 21.333333 0 0 1-21.248 19.498667h-47.786667a21.333333 21.333333 0 0 1-21.248-23.296c2.773333-29.696 5.717333-48.469333 8.832-56.362667 5.845333-14.677333 20.906667-31.573333 45.141333-50.688l24.533334-19.413333c8.106667-6.144 49.749333-35.456 49.749333-61.44 0-25.941333-4.522667-35.498667-17.578667-49.749334-13.013333-14.208-42.368-18.773333-68.864-18.773333-26.026667 0-48.256 6.869333-59.136 24.405333-5.034667 8.106667-9.173333 16.768-12.117333 25.6a89.472 89.472 0 0 0-3.114667 13.098667 21.333333 21.333333 0 0 1-21.034666 17.706667H364.672a21.333333 21.333333 0 0 1-21.205333-23.722667z', + fill: 'currentColor' + }) + ] + ) + ]) + } + }, + 'app-pricing': { + iconReader: () => { + return h('i', [ + h( + 'svg', + { + style: { height: '100%', width: '100%' }, + viewBox: '0 0 1024 1024', + version: '1.1', + xmlns: 'http://www.w3.org/2000/svg' + }, + [ + h('path', { + d: 'M723.584 128c24.618667 0 48.213333 10.112 64.768 28.074667l170.965333 185.472c28.288 30.72 29.226667 76.373333 4.48 106.837333l-403.797333 457.685333a64 64 0 0 1-96 0l-397.824-450.986666a82.090667 82.090667 0 0 1-1.493333-113.493334l170.965333-185.514666C252.16 138.154667 275.754667 128 300.373333 128h423.168z m0 85.333333H300.373333c-1.024 0-1.834667 0.341333-2.048 0.597334L129.152 397.482667 512 831.488l382.848-433.92-169.216-183.637333a2.304 2.304 0 0 0-1.109333-0.512L723.584 213.333333z m-12.586667 202.794667a42.666667 42.666667 0 0 1 3.541334 60.202667l-170.666667 192a42.666667 42.666667 0 0 1-63.744 
0l-170.666667-192a42.666667 42.666667 0 1 1 63.744-56.661334L512 575.744l138.794667-156.074667a42.666667 42.666667 0 0 1 60.202666-3.541333z', fill: 'currentColor' }) ] @@ -766,11 +806,7 @@ export const iconMap: any = { }, [ h('path', { - d: 'M565.03091564 528.45078523a588.83471385 588.83471385 0 0 1 16.90811971-15.58251253c16.81532721-14.88656875 34.70439623-28.84521246 50.33330501-44.73261462 28.33485369-28.83195638 37.04409293-63.99368709 29.02416942-101.57465094-9.23948212-43.27444672-40.20566608-71.52976398-84.66653122-81.02111147-31.27770165-8.21876458-35.38708395-7.01909007-67.9373685-4.33473551-37.94550581 6.16407344-39.35727747 6.05802485-76.22241344 22.62811474-2.48551348 1.39188755-19.28758462 10.35962019-24.5966414 15.11855-11.44661809 5.60731841-19.40026124 17.25940562-19.40026123 30.86013539a34.46578693 34.46578693 0 0 0 34.46578694 34.46578694 34.1807814 34.1807814 0 0 0 20.83854503-7.17816293c0.35128591-0.22535322 0.69594378-0.41756626 1.06711379-0.74896807 28.77230406-25.79631593 62.90668921-36.7259472 102.56885634-31.38375021 15.43006769 2.07457524 28.54032281 8.45737387 38.05818242 20.42097876 12.23535436 15.3770434 10.79707056 32.51714437 6.85338917 49.71026962-3.05552458 13.30909618-11.26103308 24.31163586-21.66704951 33.43181333-17.02079632 14.932965-34.65799999 29.27603478-52.28194758 43.60584853-19.63224249 15.97356663-28.85846852 36.7259472-31.52293898 60.18919446a257.89025081 257.89025081 0 0 0-1.49793613 30.30338037h0.04639624c-0.03976821 19.12188371 16.21880398 32.68947331 30.90653165 33.12029565 20.02329661 0.59652323 35.11533446-13.47479709 35.32743162-32.39783973-0.00662803-1.0869979-0.19884108-2.07457524-0.28500555-3.12180494-0.00662803-5.1433559-0.0927925-10.29333983 0.01988411-15.43006769 0.29826162-13.49468121 3.10854885-26.22713825 13.66038209-36.34814915zM515.93042532 643.75209862c-19.01583514-0.76222413-32.4309799 15.15169019-33.41192923 31.12525684-1.23281469 20.1691134 15.69518913 34.65799999 30.89327557 35.10870642 
20.02329661 0.59652323 35.11533446-13.47479709 35.32743161-32.39783973-0.13918876-19.99015643-13.38863262-33.06064333-32.80877795-33.83612353zM96.72703555 251.52481518h120.80258323c17.31242991 0 31.34398202-14.84017249 31.34398202-33.14017976s-14.03818015-33.14017975-31.34398202-33.14017975H96.72703555c-17.31242991 0-31.34398202 14.84017249-31.34398201 33.14017975s14.03155212 33.14017975 31.34398201 33.14017976zM94.63920422 412.78492985h120.80258324c17.31242991 0 31.34398202-14.84017249 31.34398201-33.14017974s-14.03818015-33.14017975-31.34398201-33.14017976H94.63920422c-17.31242991 0-31.35061005 14.84017249-31.35061003 33.14017976s14.03818015 33.14017975 31.35061003 33.14017974zM246.78576947 542.32989251c0-18.3066353-14.03818015-33.14017975-31.34398201-33.14017975H94.63920422c-17.31242991 0-31.35061005 14.83354446-31.35061003 33.14017975 0 18.30000725 14.03818015 33.14017975 31.35061003 33.14017976h120.80258324c17.30580187 0 31.34398202-14.84017249 31.34398201-33.14017976z', - fill: 'currentColor' - }), - h('path', { - d: 'M824.35945025 44.76986174H194.99429654a35.93058289 35.93058289 0 0 0 0 71.84790971h629.36515371c19.80457142 0 35.96372307 16.13263951 35.96372307 35.93058289v718.5652615a35.99023521 35.99023521 0 0 1-35.96372307 35.93721092H230.10963102a35.95709503 35.95709503 0 0 1-35.95709503-35.93721092v-190.42347285a35.93721092 35.93721092 0 0 0-35.96372307-35.92395486 35.92395486 35.92395486 0 0 0-35.95709503 35.92395486v190.42347285c0 59.43359837 48.40454655 107.79837669 107.87791313 107.7983767h594.24981923c59.47999461 0 107.8712851-48.36477833 107.87128509-107.7983767V152.55498237c0-59.42697034-48.39129049-107.78512063-107.87128509-107.78512063z', + d: 'M512 896a384 384 0 1 0 0-768 384 384 0 0 0 0 768z m0 85.333333C252.8 981.333333 42.666667 771.2 42.666667 512S252.8 42.666667 512 42.666667s469.333333 210.133333 469.333333 469.333333-210.133333 469.333333-469.333333 469.333333z m-21.333333-298.666666h42.666666a21.333333 21.333333 0 0 1 21.333334 
21.333333v42.666667a21.333333 21.333333 0 0 1-21.333334 21.333333h-42.666666a21.333333 21.333333 0 0 1-21.333334-21.333333v-42.666667a21.333333 21.333333 0 0 1 21.333334-21.333333zM343.466667 396.032c0.554667-4.778667 1.109333-8.746667 1.664-11.946667 8.32-46.293333 29.397333-80.341333 63.189333-102.144 26.453333-17.28 59.008-25.941333 97.621333-25.941333 50.730667 0 92.842667 12.288 126.378667 36.864 33.578667 24.533333 50.346667 60.928 50.346667 109.141333 0 29.568-7.253333 54.485333-21.888 74.752-8.533333 12.245333-24.917333 27.946667-49.152 47.061334l-23.893334 18.773333c-13.013333 10.24-21.632 22.186667-25.898666 35.84-1.152 3.712-2.176 10.624-3.072 20.736a21.333333 21.333333 0 0 1-21.248 19.498667h-47.786667a21.333333 21.333333 0 0 1-21.248-23.296c2.773333-29.696 5.717333-48.469333 8.832-56.362667 5.845333-14.677333 20.906667-31.573333 45.141333-50.688l24.533334-19.413333c8.106667-6.144 49.749333-35.456 49.749333-61.44 0-25.941333-4.522667-35.498667-17.578667-49.749334-13.013333-14.208-42.368-18.773333-68.864-18.773333-26.026667 0-48.256 6.869333-59.136 24.405333-5.034667 8.106667-9.173333 16.768-12.117333 25.6a89.472 89.472 0 0 0-3.114667 13.098667 21.333333 21.333333 0 0 1-21.034666 17.706667H364.672a21.333333 21.333333 0 0 1-21.205333-23.722667z', fill: 'currentColor' }) ] @@ -812,11 +848,592 @@ export const iconMap: any = { }, [ h('path', { - d: 'M537.6 665.6c-12.8 12.8-12.8 32 0 44.8 6.4 6.4 12.8 6.4 25.6 6.4 6.4 0 19.2 0 25.6-6.4l128-134.4s6.4-6.4 6.4-12.8v-19.2-6.4c0-6.4-6.4-12.8-6.4-12.8l-134.4-128c-12.8-12.8-32-12.8-44.8 0-12.8 12.8-12.8 38.4 0 51.2l76.8 76.8H96c-19.2 0-32 12.8-32 32s12.8 32 32 32h524.8l-83.2 76.8z', + d: 'M896.128 113.792a42.666667 42.666667 0 0 1 42.24 36.864l0.426667 5.802667v711.509333a42.666667 42.666667 0 0 1-36.906667 42.24l-5.76 0.426667h-263.082667a21.333333 21.333333 0 0 1-20.906666-17.066667l-0.426667-4.266667v-42.666666a21.333333 21.333333 0 0 1 17.066667-20.906667l4.266666-0.426667h220.416V199.125333H281.941333l0.042667 
192.170667a21.333333 21.333333 0 0 1-21.333333 21.333333h-42.666667a21.333333 21.333333 0 0 1-21.333333-21.333333V135.125333a21.333333 21.333333 0 0 1 17.066666-20.906666l4.266667-0.426667h678.144zM424.96 485.973333c6.272 0 12.373333 2.218667 17.152 6.272l178.858667 151.338667a26.538667 26.538667 0 0 1 0 40.533333l-178.858667 151.381334a26.538667 26.538667 0 0 1-43.690667-20.266667v-103.765333H135.168a21.333333 21.333333 0 0 1-21.333333-21.333334v-42.666666a21.333333 21.333333 0 0 1 21.333333-21.333334H398.506667l-0.042667-113.621333c0-14.677333 11.904-26.538667 26.538667-26.538667z', + fill: 'currentColor' + }) + ] + ) + ]) + } + }, + 'app-chat-record': { + iconReader: () => { + return h('i', [ + h( + 'svg', + { + style: { height: '100%', width: '100%' }, + viewBox: '0 0 16 16', + version: '1.1', + xmlns: 'http://www.w3.org/2000/svg' + }, + [ + h('path', { + d: 'M11.3333 7.33334C11.3333 6.96515 11.6318 6.66667 12 6.66667H14.6667C15.0349 6.66667 15.3333 6.96515 15.3333 7.33334V12.6667C15.3333 13.0349 15.0349 13.3333 14.6667 13.3333H13.2761L12.4714 14.1381C12.2111 14.3984 11.7889 14.3984 11.5286 14.1381L10.7239 13.3333H7.33334C6.96515 13.3333 6.66667 13.0349 6.66667 12.6667V10C6.66667 9.63182 6.96515 9.33334 7.33334 9.33334H11.3333V7.33334ZM12.6667 8.00001V10C12.6667 10.3682 12.3682 10.6667 12 10.6667H8.00001V12H11C11.1768 12 11.3464 12.0702 11.4714 12.1953L12 12.7239L12.5286 12.1953C12.6536 12.0702 12.8232 12 13 12H14V8.00001H12.6667Z', + fill: 'currentColor' + }), + h('path', { + d: 'M1.33334 1.33333C0.965149 1.33333 0.666672 1.63181 0.666672 1.99999V10C0.666672 10.3682 0.965149 10.6667 1.33334 10.6667H2.72386L3.86193 11.8047C4.12228 12.0651 4.54439 12.0651 4.80474 11.8047L5.94281 10.6667H12C12.3682 10.6667 12.6667 10.3682 12.6667 10V1.99999C12.6667 1.63181 12.3682 1.33333 12 1.33333H1.33334ZM4.66667 5.99999C4.66667 6.36818 4.36819 6.66666 4.00001 6.66666C3.63182 6.66666 3.33334 6.36818 3.33334 5.99999C3.33334 5.6318 3.63182 5.33333 4.00001 5.33333C4.36819 5.33333 
4.66667 5.6318 4.66667 5.99999ZM7.33334 5.99999C7.33334 6.36818 7.03486 6.66666 6.66667 6.66666C6.29848 6.66666 6 6.36818 6 5.99999C6 5.6318 6.29848 5.33333 6.66667 5.33333C7.03486 5.33333 7.33334 5.6318 7.33334 5.99999ZM10 5.99999C10 6.36818 9.70153 6.66666 9.33334 6.66666C8.96515 6.66666 8.66667 6.36818 8.66667 5.99999C8.66667 5.6318 8.96515 5.33333 9.33334 5.33333C9.70153 5.33333 10 5.6318 10 5.99999Z', + fill: 'currentColor' + }) + ] + ) + ]) + } + }, + 'app-history-outlined': { + iconReader: () => { + return h('i', [ + h( + 'svg', + { + style: { height: '100%', width: '100%' }, + viewBox: '0 0 20 20', + version: '1.1', + xmlns: 'http://www.w3.org/2000/svg' + }, + [ + h('path', { + d: 'M18.6667 10.0001C18.6667 14.6025 14.9358 18.3334 10.3334 18.3334C7.68359 18.3334 5.32266 17.0967 3.79633 15.1689L5.12054 14.1563C6.3421 15.6864 8.22325 16.6667 10.3334 16.6667C14.0153 16.6667 17 13.682 17 10.0001C17 6.31818 14.0153 3.33341 10.3334 3.33341C7.03005 3.33341 4.28786 5.73596 3.75889 8.88897H4.3469C4.70187 8.88897 4.9136 9.28459 4.7167 9.57995L3.32493 11.6676C3.14901 11.9315 2.76125 11.9315 2.58533 11.6676L1.19356 9.57995C0.996651 9.28459 1.20838 8.88897 1.56336 8.88897H2.07347C2.61669 4.8119 6.10774 1.66675 10.3334 1.66675C14.9358 1.66675 18.6667 5.39771 18.6667 10.0001Z', + fill: 'currentColor' + }), + h('path', { + d: 'M10.8334 9.7223V7.11119C10.8334 6.86573 10.6344 6.66675 10.3889 6.66675H9.61115C9.36569 6.66675 9.16671 6.86573 9.16671 7.11119V10.9445C9.16671 11.19 9.36569 11.389 9.61115 11.389H13.1667C13.4122 11.389 13.6112 11.19 13.6112 10.9445V10.1667C13.6112 9.92129 13.4122 9.7223 13.1667 9.7223H10.8334Z', + fill: 'currentColor' + }) + ] + ) + ]) + } + }, + 'app-document-refresh': { + iconReader: () => { + return h('i', [ + h( + 'svg', + { + style: { height: '100%', width: '100%' }, + viewBox: '0 0 1024 1024', + version: '1.1', + xmlns: 'http://www.w3.org/2000/svg' + }, + [ + h('path', { + d: 'M512 170.666667a85.333333 85.333333 0 0 1 
85.333333-85.333334h256a85.333333 85.333333 0 0 1 85.333334 85.333334v256a85.333333 85.333333 0 0 1-85.333334 85.333333h-256a85.333333 85.333333 0 0 1-85.333333-85.333333V170.666667z m85.333333 0v256h256V170.666667h-256zM85.333333 597.333333a85.333333 85.333333 0 0 1 85.333334-85.333333h256a85.333333 85.333333 0 0 1 85.333333 85.333333v256a85.333333 85.333333 0 0 1-85.333333 85.333334H170.666667a85.333333 85.333333 0 0 1-85.333334-85.333334v-256z m85.333334 0v256h256v-256H170.666667zM128 298.666667a213.333333 213.333333 0 0 1 213.333333-213.333334h85.333334v85.333334H341.333333a128 128 0 0 0-128 128h57.514667a12.8 12.8 0 0 1 9.728 21.12l-100.181333 116.906666a12.8 12.8 0 0 1-19.456 0l-100.181334-116.906666A12.8 12.8 0 0 1 70.485333 298.666667H128zM896 725.333333a213.333333 213.333333 0 0 1-213.333333 213.333334h-85.333334v-85.333334h85.333334a128 128 0 0 0 128-128v-21.333333h-57.514667a12.8 12.8 0 0 1-9.728-21.12l100.181333-116.906667a12.8 12.8 0 0 1 19.456 0l100.181334 116.906667a12.8 12.8 0 0 1-9.728 21.12H896v21.333333z', + fill: 'currentColor' + }) + ] + ) + ]) + } + }, + 'app-export': { + iconReader: () => { + return h('i', [ + h( + 'svg', + { + style: { height: '100%', width: '100%' }, + viewBox: '0 0 1024 1024', + version: '1.1', + xmlns: 'http://www.w3.org/2000/svg' + }, + [ + h('path', { + d: 'M791.04 554.24l-386.432-1.728a21.248 21.248 0 0 1-21.12-21.248L383.36 490.88c-0.064-11.776 9.408-21.376 21.12-21.44h0.192l394.112 1.728-97.664-98.112a21.44 21.44 0 0 1 0-30.208l30.08-30.144a21.12 21.12 0 0 1 29.952 0l165.12 165.952a42.88 42.88 0 0 1 0 60.288l-165.12 165.952a21.12 21.12 0 0 1-30.016 0l-30.016-30.144a21.44 21.44 0 0 1 0-30.208L791.04 554.24z m-132.672-383.552H170.24v682.624h488.128c11.712 0 21.184 9.6 21.184 21.376v42.624a21.248 21.248 0 0 1-21.248 21.376h-530.56A42.56 42.56 0 0 1 85.376 896V128c0-23.552 19.008-42.688 42.496-42.688h530.56c11.712 0 21.184 9.6 21.184 21.376v42.624a21.248 21.248 0 0 1-21.248 21.376z', + fill: 'currentColor' + }) + ] + ) + 
]) + } + }, + 'app-import': { + iconReader: () => { + return h('i', [ + h( + 'svg', + { + style: { height: '100%', width: '100%' }, + viewBox: '0 0 16 16', + version: '1.1', + xmlns: 'http://www.w3.org/2000/svg' + }, + [ + h('path', { + d: 'M8.11532 8.65984L9.55945 10.0713C9.69264 10.2015 9.69264 10.4125 9.55945 10.5427L9.07714 11.0141C8.94395 11.1443 8.72801 11.1443 8.59482 11.0141L6.42439 8.89279L5.94207 8.42139C5.6757 8.16104 5.6757 7.73893 5.94207 7.47858L8.59482 4.88585C8.72801 4.75568 8.94395 4.75568 9.07714 4.88585L9.55945 5.35726C9.69264 5.48743 9.69264 5.69849 9.55945 5.82866L7.99017 7.36244L14.3241 7.33506C14.3251 7.33505 14.3261 7.33505 14.327 7.33506C14.5154 7.33582 14.6675 7.48567 14.6667 7.66977L14.664 8.30105C14.6632 8.48403 14.5117 8.63219 14.3245 8.633L8.11532 8.65984ZM10.5417 2.66665H2.69754V13.3333H10.5417C10.73 13.3333 10.8827 13.4826 10.8827 13.6666V14.3333C10.8827 14.5174 10.73 14.6666 10.5417 14.6666H2.01544C1.63873 14.6666 1.33334 14.3682 1.33334 14V1.99998C1.33334 1.63179 1.63873 1.33331 2.01544 1.33331H10.5417C10.73 1.33331 10.8827 1.48255 10.8827 1.66665V2.33331C10.8827 2.51741 10.73 2.66665 10.5417 2.66665Z', + fill: 'currentColor' + }) + ] + ) + ]) + } + }, + 'app-fitview': { + iconReader: () => { + return h('i', [ + h( + 'svg', + { + style: { height: '100%', width: '100%' }, + viewBox: '0 0 1024 1024', + version: '1.1', + xmlns: 'http://www.w3.org/2000/svg' + }, + [ + h('path', { + d: 'M128 85.333333h192a21.333333 21.333333 0 0 1 21.333333 21.333334v42.666666a21.333333 21.333333 0 0 1-21.333333 21.333334H170.666667v149.333333a21.333333 21.333333 0 0 1-21.333334 21.333333h-42.666666a21.333333 21.333333 0 0 1-21.333334-21.333333V128a42.666667 42.666667 0 0 1 42.666667-42.666667z m768 853.333334h-192a21.333333 21.333333 0 0 1-21.333333-21.333334v-42.666666a21.333333 21.333333 0 0 1 21.333333-21.333334H853.333333v-149.333333a21.333333 21.333333 0 0 1 21.333334-21.333333h42.666666a21.333333 21.333333 0 0 1 21.333334 21.333333V896a42.666667 
42.666667 0 0 1-42.666667 42.666667zM85.333333 896v-192a21.333333 21.333333 0 0 1 21.333334-21.333333h42.666666a21.333333 21.333333 0 0 1 21.333334 21.333333V853.333333h149.333333a21.333333 21.333333 0 0 1 21.333333 21.333334v42.666666a21.333333 21.333333 0 0 1-21.333333 21.333334H128a42.666667 42.666667 0 0 1-42.666667-42.666667zM938.666667 128v192a21.333333 21.333333 0 0 1-21.333334 21.333333h-42.666666a21.333333 21.333333 0 0 1-21.333334-21.333333V170.666667h-149.333333a21.333333 21.333333 0 0 1-21.333333-21.333334v-42.666666a21.333333 21.333333 0 0 1 21.333333-21.333334H896a42.666667 42.666667 0 0 1 42.666667 42.666667z', + fill: 'currentColor' + }), + h('path', { + d: 'M512 512m-170.666667 0a170.666667 170.666667 0 1 0 341.333334 0 170.666667 170.666667 0 1 0-341.333334 0Z', + fill: 'currentColor' + }) + ] + ) + ]) + } + }, + 'app-magnify': { + iconReader: () => { + return h('i', [ + h( + 'svg', + { + style: { height: '100%', width: '100%' }, + viewBox: '0 0 1024 1024', + version: '1.1', + xmlns: 'http://www.w3.org/2000/svg' + }, + [ + h('path', { + d: 'M366.165333 593.749333a21.333333 21.333333 0 0 1 30.208 0l30.165334 30.165334a21.333333 21.333333 0 0 1 0 30.208l-170.752 170.666666H377.173333a21.333333 21.333333 0 0 1 21.333334 21.333334v42.666666a21.333333 21.333333 0 0 1-21.333334 21.333334H156.458667a42.538667 42.538667 0 0 1-42.666667-42.666667v-220.16a21.333333 21.333333 0 0 1 21.333333-21.333333h42.666667a21.333333 21.333333 0 0 1 21.333333 21.333333v113.493333l167.04-167.04z m500.992-480a42.538667 42.538667 0 0 1 42.666667 42.666667v220.16a21.333333 21.333333 0 0 1-21.333333 21.333333h-42.666667a21.333333 21.333333 0 0 1-21.333333-21.333333v-113.493333l-167.04 167.04a21.333333 21.333333 0 0 1-30.165334 0l-30.165333-30.165334a21.333333 21.333333 0 0 1 0-30.165333l170.709333-170.666667h-121.344a21.333333 21.333333 0 0 1-21.333333-21.333333v-42.666667a21.333333 21.333333 0 0 1 21.333333-21.333333h220.672z', + fill: 'currentColor' + }) + ] + ) + ]) + } + 
}, + 'app-minify': { + iconReader: () => { + return h('i', [ + h( + 'svg', + { + style: { height: '100%', width: '100%' }, + viewBox: '0 0 1024 1024', + version: '1.1', + xmlns: 'http://www.w3.org/2000/svg' + }, + [ + h('path', { + d: 'M384.341333 597.205333a42.538667 42.538667 0 0 1 42.666667 42.666667v220.16a21.333333 21.333333 0 0 1-21.333333 21.333333h-42.666667a21.333333 21.333333 0 0 1-21.333333-21.333333v-113.493333l-167.04 167.04a21.333333 21.333333 0 0 1-30.165334 0l-30.165333-30.208a21.333333 21.333333 0 0 1 0-30.165334l170.709333-170.666666H163.669333a21.333333 21.333333 0 0 1-21.333333-21.333334v-42.666666a21.333333 21.333333 0 0 1 21.333333-21.333334h220.672zM849.92 110.506667a21.333333 21.333333 0 0 1 30.165333 0l30.165334 30.165333a21.333333 21.333333 0 0 1 0 30.165333l-170.709334 170.666667h121.344a21.333333 21.333333 0 0 1 21.333334 21.333333v42.666667a21.333333 21.333333 0 0 1-21.333334 21.333333h-220.672a42.538667 42.538667 0 0 1-42.666666-42.666666v-220.16a21.333333 21.333333 0 0 1 21.333333-21.333334h42.666667a21.333333 21.333333 0 0 1 21.333333 21.333334v113.493333l167.04-166.997333z', + fill: 'currentColor' + }) + ] + ) + ]) + } + }, + 'app-play-outlined': { + iconReader: () => { + return h('i', [ + h( + 'svg', + { + style: { height: '100%', width: '100%' }, + viewBox: '0 0 14 14', + version: '1.1', + xmlns: 'http://www.w3.org/2000/svg' + }, + [ + h('path', { + d: 'M2.63333 1.82346C2.81847 1.72056 3.04484 1.72611 3.22472 1.83795L10.8081 6.55299C10.9793 6.65945 11.0834 6.84677 11.0834 7.04838C11.0834 7.24999 10.9793 7.43731 10.8081 7.54376L3.22472 12.2588C3.04484 12.3707 2.81847 12.3762 2.63333 12.2733C2.44819 12.1704 2.33337 11.9752 2.33337 11.7634V2.33333C2.33337 2.12152 2.44819 1.92635 2.63333 1.82346ZM3.50004 3.38293V10.7138L9.39529 7.04838L3.50004 3.38293Z', + fill: 'currentColor' + }) + ] + ) + ]) + } + }, + 'app-save-outlined': { + iconReader: () => { + return h('i', [ + h( + 'svg', + { + style: { height: '100%', width: '100%' }, + 
viewBox: '0 0 14 14', + version: '1.1', + xmlns: 'http://www.w3.org/2000/svg' + }, + [ + h('path', { + d: 'M1.16666 2.53734C1.16666 1.78025 1.7804 1.1665 2.53749 1.1665H11.4625C12.2196 1.1665 12.8333 1.78025 12.8333 2.53734V11.4623C12.8333 12.2194 12.2196 12.8332 11.4625 12.8332H2.53749C1.7804 12.8332 1.16666 12.2194 1.16666 11.4623V2.53734ZM2.53749 2.33317C2.42473 2.33317 2.33332 2.42458 2.33332 2.53734V11.4623C2.33332 11.5751 2.42473 11.6665 2.53749 11.6665H11.4625C11.5753 11.6665 11.6667 11.5751 11.6667 11.4623V2.53734C11.6667 2.42457 11.5753 2.33317 11.4625 2.33317H2.53749Z', + fill: 'currentColor' + }), + h('path', { + d: 'M3.79166 1.74984C3.79166 1.42767 4.05282 1.1665 4.37499 1.1665H9.33332C9.65549 1.1665 9.91666 1.42767 9.91666 1.74984V6.99984C9.91666 7.322 9.65549 7.58317 9.33332 7.58317H4.37499C4.05282 7.58317 3.79166 7.322 3.79166 6.99984V1.74984ZM4.95832 2.33317V6.4165H8.74999V2.33317H4.95832Z', + fill: 'currentColor' + }), + h('path', { + d: 'M7.58333 3.2085C7.9055 3.2085 8.16667 3.46966 8.16667 3.79183V4.9585C8.16667 5.28066 7.9055 5.54183 7.58333 5.54183C7.26117 5.54183 7 5.28066 7 4.9585V3.79183C7 3.46966 7.26117 3.2085 7.58333 3.2085Z', + fill: 'currentColor' + }), + h('path', { + d: 'M2.62415 1.74984C2.62415 1.42767 2.88531 1.1665 3.20748 1.1665H10.4996C10.8217 1.1665 11.0829 1.42767 11.0829 1.74984C11.0829 2.072 10.8217 2.33317 10.4996 2.33317H3.20748C2.88531 2.33317 2.62415 2.072 2.62415 1.74984Z', + fill: 'currentColor' + }) + ] + ) + ]) + } + }, + 'app-reference-outlined': { + iconReader: () => { + return h('i', [ + h( + 'svg', + { + style: { height: '100%', width: '100%' }, + viewBox: '0 0 1024 1024', + version: '1.1', + xmlns: 'http://www.w3.org/2000/svg' + }, + [ + h('path', { + d: 'M121.216 714.368c-7.082667-17.493333-7.466667-83.413333-7.424-104.32 0.341333-142.72 34.048-256.426667 88.32-330.112C262.4 198.229333 351.701333 161.024 460.8 172.8c7.893333 0.853333 11.946667 7.338667 10.581333 16.981333l-7.381333 51.285334c-1.749333 
12.202667-9.813333 12.885333-17.621333 12.202666-138.709333-11.946667-232.576 84.053333-245.76 296.704a165.632 165.632 0 0 1 83.754666-22.528c91.050667 0 164.906667 72.96 164.906667 162.944C449.28 780.373333 375.466667 853.333333 284.373333 853.333333c-82.858667 0-151.424-60.330667-163.157333-138.965333z m438.570667 0c-7.082667-17.493333-7.509333-83.413333-7.466667-104.32 0.426667-142.72 34.090667-256.426667 88.405333-330.112 60.202667-81.706667 149.504-118.912 258.645334-107.136 7.893333 0.853333 11.946667 7.338667 10.581333 16.981333l-7.381333 51.285334c-1.749333 12.202667-9.813333 12.885333-17.621334 12.202666-138.752-11.946667-232.576 84.053333-245.76 296.704a165.632 165.632 0 0 1 83.712-22.528c91.093333 0 164.906667 72.96 164.906667 162.944 0 90.026667-73.813333 162.944-164.906667 162.944-82.773333 0-151.381333-60.330667-163.114666-138.965333z', + fill: 'currentColor' + }) + ] + ) + ]) + } + }, + 'app-access': { + iconReader: () => { + return h('i', [ + h( + 'svg', + { + style: { height: '100%', width: '100%' }, + viewBox: '0 0 1024 1024', + version: '1.1', + xmlns: 'http://www.w3.org/2000/svg' + }, + [ + h('path', { + d: 'M490.368 48.554667a42.666667 42.666667 0 0 1 43.264 0l362.666667 213.333333A42.666667 42.666667 0 0 1 917.333333 298.666667v426.666666a42.666667 42.666667 0 0 1-21.034666 36.778667l-362.666667 213.333333a42.666667 42.666667 0 0 1-43.264 0l-362.666667-213.333333A42.666667 42.666667 0 0 1 106.666667 725.333333V298.666667a42.666667 42.666667 0 0 1 21.034666-36.778667l362.666667-213.333333zM192 323.072v377.856L512 889.173333l320-188.245333V323.072L512 134.826667 192 323.072z', + fill: 'currentColor' + }), + h('path', { + d: 'M705.194667 441.472a42.666667 42.666667 0 1 0-45.226667-72.362667l-148.096 92.586667L363.946667 369.066667a42.666667 42.666667 0 1 0-45.312 72.362666L469.333333 535.722667V704a42.666667 42.666667 0 1 0 85.333334 0v-168.448l150.528-94.08z', + fill: 'currentColor' + }) + ] + ) + ]) + } + }, + 'app-access-active': { + 
iconReader: () => { + return h('i', [ + h( + 'svg', + { + style: { height: '100%', width: '100%' }, + viewBox: '0 0 1024 1024', + version: '1.1', + xmlns: 'http://www.w3.org/2000/svg' + }, + [ + h('path', { + d: 'M533.632 48.554667a42.666667 42.666667 0 0 0-43.264 0l-362.666667 213.333333A42.666667 42.666667 0 0 0 106.666667 298.666667v426.666666a42.666667 42.666667 0 0 0 21.034666 36.778667l362.666667 213.333333a42.666667 42.666667 0 0 0 43.264 0l362.666667-213.333333A42.666667 42.666667 0 0 0 917.333333 725.333333V298.666667a42.666667 42.666667 0 0 0-21.034666-36.778667l-362.666667-213.333333z m185.130667 334.08a42.666667 42.666667 0 0 1-13.568 58.837333L554.666667 535.552V704a42.666667 42.666667 0 1 1-85.333334 0v-168.277333l-150.613333-94.293334a42.666667 42.666667 0 0 1 45.226667-72.32l147.925333 92.586667 148.053333-92.586667a42.666667 42.666667 0 0 1 58.837334 13.568z', + fill: 'currentColor' + }) + ] + ) + ]) + } + }, + 'app-video-play': { + iconReader: () => { + return h('i', [ + h( + 'svg', + { + style: { height: '100%', width: '100%' }, + viewBox: '0 0 1024 1024', + version: '1.1', + xmlns: 'http://www.w3.org/2000/svg' + }, + [ + h('path', { + d: 'M512 896a384 384 0 1 0 0-768 384 384 0 0 0 0 768z m469.333333-384c0 259.2-210.133333 469.333333-469.333333 469.333333S42.666667 771.2 42.666667 512 252.8 42.666667 512 42.666667s469.333333 210.133333 469.333333 469.333333z', + fill: 'currentColor' + }), + h('path', { + d: 'M686.890667 539.776l-253.141334 159.274667a32.298667 32.298667 0 0 1-44.8-10.453334 32.896 32.896 0 0 1-4.949333-17.322666V352.768a32.64 32.64 0 0 1 32.512-32.768c6.101333 0 12.074667 1.706667 17.28 4.992l253.098667 159.232a32.853333 32.853333 0 0 1 0 55.552z', + fill: 'currentColor' + }) + ] + ) + ]) + } + }, + 'app-video-stop': { + iconReader: () => { + return h('i', [ + h( + 'svg', + { + style: { height: '100%', width: '100%' }, + viewBox: '0 0 1024 1024', + version: '1.1', + xmlns: 'http://www.w3.org/2000/svg' + }, + [ + h('path', { + d: 
'M981.333333 512c0 259.2-210.133333 469.333333-469.333333 469.333333S42.666667 771.2 42.666667 512 252.8 42.666667 512 42.666667s469.333333 210.133333 469.333333 469.333333z m-85.333333 0a384 384 0 1 0-768 0 384 384 0 0 0 768 0zM384 341.333333h256c23.466667 0 42.666667 19.072 42.666667 42.666667v256c0 23.552-19.2 42.666667-42.666667 42.666667H384c-23.466667 0-42.666667-19.114667-42.666667-42.666667V384c0-23.594667 19.2-42.666667 42.666667-42.666667z', + fill: 'currentColor' + }) + ] + ) + ]) + } + }, + 'app-video-pause': { + iconReader: () => { + return h('i', [ + h( + 'svg', + { + style: { height: '100%', width: '100%' }, + viewBox: '0 0 1024 1024', + version: '1.1', + xmlns: 'http://www.w3.org/2000/svg' + }, + [ + h('path', { + d: 'M405.333333 341.333333a21.333333 21.333333 0 0 0-21.333333 21.333334v298.666666a21.333333 21.333333 0 0 0 21.333333 21.333334h42.666667a21.333333 21.333333 0 0 0 21.333333-21.333334v-298.666666a21.333333 21.333333 0 0 0-21.333333-21.333334h-42.666667zM576 341.333333a21.333333 21.333333 0 0 0-21.333333 21.333334v298.666666a21.333333 21.333333 0 0 0 21.333333 21.333334h42.666667a21.333333 21.333333 0 0 0 21.333333-21.333334v-298.666666a21.333333 21.333333 0 0 0-21.333333-21.333334h-42.666667z', + fill: 'currentColor' + }), + h('path', { + d: 'M512 42.666667C252.8 42.666667 42.666667 252.8 42.666667 512s210.133333 469.333333 469.333333 469.333333 469.333333-210.133333 469.333333-469.333333S771.2 42.666667 512 42.666667zM128 512a384 384 0 1 1 768 0 384 384 0 0 1-768 0z', + fill: 'currentColor' + }) + ] + ) + ]) + } + }, + 'app-invisible': { + iconReader: () => { + return h('i', [ + h( + 'svg', + { + style: { height: '100%', width: '100%' }, + viewBox: '0 0 1024 1024', + version: '1.1', + xmlns: 'http://www.w3.org/2000/svg' + }, + [ + h('path', { + d: 'M512 640c-28.032 0-55.466667-2.218667-82.090667-6.4l-21.248 79.274667a21.333333 21.333333 0 0 1-26.154666 15.061333L341.333333 716.885333a21.333333 21.333333 0 0 
1-15.061333-26.112l20.821333-77.653333a473.770667 473.770667 0 0 1-97.152-45.653333l-67.84 67.84a21.333333 21.333333 0 0 1-30.122666 0l-30.165334-30.208a21.333333 21.333333 0 0 1 0-30.165334l59.733334-59.733333A386.389333 386.389333 0 0 1 104.789333 416.426667a37.76 37.76 0 0 1 7.594667-45.397334c10.496-9.514667 17.877333-16 24.32-22.442666a170.24 170.24 0 0 0 1.834667-1.92c9.301333-9.6 25.173333-6.016 30.634666 6.186666C222.336 471.936 349.568 554.666667 512 554.666667c155.648 0 285.866667-80.512 338.090667-190.976 1.365333-2.858667 2.901333-6.485333 4.437333-10.325334a18.346667 18.346667 0 0 1 29.866667-6.613333l27.392 27.434667a36.565333 36.565333 0 0 1 6.997333 42.666666c-1.792 3.456-3.541333 6.698667-5.034667 9.301334a390.4 390.4 0 0 1-76.928 94.293333l54.442667 54.485333a21.333333 21.333333 0 0 1 0 30.165334l-30.165333 30.165333a21.333333 21.333333 0 0 1-30.165334 0l-63.658666-63.658667a475.306667 475.306667 0 0 1-90.282667 41.514667l20.778667 77.653333a21.333333 21.333333 0 0 1-15.061334 26.112l-41.216 11.093334a21.333333 21.333333 0 0 1-26.154666-15.104l-21.248-79.317334c-26.581333 4.266667-54.058667 6.442667-82.090667 6.442667z', + fill: 'currentColor' + }) + ] + ) + ]) + } + }, + + 'app-beautify': { + iconReader: () => { + return h('i', [ + h( + 'svg', + { + style: { height: '100%', width: '100%' }, + viewBox: '0 0 1024 1024', + version: '1.1', + xmlns: 'http://www.w3.org/2000/svg' + }, + [ + h('path', { + d: 'M739.6864 689.92l4.2496 3.584 136.4992 135.936a34.1504 34.1504 0 0 1-43.9296 51.968l-4.1984-3.584-136.5504-135.936a34.1504 34.1504 0 0 1 43.9296-51.968zM663.4496 151.552a34.1504 34.1504 0 0 1 51.2512 30.464l-5.9392 216.6272 156.4672 146.1248a34.1504 34.1504 0 0 1-8.6528 55.808l-4.8128 1.792-202.8032 61.0816-87.4496 197.12a34.1504 34.1504 0 0 1-56.32 9.216l-3.2768-4.096-119.5008-178.432-209.9712-24.064a34.1504 34.1504 0 0 1-26.1632-50.176l2.7648-4.3008 129.28-171.7248-42.5472-212.3776a34.1504 34.1504 0 0 1 40.448-40.1408l4.6592 1.3312 198.912 
72.3456z m-18.6368 89.7536l-144.5376 83.968a34.1504 34.1504 0 0 1-28.8256 2.56L314.5728 270.592l33.792 167.8848c1.4848 7.68 0.3584 15.5136-3.1744 22.3232l-3.072 4.9152-102.656 136.2944 166.4 19.1488c8.2944 0.9216 15.872 4.864 21.4016 10.9568l3.072 3.9424 93.8496 140.032 68.7104-154.7776a34.1504 34.1504 0 0 1 16.7936-17.0496l4.608-1.792 160.9216-48.4864-124.2624-116.0192a34.1504 34.1504 0 0 1-10.4448-20.0704l-0.3584-5.7856 4.6592-170.9056z', + fill: 'currentColor' + }) + ] + ) + ]) + } + }, + 'app-retract': { + iconReader: () => { + return h('i', [ + h( + 'svg', + { + style: { height: '100%', width: '100%' }, + viewBox: '0 0 16 16', + version: '1.1', + xmlns: 'http://www.w3.org/2000/svg' + }, + [ + h('path', { + d: 'M5.44661 0.747985C5.55509 0.639506 5.73097 0.639506 5.83945 0.747985L8.00004 2.90858L10.1606 0.748004C10.2691 0.639525 10.445 0.639525 10.5534 0.748004L11.1034 1.29798C11.2119 1.40645 11.2119 1.58233 11.1034 1.69081L8.7488 4.04544L8.74644 4.04782L8.19647 4.59779C8.16892 4.62534 8.13703 4.64589 8.10299 4.65945C8.003 4.6993 7.88453 4.67875 7.80359 4.59781L7.25362 4.04784L7.25003 4.04419L4.89664 1.69079C4.78816 1.58232 4.78816 1.40644 4.89664 1.29796L5.44661 0.747985Z', + fill: 'currentColor' + }), + h('path', { + d: 'M1.99999 5.82774C1.63181 5.82774 1.33333 6.12622 1.33333 6.49441V9.16107C1.33333 9.52926 1.63181 9.82774 2 9.82774H14C14.3682 9.82774 14.6667 9.52926 14.6667 9.16107V6.49441C14.6667 6.12622 14.3682 5.82774 14 5.82774H1.99999ZM13.3333 7.16108V8.49441H2.66666V7.16108H13.3333Z', + fill: 'currentColor' + }), + h('path', { + d: 'M10.1605 14.9075C10.269 15.016 10.4449 15.016 10.5534 14.9075L11.1033 14.3575C11.2118 14.249 11.2118 14.0732 11.1033 13.9647L8.75 11.6113L8.74637 11.6076L8.1964 11.0577C8.11546 10.9767 7.99699 10.9562 7.897 10.996C7.86296 11.0096 7.83107 11.0301 7.80352 11.0577L7.25354 11.6077L7.25117 11.6101L4.89657 13.9647C4.78809 14.0731 4.78809 14.249 4.89657 14.3575L5.44654 14.9075C5.55502 15.016 5.7309 15.016 5.83938 14.9075L7.99995 
12.7469L10.1605 14.9075Z', + fill: 'currentColor' + }) + ] + ) + ]) + } + }, + 'app-extend': { + iconReader: () => { + return h('i', [ + h( + 'svg', + { + style: { height: '100%', width: '100%' }, + viewBox: '0 0 16 16', + version: '1.1', + xmlns: 'http://www.w3.org/2000/svg' + }, + [ + h('path', { + d: 'M10.5534 5.07974C10.4449 5.18822 10.269 5.18822 10.1605 5.07974L7.99992 2.91915L5.83935 5.07972C5.73087 5.1882 5.555 5.1882 5.44652 5.07972L4.89654 4.52975C4.78807 4.42127 4.78807 4.24539 4.89654 4.13691L7.25117 1.78229L7.25352 1.77991L7.80349 1.22994C7.83019 1.20324 7.86098 1.18311 7.89384 1.16955C7.99448 1.12801 8.11459 1.14813 8.19638 1.22992L8.74635 1.77989L8.74998 1.78359L11.1033 4.13693C11.2118 4.24541 11.2118 4.42129 11.1033 4.52977L10.5534 5.07974Z', + fill: 'currentColor' + }), + h('path', { + d: 'M5.83943 10.9202C5.73095 10.8118 5.55507 10.8118 5.44659 10.9202L4.89662 11.4702C4.78814 11.5787 4.78814 11.7546 4.89662 11.863L7.24997 14.2164L7.25359 14.2201L7.80357 14.7701C7.8862 14.8527 8.00795 14.8724 8.10922 14.8291C8.14091 14.8156 8.17059 14.7959 8.19645 14.77L8.74642 14.2201L8.74873 14.2177L11.1034 11.8631C11.2119 11.7546 11.2119 11.5787 11.1034 11.4702L10.5534 10.9202C10.4449 10.8118 10.2691 10.8118 10.1606 10.9202L8.00002 13.0808L5.83943 10.9202Z', + fill: 'currentColor' + }), + h('path', { + d: 'M2.00004 6C1.63185 6 1.33337 6.29848 1.33337 6.66667V9.33333C1.33337 9.70152 1.63185 10 2.00004 10H14C14.3682 10 14.6667 9.70152 14.6667 9.33333V6.66667C14.6667 6.29848 14.3682 6 14 6H2.00004ZM13.3334 7.33333V8.66667H2.66671V7.33333H13.3334Z', + fill: 'currentColor' + }) + ] + ) + ]) + } + }, + 'app-close': { + iconReader: () => { + return h('i', [ + h( + 'svg', + { + style: { height: '100%', width: '100%' }, + viewBox: '0 0 16 16', + version: '1.1', + xmlns: 'http://www.w3.org/2000/svg' + }, + [ + h('path', { + d: 'M7.96141 6.98572L12.4398 2.50738C12.5699 2.3772 12.781 2.3772 12.9112 2.50738L13.3826 2.97878C13.5127 3.10895 13.5127 3.32001 13.3826 
3.45018L8.90422 7.92853L13.3826 12.4069C13.5127 12.537 13.5127 12.7481 13.3826 12.8783L12.9112 13.3497C12.781 13.4799 12.5699 13.4799 12.4398 13.3497L7.96141 8.87134L3.48307 13.3497C3.35289 13.4799 3.14184 13.4799 3.01166 13.3497L2.54026 12.8783C2.41008 12.7481 2.41008 12.537 2.54026 12.4069L7.0186 7.92853L2.54026 3.45018C2.41008 3.32001 2.41008 3.10895 2.54026 2.97878L3.01166 2.50738C3.14184 2.3772 3.35289 2.3772 3.48307 2.50738L7.96141 6.98572Z', + fill: 'currentColor' + }) + ] + ) + ]) + } + }, + 'app-add-application': { + iconReader: () => { + return h('i', [ + h( + 'svg', + { + viewBox: '0 0 16 16', + version: '1.1', + xmlns: 'http://www.w3.org/2000/svg' + }, + [ + h('path', { + d: 'M3.33333 2.00002H10.6667V3.67969C10.6667 3.76809 10.7018 3.85288 10.7643 3.91539C10.8268 3.9779 10.9116 4.01302 11 4.01302H12.6667V8.66669H14V3.27702C14.0001 3.10027 13.9299 2.93074 13.805 2.80569L11.862 0.86202C11.8001 0.800071 11.7265 0.750933 11.6456 0.717417C11.5647 0.6839 11.4779 0.666662 11.3903 0.666687H2.66667C2.48986 0.666687 2.32029 0.736925 2.19526 0.861949C2.07024 0.986973 2 1.15654 2 1.33335V14.6667C2 14.8435 2.07024 15.0131 2.19526 15.1381C2.32029 15.2631 2.48986 15.3334 2.66667 15.3334H8V14H3.33333V2.00002Z', + fill: 'currentColor' + }), + h('path', { + d: 'M11.6667 10C11.4826 10 11.3333 10.1492 11.3333 10.3333V12H9.66668C9.48258 12 9.33334 12.1492 9.33334 12.3333V13C9.33334 13.1841 9.48258 13.3333 9.66668 13.3333H11.3333V15C11.3333 15.1841 11.4826 15.3333 11.6667 15.3333H12.3333C12.5174 15.3333 12.6667 15.1841 12.6667 15V13.3333H14.3333C14.5174 13.3333 14.6667 13.1841 14.6667 13V12.3333C14.6667 12.1492 14.5174 12 14.3333 12H12.6667V10.3333C12.6667 10.1492 12.5174 10 12.3333 10H11.6667Z', + fill: 'currentColor' + }) + ] + ) + ]) + } + }, + 'app-quote': { + iconReader: () => { + return h('i', [ + h( + 'svg', + { + style: { height: '100%', width: '100%' }, + viewBox: '0 0 1024 1024', + version: '1.1', + xmlns: 'http://www.w3.org/2000/svg' + }, + [ + h('path', { + d: 
'M800.768 477.184c-14.336 0-30.72 2.048-45.056 4.096 18.432-51.2 77.824-188.416 237.568-315.392 36.864-28.672-20.48-86.016-59.392-57.344-155.648 116.736-356.352 317.44-356.352 573.44v20.48c0 122.88 100.352 223.232 223.232 223.232S1024 825.344 1024 702.464c0-124.928-100.352-225.28-223.232-225.28zM223.232 477.184c-14.336 0-30.72 2.048-45.056 4.096 18.432-51.2 77.824-188.416 237.568-315.392 36.864-28.672-20.48-86.016-59.392-57.344C200.704 225.28 0 425.984 0 681.984v20.48c0 122.88 100.352 223.232 223.232 223.232s223.232-100.352 223.232-223.232c0-124.928-100.352-225.28-223.232-225.28z', + fill: 'currentColor' + }) + ] + ) + ]) + } + }, + 'app-user-input': { + iconReader: () => { + return h('i', [ + h( + 'svg', + { + style: { height: '100%', width: '100%' }, + viewBox: '0 0 1024 1024', + version: '1.1', + xmlns: 'http://www.w3.org/2000/svg' + }, + [ + h('path', { + d: 'M85.333333 234.666667a149.333333 149.333333 0 0 1 292.48-42.666667H917.333333a21.333333 21.333333 0 0 1 21.333334 21.333333v42.666667a21.333333 21.333333 0 0 1-21.333334 21.333333H377.813333A149.418667 149.418667 0 0 1 85.333333 234.666667z m21.333334 320a21.333333 21.333333 0 0 1-21.333334-21.333334v-42.666666a21.333333 21.333333 0 0 1 21.333334-21.333334h262.186666a149.418667 149.418667 0 0 1 286.293334 0H917.333333a21.333333 21.333333 0 0 1 21.333334 21.333334v42.666666a21.333333 21.333333 0 0 1-21.333334 21.333334h-262.186666a149.418667 149.418667 0 0 1-286.293334 0H106.666667z m405.333333 21.333333a64 64 0 1 0 0-128 64 64 0 0 0 0 128z m-405.333333 256A21.333333 21.333333 0 0 1 85.333333 810.666667v-42.666667a21.333333 21.333333 0 0 1 21.333334-21.333333h539.52a149.418667 149.418667 0 0 1 292.48 42.666666 149.333333 149.333333 0 0 1-292.48 42.666667H106.666667z m682.666666-106.666667a64 64 0 1 0 0 128 64 64 0 0 0 0-128zM234.666667 298.666667a64 64 0 1 0 0-128 64 64 0 0 0 0 128z', + fill: 'currentColor' + }) + ] + ) + ]) + } + }, + 'app-keyboard': { + iconReader: () => { + return h('i', [ + h( + 'svg', 
+ { + style: { height: '100%', width: '100%' }, + viewBox: '0 0 1024 1024', + version: '1.1', + xmlns: 'http://www.w3.org/2000/svg' + }, + [ + h('path', { + d: 'M373.333333 352a53.333333 53.333333 0 1 1-106.666666 0 53.333333 53.333333 0 0 1 106.666666 0zM320 576a53.333333 53.333333 0 1 0 0-106.666667 53.333333 53.333333 0 0 0 0 106.666667zM565.333333 352a53.333333 53.333333 0 1 1-106.666666 0 53.333333 53.333333 0 0 1 106.666666 0zM512 576a53.333333 53.333333 0 1 0 0-106.666667 53.333333 53.333333 0 0 0 0 106.666667zM757.333333 352a53.333333 53.333333 0 1 1-106.666666 0 53.333333 53.333333 0 0 1 106.666666 0zM704 576a53.333333 53.333333 0 1 0 0-106.666667 53.333333 53.333333 0 0 0 0 106.666667zM362.666667 661.333333a42.666667 42.666667 0 1 0 0 85.333334h298.666666a42.666667 42.666667 0 1 0 0-85.333334h-298.666666z', fill: 'currentColor' }), h('path', { - d: 'M960 384c0-6.4-6.4-12.8-6.4-19.2L704 128c-6.4-6.4-6.4-6.4-12.8-6.4h-6.4-371.2c-76.8 0-140.8 64-140.8 140.8v172.8c0 19.2 12.8 32 32 32s25.6-19.2 25.6-38.4V262.4c0-44.8 38.4-76.8 76.8-76.8h339.2v211.2c0 19.2 12.8 32 32 32H896V768c0 44.8-38.4 76.8-76.8 76.8H313.6c-44.8 0-76.8-38.4-76.8-76.8v-89.6c0-19.2-12.8-32-32-32s-32 12.8-32 32V768c0 76.8 64 140.8 140.8 140.8h505.6c76.8 0 140.8-64 140.8-140.8V384c0 6.4 0 6.4 0 0z m-243.2-25.6V224l134.4 134.4h-134.4z', + d: 'M512 42.666667C252.8 42.666667 42.666667 252.8 42.666667 512s210.133333 469.333333 469.333333 469.333333 469.333333-210.133333 469.333333-469.333333S771.2 42.666667 512 42.666667zM128 512a384 384 0 1 1 768 0 384 384 0 0 1-768 0z', fill: 'currentColor' }) ] diff --git a/ui/src/components/index.ts b/ui/src/components/index.ts index 114ad716cd4..5fe7bf8194c 100644 --- a/ui/src/components/index.ts +++ b/ui/src/components/index.ts @@ -12,13 +12,19 @@ import AppTable from './app-table/index.vue' import ReadWrite from './read-write/index.vue' import TagEllipsis from './tag-ellipsis/index.vue' import CommonList from './common-list/index.vue' -import 
MarkdownRenderer from './markdown-renderer/index.vue' import dynamicsForm from './dynamics-form' import CardCheckbox from './card-checkbox/index.vue' import AiChat from './ai-chat/index.vue' import InfiniteScroll from './infinite-scroll/index.vue' import AutoTooltip from './auto-tooltip/index.vue' - +import MdEditor from './markdown/MdEditor.vue' +import MdPreview from './markdown/MdPreview.vue' +import MdEditorMagnify from './markdown/MdEditorMagnify.vue' +import LogoFull from './logo/LogoFull.vue' +import LogoIcon from './logo/LogoIcon.vue' +import SendIcon from './logo/SendIcon.vue' +import CodemirrorEditor from './codemirror-editor/index.vue' +import ModelSelect from './model-select/index.vue' export default { install(app: App) { app.component(AppIcon.name, AppIcon) @@ -35,10 +41,17 @@ export default { app.component(TagEllipsis.name, TagEllipsis) app.component(CommonList.name, CommonList) app.use(dynamicsForm) - app.component(MarkdownRenderer.name, MarkdownRenderer) app.component(CardCheckbox.name, CardCheckbox) app.component(AiChat.name, AiChat) app.component(InfiniteScroll.name, InfiniteScroll) app.component(AutoTooltip.name, AutoTooltip) + app.component(MdPreview.name, MdPreview) + app.component(MdEditor.name, MdEditor) + app.component(LogoFull.name, LogoFull) + app.component(LogoIcon.name, LogoIcon) + app.component(SendIcon.name, SendIcon) + app.component(CodemirrorEditor.name, CodemirrorEditor) + app.component(MdEditorMagnify.name, MdEditorMagnify) + app.component(ModelSelect.name, ModelSelect) } } diff --git a/ui/src/components/infinite-scroll/index.vue b/ui/src/components/infinite-scroll/index.vue index a562c5c2802..b8c8ec0958e 100644 --- a/ui/src/components/infinite-scroll/index.vue +++ b/ui/src/components/infinite-scroll/index.vue @@ -4,15 +4,15 @@
- 加载中... + {{ $t('components.loading') }}... - 到底啦! + {{ $t('components.noMore') }}
- diff --git a/ui/src/components/loading/DownloadLoading.vue b/ui/src/components/loading/DownloadLoading.vue new file mode 100644 index 00000000000..83332c8c518 --- /dev/null +++ b/ui/src/components/loading/DownloadLoading.vue @@ -0,0 +1,93 @@ + + + diff --git a/ui/src/components/login-container/index.vue b/ui/src/components/login-container/index.vue index fd9bc46392a..5675a63c6b3 100644 --- a/ui/src/components/login-container/index.vue +++ b/ui/src/components/login-container/index.vue @@ -1,9 +1,8 @@ - diff --git a/ui/src/components/logo/LogoFull.vue b/ui/src/components/logo/LogoFull.vue new file mode 100644 index 00000000000..ba793399988 --- /dev/null +++ b/ui/src/components/logo/LogoFull.vue @@ -0,0 +1,95 @@ + + + diff --git a/ui/src/components/logo/LogoIcon.vue b/ui/src/components/logo/LogoIcon.vue new file mode 100644 index 00000000000..51d47db826b --- /dev/null +++ b/ui/src/components/logo/LogoIcon.vue @@ -0,0 +1,59 @@ + + + diff --git a/ui/src/components/logo/SendIcon.vue b/ui/src/components/logo/SendIcon.vue new file mode 100644 index 00000000000..933976c0bc2 --- /dev/null +++ b/ui/src/components/logo/SendIcon.vue @@ -0,0 +1,44 @@ + + + diff --git a/ui/src/components/markdown-editor/index.vue b/ui/src/components/markdown-editor/index.vue deleted file mode 100644 index c5efbc2c7c6..00000000000 --- a/ui/src/components/markdown-editor/index.vue +++ /dev/null @@ -1,52 +0,0 @@ - - - diff --git a/ui/src/components/markdown-renderer/MdRenderer.vue b/ui/src/components/markdown-renderer/MdRenderer.vue deleted file mode 100644 index 416cf7f2bbb..00000000000 --- a/ui/src/components/markdown-renderer/MdRenderer.vue +++ /dev/null @@ -1,47 +0,0 @@ - - - diff --git a/ui/src/components/markdown-renderer/index.vue b/ui/src/components/markdown-renderer/index.vue deleted file mode 100644 index 586722d14e3..00000000000 --- a/ui/src/components/markdown-renderer/index.vue +++ /dev/null @@ -1,66 +0,0 @@ - - - - diff --git a/ui/src/components/markdown/EchartsRander.vue 
b/ui/src/components/markdown/EchartsRander.vue new file mode 100644 index 00000000000..f008cdaa00a --- /dev/null +++ b/ui/src/components/markdown/EchartsRander.vue @@ -0,0 +1,119 @@ + + + diff --git a/ui/src/components/markdown/FormRander.vue b/ui/src/components/markdown/FormRander.vue new file mode 100644 index 00000000000..96c74380907 --- /dev/null +++ b/ui/src/components/markdown/FormRander.vue @@ -0,0 +1,92 @@ + + + diff --git a/ui/src/components/markdown/HtmlRander.vue b/ui/src/components/markdown/HtmlRander.vue new file mode 100644 index 00000000000..a8be059593a --- /dev/null +++ b/ui/src/components/markdown/HtmlRander.vue @@ -0,0 +1,36 @@ + + + diff --git a/ui/src/components/markdown/MdEditor.vue b/ui/src/components/markdown/MdEditor.vue new file mode 100644 index 00000000000..6a621a04ed6 --- /dev/null +++ b/ui/src/components/markdown/MdEditor.vue @@ -0,0 +1,26 @@ + + + diff --git a/ui/src/components/markdown/MdEditorMagnify.vue b/ui/src/components/markdown/MdEditorMagnify.vue new file mode 100644 index 00000000000..e8555abe387 --- /dev/null +++ b/ui/src/components/markdown/MdEditorMagnify.vue @@ -0,0 +1,68 @@ + + + + + diff --git a/ui/src/components/markdown/MdPreview.vue b/ui/src/components/markdown/MdPreview.vue new file mode 100644 index 00000000000..0d8c3ab15fc --- /dev/null +++ b/ui/src/components/markdown/MdPreview.vue @@ -0,0 +1,23 @@ + + + diff --git a/ui/src/components/markdown/MdRenderer.vue b/ui/src/components/markdown/MdRenderer.vue new file mode 100644 index 00000000000..e2fc4ee903e --- /dev/null +++ b/ui/src/components/markdown/MdRenderer.vue @@ -0,0 +1,256 @@ + + + diff --git a/ui/src/components/markdown/ReasoningRander.vue b/ui/src/components/markdown/ReasoningRander.vue new file mode 100644 index 00000000000..b42ae7fed33 --- /dev/null +++ b/ui/src/components/markdown/ReasoningRander.vue @@ -0,0 +1,35 @@ + + + diff --git a/ui/src/components/markdown-editor/assets/font_prouiefeic.js b/ui/src/components/markdown/assets/markdown-iconfont.js 
similarity index 100% rename from ui/src/components/markdown-editor/assets/font_prouiefeic.js rename to ui/src/components/markdown/assets/markdown-iconfont.js diff --git a/ui/src/components/model-select/index.vue b/ui/src/components/model-select/index.vue new file mode 100644 index 00000000000..116824e3c63 --- /dev/null +++ b/ui/src/components/model-select/index.vue @@ -0,0 +1,161 @@ + + + diff --git a/ui/src/components/read-write/index.vue b/ui/src/components/read-write/index.vue index 31ec539cb99..f01cbb9b74d 100644 --- a/ui/src/components/read-write/index.vue +++ b/ui/src/components/read-write/index.vue @@ -1,12 +1,17 @@ -
- - - diff --git a/ui/src/layout/components/app-header/index.vue b/ui/src/layout/components/app-header/index.vue new file mode 100644 index 00000000000..47b97cf0a50 --- /dev/null +++ b/ui/src/layout/components/app-header/index.vue @@ -0,0 +1,34 @@ + + + + + diff --git a/ui/src/layout/components/app-main/index.vue b/ui/src/layout/components/app-main/index.vue index d9a3fabe5aa..1598772f241 100644 --- a/ui/src/layout/components/app-main/index.vue +++ b/ui/src/layout/components/app-main/index.vue @@ -2,7 +2,7 @@ - + @@ -17,8 +17,7 @@ const route = useRoute() const cachedViews: any = ref([]) onBeforeUpdate(() => { const { name, meta } = route - let isCached = meta?.cache - if (isCached && name && !cachedViews.value.includes(name)) { + if (name && !cachedViews.value.includes(name)) { cachedViews.value.push(name) } }) diff --git a/ui/src/layout/components/breadcrumb/index.vue b/ui/src/layout/components/breadcrumb/index.vue index 6656c63f963..ae5011318d6 100644 --- a/ui/src/layout/components/breadcrumb/index.vue +++ b/ui/src/layout/components/breadcrumb/index.vue @@ -36,10 +36,19 @@ > - + + + + -
{{ current?.name }}
+
{{ current?.name }}
@@ -63,6 +72,7 @@ >
+ - + + + + - {{ item?.name }} + {{ item?.name }} - + + + + diff --git a/ui/src/layout/components/top-bar/avatar/AboutDialog.vue b/ui/src/layout/components/top-bar/avatar/AboutDialog.vue index aa8361c33b7..59ecfc07607 100644 --- a/ui/src/layout/components/top-bar/avatar/AboutDialog.vue +++ b/ui/src/layout/components/top-bar/avatar/AboutDialog.vue @@ -1,56 +1,102 @@ diff --git a/ui/src/layout/components/top-bar/avatar/index.vue b/ui/src/layout/components/top-bar/avatar/index.vue index 8042bd2a7bc..ac1fcbb9ee8 100644 --- a/ui/src/layout/components/top-bar/avatar/index.vue +++ b/ui/src/layout/components/top-bar/avatar/index.vue @@ -21,15 +21,57 @@

- {{ $t("layout.topbar.avatar.resetPassword") }} + {{ $t('views.login.resetPassword') }} + +
+ + {{ $t('layout.apiKey') }} + +
+ + +
+ {{ $t('layout.language') }} + +
+ + +
+
+ + {{ $t('layout.about.title') }} + + + + {{ $t('layout.logout') }} - {{ $t("layout.topbar.avatar.about") }} - {{ $t("layout.topbar.avatar.logout") }} + + + + diff --git a/ui/src/layout/layout-template/DetailLayout.vue b/ui/src/layout/layout-template/DetailLayout.vue new file mode 100644 index 00000000000..9e1a6c20150 --- /dev/null +++ b/ui/src/layout/layout-template/DetailLayout.vue @@ -0,0 +1,18 @@ + + + + diff --git a/ui/src/layout/layout-template/SystemLayout.vue b/ui/src/layout/layout-template/SystemLayout.vue new file mode 100644 index 00000000000..46221f3adfc --- /dev/null +++ b/ui/src/layout/layout-template/SystemLayout.vue @@ -0,0 +1,24 @@ + + + + diff --git a/ui/src/layout/layout-template/index.scss b/ui/src/layout/layout-template/index.scss new file mode 100644 index 00000000000..8dcba638050 --- /dev/null +++ b/ui/src/layout/layout-template/index.scss @@ -0,0 +1,26 @@ +.app-layout { + background-color: var(--app-layout-bg-color); + height: 100%; +} + +.app-main { + position: relative; + height: 100%; + padding: var(--app-header-height) 0 0 !important; + box-sizing: border-box; + overflow: auto; + &.isExpire { + padding-top: calc(var(--app-header-height) + 40px) !important; + } +} + +.sidebar-container { + box-sizing: border-box; + transition: width 0.28s; + width: var(--sidebar-width); + min-width: var(--sidebar-width); + background-color: var(--sidebar-bg-color); +} +.view-container { + width: calc(100% - var(--sidebar-width)); +} diff --git a/ui/src/layout/main-layout/index.vue b/ui/src/layout/main-layout/index.vue deleted file mode 100644 index dbb9438b29d..00000000000 --- a/ui/src/layout/main-layout/index.vue +++ /dev/null @@ -1,26 +0,0 @@ - - - - diff --git a/ui/src/locales/index.ts b/ui/src/locales/index.ts index 51f94c11c00..db1b2de9db3 100644 --- a/ui/src/locales/index.ts +++ b/ui/src/locales/index.ts @@ -1,66 +1,82 @@ -import { useLocalStorage, usePreferredLanguages } from '@vueuse/core'; -import { computed } from 'vue'; -import { createI18n } 
from 'vue-i18n'; +import { useLocalStorage, usePreferredLanguages } from '@vueuse/core' +import { computed } from 'vue' +import { createI18n } from 'vue-i18n' // 导入语言文件 -const langModules = import.meta.glob('./lang/*/index.ts', { eager: true }) as Record Promise<{ default: Object }>>; +const langModules = import.meta.glob('./lang/*/index.ts', { eager: true }) as Record< + string, + () => Promise<{ default: Object }> +> -const langModuleMap = new Map(); +const langModuleMap = new Map() -export const langCode: Array = []; +export const langCode: Array = [] -export const localeConfigKey = 'MaxKB-locale'; +export const localeConfigKey = 'MaxKB-locale' // 获取浏览器默认语言环境 -const languages = usePreferredLanguages(); +const languages = usePreferredLanguages() + +export function getBrowserLang() { + const browserLang = navigator.language ? navigator.language : languages.value[0] + let defaultBrowserLang = '' + if (browserLang === 'zh-HK' || browserLang === 'zh-TW') { + defaultBrowserLang = 'zh-Hant' + } else if (browserLang === 'zh-CN') { + defaultBrowserLang = 'zh-CN' + } else { + defaultBrowserLang = 'en-US' + } + return defaultBrowserLang +} // 生成语言模块列表 const generateLangModuleMap = () => { - const fullPaths = Object.keys(langModules); - fullPaths.forEach((fullPath) => { - const k = fullPath.replace('./lang', ''); - const startIndex = 1; - const lastIndex = k.lastIndexOf('/'); - const code = k.substring(startIndex, lastIndex); - langCode.push(code); - langModuleMap.set(code, langModules[fullPath]); - }); -}; + const fullPaths = Object.keys(langModules) + fullPaths.forEach((fullPath) => { + const k = fullPath.replace('./lang', '') + const startIndex = 1 + const lastIndex = k.lastIndexOf('/') + const code = k.substring(startIndex, lastIndex) + langCode.push(code) + langModuleMap.set(code, langModules[fullPath]) + }) +} // 导出 Message const importMessages = computed(() => { - generateLangModuleMap(); + generateLangModuleMap() - const message: Recordable = {}; - 
langModuleMap.forEach((value: any, key) => { - message[key] = value.default; - }); - return message; -}); + const message: Recordable = {} + langModuleMap.forEach((value: any, key) => { + message[key] = value.default + }) + return message +}) export const i18n = createI18n({ - legacy: false, - locale: useLocalStorage(localeConfigKey, 'zh_CN').value || languages.value[0] || 'zh_CN', - fallbackLocale: 'zh_CN', - messages: importMessages.value, - globalInjection: true, -}); + legacy: false, + locale: useLocalStorage(localeConfigKey, getBrowserLang()).value || getBrowserLang(), + fallbackLocale: getBrowserLang(), + messages: importMessages.value, + globalInjection: true +}) export const langList = computed(() => { - if (langModuleMap.size === 0) generateLangModuleMap(); + if (langModuleMap.size === 0) generateLangModuleMap() - const list:any=[] - langModuleMap.forEach((value: any, key) => { - list.push({ - label: value.default.lang, - value: key, - }); - }); + const list: any = [] + langModuleMap.forEach((value: any, key) => { + list.push({ + label: value.default.lang, + value: key + }) + }) - return list; -}); + return list +}) // @ts-ignore -export const { t } = i18n.global; +export const { t } = i18n.global -export default i18n; +export default i18n diff --git a/ui/src/locales/lang/en-US/ai-chat.ts b/ui/src/locales/lang/en-US/ai-chat.ts new file mode 100644 index 00000000000..3a52270977c --- /dev/null +++ b/ui/src/locales/lang/en-US/ai-chat.ts @@ -0,0 +1,99 @@ +export default { + noHistory: 'No Chat History', + createChat: 'New Chat', + history: 'Chat History', + only20history: 'Showing only the last 20 chats', + question_count: 'Questions', + exportRecords: 'Export Chat History', + chatId: 'Chat ID', + userInput: 'User Input', + quote: 'Quote', + download: 'Click to Download', + transcribing: 'Transcribing', + passwordValidator: { + title: 'Enter Password to Access', + errorMessage1: 'Password cannot be empty', + errorMessage2: 'Incorrect password' + }, + 
operation: { + play: 'Play', + pause: 'Pause', + regeneration: 'Regenerate Response', + like: 'Like', + cancelLike: 'Unlike', + oppose: 'Dislike', + cancelOppose: 'Undo Dislike', + continue: 'Continue', + stopChat: 'Stop Response', + startChat: 'Start Chat' + }, + tip: { + error500Message: 'Sorry, the service is currently under maintenance. Please try again later!', + errorIdentifyMessage: 'Unable to verify user identity', + errorLimitMessage: + 'Sorry, you have reached the maximum number of questions. Please try again tomorrow!', + answerMessage: + 'Sorry, no relevant content found. Please rephrase your question or provide more details.', + stopAnswer: 'Response Stopped', + answerLoading: 'Generating Response...', + recorderTip: `

This feature requires microphone access. Browsers block recording on insecure pages. Solutions:
+1. Enable HTTPS;
+2. If HTTPS is not available, adjust browser security settings. For Chrome:
+(1) Enter chrome://flags/#unsafely-treat-insecure-origin-as-secure in the address bar;
+(2) Add your HTTP site, e.g., http://127.0.0.1:8080.

`, + recorderError: 'Recording Failed', + confirm: 'Got it', + requiredMessage: 'Please fill in all required fields', + inputParamMessage1: 'Please specify a parameter in the URL', + inputParamMessage2: 'value', + prologueMessage: 'Sorry, the service is currently under maintenance. Please try again later!' + }, + inputPlaceholder: { + speaking: 'Speaking...', + recorderLoading: 'Transcribing...', + default: 'Type your question' + }, + uploadFile: { + label: 'Upload File', + most: 'Maximum', + limit: 'files allowed, each up to', + fileType: 'File Type', + tipMessage: 'Please select allowed file types in the upload settings', + limitMessage1: 'You can upload up to', + limitMessage2: 'files', + sizeLimit: 'Each file must not exceed', + imageMessage: 'Please process the image content', + errorMessage: 'Upload Failed' + }, + executionDetails: { + title: 'Execution Details', + paramOutputTooltip: 'Each document supports previewing up to 500 characters', + audioFile: 'Audio File', + searchContent: 'Search Query', + searchResult: 'Search Results', + conditionResult: 'Condition Evaluation', + currentChat: 'Current Chat', + answer: 'AI Response', + replyContent: 'Reply Content', + textContent: 'Text Content', + input: 'Input', + output: 'Output', + rerankerContent: 'Re-ranked Content', + rerankerResult: 'Re-ranking Results', + paragraph: 'Segment', + noSubmit: 'No submission from user', + errMessage: 'Error Log' + }, + KnowledgeSource: { + title: 'Knowledge Source', + referenceParagraph: 'Cited Segment', + consume: 'Tokens', + consumeTime: 'Runtime' + }, + paragraphSource: { + title: 'Knowledge Quote', + question: 'User Question', + optimizationQuestion: 'Optimized Question' + }, + editTitle: 'Edit Title' +} diff --git a/ui/src/locales/lang/en-US/common.ts b/ui/src/locales/lang/en-US/common.ts new file mode 100644 index 00000000000..2fd0a30b32d --- /dev/null +++ b/ui/src/locales/lang/en-US/common.ts @@ -0,0 +1,68 @@ +export default { + create: 'Create', + createSuccess: 
'Successful', + copy: 'Copy', + copySuccess: 'Successful', + copyError: 'Copy Failed', + save: 'Save', + saveSuccess: 'Successful', + delete: 'Delete', + deleteSuccess: 'Successful', + setting: 'Settings', + settingSuccess: 'Successful', + submit: 'Submit', + submitSuccess: 'Successful', + edit: 'Edit', + editSuccess: 'Successful', + modify: 'Modify', + modifySuccess: 'Successful', + add: 'Add', + addSuccess: 'Successful', + cancel: 'Cancel', + confirm: 'OK', + tip: 'Tips', + refresh: 'Refresh', + search: 'Search', + clear: 'Clear', + professional: 'Purchase the Professional Edition', + createDate: 'Create Date', + createTime: 'Create Time', + operation: 'Action', + character: 'characters', + export: 'Export', + exportSuccess: 'Successful', + unavailable: '(Unavailable)', + public: 'Public', + private: 'Private', + paramSetting: 'Parameter Settings', + creator: 'Creator', + author: 'Author', + debug: 'Debug', + required: 'Required', + noData: 'No data', + result: 'Result', + fileUpload: { + document: 'Documents', + image: 'Image', + audio: 'Audio', + video: 'Video', + other: 'Other', + addExtensions: 'Add suffix', + existingExtensionsTip: 'File suffix already exists', + }, + status: { + label: 'Status', + enableSuccess: 'Successful', + disableSuccess: 'Successful' + }, + param: { + outputParam: 'Output Parameters', + inputParam: 'Input Parameters', + initParam: 'Startup Parameters' + }, + + inputPlaceholder: 'Please input', + title: 'Title', + content: 'Content', + rename: 'Rename' +} diff --git a/ui/src/locales/lang/en-US/components.ts b/ui/src/locales/lang/en-US/components.ts new file mode 100644 index 00000000000..7f794e1a15f --- /dev/null +++ b/ui/src/locales/lang/en-US/components.ts @@ -0,0 +1,12 @@ +export default { + quickCreatePlaceholder: 'Quickly create blank document', + quickCreateName: 'document name', + noData: 'No Data', + loading: 'Loading', + noMore: 'No more! 
', + selectParagraph: { + title: 'Select Segments', + error: 'Process only the failed segments', + all: 'All Segments' + } +} diff --git a/ui/src/locales/lang/en-US/dynamics-form.ts b/ui/src/locales/lang/en-US/dynamics-form.ts new file mode 100644 index 00000000000..2cfcd80580c --- /dev/null +++ b/ui/src/locales/lang/en-US/dynamics-form.ts @@ -0,0 +1,102 @@ +export default { + input_type_list: { + TextInput: 'Input', + PasswordInput: 'Password', + Slider: 'Slider', + SwitchInput: 'Switch', + SingleSelect: 'Single Select', + MultiSelect: 'Multi Select', + DatePicker: 'Date Picker', + JsonInput: 'JSON', + RadioCard: 'Radio Card', + RadioRow: 'Radio Row' + }, + default: { + label: 'Default', + placeholder: 'Please enter a default', + requiredMessage: ' is a required property', + show: 'Show Default' + }, + tip: { + requiredMessage: 'cannot be empty', + jsonMessage: 'Incorrect JSON format' + }, + searchBar: { + placeholder: 'Please enter keywords to search' + }, + paramForm: { + field: { + label: 'Parameter', + placeholder: 'Please enter a parameter', + requiredMessage: 'Parameter is a required property', + requiredMessage2: 'Only letters, numbers, and underscores are allowed' + }, + name: { + label: 'Name', + placeholder: 'Please enter a name', + requiredMessage: 'Name is a required property' + }, + tooltip: { + label: 'Tooltip', + placeholder: 'Please enter a tooltip' + }, + required: { + label: 'Required', + requiredMessage: 'Required is a required property' + }, + input_type: { + label: 'Type', + placeholder: 'Please select a type', + requiredMessage: 'Type is a required property' + } + }, + DatePicker: { + placeholder: 'Select Date', + year: 'Year', + month: 'Month', + date: 'Date', + datetime: 'Date Time', + dataType: { + label: 'Date Type', + placeholder: 'Please select a date type' + }, + format: { + label: 'Format', + placeholder: 'Please select a format' + } + }, + Select: { + label: 'Option Value', + placeholder: 'Please enter an option value' + }, + tag: { 
+ label: 'Tag', + placeholder: 'Please enter an option label' + }, + Slider: { + showInput: { + label: 'Show Input Box' + }, + valueRange: { + label: 'Value Range', + minRequired: 'Minimum value is required', + maxRequired: 'Maximum value is required' + }, + step: { + label: 'Step Value', + requiredMessage1: 'Step value is required', + requiredMessage2: 'Step value cannot be 0' + } + }, + TextInput: { + length: { + label: 'Text Length', + minRequired: 'Minimum length is required', + maxRequired: 'Maximum length is required', + requiredMessage1: 'Length must be between', + requiredMessage2: 'and', + requiredMessage3: 'characters', + requiredMessage4: 'Text length is a required parameter' + } + } +} diff --git a/ui/src/locales/lang/en-US/index.ts b/ui/src/locales/lang/en-US/index.ts new file mode 100644 index 00000000000..bf56593e2be --- /dev/null +++ b/ui/src/locales/lang/en-US/index.ts @@ -0,0 +1,17 @@ +import en from 'element-plus/es/locale/lang/en' +import components from './components' +import layout from './layout' +import views from './views' +import common from './common' +import dynamicsForm from './dynamics-form' +import chat from './ai-chat' +export default { + lang: 'English', + layout, + views, + components, + en, + common, + dynamicsForm, + chat +} diff --git a/ui/src/locales/lang/en-US/layout.ts b/ui/src/locales/lang/en-US/layout.ts new file mode 100644 index 00000000000..8ceecd75327 --- /dev/null +++ b/ui/src/locales/lang/en-US/layout.ts @@ -0,0 +1,34 @@ +export default { + github: 'Project Address', + wiki: 'User Manual', + forum: 'Forum For Help', + logout: 'Log Out', + apiKey: 'API Key', + apiServiceAddress: 'API Service Address', + language: 'Language', + isExpire: 'License not uploaded or expired', + about: { + title: 'About', + expiredTime: 'Expiration Date', + edition: { + label: 'Edition', + community: 'Community Edition', + professional: 'Professional Edition' + }, + version: 'Version', + serialNo: 'Serial No.', + remark: 'Remarks', + update: 
'Update', + authorize: 'Authorized', + + }, + time: { + daysLater: 'days later', + hoursLater: 'hours later', + expired: 'expired', + expiringSoon: 'expiring soon' + }, + copyright: 'Copyright © 2014-2025 FIT2CLOUD, All rights reserved.', + userManualUrl: 'http://docs.maxkb.hk/', + forumUrl: 'https://github.com/1Panel-dev/MaxKB/discussions' +} diff --git a/ui/src/locales/lang/en-US/views/404.ts b/ui/src/locales/lang/en-US/views/404.ts new file mode 100644 index 00000000000..0d4861a4a6f --- /dev/null +++ b/ui/src/locales/lang/en-US/views/404.ts @@ -0,0 +1,5 @@ +export default { + title: "404", + message: "Unable to Access APP", + operate: "Back to Home", +}; diff --git a/ui/src/locales/lang/en-US/views/application-overview.ts b/ui/src/locales/lang/en-US/views/application-overview.ts new file mode 100644 index 00000000000..9fe29e0eeff --- /dev/null +++ b/ui/src/locales/lang/en-US/views/application-overview.ts @@ -0,0 +1,115 @@ +export default { + title: 'Overview', + appInfo: { + header: 'App Information', + publicAccessLink: 'Public URL', + openText: 'On', + closeText: 'Off', + copyLinkText: 'Copy Link', + refreshLinkText: 'Refresh Link', + demo: 'Preview', + embedInWebsite: 'Get Embed Code', + accessControl: 'Access Control', + displaySetting: 'Display Settings', + apiAccessCredentials: 'API Access Credentials', + apiKey: 'API Key', + refreshToken: { + msgConfirm1: 'Are you sure you want to regenerate the public URL?', + msgConfirm2: + 'Regenerating the Public URL will affect any existing embedded codes on third-party sites. You will need to update the embed code and re-integrate it into those sites. Proceed with caution!', + refreshSuccess: 'Successfully Refreshed' + }, + APIKeyDialog: { + saveSettings: 'Save Settings', + msgConfirm1: 'Are you sure you want to delete the API Key', + msgConfirm2: + 'This action is irreversible. Once deleted, the API Key cannot be recovered. 
Do you still want to proceed?', + enabledSuccess: 'Enabled', + disabledSuccess: 'Disabled' + }, + EditAvatarDialog: { + title: 'App Logo', + customizeUpload: 'Custom Upload', + upload: 'Upload', + default: 'Default Logo', + custom: 'Custom', + sizeTip: + 'Recommended size: 32×32 pixels. Supports JPG, PNG, and GIF formats. Max size: 10 MB', + fileSizeExceeded: 'File size exceeds 10 MB', + uploadImagePrompt: 'Please upload an image' + }, + EmbedDialog: { + fullscreenModeTitle: 'Fullscreen Mode', + copyInstructions: 'Copy the code below to embed', + floatingModeTitle: 'Floating Mode', + mobileModeTitle: 'Mobile Mode' + }, + LimitDialog: { + dialogTitle: 'Access Restrictions', + showSourceLabel: 'Show Knowledge Source', + clientQueryLimitLabel: 'Query Limit per Client', + authentication: 'Authentication', + authenticationValue: 'Access Password', + timesDays: 'queries per day', + whitelistLabel: 'Allowed Domains', + whitelistPlaceholder: + 'Enter allowed third-party domains, one per line. For example:\nhttp://127.0.0.1:5678\nhttps://dataease.io' + }, + SettingAPIKeyDialog: { + allowCrossDomainLabel: 'Allow Cross-Domain Access', + crossDomainPlaceholder: + 'Enter allowed cross-domain addresses. If enabled but left blank, no restrictions will apply.\nEnter one per line, e.g.:\nhttp://127.0.0.1:5678\nhttps://dataease.io' + }, + SettingDisplayDialog: { + dialogTitle: 'Display Settings', + languageLabel: 'Language', + showSourceLabel: 'Show Knowledge Source', + showExecutionDetail: 'Show Execution Details', + restoreDefault: 'Restore Default', + customThemeColor: 'Custom Theme Color', + headerTitleFontColor: 'Header Title Font Color', + default: 'Default', + askUserAvatar: 'User Avatar (Asking)', + replace: 'Replace', + display: 'Display', + imageMessage: + 'Recommended size: 32×32 pixels. Supports JPG, PNG, and GIF formats. 
Max size: 10 MB',
+      AIAvatar: 'AI Avatar',
+      floatIcon: 'Floating Icon',
+      iconDefaultPosition: 'Default Icon Position',
+      iconPosition: {
+        left: 'Left',
+        right: 'Right',
+        bottom: 'Bottom',
+        top: 'Top'
+      },
+      draggablePosition: 'Draggable Position',
+      showHistory: 'Show Chat History',
+      displayGuide: 'Show Guide Image (Floating Mode)',
+      disclaimer: 'Disclaimer',
+      disclaimerValue: 'This content is AI-generated and for reference only.'
+    }
+  },
+  monitor: {
+    monitoringStatistics: 'Monitoring Statistics',
+    customRange: 'Custom Range',
+    startDatePlaceholder: 'Start Date',
+    endDatePlaceholder: 'End Date',
+    pastDayOptions: {
+      past7Days: 'Last 7 Days',
+      past30Days: 'Last 30 Days',
+      past90Days: 'Last 90 Days',
+      past183Days: 'Last 6 Months',
+      other: 'Custom'
+    },
+    charts: {
+      customerTotal: 'Total Users',
+      customerNew: 'New Users',
+      queryCount: 'Total Queries',
+      tokensTotal: 'Total Tokens Used',
+      userSatisfaction: 'User Feedback Metrics',
+      approval: 'Like',
+      disapproval: 'Dislike'
+    }
+  }
+}
diff --git a/ui/src/locales/lang/en-US/views/application-workflow.ts b/ui/src/locales/lang/en-US/views/application-workflow.ts
new file mode 100644
index 00000000000..e1fd8009e68
--- /dev/null
+++ b/ui/src/locales/lang/en-US/views/application-workflow.ts
@@ -0,0 +1,302 @@
+export default {
+  node: 'Node',
+  nodeName: 'Node Name',
+  baseComponent: 'Basic',
+  nodeSetting: 'Node Settings',
+  workflow: 'Workflow',
+  searchBar: {
+    placeholder: 'Search by name'
+  },
+  info: {
+    previewVersion: 'Preview Version:',
+    saveTime: 'Last Saved:'
+  },
+  setting: {
+    restoreVersion: 'Restore Previous Version',
+    restoreCurrentVersion: 'Restore to This Version',
+    addComponent: 'Add',
+    public: 'Publish',
+    releaseHistory: 'Release History',
+    autoSave: 'Auto Save',
+    latestRelease: 'Latest Release',
+    copyParam: 'Copy Parameters',
+    debug: 'Run',
+    exit: 'Exit',
+    exitSave: 'Save & Exit'
+  },
+  tip: {
+    publicSuccess: 'Published successfully',
+    noData: 'No related results 
found', + nameMessage: 'Name cannot be empty!', + onlyRight: 'Connections can only be made from the right anchor', + notRecyclable: 'Loop connections are not allowed', + onlyLeft: 'Connections can only be made to the left anchor', + applicationNodeError: 'This application is unavailable', + functionNodeError: 'This function node is unavailable', + repeatedNodeError: 'A node with this name already exists', + cannotCopy: 'Cannot be copied', + copyError: 'Node already copied', + paramErrorMessage: 'Parameter already exists: ', + saveMessage: 'Current changes have not been saved. Save before exiting?' + }, + delete: { + confirmTitle: 'Confirm to delete this node?', + deleteMessage: 'This node cannot be deleted' + }, + control: { + zoomOut: 'Zoom Out', + zoomIn: 'Zoom In', + fitView: 'Fit to Screen', + retract: 'Collapse All', + extend: 'Expand All', + beautify: 'Auto-Arrange' + }, + variable: { + label: 'Variable', + global: 'Global Variable', + Referencing: 'Referenced Variable', + ReferencingRequired: 'Referenced variable is required', + ReferencingError: 'Invalid referenced variable', + NoReferencing: 'Referenced variable does not exist', + placeholder: 'Please select a variable' + }, + condition: { + title: 'Execution Condition', + front: 'Precondition', + AND: 'All', + OR: 'Any', + text: 'After the connected node is executed, execute the current node' + }, + validate: { + startNodeRequired: 'Start node is required', + startNodeOnly: 'Only one start node is allowed', + baseNodeRequired: 'Base information node is required', + baseNodeOnly: 'Only one base information node is allowed', + notInWorkFlowNode: 'Node not in workflow', + noNextNode: 'Next node does not exist', + nodeUnavailable: 'Node unavailable', + needConnect1: 'The branch of the node needs to be connected', + cannotEndNode: 'This node cannot be used as an end node' + }, + nodes: { + startNode: { + label: 'Start', + question: 'User Question', + currentTime: 'Current Time' + }, + baseNode: { + label: 
'Base Information', + appName: { + label: 'App Name' + }, + appDescription: { + label: 'App Description' + }, + fileUpload: { + label: 'File Upload', + tooltip: 'When enabled, the Q&A page will display a file upload button.' + }, + FileUploadSetting: { + title: 'File Upload Settings', + maxFiles: 'Maximum number of files per upload', + fileLimit: 'Maximum size per file (MB)', + fileUploadType: { + label: 'File types allowed for upload', + documentText: 'Requires "Document Content Extraction" node to parse document content', + imageText: 'Requires "Image Understanding" node to parse image content', + audioText: 'Requires "Speech-to-Text" node to parse audio content', + otherText: 'Need to parse this type of file by yourself' + } + } + }, + aiChatNode: { + label: 'AI Chat', + text: 'Chat with an AI model', + answer: 'AI Content', + returnContent: { + label: 'Return Content', + tooltip: `If turned off, the content of this node will not be output to the user. + If you want the user to see the output of this node, please turn on the switch.` + }, + defaultPrompt: 'Known Information', + think: 'Thinking Process' + }, + searchDatasetNode: { + label: 'Knowledge Retrieval', + text: 'Allows you to query text content related to user questions from the Knowledge', + paragraph_list: 'List of retrieved segments', + is_hit_handling_method_list: 'List of segments that meet direct response criteria', + result: 'Search Result', + directly_return: 'Content of segments that meet direct response criteria', + searchParam: 'Retrieval Parameters', + searchQuestion: { + label: 'Question', + placeholder: 'Please select a search question', + requiredMessage: 'Please select a search question' + } + }, + questionNode: { + label: 'Question Optimization', + text: 'Optimize and improve the current question based on historical chat records to better match knowledge segments', + result: 'Optimized Question Result', + defaultPrompt1: `Optimize and improve the user's question based on context:`, + 
defaultPrompt2: `Please output an optimized question.`, + systemDefault: 'You are a question optimization expert' + }, + conditionNode: { + label: 'Conditional Branch', + text: 'Trigger different nodes based on conditions', + branch_name: 'Branch Name', + conditions: { + label: 'Conditions', + info: 'Meets the following', + requiredMessage: 'Please select conditions' + }, + valueMessage: 'Please enter a value', + addCondition: 'Add Condition', + addBranch: 'Add Branch' + }, + replyNode: { + label: 'Specified Reply', + text: 'Specify reply content, referenced variables will be converted to strings for output', + content: 'Content', + replyContent: { + label: 'Reply Content', + custom: 'Custom', + reference: 'Reference Variable' + } + }, + rerankerNode: { + label: 'Multi-path Recall', + text: 'Use a re-ranking model to refine retrieval results from multiple knowledge sources', + result_list: 'Re-ranked Results List', + result: 'Re-ranking Result', + rerankerContent: { + label: 'Re-ranking Content', + requiredMessage: 'Please select re-ranking content' + }, + higher: 'Higher', + ScoreTooltip: 'The higher the Score, the stronger the relevance.', + max_paragraph_char_number: 'Maximum Character', + reranker_model: { + label: 'Rerank', + placeholder: 'Please select a rerank' + } + }, + formNode: { + label: 'Form Input', + text: 'Collect user input during Q&A and use it in subsequent processes', + form_content_format1: 'Hello, please fill out the form below:', + form_content_format2: 'Click the [Submit] button after filling it out.', + form_data: 'All Form Content', + formContent: { + label: 'Form Output Content', + requiredMessage: + 'Please set the output content of this node, { form } is a placeholder for the form.', + tooltip: 'Define the output content of this node. 
{ form } is a placeholder for the form' + }, + formAllContent: 'All Form Content', + formSetting: 'Form Configuration' + }, + documentExtractNode: { + label: 'Document Content Extraction', + text: 'Extract content from documents', + content: 'Document Content' + }, + imageUnderstandNode: { + label: 'Image Understanding', + text: 'Analyze images to identify objects, scenes, and provide answers', + answer: 'AI Content', + model: { + label: 'Vision Model', + requiredMessage: 'Please select a vision model' + }, + image: { + label: 'Select Image', + requiredMessage: 'Please select an image' + } + }, + variableAssignNode: { + label: 'Variable Assign', + text: 'Update the value of the global variable', + assign: 'Set Value' + }, + mcpNode: { + label: 'MCP Server', + text: 'Call MCP Tools through SSE/Streamable HTTP', + getToolsSuccess: 'Get Tools Successfully', + getTool: 'Get Tools', + tool: 'Tool', + toolParam: 'Tool Params', + mcpServerTip: 'Please enter the JSON format of the MCP server config', + mcpToolTip: 'Please select a tool', + configLabel: 'MCP Server Config (Only supports SSE/Streamable HTTP call method)' + }, + imageGenerateNode: { + label: 'Image Generation', + text: 'Generate images based on provided text content', + answer: 'AI Content', + model: { + label: 'Image Generation Model', + requiredMessage: 'Please select an image generation model' + }, + prompt: { + label: 'Positive Prompt', + tooltip: 'Describe elements and visual features you want in the generated image' + }, + negative_prompt: { + label: 'Negative Prompt', + tooltip: 'Describe elements you want to exclude from the generated image', + placeholder: + 'Please describe content you do not want to generate, such as color, bloody content' + } + }, + speechToTextNode: { + label: 'Speech2Text', + text: 'Convert audio to text through speech recognition model', + stt_model: { + label: 'Speech Recognition Model' + }, + audio: { + label: 'Select Audio File', + placeholder: 'Please select an audio file' 
+ } + }, + textToSpeechNode: { + label: 'TTS', + text: 'Convert text to audio through speech synthesis model', + tts_model: { + label: 'Speech Synthesis Model' + }, + content: { + label: 'Select Text Content' + } + }, + functionNode: { + label: 'Custom Function', + text: 'Execute custom scripts to achieve data processing' + }, + applicationNode: { + label: 'APP Node' + } + }, + compare: { + is_null: 'Is null', + is_not_null: 'Is not null', + contain: 'Contains', + not_contain: 'Does not contain', + eq: 'Equal to', + ge: 'Greater than or equal to', + gt: 'Greater than', + le: 'Less than or equal to', + lt: 'Less than', + len_eq: 'Length equal to', + len_ge: 'Length greater than or equal to', + len_gt: 'Length greater than', + len_le: 'Length less than or equal to', + len_lt: 'Length less than', + is_true: 'Is true', + is_not_true: 'Is not true' + }, + FileUploadSetting: {} +} diff --git a/ui/src/locales/lang/en-US/views/application.ts b/ui/src/locales/lang/en-US/views/application.ts new file mode 100644 index 00000000000..b69ede6d890 --- /dev/null +++ b/ui/src/locales/lang/en-US/views/application.ts @@ -0,0 +1,229 @@ +export default { + title: 'APP', + createApplication: 'Create APP', + importApplication: 'Import APP', + copyApplication: 'Copy APP', + workflow: 'WORKFLOW', + simple: 'SIMPLE', + searchBar: { + placeholder: 'Search by name' + }, + + setting: { + demo: 'Demo' + }, + delete: { + confirmTitle: 'Are you sure you want to delete this APP: ', + confirmMessage: + 'Deleting this APP will no longer provide its services. Please proceed with caution.' + }, + tip: { + ExportError: 'Export Failed', + professionalMessage: + 'The Community Edition supports up to 5 APP. 
If you need more APP, please upgrade to the Professional Edition.', + saveErrorMessage: 'Saving failed, please check your input or try again later', + loadingErrorMessage: 'Failed to load configuration, please check your input or try again later' + }, + + applicationForm: { + title: { + appTest: 'Debug Preview', + copy: 'copy' + }, + form: { + appName: { + label: 'Name', + placeholder: 'Please enter the APP name', + requiredMessage: 'APP name is required' + }, + appDescription: { + label: 'Description', + placeholder: + 'Describe the APP scenario and use, e.g.: XXX assistant answering user questions about XXX product usage' + }, + appType: { + label: 'Type', + simplePlaceholder: 'Suitable for beginners to create assistant.', + workflowPlaceholder: 'Suitable for advanced users to customize the workflow of assistant' + }, + appTemplate: { + blankApp: 'Blank APP', + assistantApp: 'Knowledge Assistant' + }, + aiModel: { + label: 'AI Model', + placeholder: 'Please select an AI model' + }, + roleSettings: { + label: 'System Role', + placeholder: 'You are xxx assistant' + }, + + prompt: { + label: 'Prompt', + noReferences: '(No references Knowledge)', + references: ' (References Knowledge)', + placeholder: 'Please enter prompt', + requiredMessage: 'Please enter prompt', + tooltip: + 'By adjusting the content of the prompt, you can guide the direction of the large model chat.', + + noReferencesTooltip: + 'By adjusting the content of the prompt, you can guide the direction of the large model chat. This prompt will be fixed at the beginning of the context. Variables used: {question} is the question posed by the user.', + referencesTooltip: + 'By adjusting the content of the prompt, you can guide the direction of the large model chat. This prompt will be fixed at the beginning of the context. 
Variables used: {data} carries known information from the knowledge; {question} is the question posed by the user.', + defaultPrompt: `Known information: {data} +Question: {question} +Response requirements: +- Please use concise and professional language to answer the user's question. + ` + }, + historyRecord: { + label: 'Chat History' + }, + relatedKnowledge: { + label: 'Related Knowledge', + placeholder: 'Related knowledge are displayed here' + }, + multipleRoundsDialogue: 'Multiple Rounds Dialogue', + + prologue: 'Prologue', + defaultPrologue: + 'Hello, I am XXX Assistant. You can ask me questions about using XXX.\n- What are the main features of XXX?\n- Which LLM does XXX support?\n- What document types does XXX support?', + problemOptimization: { + label: 'Questions Optimization', + tooltip: + 'Optimize the current question based on historical chat to better match knowledge points.' + }, + + voiceInput: { + label: 'Voice Input', + placeholder: 'Please select a speech recognition model', + requiredMessage: 'Please select a speech input model', + autoSend: 'Automatic Sending' + }, + voicePlay: { + label: 'Voice Playback', + placeholder: 'Please select a speech synthesis model', + requiredMessage: 'Please select a speech playback model', + autoPlay: 'Automatic Playback', + browser: 'Browser Playback (free)', + tts: 'TTS Model', + listeningTest: 'Preview' + }, + reasoningContent: { + label: 'Output Thinking', + tooltip: + "Please set the thinking label based on the model's return, and the content in the middle of the label will be recognized as the thinking process.", + start: 'Start', + end: 'End' + } + }, + buttons: { + publish: 'Save&Publish', + addModel: 'Add Model' + }, + dialog: { + addDataset: 'Add Related Knowledge', + addDatasetPlaceholder: 'The selected knowledge must use the same embedding model', + selected: 'Selected', + countDataset: 'Knowledge', + + selectSearchMode: 'Retrieval Mode', + vectorSearch: 'Vector Search', + vectorSearchTooltip: + 'Vector 
search is a retrieval method based on vector distance calculations, suitable for large data volumes in the knowledge.', + fullTextSearch: 'Full-text Search', + fullTextSearchTooltip: + 'Full-text search is a retrieval method based on text similarity, suitable for small data volumes in the knowledge.', + hybridSearch: 'Hybrid Search', + hybridSearchTooltip: + 'Hybrid search is a retrieval method based on both vector and text similarity, suitable for medium data volumes in the knowledge.', + similarityThreshold: 'Similarity higher than', + similarityTooltip: 'The higher the similarity, the stronger the correlation.', + topReferences: 'Top N Segments', + maxCharacters: 'Maximum Characters per Reference', + noReferencesAction: 'When there are no knowledge references', + continueQuestioning: 'Continue to ask questions to the Al model', + provideAnswer: 'Specify Reply Content', + designated_answer: + 'Hello, I am XXX Assistant. My knowledge only contains information related to XXX products. Please rephrase your question.', + defaultPrompt1: + "The content inside the parentheses () represents the user's question. Based on the context, please speculate and complete the user's question ({question}). 
The requirement is to output a completed question and place it",
+      defaultPrompt2: 'tag'
+    }
+  },
+  applicationAccess: {
+    title: 'APP Access',
+    wecom: 'WeCom',
+    wecomTip: 'Create WeCom intelligent APP',
+    dingtalk: 'DingTalk',
+    dingtalkTip: 'Create DingTalk intelligent APP',
+    wechat: 'WeChat',
+    wechatTip: 'Create WeChat intelligent APP',
+    lark: 'Lark',
+    larkTip: 'Create Lark intelligent APP',
+    setting: 'Setting',
+    callback: 'Callback Address',
+    callbackTip: 'Please fill in the callback address',
+    wecomPlatform: 'WeCom Open Platform',
+    wechatPlatform: 'WeChat Open Platform',
+    dingtalkPlatform: 'DingTalk Open Platform',
+    larkPlatform: 'Lark Open Platform',
+    slack: 'Slack',
+    slackTip: 'Create Slack intelligent APP',
+    wecomSetting: {
+      title: 'WeCom Configuration',
+      cropId: 'Corp ID',
+      cropIdPlaceholder: 'Please enter corp ID',
+      agentIdPlaceholder: 'Please enter agent ID',
+      secretPlaceholder: 'Please enter secret',
+      tokenPlaceholder: 'Please enter token',
+      encodingAesKeyPlaceholder: 'Please enter EncodingAESKey',
+      authenticationSuccessful: 'Successful',
+      urlInfo:
+        '-APP management-Self-built-Created APP-Receive messages-Set the "URL" received by the API'
+    },
+    dingtalkSetting: {
+      title: 'DingTalk Configuration',
+      clientIdPlaceholder: 'Please enter client ID',
+      clientSecretPlaceholder: 'Please enter client secret',
+      urlInfo:
+        '-On the robot page, set the "Message Receiving Mode" to HTTP mode, and fill in the above URL into the "Message Receiving Address"'
+    },
+    wechatSetting: {
+      title: 'WeChat Configuration',
+      appId: 'APP ID',
+      appIdPlaceholder: 'Please enter APP ID',
+      appSecret: 'APP SECRET',
+      appSecretPlaceholder: 'Please enter APP SECRET',
+      token: 'TOKEN',
+      tokenPlaceholder: 'Please enter TOKEN',
+      aesKey: 'Message Encryption Key',
+      aesKeyPlaceholder: 'Please enter the message encryption key',
+      urlInfo:
+        '-Settings and Development-Basic Configuration-"Server Address URL" in server configuration'
+    },
+    larkSetting: {
+      
title: 'Lark Configuration', + appIdPlaceholder: 'Please enter App ID', + appSecretPlaceholder: 'Please enter App secret', + verificationTokenPlaceholder: 'Please enter verification token', + urlInfo: + '-Events and callbacks - event configuration - configure the "request address" of the subscription method', + folderTokenPlaceholder: 'Please enter folder token' + }, + slackSetting: { + title: 'Slack Configuration', + signingSecretPlaceholder: 'Please enter signing secret', + botUserTokenPlaceholder: 'Please enter bot user token' + }, + copyUrl: 'Copy the link and fill it in' + }, + hitTest: { + title: 'Retrieval Testing', + text: 'Test the hitting effect of the Knowledge based on the given query text.', + emptyMessage1: 'Retrieval Testing results will show here', + emptyMessage2: 'No matching sections found' + } +} diff --git a/ui/src/locales/lang/en-US/views/dataset.ts b/ui/src/locales/lang/en-US/views/dataset.ts new file mode 100644 index 00000000000..23c6837e9f7 --- /dev/null +++ b/ui/src/locales/lang/en-US/views/dataset.ts @@ -0,0 +1,94 @@ +export default { + title: 'Knowledge', + createDataset: 'Create Knowledge', + general: 'General', + web: 'Web Site', + lark: 'Lark', + relatedApplications: 'Linked App', + document_count: 'docs', + relatedApp_count: 'linked apps', + searchBar: { + placeholder: 'Search by name' + }, + setting: { + vectorization: 'Vectorization', + sync: 'Sync' + }, + tip: { + professionalMessage: + 'The community edition supports up to 50 knowledge. For more knowledge, please upgrade to the professional edition.', + syncSuccess: 'Sync task sent successfully', + updateModeMessage: + 'After modifying the knowledge vector model, you need to vectorize the knowledge. Do you want to continue saving?' + }, + delete: { + confirmTitle: 'Confirm deletion of knowledge:', + confirmMessage1: 'This knowledge is related with', + confirmMessage2: 'APP. Deleting it will be irreversible, please proceed with caution.' 
+ }, + datasetForm: { + title: { + info: 'Knowledge Settings' + }, + form: { + datasetName: { + label: 'Name', + placeholder: 'Please enter the knowledge name', + requiredMessage: 'Please enter the knowledge name' + }, + datasetDescription: { + label: 'Description', + placeholder: + 'Describe the content of the knowledge. A detailed description will help AI understand the content better, improving the accuracy of content retrieval and hit rate.', + requiredMessage: 'Please enter the knowledge description' + }, + EmbeddingModel: { + label: 'Embedding Model', + placeholder: 'Please select a embedding model', + requiredMessage: 'Please select the embedding model' + }, + datasetType: { + label: 'Type', + generalInfo: 'Upload local documents', + webInfo: 'Sync text data from a web site', + larkInfo: 'Sync documents from Feishu', + yuqueInfo: 'Sync documents from Yuque' + }, + source_url: { + label: 'Web Root URL', + placeholder: 'Please enter the web root URL', + requiredMessage: 'Please enter the web root URL' + }, + user_id: { + requiredMessage: 'Please enter User ID' + }, + token: { + requiredMessage: 'Please enter Token' + }, + selector: { + label: 'Selector', + placeholder: 'Default is body, can input .classname/#idname/tagname' + } + } + }, + ResultSuccess: { + title: 'Knowledge Created Successfully', + paragraph: 'Segments', + paragraph_count: 'Segments', + documentList: 'Document List', + loading: 'Importing', + buttons: { + toDataset: 'Return to Knowledge List', + toDocument: 'Go to Document' + } + }, + syncWeb: { + title: 'Sync Knowledge', + syncMethod: 'Sync Method', + replace: 'Replace Sync', + replaceText: 'Re-fetch Web site documents, replacing the documents in the local knowledge', + complete: 'Full Sync', + completeText: 'Delete all documents in the local knowledge and re-fetch web site documents', + tip: 'Note: All syncs will delete existing data and re-fetch new data. Please proceed with caution.' 
+ } +} diff --git a/ui/src/locales/lang/en-US/views/document.ts b/ui/src/locales/lang/en-US/views/document.ts new file mode 100644 index 00000000000..9a3f1da7387 --- /dev/null +++ b/ui/src/locales/lang/en-US/views/document.ts @@ -0,0 +1,179 @@ +export default { + uploadDocument: 'Upload Document', + importDocument: 'Import Document', + syncDocument: 'Sync Document', + selected: 'Selected', + items: 'Items', + searchBar: { + placeholder: 'Search by document name' + }, + setting: { + migration: 'Move', + cancelGenerateQuestion: 'Cancel Generating Questions', + cancelVectorization: 'Cancel Vectorization', + cancelGenerate: 'Cancel Generation', + export: 'Export to' + }, + tip: { + saveMessage: 'Current changes have not been saved. Confirm exit?', + cancelSuccess: 'Successful', + sendMessage: 'Successful', + vectorizationSuccess: 'Successful', + nameMessage: 'Document name cannot be empty!', + importMessage: 'Successful', + migrationSuccess: 'Successful' + }, + upload: { + selectFile: 'Select File', + selectFiles: 'Select Folder', + uploadMessage: 'Drag and drop files here to upload or', + formats: 'Supported formats:', + requiredMessage: 'Please upload a file', + errorMessage1: 'The file size exceeds 100mb', + errorMessage2: 'Unsupported file format', + errorMessage3: 'File cannot be empty', + errorMessage4: 'Up to 50 files can be uploaded at once', + template: 'Template', + download: 'Download' + }, + + fileType: { + txt: { + label: 'Text File', + tip1: '1. It is recommended to standardize the segment markers in the file before uploading.', + tip2: '2. Up to 50 files can be uploaded at once, with each file not exceeding 100MB.' + }, + table: { + label: 'Table', + tip1: '1. Click to download the corresponding template and complete the information:', + tip2: '2. The first row must be column headers, and the column headers must be meaningful terms. Each record in the table will be treated as a segment.', + tip3: '3. 
Each sheet in the uploaded spreadsheet file will be treated as a document, with the sheet name as the document name.', + tip4: '4. Up to 50 files can be uploaded at once, with each file not exceeding 100MB.' + }, + QA: { + label: 'QA Pairs', + tip1: '1. Click to download the corresponding template and complete the information:', + tip2: '2. Each sheet in the uploaded spreadsheet file will be treated as a document, with the sheet name as the document name.', + tip3: '3. Up to 50 files can be uploaded at once, with each file not exceeding 100MB.' + } + }, + setRules: { + title: { + setting: 'Set Segment Rules', + preview: 'Preview' + }, + intelligent: { + label: 'Automatic Segmentation (Recommended)', + text: 'If you are unsure how to set segmentation rules, it is recommended to use automatic segmentation.' + }, + advanced: { + label: 'Advanced Segmentation', + text: 'Users can customize segmentation delimiters, segment length, and cleaning rules based on document standards.' + }, + patterns: { + label: 'Segment Delimiters', + tooltip: + 'Recursively split according to the selected symbols in order. If the split result exceeds the segment length, it will be truncated to the segment length.', + placeholder: 'Please select' + }, + limit: { + label: 'Segment Length' + }, + with_filter: { + label: 'Auto Clean', + text: 'Remove duplicate extra symbols, spaces, blank lines, and tab words.' + }, + checkedConnect: { + label: 'Add "Related Questions" section for question-based QA pairs during import.' 
+ } + }, + buttons: { + prev: 'Previous', + next: 'Next', + import: 'Start Import', + preview: 'Apply' + }, + table: { + name: 'Document Name', + char_length: 'Character', + paragraph: 'Segment', + all: 'All', + updateTime: 'Update Time' + }, + fileStatus: { + label: 'File Status', + SUCCESS: 'Success', + FAILURE: 'Failure', + EMBEDDING: 'Indexing', + PENDING: 'Queuing', + GENERATE: 'Generating', + SYNC: 'Syncing', + REVOKE: 'Cancelling', + finish: 'Finish' + }, + enableStatus: { + label: 'Status', + enable: 'Enabled', + close: 'Disabled' + }, + sync: { + label: 'Sync', + confirmTitle: 'Confirm sync document?', + confirmMessage1: + 'Syncing will delete existing data and retrieve new data. Please proceed with caution.', + confirmMessage2: 'Cannot sync, please set the document URL first.', + successMessage: 'Successful' + }, + delete: { + confirmTitle1: 'Confirm batch deletion of', + confirmTitle2: 'documents?', + confirmMessage: + 'Segments within the selected documents will also be deleted. Please proceed with caution.', + successMessage: 'Successful', + confirmTitle3: 'Confirm deleting document:', + confirmMessage1: 'Under this document', + confirmMessage2: 'All segments will be deleted, please operate with caution. ' + }, + form: { + source_url: { + label: 'Document URL', + placeholder: 'Enter document URL, one per line. Incorrect URL will cause import failure.', + requiredMessage: 'Please enter a document URL' + }, + selector: { + label: 'Selector', + placeholder: 'Default is body, you can input .classname/#idname/tagname' + }, + hit_handling_method: { + label: 'Retrieve-Respond', + tooltip: 'When user asks a question, handle matched segments according to the set method.' 
+ }, + similarity: { + label: 'Similarity Higher Than', + placeholder: 'Directly return segment content', + requiredMessage: 'Please enter similarity value' + } + }, + hitHandlingMethod: { + optimization: 'Model optimization', + directly_return: 'Respond directly' + }, + generateQuestion: { + title: 'Generate Questions', + successMessage: 'Successful', + tip1: 'The {data} in the prompt is a placeholder for segmented content, which is replaced by the segmented content when executed and sent to the AI model;', + tip2: 'The AI model generates relevant questions based on the segmented content. Please place the generated questions within the', + tip3: 'tags, and the system will automatically relate the questions within these tags;', + tip4: 'The generation effect depends on the selected model and prompt. Users can adjust to achieve the best effect.', + prompt1: + 'Content: {data}\n \n Please summarize the above and generate 5 questions based on the summary. \nAnswer requirements: \n - Please output only questions; \n - Please place each question in', + prompt2: 'tag.' + }, + feishu: { + selectDocument: 'Select Document', + tip1: 'Supports document and table types, including TXT, Markdown, PDF, DOCX, HTML, XLS, XLSX, CSV, and ZIP formats;', + tip2: 'The system does not store original documents. 
Before importing, Please ensure the document follows standardized paragraph markers', + allCheck: 'Select All', + errorMessage1: 'Please select a document' + } +} diff --git a/ui/src/locales/lang/en-US/views/function-lib.ts b/ui/src/locales/lang/en-US/views/function-lib.ts new file mode 100644 index 00000000000..05f5f60a8ff --- /dev/null +++ b/ui/src/locales/lang/en-US/views/function-lib.ts @@ -0,0 +1,80 @@ +export default { + title: 'Function', + internalTitle: 'Internal Function', + added: 'Added', + createFunction: 'Create Function', + editFunction: 'Edit Function', + copyFunction: 'Copy Function', + importFunction: 'Import Function', + searchBar: { + placeholder: 'Search by function name' + }, + setting: { + disabled: 'Disabled' + }, + tip: { + saveMessage: 'Unsaved changes will be lost. Are you sure you want to exit?' + }, + delete: { + confirmTitle: 'Confirm deletion of function:', + confirmMessage: + 'Deleting this function will cause errors in APP that reference it when they are queried. Please proceed with caution.' + }, + disabled: { + confirmTitle: 'Confirm disable function:', + confirmMessage: + 'Disabling this function will cause errors in APP that reference it when they are queried. Please proceed with caution.' 
+ }, + functionForm: { + title: { + copy: 'Copy', + baseInfo: 'Basic Information' + }, + form: { + functionName: { + label: 'Name', + name: 'Function Name', + placeholder: 'Please enter the function name', + requiredMessage: 'Please enter the function name' + }, + functionDescription: { + label: 'Description', + placeholder: 'Please enter a description of the function' + }, + permission_type: { + label: 'Permissions', + requiredMessage: 'Please select' + }, + paramName: { + label: 'Parameter Name', + placeholder: 'Please enter the parameter name', + requiredMessage: 'Please enter the parameter name' + }, + dataType: { + label: 'Data Type' + }, + source: { + label: 'Source', + custom: 'Custom', + reference: 'Reference Parameter' + }, + required: { + label: 'Required' + }, + param: { + paramInfo1: 'Displayed when using the function', + paramInfo2: 'Not displayed when using the function', + code: 'Content (Python)', + selectPlaceholder: 'Please select parameter', + inputPlaceholder: 'Please enter parameter values', + }, + debug: { + run: 'Run', + output: 'Output', + runResult: 'Run Result', + runSuccess: 'Successful', + runFailed: 'Run Failed' + } + } + } +} diff --git a/ui/src/locales/lang/en-US/views/index.ts b/ui/src/locales/lang/en-US/views/index.ts new file mode 100644 index 00000000000..c63d40492f8 --- /dev/null +++ b/ui/src/locales/lang/en-US/views/index.ts @@ -0,0 +1,34 @@ +import notFound from './404' +import application from './application' +import applicationOverview from './application-overview' +import dataset from './dataset' +import system from './system' +import functionLib from './function-lib' +import user from './user' +import team from './team' +import template from './template' +import document from './document' +import paragraph from './paragraph' +import problem from './problem' +import log from './log' +import applicationWorkflow from './application-workflow' +import login from './login' +import operateLog from './operate-log' +export default { 
+ notFound, + application, + applicationOverview, + system, + functionLib, + user, + team, + template, + dataset, + applicationWorkflow, + document, + paragraph, + problem, + log, + login, + operateLog +} diff --git a/ui/src/locales/lang/en-US/views/log.ts b/ui/src/locales/lang/en-US/views/log.ts new file mode 100644 index 00000000000..9dfa40e009e --- /dev/null +++ b/ui/src/locales/lang/en-US/views/log.ts @@ -0,0 +1,41 @@ +export default { + title: 'Chat Logs', + delete: { + confirmTitle: 'Confirm deletion of question:', + confirmMessage1: 'Deleting this question will cancel the association of', + confirmMessage2: 'segments. Please proceed with caution.' + }, + buttons: { + clearStrategy: 'Cleanup Strategy', + prev: 'Previous', + next: 'Next' + }, + table: { + abstract: 'Title', + chat_record_count: 'Total Messages', + user: 'User', + feedback: { + label: 'User Feedback', + star: 'Agree', + trample: 'Disagree' + }, + mark: 'Marks', + recenTimes: 'Last Chat Time' + }, + addToDataset: 'Add to Knowledge', + daysText: 'Days ago', + selectDataset: 'Select Knowledge', + selectDatasetPlaceholder: 'Please select a knowledge', + saveToDocument: 'Save to Document', + documentPlaceholder: 'Please select a document', + editContent: 'Edit Content', + editMark: 'Edit Label', + form: { + content: { + placeholder: 'Please enter the content' + }, + title: { + placeholder: 'Please set a title for the current content for management and viewing' + } + } +} diff --git a/ui/src/locales/lang/en-US/views/login.ts b/ui/src/locales/lang/en-US/views/login.ts new file mode 100644 index 00000000000..dc2edba57eb --- /dev/null +++ b/ui/src/locales/lang/en-US/views/login.ts @@ -0,0 +1,24 @@ +export default { + title: 'Login', + jump_tip: 'You will be redirected to the authentication source page for authentication', + jump: 'Redirect', + resetPassword: 'Change Password', + forgotPassword: 'Forgot Password', + userRegister: 'User Registration', + buttons: { + login: 'Login', + register: 'Register', 
+ backLogin: 'Back to Login', + checkCode: 'Verify Now' + }, + newPassword: 'New Password', + enterPassword: 'Please enter your new password', + useEmail: 'Use Email', + moreMethod: 'More Login Methods', + verificationCode: { + placeholder: 'Please enter the verification code', + getVerificationCode: 'Get Verification Code', + successMessage: 'Verification code sent successfully', + resend: 'Resend' + } +} diff --git a/ui/src/locales/lang/en-US/views/operate-log.ts b/ui/src/locales/lang/en-US/views/operate-log.ts new file mode 100644 index 00000000000..a182233ac38 --- /dev/null +++ b/ui/src/locales/lang/en-US/views/operate-log.ts @@ -0,0 +1,31 @@ +export default { + title: 'Operate Logs', + table: { + menu: { + label: 'Operate menu' + }, + operate: { + label: 'Operate', + detail: 'Operate details' + }, + user: { + label: 'Operate user' + }, + status: { + label: 'Status', + success: 'Successful', + fail: 'Failed', + all: 'All' + }, + ip_address: { + label: 'IP Address' + }, + opt: { + label: 'API Details' + }, + operateTime: { + label: 'Operate Time' + } + }, + close: 'Close' +} diff --git a/ui/src/locales/lang/en-US/views/paragraph.ts b/ui/src/locales/lang/en-US/views/paragraph.ts new file mode 100644 index 00000000000..812e3ab6812 --- /dev/null +++ b/ui/src/locales/lang/en-US/views/paragraph.ts @@ -0,0 +1,32 @@ +export default { + title: 'Segment', + paragraph_count: 'Segments', + editParagraph: 'Edit Segment', + addParagraph: 'Add Segment', + paragraphDetail: 'Segment Details', + character_count: 'characters', + setting: { + batchSelected: 'Batch Select', + cancelSelected: 'Cancel Selection' + }, + delete: { + confirmTitle: 'Confirm deletion of segment:', + confirmMessage: 'Deletion cannot be undone. Please proceed with caution.' 
+ }, + relatedProblem: { + title: 'Related Questions', + placeholder: 'Please select a question' + }, + form: { + paragraphTitle: { + label: 'Title', + placeholder: 'Please enter the segment title' + }, + content: { + label: 'Content', + placeholder: 'Please enter the segment content', + requiredMessage1: 'Please enter the segment content', + requiredMessage2: 'Content must not exceed 100,000 words' + } + } +} diff --git a/ui/src/locales/lang/en-US/views/problem.ts b/ui/src/locales/lang/en-US/views/problem.ts new file mode 100644 index 00000000000..cd9780b5975 --- /dev/null +++ b/ui/src/locales/lang/en-US/views/problem.ts @@ -0,0 +1,37 @@ +export default { + title: 'Questions', + createProblem: 'Create Question', + detailProblem: 'Question Details', + quickCreateProblem: 'Quick Create', + quickCreateName: 'question', + tip: { + placeholder: 'Enter the question, support multiple entries, one per line.', + errorMessage: 'Question cannot be empty!', + requiredMessage: 'Please enter a question', + relatedSuccess: 'Successful' + }, + + setting: { + batchDelete: 'Bulk Delete', + cancelRelated: 'Cancel Association' + }, + searchBar: { + placeholder: 'Search by name' + }, + table: { + paragraph_count: 'Related Segments', + updateTime: 'Update Time' + }, + delete: { + confirmTitle: 'Confirm deletion of question:', + confirmMessage1: 'Deleting this question will cancel the association of', + confirmMessage2: 'segments. Please proceed with caution.' 
+ }, + relateParagraph: { + title: 'Relate to Segment', + selectDocument: 'Select a Document', + placeholder: 'Search document by name', + selectedParagraph: 'Selected Segments', + count: 'Count' + } +} diff --git a/ui/src/locales/lang/en-US/views/system.ts b/ui/src/locales/lang/en-US/views/system.ts new file mode 100644 index 00000000000..ce72a168b69 --- /dev/null +++ b/ui/src/locales/lang/en-US/views/system.ts @@ -0,0 +1,154 @@ +export default { + title: 'System', + subTitle: 'Setting', + test: 'Test Connection', + testSuccess: 'Successful', + testFailed: 'Test connection failed', + password: 'Password', + authentication: { + title: 'Login Authentication', + ldap: { + title: 'LDAP', + address: 'LDAP Address', + serverPlaceholder: 'Please enter LDAP address', + bindDN: 'Bind DN', + bindDNPlaceholder: 'Please enter Bind DN', + ou: 'User OU', + ouPlaceholder: 'Please enter user OU', + ldap_filter: 'User Filter', + ldap_filterPlaceholder: 'Please enter user filter', + ldap_mapping: 'LDAP Attribute Mapping', + ldap_mappingPlaceholder: 'Please enter LDAP attribute mapping', + enableAuthentication: 'Enable LDAP Authentication' + }, + cas: { + title: 'CAS', + ldpUri: 'ldpUri', + ldpUriPlaceholder: 'Please enter ldpUri', + validateUrl: 'Validation Address', + validateUrlPlaceholder: 'Please enter validation address', + redirectUrl: 'Callback Address', + redirectUrlPlaceholder: 'Please enter callback address', + enableAuthentication: 'Enable CAS Authentication' + }, + oidc: { + title: 'OIDC', + authEndpoint: 'Auth Endpoint', + authEndpointPlaceholder: 'Please enter auth endpoint', + tokenEndpoint: 'Token Endpoint', + tokenEndpointPlaceholder: 'Please enter token endpoint', + userInfoEndpoint: 'User Information Endpoint', + userInfoEndpointPlaceholder: 'Please enter user information endpoint', + clientId: 'Client ID', + clientIdPlaceholder: 'Please enter client ID', + scopePlaceholder: 'Please enter scope', + clientSecret: 'Client Secret', + clientSecretPlaceholder: 'Please 
enter client secret', + logoutEndpoint: 'Logout Endpoint', + logoutEndpointPlaceholder: 'Please enter logout endpoint', + redirectUrl: 'Redirect URL', + redirectUrlPlaceholder: 'Please enter redirect URL', + enableAuthentication: 'Enable OIDC Authentication' + }, + + oauth2: { + title: 'OAuth2', + authEndpoint: 'Auth Endpoint', + authEndpointPlaceholder: 'Please enter auth endpoint', + tokenEndpoint: 'Token Endpoint', + tokenEndpointPlaceholder: 'Please enter token endpoint', + userInfoEndpoint: 'User Information Endpoint', + userInfoEndpointPlaceholder: 'Please enter user information endpoint', + scope: 'Scope', + scopePlaceholder: 'Please enter scope', + clientId: 'Client ID', + clientIdPlaceholder: 'Please enter client ID', + clientSecret: 'Client Secret', + clientSecretPlaceholder: 'Please enter client secret', + redirectUrl: 'Redirect URL', + redirectUrlPlaceholder: 'Please enter redirect URL', + filedMapping: 'Field Mapping', + filedMappingPlaceholder: 'Please enter field mapping', + enableAuthentication: 'Enable OAuth2 Authentication' + }, + scanTheQRCode: { + title: 'Scan the QR code', + wecom: 'WeCom', + dingtalk: 'DingTalk', + lark: 'Lark', + effective: 'Effective', + alreadyTurnedOn: 'Turned On', + notEnabled: 'Not Enabled', + validate: 'Validate', + validateSuccess: 'Successful', + validateFailed: 'Validation failed', + validateFailedTip: 'Please fill in all required fields and ensure the format is correct', + appKeyPlaceholder: 'Please enter APP key', + appSecretPlaceholder: 'Please enter APP secret', + corpIdPlaceholder: 'Please enter corp ID', + agentIdPlaceholder: 'Please enter agent ID', + callbackWarning: 'Please enter a valid URL address', + larkQrCode: 'Lark Scan Code Login', + dingtalkQrCode: 'DingTalk Scan Code Login', + setting: ' Setting', + access: 'Access' + } + }, + theme: { + title: 'Appearance Settings', + platformDisplayTheme: 'Platform Display Theme', + customTheme: 'Custom Theme', + platformLoginSettings: 'Platform Login Settings', + 
custom: 'Custom', + pagePreview: 'Page Preview', + default: 'Default', + restoreDefaults: 'Restore Defaults', + orange: 'Orange', + green: 'Green', + purple: 'Purple', + red: 'Red', + loginBackground: 'Login Background Image', + loginLogo: 'Login Logo', + websiteLogo: 'Website Logo', + replacePicture: 'Replace Image', + websiteLogoTip: + 'Logo displayed at the top of the website. Recommended size: 48x48. Supports JPG, PNG, GIF. Maximum size: 10MB', + loginLogoTip: + 'Logo on the right side of the login page. Recommended size: 204x52. Supports JPG, PNG, GIF. Maximum size: 10MB', + loginBackgroundTip: + 'Left-side background image. Vector graphics recommended size: 576x900; Bitmap recommended size: 1152x1800. Supports JPG, PNG, GIF. Maximum size: 10MB', + websiteName: 'Website Name', + websiteNamePlaceholder: 'Please enter the website name', + websiteNameTip: 'The platform name displayed in the web page tab', + websiteSlogan: 'Welcome Slogan', + websiteSloganPlaceholder: 'Please enter the welcome slogan', + websiteSloganTip: 'The welcome slogan below the product logo', + defaultSlogan: 'Ready-to-use open-source AI assistant', + defaultTip: 'The default is the MaxKB platform interface, supports custom settings', + logoDefaultTip: 'The default is the MaxKB login interface, supports custom settings', + platformSetting: 'Platform Settings', + showUserManual: 'Show User Manual', + showForum: 'Show Forum Support', + showProject: 'Show Project Address', + urlPlaceholder: 'Please enter the URL address', + abandonUpdate: 'Abandon Update', + saveAndApply: 'Save and Apply', + fileMessageError: 'File size exceeds 10MB', + saveSuccess: 'Appearance settings successfully applied' + }, + email: { + title: 'Email Settings', + smtpHost: 'SMTP Host', + smtpHostPlaceholder: 'Please enter SMTP host', + smtpPort: 'SMTP Port', + smtpPortPlaceholder: 'Please enter SMTP port', + smtpUser: 'SMTP User', + smtpUserPlaceholder: 'Please enter SMTP user', + sendEmail: 'Sender\'s Email', + 
sendEmailPlaceholder: 'Please enter the sender\'s email', + smtpPassword: 'SMTP Password', + smtpPasswordPlaceholder: 'Please enter SMTP password', + enableSSL: 'Enable SSL (if the SMTP port is 465, you usually need to enable SSL)', + enableTLS: 'Enable TLS (if the SMTP port is 587, you usually need to enable TLS)' + } +} diff --git a/ui/src/locales/lang/en-US/views/team.ts b/ui/src/locales/lang/en-US/views/team.ts new file mode 100644 index 00000000000..d25df4bb373 --- /dev/null +++ b/ui/src/locales/lang/en-US/views/team.ts @@ -0,0 +1,30 @@ +export default { + title: 'Team Members', + member: 'Member', + manage: 'Owner', + permissionSetting: 'Permission Settings', + addMember: 'Add Member', + addSubTitle: 'Members can access the data you authorize after logging in.', + searchBar: { + placeholder: 'Enter username to search' + }, + delete: { + button: 'Remove', + confirmTitle: 'Confirm removal of member:', + confirmMessage: + 'Removing the member will revoke their access to knowledge and APP.' 
+ }, + setting: { + management: 'Manage', + check: 'View' + }, + teamForm: { + form: { + userName: { + label: 'Username/Email', + placeholder: "Enter the member's username or email", + requiredMessage: 'Enter the username/email' + } + } + } +} diff --git a/ui/src/locales/lang/en-US/views/template.ts b/ui/src/locales/lang/en-US/views/template.ts new file mode 100644 index 00000000000..0a89fcfb6b0 --- /dev/null +++ b/ui/src/locales/lang/en-US/views/template.ts @@ -0,0 +1,89 @@ +export default { + title: 'Models', + provider: 'Provider', + providerPlaceholder: 'Select Provider', + addModel: 'Add Model', + searchBar: { + placeholder: 'Search by name' + }, + delete: { + confirmTitle: 'Delete Model', + confirmMessage: 'Are you sure you want to delete the model:' + }, + tip: { + createSuccessMessage: 'Model created successfully', + createErrorMessage: 'There are errors in the basic information', + errorMessage: 'Variable already exists: ', + emptyMessage1: 'Please select the model type and base model in the basic information first', + emptyMessage2: 'The selected model does not support parameter settings', + updateSuccessMessage: 'Model updated successfully', + saveSuccessMessage: 'Model parameters saved successfully', + downloadError: 'Download failed', + noModel: 'Model does not exist in Ollama' + }, + model: { + allModel: 'All Models', + publicModel: 'Public Models', + privateModel: 'Private Models', + LLM: 'LLM', + EMBEDDING: 'Embedding Model', + RERANKER: 'Rerank', + STT: 'Speech2Text', + TTS: 'TTS', + IMAGE: 'Vision Model', + TTI: 'Image Generation' + }, + templateForm: { + title: { + baseInfo: 'Basic Information', + advancedInfo: 'Advanced Settings', + modelParams: 'Model Parameters', + editParam: 'Edit Parameter', + addParam: 'Add Parameter', + paramSetting: 'Model Parameter Settings', + apiParamPassing: 'Interface Parameters' + }, + form: { + templateName: { + label: 'Model Name', + placeholder: 'Set a name for the base model', + tooltip: 'Custom model name in 
MaxKB', + requiredMessage: 'Model name cannot be empty' + }, + permissionType: { + label: 'Permission', + privateDesc: 'Available only to current user', + publicDesc: 'Available to all users', + requiredMessage: 'Permission cannot be empty' + }, + model_type: { + label: 'Model Type', + placeholder: 'Select a model type', + tooltip1: + 'LLM: An inference model for AI chats in the APP.', + tooltip2: + 'Embedding Model: A model for vectorizing document content in the knowledge.', + tooltip3: 'Speech2Text: A model used for speech recognition in the APP.', + tooltip4: 'TTS: A model used for TTS in the APP.', + tooltip5: + 'Rerank: A model used to reorder candidate segments when using multi-route recall in advanced orchestration APP.', + tooltip6: + 'Vision Model: A visual model used for image understanding in advanced orchestration APP.', + tooltip7: + 'Image Generation: A visual model used for image generation in advanced orchestration APP.', + requiredMessage: 'Model type cannot be empty' + }, + base_model: { + label: 'Base Model', + tooltip: + 'For models not listed, enter the model name and press Enter', + placeholder: 'Enter the base model name and press Enter to add', + requiredMessage: 'Base model cannot be empty' + } + } + }, + download: { + downloading: 'Downloading...', + cancelDownload: 'Cancel Download' + } +} diff --git a/ui/src/locales/lang/en-US/views/user.ts b/ui/src/locales/lang/en-US/views/user.ts new file mode 100644 index 00000000000..2bbc1404363 --- /dev/null +++ b/ui/src/locales/lang/en-US/views/user.ts @@ -0,0 +1,74 @@ +export default { + title: 'User', + createUser: 'Create User', + editUser: 'Edit User', + setting: { + updatePwd: 'Change Password' + }, + tip: { + professionalMessage: + 'The community edition supports up to 2 users. 
For more users, please upgrade to the professional edition.', + updatePwdSuccess: 'User password updated successfully' + }, + delete: { + confirmTitle: 'Confirm deletion of user:', + confirmMessage: + 'Deleting this user will also delete all resources (APP, knowledge, models) created by this user. Please proceed with caution.' + }, + disabled: { + confirmTitle: 'Confirm disable function:', + confirmMessage: + 'Disabling this function will cause errors when APP that reference it are queried. Please proceed with caution.' + }, + userForm: { + form: { + username: { + label: 'Username', + placeholder: 'Please enter username', + requiredMessage: 'Please enter username', + lengthMessage: 'Length must be between 6 and 20 words' + }, + captcha: { + label: 'captcha', + placeholder: 'Please enter the captcha' + }, + nick_name: { + label: 'Name', + placeholder: 'Please enter name' + }, + email: { + label: 'Email', + placeholder: 'Please enter email', + requiredMessage: 'Please enter email' + }, + phone: { + label: 'Phone', + placeholder: 'Please enter phone' + }, + password: { + label: 'Login Password', + placeholder: 'Please enter password', + requiredMessage: 'Please enter password', + lengthMessage: 'Length must be between 6 and 20 words' + }, + new_password: { + label: 'New Password', + placeholder: 'Please enter new password', + requiredMessage: 'Please enter new password' + }, + re_password: { + label: 'Confirm Password', + placeholder: 'Please enter confirm password', + requiredMessage: 'Please enter confirm password', + validatorMessage: 'Passwords do not match' + } + } + }, + source: { + label: 'User Type', + local: 'System User', + wecom: 'WeCom', + lark: 'Lark', + dingtalk: 'DingTalk' + } +} diff --git a/ui/src/locales/lang/en_US/components/index.ts b/ui/src/locales/lang/en_US/components/index.ts deleted file mode 100644 index bd77588db6f..00000000000 --- a/ui/src/locales/lang/en_US/components/index.ts +++ /dev/null @@ -1,4 +0,0 @@ - -export default { - -}; diff 
--git a/ui/src/locales/lang/en_US/index.ts b/ui/src/locales/lang/en_US/index.ts deleted file mode 100644 index 16bd5f6bb5a..00000000000 --- a/ui/src/locales/lang/en_US/index.ts +++ /dev/null @@ -1,12 +0,0 @@ -import en from 'element-plus/es/locale/lang/en'; -import components from './components'; -import layout from './layout'; -import pages from './pages'; - -export default { - lang: 'English', - layout, - pages, - components, - en, -}; diff --git a/ui/src/locales/lang/en_US/layout.ts b/ui/src/locales/lang/en_US/layout.ts deleted file mode 100644 index b2c01cb053e..00000000000 --- a/ui/src/locales/lang/en_US/layout.ts +++ /dev/null @@ -1,39 +0,0 @@ -export default { - breadcrumb: { - - }, - sidebar: { - - }, - topbar: { - github: "Project address", - wiki: "User manual", - forum: "Forum for help", - MenuItem: { - application: "Application", - dataset: "Knowledge base", - setting: "System settings" - }, - avatar: { - resetPassword: "Change password", - about: "About", - logout: "Logout", - version:"Version", - dialog:{ - newPassword:"New password", - enterPassword: "Please enter new password", - confirmPassword: "Confirm password", - passwordLength:"Password length should be between 6 and 20 characters", - passwordMismatch:"Passwords do not match", - useEmail:"Use email", - enterEmail: "Please enter email", - enterVerificationCode: "Please enter verification code", - getVerificationCode: "Get verification code", - verificationCodeSentSuccess:"Verification code sent successfully", - resend:"Resend", - cancel:"Cancel", - save:"Save", - } - } - }, -}; diff --git a/ui/src/locales/lang/en_US/pages/index.ts b/ui/src/locales/lang/en_US/pages/index.ts deleted file mode 100644 index bd77588db6f..00000000000 --- a/ui/src/locales/lang/en_US/pages/index.ts +++ /dev/null @@ -1,4 +0,0 @@ - -export default { - -}; diff --git a/ui/src/locales/lang/zh-CN/ai-chat.ts b/ui/src/locales/lang/zh-CN/ai-chat.ts new file mode 100644 index 00000000000..76bb53d4f53 --- /dev/null +++ 
b/ui/src/locales/lang/zh-CN/ai-chat.ts @@ -0,0 +1,97 @@ +export default { + noHistory: '暂无历史记录', + createChat: '新建对话', + history: '历史记录', + only20history: '仅显示最近 20 条对话', + question_count: '条提问', + exportRecords: '导出聊天记录', + chatId: '对话 ID', + userInput: '用户输入', + quote: '引用', + download: '点击下载文件', + transcribing: '转文字中', + passwordValidator: { + title: '请输入密码打开链接', + errorMessage1: '密码不能为空', + errorMessage2: '密码错误' + }, + operation: { + play: '点击播放', + pause: '停止', + regeneration: '换个答案', + like: '赞同', + cancelLike: '取消赞同', + oppose: '反对', + cancelOppose: '取消反对', + continue: '继续', + stopChat: '停止回答', + startChat: '开始对话' + }, + tip: { + error500Message: '抱歉,当前正在维护,无法提供服务,请稍后再试!', + errorIdentifyMessage: '无法识别用户身份', + errorLimitMessage: '抱歉,您的提问已达到最大限制,请明天再来吧!', + answerMessage: '抱歉,没有查找到相关内容,请重新描述您的问题或提供更多信息。', + stopAnswer: '已停止回答', + answerLoading: '回答中', + recorderTip: `

该功能需要使用麦克风,浏览器禁止不安全页面录音,解决方案如下:
+1、可开启 https 解决;
+2、若无 https 配置则需要修改浏览器安全配置,Chrome 设置如下:
+(1) 地址栏输入chrome://flags/#unsafely-treat-insecure-origin-as-secure;
+(2) 将 http 站点配置在文本框中,例如: http://127.0.0.1:8080。

`, + recorderError: '录音失败', + confirm: '我知道了', + requiredMessage: '请填写所有必填字段', + inputParamMessage1: '请在URL中填写参数', + inputParamMessage2: '的值', + prologueMessage: '抱歉,当前正在维护,无法提供服务,请稍后再试!' + }, + inputPlaceholder: { + speaking: '说话中', + recorderLoading: '转文字中', + default: '请输入问题' + }, + uploadFile: { + label: '上传文件', + most: '最多', + limit: '个,每个文件限制', + fileType: '文件类型', + tipMessage: '请在文件上传配置中选择文件类型', + limitMessage1: '最多上传', + limitMessage2: '个文件', + sizeLimit: '单个文件大小不能超过', + imageMessage: '请解析图片内容', + errorMessage: '上传失败' + }, + executionDetails: { + title: '执行详情', + paramOutputTooltip: '每个文档仅支持预览500字', + audioFile: '语音文件', + searchContent: '检索内容', + searchResult: '检索结果', + conditionResult: '判断结果', + currentChat: '本次对话', + answer: 'AI 回答', + replyContent: '回复内容', + textContent: '文本内容', + input: '输入', + output: '输出', + rerankerContent: '重排内容', + rerankerResult: '重排结果', + paragraph: '分段', + noSubmit: '用户未提交', + errMessage: '错误日志' + }, + KnowledgeSource: { + title: '知识来源', + referenceParagraph: '引用分段', + consume: '消耗tokens', + consumeTime: '耗时' + }, + paragraphSource: { + title: '知识库引用', + question: '用户问题', + optimizationQuestion: '优化后问题' + }, + editTitle: '编辑标题' +} diff --git a/ui/src/locales/lang/zh-CN/common.ts b/ui/src/locales/lang/zh-CN/common.ts new file mode 100644 index 00000000000..db1e7e7318a --- /dev/null +++ b/ui/src/locales/lang/zh-CN/common.ts @@ -0,0 +1,67 @@ +export default { + create: '创建', + createSuccess: '创建成功', + copy: '复制', + copySuccess: '复制成功', + copyError: '复制失败', + save: '保存', + saveSuccess: '保存成功', + delete: '删除', + deleteSuccess: '删除成功', + setting: '设置', + settingSuccess: '设置成功', + submit: '提交', + submitSuccess: '提交成功', + edit: '编辑', + editSuccess: '编辑成功', + modify: '修改', + modifySuccess: '修改成功', + add: '添加', + addSuccess: '添加成功', + cancel: '取消', + confirm: '确定', + tip: '提示', + refresh: '刷新', + search: '搜索', + clear: '清空', + professional: '购买专业版', + createDate: '创建日期', + createTime: '创建时间', + operation: '操作', + character: '字符', + 
export: '导出', + exportSuccess: '导出成功', + unavailable: '(不可用)', + public: '公有', + private: '私有', + paramSetting: '参数设置', + creator: '创建者', + author: '作者', + debug: '调试', + required: '必填', + noData: '暂无数据', + result: '结果', + fileUpload: { + document: '文档', + image: '图片', + audio: '音频', + video: '视频', + other: '其他文件', + addExtensions: '添加后缀名', + existingExtensionsTip: '文件后缀已存在', + }, + status: { + label: '状态', + enableSuccess: '启用成功', + disableSuccess: '禁用成功' + }, + inputPlaceholder: '请输入', + title: '标题', + content: '内容', + param: { + outputParam: '输出参数', + inputParam: '输入参数', + initParam: '启动参数' + }, + rename: '重命名' +} diff --git a/ui/src/locales/lang/zh-CN/components.ts b/ui/src/locales/lang/zh-CN/components.ts new file mode 100644 index 00000000000..5b871daf12f --- /dev/null +++ b/ui/src/locales/lang/zh-CN/components.ts @@ -0,0 +1,12 @@ +export default { + quickCreatePlaceholder: '快速创建空白文档', + quickCreateName: '文档名称', + noData: '无匹配数据', + loading: '加载中', + noMore: '到底啦!', + selectParagraph: { + title: '选择分段', + error: '仅执行未成功分段', + all: '全部分段' + } +} diff --git a/ui/src/locales/lang/zh-CN/dynamics-form.ts b/ui/src/locales/lang/zh-CN/dynamics-form.ts new file mode 100644 index 00000000000..9814c06eb15 --- /dev/null +++ b/ui/src/locales/lang/zh-CN/dynamics-form.ts @@ -0,0 +1,102 @@ +export default { + input_type_list: { + TextInput: '文本框', + PasswordInput: '密码框', + Slider: '滑块', + SwitchInput: '开关', + SingleSelect: '单选框', + MultiSelect: '多选框', + DatePicker: '日期', + JsonInput: 'JSON文本框', + RadioCard: '选项卡', + RadioRow: '单行选项卡' + }, + default: { + label: '默认值', + placeholder: '请输入默认值', + requiredMessage: '为必填属性', + show: '显示默认值' + }, + tip: { + requiredMessage: '不能为空', + jsonMessage: 'JSON格式不正确' + }, + searchBar: { + placeholder: '请输入关键字搜索' + }, + paramForm: { + field: { + label: '参数', + placeholder: '请输入参数', + requiredMessage: '参数 为必填属性', + requiredMessage2: '只能输入字母数字和下划线' + }, + name: { + label: '显示名称', + placeholder: '请输入显示名称', + requiredMessage: '显示名称 为必填属性' + }, + 
tooltip: { + label: '参数提示说明', + placeholder: '请输入参数提示说明' + }, + required: { + label: '是否必填', + requiredMessage: '是否必填 为必填属性' + }, + input_type: { + label: '组件类型', + placeholder: '请选择组件类型', + requiredMessage: '组建类型 为必填属性' + } + }, + DatePicker: { + placeholder: '选择日期', + year: '年', + month: '月', + date: '日期', + datetime: '日期时间', + dataType: { + label: '时间类型', + placeholder: '请选择时间类型' + }, + format: { + label: '格式', + placeholder: '请选择格式' + } + }, + Select: { + label: '选项值', + placeholder: '请输入选项值' + }, + tag: { + label: '标签', + placeholder: '请输入选项标签' + }, + Slider: { + showInput: { + label: '是否带输入框' + }, + valueRange: { + label: '取值范围', + minRequired: '最小值必填', + maxRequired: '最大值必填' + }, + step: { + label: '步长值', + requiredMessage1: '步长值必填', + requiredMessage2: '步长不能为0' + } + }, + TextInput: { + length: { + label: '文本长度', + minRequired: '最小长度必填', + maxRequired: '最大长度必填', + requiredMessage1: '长度在', + requiredMessage2: '到', + requiredMessage3: '个字符', + requiredMessage4: '文本长度为必填参数' + } + } +} diff --git a/ui/src/locales/lang/zh-CN/index.ts b/ui/src/locales/lang/zh-CN/index.ts new file mode 100644 index 00000000000..3df42d640e8 --- /dev/null +++ b/ui/src/locales/lang/zh-CN/index.ts @@ -0,0 +1,17 @@ +import zhCn from 'element-plus/es/locale/lang/zh-cn' +import components from './components' +import layout from './layout' +import views from './views' +import common from './common' +import dynamicsForm from './dynamics-form' +import chat from './ai-chat' +export default { + lang: '简体中文', + layout, + views, + components, + zhCn, + common, + dynamicsForm, + chat +} diff --git a/ui/src/locales/lang/zh-CN/layout.ts b/ui/src/locales/lang/zh-CN/layout.ts new file mode 100644 index 00000000000..fbf8e7e934e --- /dev/null +++ b/ui/src/locales/lang/zh-CN/layout.ts @@ -0,0 +1,33 @@ +export default { + github: '项目地址', + wiki: '用户手册', + forum: '论坛求助', + logout: '退出', + apiKey: 'API Key 管理', + apiServiceAddress: 'API 服务地址', + language: '语言', + isExpire: '未上传 License 或 License 已过期。', + 
about: { + title: '关于', + expiredTime: '到期时间', + edition: { + label: '版本', + community: '社区版', + professional: '专业版' + }, + version: '版本号', + serialNo: '序列号', + remark: '备注', + update: '更新', + authorize: '授权给' + }, + time: { + daysLater: '天后', + hoursLater: '小时后', + expired: '已过期', + expiringSoon: '即将到期' + }, + copyright: '版权所有 © 2014-2025 杭州飞致云信息科技有限公司', + userManualUrl: 'https://maxkb.cn/docs/', + forumUrl: 'https://bbs.fit2cloud.com/c/mk/11' +} diff --git a/ui/src/locales/lang/zh-CN/views/404.ts b/ui/src/locales/lang/zh-CN/views/404.ts new file mode 100644 index 00000000000..a65dcbbd048 --- /dev/null +++ b/ui/src/locales/lang/zh-CN/views/404.ts @@ -0,0 +1,5 @@ +export default { + title: "404", + message: "无法访问应用", + operate: "返回首页", +}; diff --git a/ui/src/locales/lang/zh-CN/views/application-overview.ts b/ui/src/locales/lang/zh-CN/views/application-overview.ts new file mode 100644 index 00000000000..1f15d2c08c0 --- /dev/null +++ b/ui/src/locales/lang/zh-CN/views/application-overview.ts @@ -0,0 +1,113 @@ +export default { + title: '概览', + appInfo: { + header: '应用信息', + publicAccessLink: '公开访问链接', + openText: '开', + closeText: '关', + copyLinkText: '复制链接', + refreshLinkText: '刷新链接', + demo: '演示', + embedInWebsite: '嵌入第三方', + accessControl: '访问限制', + displaySetting: '显示设置', + apiAccessCredentials: 'API 访问凭据', + apiKey: 'API Key', + refreshToken: { + msgConfirm1: '是否重新生成公开访问链接?', + msgConfirm2: + '重新生成公开访问链接会影响嵌入第三方脚本变更,需要将新脚本重新嵌入第三方,请谨慎操作!', + refreshSuccess: '刷新成功' + }, + + APIKeyDialog: { + saveSettings: '保存设置', + msgConfirm1: '是否删除API Key', + msgConfirm2: '删除API Key后将无法恢复,请确认是否删除?', + enabledSuccess: '已启用', + disabledSuccess: '已禁用' + }, + EditAvatarDialog: { + title: '应用头像', + customizeUpload: '自定义上传', + upload: '上传', + default: '默认logo', + custom: '自定义', + sizeTip: '建议尺寸 32*32,支持 JPG、PNG、GIF,大小不超过 10 MB', + fileSizeExceeded: '文件大小超过 10 MB', + uploadImagePrompt: '请上传一张图片' + }, + EmbedDialog: { + fullscreenModeTitle: '全屏模式', + copyInstructions: '复制以下代码进行嵌入', + 
floatingModeTitle: '浮窗模式', + mobileModeTitle: '移动端模式' + }, + LimitDialog: { + showSourceLabel: '显示知识来源', + clientQueryLimitLabel: '每个客户端提问限制', + timesDays: '次/天', + authentication: '身份验证', + authenticationValue: '验证密码', + whitelistLabel: '白名单', + whitelistPlaceholder: + '请输入允许嵌入第三方的源地址,一行一个,如:\nhttp://127.0.0.1:5678\nhttps://dataease.io' + }, + SettingAPIKeyDialog: { + dialogTitle: '设置', + allowCrossDomainLabel: '允许跨域地址', + crossDomainPlaceholder: + '请输入允许的跨域地址,开启后不输入跨域地址则不限制。\n跨域地址一行一个,如:\nhttp://127.0.0.1:5678 \nhttps://dataease.io' + }, + SettingDisplayDialog: { + dialogTitle: '显示设置', + languageLabel: '语言', + showSourceLabel: '显示知识来源', + showExecutionDetail: '显示执行详情', + restoreDefault: '恢复默认', + customThemeColor: '自定义主题色', + headerTitleFontColor: '头部标题字体颜色', + default: '默认', + askUserAvatar: '提问用户头像', + replace: '替换', + imageMessage: '建议尺寸 32*32,支持 JPG、PNG、GIF,大小不超过 10 MB', + AIAvatar: 'AI 回复头像', + display: '显示', + floatIcon: '浮窗入口图标', + iconDefaultPosition: '图标默认位置', + iconPosition: { + left: '左', + right: '右', + bottom: '下', + top: '上' + }, + draggablePosition: '可拖拽位置', + showHistory: '显示历史记录', + displayGuide: '显示引导图(浮窗模式)', + disclaimer: '免责声明', + disclaimerValue: '「以上内容均由 AI 生成,仅供参考和借鉴」' + } + }, + monitor: { + monitoringStatistics: '监控统计', + customRange: '自定义范围', + startDatePlaceholder: '开始时间', + endDatePlaceholder: '结束时间', + pastDayOptions: { + past7Days: '过去7天', + past30Days: '过去30天', + past90Days: '过去90天', + past183Days: '过去半年', + other: '自定义' + }, + charts: { + customerTotal: '用户总数', + customerNew: '用户新增数', + queryCount: '提问次数', + tokensTotal: 'Tokens 总数', + userSatisfaction: '用户满意度', + approval: '赞同', + disapproval: '反对' + } + } +} diff --git a/ui/src/locales/lang/zh-CN/views/application-workflow.ts b/ui/src/locales/lang/zh-CN/views/application-workflow.ts new file mode 100644 index 00000000000..c7c6038cc5f --- /dev/null +++ b/ui/src/locales/lang/zh-CN/views/application-workflow.ts @@ -0,0 +1,302 @@ +export default { + node: '节点', + nodeName: '节点名称', + 
baseComponent: '基础组件', + nodeSetting: '节点设置', + workflow: '工作流', + searchBar: { + placeholder: '按名称搜索' + }, + info: { + previewVersion: '预览版本:', + saveTime: '保存时间:' + }, + setting: { + restoreVersion: '恢复版本', + restoreCurrentVersion: '恢复此版本', + addComponent: '添加组件', + public: '发布', + releaseHistory: '发布历史', + autoSave: '自动保存', + latestRelease: '最近发布', + copyParam: '复制参数', + debug: '调试', + exit: '直接退出', + exitSave: '保存并退出' + }, + tip: { + publicSuccess: '发布成功', + noData: '没有找到相关结果', + nameMessage: '名字不能为空!', + onlyRight: '只允许从右边的锚点连出', + notRecyclable: '不可循环连线', + onlyLeft: '只允许连接左边的锚点', + applicationNodeError: '该应用不可用', + functionNodeError: '该函数不可用', + repeatedNodeError: '节点名称已存在!', + cannotCopy: '不能被复制', + copyError: '已复制节点', + paramErrorMessage: '参数已存在: ', + saveMessage: '当前的更改尚未保存,是否保存后退出?' + }, + delete: { + confirmTitle: '确定删除该节点?', + deleteMessage: '节点不允许删除' + }, + control: { + zoomOut: '缩小', + zoomIn: '放大', + fitView: '适应', + retract: '收起全部节点', + extend: '展开全部节点', + beautify: '一键美化' + }, + variable: { + label: '变量', + global: '全局变量', + Referencing: '引用变量', + ReferencingRequired: '引用变量必填', + ReferencingError: '引用变量错误', + NoReferencing: '不存在的引用变量', + placeholder: '请选择变量' + }, + condition: { + title: '执行条件', + front: '前置', + AND: '所有', + OR: '任一', + text: '连线节点执行完,执行当前节点' + }, + validate: { + startNodeRequired: '开始节点必填', + startNodeOnly: '开始节点只能有一个', + baseNodeRequired: '基本信息节点必填', + baseNodeOnly: '基本信息节点只能有一个', + notInWorkFlowNode: '未在流程中的节点', + noNextNode: '不存在的下一个节点', + nodeUnavailable: '节点不可用', + needConnect1: '节点的', + needConnect2: '分支需要连接', + cannotEndNode: '节点不能当做结束节点' + }, + nodes: { + startNode: { + label: '开始', + question: '用户问题', + currentTime: '当前时间' + }, + baseNode: { + label: '基本信息', + appName: { + label: '应用名称' + }, + appDescription: { + label: '应用描述' + }, + fileUpload: { + label: '文件上传', + tooltip: '开启后,问答页面会显示上传文件的按钮。' + }, + FileUploadSetting: { + title: '文件上传设置', + maxFiles: '单次上传最多文件数', + fileLimit: '每个文件最大(MB)', + fileUploadType: { + label: 
'上传的文件类型', + documentText: '需要使用“文档内容提取”节点解析文档内容', + imageText: '需要使用“视觉模型”节点解析图片内容', + audioText: '需要使用“语音转文本”节点解析音频内容', + otherText: '需要自行解析该类型文件' + }, + + } + }, + aiChatNode: { + label: 'AI 对话', + text: '与 AI 大模型进行对话', + answer: 'AI 回答内容', + returnContent: { + label: '返回内容', + tooltip: `关闭后该节点的内容则不输出给用户。 + 如果你想让用户看到该节点的输出内容,请打开开关。` + }, + defaultPrompt: '已知信息', + think: '思考过程' + }, + searchDatasetNode: { + label: '知识库检索', + text: '关联知识库,查找与问题相关的分段', + paragraph_list: '检索结果的分段列表', + is_hit_handling_method_list: '满足直接回答的分段列表', + result: '检索结果', + directly_return: '满足直接回答的分段内容', + searchParam: '检索参数', + searchQuestion: { + label: '检索问题', + placeholder: '请选择检索问题', + requiredMessage: '请选择检索问题' + } + }, + questionNode: { + label: '问题优化', + text: '根据历史聊天记录优化完善当前问题,更利于匹配知识库分段', + result: '问题优化结果', + defaultPrompt1: `根据上下文优化和完善用户问题`, + defaultPrompt2: `请输出一个优化后的问题。`, + systemDefault: '你是一个问题优化大师' + }, + conditionNode: { + label: '判断器', + text: '根据不同条件执行不同的节点', + branch_name: '分支名称', + conditions: { + label: '条件', + info: '符合以下', + requiredMessage: '请选择条件' + }, + valueMessage: '请输入值', + addCondition: '添加条件', + addBranch: '添加分支' + }, + replyNode: { + label: '指定回复', + text: '指定回复内容,引用变量会转换为字符串进行输出', + content: '内容', + replyContent: { + label: '回复内容', + custom: '自定义', + reference: '引用变量' + } + }, + rerankerNode: { + label: '多路召回', + text: '使用重排模型对多个知识库的检索结果进行二次召回', + result_list: '重排结果列表', + result: '重排结果', + rerankerContent: { + label: '重排内容', + requiredMessage: '请选择重排内容' + }, + higher: '高于', + ScoreTooltip: 'Score越高相关性越强。', + max_paragraph_char_number: '最大引用字符数', + reranker_model: { + label: '重排模型', + placeholder: '请选择重排模型' + } + }, + formNode: { + label: '表单收集', + text: '在问答过程中用于收集用户信息,可以根据收集到表单数据执行后续流程', + form_content_format1: '你好,请先填写下面表单内容:', + form_content_format2: '填写后请点击【提交】按钮进行提交。', + form_data: '表单全部内容', + formContent: { + label: '表单输出内容', + requiredMessage: '请表单输出内容', + tooltip: '设置执行该节点输出的内容,{ form } 为表单的占位符。' + }, + formAllContent: '表单全部内容', + formSetting: 
'表单配置' + }, + documentExtractNode: { + label: '文档内容提取', + text: '提取文档中的内容', + content: '文档内容' + }, + imageUnderstandNode: { + label: '图片理解', + text: '识别出图片中的对象、场景等信息回答用户问题', + answer: 'AI 回答内容', + model: { + label: '视觉模型', + requiredMessage: '请选择视觉模型' + }, + image: { + label: '选择图片', + requiredMessage: '请选择图片' + } + }, + variableAssignNode: { + label: '变量赋值', + text: '更新全局变量的值', + assign: '赋值' + }, + mcpNode: { + label: 'MCP 调用', + text: '通过SSE/Streamable HTTP方式执行MCP服务中的工具', + getToolsSuccess: '获取工具成功', + getTool: '获取工具', + tool: '工具', + toolParam: '工具参数', + mcpServerTip: '请输入JSON格式的MCP服务器配置', + mcpToolTip: '请选择工具', + configLabel: 'MCP Server Config (仅支持SSE/Streamable HTTP调用方式)' + }, + imageGenerateNode: { + label: '图片生成', + text: '根据提供的文本内容生成图片', + answer: 'AI 回答内容', + model: { + label: '图片生成模型', + requiredMessage: '请选择图片生成模型' + }, + prompt: { + label: '提示词(正向)', + tooltip: '正向提示词,用来描述生成图像中期望包含的元素和视觉特点' + }, + negative_prompt: { + label: '提示词(负向)', + tooltip: '反向提示词,用来描述不希望在画面中看到的内容,可以对画面进行限制。', + placeholder: '请描述不想生成的图片内容,比如:颜色、血腥内容' + } + }, + speechToTextNode: { + label: '语音转文本', + text: '将音频通过语音识别模型转换为文本', + stt_model: { + label: '语音识别模型' + }, + audio: { + label: '选择语音文件', + placeholder: '请选择语音文件' + } + }, + textToSpeechNode: { + label: '文本转语音', + text: '将文本通过语音合成模型转换为音频', + tts_model: { + label: '语音合成模型' + }, + content: { + label: '选择文本内容' + } + }, + functionNode: { + label: '自定义函数', + text: '通过执行自定义脚本,实现数据处理' + }, + applicationNode: { + label: '应用节点' + } + }, + compare: { + is_null: '为空', + is_not_null: '不为空', + contain: '包含', + not_contain: '不包含', + eq: '等于', + ge: '大于等于', + gt: '大于', + le: '小于等于', + lt: '小于', + len_eq: '长度等于', + len_ge: '长度大于等于', + len_gt: '长度大于', + len_le: '长度小于等于', + len_lt: '长度小于', + is_true: '为真', + is_not_true: '不为真' + }, + FileUploadSetting: {} +} diff --git a/ui/src/locales/lang/zh-CN/views/application.ts b/ui/src/locales/lang/zh-CN/views/application.ts new file mode 100644 index 00000000000..dc9b16216bc --- /dev/null +++ 
b/ui/src/locales/lang/zh-CN/views/application.ts @@ -0,0 +1,216 @@ +export default { + title: '应用', + createApplication: '创建应用', + importApplication: '导入应用', + copyApplication: '复制应用', + workflow: '高级编排', + simple: '简单配置', + searchBar: { + placeholder: '按名称搜索' + }, + setting: { + demo: '演示' + }, + delete: { + confirmTitle: '是否删除应用:', + confirmMessage: '删除后该应用将不再提供服务,请谨慎操作。' + }, + tip: { + ExportError: '导出失败', + professionalMessage: '社区版最多支持 5 个应用,如需拥有更多应用,请升级为专业版。', + saveErrorMessage: '保存失败,请检查输入或稍后再试', + loadingErrorMessage: '加载配置失败,请检查输入或稍后再试' + }, + applicationForm: { + title: { + appTest: '调试预览', + copy: '副本' + }, + form: { + appName: { + label: '名称', + placeholder: '请输入应用名称', + requiredMessage: '请输入应用名称' + }, + appDescription: { + label: '描述', + placeholder: '描述该应用的应用场景及用途,如:XXX 小助手回答用户提出的 XXX 产品使用问题' + }, + appType: { + label: '类型', + simplePlaceholder: '适合新手创建小助手', + workflowPlaceholder: '适合高级用户自定义小助手的工作流' + }, + appTemplate: { + blankApp: '空白应用', + assistantApp: '知识库问答助手' + }, + aiModel: { + label: 'AI 模型', + placeholder: '请选择 AI 模型' + }, + roleSettings: { + label: '系统角色', + placeholder: '你是 xxx 小助手' + }, + prompt: { + label: '提示词', + noReferences: ' (无引用知识库)', + references: ' (引用知识库)', + placeholder: '请输入提示词', + requiredMessage: '请输入提示词', + tooltip: + '通过调整提示词内容,可以引导大模型聊天方向,该提示词会被固定在上下文的开头,可以使用变量。', + noReferencesTooltip: + '通过调整提示词内容,可以引导大模型聊天方向,该提示词会被固定在上下文的开头。可以使用变量:{question} 是用户提出问题的占位符。', + referencesTooltip: + '通过调整提示词内容,可以引导大模型聊天方向,该提示词会被固定在上下文的开头。可以使用变量:{data} 是引用知识库中分段的占位符;{question} 是用户提出问题的占位符。', + defaultPrompt: `已知信息:{data} +用户问题:{question} +回答要求: + - 请使用中文回答用户问题` + }, + historyRecord: { + label: '历史聊天记录' + }, + relatedKnowledge: { + label: '关联知识库', + placeholder: '关联的知识库展示在这里' + }, + multipleRoundsDialogue: '多轮对话', + + prologue: '开场白', + defaultPrologue: + '您好,我是 XXX 小助手,您可以向我提出 XXX 使用问题。\n- XXX 主要功能有什么?\n- XXX 如何收费?\n- 需要转人工服务', + + problemOptimization: { + label: '问题优化', + tooltip: '根据历史聊天优化完善当前问题,更利于匹配知识点。' + }, + voiceInput: { + label: 
'语音输入', + placeholder: '请选择语音识别模型', + requiredMessage: '请选择语音输入模型', + autoSend: '自动发送' + }, + voicePlay: { + label: '语音播放', + placeholder: '请选择语音合成模型', + requiredMessage: '请选择语音播放模型', + autoPlay: '自动播放', + browser: '浏览器播放(免费)', + tts: 'TTS模型', + listeningTest: '试听' + }, + reasoningContent: { + label: '输出思考', + tooltip: '请根据模型返回的思考标签设置,标签中间的内容将会认定为思考过程', + start: '开始', + end: '结束' + } + }, + buttons: { + publish: '保存并发布', + + addModel: '添加模型' + }, + + dialog: { + addDataset: '添加关联知识库', + addDatasetPlaceholder: '所选知识库必须使用相同的 Embedding 模型', + selected: '已选', + countDataset: '个知识库', + + selectSearchMode: '检索模式', + vectorSearch: '向量检索', + vectorSearchTooltip: '向量检索是一种基于向量相似度的检索方式,适用于知识库中的大数据量场景。', + fullTextSearch: '全文检索', + fullTextSearchTooltip: + '全文检索是一种基于文本相似度的检索方式,适用于知识库中的小数据量场景。', + hybridSearch: '混合检索', + hybridSearchTooltip: + '混合检索是一种基于向量和文本相似度的检索方式,适用于知识库中的中等数据量场景。', + similarityThreshold: '相似度高于', + similarityTooltip: '相似度越高相关性越强。', + topReferences: '引用分段数 TOP', + maxCharacters: '最多引用字符数', + noReferencesAction: '无引用知识库分段时', + continueQuestioning: '继续向 AI 模型提问', + provideAnswer: '指定回答内容', + designated_answer: + '你好,我是 XXX 小助手,我的知识库只包含了 XXX 产品相关知识,请重新描述您的问题。', + defaultPrompt1: + '()里面是用户问题,根据上下文回答揣测用户问题({question}) 要求: 输出一个补全问题,并且放在', + defaultPrompt2: '标签中' + } + }, + applicationAccess: { + title: '应用接入', + wecom: '企业微信应用', + wecomTip: '打造企业微信智能应用', + dingtalk: '钉钉应用', + dingtalkTip: '打造钉钉智能应用', + wechat: '公众号', + wechatTip: '打造公众号智能应用', + lark: '飞书应用', + larkTip: '打造飞书智能应用', + slack: 'Slack', + slackTip: '打造 Slack 智能应用', + setting: '配置', + callback: '回调地址', + callbackTip: '请输入回调地址', + wecomPlatform: '企业微信后台', + wechatPlatform: '微信公众平台', + dingtalkPlatform: '钉钉开放平台', + larkPlatform: '飞书开放平台', + wecomSetting: { + title: '企业微信应用配置', + cropId: '企业 ID', + cropIdPlaceholder: '请输入企业 ID', + agentIdPlaceholder: '请输入Agent ID', + secretPlaceholder: '请输入Secret', + tokenPlaceholder: '请输入Token', + encodingAesKeyPlaceholder: '请输入EncodingAESKey', + 
authenticationSuccessful: '认证成功', + urlInfo: '-应用管理-自建-创建的应用-接收消息-设置 API 接收的 "URL" 中' + }, + dingtalkSetting: { + title: '钉钉应用配置', + clientIdPlaceholder: '请输入Client ID', + clientSecretPlaceholder: '请输入Client Secret', + urlInfo: '-机器人页面,设置 "消息接收模式" 为 HTTP模式 ,并把上面URL填写到"消息接收地址"中' + }, + wechatSetting: { + title: '公众号应用配置', + appId: '开发者ID (APP ID)', + appIdPlaceholder: '请输入开发者ID (APP ID)', + appSecret: '开发者密钥 (APP SECRET)', + appSecretPlaceholder: '请输入开发者密钥 (APP SECRET)', + token: '令牌 (TOKEN)', + tokenPlaceholder: '请输入令牌 (TOKEN)', + aesKey: '消息加解密密钥', + aesKeyPlaceholder: '请输入消息加解密密钥', + urlInfo: '-设置与开发-基本配置-服务器配置的 "服务器地址URL" 中' + }, + larkSetting: { + title: '飞书应用配置', + appIdPlaceholder: '请输入App ID', + appSecretPlaceholder: '请输入App Secret', + verificationTokenPlaceholder: '请输入Verification Token', + urlInfo: '-事件与回调-事件配置-配置订阅方式的 "请求地址" 中', + folderTokenPlaceholder: '请输入Folder Token' + }, + slackSetting: { + title: 'Slack 应用配置', + signingSecretPlaceholder: '请输入 Signing Secret', + botUserTokenPlaceholder: '请输入 Bot User Token' + }, + copyUrl: '复制链接填入到' + }, + hitTest: { + title: '命中测试', + text: '针对用户提问调试段落匹配情况,保障回答效果。', + emptyMessage1: '命中段落显示在这里', + emptyMessage2: '没有命中的分段' + } +} diff --git a/ui/src/locales/lang/zh-CN/views/dataset.ts b/ui/src/locales/lang/zh-CN/views/dataset.ts new file mode 100644 index 00000000000..d9f9271bae5 --- /dev/null +++ b/ui/src/locales/lang/zh-CN/views/dataset.ts @@ -0,0 +1,94 @@ +export default { + title: '知识库', + createDataset: '创建知识库', + general: '通用型', + web: 'web 站点', + lark: '飞书', + yuque: '语雀', + relatedApplications: '关联应用', + document_count: '文档数', + relatedApp_count: '关联应用', + searchBar: { + placeholder: '按名称搜索' + }, + setting: { + vectorization: '向量化', + sync: '同步' + }, + tip: { + professionalMessage: '社区版最多支持 50 个知识库,如需拥有更多知识库,请升级为专业版。', + syncSuccess: '同步任务发送成功', + updateModeMessage: '修改知识库向量模型后,需要对知识库向量化,是否继续保存?' 
+ }, + delete: { + confirmTitle: '是否删除知识库:', + confirmMessage1: '此知识库关联', + confirmMessage2: '个应用,删除后无法恢复,请谨慎操作。' + }, + + datasetForm: { + title: { + info: '基本信息' + }, + form: { + datasetName: { + label: '知识库名称', + placeholder: '请输入知识库名称', + requiredMessage: '请输入知识库名称' + }, + datasetDescription: { + label: '知识库描述', + placeholder: + '描述知识库的内容,详尽的描述将帮助AI能深入理解该知识库的内容,能更准确的检索到内容,提高该知识库的命中率。', + requiredMessage: '请输入知识库描述' + }, + EmbeddingModel: { + label: '向量模型', + placeholder: '请选择向量模型', + requiredMessage: '请输入Embedding模型' + }, + datasetType: { + label: '知识库类型', + generalInfo: '通过上传文件或手动录入构建知识库', + webInfo: '通过网站链接构建知识库', + larkInfo: '通过飞书文档构建知识库', + yuqueInfo: '通过语雀文档构建知识库' + }, + source_url: { + label: 'Web 根地址', + placeholder: '请输入 Web 根地址', + requiredMessage: ' 请输入 Web 根地址' + }, + user_id: { + requiredMessage: '请输入User ID' + }, + token: { + requiredMessage: '请输入Token' + }, + selector: { + label: '选择器', + placeholder: '默认为 body,可输入 .classname/#idname/tagname' + } + } + }, + ResultSuccess: { + title: '知识库创建成功', + paragraph: '分段', + paragraph_count: '个分段', + documentList: '文档列表', + loading: '导入中', + buttons: { + toDataset: '返回知识库列表', + toDocument: '前往文档' + } + }, + syncWeb: { + title: '同步知识库', + syncMethod: '同步方式', + replace: '替换同步', + replaceText: '重新获取 Web 站点文档,覆盖替换本地知识库中的文档', + complete: '整体同步', + completeText: '先删除本地知识库所有文档,重新获取 Web 站点文档', + tip: '注意:所有同步都会删除已有数据重新获取新数据,请谨慎操作。' + } +} diff --git a/ui/src/locales/lang/zh-CN/views/document.ts b/ui/src/locales/lang/zh-CN/views/document.ts new file mode 100644 index 00000000000..bfdf2907ea4 --- /dev/null +++ b/ui/src/locales/lang/zh-CN/views/document.ts @@ -0,0 +1,176 @@ +export default { + uploadDocument: '上传文档', + importDocument: '导入文档', + syncDocument: '同步文档', + selected: '已选', + items: '项', + searchBar: { + placeholder: '按 文档名称 搜索' + }, + setting: { + migration: '迁移', + cancelGenerateQuestion: '取消生成问题', + cancelVectorization: '取消向量化', + cancelGenerate: '取消生成', + export: '导出' + }, + tip: { + saveMessage: 
'当前的更改尚未保存,确认退出吗?', + cancelSuccess: '批量取消成功', + sendMessage: '发送成功', + vectorizationSuccess: '批量向量化成功', + nameMessage: '文件名称不能为空!', + importMessage: '导入成功', + migrationSuccess: '迁移成功' + }, + upload: { + selectFile: '选择文件', + selectFiles: '选择文件夹', + uploadMessage: '拖拽文件至此上传或', + formats: '支持格式:', + requiredMessage: '请上传文件', + errorMessage1: '文件大小超过 100MB', + errorMessage2: '文件格式不支持', + errorMessage3: '文件不能为空', + errorMessage4: '每次最多上传50个文件', + template: '模版', + download: '下载' + }, + + fileType: { + txt: { + label: '文本文件', + tip1: '1、文件上传前,建议规范文件的分段标识', + tip2: '2、每次最多上传 50 个文件,每个文件不超过 100MB' + }, + table: { + label: '表格', + tip1: '1、点击下载对应模版并完善信息:', + tip2: '2、第一行必须是列标题,且列标题必须是有意义的术语,表中每条记录将作为一个分段', + tip3: '3、上传的表格文件中每个 sheet 会作为一个文档,sheet名称为文档名称', + tip4: '4、每次最多上传 50 个文件,每个文件不超过 100MB' + }, + QA: { + label: 'QA 问答对', + tip1: '1、点击下载对应模版并完善信息', + tip2: '2、上传的表格文件中每个 sheet 会作为一个文档,sheet名称为文档名称', + tip3: '3、每次最多上传 50 个文件,每个文件不超过 100MB' + }, + lark: {} + }, + setRules: { + title: { + setting: '设置分段规则', + preview: '分段预览' + }, + intelligent: { + label: '智能分段(推荐)', + text: '不了解如何设置分段规则推荐使用智能分段' + }, + advanced: { + label: '高级分段', + text: '用户可根据文档规范自行设置分段标识符、分段长度以及清洗规则' + }, + patterns: { + label: '分段标识', + tooltip: '按照所选符号先后顺序做递归分割,分割结果超出分段长度将截取至分段长度。', + placeholder: '请选择' + }, + limit: { + label: '分段长度' + }, + with_filter: { + label: '自动清洗', + text: '去掉重复多余符号空格、空行、制表符' + }, + checkedConnect: { + label: '导入时添加分段标题为关联问题(适用于标题为问题的问答对)' + } + }, + buttons: { + prev: '上一步', + next: '下一步', + import: '开始导入', + preview: '生成预览' + }, + table: { + name: '文件名称', + char_length: '字符数', + paragraph: '分段', + all: '全部', + updateTime: '更新时间' + }, + fileStatus: { + label: '文件状态', + SUCCESS: '成功', + FAILURE: '失败', + EMBEDDING: '索引中', + PENDING: '排队中', + GENERATE: '生成中', + SYNC: '同步中', + REVOKE: '取消中', + finish: '完成' + }, + enableStatus: { + label: '启用状态', + enable: '开启', + close: '关闭' + }, + sync: { + label: '同步', + confirmTitle: '确认同步文档?', + confirmMessage1: '同步将删除已有数据重新获取新数据,请谨慎操作。', 
+ confirmMessage2: '无法同步,请先去设置文档 URL地址', + successMessage: '同步文档成功' + }, + delete: { + confirmTitle1: '是否批量删除', + confirmTitle2: '个文档?', + confirmMessage: '所选文档中的分段会跟随删除,请谨慎操作。', + successMessage: '批量删除成功', + confirmTitle3: '是否删除文档:', + confirmMessage1: '此文档下的', + confirmMessage2: '个分段都会被删除,请谨慎操作。' + }, + form: { + source_url: { + label: '文档地址', + placeholder: '请输入文档地址,一行一个,地址不正确文档会导入失败。', + requiredMessage: '请输入文档地址' + }, + selector: { + label: '选择器', + placeholder: '默认为 body,可输入 .classname/#idname/tagname' + }, + hit_handling_method: { + label: '命中处理方式', + tooltip: '用户提问时,命中文档下的分段时按照设置的方式进行处理。' + }, + similarity: { + label: '相似度高于', + placeholder: '直接返回分段内容', + requiredMessage: '请输入相似度' + } + }, + hitHandlingMethod: { + optimization: '模型优化', + directly_return: '直接回答' + }, + generateQuestion: { + title: '生成问题', + successMessage: '生成问题成功', + tip1: '提示词中的 {data} 为分段内容的占位符,执行时替换为分段内容发送给 AI 模型;', + tip2: 'AI 模型根据分段内容生成相关问题,请将生成的问题放至', + tip3: '标签中,系统会自动关联标签中的问题;', + tip4: '生成效果依赖于所选模型和提示词,用户可自行调整至最佳效果。', + prompt1: `内容:{data}\n\n请总结上面的内容,并根据内容总结生成 5 个问题。\n回答要求:\n- 请只输出问题;\n- 请将每个问题放置`, + prompt2: `标签中。` + }, + feishu: { + selectDocument: '选择文档', + tip1: '支持文档和表格类型,包含TXT、Markdown、PDF、DOCX、HTML、XLS、XLSX、CSV、ZIP格式;', + tip2: '系统不存储原始文档,导入文档前,建议规范文档的分段标识。', + allCheck: '全选', + errorMessage1: '请选择文档' + } +} diff --git a/ui/src/locales/lang/zh-CN/views/function-lib.ts b/ui/src/locales/lang/zh-CN/views/function-lib.ts new file mode 100644 index 00000000000..573d7b57edf --- /dev/null +++ b/ui/src/locales/lang/zh-CN/views/function-lib.ts @@ -0,0 +1,78 @@ +export default { + title: '函数库', + internalTitle: '内置函数', + added: '已添加', + createFunction: '创建函数', + editFunction: '编辑函数', + copyFunction: '复制函数', + importFunction: '导入函数', + searchBar: { + placeholder: '按函数名称搜索' + }, + setting: { + disabled: '禁用' + }, + tip: { + saveMessage: '当前的更改尚未保存,确认退出吗?' 
+ }, + delete: { + confirmTitle: '是否删除函数:', + confirmMessage: '删除后,引用了该函数的应用提问时会报错 ,请谨慎操作。' + }, + disabled: { + confirmTitle: '是否禁用函数:', + confirmMessage: '禁用后,引用了该函数的应用提问时会报错 ,请谨慎操作。' + }, + functionForm: { + title: { + copy: '副本', + baseInfo: '基础信息' + }, + form: { + functionName: { + label: '名称', + name: '函数名称', + placeholder: '请输入函数名称', + requiredMessage: '请输入函数名称' + }, + functionDescription: { + label: '描述', + placeholder: '请输入函数的描述' + }, + permission_type: { + label: '权限', + requiredMessage: '请选择' + }, + paramName: { + label: '参数名', + placeholder: '请输入参数名', + requiredMessage: '请输入参数名' + }, + dataType: { + label: '数据类型' + }, + source: { + label: '来源', + custom: '自定义', + reference: '引用参数' + }, + required: { + label: '是否必填' + }, + param: { + paramInfo1: '使用函数时显示', + paramInfo2: '使用函数时不显示', + code: '函数内容(Python)', + selectPlaceholder: '请选择参数', + inputPlaceholder: '请输入参数值' + }, + debug: { + run: '运行', + output: '输出', + runResult: '运行结果', + runSuccess: '运行成功', + runFailed: '运行失败' + } + } + } +} diff --git a/ui/src/locales/lang/zh-CN/views/index.ts b/ui/src/locales/lang/zh-CN/views/index.ts new file mode 100644 index 00000000000..b8c85b0310e --- /dev/null +++ b/ui/src/locales/lang/zh-CN/views/index.ts @@ -0,0 +1,34 @@ +import notFound from './404' +import application from './application' +import applicationOverview from './application-overview' +import dataset from './dataset' +import system from './system' +import functionLib from './function-lib' +import user from './user' +import team from './team' +import template from './template' +import document from './document' +import paragraph from './paragraph' +import problem from './problem' +import log from './log' +import applicationWorkflow from './application-workflow' +import login from './login' +import operateLog from './operate-log' +export default { + notFound, + application, + applicationOverview, + dataset, + system, + functionLib, + user, + team, + template, + document, + paragraph, + problem, + log, + 
applicationWorkflow, + login, + operateLog +} diff --git a/ui/src/locales/lang/zh-CN/views/log.ts b/ui/src/locales/lang/zh-CN/views/log.ts new file mode 100644 index 00000000000..c866a955192 --- /dev/null +++ b/ui/src/locales/lang/zh-CN/views/log.ts @@ -0,0 +1,41 @@ +export default { + title: '对话日志', + delete: { + confirmTitle: '是否删除问题:', + confirmMessage1: '删除问题关联的', + confirmMessage2: '个分段会被取消关联,请谨慎操作。' + }, + buttons: { + clearStrategy: '清除策略', + prev: '上一条', + next: '下一条' + }, + table: { + abstract: '摘要', + chat_record_count: '对话提问数', + user: '用户', + feedback: { + label: '用户反馈', + star: '赞同', + trample: '反对' + }, + mark: '改进标注', + recenTimes: '最近对话时间' + }, + addToDataset: '添加至知识库', + daysText: '天之前的对话记录', + selectDataset: '选择知识库', + selectDatasetPlaceholder: '请选择知识库', + saveToDocument: '保存至文档', + documentPlaceholder: '请选择文档', + editContent: '修改内容', + editMark: '修改标注', + form: { + content: { + placeholder: '请输入内容' + }, + title: { + placeholder: '请给当前内容设置一个标题,以便管理查看' + } + } +} diff --git a/ui/src/locales/lang/zh-CN/views/login.ts b/ui/src/locales/lang/zh-CN/views/login.ts new file mode 100644 index 00000000000..c3ccf725e3a --- /dev/null +++ b/ui/src/locales/lang/zh-CN/views/login.ts @@ -0,0 +1,24 @@ +export default { + title: '普通登录', + jump_tip: '即将跳转至认证源页面进行认证', + jump: '跳转', + resetPassword: '修改密码', + forgotPassword: '忘记密码', + userRegister: '用户注册', + buttons: { + login: '登录', + register: '注册', + backLogin: '返回登录', + checkCode: '立即验证' + }, + newPassword: '新密码', + enterPassword: '请输入修改密码', + useEmail: '使用邮箱', + moreMethod: '更多登录方式', + verificationCode: { + placeholder: '请输入验证码', + getVerificationCode: '获取验证码', + successMessage: '验证码发送成功', + resend: '重新发送' + } +} diff --git a/ui/src/locales/lang/zh-CN/views/operate-log.ts b/ui/src/locales/lang/zh-CN/views/operate-log.ts new file mode 100644 index 00000000000..3bb1d87d30f --- /dev/null +++ b/ui/src/locales/lang/zh-CN/views/operate-log.ts @@ -0,0 +1,31 @@ +export default { + title: '操作日志', + table: { + menu: { + 
label: '操作菜单' + }, + operate: { + label: '操作', + detail: '操作详情' + }, + user: { + label: '操作用户' + }, + status: { + label: '状态', + success: '成功', + fail: '失败', + all: '全部' + }, + ip_address: { + label: 'IP地址' + }, + opt: { + label: 'API详情' + }, + operateTime: { + label: '操作时间' + } + }, + close: '关闭' +} diff --git a/ui/src/locales/lang/zh-CN/views/paragraph.ts b/ui/src/locales/lang/zh-CN/views/paragraph.ts new file mode 100644 index 00000000000..cf38b908f8b --- /dev/null +++ b/ui/src/locales/lang/zh-CN/views/paragraph.ts @@ -0,0 +1,32 @@ +export default { + title: '段落', + paragraph_count: '段落', + editParagraph: '编辑分段', + addParagraph: '添加分段', + paragraphDetail: '分段详情', + character_count: '个字符', + setting: { + batchSelected: '批量选择', + cancelSelected: '取消选择' + }, + delete: { + confirmTitle: '是否删除段落:', + confirmMessage: '删除后无法恢复,请谨慎操作。' + }, + relatedProblem: { + title: '关联问题', + placeholder: '请选择问题' + }, + form: { + paragraphTitle: { + label: '分段标题', + placeholder: '请输入分段标题' + }, + content: { + label: '分段内容', + placeholder: '请输入分段内容', + requiredMessage1: '请输入分段内容', + requiredMessage2: '内容最多不超过 100000 个字' + } + } +} diff --git a/ui/src/locales/lang/zh-CN/views/problem.ts b/ui/src/locales/lang/zh-CN/views/problem.ts new file mode 100644 index 00000000000..bb53275aa28 --- /dev/null +++ b/ui/src/locales/lang/zh-CN/views/problem.ts @@ -0,0 +1,37 @@ +export default { + title: '问题', + createProblem: '创建问题', + detailProblem: '问题详情', + quickCreateProblem: '快速创建问题', + quickCreateName: '问题', + tip: { + placeholder: '请输入问题,支持输入多个,一行一个。', + errorMessage: '问题不能为空!', + requiredMessage: '请输入问题', + relatedSuccess:'批量关联分段成功' + }, + + setting: { + batchDelete: '批量删除', + cancelRelated: '取消关联' + }, + searchBar: { + placeholder: '按名称搜索' + }, + table: { + paragraph_count: '关联分段数', + updateTime: '更新时间' + }, + delete: { + confirmTitle: '是否删除问题:', + confirmMessage1: '删除问题关联的', + confirmMessage2: '个分段会被取消关联,请谨慎操作。' + }, + relateParagraph: { + title: '关联分段', + selectDocument: '选择文档', + 
placeholder: '按 文档名称 搜索', + selectedParagraph: '已选分段', + count: '个' + }, +} diff --git a/ui/src/locales/lang/zh-CN/views/system.ts b/ui/src/locales/lang/zh-CN/views/system.ts new file mode 100644 index 00000000000..72624d26a48 --- /dev/null +++ b/ui/src/locales/lang/zh-CN/views/system.ts @@ -0,0 +1,152 @@ +export default { + title: '系统管理', + subTitle: '系统设置', + test: '测试连接', + testSuccess: '测试连接成功', + testFailed: '测试连接失败', + password: '密码', + authentication: { + title: '登录认证', + ldap: { + title: 'LDAP', + address: 'LDAP 地址', + serverPlaceholder: '请输入LDAP 地址', + bindDN: '绑定DN', + bindDNPlaceholder: '请输入绑定 DN', + ou: '用户OU', + ouPlaceholder: '请输入用户 OU', + ldap_filter: '用户过滤器', + ldap_filterPlaceholder: '请输入用户过滤器', + ldap_mapping: 'LDAP 属性映射', + ldap_mappingPlaceholder: '请输入 LDAP 属性映射', + enableAuthentication: '启用 LDAP 认证' + }, + cas: { + title: 'CAS', + ldpUri: 'ldpUri', + ldpUriPlaceholder: '请输入ldpUri', + validateUrl: '验证地址', + validateUrlPlaceholder: '请输入验证地址', + redirectUrl: '回调地址', + redirectUrlPlaceholder: '请输入回调地址', + enableAuthentication: '启用 CAS 认证' + }, + oidc: { + title: 'OIDC', + authEndpoint: '授权端地址', + authEndpointPlaceholder: '请输入授权端地址', + tokenEndpoint: 'Token端地址', + tokenEndpointPlaceholder: '请输入 Token 端地址', + userInfoEndpoint: '用户信息端地址', + userInfoEndpointPlaceholder: '请输入用户信息端地址', + scopePlaceholder: '请输入连接范围', + clientId: '客户端 ID', + clientIdPlaceholder: '请输入客户端 ID', + clientSecret: '客户端密钥', + clientSecretPlaceholder: '请输入客户端密钥', + logoutEndpoint: '注销端地址', + logoutEndpointPlaceholder: '请输入注销端地址', + redirectUrl: '回调地址', + redirectUrlPlaceholder: '请输入回调地址', + enableAuthentication: '启用 OIDC 认证' + }, + + oauth2: { + title: 'OAuth2', + authEndpoint: '授权端地址', + authEndpointPlaceholder: '请输入授权端地址', + tokenEndpoint: 'Token 端地址', + tokenEndpointPlaceholder: '请输入 Token 端地址', + userInfoEndpoint: '用户信息端地址', + userInfoEndpointPlaceholder: '请输入用户信息端地址', + scope: '连接范围', + scopePlaceholder: '请输入连接范围', + clientId: '客户端 ID', + clientIdPlaceholder: '请输入客户端 ID', + 
clientSecret: '客户端密钥', + clientSecretPlaceholder: '请输入客户端密钥', + redirectUrl: '回调地址', + redirectUrlPlaceholder: '请输入回调地址', + filedMapping: '字段映射', + filedMappingPlaceholder: '请输入字段映射', + enableAuthentication: '启用 OAuth2 认证' + }, + scanTheQRCode: { + title: '扫码登录', + wecom: '企业微信', + dingtalk: '钉钉', + lark: '飞书', + effective: '有效', + alreadyTurnedOn: '已开启', + notEnabled: '未开启', + validate: '校验', + validateSuccess: '校验成功', + validateFailed: '校验失败', + validateFailedTip: '请填写所有必填项并确保格式正确', + appKeyPlaceholder: '请输入 App Key', + appSecretPlaceholder: '请输入 App Secret', + corpIdPlaceholder: '请输入 Corp Id', + agentIdPlaceholder: '请输入 Agent Id', + callbackWarning: '请输入有效的 URL 地址', + larkQrCode: '飞书扫码登录', + dingtalkQrCode: '钉钉扫码登录', + setting: '设置', + access: '接入' + } + }, + theme: { + title: '外观设置', + platformDisplayTheme: '平台显示主题', + customTheme: '自定义主题', + platformLoginSettings: '平台登录设置', + custom: '自定义', + pagePreview: '页面预览', + default: '默认', + restoreDefaults: '恢复默认', + orange: '活力橙', + green: '松石绿', + purple: '神秘紫', + red: '胭脂红', + loginBackground: '登录背景图', + loginLogo: '登录 Logo', + websiteLogo: '网站 Logo', + replacePicture: '替换图片', + websiteLogoTip: '顶部网站显示的 Logo,建议尺寸 48*48,支持 JPG、PNG、GIF,大小不超过 10MB', + loginLogoTip: '登录页面右侧 Logo,建议尺寸 204*52,支持 JPG、PNG、GIF,大小不超过 10 MB', + loginBackgroundTip: + '左侧背景图,矢量图建议尺寸 576*900,位图建议尺寸 1152*1800;支持 JPG、PNG、GIF,大小不超过 10 MB', + websiteName: '网站名称', + websiteNamePlaceholder: '请输入网站名称', + websiteNameTip: '显示在网页 Tab 的平台名称', + websiteSlogan: '欢迎语', + websiteSloganPlaceholder: '请输入欢迎语', + websiteSloganTip: '产品 Logo 下的欢迎语', + defaultSlogan: '欢迎使用 MaxKB 开源 AI 助手', + logoDefaultTip: '默认为 MaxKB 登录界面,支持自定义设置', + defaultTip: '默认为 MaxKB 平台界面,支持自定义设置', + platformSetting: '平台设置', + showUserManual: '显示用户手册', + showForum: '显示论坛求助', + showProject: '显示项目地址', + urlPlaceholder: '请输入 URL 地址', + abandonUpdate: '放弃更新', + saveAndApply: '保存并应用', + fileMessageError: '文件大小超过 10M', + saveSuccess: '外观设置成功' + }, + email: { + title: '邮箱设置', + smtpHost: 'SMTP Host', 
+ smtpHostPlaceholder: '请输入 SMTP Host', + smtpPort: 'SMTP Port', + smtpPortPlaceholder: '请输入 SMTP Port', + smtpUser: 'SMTP 账户', + smtpUserPlaceholder: '请输入 SMTP 账户', + sendEmail: '发件人邮箱', + sendEmailPlaceholder: '请输入发件人邮箱', + smtpPassword: '发件人密码', + smtpPasswordPlaceholder: '请输入发件人密码', + enableSSL: '启用 SSL(如果 SMTP 端口是 465,通常需要启用 SSL)', + enableTLS: '启用 TLS(如果 SMTP 端口是 587,通常需要启用 TLS)' + } +} diff --git a/ui/src/locales/lang/zh-CN/views/team.ts b/ui/src/locales/lang/zh-CN/views/team.ts new file mode 100644 index 00000000000..58a2bc30cf7 --- /dev/null +++ b/ui/src/locales/lang/zh-CN/views/team.ts @@ -0,0 +1,31 @@ +export default { + title: '团队成员', + member: '成员', + manage: '所有者', + permissionSetting: '权限设置', + addMember: '添加成员', + addSubTitle: '成员登录后可以访问到您授权的数据。', + searchBar: { + placeholder: '请输入用户名搜索' + }, + delete: { + button: '移除', + confirmTitle: '是否移除成员:', + confirmMessage: '移除后将会取消成员拥有的知识库和应用权限。' + }, + setting: { + management: '管理', + check: '查看' + }, + teamForm: { + form: { + userName: { + label: '用户名/邮箱', + placeholder: '请输入成员的用户名或邮箱', + requiredMessage: '请输入用户名/邮箱' + }, + + }, + + } +} diff --git a/ui/src/locales/lang/zh-CN/views/template.ts b/ui/src/locales/lang/zh-CN/views/template.ts new file mode 100644 index 00000000000..84241990d4e --- /dev/null +++ b/ui/src/locales/lang/zh-CN/views/template.ts @@ -0,0 +1,83 @@ +export default { + title: '模型设置', + provider: '供应商', + providerPlaceholder: '选择供应商', + addModel: '添加模型', + searchBar: { + placeholder: '按名称搜索' + }, + delete: { + confirmTitle: '删除模型', + confirmMessage: '是否删除模型:' + }, + tip: { + createSuccessMessage: '创建模型成功', + createErrorMessage: '基础信息有填写错误', + errorMessage: '变量已存在: ', + emptyMessage1: '请先选择基础信息的模型类型和基础模型', + emptyMessage2: '所选模型不支持参数设置', + updateSuccessMessage: '修改模型成功', + saveSuccessMessage: '模型参数保存成功', + downloadError: '下载失败', + noModel: '模型在Ollama不存在' + }, + model: { + allModel: '全部模型', + publicModel: '公有模型', + privateModel: '私有模型', + LLM: '大语言模型', + EMBEDDING: '向量模型', + RERANKER: 
'重排模型', + STT: '语音识别', + TTS: '语音合成', + IMAGE: '视觉模型', + TTI: '图片生成' + }, + templateForm: { + title: { + baseInfo: '基础信息', + advancedInfo: '高级设置', + modelParams: '模型参数', + editParam: '编辑参数', + addParam: '添加参数', + paramSetting: '模型参数设置', + apiParamPassing: '接口传参' + }, + form: { + templateName: { + label: '模型名称', + placeholder: '请给基础模型设置一个名称', + tooltip: 'MaxKB 中自定义的模型名称', + requiredMessage: '模型名称不能为空' + }, + permissionType: { + label: '权限', + privateDesc: '仅当前用户使用', + publicDesc: '所有用户都可使用', + requiredMessage: '权限不能为空' + }, + model_type: { + label: '模型类型', + placeholder: '请选择模型类型', + tooltip1: '大语言模型:在应用中与AI对话的推理模型。', + tooltip2: '向量模型:在知识库中对文档内容进行向量化的模型。', + tooltip3: '语音识别:在应用中开启语音识别后用于语音转文字的模型。', + tooltip4: '语音合成:在应用中开启语音播放后用于文字转语音的模型。', + tooltip5: '重排模型:在高级编排应用中使用多路召回时,对候选分段进行重新排序的模型。', + tooltip6: '视觉模型:在高级编排应用中用于图片理解的视觉模型。', + tooltip7: '图片生成:在高级编排应用中用于图片生成的视觉模型。', + requiredMessage: '模型类型不能为空' + }, + base_model: { + label: '基础模型', + tooltip: '列表中未列出的模型,直接输入模型名称,回车即可添加', + placeholder: '自定义输入基础模型后回车即可', + requiredMessage: '基础模型不能为空' + } + } + }, + download: { + downloading: '正在下载中', + cancelDownload: '取消下载' + } +} diff --git a/ui/src/locales/lang/zh-CN/views/user.ts b/ui/src/locales/lang/zh-CN/views/user.ts new file mode 100644 index 00000000000..191074c0c06 --- /dev/null +++ b/ui/src/locales/lang/zh-CN/views/user.ts @@ -0,0 +1,72 @@ +export default { + title: '用户管理', + createUser: '创建用户', + editUser: '编辑用户', + setting: { + updatePwd: '修改用户密码' + }, + tip: { + professionalMessage: '社区版最多支持 2 个用户,如需拥有更多用户,请升级为专业版。', + updatePwdSuccess: '修改用户密码成功' + }, + delete: { + confirmTitle: '是否删除用户:', + confirmMessage: '删除用户,该用户创建的资源(应用、知识库、模型)都会删除,请谨慎操作。' + }, + disabled: { + confirmTitle: '是否禁用函数:', + confirmMessage: '禁用后,引用了该函数的应用提问时会报错 ,请谨慎操作。' + }, + userForm: { + form: { + username: { + label: '用户名', + placeholder: '请输入用户名', + requiredMessage: '请输入用户名', + lengthMessage: '长度在 6 到 20 个字符' + }, + captcha: { + label: '验证码', + placeholder: '请输入验证码' + }, + nick_name: { + 
label: '姓名', + placeholder: '请输入姓名' + }, + email: { + label: '邮箱', + placeholder: '请输入邮箱', + requiredMessage: '请输入邮箱', + validatorEmail: '请输入有效邮箱格式!' + }, + phone: { + label: '手机号', + placeholder: '请输入手机号' + }, + password: { + label: '登录密码', + placeholder: '请输入密码', + requiredMessage: '请输入密码', + lengthMessage: '长度在 6 到 20 个字符' + }, + new_password: { + label: '新密码', + placeholder: '请输入新密码', + requiredMessage: '请输入新密码' + }, + re_password: { + label: '确认密码', + placeholder: '请输入确认密码', + requiredMessage: '请输入确认密码', + validatorMessage: '密码不一致' + } + } + }, + source: { + label: '用户类型', + local: '系统用户', + wecom: '企业微信', + lark: '飞书', + dingtalk: '钉钉' + } +} diff --git a/ui/src/locales/lang/zh-Hant/ai-chat.ts b/ui/src/locales/lang/zh-Hant/ai-chat.ts new file mode 100644 index 00000000000..75f9949a6dc --- /dev/null +++ b/ui/src/locales/lang/zh-Hant/ai-chat.ts @@ -0,0 +1,97 @@ +export default { + noHistory: '暫無歷史記錄', + createChat: '新建對話', + history: '歷史記錄', + only20history: '僅顯示最近 20 條對話', + question_count: '條提問', + exportRecords: '導出聊天記錄', + chatId: '對話 ID', + userInput: '用戶輸入', + quote: '引用', + download: '點擊下載文件', + transcribing: '轉文字中', + passwordValidator: { + title: '請輸入密碼打開連結', + errorMessage1: '密碼不能為空', + errorMessage2: '密碼錯誤' + }, + operation: { + play: '點擊播放', + pause: '停止', + regeneration: '換個答案', + like: '贊同', + cancelLike: '取消贊同', + oppose: '反對', + cancelOppose: '取消反對', + continue: '繼續', + stopChat: '停止回答', + startChat: '開始對話' + }, + tip: { + error500Message: '抱歉,當前正在維護,無法提供服務,請稍後再試!', + errorIdentifyMessage: '無法識別用戶身份', + errorLimitMessage: '抱歉,您的提問已達最大限制,請明天再來吧!', + answerMessage: '抱歉,沒有查找到相關內容,請重新描述您的問題或提供更多資訊。', + stopAnswer: '已停止回答', + answerLoading: '回答中', + recorderTip: `

該功能需要使用麥克風,瀏覽器禁止不安全頁面錄音,解決方案如下:
+1、可開啟 https 解決;
+2、若無 https 配置則需要修改瀏覽器安全配置,Chrome 設定如下:
+(1) 地址欄輸入 chrome://flags/#unsafely-treat-insecure-origin-as-secure;
+(2) 將 http 站點配置在文字框中,例如: http://127.0.0.1:8080。

`, + recorderError: '錄音失敗', + confirm: '我知道了', + requiredMessage: '請填寫所有必填欄位', + inputParamMessage1: '請在 URL 中填寫參數', + inputParamMessage2: '的值', + prologueMessage: '抱歉,當前正在維護,無法提供服務,請稍後再試!' + }, + inputPlaceholder: { + speaking: '說話中', + recorderLoading: '轉文字中', + default: '請輸入問題' + }, + uploadFile: { + label: '上傳文件', + most: '最多', + limit: '個,每個文件限制', + fileType: '文件類型', + tipMessage: '請在文件上傳配置中選擇文件類型', + limitMessage1: '最多上傳', + limitMessage2: '個文件', + sizeLimit: '單個文件大小不能超過', + imageMessage: '請解析圖片內容', + errorMessage: '上傳失敗' + }, + executionDetails: { + title: '執行詳細', + paramOutputTooltip: '每個文件僅支持預覽 500 字', + audioFile: '語音文件', + searchContent: '檢索內容', + searchResult: '檢索結果', + conditionResult: '判斷結果', + currentChat: '本次對話', + answer: 'AI 回答', + replyContent: '回覆內容', + textContent: '文本內容', + input: '輸入', + output: '輸出', + rerankerContent: '重排內容', + rerankerResult: '重排結果', + paragraph: '段落', + noSubmit: '用戶未提交', + errMessage: '錯誤日誌' + }, + KnowledgeSource: { + title: '知識來源', + referenceParagraph: '引用段落', + consume: '消耗tokens', + consumeTime: '耗時' + }, + paragraphSource: { + title: '知識庫引用', + question: '用戶問題', + optimizationQuestion: '優化後問題' + }, + editTitle: '編輯標題' +} diff --git a/ui/src/locales/lang/zh-Hant/common.ts b/ui/src/locales/lang/zh-Hant/common.ts new file mode 100644 index 00000000000..8e6293076c9 --- /dev/null +++ b/ui/src/locales/lang/zh-Hant/common.ts @@ -0,0 +1,67 @@ +export default { + create: '創建', + createSuccess: '創建成功', + copy: '複製', + copySuccess: '複製成功', + copyError: '複製失敗', + save: '儲存', + saveSuccess: '儲存成功', + delete: '刪除', + deleteSuccess: '刪除成功', + setting: '設定', + settingSuccess: '設定成功', + submit: '提交', + submitSuccess: '提交成功', + edit: '編輯', + editSuccess: '編輯成功', + modify: '修改', + modifySuccess: '修改成功', + add: '添加', + addSuccess: '添加成功', + cancel: '取消', + confirm: '確認', + tip: '提示', + refresh: '重新整理', + search: '搜尋', + clear: '清除', + professional: '購買專業版', + createDate: '創建日期', + createTime: '創建時間', + operation: '操作', + character: 
'字符', + export: '匯出', + exportSuccess: '匯出成功', + unavailable: '(不可用)', + public: '公有', + private: '私有', + paramSetting: '參數設定', + creator: '建立者', + author: '作者', + debug: '調試', + required: '必填', + noData: '暫無數據', + result: '結果', + fileUpload: { + document: '文檔', + image: '圖片', + audio: '音頻', + video: '視頻', + other: '其他文件', + addExtensions: '添加後綴名', + existingExtensionsTip: '文件後綴已存在', + }, + status: { + label: '狀態', + enableSuccess: '啟用成功', + disableSuccess: '停用成功' + }, + inputPlaceholder: '請輸入', + title: '標題', + content: '內容', + param: { + outputParam: '輸出參數', + inputParam: '輸入參數', + initParam: '啟動參數' + }, + rename: '重命名' +} diff --git a/ui/src/locales/lang/zh-Hant/components.ts b/ui/src/locales/lang/zh-Hant/components.ts new file mode 100644 index 00000000000..da25a3709d2 --- /dev/null +++ b/ui/src/locales/lang/zh-Hant/components.ts @@ -0,0 +1,12 @@ +export default { + quickCreatePlaceholder: '快速創建空白文檔', + quickCreateName: '文檔名稱', + noData: '無匹配數據', + loading: '加載中', + noMore: '到底啦!', + selectParagraph: { + title: '選擇分段', + error: '僅執行未成功分段', + all: '全部分段' + } +} diff --git a/ui/src/locales/lang/zh-Hant/dynamics-form.ts b/ui/src/locales/lang/zh-Hant/dynamics-form.ts new file mode 100644 index 00000000000..e75c2393d18 --- /dev/null +++ b/ui/src/locales/lang/zh-Hant/dynamics-form.ts @@ -0,0 +1,102 @@ +export default { + input_type_list: { + TextInput: '文字框', + PasswordInput: '密文框', + Slider: '滑桿', + SwitchInput: '開關', + SingleSelect: '單選框', + MultiSelect: '多選框', + DatePicker: '日期選擇器', + JsonInput: 'JSON文字框', + RadioCard: '選項卡', + RadioRow: '單行選項卡' + }, + default: { + label: '預設值', + placeholder: '請輸入預設值', + requiredMessage: '為必填屬性', + show: '顯示預設值' + }, + tip: { + requiredMessage: '不能為空', + jsonMessage: 'JSON格式不正確' + }, + searchBar: { + placeholder: '請輸入關鍵字搜索' + }, + paramForm: { + field: { + label: '參數', + placeholder: '請輸入參數', + requiredMessage: '參數 為必填屬性', + requiredMessage2: '只能輸入字母、數字和底線' + }, + name: { + label: '顯示名稱', + placeholder: '請輸入顯示名稱', + 
requiredMessage: '顯示名稱 為必填屬性' + }, + tooltip: { + label: '參數提示說明', + placeholder: '請輸入參數提示說明' + }, + required: { + label: '是否必填', + requiredMessage: '是否必填 為必填屬性' + }, + input_type: { + label: '組件類型', + placeholder: '請選擇組件類型', + requiredMessage: '組件類型 為必填屬性' + } + }, + DatePicker: { + placeholder: '選擇日期', + year: '年', + month: '月', + date: '日期', + datetime: '日期時間', + dataType: { + label: '時間類型', + placeholder: '請選擇時間類型' + }, + format: { + label: '格式', + placeholder: '請選擇格式' + } + }, + Select: { + label: '選項值', + placeholder: '請輸入選項值' + }, + tag: { + label: '標籤', + placeholder: '請輸入選項標籤' + }, + Slider: { + showInput: { + label: '是否帶輸入框' + }, + valueRange: { + label: '取值範圍', + minRequired: '最小值必填', + maxRequired: '最大值必填' + }, + step: { + label: '步長值', + requiredMessage1: '步長值必填', + requiredMessage2: '步長不能為0' + } + }, + TextInput: { + length: { + label: '文字長度', + minRequired: '最小長度必填', + maxRequired: '最大長度必填', + requiredMessage1: '長度在', + requiredMessage2: '到', + requiredMessage3: '個字元', + requiredMessage4: '文字長度為必填參數' + } + } +} diff --git a/ui/src/locales/lang/zh-Hant/index.ts b/ui/src/locales/lang/zh-Hant/index.ts new file mode 100644 index 00000000000..eb809475d7e --- /dev/null +++ b/ui/src/locales/lang/zh-Hant/index.ts @@ -0,0 +1,17 @@ +import zhTw from 'element-plus/es/locale/lang/zh-tw' +import components from './components' +import layout from './layout' +import views from './views' +import common from './common' +import dynamicsForm from './dynamics-form' +import chat from './ai-chat' +export default { + lang: '繁體中文', + layout, + common, + views, + components, + zhTw, + dynamicsForm, + chat +} diff --git a/ui/src/locales/lang/zh-Hant/layout.ts b/ui/src/locales/lang/zh-Hant/layout.ts new file mode 100644 index 00000000000..2f8662f84a1 --- /dev/null +++ b/ui/src/locales/lang/zh-Hant/layout.ts @@ -0,0 +1,34 @@ +export default { + github: '項目地址', + wiki: '使用者手冊', + forum: '論壇求助', + logout: '退出', + + apiKey: 'API Key 管理', + apiServiceAddress: 'API 服務地址', + 
language: '語言', + isExpire: '未上傳 License 或 License 已過期。', + about: { + title: '關於', + expiredTime: '到期時間', + edition: { + label: '版本', + community: '社群版', + professional: '專業版' + }, + version: '版本號', + serialNo: '序列號', + remark: '備註', + update: '更新', + authorize: '授權給' + }, + time: { + daysLater: '天後', + hoursLater: '小時後', + expired: '已過期', + expiringSoon: '即將到期' + }, + copyright: '版權所有 © 2014-2025 杭州飛致雲信息科技有限公司', + userManualUrl:'https://maxkb.cn/docs/', + forumUrl: 'https://github.com/1Panel-dev/MaxKB/discussions' +} diff --git a/ui/src/locales/lang/zh-Hant/views/404.ts b/ui/src/locales/lang/zh-Hant/views/404.ts new file mode 100644 index 00000000000..e85fc220646 --- /dev/null +++ b/ui/src/locales/lang/zh-Hant/views/404.ts @@ -0,0 +1,5 @@ +export default { + title: '404', + message: '無法訪問應用', + operate: '返回首頁' +} diff --git a/ui/src/locales/lang/zh-Hant/views/application-overview.ts b/ui/src/locales/lang/zh-Hant/views/application-overview.ts new file mode 100644 index 00000000000..b2711298bda --- /dev/null +++ b/ui/src/locales/lang/zh-Hant/views/application-overview.ts @@ -0,0 +1,113 @@ +export default { + title: '概覽', + appInfo: { + header: '應用資訊', + publicAccessLink: '公開訪問連結', + openText: '開', + closeText: '關', + copyLinkText: '複製連結', + refreshLinkText: '重新整理連結', + demo: '示範', + embedInWebsite: '嵌入第三方', + accessControl: '訪問限制', + displaySetting: '顯示設定', + apiAccessCredentials: 'API 存取憑證', + apiKey: 'API Key', + refreshToken: { + msgConfirm1: '是否重新產生公開訪問連結?', + msgConfirm2: + '重新產生公開訪問連結會影響嵌入第三方腳本變更,需要將新腳本重新嵌入第三方,請謹慎操作!', + refreshSuccess: '重新整理成功' + }, + APIKeyDialog: { + saveSettings: '儲存設定', + msgConfirm1: '是否刪除API Key', + msgConfirm2: '刪除API Key後將無法恢復,請確認是否刪除?', + enabledSuccess: '已啟用', + disabledSuccess: '已停用' + }, + EditAvatarDialog: { + title: '應用頭像', + customizeUpload: '自訂上傳', + upload: '上傳', + default: '預設logo', + custom: '自訂', + sizeTip: '建議尺寸 32*32,支援 JPG、PNG、GIF,大小不超過 10 MB', + fileSizeExceeded: '檔案大小超過 10 MB', + uploadImagePrompt: '請上傳一張圖片' + }, + 
EmbedDialog: { + fullscreenModeTitle: '全螢幕模式', + copyInstructions: '複製以下程式碼進行嵌入', + floatingModeTitle: '浮窗模式', + mobileModeTitle: '移動端模式' + }, + LimitDialog: { + dialogTitle: '訪問限制', + showSourceLabel: '顯示知識來源', + clientQueryLimitLabel: '每個用戶端提問限制', + timesDays: '次/天', + authentication: '身份驗證', + authenticationValue: '驗證密碼', + whitelistLabel: '白名單', + whitelistPlaceholder: + '請輸入允許嵌入第三方的來源位址,一行一個,如:\nhttp://127.0.0.1:5678\nhttps://dataease.io' + }, + SettingAPIKeyDialog: { + dialogTitle: '設定', + allowCrossDomainLabel: '允許跨域位址', + crossDomainPlaceholder: + '請輸入允許的跨域位址,開啟後不輸入跨域位址則不限制。\n跨域位址一行一個,如:\nhttp://127.0.0.1:5678 \nhttps://dataease.io' + }, + SettingDisplayDialog: { + dialogTitle: '顯示設定', + languageLabel: '語言', + showSourceLabel: '顯示知識來源', + showExecutionDetail: '顯示執行細節', + restoreDefault: '恢復預設', + customThemeColor: '自訂主題色', + headerTitleFontColor: '標頭標題字體顏色', + default: '預設', + askUserAvatar: '提問用戶頭像', + replace: '取代', + display: '顯示', + imageMessage: '建議尺寸 32*32,支援 JPG、PNG、GIF,大小不超過 10 MB', + AIAvatar: 'AI 回覆頭像', + floatIcon: '浮窗入口圖示', + iconDefaultPosition: '圖示預設位置', + iconPosition: { + left: '左', + right: '右', + bottom: '下', + top: '上' + }, + draggablePosition: '可拖曳位置', + showHistory: '顯示歷史紀錄', + displayGuide: '顯示引導圖(浮窗模式)', + disclaimer: '免責聲明', + disclaimerValue: '「以上內容均由 AI 生成,僅供參考和借鏡」' + } + }, + monitor: { + monitoringStatistics: '監控統計', + customRange: '自訂範圍', + startDatePlaceholder: '開始時間', + endDatePlaceholder: '結束時間', + pastDayOptions: { + past7Days: '過去7天', + past30Days: '過去30天', + past90Days: '過去90天', + past183Days: '過去半年', + other: '自訂义' + }, + charts: { + customerTotal: '用戶總數', + customerNew: '用戶新增數', + queryCount: '提問次數', + tokensTotal: 'Tokens 總數', + userSatisfaction: '用戶滿意度', + approval: '贊同', + disapproval: '反對' + } + } +} diff --git a/ui/src/locales/lang/zh-Hant/views/application-workflow.ts b/ui/src/locales/lang/zh-Hant/views/application-workflow.ts new file mode 100644 index 00000000000..c2a32c9db51 --- /dev/null +++ 
b/ui/src/locales/lang/zh-Hant/views/application-workflow.ts @@ -0,0 +1,301 @@ +export default { + node: '節點', + nodeName: '節點名稱', + baseComponent: '基礎組件', + nodeSetting: '節點設置', + workflow: '工作流', + searchBar: { + placeholder: '按名稱搜索' + }, + info: { + previewVersion: '預覽版本:', + saveTime: '保存時間:' + }, + setting: { + restoreVersion: '恢復版本', + restoreCurrentVersion: '恢復此版本', + addComponent: '添加組件', + public: '發布', + releaseHistory: '發布歷史', + autoSave: '自動保存', + latestRelease: '最近發布', + copyParam: '複製參數', + debug: '調試', + exit: '直接退出', + exitSave: '保存並退出' + }, + tip: { + publicSuccess: '發布成功', + noData: '沒有找到相關結果', + nameMessage: '名字不能為空!', + onlyRight: '只允許從右邊的錨點連出', + notRecyclable: '不可循環連線', + onlyLeft: '只允許連接左邊的錨點', + applicationNodeError: '該應用不可用', + functionNodeError: '該函數不可用', + repeatedNodeError: '節點名稱已存在!', + cannotCopy: '不能被複製', + copyError: '已複製節點', + paramErrorMessage: '參數已存在: ', + saveMessage: '當前修改未保存,是否保存後退出?' + }, + delete: { + confirmTitle: '確定刪除該節點?', + deleteMessage: '節點不允許刪除' + }, + control: { + zoomOut: '縮小', + zoomIn: '放大', + fitView: '適應', + retract: '收起全部節點', + extend: '展開全部節點', + beautify: '一鍵美化' + }, + variable: { + label: '變量', + global: '全局變量', + Referencing: '引用變量', + ReferencingRequired: '引用變量必填', + ReferencingError: '引用變量錯誤', + NoReferencing: '不存在的引用變量', + placeholder: '請選擇變量' + }, + condition: { + title: '執行條件', + front: '前置', + AND: '所有', + OR: '任一', + text: '連線節點執行完,執行當前節點' + }, + validate: { + startNodeRequired: '開始節點必填', + startNodeOnly: '開始節點只能有一個', + baseNodeRequired: '基本信息節點必填', + baseNodeOnly: '基本信息節點只能有一個', + notInWorkFlowNode: '未在流程中的節點', + noNextNode: '不存在的下一個節點', + nodeUnavailable: '節點不可用', + needConnect1: '節點的', + needConnect2: '分支需要連接', + cannotEndNode: '節點不能當做結束節點' + }, + nodes: { + startNode: { + label: '開始', + question: '用戶問題', + currentTime: '當前時間' + }, + baseNode: { + label: '基本信息', + appName: { + label: '應用名稱' + }, + appDescription: { + label: '應用描述' + }, + fileUpload: { + label: '文件上傳', + tooltip: 
'開啟後,問答頁面會顯示上傳文件的按鈕。' + }, + FileUploadSetting: { + title: '文件上傳設置', + maxFiles: '單次上傳最多文件數', + fileLimit: '每個文件最大(MB)', + fileUploadType: { + label: '上傳的文件類型', + documentText: '需要使用「文檔內容提取」節點解析文檔內容', + imageText: '需要使用「圖片理解」節點解析圖片內容', + audioText: '需要使用「語音轉文本」節點解析音頻內容', + otherText: '需要自行解析該類型文件' + } + } + }, + aiChatNode: { + label: 'AI 對話', + text: '與 AI 大模型進行對話', + answer: 'AI 回答內容', + returnContent: { + label: '返回內容', + tooltip: `關閉後該節點的內容則不輸出給用戶。 + 如果你想讓用戶看到該節點的輸出內容,請打開開關。` + }, + defaultPrompt: '已知信息', + think: '思考過程' + }, + searchDatasetNode: { + label: '知識庫檢索', + text: '關聯知識庫,查找與問題相關的分段', + paragraph_list: '檢索結果的分段列表', + is_hit_handling_method_list: '滿足直接回答的分段列表', + result: '檢索結果', + directly_return: '滿足直接回答的分段內容', + searchParam: '檢索參數', + searchQuestion: { + label: '檢索問題', + placeholder: '請選擇檢索問題', + requiredMessage: '請選擇檢索問題' + } + }, + questionNode: { + label: '問題優化', + text: '根據歷史聊天記錄優化完善當前問題,更利於匹配知識庫分段', + result: '問題優化結果', + defaultPrompt1: `根據上下文優化和完善用戶問題:`, + defaultPrompt2: `請輸出一個優化後的問題。`, + systemDefault: '你是一個問題優化大師' + }, + conditionNode: { + label: '判斷器', + text: '根據不同條件執行不同的節點', + branch_name: '分支名稱', + conditions: { + label: '條件', + info: '符合以下', + requiredMessage: '請選擇條件' + }, + valueMessage: '請輸入值', + addCondition: '添加條件', + addBranch: '添加分支' + }, + replyNode: { + label: '指定回覆', + text: '指定回覆內容,引用變量會轉換為字符串進行輸出', + content: '內容', + replyContent: { + label: '回覆內容', + custom: '自定義', + reference: '引用變量' + } + }, + rerankerNode: { + label: '多路召回', + text: '使用重排模型對多個知識庫的檢索結果進行二次召回', + result_list: '重排結果列表', + result: '重排結果', + rerankerContent: { + label: '重排內容', + requiredMessage: '請選擇重排內容' + }, + higher: '高於', + ScoreTooltip: 'Score越高相關性越強。', + max_paragraph_char_number: '最大引用字符數', + reranker_model: { + label: '重排模型', + placeholder: '請選擇重排模型' + } + }, + formNode: { + label: '表單收集', + text: '在問答過程中用於收集用戶信息,可以根據收集到表單數據執行後續流程', + form_content_format1: '你好,請先填寫下面表單內容:', + form_content_format2: '填寫後請點擊【提交】按鈕進行提交。', + form_data: '表單全部內容', + 
formContent: { + label: '表單輸出內容', + requiredMessage: '請表單輸出內容', + tooltip: '設置執行該節點輸出的內容,{ form } 為表單的佔位符。' + }, + formAllContent: '表單全部內容', + formSetting: '表單配置' + }, + documentExtractNode: { + label: '文檔內容提取', + text: '提取文檔中的內容', + content: '文檔內容' + }, + imageUnderstandNode: { + label: '圖片理解', + text: '識別出圖片中的物件、場景等信息回答用戶問題', + answer: 'AI 回答內容', + model: { + label: '圖片理解模型', + requiredMessage: '請選擇圖片理解模型' + }, + image: { + label: '選擇圖片', + requiredMessage: '請選擇圖片' + } + }, + variableAssignNode: { + label: '變數賦值', + text: '更新全域變數的值', + assign: '賦值' + }, + mcpNode: { + label: 'MCP 調用', + text: '透過SSE/Streamable HTTP方式執行MCP服務中的工具', + getToolsSuccess: '獲取工具成功', + getTool: '獲取工具', + tool: '工具', + toolParam: '工具變數', + mcpServerTip: '請輸入JSON格式的MCP服務器配置', + mcpToolTip: '請選擇工具', + configLabel: 'MCP Server Config (僅支持SSE/Streamable HTTP調用方式)' + }, + imageGenerateNode: { + label: '圖片生成', + text: '根據提供的文本內容生成圖片', + answer: 'AI 回答內容', + model: { + label: '圖片生成模型', + requiredMessage: '請選擇圖片生成模型' + }, + prompt: { + label: '提示詞(正向)', + tooltip: '正向提示詞,用來描述生成圖像中期望包含的元素和視覺特點' + }, + negative_prompt: { + label: '提示詞(負向)', + tooltip: '反向提示詞,用來描述不希望在畫面中看到的內容,可以對畫面進行限制。', + placeholder: '請描述不想生成的圖片內容,比如:顏色、血腥內容' + } + }, + speechToTextNode: { + label: '語音轉文本', + text: '將音頻通過語音識別模型轉換為文本', + stt_model: { + label: '語音識別模型' + }, + audio: { + label: '選擇語音文件', + placeholder: '請選擇語音文件' + } + }, + textToSpeechNode: { + label: '文本轉語音', + text: '將文本通過語音合成模型轉換為音頻', + tts_model: { + label: '語音合成模型' + }, + content: { + label: '選擇文本內容' + } + }, + functionNode: { + label: '自定義函數', + text: '通過執行自定義腳本,實現數據處理' + }, + applicationNode: { + label: '應用節點' + } + }, + compare: { + is_null: '為空', + is_not_null: '不為空', + contain: '包含', + not_contain: '不包含', + eq: '等於', + ge: '大於等於', + gt: '大於', + le: '小於等於', + lt: '小於', + len_eq: '長度等於', + len_ge: '長度大於等於', + len_gt: '長度大於', + len_le: '長度小於等於', + len_lt: '長度小於', + is_true: '為真', + is_not_true: '不為真' + }, + FileUploadSetting: {} +} diff --git 
a/ui/src/locales/lang/zh-Hant/views/application.ts b/ui/src/locales/lang/zh-Hant/views/application.ts new file mode 100644 index 00000000000..3b6f1756ed7 --- /dev/null +++ b/ui/src/locales/lang/zh-Hant/views/application.ts @@ -0,0 +1,215 @@ +export default { + title: '應用', + createApplication: '建立應用', + importApplication: '匯入應用', + copyApplication: '複製應用', + workflow: '進階編排', + simple: '簡單配置', + searchBar: { + placeholder: '按名稱搜尋' + }, + setting: { + demo: '示範' + }, + delete: { + confirmTitle: '是否刪除應用:', + confirmMessage: '刪除後該應用將不再提供服務,請謹慎操作。' + }, + tip: { + ExportError: '匯出失敗', + professionalMessage: '社群版最多支援 5 個應用,如需擁有更多應用,請升級為專業版。', + saveErrorMessage: '儲存失敗,請檢查輸入或稍後再試', + loadingErrorMessage: '載入配置失敗,請檢查輸入或稍後再試' + }, + applicationForm: { + title: { + appTest: '調試預覽', + copy: '副本' + }, + form: { + appName: { + label: '名稱', + placeholder: '請輸入應用名稱', + requiredMessage: '請輸入應用名稱' + }, + appDescription: { + label: '描述', + placeholder: '描述該應用的應用場景及用途,如:XXX 小助手回答用戶提出的 XXX 產品使用問題' + }, + appType: { + label: '類型', + simplePlaceholder: '適合新手建立小助手', + workflowPlaceholder: '適合高階用戶自訂小助手的工作流程' + }, + appTemplate: { + blankApp: '空白應用', + assistantApp: '知識庫問答助手' + }, + aiModel: { + label: 'AI 模型', + placeholder: '請選擇 AI 模型' + }, + roleSettings: { + label: '角色設定', + placeholder: '你是 xxx 小助手' + }, + prompt: { + label: '提示詞', + noReferences: ' (無引用知識庫)', + references: ' (引用知識庫)', + placeholder: '請輸入提示詞', + requiredMessage: '請輸入提示詞', + tooltip: '透過調整提示詞內容,可以引導大模型對話方向,該提示詞會被固定在上下文的開頭。', + + noReferencesTooltip: + '透過調整提示詞內容,可以引導大模型對話方向,該提示詞會被固定在上下文的開頭。可以使用變數:{question} 是用戶提出問題的佔位符。', + referencesTooltip: + '透過調整提示詞內容,可以引導大模型對話方向,該提示詞會被固定在上下文的開頭。可以使用變數:{data} 是引用知識庫中分段的佔位符;{question} 是用戶提出問題的佔位符。', + defaultPrompt: `已知資訊:{data} +用戶問題:{question} +回答要求: + - 請使用中文回答用戶問題` + }, + historyRecord: { + label: '歷史對話紀錄' + }, + relatedKnowledge: { + label: '關聯知識庫', + placeholder: '關聯的知識庫展示在這裡' + }, + multipleRoundsDialogue: '多輪對話', + + prologue: '開場白', + defaultPrologue: + '您好,我是 XXX 
小助手,您可以向我提出 XXX 使用問題。\n- XXX 主要功能有什麼?\n- XXX 如何收費?\n- 需要轉人工服務', + + problemOptimization: { + label: '問題優化', + tooltip: '根據歷史對話優化完善當前問題,更利於匹配知識點。' + }, + voiceInput: { + label: '語音輸入', + placeholder: '請選擇語音辨識模型', + requiredMessage: '請選擇語音輸入模型', + autoSend: '自動發送' + }, + voicePlay: { + label: '語音播放', + placeholder: '請選擇語音合成模型', + requiredMessage: '請選擇語音播放模型', + autoPlay: '自動播放', + browser: '瀏覽器播放(免費)', + tts: 'TTS模型', + listeningTest: '試聽' + }, + reasoningContent: { + label: '輸出思考', + tooltip: '請根據模型返回的思考標簽設置,標簽中間的內容將會認定爲思考過程', + start: '開始', + end: '結束' + } + }, + buttons: { + publish: '儲存並發佈', + addModel: '新增模型' + }, + + dialog: { + addDataset: '新增關聯知識庫', + addDatasetPlaceholder: '所選知識庫必須使用相同的 Embedding 模型', + selected: '已選', + countDataset: '個知識庫', + + selectSearchMode: '檢索模式', + vectorSearch: '向量檢索', + vectorSearchTooltip: '向量檢索是一種基於向量相似度的檢索方式,適用於知識庫中的大數據量場景。', + fullTextSearch: '全文檢索', + fullTextSearchTooltip: + '全文檢索是一種基於文本相似度的檢索方式,適用於知識庫中的小數據量場景。', + hybridSearch: '混合檢索', + hybridSearchTooltip: + '混合檢索是一種基於向量和文本相似度的檢索方式,適用於知識庫中的中等數據量場景。', + similarityThreshold: '相似度高於', + similarityTooltip: '相似度越高相關性越強。', + topReferences: '引用分段數 TOP', + maxCharacters: '最多引用字元數', + noReferencesAction: '無引用知識庫分段時', + continueQuestioning: '繼續向 AI 模型提問', + provideAnswer: '指定回答內容', + designated_answer: + '你好,我是 XXX 小助手,我的知識庫只包含了 XXX 產品相關知識,請重新描述您的問題。', + defaultPrompt1: + '()裡面是用戶問題,根據上下文回答揣測用戶問題({question}) 要求: 輸出一個補全問題,並且放在', + defaultPrompt2: '標籤中' + } + }, + applicationAccess: { + title: '應用接入', + wecom: '企業微信應用', + wecomTip: '打造企業微信智慧應用', + dingtalk: '釘釘應用', + dingtalkTip: '打造釘釘智慧應用', + wechat: '公眾號', + wechatTip: '打造公眾號智慧應用', + lark: '飛書應用', + larkTip: '打造飛書智慧應用', + slack: 'Slack', + slackTip: '打造 Slack 智慧應用', + setting: '配置', + callback: '回呼位址', + callbackTip: '請輸入回呼位址', + wecomPlatform: '企業微信後台', + wechatPlatform: '微信公众平台', + dingtalkPlatform: '釘釘開放平台', + larkPlatform: '飛書開放平台', + wecomSetting: { + title: '企業微信應用配置', + cropId: '企業 ID', + cropIdPlaceholder: '請輸入企業 ID', + 
agentIdPlaceholder: '請輸入Agent ID', + secretPlaceholder: '請輸入Secret', + tokenPlaceholder: '請輸入Token', + encodingAesKeyPlaceholder: '請輸入EncodingAESKey', + authenticationSuccessful: '認證成功', + urlInfo: '-應用管理-自建-建立的應用-接收消息-設定 API 接收的 "URL" 中' + }, + dingtalkSetting: { + title: '釘釘應用配置', + clientIdPlaceholder: '請輸入Client ID', + clientSecretPlaceholder: '請輸入Client Secret', + urlInfo: '-機器人頁面,設定 "消息接收模式" 為 HTTP模式 ,並把上面URL填寫到"消息接收位址"中' + }, + wechatSetting: { + title: '公眾號應用配置', + appId: '開發者ID (APP ID)', + appIdPlaceholder: '請輸入開發者ID (APP ID)', + appSecret: '開發者密鑰 (APP SECRET)', + appSecretPlaceholder: '請輸入開發者密鑰 (APP SECRET)', + token: '權杖 (TOKEN)', + tokenPlaceholder: '請輸入權杖 (TOKEN)', + aesKey: '消息加解密密鑰', + aesKeyPlaceholder: '請輸入消息加解密密鑰', + urlInfo: '-設定與開發-基本配置-伺服器配置的 "伺服器位址URL" 中' + }, + larkSetting: { + title: '飛書應用配置', + appIdPlaceholder: '請輸入App ID', + appSecretPlaceholder: '請輸入App Secret', + verificationTokenPlaceholder: '請輸入Verification Token', + urlInfo: '-事件與回呼-事件配置-配置訂閱方式的 "請求位址" 中', + folderTokenPlaceholder: '請輸入Folder Token' + }, + slackSetting: { + title: 'Slack 應用配置', + signingSecretPlaceholder: '請輸入 Signing Secret', + botUserTokenPlaceholder: '請輸入 Bot User Token' + }, + copyUrl: '複製連結填入到' + }, + hitTest: { + title: '命中測試', + text: '針對用戶提問調試段落匹配情況,保障回答效果。', + emptyMessage1: '命中的段落顯示在這裡', + emptyMessage2: '沒有命中的分段' + } +} diff --git a/ui/src/locales/lang/zh-Hant/views/dataset.ts b/ui/src/locales/lang/zh-Hant/views/dataset.ts new file mode 100644 index 00000000000..39a5ad7b075 --- /dev/null +++ b/ui/src/locales/lang/zh-Hant/views/dataset.ts @@ -0,0 +1,93 @@ +export default { + title: '知識庫', + createDataset: '建立知識庫', + general: '通用型', + web: 'Web 站點', + lark: '飛書', + yuque: '語雀', + relatedApplications: '關聯應用', + document_count: '文檔數', + relatedApp_count: '關聯應用', + searchBar: { + placeholder: '按名稱搜尋' + }, + setting: { + vectorization: '向量化', + sync: '同步' + }, + tip: { + professionalMessage: '社群版最多支援 50 個知識庫,如需擁有更多知識庫,請升級為專業版。', + syncSuccess: '同步任務發送成功', + 
updateModeMessage: '修改知識庫向量模型後,需要對知識庫向量化,是否繼續保存?' + }, + delete: { + confirmTitle: '是否刪除知識庫:', + confirmMessage1: '此知識庫關聯', + confirmMessage2: '個應用,刪除後無法恢復,請謹慎操作。' + }, + datasetForm: { + title: { + info: '基本資訊' + }, + form: { + datasetName: { + label: '知識庫名稱', + placeholder: '請輸入知識庫名稱', + requiredMessage: '請輸入知識庫名稱' + }, + datasetDescription: { + label: '知識庫描述', + placeholder: + '描述知識庫的內容,詳盡的描述將幫助AI能深入理解該知識庫的內容,能更準確的檢索到內容,提高該知識庫的命中率。', + requiredMessage: '請輸入知識庫描述' + }, + EmbeddingModel: { + label: '向量模型', + placeholder: '請選擇向量模型', + requiredMessage: '請輸入Embedding模型' + }, + datasetType: { + label: '知識庫類型', + generalInfo: '透過上傳檔案或手動錄入建置知識庫', + webInfo: '透過網站連結建立知識庫', + larkInfo: '透過飛書文檔建構知識庫', + yuqueInfo: '透過語雀文件建構知識庫' + }, + source_url: { + label: 'Web 根位址', + placeholder: '請輸入 Web 根位址', + requiredMessage: '請輸入 Web 根位址' + }, + user_id: { + requiredMessage: '請輸入 User ID' + }, + token: { + requiredMessage: '請輸入 Token' + }, + selector: { + label: '選擇器', + placeholder: '預設為 body,可輸入 .classname/#idname/tagname' + } + } + }, + ResultSuccess: { + title: '知識庫建立成功', + paragraph: '段落', + paragraph_count: '個段落', + documentList: '文件列表', + loading: '正在導入', + buttons: { + toDataset: '返回知識庫列表', + toDocument: '前往文件' + } + }, + syncWeb: { + title: '同步知識庫', + syncMethod: '同步方式', + replace: '替換同步', + replaceText: '重新獲取 Web 站點文件,覆蓋替換本地知識庫中的文件', + complete: '完整同步', + completeText: '先刪除本地知識庫所有文件,重新獲取 Web 站點文件', + tip: '注意:所有同步都會刪除現有數據並重新獲取新數據,請謹慎操作。' + } +} diff --git a/ui/src/locales/lang/zh-Hant/views/document.ts b/ui/src/locales/lang/zh-Hant/views/document.ts new file mode 100644 index 00000000000..adfc8cc463b --- /dev/null +++ b/ui/src/locales/lang/zh-Hant/views/document.ts @@ -0,0 +1,175 @@ +export default { + uploadDocument: '上傳文檔', + importDocument: '導入文檔', + syncDocument: '同步文檔', + selected: '已選', + items: '項', + searchBar: { + placeholder: '按 文檔名稱 搜索' + }, + setting: { + migration: '遷移', + cancelGenerateQuestion: '取消生成問題', + cancelVectorization: '取消向量化', + cancelGenerate: 
'取消生成', + export: '匯出' + }, + tip: { + saveMessage: '當前的更改尚未保存,確認退出嗎?', + cancelSuccess: '批量取消成功', + sendMessage: '發送成功', + vectorizationSuccess: '批量向量化成功', + nameMessage: '文件名稱不能為空!', + importMessage: '導入成功', + migrationSuccess: '遷移成功' + }, + upload: { + selectFile: '選擇文件', + selectFiles: '選擇文件夾', + uploadMessage: '拖拽文件至此上傳或', + formats: '支持格式:', + requiredMessage: '請上傳文件', + errorMessage1: '文件大小超過 100MB', + errorMessage2: '文件格式不支持', + errorMessage3: '文件不能為空', + errorMessage4: '每次最多上傳50個文件', + template: '模板', + download: '下載' + }, + + fileType: { + txt: { + label: '文本文件', + tip1: '1、文件上傳前,建議規範文件的分段標識', + tip2: '2、每次最多上傳 50 個文件,每個文件不超過 100MB' + }, + table: { + label: '表格', + tip1: '1、點擊下載對應模板並完善信息:', + tip2: '2、第一行必須是列標題,且列標題必須是有意義的術語,表中每條記錄將作為一個分段', + tip3: '3、上傳的表格文件中每個 sheet 會作為一個文檔,sheet 名稱為文檔名稱', + tip4: '4、每次最多上傳 50 個文件,每個文件不超過 100MB' + }, + QA: { + label: 'QA 問答對', + tip1: '1、點擊下載對應模板並完善信息', + tip2: '2、上傳的表格文件中每個 sheet 會作為一個文檔,sheet 名稱為文檔名稱', + tip3: '3、每次最多上傳 50 個文件,每個文件不超過 100MB' + } + }, + setRules: { + title: { + setting: '設置分段規則', + preview: '分段預覽' + }, + intelligent: { + label: '智能分段(推薦)', + text: '不了解如何設置分段規則推薦使用智能分段' + }, + advanced: { + label: '高級分段', + text: '用戶可根據文檔規範自行設置分段標識符、分段長度以及清洗規則' + }, + patterns: { + label: '分段標識', + tooltip: '按照所選符號先後順序做遞歸分割,分割結果超出分段長度將截取至分段長度。', + placeholder: '請選擇' + }, + limit: { + label: '分段長度' + }, + with_filter: { + label: '自動清洗', + text: '去掉重複多餘符號空格、空行、制表符' + }, + checkedConnect: { + label: '導入時添加分段標題為關聯問題(適用於標題為問題的問答對)' + } + }, + buttons: { + prev: '上一步', + next: '下一步', + import: '開始導入', + preview: '生成預覽' + }, + table: { + name: '文件名稱', + char_length: '字符數', + paragraph: '分段', + all: '全部', + updateTime: '更新時間' + }, + fileStatus: { + label: '文件狀態', + SUCCESS: '成功', + FAILURE: '失敗', + EMBEDDING: '索引中', + PENDING: '排隊中', + GENERATE: '生成中', + SYNC: '同步中', + REVOKE: '取消中', + finish: '完成' + }, + enableStatus: { + label: '啟用狀態', + enable: '開啟', + close: '關閉' + }, + sync: { + label: '同步', + confirmTitle: '確認同步文檔?', + 
confirmMessage1: '同步將刪除已有數據重新獲取新數據,請謹慎操作。', + confirmMessage2: '無法同步,請先去設置文檔 URL地址', + successMessage: '同步文檔成功' + }, + delete: { + confirmTitle1: '是否批量刪除', + confirmTitle2: '個文檔?', + confirmMessage: '所選文檔中的分段會跟隨刪除,請謹慎操作。', + successMessage: '批量刪除成功', + confirmTitle3: '是否刪除文檔:', + confirmMessage1: '此文檔下的', + confirmMessage2: '個分段都會被刪除,請謹慎操作。' + }, + form: { + source_url: { + label: '文檔地址', + placeholder: '請輸入文檔地址,一行一個,地址不正確文檔會導入失敗。', + requiredMessage: '請輸入文檔地址' + }, + selector: { + label: '選擇器', + placeholder: '默認為 body,可輸入 .classname/#idname/tagname' + }, + hit_handling_method: { + label: '命中處理方式', + tooltip: '用戶提問時,命中文檔下的分段時按照設置的方式進行處理。' + }, + similarity: { + label: '相似度高於', + placeholder: '直接返回分段內容', + requiredMessage: '請輸入相似度' + } + }, + hitHandlingMethod: { + optimization: '模型優化', + directly_return: '直接回答' + }, + generateQuestion: { + title: '生成問題', + successMessage: '生成問題成功', + tip1: '提示詞中的 {data} 為分段內容的佔位符,執行時替換為分段內容並發送給 AI 模型;', + tip2: 'AI 模型根據分段內容生成相關問題,請將生成的問題放置於', + tip3: '標籤中,系統會自動關聯標籤中的問題;', + tip4: '生成效果取決於所選模型和提示詞,用戶可自行調整至最佳效果。', + prompt1: `內容:{data}\n\n請總結上面的內容,並根據內容總結生成 5 個問題。\n回答要求:\n - 請只輸出問題;\n - 請將每個問題放置在`, + prompt2: `標籤中。` + }, + feishu: { + selectDocument: '選擇文檔', + tip1: '支持文檔和表格類型,包含TXT、Markdown、PDF、DOCX、HTML、XLS、XLSX、CSV、ZIP格式;', + tip2: '系統不存儲原始文檔,導入文檔前,建議規範文檔的分段標識。', + allCheck: '全選', + errorMessage1: '請選擇文檔' + } +} diff --git a/ui/src/locales/lang/zh-Hant/views/function-lib.ts b/ui/src/locales/lang/zh-Hant/views/function-lib.ts new file mode 100644 index 00000000000..f39fea51a22 --- /dev/null +++ b/ui/src/locales/lang/zh-Hant/views/function-lib.ts @@ -0,0 +1,78 @@ +export default { + title: '函數庫', + internalTitle: '內置函數', + added: '已新增', + createFunction: '建立函數', + editFunction: '編輯函數', + copyFunction: '複製函數', + importFunction: '匯入函數', + searchBar: { + placeholder: '按函數名稱搜尋' + }, + setting: { + disabled: '停用' + }, + tip: { + saveMessage: '當前的更改尚未保存,確認退出嗎?' 
+ }, + delete: { + confirmTitle: '是否刪除函數:', + confirmMessage: '刪除後,引用該函數的應用在查詢時會報錯,請謹慎操作。' + }, + disabled: { + confirmTitle: '是否停用函數:', + confirmMessage: '停用後,引用該函數的應用在查詢時會報錯,請謹慎操作。' + }, + functionForm: { + title: { + copy: '副本', + baseInfo: '基礎信息' + }, + form: { + functionName: { + label: '名稱', + name: '函數名稱', + placeholder: '請輸入函數名稱', + requiredMessage: '請輸入函數名稱' + }, + functionDescription: { + label: '描述', + placeholder: '請輸入函數的描述' + }, + permission_type: { + label: '權限', + requiredMessage: '請選擇' + }, + paramName: { + label: '參數名', + placeholder: '請輸入參數名', + requiredMessage: '請輸入參數名' + }, + dataType: { + label: '數據類型' + }, + source: { + label: '來源', + custom: '自定義', + reference: '引用參數' + }, + required: { + label: '是否必填' + }, + param: { + paramInfo1: '使用函數時顯示', + paramInfo2: '使用函數時不顯示', + code: '函數內容(Python)', + selectPlaceholder: '請選擇參數', + inputPlaceholder: '請輸入參數值' + }, + debug: { + run: '運行', + output: '輸出', + runResult: '運行結果', + runSuccess: '運行成功', + runFailed: '運行失敗' + } + } + } +} diff --git a/ui/src/locales/lang/zh-Hant/views/index.ts b/ui/src/locales/lang/zh-Hant/views/index.ts new file mode 100644 index 00000000000..c63d40492f8 --- /dev/null +++ b/ui/src/locales/lang/zh-Hant/views/index.ts @@ -0,0 +1,34 @@ +import notFound from './404' +import application from './application' +import applicationOverview from './application-overview' +import dataset from './dataset' +import system from './system' +import functionLib from './function-lib' +import user from './user' +import team from './team' +import template from './template' +import document from './document' +import paragraph from './paragraph' +import problem from './problem' +import log from './log' +import applicationWorkflow from './application-workflow' +import login from './login' +import operateLog from './operate-log' +export default { + notFound, + application, + applicationOverview, + system, + functionLib, + user, + team, + template, + dataset, + applicationWorkflow, + document, + 
paragraph, + problem, + log, + login, + operateLog +} diff --git a/ui/src/locales/lang/zh-Hant/views/log.ts b/ui/src/locales/lang/zh-Hant/views/log.ts new file mode 100644 index 00000000000..5e84a698d61 --- /dev/null +++ b/ui/src/locales/lang/zh-Hant/views/log.ts @@ -0,0 +1,41 @@ +export default { + title: '對話日誌', + delete: { + confirmTitle: '是否刪除問題:', + confirmMessage1: '刪除問題關聯的', + confirmMessage2: '個分段會被取消關聯,請謹慎操作。' + }, + buttons: { + clearStrategy: '清除策略', + prev: '上一條', + next: '下一條' + }, + table: { + abstract: '摘要', + chat_record_count: '對話提問數', + user: '用戶', + feedback: { + label: '用戶反饋', + star: '贊同', + trample: '反對' + }, + mark: '改進標註', + recenTimes: '最近對話時間' + }, + addToDataset: '添加至知識庫', + daysText: '天之前的對話記錄', + selectDataset: '選擇知識庫', + selectDatasetPlaceholder: '請選擇知識庫', + saveToDocument: '保存至文件', + documentPlaceholder: '請選擇文件', + editContent: '修改內容', + editMark: '修改標註', + form: { + content: { + placeholder: '請輸入內容' + }, + title: { + placeholder: '請給當前內容設定一個標題,以便管理查看' + } + } +} diff --git a/ui/src/locales/lang/zh-Hant/views/login.ts b/ui/src/locales/lang/zh-Hant/views/login.ts new file mode 100644 index 00000000000..dded5473049 --- /dev/null +++ b/ui/src/locales/lang/zh-Hant/views/login.ts @@ -0,0 +1,24 @@ +export default { + title: '普通登錄', + jump_tip: '即將跳轉至認證源頁面進行認證', + jump: '跳轉', + resetPassword: '修改密碼', + forgotPassword: '忘記密碼', + userRegister: '用戶註冊', + buttons: { + login: '登錄', + register: '註冊', + backLogin: '返回登錄', + checkCode: '立即驗證' + }, + newPassword: '新密碼', + enterPassword: '請輸入新密碼', + useEmail: '使用電子郵箱', + moreMethod: '更多登錄方式', + verificationCode: { + placeholder: '請輸入驗證碼', + getVerificationCode: '獲取驗證碼', + successMessage: '驗證碼發送成功', + resend: '重新發送' + } +} diff --git a/ui/src/locales/lang/zh-Hant/views/operate-log.ts b/ui/src/locales/lang/zh-Hant/views/operate-log.ts new file mode 100644 index 00000000000..e0c1b47dd27 --- /dev/null +++ b/ui/src/locales/lang/zh-Hant/views/operate-log.ts @@ -0,0 +1,31 @@ +export default { + title: 
'操作日誌', + table: { + menu: { + label: '操作菜單' + }, + operate: { + label: '操作', + detail: '操作詳情' + }, + user: { + label: '操作用戶' + }, + status: { + label: '狀態', + success: '成功', + fail: '失敗', + all: '全部' + }, + ip_address: { + label: 'IP地址' + }, + opt: { + label: 'API詳情' + }, + operateTime: { + label: '操作時間' + } + }, + close: '關閉' +} diff --git a/ui/src/locales/lang/zh-Hant/views/paragraph.ts b/ui/src/locales/lang/zh-Hant/views/paragraph.ts new file mode 100644 index 00000000000..a5b4b3317a4 --- /dev/null +++ b/ui/src/locales/lang/zh-Hant/views/paragraph.ts @@ -0,0 +1,32 @@ +export default { + title: '段落', + paragraph_count: '段落', + editParagraph: '編輯分段', + addParagraph: '添加分段', + paragraphDetail: '分段詳情', + character_count: '個字符', + setting: { + batchSelected: '批量選擇', + cancelSelected: '取消選擇' + }, + delete: { + confirmTitle: '是否刪除段落:', + confirmMessage: '刪除後無法恢復,請謹慎操作。' + }, + relatedProblem: { + title: '關聯問題', + placeholder: '請選擇問題' + }, + form: { + paragraphTitle: { + label: '分段標題', + placeholder: '請輸入分段標題' + }, + content: { + label: '分段內容', + placeholder: '請輸入分段內容', + requiredMessage1: '請輸入分段內容', + requiredMessage2: '內容最多不超過 100000 個字' + } + } +} diff --git a/ui/src/locales/lang/zh-Hant/views/problem.ts b/ui/src/locales/lang/zh-Hant/views/problem.ts new file mode 100644 index 00000000000..12cb86020bc --- /dev/null +++ b/ui/src/locales/lang/zh-Hant/views/problem.ts @@ -0,0 +1,37 @@ +export default { + title: '問題', + createProblem: '建立問題', + detailProblem: '問題詳情', + quickCreateProblem: '快速建立問題', + quickCreateName: '問題', + tip: { + placeholder: '請輸入問題,支持輸入多個,一行一個。', + errorMessage: '問題不能為空!', + requiredMessage: '請輸入問題', + relatedSuccess: '批量關聯分段成功' + }, + + setting: { + batchDelete: '批量刪除', + cancelRelated: '取消關聯' + }, + searchBar: { + placeholder: '按名稱搜尋' + }, + table: { + paragraph_count: '關聯分段數', + updateTime: '更新時間' + }, + delete: { + confirmTitle: '是否刪除問題:', + confirmMessage1: '刪除問題關聯的', + confirmMessage2: '個分段會被取消關聯,請謹慎操作。' + }, + relateParagraph: { + title: 
'關聯分段', + selectDocument: '選擇文件', + placeholder: '按 文件名稱 搜尋', + selectedParagraph: '已選分段', + count: '個' + }, +} diff --git a/ui/src/locales/lang/zh-Hant/views/system.ts b/ui/src/locales/lang/zh-Hant/views/system.ts new file mode 100644 index 00000000000..1e33f22fb33 --- /dev/null +++ b/ui/src/locales/lang/zh-Hant/views/system.ts @@ -0,0 +1,153 @@ +export default { + title: '系統管理', + subTitle: '系統設置', + test: '測試連線', + testSuccess: '測試連線成功', + testFailed: '測試連線失敗', + password: '密碼', + authentication: { + title: '登入認證', + ldap: { + title: 'LDAP', + address: 'LDAP 位址', + serverPlaceholder: '請輸入LDAP 位址', + bindDN: '綁定DN', + bindDNPlaceholder: '請輸入綁定 DN', + + ou: '使用者OU', + ouPlaceholder: '請輸入使用者 OU', + ldap_filter: '使用者過濾器', + ldap_filterPlaceholder: '請輸入使用者過濾器', + ldap_mapping: 'LDAP 屬性對應', + ldap_mappingPlaceholder: '請輸入 LDAP 屬性對應', + enableAuthentication: '啟用 LDAP 認證' + }, + cas: { + title: 'CAS', + ldpUri: 'ldpUri', + ldpUriPlaceholder: '請輸入ldpUri', + validateUrl: '驗證位址', + validateUrlPlaceholder: '請輸入驗證位址', + redirectUrl: '回呼位址', + redirectUrlPlaceholder: '請輸入回呼位址', + enableAuthentication: '啟用 CAS 認證' + }, + oidc: { + title: 'OIDC', + authEndpoint: '授權端位址', + authEndpointPlaceholder: '請輸入授權端位址', + tokenEndpoint: 'Token端位址', + tokenEndpointPlaceholder: '請輸入 Token 端位址', + userInfoEndpoint: '使用者資訊端位址', + userInfoEndpointPlaceholder: '請輸入使用者資訊端位址', + clientId: '用戶端 ID', + scopePlaceholder: '請輸入連線範圍', + clientIdPlaceholder: '請輸入用戶端 ID', + clientSecret: '用戶端密鑰', + clientSecretPlaceholder: '請輸入用戶端密鑰', + logoutEndpoint: '登出端位址', + logoutEndpointPlaceholder: '請輸入登出端位址', + redirectUrl: '回呼位址', + redirectUrlPlaceholder: '請輸入回呼位址', + enableAuthentication: '啟用 OIDC 認證' + }, + + oauth2: { + title: 'OAuth2', + authEndpoint: '授權端位址', + authEndpointPlaceholder: '請輸入授權端位址', + tokenEndpoint: 'Token 端位址', + tokenEndpointPlaceholder: '請輸入 Token 端位址', + userInfoEndpoint: '使用者資訊端位址', + userInfoEndpointPlaceholder: '請輸入使用者資訊端位址', + scope: '連線範圍', + scopePlaceholder: '請輸入連線範圍', + 
clientId: '用戶端 ID', + clientIdPlaceholder: '請輸入用戶端 ID', + clientSecret: '用戶端密鑰', + clientSecretPlaceholder: '請輸入用戶端密鑰', + redirectUrl: '回呼位址', + redirectUrlPlaceholder: '請輸入回呼位址', + filedMapping: '欄位對應', + filedMappingPlaceholder: '請輸入欄位對應', + enableAuthentication: '啟用 OAuth2 認證' + }, + scanTheQRCode: { + title: '掃碼登入', + wecom: '企業微信', + dingtalk: '釘釘', + lark: '飛書', + effective: '有效', + alreadyTurnedOn: '已開啟', + notEnabled: '未開啟', + validate: '驗證', + validateSuccess: '驗證成功', + validateFailed: '驗證失敗', + validateFailedTip: '請填寫所有必填項並確保格式正確', + appKeyPlaceholder: '請輸入 App Key', + appSecretPlaceholder: '請輸入 App Secret', + corpIdPlaceholder: '請輸入 Corp Id', + agentIdPlaceholder: '請輸入 Agent Id', + callbackWarning: '請輸入有效的 URL 位址', + larkQrCode: '飛書掃碼登錄', + dingtalkQrCode: '釘釘掃碼登錄', + setting: '設置', + access: '接入' + } + }, + theme: { + title: '外觀設置', + platformDisplayTheme: '平台顯示主題', + customTheme: '自定義主題', + platformLoginSettings: '平台登錄設置', + custom: '自定義', + pagePreview: '頁面預覽', + default: '默認', + restoreDefaults: '恢復默認', + orange: '活力橙', + green: '松石綠', + purple: '神秘紫', + red: '胭脂紅', + loginBackground: '登錄背景圖', + loginLogo: '登錄 Logo', + websiteLogo: '網站 Logo', + replacePicture: '替換圖片', + websiteLogoTip: '頂部網站顯示的 Logo,建議尺寸 48*48,支持 JPG、PNG、GIF,大小不超過 10MB', + loginLogoTip: '登錄頁面右側 Logo,建議尺寸 204*52,支持 JPG、PNG、GIF,大小不超過 10 MB', + loginBackgroundTip: + '左側背景圖,矢量圖建議尺寸 576*900,位圖建議尺寸 1152*1800;支持 JPG、PNG、GIF,大小不超過 10 MB', + websiteName: '網站名稱', + websiteNamePlaceholder: '請輸入網站名稱', + websiteNameTip: '顯示在網頁 Tab 的平台名稱', + websiteSlogan: '歡迎語', + websiteSloganPlaceholder: '請輸入歡迎語', + websiteSloganTip: '產品 Logo 下的歡迎語', + logoDefaultTip: '默认为 MaxKB 登錄界面,支持自定义设置', + defaultSlogan: '歡迎使用 MaxKB 開源 AI 助手', + defaultTip: '默認為 MaxKB 平台界面,支持自定義設置', + platformSetting: '平台設置', + showUserManual: '顯示用戶手冊', + showForum: '顯示論壇求助', + showProject: '顯示項目地址', + urlPlaceholder: '請輸入 URL 地址', + abandonUpdate: '放棄更新', + saveAndApply: '保存並應用', + fileMessageError: '文件大小超過 10M', + saveSuccess: '外觀設置成功' 
+ }, + email: { + title: '郵箱設置', + smtpHost: 'SMTP Host', + smtpHostPlaceholder: '請輸入 SMTP Host', + smtpPort: 'SMTP Port', + smtpPortPlaceholder: '請輸入 SMTP Port', + smtpUser: 'SMTP 帳戶', + smtpUserPlaceholder: '請輸入 SMTP 帳戶', + sendEmail: '發件人信箱', + sendEmailPlaceholder: '請輸入發件人信箱', + smtpPassword: '發件人密碼', + smtpPasswordPlaceholder: '請輸入發件人密碼', + enableSSL: '啟用 SSL(如果 SMTP 端口是 465,通常需要啟用 SSL)', + enableTLS: '啟用 TLS(如果 SMTP 端口是 587,通常需要啟用 TLS)' + } +} diff --git a/ui/src/locales/lang/zh-Hant/views/team.ts b/ui/src/locales/lang/zh-Hant/views/team.ts new file mode 100644 index 00000000000..1b1fb4192b8 --- /dev/null +++ b/ui/src/locales/lang/zh-Hant/views/team.ts @@ -0,0 +1,29 @@ +export default { + title: '團隊成員', + member: '成員', + manage: '所有者', + permissionSetting: '權限設定', + addMember: '新增成員', + addSubTitle: '成員登入後可以存取您授權的資料。', + searchBar: { + placeholder: '請輸入使用者名稱搜尋' + }, + delete: { + button: '移除', + confirmTitle: '是否移除成員:', + confirmMessage: '移除後將會取消成員擁有之知識庫和應用程式權限。' + }, + setting: { + management: '管理', + check: '查看' + }, + teamForm: { + form: { + userName: { + label: '使用者名稱/電子郵件', + placeholder: '請輸入成員的使用者名稱或電子郵件', + requiredMessage: '請輸入使用者名稱/電子郵件' + } + } + } +} diff --git a/ui/src/locales/lang/zh-Hant/views/template.ts b/ui/src/locales/lang/zh-Hant/views/template.ts new file mode 100644 index 00000000000..241f9d8c516 --- /dev/null +++ b/ui/src/locales/lang/zh-Hant/views/template.ts @@ -0,0 +1,83 @@ +export default { + title: '模型設定', + provider: '供應商', + providerPlaceholder: '選擇供應商', + addModel: '新增模型', + searchBar: { + placeholder: '按名稱搜尋' + }, + delete: { + confirmTitle: '刪除模型', + confirmMessage: '是否刪除模型:' + }, + tip: { + createSuccessMessage: '創建模型成功', + createErrorMessage: '基礎資訊有填寫錯誤', + errorMessage: '變數已存在: ', + emptyMessage1: '請先選擇基礎資訊的模型類型和基礎模型', + emptyMessage2: '所選模型不支援參數設定', + updateSuccessMessage: '修改模型成功', + saveSuccessMessage: '模型參數儲存成功', + downloadError: '下載失敗', + noModel: '模型在Ollama不存在' + }, + model: { + allModel: '全部模型', + publicModel: 
'公有模型', + privateModel: '私有模型', + LLM: '大語言模型', + EMBEDDING: '向量模型', + RERANKER: '重排模型', + STT: '語音辨識', + TTS: '語音合成', + IMAGE: '圖片理解', + TTI: '圖片生成' + }, + templateForm: { + title: { + baseInfo: '基礎資訊', + advancedInfo: '進階設定', + modelParams: '模型參數', + editParam: '編輯參數', + addParam: '新增參數', + paramSetting: '模型參數設定', + apiParamPassing: '接口傳參' + }, + form: { + templateName: { + label: '模型名稱', + placeholder: '請給基礎模型設定一個名稱', + tooltip: 'MaxKB 中自訂的模型名稱', + requiredMessage: '模型名稱不能為空' + }, + permissionType: { + label: '權限', + privateDesc: '僅當前使用者使用', + publicDesc: '所有使用者都可使用', + requiredMessage: '權限不能為空' + }, + model_type: { + label: '模型類型', + placeholder: '請選擇模型類型', + tooltip1: '大語言模型:在應用中與AI對話的推理模型。', + tooltip2: '向量模型:在知識庫中對文件內容進行向量化化的模型。', + tooltip3: '語音辨識:在應用中開啟語音辨識後用於語音轉文字的模型。', + tooltip4: '語音合成:在應用中開啟語音播放後用於文字轉語音的模型。', + tooltip5: '重排模型:在高階編排應用中使用多路召回時,對候選分段進行重新排序的模型。', + tooltip6: '圖片理解:在高階編排應用中用於圖片理解的視覺模型。', + tooltip7: '圖片生成:在高階編排應用中用於圖片生成的視覺模型。', + requiredMessage: '模型類型不能為空' + }, + base_model: { + label: '基礎模型', + tooltip: '列表中未列出的模型,直接輸入模型名稱,按 Enter 即可新增', + placeholder: '自訂輸入基礎模型後按 Enter 即可', + requiredMessage: '基礎模型不能為空' + } + } + }, + download: { + downloading: '正在下載中', + cancelDownload: '取消下載' + } +} diff --git a/ui/src/locales/lang/zh-Hant/views/user.ts b/ui/src/locales/lang/zh-Hant/views/user.ts new file mode 100644 index 00000000000..7b8f1a88000 --- /dev/null +++ b/ui/src/locales/lang/zh-Hant/views/user.ts @@ -0,0 +1,72 @@ +export default { + title: '使用者管理', + createUser: '建立使用者', + editUser: '編輯使用者', + setting: { + updatePwd: '修改使用者密碼' + }, + tip: { + professionalMessage: '社群版最多支援 2 個使用者,如需擁有更多使用者,請升級為專業版。', + updatePwdSuccess: '使用者密碼修改成功' + }, + delete: { + confirmTitle: '是否刪除該使用者?', + confirmMessage: + '刪除該使用者後,該使用者建立的所有資源(應用、知識庫、模型)都會被刪除,請謹慎操作。' + }, + disabled: { + confirmTitle: '是否停用函數?', + confirmMessage: '停用後,引用該函數的應用在查詢時會報錯,請謹慎操作。' + }, + userForm: { + form: { + username: { + label: '使用者名稱', + placeholder: '請輸入使用者名稱', + requiredMessage: 
'請輸入使用者名稱', + lengthMessage: '長度須介於 6 到 20 個字元之間' + }, + captcha: { + label: '驗證碼', + placeholder: '請輸入驗證碼' + }, + nick_name: { + label: '姓名', + placeholder: '請輸入姓名' + }, + email: { + label: '電子信箱', + placeholder: '請輸入電子信箱', + requiredMessage: '請輸入電子信箱' + }, + phone: { + label: '手機號碼', + placeholder: '請輸入手機號碼' + }, + password: { + label: '登入密碼', + placeholder: '請輸入密碼', + requiredMessage: '請輸入密碼', + lengthMessage: '長度須介於 6 到 20 個字元之間' + }, + new_password: { + label: '新密碼', + placeholder: '請輸入新密碼', + requiredMessage: '請輸入新密碼' + }, + re_password: { + label: '確認密碼', + placeholder: '請輸入確認密碼', + requiredMessage: '請輸入確認密碼', + validatorMessage: '密碼不一致' + } + } + }, + source: { + label: '使用者類型', + local: '系統使用者', + wecom: '企業微信', + lark: '飛書', + dingtalk: '釘釘' + } +} diff --git a/ui/src/locales/lang/zh_CN/components/index.ts b/ui/src/locales/lang/zh_CN/components/index.ts deleted file mode 100644 index bd77588db6f..00000000000 --- a/ui/src/locales/lang/zh_CN/components/index.ts +++ /dev/null @@ -1,4 +0,0 @@ - -export default { - -}; diff --git a/ui/src/locales/lang/zh_CN/index.ts b/ui/src/locales/lang/zh_CN/index.ts deleted file mode 100644 index db4da99423c..00000000000 --- a/ui/src/locales/lang/zh_CN/index.ts +++ /dev/null @@ -1,12 +0,0 @@ -import zhCn from 'element-plus/es/locale/lang/zh-cn'; -import components from './components'; -import layout from './layout'; -import pages from './pages'; - -export default { - lang: '简体中文', - layout, - pages, - components, - zhCn, -}; diff --git a/ui/src/locales/lang/zh_CN/layout.ts b/ui/src/locales/lang/zh_CN/layout.ts deleted file mode 100644 index eb5d1d93ca7..00000000000 --- a/ui/src/locales/lang/zh_CN/layout.ts +++ /dev/null @@ -1,39 +0,0 @@ -export default { - breadcrumb: { - - }, - sidebar: { - - }, - topbar: { - github: "项目地址", - wiki: "用户手册", - forum: "论坛求助", - MenuItem: { - application: "应用", - dataset: "知识库", - setting: "系统设置" - }, - avatar: { - resetPassword: "修改密码", - about: "关于", - logout: "退出", - version:"版本号", - 
dialog:{ - newPassword:"新密码", - enterPassword: "请输入修改密码", - confirmPassword: "确认密码", - passwordLength:"密码长度在 6 到 20 个字符", - passwordMismatch:"两次密码输入不一致", - useEmail:"使用邮箱", - enterEmail: "请输入邮箱", - enterVerificationCode: "请输入验证码", - getVerificationCode: "获取验证码", - verificationCodeSentSuccess:"验证码发送成功", - resend:"重新发送", - cancel:"取消", - save:"保存", - } - } - }, -}; diff --git a/ui/src/locales/lang/zh_CN/pages/index.ts b/ui/src/locales/lang/zh_CN/pages/index.ts deleted file mode 100644 index bd77588db6f..00000000000 --- a/ui/src/locales/lang/zh_CN/pages/index.ts +++ /dev/null @@ -1,4 +0,0 @@ - -export default { - -}; diff --git a/ui/src/locales/useLocale.ts b/ui/src/locales/useLocale.ts index c8021627ca7..60b8db1f248 100644 --- a/ui/src/locales/useLocale.ts +++ b/ui/src/locales/useLocale.ts @@ -9,11 +9,11 @@ export function useLocale() { function changeLocale(lang: string) { // 如果切换的语言不在对应语言文件里则默认为简体中文 if (!langCode.includes(lang)) { - lang = 'zh_CN'; + lang = 'en-US'; } locale.value = lang; - useLocalStorage(localeConfigKey, 'zh_CN').value = lang; + useLocalStorage(localeConfigKey, 'en-US').value = lang; } const getComponentsLocale = computed(() => { diff --git a/ui/src/main.ts b/ui/src/main.ts index a978a7efa20..a31326d7e37 100644 --- a/ui/src/main.ts +++ b/ui/src/main.ts @@ -2,14 +2,49 @@ import '@/styles/index.scss' import ElementPlus from 'element-plus' import * as ElementPlusIcons from '@element-plus/icons-vue' import zhCn from 'element-plus/dist/locale/zh-cn.mjs' +import enUs from 'element-plus/dist/locale/en.mjs' +import zhTW from 'element-plus/dist/locale/zh-tw.mjs' import { createApp } from 'vue' import { store } from '@/stores' -import theme from '@/theme' import directives from '@/directives' import App from './App.vue' import router from '@/router' import Components from '@/components' -import i18n from './locales'; +import i18n from './locales' +import { config } from 'md-editor-v3' +import screenfull from 'screenfull' + +import katex from 'katex' 
+import 'katex/dist/katex.min.css' + +import Cropper from 'cropperjs' +import 'cropperjs/dist/cropper.css' + +import mermaid from 'mermaid' + +import highlight from 'highlight.js' +import 'highlight.js/styles/atom-one-dark.css' + +config({ + editorExtensions: { + highlight: { + instance: highlight + }, + screenfull: { + instance: screenfull + }, + katex: { + instance: katex + }, + cropper: { + instance: Cropper + }, + mermaid: { + instance: mermaid + } + } +}) + const app = createApp(App) app.use(store) app.use(directives) @@ -17,13 +52,17 @@ app.use(directives) for (const [key, component] of Object.entries(ElementPlusIcons)) { app.component(key, component) } +const locale_map: any = { + 'zh-CN': zhCn, + 'zh-Hant': zhTW, + 'en-US': enUs +} app.use(ElementPlus, { - locale: zhCn + locale: locale_map[localStorage.getItem('MaxKB-locale') || navigator.language || 'en-US'] }) -app.use(theme) - app.use(router) -app.use(i18n); +app.use(i18n) app.use(Components) app.mount('#app') +export { app } diff --git a/ui/src/request/index.ts b/ui/src/request/index.ts index 7d748ffd8e3..72588d2c6f2 100644 --- a/ui/src/request/index.ts +++ b/ui/src/request/index.ts @@ -1,4 +1,4 @@ -import axios, { type AxiosRequestConfig } from 'axios' +import axios, { type InternalAxiosRequestConfig, AxiosHeaders } from 'axios' import { MsgError } from '@/utils/message' import type { NProgress } from 'nprogress' import type { Ref } from 'vue' @@ -11,7 +11,7 @@ import { ref, type WritableComputedRef } from 'vue' const axiosConfig = { baseURL: '/api', withCredentials: false, - timeout: 60000, + timeout: 600000, headers: {} } @@ -19,12 +19,14 @@ const instance = axios.create(axiosConfig) /* 设置请求拦截器 */ instance.interceptors.request.use( - (config: AxiosRequestConfig) => { + (config: InternalAxiosRequestConfig) => { if (config.headers === undefined) { - config.headers = {} + config.headers = new AxiosHeaders() } const { user } = useStore() const token = user.getToken() + const language = user.getLanguage() 
+ config.headers['Accept-Language'] = `${language}` if (token) { config.headers['AUTHORIZATION'] = `${token}` } @@ -40,8 +42,16 @@ instance.interceptors.response.use( (response: any) => { if (response.data) { if (response.data.code !== 200 && !(response.data instanceof Blob)) { - MsgError(response.data.message) - return Promise.reject(response.data) + if (response.config.url.includes('/application/authentication')) { + return Promise.reject(response.data) + } + if ( + !response.config.url.includes('/valid') && + !response.config.url.includes('/function_lib/debug') + ) { + MsgError(response.data.message) + return Promise.reject(response.data) + } } } return response @@ -195,10 +205,12 @@ export const postStream: (url: string, data?: unknown) => Promise | ) => { const { user } = useStore() const token = user.getToken() + const language = user.getLanguage() const headers: HeadersInit = { 'Content-Type': 'application/json' } if (token) { headers['AUTHORIZATION'] = `${token}` } + headers['Accept-Language'] = `${language}` return fetch(url, { method: 'POST', body: data ? 
JSON.stringify(data) : undefined, @@ -211,9 +223,14 @@ export const exportExcel: ( url: string, params: any, loading?: NProgress | Ref -) => void = (fileName: string, url: string, params: any, loading?: NProgress | Ref) => { - promise(request({ url: url, method: 'get', params, responseType: 'blob' }), loading) - .then((res: any) => { +) => Promise = ( + fileName: string, + url: string, + params: any, + loading?: NProgress | Ref +) => { + return promise(request({ url: url, method: 'get', params, responseType: 'blob' }), loading).then( + (res: any) => { if (res) { const blob = new Blob([res], { type: 'application/vnd.ms-excel' @@ -225,8 +242,92 @@ export const exportExcel: ( //释放内存 window.URL.revokeObjectURL(link.href) } - }) - .catch((e) => {}) + return true + } + ) +} + +export const exportFile: ( + fileName: string, + url: string, + params: any, + loading?: NProgress | Ref +) => Promise = ( + fileName: string, + url: string, + params: any, + loading?: NProgress | Ref +) => { + return promise(request({ url: url, method: 'get', params, responseType: 'blob' }), loading).then( + (res: any) => { + if (res) { + const blob = new Blob([res], { + type: 'application/octet-stream' + }) + const link = document.createElement('a') + link.href = window.URL.createObjectURL(blob) + link.download = fileName + link.click() + //释放内存 + window.URL.revokeObjectURL(link.href) + } + return true + } + ) +} + +export const exportExcelPost: ( + fileName: string, + url: string, + params: any, + data: any, + loading?: NProgress | Ref +) => Promise = ( + fileName: string, + url: string, + params: any, + data: any, + loading?: NProgress | Ref +) => { + return promise( + request({ + url: url, + method: 'post', + params, // 查询字符串参数 + data, // 请求体数据 + responseType: 'blob' + }), + loading + ).then((res: any) => { + if (res) { + const blob = new Blob([res], { + type: 'application/vnd.ms-excel' + }) + const link = document.createElement('a') + link.href = window.URL.createObjectURL(blob) + 
link.download = fileName + link.click() + // 释放内存 + window.URL.revokeObjectURL(link.href) + } + return true + }) +} + +export const download: ( + url: string, + method: string, + data?: any, + params?: any, + loading?: NProgress | Ref +) => Promise = ( + url: string, + method: string, + data?: any, + params?: any, + loading?: NProgress | Ref +) => { + return promise(request({ url: url, method: method, data, params, responseType: 'blob' }), loading) } /** diff --git a/ui/src/router/index.ts b/ui/src/router/index.ts index a8259a2452c..360523cb100 100644 --- a/ui/src/router/index.ts +++ b/ui/src/router/index.ts @@ -1,4 +1,5 @@ import { hasPermission } from '@/utils/permission/index' +import NProgress from 'nprogress' import { createRouter, createWebHistory, @@ -9,6 +10,7 @@ import { } from 'vue-router' import useStore from '@/stores' import { routes } from '@/router/routes' +NProgress.configure({ showSpinner: false, speed: 500, minimum: 0.3 }) const router = createRouter({ history: createWebHistory(import.meta.env.BASE_URL), routes: routes @@ -17,6 +19,7 @@ const router = createRouter({ // 路由前置拦截器 router.beforeEach( async (to: RouteLocationNormalized, from: RouteLocationNormalized, next: NavigationGuardNext) => { + NProgress.start() if (to.name === '404') { next() return @@ -25,6 +28,9 @@ router.beforeEach( const notAuthRouteNameList = ['register', 'login', 'forgot_password', 'reset_password', 'Chat'] if (!notAuthRouteNameList.includes(to.name ? 
to.name.toString() : '')) { + if (to.query && to.query.token) { + localStorage.setItem('token', to.query.token.toString()) + } const token = user.getToken() if (!token) { next({ @@ -45,6 +51,9 @@ router.beforeEach( } } ) +router.afterEach(() => { + NProgress.done() +}) export const getChildRouteListByPathAndName = (path: any, name?: RouteRecordName | any) => { return getChildRouteList(routes, path, name) diff --git a/ui/src/router/modules/application.ts b/ui/src/router/modules/application.ts index bfc1293e53f..018429b8971 100644 --- a/ui/src/router/modules/application.ts +++ b/ui/src/router/modules/application.ts @@ -1,24 +1,21 @@ -import Layout from '@/layout/main-layout/index.vue' +import Layout from '@/layout/layout-template/DetailLayout.vue' +import { ComplexPermission } from '@/utils/permission/type' + const applicationRouter = { path: '/application', name: 'application', - meta: { title: '应用', permission: 'APPLICATION:READ' }, + meta: { title: 'views.application.title', permission: 'APPLICATION:READ' }, redirect: '/application', + component: () => import('@/layout/layout-template/AppLayout.vue'), children: [ { path: '/application', - name: 'application', + name: 'application-index', + meta: { title: '应用主页', activeMenu: '/application' }, component: () => import('@/views/application/index.vue') }, { - path: '/application/create', - name: 'CreateApplication', - meta: { activeMenu: '/application' }, - component: () => import('@/views/application/CreateAndSetting.vue'), - hidden: true - }, - { - path: '/application/:id', + path: '/application/:id/:type', name: 'ApplicationDetail', meta: { title: '应用详情', activeMenu: '/application' }, component: Layout, @@ -30,34 +27,48 @@ const applicationRouter = { meta: { icon: 'app-all-menu', iconActive: 'app-all-menu-active', - title: '概览', + title: 'views.applicationOverview.title', active: 'overview', - parentPath: '/application/:id', + parentPath: '/application/:id/:type', parentName: 'ApplicationDetail' }, component: () => 
import('@/views/application-overview/index.vue') }, { - path: 'setting', + path: 'setting', name: 'AppSetting', meta: { icon: 'app-setting', iconActive: 'app-setting-active', - title: '设置', + title: 'common.setting', active: 'setting', - parentPath: '/application/:id', + parentPath: '/application/:id/:type', parentName: 'ApplicationDetail' }, - component: () => import('@/views/application/CreateAndSetting.vue') + component: () => import('@/views/application/ApplicationSetting.vue') + }, + { + path: 'access', + name: 'AppAccess', + meta: { + icon: 'app-access', + iconActive: 'app-access-active', + title: 'views.application.applicationAccess.title', + active: 'access', + parentPath: '/application/:id/:type', + parentName: 'ApplicationDetail', + permission: new ComplexPermission([], ['x-pack'], 'OR') + }, + component: () => import('@/views/application/ApplicationAccess.vue') }, { path: 'hit-test', name: 'AppHitTest', meta: { icon: 'app-hit-test', - title: '命中测试', + title: 'views.application.hitTest.title', active: 'hit-test', - parentPath: '/application/:id', + parentPath: '/application/:id/:type', parentName: 'ApplicationDetail' }, component: () => import('@/views/hit-test/index.vue') @@ -68,15 +79,15 @@ const applicationRouter = { meta: { icon: 'app-document', iconActive: 'app-document-active', - title: '对话日志', + title: 'views.log.title', active: 'log', - parentPath: '/application/:id', + parentPath: '/application/:id/:type', parentName: 'ApplicationDetail' }, component: () => import('@/views/log/index.vue') } ] - }, + } ] } diff --git a/ui/src/router/modules/dataset.ts b/ui/src/router/modules/dataset.ts index abc8a540669..b138f073cca 100644 --- a/ui/src/router/modules/dataset.ts +++ b/ui/src/router/modules/dataset.ts @@ -1,26 +1,28 @@ -import Layout from '@/layout/main-layout/index.vue' +import Layout from '@/layout/layout-template/DetailLayout.vue' const datasetRouter = { path: '/dataset', name: 'dataset', - meta: { title: '知识库', permission: 'DATASET:READ' }, + 
meta: { title: 'views.dataset.title', permission: 'DATASET:READ' }, + component: () => import('@/layout/layout-template/AppLayout.vue'), redirect: '/dataset', children: [ { path: '/dataset', - name: 'dataset', + name: 'dataset-index', + meta: { title: '知识库主页', activeMenu: '/dataset' }, component: () => import('@/views/dataset/index.vue') }, { - path: '/dataset/:type', // create 或者 upload - name: 'CreateDataset', + path: '/dataset/upload', + name: 'UploadDocumentDataset', meta: { activeMenu: '/dataset' }, - component: () => import('@/views/dataset/CreateDataset.vue'), + component: () => import('@/views/dataset/UploadDocumentDataset.vue'), hidden: true }, { path: '/dataset/:id', name: 'DatasetDetail', - meta: { title: '文档', activeMenu: '/dataset' }, + meta: { title: 'common.fileUpload.document', activeMenu: '/dataset' }, component: Layout, hidden: true, children: [ @@ -30,7 +32,7 @@ const datasetRouter = { meta: { icon: 'app-document', iconActive: 'app-document-active', - title: '文档', + title: 'common.fileUpload.document', active: 'document', parentPath: '/dataset/:id', parentName: 'DatasetDetail' @@ -42,7 +44,8 @@ const datasetRouter = { name: 'Problem', meta: { icon: 'app-problems', - title: '问题', + iconActive: 'QuestionFilled', + title: 'views.problem.title', active: 'problem', parentPath: '/dataset/:id', parentName: 'DatasetDetail' @@ -54,7 +57,7 @@ const datasetRouter = { name: 'DatasetHitTest', meta: { icon: 'app-hit-test', - title: '命中测试', + title: 'views.application.hitTest.title', active: 'hit-test', parentPath: '/dataset/:id', parentName: 'DatasetDetail' @@ -67,7 +70,7 @@ const datasetRouter = { meta: { icon: 'app-setting', iconActive: 'app-setting-active', - title: '设置', + title: 'common.setting', active: 'setting', parentPath: '/dataset/:id', parentName: 'DatasetDetail' @@ -82,7 +85,14 @@ const datasetRouter = { meta: { activeMenu: '/dataset' }, component: () => import('@/views/paragraph/index.vue'), hidden: true - } + }, + { + path: '/dataset/import', + 
name: 'ImportDocumentDataset', + meta: { activeMenu: '/dataset' }, + component: () => import('@/views/dataset/ImportDocumentDataset.vue'), + hidden: true + }, ] } diff --git a/ui/src/router/modules/function-lib.ts b/ui/src/router/modules/function-lib.ts new file mode 100644 index 00000000000..2b4dd9543ea --- /dev/null +++ b/ui/src/router/modules/function-lib.ts @@ -0,0 +1,17 @@ +const functionLibRouter = { + path: '/function-lib', + name: 'function_lib', + meta: { title: 'views.functionLib.title', permission: 'APPLICATION:READ' }, + redirect: '/function-lib', + component: () => import('@/layout/layout-template/AppLayout.vue'), + children: [ + { + path: '/function-lib', + name: 'function-lib-index', + meta: { title: '函数库主页', activeMenu: '/function-lib' }, + component: () => import('@/views/function-lib/index.vue') + } + ] +} + +export default functionLibRouter diff --git a/ui/src/router/modules/setting.ts b/ui/src/router/modules/setting.ts index 48404a017fc..eaedb6a5f50 100644 --- a/ui/src/router/modules/setting.ts +++ b/ui/src/router/modules/setting.ts @@ -1,11 +1,11 @@ import { hasPermission } from '@/utils/permission/index' -import Layout from '@/layout/main-layout/index.vue' -import { Role } from '@/utils/permission/type' +import Layout from '@/layout/layout-template/SystemLayout.vue' +import { Role, ComplexPermission } from '@/utils/permission/type' const settingRouter = { path: '/setting', name: 'setting', - meta: { icon: 'Setting', title: '系统设置', permission: 'SETTING:READ' }, - redirect: (to: any) => { + meta: { icon: 'Setting', title: 'views.system.title', permission: 'SETTING:READ' }, + redirect: () => { if (hasPermission(new Role('ADMIN'), 'AND')) { return '/user' } @@ -19,7 +19,7 @@ const settingRouter = { meta: { icon: 'User', iconActive: 'UserFilled', - title: '用户管理', + title: 'views.user.title', activeMenu: '/setting', parentPath: '/setting', parentName: 'setting', @@ -33,7 +33,7 @@ const settingRouter = { meta: { icon: 'app-team', iconActive: 
'app-team-active', - title: '团队成员', + title: 'views.team.title', activeMenu: '/setting', parentPath: '/setting', parentName: 'setting' @@ -46,7 +46,7 @@ const settingRouter = { meta: { icon: 'app-template', iconActive: 'app-template-active', - title: '模型设置', + title: 'views.template.title', activeMenu: '/setting', parentPath: '/setting', parentName: 'setting' @@ -54,17 +54,69 @@ const settingRouter = { component: () => import('@/views/template/index.vue') }, { - path: '/email', - name: 'email', + path: '/system', + name: 'system', meta: { - icon: 'Message', - title: '邮箱设置', + icon: 'app-setting', + iconActive: 'app-setting-active', + title: 'views.system.subTitle', activeMenu: '/setting', parentPath: '/setting', parentName: 'setting', permission: new Role('ADMIN') }, - component: () => import('@/views/email/index.vue') + children: [ + { + path: '/system/theme', + name: 'theme', + meta: { + title: 'views.system.theme.title', + activeMenu: '/setting', + parentPath: '/setting', + parentName: 'setting', + permission: new ComplexPermission(['ADMIN'], ['x-pack'], 'AND') + }, + component: () => import('@/views/theme/index.vue') + }, + { + path: '/system/authentication', + name: 'authentication', + meta: { + title: 'views.system.authentication.title', + activeMenu: '/setting', + parentPath: '/setting', + parentName: 'setting', + permission: new ComplexPermission(['ADMIN'], ['x-pack'], 'AND') + }, + component: () => import('@/views/authentication/index.vue') + }, + { + path: '/system/email', + name: 'email', + meta: { + title: 'views.system.email.title', + activeMenu: '/setting', + parentPath: '/setting', + parentName: 'setting', + permission: new Role('ADMIN') + }, + component: () => import('@/views/email/index.vue') + } + ] + }, + { + path: '/operate', + name: 'operate', + meta: { + icon: 'app-document', + iconActive: 'app-document-active', + title: 'views.operateLog.title', + activeMenu: '/setting', + parentPath: '/setting', + parentName: 'setting', + permission: new 
ComplexPermission(['ADMIN'], ['x-pack'], 'AND') + }, + component: () => import('@/views/operate-log/index.vue') } ] } diff --git a/ui/src/router/routes.ts b/ui/src/router/routes.ts index 4edc08cae69..82ddb0596b4 100644 --- a/ui/src/router/routes.ts +++ b/ui/src/router/routes.ts @@ -8,18 +8,16 @@ export const routes: Array = [ { path: '/', name: 'home', - component: () => import('@/layout/app-layout/index.vue'), redirect: '/application', - children: [ - // TODO 待处理 - // { - // path: '/first', - // name: 'first', - // meta: { icon: 'House', title: '首页' }, - // component: () => import('@/views/first/index.vue') - // }, - ...rolesRoutes - ] + children: [...rolesRoutes] + }, + + // 高级编排 + { + path: '/application/:id/workflow', + name: 'ApplicationWorkflow', + meta: { activeMenu: '/application' }, + component: () => import('@/views/application-workflow/index.vue') }, { diff --git a/ui/src/stores/index.ts b/ui/src/stores/index.ts index 6177aea4ba6..73298551851 100644 --- a/ui/src/stores/index.ts +++ b/ui/src/stores/index.ts @@ -10,6 +10,7 @@ import useApplicationStore from './modules/application' import useDocumentStore from './modules/document' import useProblemStore from './modules/problem' import useLogStore from './modules/log' +import usePromptStore from './modules/prompt' const useStore = () => ({ common: useCommonStore(), @@ -20,7 +21,8 @@ const useStore = () => ({ application: useApplicationStore(), document: useDocumentStore(), problem: useProblemStore(), - log: useLogStore() + log: useLogStore(), + prompt: usePromptStore(), }) export default useStore diff --git a/ui/src/stores/modules/application.ts b/ui/src/stores/modules/application.ts index 49ce795fea2..d3de13782ae 100644 --- a/ui/src/stores/modules/application.ts +++ b/ui/src/stores/modules/application.ts @@ -1,7 +1,9 @@ import { defineStore } from 'pinia' import applicationApi from '@/api/application' +import applicationXpackApi from '@/api/application-xpack' import { type Ref } from 'vue' - +import { 
getBrowserLang } from '@/locales/index' +import useUserStore from './user' const useApplicationStore = defineStore({ id: 'application', state: () => ({ @@ -48,11 +50,37 @@ const useApplicationStore = defineStore({ }, async asyncGetAccessToken(id: string, loading?: Ref) { + return new Promise((resolve, reject) => { + const user = useUserStore() + if (user.isEnterprise()) { + applicationXpackApi + .getAccessToken(id, loading) + .then((data) => { + resolve(data) + }) + .catch((error) => { + reject(error) + }) + } else { + applicationApi + .getAccessToken(id, loading) + .then((data) => { + resolve(data) + }) + .catch((error) => { + reject(error) + }) + } + }) + }, + + async asyncGetAppProfile(loading?: Ref) { return new Promise((resolve, reject) => { applicationApi - .getAccessToken(id, loading) - .then((data) => { - resolve(data) + .getAppProfile(loading) + .then((res) => { + sessionStorage.setItem('language', res.data?.language || getBrowserLang()) + resolve(res) }) .catch((error) => { reject(error) @@ -60,12 +88,17 @@ const useApplicationStore = defineStore({ }) }, - async asyncAppAuthentication(token: string, loading?: Ref) { + async asyncAppAuthentication( + token: string, + loading?: Ref, + authentication_value?: any + ) { return new Promise((resolve, reject) => { applicationApi - .postAppAuthentication(token, loading) + .postAppAuthentication(token, loading, authentication_value) .then((res) => { - localStorage.setItem('accessToken', res.data) + localStorage.setItem(`${token}-accessToken`, res.data) + sessionStorage.setItem(`${token}-accessToken`, res.data) resolve(res) }) .catch((error) => { @@ -88,6 +121,18 @@ const useApplicationStore = defineStore({ reject(error) }) }) + }, + async validatePassword(id: string, password: string, loading?: Ref) { + return new Promise((resolve, reject) => { + applicationApi + .validatePassword(id, password, loading) + .then((data) => { + resolve(data) + }) + .catch((error) => { + reject(error) + }) + }) } } }) diff --git 
a/ui/src/stores/modules/common.ts b/ui/src/stores/modules/common.ts index 606d574f092..636a36dc294 100644 --- a/ui/src/stores/modules/common.ts +++ b/ui/src/stores/modules/common.ts @@ -1,9 +1,13 @@ import { defineStore } from 'pinia' +import { DeviceType, ValidType } from '@/enums/common' +import type { Ref } from 'vue' +import userApi from '@/api/user' export interface commonTypes { breadcrumb: any paginationConfig: any | null search: any + device: string } const useCommonStore = defineStore({ @@ -12,7 +16,8 @@ const useCommonStore = defineStore({ breadcrumb: null, // 搜索和分页缓存 paginationConfig: {}, - search: {} + search: {}, + device: DeviceType.Desktop }), actions: { saveBreadcrumb(data: any) { @@ -23,6 +28,24 @@ const useCommonStore = defineStore({ }, saveCondition(val: string, data: any) { this.search[val] = data + }, + toggleDevice(value: DeviceType) { + this.device = value + }, + isMobile() { + return this.device === DeviceType.Mobile + }, + async asyncGetValid(valid_type: ValidType, valid_count: number, loading?: Ref) { + return new Promise((resolve, reject) => { + userApi + .getValid(valid_type, valid_count, loading) + .then((data) => { + resolve(data) + }) + .catch((error) => { + reject(error) + }) + }) } } }) diff --git a/ui/src/stores/modules/dataset.ts b/ui/src/stores/modules/dataset.ts index 22289ea5e84..b185d795a1c 100644 --- a/ui/src/stores/modules/dataset.ts +++ b/ui/src/stores/modules/dataset.ts @@ -7,6 +7,7 @@ import { type Ref } from 'vue' export interface datasetStateTypes { baseInfo: datasetData | null webInfo: any + documentsType: string documentsFiles: UploadUserFile[] } @@ -15,6 +16,7 @@ const useDatasetStore = defineStore({ state: (): datasetStateTypes => ({ baseInfo: null, webInfo: null, + documentsType: '', documentsFiles: [] }), actions: { @@ -24,6 +26,9 @@ const useDatasetStore = defineStore({ saveWebInfo(info: any) { this.webInfo = info }, + saveDocumentsType(val: string) { + this.documentsType = val + }, saveDocumentsFile(file: 
UploadUserFile[]) { this.documentsFiles = file }, diff --git a/ui/src/stores/modules/log.ts b/ui/src/stores/modules/log.ts index 662eab28434..d487c1baf70 100644 --- a/ui/src/stores/modules/log.ts +++ b/ui/src/stores/modules/log.ts @@ -18,6 +18,60 @@ const useLogStore = defineStore({ reject(error) }) }) + }, + async asyncChatRecordLog( + id: string, + chatId: string, + page: pageRequest, + loading?: Ref, + order_asc?: boolean + ) { + return new Promise((resolve, reject) => { + logApi + .getChatRecordLog(id, chatId, page, loading, order_asc) + .then((data) => { + resolve(data) + }) + .catch((error) => { + reject(error) + }) + }) + }, + async asyncGetChatLogClient(id: string, page: pageRequest, loading?: Ref) { + return new Promise((resolve, reject) => { + logApi + .getChatLogClient(id, page, loading) + .then((data) => { + resolve(data) + }) + .catch((error) => { + reject(error) + }) + }) + }, + async asyncDelChatClientLog(id: string, chatId: string, loading?: Ref) { + return new Promise((resolve, reject) => { + logApi + .delChatClientLog(id, chatId, loading) + .then((data) => { + resolve(data) + }) + .catch((error) => { + reject(error) + }) + }) + }, + async asyncPutChatClientLog(id: string, chatId: string, data: any, loading?: Ref) { + return new Promise((resolve, reject) => { + logApi + .putChatClientLog(id, chatId, data, loading) + .then((data) => { + resolve(data) + }) + .catch((error) => { + reject(error) + }) + }) } } }) diff --git a/ui/src/stores/modules/model.ts b/ui/src/stores/modules/model.ts index a68b1e54ab1..0875e200e1e 100644 --- a/ui/src/stores/modules/model.ts +++ b/ui/src/stores/modules/model.ts @@ -1,11 +1,11 @@ import { defineStore } from 'pinia' import modelApi from '@/api/model' -import type { modelRequest, Provider } from '@/api/type/model' +import type { ListModelRequest, Provider } from '@/api/type/model' const useModelStore = defineStore({ id: 'model', state: () => ({}), actions: { - async asyncGetModel(data?: modelRequest) { + async 
asyncGetModel(data?: ListModelRequest) { return new Promise((resolve, reject) => { modelApi .getModel(data) diff --git a/ui/src/stores/modules/prompt.ts b/ui/src/stores/modules/prompt.ts new file mode 100644 index 00000000000..2ec5bcdedca --- /dev/null +++ b/ui/src/stores/modules/prompt.ts @@ -0,0 +1,38 @@ +import { defineStore } from 'pinia' +import { t } from '@/locales' +export interface promptTypes { + user: string + formValue: { model_id: string; prompt: string } +} + +const usePromptStore = defineStore({ + id: 'prompt', + state: (): promptTypes[] => JSON.parse(localStorage.getItem('PROMPT_CACHE') || '[]'), + actions: { + save(user: string, formValue: any) { + this.$state.forEach((item: any, index: number) => { + if (item.user === user) { + this.$state.splice(index, 1) + } + }) + this.$state.push({ user, formValue }) + localStorage.setItem('PROMPT_CACHE', JSON.stringify(this.$state)) + }, + get(user: string) { + for (let i = 0; i < this.$state.length; i++) { + if (this.$state[i].user === user) { + return this.$state[i].formValue + } + } + return { + model_id: '', + prompt: + t('views.document.generateQuestion.prompt1', { data: '{data}' }) + + '' + + t('views.document.generateQuestion.prompt2') + } + } + } +}) + +export default usePromptStore diff --git a/ui/src/stores/modules/user.ts b/ui/src/stores/modules/user.ts index f4c0a146349..a5f0eae3a60 100644 --- a/ui/src/stores/modules/user.ts +++ b/ui/src/stores/modules/user.ts @@ -1,12 +1,22 @@ import { defineStore } from 'pinia' +import { type Ref } from 'vue' import type { User } from '@/api/type/user' +import { cloneDeep } from 'lodash' import UserApi from '@/api/user' - +import ThemeApi from '@/api/theme' +import { useElementPlusTheme } from 'use-element-plus-theme' +import { defaultPlatformSetting } from '@/utils/theme' +import { useLocalStorage } from '@vueuse/core' +import { localeConfigKey, getBrowserLang } from '@/locales/index' export interface userStateTypes { userType: number // 1 系统操作者 2 对话用户 
userInfo: User | null token: any version?: string + userAccessToken?: string + XPACK_LICENSE_IS_VALID: false + isXPack: false + themeInfo: any } const useUserStore = defineStore({ @@ -15,21 +25,58 @@ const useUserStore = defineStore({ userType: 1, userInfo: null, token: '', - version: '' + version: '', + userAccessToken: '', + XPACK_LICENSE_IS_VALID: false, + isXPack: false, + themeInfo: null }), actions: { + getLanguage() { + return this.userType === 1 + ? localStorage.getItem('MaxKB-locale') || getBrowserLang() + : sessionStorage.getItem('language') || getBrowserLang() + }, + showXpack() { + return this.isXPack + }, + isDefaultTheme() { + return !this.themeInfo?.theme || this.themeInfo?.theme === '#3370FF' + }, + setTheme(data: any) { + const { changeTheme } = useElementPlusTheme(this.themeInfo?.theme) + changeTheme(data?.['theme']) + this.themeInfo = cloneDeep(data) + }, + isExpire() { + return this.isXPack && !this.XPACK_LICENSE_IS_VALID + }, + isEnterprise() { + return this.isXPack && this.XPACK_LICENSE_IS_VALID + }, getToken(): String | null { if (this.token) { return this.token } - return this.userType === 1 - ? localStorage.getItem('token') - : localStorage.getItem('accessToken') + return this.userType === 1 ? localStorage.getItem('token') : this.getAccessToken() + }, + getAccessToken() { + const token = sessionStorage.getItem(`${this.userAccessToken}-accessToken`) + if (token) { + return token + } + const local_token = localStorage.getItem(`${this.userAccessToken}-accessToken`) + if (local_token) { + return local_token + } + return localStorage.getItem(`accessToken`) }, getPermissions() { if (this.userInfo) { - return this.userInfo?.permissions + return this.isXPack && this.XPACK_LICENSE_IS_VALID + ? 
[...this.userInfo?.permissions, 'x-pack'] + : this.userInfo?.permissions } else { return [] } @@ -41,25 +88,83 @@ const useUserStore = defineStore({ return '' } }, - changeUserType(num: number) { + changeUserType(num: number, token?: string) { this.userType = num + this.userAccessToken = token + }, + + async asyncGetProfile() { + return new Promise((resolve, reject) => { + UserApi.getProfile() + .then(async (ok) => { + this.version = ok.data?.version || '-' + this.isXPack = ok.data?.IS_XPACK + this.XPACK_LICENSE_IS_VALID = ok.data?.XPACK_LICENSE_IS_VALID + + if (this.isEnterprise()) { + await this.theme() + } else { + this.themeInfo = { + ...defaultPlatformSetting + } + } + resolve(ok) + }) + .catch((error) => { + reject(error) + }) + }) }, - async asyncGetVersion() { - return UserApi.getVersion().then((ok) => { - this.version = ok.data?.version || '-' + async theme(loading?: Ref) { + return await ThemeApi.getThemeInfo(loading).then((ok) => { + this.setTheme(ok.data) + // window.document.title = this.themeInfo['title'] || 'MaxKB' + // const link = document.querySelector('link[rel="icon"]') as any + // if (link) { + // link['href'] = this.themeInfo['icon'] || '/favicon.ico' + // } }) }, async profile() { - return UserApi.profile().then((ok) => { + return UserApi.profile().then(async (ok) => { this.userInfo = ok.data - this.asyncGetVersion() + useLocalStorage(localeConfigKey, 'en-US').value = ok.data?.language || this.getLanguage() + return this.asyncGetProfile() }) }, - async login(username: string, password: string) { - return UserApi.login({ username, password }).then((ok) => { + async login(auth_type: string, username: string, password: string, captcha: string) { + return UserApi.login(auth_type, { username, password, captcha }).then((ok) => { + this.token = ok.data + localStorage.setItem('token', ok.data) + return this.profile() + }) + }, + async dingCallback(code: string) { + return UserApi.getDingCallback(code).then((ok) => { + this.token = ok.data + 
localStorage.setItem('token', ok.data) + return this.profile() + }) + }, + async dingOauth2Callback(code: string) { + return UserApi.getDingOauth2Callback(code).then((ok) => { + this.token = ok.data + localStorage.setItem('token', ok.data) + return this.profile() + }) + }, + async wecomCallback(code: string) { + return UserApi.getWecomCallback(code).then((ok) => { + this.token = ok.data + localStorage.setItem('token', ok.data) + return this.profile() + }) + }, + async larkCallback(code: string) { + return UserApi.getlarkCallback(code).then((ok) => { this.token = ok.data localStorage.setItem('token', ok.data) return this.profile() @@ -71,6 +176,34 @@ const useUserStore = defineStore({ localStorage.removeItem('token') return true }) + }, + async getAuthType() { + return UserApi.getAuthType().then((ok) => { + return ok.data + }) + }, + async getQrType() { + return UserApi.getQrType().then((ok) => { + return ok.data + }) + }, + async getQrSource() { + return UserApi.getQrSource().then((ok) => { + return ok.data + }) + }, + async postUserLanguage(lang: string, loading?: Ref) { + return new Promise((resolve, reject) => { + UserApi.postLanguage({ language: lang }, loading) + .then(async (ok) => { + useLocalStorage(localeConfigKey, 'en-US').value = lang + window.location.reload() + resolve(ok) + }) + .catch((error) => { + reject(error) + }) + }) } } }) diff --git a/ui/src/styles/app.scss b/ui/src/styles/app.scss index 707d4fb2c41..8646670b07a 100644 --- a/ui/src/styles/app.scss +++ b/ui/src/styles/app.scss @@ -1,3 +1,12 @@ +@font-face { + font-family: AlibabaPuHuiTi; + src: + url('./font/AlibabaPuHuiTi-3-55-Regular.woff') format('woff'), + url('./font/AlibabaPuHuiTi-3-55-Regular.ttf') format('truetype'), + url('./font/AlibabaPuHuiTi-3-55-Regular.eot') format('eot'), + url('./font/AlibabaPuHuiTi-3-55-Regular.otf') format('opentype'), + url('./font/AlibabaPuHuiTi-3-55-Regular.woff2') format('woff2'); +} * { margin: 0; padding: 0; @@ -6,13 +15,13 @@ html { height: 100%; 
box-sizing: border-box; + font-size: 100%; } body { -moz-osx-font-smoothing: grayscale; -webkit-font-smoothing: antialiased; - font-family: 'PingFang SC', 'Helvetica Neue', Helvetica, 'Hiragino Sans GB', 'Microsoft YaHei', - '微软雅黑', Arial, sans-serif; + font-family: 'PingFang SC', AlibabaPuHuiTi !important; font-size: 14px; font-style: normal; font-weight: 500; @@ -92,6 +101,11 @@ h4 { font-size: 16px; } +h5 { + font-size: 14px; + font-weight: 500; +} + .bold { font-weight: 600; } @@ -111,9 +125,18 @@ h4 { .w-240 { width: 240px; } +.w-280 { + width: 280px; +} .w-500 { width: 500px; } +.max-w-200 { + max-width: 200px; +} +.max-w-350 { + max-width: 350px; +} .mt-4 { margin-top: calc(var(--app-base-px) - 4px); @@ -131,6 +154,9 @@ h4 { .mt-20 { margin-top: calc(var(--app-base-px) * 2 + 4px); } +.mt-24 { + margin-top: calc(var(--app-base-px) * 3); +} .mb-4 { margin-bottom: calc(var(--app-base-px) - 4px); @@ -174,7 +200,9 @@ h4 { .mr-16 { margin-right: calc(var(--app-base-px) * 2); } - +.mr-24 { + margin-right: calc(var(--app-base-px) * 3); +} .p-8 { padding: var(--app-base-px); } @@ -184,6 +212,18 @@ h4 { .p-24 { padding: calc(var(--app-base-px) * 3); } +.p-8-12 { + padding: calc(var(--app-base-px)) calc(var(--app-base-px) + 4px); +} +.p-12-16 { + padding: calc(var(--app-base-px) + 4px) calc(var(--app-base-px) * 2); +} +.p-12-24 { + padding: calc(var(--app-base-px) + 4px) calc(var(--app-base-px) * 3); +} +.p-16-24 { + padding: calc(var(--app-base-px) * 2) calc(var(--app-base-px) * 3); +} .pt-0 { padding-top: 0; @@ -212,6 +252,12 @@ h4 { align-items: center; } +.flex-wrap { + display: flex; + flex-wrap: wrap; + align-content: space-between; +} + .align-center { align-items: center; } @@ -238,6 +284,10 @@ h4 { vertical-align: middle; } +.line-height-22 { + line-height: 22px; +} + .border { border: 1px solid var(--el-border-color); } @@ -259,10 +309,31 @@ h4 { .border-b-light { border-bottom: 1px solid var(--el-border-color-lighter); } +.border-r-4 { + border-radius: 4px; 
+} +.border-r-8 { + border-radius: 8px; +} + +.border-t-dashed { + border-top: 1px dashed var(--el-border-color); +} +.border-primary { + border: 1px solid var(--el-color-primary); + color: var(--el-color-primary); +} + +.border-none { + border: none; +} .cursor { cursor: pointer; } +.notAllowed { + cursor: not-allowed; +} /* 超出省略号 @@ -297,8 +368,8 @@ h4 { word-break: break-all; } -.pre-line { - white-space: pre-line; +.pre-wrap { + white-space: pre-wrap; } /* @@ -327,20 +398,15 @@ h4 { } } -.app-logo-font { - background: var(--app-logo-color); - background-clip: text; - -webkit-background-clip: text; - -webkit-text-fill-color: transparent; - font-family: Arial Black; - font-style: normal; - font-weight: 900; -} - /* tag */ .default-tag { - background: var(--tag-default-bg); - color: var(--tag-default-color); + background: var(--el-color-primary-light-8); + color: var(--el-color-primary); + border: none; +} +.danger-tag { + background: var(--tag-danger-bg); + color: #d03f3b; border: none; } .success-tag { @@ -354,12 +420,24 @@ h4 { border: none; } +.info-tag { + background: var(--app-text-color-light-1); + color: var(--app-text-color-secondary); + border: none; +} + .purple-tag { background: #f2ebfe; color: #7f3bf5; border-color: #e0d7f0; } +.blue-tag { + background: #ebf1ff; + color: #3370ff; + border-color: #d6e2ff; +} + /* card 无边框无阴影 灰色背景 */ @@ -374,7 +452,9 @@ h4 { .rotate-90 { transform: rotateZ(90deg); } - +.rotate-180 { + transform: rotateZ(180deg); +} /* 表格第一行插入自定义行 */ @@ -419,12 +499,22 @@ h4 { } .avatar-light { - background: var(--el-color-primary-light-4); + background: var(--el-color-primary-light-3); } .avatar-purple { background: #7f3bf5; } +.avatar-blue { + background: #3370ff; +} + +.avatar-green { + background: #34c724; +} +.avatar-grey { + background: #bbbfc4; +} .success { color: var(--el-color-success); @@ -446,6 +536,14 @@ h4 { color: var(--app-text-color-secondary); } +.layout-bg { + background: var(--app-layout-bg-color); +} + +.white-bg { + 
background: #ffffff; +} + .app-warning-icon { font-size: 16px; color: var(--app-text-color-secondary); @@ -510,7 +608,7 @@ h4 { .card__radio { width: 100%; display: block; - + line-height: 22px; .el-radio { white-space: break-spaces; width: 100%; @@ -535,15 +633,6 @@ h4 { } } -// AI模型选择:添加模型hover样式 -.select-model { - .el-select-dropdown__footer { - &:hover { - background-color: var(--el-fill-color-light); - } - } -} - // 段落card .paragraph-source-card { height: 210px; @@ -606,3 +695,99 @@ h4 { border: 1px solid var(--el-color-primary); } } + +.app-card { + background: #fff; + border-radius: 8px; + box-shadow: 0px 2px 4px 0px rgba(31, 35, 41, 0.12); +} + +.app-radio-button-group { + border: 1px solid var(--app-border-color-dark); + border-radius: var(--el-border-radius-base); + .el-radio-button { + padding: 3px; + } + .el-radio-button__inner { + border: none !important; + border-radius: var(--el-border-radius-base) !important; + padding: 5px 8px; + font-weight: 400; + } + .el-radio-button__original-radio:checked + .el-radio-button__inner { + color: var(--el-color-primary) !important; + background: var(--el-color-primary-light-9) !important; + border: none !important; + box-shadow: none !important; + font-weight: 500; + } +} + +// 自定义主题 +.custom-header { + background: var(--el-color-primary-light-9) !important; +} + +.edit-avatar { + position: relative; + .edit-mask { + position: absolute; + left: 0; + background: rgba(0, 0, 0, 0.4); + } +} + +.record-tip-confirm { + max-width: 800px !important; +} + +//企业微信 +.wwLogin_qrcode_head { + padding: 20px 0 !important; +} + +// 复合搜索框 +.complex-search { + border: 1px solid var(--el-border-color); + border-radius: var(--el-border-radius-base); + .el-select__wrapper { + box-shadow: none !important; + } + .el-input__wrapper { + box-shadow: none !important; + } + &__left { + border-right: 1px solid var(--el-border-color); + } +} + +@media only screen and (max-width: 768px) { + .responsive-dialog { + width: 90% !important; + } +} + 
+// 蓝色提示框 +.update-info { + background: #d6e2ff; + line-height: 25px; +} + +// 参数设置dialog +.param-dialog { + padding: 8px 8px 24px 8px; + .el-dialog__header { + padding: 16px 16px 0 16px; + } + .el-dialog__body { + padding: 0 !important; + } + .dialog-max-height { + height: 550px; + } + .custom-slider { + .el-input-number.is-without-controls .el-input__wrapper { + padding: 0 !important; + } + } +} diff --git a/ui/src/styles/element-plus.scss b/ui/src/styles/element-plus.scss index 8aa2ef73473..d1f067b18fd 100644 --- a/ui/src/styles/element-plus.scss +++ b/ui/src/styles/element-plus.scss @@ -1,11 +1,13 @@ :root { --el-color-primary: #3370ff; - --el-color-primary-light-9: rgba(51, 112, 255, 0.1); --el-menu-item-height: 45px; --el-box-shadow-light: 0px 2px 4px 0px rgba(31, 35, 41, 0.12); --el-border-color: #dee0e3; --el-text-color-regular: #1f2329; - --el-color-info: #8f959e; + --el-color-info: #8f959e !important; + --el-disabled-bg-color: #eff0f1 !important; + --el-text-color-primary: #1f2329; + --el-font-line-height-primary: 22px; } .el-button { @@ -29,6 +31,12 @@ border: none; } } +.el-button--text { + border: none !important; + &:focus { + border: none !important; + } +} .el-button--large { font-size: 16px; } @@ -54,6 +62,7 @@ } .el-form-item__label { font-weight: 400; + width: 100%; } .el-form-item__error { @@ -84,6 +93,7 @@ .el-dialog__header { padding-bottom: 24px; font-weight: 500; + margin-top: -5px; } .el-dialog__footer { padding-top: 0; @@ -94,6 +104,7 @@ } .el-message-box { --el-messagebox-font-size: 16px; + --el-messagebox-width: 475px; padding: 24px; .el-message-box__header { padding: 0; @@ -125,7 +136,7 @@ } .el-message-box__headerbtn { right: 10px; - top: 15px; + top: 16px; .el-message-box__close { font-size: 20px; } @@ -133,6 +144,7 @@ .el-card { --el-card-padding: calc(var(--app-base-px) * 2); + color: var(--el-text-color-regular); } .el-dropdown { color: var(--app-text-color); @@ -223,7 +235,7 @@ margin-right: calc(var(--app-base-px) + 4px); } 
.el-slider__input { - width: 60px; + width: 72px; } .input-with-select { @@ -256,6 +268,12 @@ } } +.el-select__wrapper.is-disabled { + background-color: var(--el-disabled-bg-color) !important; +} +.el-select__placeholder { + font-weight: 400; +} .el-select__placeholder.is-transparent { color: var(--app-input-color-placeholder); font-weight: 400; @@ -264,6 +282,9 @@ .el-select-group .el-select-dropdown__item { padding-left: 11px; } +.el-select-dropdown__item { + font-weight: 400; +} .el-select__caret { color: var(--app-text-color-secondary); @@ -275,6 +296,12 @@ padding: 0 14px; } +.el-tabs__nav-wrap:after { + height: 1px; +} +.el-tabs__active-bar { + height: 3px; +} .el-drawer { .el-drawer__header { padding: 16px 24px; @@ -324,6 +351,10 @@ color: var(--app-text-color); } +.radio_content .is-disabled { + background-color: var(--el-disabled-bg-color) !important; +} + .el-input-number.is-controls-right .el-input__wrapper { padding-left: 15px !important; padding-right: 42px !important; @@ -348,6 +379,22 @@ // radio 一行一个样式 .radio-block { + width: 100%; + display: inline-flex; + .el-radio { + align-items: flex-start; + height: 100%; + width: 100%; + margin-top: 8px; + } + .el-radio__label { + width: 100%; + margin-top: -8px; + line-height: 30px; + } +} +// radio 一行一个样式 有输入框 上传头像的内容 +.radio-block-avatar { width: 100%; display: block; .el-radio { @@ -361,3 +408,39 @@ line-height: 30px; } } + +// 提示横幅 +.el-alert__title { + color: var(--el-text-color-regular) !important; + font-weight: 400; +} +.el-alert--warning.is-light { + background-color: #ffe7cc; + .el-alert__icon { + color: #ff8800; + } +} +.el-alert--success.is-light { + background-color: #d6f4d3; + .el-alert__icon { + color: #34c724; + } +} +.el-alert--danger.is-light { + background-color: #fddbda; + .el-alert__icon { + color: #f54a45; + } +} + +.el-checkbox__input.is-checked + .el-checkbox__label { + color: var(--el-checkbox-text-color); +} + +.el-table .el-popper { + max-width: 500px !important; +} + +.el-tree { + 
--el-tree-node-content-height: 38px; +} diff --git a/ui/src/styles/font/AlibabaPuHuiTi-3-55-Regular.eot b/ui/src/styles/font/AlibabaPuHuiTi-3-55-Regular.eot new file mode 100644 index 00000000000..82f27fd7891 Binary files /dev/null and b/ui/src/styles/font/AlibabaPuHuiTi-3-55-Regular.eot differ diff --git a/ui/src/styles/font/AlibabaPuHuiTi-3-55-Regular.otf b/ui/src/styles/font/AlibabaPuHuiTi-3-55-Regular.otf new file mode 100644 index 00000000000..541e3c108c3 Binary files /dev/null and b/ui/src/styles/font/AlibabaPuHuiTi-3-55-Regular.otf differ diff --git a/ui/src/styles/font/AlibabaPuHuiTi-3-55-Regular.ttf b/ui/src/styles/font/AlibabaPuHuiTi-3-55-Regular.ttf new file mode 100644 index 00000000000..a6eaf3613ee Binary files /dev/null and b/ui/src/styles/font/AlibabaPuHuiTi-3-55-Regular.ttf differ diff --git a/ui/src/styles/font/AlibabaPuHuiTi-3-55-Regular.woff b/ui/src/styles/font/AlibabaPuHuiTi-3-55-Regular.woff new file mode 100644 index 00000000000..f5763765443 Binary files /dev/null and b/ui/src/styles/font/AlibabaPuHuiTi-3-55-Regular.woff differ diff --git a/ui/src/styles/font/AlibabaPuHuiTi-3-55-Regular.woff2 b/ui/src/styles/font/AlibabaPuHuiTi-3-55-Regular.woff2 new file mode 100644 index 00000000000..4b574fbd884 Binary files /dev/null and b/ui/src/styles/font/AlibabaPuHuiTi-3-55-Regular.woff2 differ diff --git a/ui/src/styles/index.scss b/ui/src/styles/index.scss index 373b1d80baa..36251d3f805 100644 --- a/ui/src/styles/index.scss +++ b/ui/src/styles/index.scss @@ -3,6 +3,5 @@ @import './app.scss'; @import './element-plus.scss'; @import 'nprogress/nprogress.css'; -@import 'highlight.js/styles/default.css'; @import 'md-editor-v3/lib/style.css'; -@import './md-editor.scss'; \ No newline at end of file +@import './md-editor.scss'; diff --git a/ui/src/styles/md-editor.scss b/ui/src/styles/md-editor.scss index ce87022e1db..c60f51f4e96 100644 --- a/ui/src/styles/md-editor.scss +++ b/ui/src/styles/md-editor.scss @@ -1,7 +1,15 @@ +.md-editor { + font-weight: 400; 
+} + .md-editor-preview { padding: 0; margin: 0; font-size: inherit; + word-break: break-word; + table { + display: block; + } p { padding: 0 !important; } @@ -11,6 +19,15 @@ } img { border: 0 !important; + max-width: 360px !important; + } +} + +@media only screen and (max-width: 768px) { + .md-editor-preview { + img { + max-width: 100% !important; + } } } diff --git a/ui/src/styles/variables.scss b/ui/src/styles/variables.scss index ef092ca6aa1..f2c2a0adebf 100644 --- a/ui/src/styles/variables.scss +++ b/ui/src/styles/variables.scss @@ -9,13 +9,13 @@ --app-view-padding: 24px; --app-view-bg-color: #ffffff; --app-border-color-dark: #bbbfc4; - + --md-bk-hover-color:var(--el-border-color-hover); /** header 组件 */ --app-header-height: 56px; --app-header-padding: 0 20px; --app-header-bg-color: linear-gradient(90deg, #ebf1ff 24.34%, #e5fbf8 56.18%, #f2ebfe 90.18%); - --app-logo-color: linear-gradient(180deg, #3370ff 0%, #7f3bf5 100%); - --app-avatar-gradient-color: linear-gradient(270deg, #9258f7 0%, #3370ff 100%); + --app-logo-color: linear-gradient(180deg, #3370FF 0%, #7f3bf5 100%); + --app-avatar-gradient-color: linear-gradient(270deg, #9258f7 0%, #3370FF 100%); /* 计算高度 */ --app-main-height: calc(100vh - var(--app-header-height) - var(--app-view-padding) * 2 - 40px); @@ -30,10 +30,11 @@ --tag-success-color: #2ca91f; --tag-warning-bg: rgba(255, 136, 0, 0.2); --tag-warning-color: #d97400; + --tag-danger-bg: rgba(245, 74, 69, 0.2); /** card */ --card-width: 330px; - --card-min-height: 160px; + --card-min-height: 166px; --card-min-width: 220px; /** setting */ diff --git a/ui/src/theme/defaultInferData.ts b/ui/src/theme/defaultInferData.ts deleted file mode 100644 index acc50ce22d6..00000000000 --- a/ui/src/theme/defaultInferData.ts +++ /dev/null @@ -1,13 +0,0 @@ -import type { InferData } from "./type"; -const inferData: Array = [ - { - key: "primary", - value: "#3370FF", - }, - { key: "success", value: "#67c23a" }, - { key: "warning", value: "#e6a23c" }, - { key: 
"danger", value: "#f56c6c" }, - { key: "error", value: "#F54A45" }, - { key: "info", value: "#909399" }, -]; -export default inferData; diff --git a/ui/src/theme/defaultKeyValueData.ts b/ui/src/theme/defaultKeyValueData.ts deleted file mode 100644 index 694ad0f028d..00000000000 --- a/ui/src/theme/defaultKeyValueData.ts +++ /dev/null @@ -1,5 +0,0 @@ -import type { KeyValueData } from './type' -const keyValueData: KeyValueData = { - '--el-header-padding': '0px' -} -export default keyValueData diff --git a/ui/src/theme/index.ts b/ui/src/theme/index.ts deleted file mode 100644 index 451558c92ba..00000000000 --- a/ui/src/theme/index.ts +++ /dev/null @@ -1,281 +0,0 @@ -import type { - ThemeSetting, - InferData, - KeyValueData, - UpdateInferData, - UpdateKeyValueData -} from './type' -import { TinyColor } from '@ctrl/tinycolor' -// 引入默认推断数据 -import inferData from './defaultInferData' -// 引入默认keyValue数据 -import keyValueData from './defaultKeyValueData' -// 引入设置对象 -import setting from './setting' -import type { App } from 'vue' -declare global { - interface ChildNode { - innerText: string - } -} -class Theme { - /** - * 主题设置 - */ - themeSetting: ThemeSetting - /** - * 键值数据 - */ - keyValue: KeyValueData - /** - * 外推数据 - */ - inferData: Array - /** - *是否是第一次初始化 - */ - isFirstWriteStyle: boolean - /** - * 混色白 - */ - colorWhite: string - /** - * 混色黑 - */ - colorBlack: string - - constructor(themeSetting: ThemeSetting, keyValue: KeyValueData, inferData: Array) { - this.themeSetting = themeSetting - this.keyValue = keyValue - this.inferData = inferData - this.isFirstWriteStyle = true - this.colorWhite = '#ffffff' - this.colorBlack = '#000000' - this.initDefaultTheme() - } - - /** - * 拼接 - * @param setting 主题设置 - * @param names 需要拼接的所有值 - * @returns 拼接后的数据 - */ - getVarName = (setting: ThemeSetting, ...names: Array) => { - return ( - setting.startDivision + setting.namespace + setting.division + names.join(setting.division) - ) - } - - /** - * 转换外推数据 - * @param setting 主题设置对象 - * 
@param inferData 外推数据 - * @returns - */ - mapInferMainStyle = (setting: ThemeSetting, inferData: InferData) => { - const key: string = this.getVarName( - setting, - inferData.setting ? inferData.setting.type : setting.colorInferSetting.type, - inferData.key - ) - return { - [key]: inferData.value, - ...this.mapInferDataStyle(setting, inferData) - } - } - /** - * 转换外推数据 - * @param setting 设置 - * @param inferData 外推数据 - */ - mapInferData = (setting: ThemeSetting, inferData: Array) => { - return inferData - .map((itemData) => { - return this.mapInferMainStyle(setting, itemData) - }) - .reduce((pre, next) => { - return { ...pre, ...next } - }, {}) - } - /** - * 转换外推数据 - * @param setting 主题设置对象 - * @param inferData 外推数据 - * @returns - */ - mapInferDataStyle = (setting: ThemeSetting, inferData: InferData) => { - const inferSetting = inferData.setting ? inferData.setting : setting.colorInferSetting - if (inferSetting.type === 'color') { - return Object.keys(inferSetting) - .map((key: string) => { - if (key === 'light' || key === 'dark') { - return inferSetting[key] - .map((l: any) => { - const varName = this.getVarName( - setting, - inferSetting.type, - inferData.key, - key, - l.toString() - ) - return { - [varName]: new TinyColor(inferData.value) - .mix(key === 'light' ? 
this.colorWhite : this.colorBlack, l * 10) - .toHexString() - } - }) - .reduce((pre: any, next: any) => { - return { ...pre, ...next } - }, {}) - } - return {} - }) - .reduce((pre, next) => { - return { ...pre, ...next } - }, {}) - } - return {} - } - - /** - * - * @param themeSetting 主题设置 - * @param keyValueData 键值数据 - * @returns 映射后的键值数据 - */ - mapKeyValue = (themeSetting: ThemeSetting, keyValueData: KeyValueData) => { - return Object.keys(keyValueData) - .map((key: string) => { - return { - [this.updateKeyBySetting(key, themeSetting)]: keyValueData[key] - } - }) - .reduce((pre, next) => { - return { ...pre, ...next } - }, {}) - } - /** - * 根据配置文件修改Key - * @param key key - * @param themeSetting 主题设置 - * @returns - */ - updateKeyBySetting = (key: string, themeSetting: ThemeSetting) => { - return key.startsWith(themeSetting.startDivision) - ? key - : key.startsWith(themeSetting.namespace) - ? themeSetting.startDivision + key - : key.startsWith(themeSetting.division) - ? themeSetting.startDivision + themeSetting.namespace - : themeSetting.startDivision + themeSetting.namespace + themeSetting.division + key - } - /** - * - * @param setting 主题设置 - * @param keyValue 主题键值对数据 - * @param inferData 外推数据 - * @returns 合并后的键值对数据 - */ - tokeyValueStyle = () => { - return { - ...this.mapInferData(this.themeSetting, this.inferData), - ...this.mapKeyValue(this.themeSetting, this.keyValue) - } - } - - /** - * 将keyValue对象转换为S - * @param keyValue - * @returns - */ - toString = (keyValue: KeyValueData) => { - const inner = Object.keys(keyValue) - .map((key: string) => { - return key + ':' + keyValue[key] + ';' - }) - .join('') - return `@charset "UTF-8";:root{${inner}}` - } - - /** - * - * @param elNewStyle 新的变量样式 - */ - writeNewStyle = (elNewStyle: string) => { - if (this.isFirstWriteStyle) { - const style = document.createElement('style') - style.innerText = elNewStyle - document.head.appendChild(style) - this.isFirstWriteStyle = false - } else { - if (document.head.lastChild) { - 
document.head.lastChild.innerText = elNewStyle - } - } - } - - /** - * 修改数据并且写入dom - * @param updateInferData 平滑数据修改 - * @param updateKeyvalueData keyValue数据修改 - */ - updateWrite = (updateInferData?: UpdateInferData, updateKeyvalueData?: UpdateKeyValueData) => { - this.update(updateInferData, updateKeyvalueData) - const newStyle = this.tokeyValueStyle() - const newStyleString = this.toString(newStyle) - this.writeNewStyle(newStyleString) - } - - /** - * 修改数据 - * @param inferData - * @param keyvalueData - */ - update = (updateInferData?: UpdateInferData, updateKeyvalueData?: UpdateKeyValueData) => { - if (updateInferData) { - this.updateInferData(updateInferData) - } - if (updateKeyvalueData) { - this.updateOrCreateKeyValueData(updateKeyvalueData) - } - } - - /** - * 修改外推数据 外推数据只能修改,不能新增 - * @param inferData - */ - updateInferData = (updateInferData: UpdateInferData) => { - Object.keys(updateInferData).forEach((key) => { - const findInfer = this.inferData.find((itemInfer) => { - return itemInfer.key === key - }) - if (findInfer) { - findInfer.value = updateInferData[key] - } else { - this.inferData.push({ key, value: updateInferData[key] }) - } - }) - } - - /** - * 初始化默认主题 - */ - initDefaultTheme = () => { - this.updateWrite() - } - /** - * 修改KeyValue数据 - * @param keyvalueData keyValue数据 - */ - updateOrCreateKeyValueData = (updateKeyvalueData: UpdateKeyValueData) => { - Object.keys(updateKeyvalueData).forEach((key) => { - const newKey = this.updateKeyBySetting(key, this.themeSetting) - this.keyValue[newKey] = updateKeyvalueData[newKey] - }) - } -} - -const install = (app: App) => { - app.config.globalProperties.theme = new Theme(setting, keyValueData, inferData) -} -export default { install } diff --git a/ui/src/theme/setting.ts b/ui/src/theme/setting.ts deleted file mode 100644 index f801463fb9f..00000000000 --- a/ui/src/theme/setting.ts +++ /dev/null @@ -1,12 +0,0 @@ -import type { ThemeSetting } from "./type"; -const setting: ThemeSetting = { - namespace: "el", - 
division: "-", - startDivision: "--", - colorInferSetting: { - light: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11], - dark: [2], - type: "color", - }, -}; -export default setting; diff --git a/ui/src/theme/type.ts b/ui/src/theme/type.ts deleted file mode 100644 index 76e9ffb226f..00000000000 --- a/ui/src/theme/type.ts +++ /dev/null @@ -1,71 +0,0 @@ -interface ThemeSetting { - /** - *element-ui Namespace - */ - namespace: string; - /** - * 数据分隔符 - */ - division: string; - /** - * 前缀 - */ - startDivision: string; - /** - * 颜色外推设置 - */ - colorInferSetting: ColorInferSetting; -} - -/** - * 颜色混和设置 - */ -interface ColorInferSetting { - /** - * 与白色混 - */ - light: Array; - /** - * 与黑色混 - */ - dark: Array; - /** - * 类型 - */ - type: string; -} - -/** - * 平滑数据 - */ -interface KeyValueData { - [propName: string]: string; -} -type UpdateInferData = KeyValueData; - -type UpdateKeyValueData = KeyValueData; -/** - *平滑数据 - */ -interface InferData { - /** - * 设置 - */ - setting?: ColorInferSetting | any; - /** - * 健 - */ - key: string; - /** - * 值 - */ - value: string; -} - -export type { - KeyValueData, - InferData, - ThemeSetting, - UpdateInferData, - UpdateKeyValueData, -}; diff --git a/ui/src/utils/application.ts b/ui/src/utils/application.ts index b8891735659..fd7d0a201bc 100644 --- a/ui/src/utils/application.ts +++ b/ui/src/utils/application.ts @@ -1,6 +1,10 @@ export const defaultIcon = '/ui/favicon.ico' // 是否显示字母 / icon -export function isAppIcon(url: string | undefined) { +export function isAppIcon(url: String | undefined) { return url === defaultIcon ? 
'' : url } + +export function isWorkFlow(type: string | undefined) { + return type === 'WORK_FLOW' +} diff --git a/ui/src/utils/clipboard.ts b/ui/src/utils/clipboard.ts index bde5e36303f..7ecd4f414ae 100644 --- a/ui/src/utils/clipboard.ts +++ b/ui/src/utils/clipboard.ts @@ -1,5 +1,6 @@ import Clipboard from 'vue-clipboard3' import { MsgSuccess, MsgError } from '@/utils/message' +import { t } from '@/locales' /* 复制粘贴 */ @@ -7,9 +8,9 @@ export async function copyClick(info: string) { const { toClipboard } = Clipboard() try { await toClipboard(info) - MsgSuccess('复制成功') + MsgSuccess(t('common.copySuccess')) } catch (e) { console.error(e) - MsgError('复制失败') + MsgError(t('common.copyError')) } } diff --git a/ui/src/utils/message.ts b/ui/src/utils/message.ts index f762d5a8e3c..fb20c5c4719 100644 --- a/ui/src/utils/message.ts +++ b/ui/src/utils/message.ts @@ -1,13 +1,12 @@ -import { h } from 'vue' -import { ElMessageBox, ElMessage, ElIcon } from 'element-plus' -import { WarningFilled } from '@element-plus/icons-vue' +import { ElMessageBox, ElMessage } from 'element-plus' +import { t } from '@/locales' export const MsgSuccess = (message: string) => { ElMessage.success({ message: message, type: 'success', showClose: true, - duration: 1500 + duration: 3000 }) } @@ -16,7 +15,7 @@ export const MsgInfo = (message: string) => { message: message, type: 'info', showClose: true, - duration: 1500 + duration: 3000 }) } @@ -25,7 +24,7 @@ export const MsgWarning = (message: string) => { message: message, type: 'warning', showClose: true, - duration: 1500 + duration: 3000 }) } @@ -34,10 +33,18 @@ export const MsgError = (message: string) => { message: message, type: 'error', showClose: true, - duration: 1500 + duration: 3000 }) } +export const MsgAlert = (title: string, description: string, options?: any) => { + const defaultOptions: Object = { + confirmButtonText: t('common.confirm'), + ...options + } + return ElMessageBox.alert(description, title, defaultOptions) +} + /** * 删除知识库 * 
@param 参数 message: {title, description,type} @@ -46,27 +53,9 @@ export const MsgError = (message: string) => { export const MsgConfirm = (title: string, description: string, options?: any) => { const defaultOptions: Object = { showCancelButton: true, - confirmButtonText: '确定', - cancelButtonText: '取消', + confirmButtonText: t('common.confirm'), + cancelButtonText: t('common.cancel'), ...options } return ElMessageBox.confirm(description, title, defaultOptions) } - -// export const MsgConfirm = ({ title, description }: any, options?: any) => { -// const message: any = h('div', { class: 'app-confirm' }, [ -// h('h4', { class: 'app-confirm-title flex align-center' }, [ -// h(ElIcon, { class: 'icon' }, [h(WarningFilled)]), -// h('span', { class: 'ml-16' }, title) -// ]), -// h('div', { class: 'app-confirm-description mt-8' }, description) -// ]) - -// const defaultOptions: Object = { -// showCancelButton: true, -// confirmButtonText: '确定', -// cancelButtonText: '取消', -// ...options -// } -// return ElMessageBox({ message, ...defaultOptions }) -// } diff --git a/ui/src/utils/status.ts b/ui/src/utils/status.ts new file mode 100644 index 00000000000..16f98599e5f --- /dev/null +++ b/ui/src/utils/status.ts @@ -0,0 +1,68 @@ +import { type Dict } from '@/api/type/common' +interface TaskTypeInterface { + // 向量化 + EMBEDDING: number + // 生成问题 + GENERATE_PROBLEM: number + // 同步 + SYNC: number +} +interface StateInterface { + // 等待 + PENDING: '0' + // 执行中 + STARTED: '1' + // 成功 + SUCCESS: '2' + // 失败 + FAILURE: '3' + // 取消任务 + REVOKE: '4' + // 取消成功 + REVOKED: '5' + IGNORED: 'n' +} +const TaskType: TaskTypeInterface = { + EMBEDDING: 1, + GENERATE_PROBLEM: 2, + SYNC: 3 +} +const State: StateInterface = { + // 等待 + PENDING: '0', + // 执行中 + STARTED: '1', + // 成功 + SUCCESS: '2', + // 失败 + FAILURE: '3', + // 取消任务 + REVOKE: '4', + // 取消成功 + REVOKED: '5', + IGNORED: 'n' +} +class Status { + task_status: Dict + constructor(status?: string) { + if (!status) { + status = '' + } + status = 
status.split('').reverse().join('') + this.task_status = {} + for (let key in TaskType) { + const value = TaskType[key as keyof TaskTypeInterface] + const index = value - 1 + this.task_status[value] = status[index] ? status[index] : 'n' + } + } + toString() { + const r = [] + for (let key in TaskType) { + const value = TaskType[key as keyof TaskTypeInterface] + r.push(this.task_status[value]) + } + return r.reverse().join('') + } +} +export { Status, State, TaskType, type TaskTypeInterface, type StateInterface } diff --git a/ui/src/utils/theme.ts b/ui/src/utils/theme.ts new file mode 100644 index 00000000000..c1e6ce5d172 --- /dev/null +++ b/ui/src/utils/theme.ts @@ -0,0 +1,64 @@ +import { t } from '@/locales' + +export const themeList = [ + { + label: t('views.system.theme.default'), + value: '#3370FF', + loginBackground: 'default' + }, + { + label: t('views.system.theme.orange'), + value: '#FF8800', + loginBackground: 'orange' + }, + { + label: t('views.system.theme.green'), + value: '#00B69D', + loginBackground: 'green' + }, + { + label: t('views.system.theme.purple'), + value: '#7F3BF5', + loginBackground: 'purple' + }, + { + label: t('views.system.theme.red'), + value: '#F01D94', + loginBackground: 'red' + } +] + +export function getThemeImg(val: string) { + return themeList.filter((v) => v.value === val)?.[0]?.loginBackground || 'default' +} + +export const defaultSetting = { + icon: '', + loginLogo: '', + loginImage: '', + title: 'MaxKB', + slogan: t('views.system.theme.defaultSlogan') +} + +export const defaultPlatformSetting = { + showUserManual: true, + userManualUrl: t('layout.userManualUrl'), + showForum: true, + forumUrl: t('layout.forumUrl'), + showProject: true, + projectUrl: 'https://github.com/1Panel-dev/MaxKB' +} + +export function hexToRgba(hex?: string, alpha?: number) { + // 将16进制颜色值的两个字符一起转换成十进制 + if (!hex) { + return '' + } else { + const r = parseInt(hex.slice(1, 3), 16) + const g = parseInt(hex.slice(3, 5), 16) + const b = 
parseInt(hex.slice(5, 7), 16) + + // 返回RGBA格式的字符串 + return `rgba(${r}, ${g}, ${b}, ${alpha})` + } +} diff --git a/ui/src/utils/time.ts b/ui/src/utils/time.ts index fcbc86a0510..7100351efd9 100644 --- a/ui/src/utils/time.ts +++ b/ui/src/utils/time.ts @@ -1,5 +1,7 @@ import moment from 'moment' - +import 'moment/dist/locale/zh-cn' +moment.locale('zh-cn') +import { t } from '@/locales' // 当天日期 YYYY-MM-DD export const nowDate = moment().format('YYYY-MM-DD') @@ -38,3 +40,41 @@ export const dateFormat = (timestamp: any) => { return `${y}-${m}-${d}` } + +export function fromNowDate(time: any) { + // 拿到当前时间戳和发布时的时间戳,然后得出时间戳差 + const curTime = new Date() + const futureTime = new Date(time) + const timeDiff = futureTime.getTime() - curTime.getTime() + + // 单位换算 + const min = 60 * 1000 + const hour = min * 60 + const day = hour * 24 + const week = day * 7 + + // 计算发布时间距离当前时间的周、天、时、分 + const exceedWeek = Math.floor(timeDiff / week) + const exceedDay = Math.floor(timeDiff / day) + const exceedHour = Math.floor(timeDiff / hour) + const exceedMin = Math.floor(timeDiff / min) + + // 最后判断时间差到底是属于哪个区间,然后return + if (exceedWeek > 0) { + return '' + } else { + if (exceedDay < 7 && exceedDay > 0) { + return exceedDay + t('layout.time.daysLater') + } else { + if (exceedHour < 24 && exceedHour > 0) { + return exceedHour + t('layout.time.hoursLater') + } else { + if (exceedMin < 0) { + return t('layout.time.expired') + } else { + return t('layout.time.expiringSoon') + } + } + } + } +} diff --git a/ui/src/utils/utils.ts b/ui/src/utils/utils.ts index 027b1a67bd4..44e68895c7f 100644 --- a/ui/src/utils/utils.ts +++ b/ui/src/utils/utils.ts @@ -1,3 +1,5 @@ +import { MsgError } from '@/utils/message' + export function toThousands(num: any) { return num?.toString().replace(/\d+/, function (n: any) { return n.replace(/(\d)(?=(?:\d{3})+$)/g, '$1,') @@ -37,14 +39,21 @@ export function fileType(name: string) { /* 获得文件对应图片 */ +const typeList: any = { + txt: ['txt', 'pdf', 'docx', 'md', 'html', 'zip', 
'xlsx', 'xls', 'csv'], + table: ['xlsx', 'xls', 'csv'], + QA: ['xlsx', 'csv', 'xls', 'zip'] +} + export function getImgUrl(name: string) { - const type = isRightType(name) ? fileType(name) : 'unknow' - return new URL(`../assets/${type}-icon.svg`, import.meta.url).href + const list = Object.values(typeList).flat() + + const type = list.includes(fileType(name).toLowerCase()) ? fileType(name).toLowerCase() : 'unknown' + return new URL(`../assets/fileType/${type}-icon.svg`, import.meta.url).href } // 是否是白名单后缀 -export function isRightType(name: string) { - const typeList = ['txt', 'pdf', 'docx', 'csv', 'md'] - return typeList.includes(fileType(name)) +export function isRightType(name: string, type: string) { + return typeList[type].includes(fileType(name).toLowerCase()) } /* @@ -56,7 +65,7 @@ export function relatedObject(list: any, val: any, attr: string) { } // 排序 -export function arraySort(list: Array, property: any, desc?: boolean) { +export function arraySort(list: Array, property: any, desc?: boolean) { return list.sort((a: any, b: any) => { return desc ? b[property] - a[property] : a[property] - b[property] }) @@ -81,3 +90,78 @@ export function getAttrsArray(array: Array, attr: string) { export function getSum(array: Array) { return array.reduce((total, item) => total + item, 0) } + +// 下载 +export function downloadByURL(url: string, name: string) { + const a = document.createElement('a') + a.setAttribute('href', url) + a.setAttribute('target', '_blank') + a.setAttribute('download', name) + document.body.appendChild(a) + a.click() + document.body.removeChild(a) +} + +// 截取文件名 +export function cutFilename(filename: string, num: number) { + const lastIndex = filename.lastIndexOf('.') + const suffix = lastIndex === -1 ? '' : filename.substring(lastIndex + 1) + return filename.substring(0, num - suffix.length - 1) + '.' 
+ suffix +} + +export function getNormalizedUrl(url: string) { + if (url && !url.endsWith('/') && !/\.[^/]+$/.test(url)) { + return url + '/' + } + return url +} + +interface LoadScriptOptions { + jsId?: string // 自定义脚本 ID + forceReload?: boolean // 是否强制重新加载(默认 false) +} + +export const loadScript = (url: string, options: LoadScriptOptions = {}): Promise => { + const { jsId, forceReload = false } = options + const scriptId = jsId || `script-${btoa(url).slice(0, 12)}` // 生成唯一 ID + + return new Promise((resolve, reject) => { + // 检查是否已存在且无需强制加载 + const existingScript = document.getElementById(scriptId) as HTMLScriptElement | null + if (existingScript && !forceReload) { + if (existingScript.src === url) { + existingScript.onload = () => resolve() // 复用现有脚本 + return + } + // URL 不同则移除旧脚本 + existingScript.parentElement?.removeChild(existingScript) + } + + // 创建新脚本 + const script = document.createElement('script') + script.id = scriptId + script.src = url + script.async = true // 明确启用异步加载 + + // 成功回调 + script.onload = () => { + resolve() + } + + // 错误处理(兼容性增强) + script.onerror = () => { + reject(new Error(`Failed to load script: ${url}`)) + cleanupScript(script) + } + + // 插入到 确保加载顺序 + document.head.appendChild(script) + }) +} + +// 清理脚本(可选) +const cleanupScript = (script: HTMLScriptElement) => { + script.onload = null + script.onerror = null + script.parentElement?.removeChild(script) +} diff --git a/ui/src/views/404/index.vue b/ui/src/views/404/index.vue index 4c7350ca113..bb00f107e46 100644 --- a/ui/src/views/404/index.vue +++ b/ui/src/views/404/index.vue @@ -2,10 +2,10 @@ -
404
-
很抱歉,无法访问应用!
+
{{ $t('views.notFound.title')}}
+
{{ $t('views.notFound.message') }}
- +
@@ -42,6 +42,7 @@ const router = useRouter() } } } + @media only screen and (max-width: 1000px) { .not-found-container .message-container { text-align: center; diff --git a/ui/src/views/application-overview/component/APIKeyDialog.vue b/ui/src/views/application-overview/component/APIKeyDialog.vue index 8a887d9110b..dc8c9aa37ee 100644 --- a/ui/src/views/application-overview/component/APIKeyDialog.vue +++ b/ui/src/views/application-overview/component/APIKeyDialog.vue @@ -1,7 +1,16 @@