`
-
-const getChatContainerHtml=(protocol,host,token)=>{
+
+const getChatContainerHtml=(protocol,host,token,query)=>{
return `
-
-
+
+
`
}
/**
* 初始化引导
- * @param {*} root
+ * @param {*} root
*/
const initGuide=(root)=>{
root.insertAdjacentHTML("beforeend",guideHtml)
@@ -84,16 +62,32 @@ const initChat=(root)=>{
// 添加对话icon
root.insertAdjacentHTML("beforeend",chatButtonHtml)
// 添加对话框
- root.insertAdjacentHTML('beforeend',getChatContainerHtml('{{protocol}}','{{host}}','{{token}}'))
+ root.insertAdjacentHTML('beforeend',getChatContainerHtml('{{protocol}}','{{host}}','{{token}}','{{query}}'))
// 按钮元素
const chat_button=root.querySelector('.maxkb-chat-button')
+ const chat_button_img=root.querySelector('.maxkb-chat-button > img')
// 对话框元素
const chat_container=root.querySelector('#maxkb-chat-container')
+ // 引导层
+ const mask_content = root.querySelector('.maxkb-mask > .maxkb-content')
+ const mask_tips = root.querySelector('.maxkb-tips')
+ chat_button_img.onload=(event)=>{
+ if(mask_content){
+ mask_content.style.width = chat_button_img.width + 'px'
+ mask_content.style.height = chat_button_img.height + 'px'
+ if('{{x_type}}'=='left'){
+ mask_tips.style.marginLeft = (chat_button_img.naturalWidth>500?500:chat_button_img.naturalWidth)-64 + 'px'
+ }else{
+ mask_tips.style.marginRight = (chat_button_img.naturalWidth>500?500:chat_button_img.naturalWidth)-64 + 'px'
+ }
+ }
+ }
const viewport=root.querySelector('.maxkb-openviewport')
const closeviewport=root.querySelector('.maxkb-closeviewport')
const close_func=()=>{
chat_container.style['display']=chat_container.style['display']=='block'?'none':'block'
+ chat_button.style['display']=chat_container.style['display']=='block'?'none':'block'
}
close_icon=chat_container.querySelector('.maxkb-chat-close')
chat_button.onclick = close_func
@@ -108,6 +102,26 @@ const initChat=(root)=>{
viewport.classList.add('maxkb-viewportnone')
closeviewport.classList.remove('maxkb-viewportnone')
}
+ }
+ const drag=(e)=>{
+ if (['touchmove','touchstart'].includes(e.type)) {
+ chat_button.style.top=(e.touches[0].clientY-chat_button_img.naturalHeight/2)+'px'
+ chat_button.style.left=(e.touches[0].clientX-chat_button_img.naturalWidth/2)+'px'
+ } else {
+ chat_button.style.top=(e.y-chat_button_img.naturalHeight/2)+'px'
+ chat_button.style.left=(e.x-chat_button_img.naturalWidth/2)+'px'
+ }
+ chat_button.style.width =chat_button_img.naturalWidth+'px'
+ chat_button.style.height =chat_button_img.naturalHeight+'px'
+ }
+ if({{is_draggable}}){
+ chat_button.addEventListener("drag",drag)
+ chat_button.addEventListener("dragover",(e)=>{
+ e.preventDefault()
+ })
+ chat_button.addEventListener("dragend",drag)
+ chat_button.addEventListener("touchstart",drag)
+ chat_button.addEventListener("touchmove",drag)
}
viewport.onclick=viewport_func
closeviewport.onclick=viewport_func
@@ -118,20 +132,21 @@ const initChat=(root)=>{
function initMaxkb(){
const maxkb=document.createElement('div')
const root=document.createElement('div')
- root.id="maxkb"
- initMaxkbStyle(maxkb)
+ const maxkbId = 'maxkb-'+'{{max_kb_id}}'
+ root.id=maxkbId
+ initMaxkbStyle(maxkb, maxkbId)
maxkb.appendChild(root)
document.body.appendChild(maxkb)
const maxkbMaskTip=localStorage.getItem('maxkbMaskTip')
- if(maxkbMaskTip==null){
+ if(maxkbMaskTip==null && {{show_guide}}){
initGuide(root)
}
initChat(root)
}
-
+
// 初始化全局样式
-function initMaxkbStyle(root){
+function initMaxkbStyle(root, maxkbId){
style=document.createElement('style')
style.type='text/css'
style.innerText= `
@@ -155,7 +170,7 @@ function initMaxkbStyle(root){
#maxkb .maxkb-mask {
position: fixed;
- z-index: 999;
+ z-index: 10001;
background-color: transparent;
height: 100%;
width: 100%;
@@ -163,25 +178,24 @@ function initMaxkbStyle(root){
left: 0;
}
#maxkb .maxkb-mask .maxkb-content {
- width: 45px;
- height: 48px;
- box-shadow: 1px 1px 1px 2000px rgba(0,0,0,.6);
- border-radius: 50% 0 0 50%;
+ width: 64px;
+ height: 64px;
+ box-shadow: 1px 1px 1px 9999px rgba(0,0,0,.6);
position: absolute;
- right: 0;
- bottom: 38px;
- z-index: 1000;
+ {{x_type}}: {{x_value}}px;
+ {{y_type}}: {{y_value}}px;
+ z-index: 10001;
}
#maxkb .maxkb-tips {
position: fixed;
- bottom: 30px;
- right: 60px;
+ {{x_type}}:calc({{x_value}}px + 75px);
+ {{y_type}}: calc({{y_value}}px + 0px);
padding: 22px 24px 24px;
border-radius: 6px;
color: #ffffff;
font-size: 14px;
background: #3370FF;
- z-index: 1000;
+ z-index: 10001;
}
#maxkb .maxkb-tips .maxkb-arrow {
position: absolute;
@@ -192,8 +206,8 @@ function initMaxkbStyle(root){
transform: rotate(45deg);
box-sizing: border-box;
/* left */
- right: -5px;
- bottom: 33px;
+ {{x_type}}: -5px;
+ {{y_type}}: 33px;
border-left-color: transparent;
border-bottom-color: transparent
}
@@ -226,7 +240,7 @@ function initMaxkbStyle(root){
}
#maxkb-chat-container {
- width: 420px;
+ width: 450px;
height: 600px;
display:none;
}
@@ -240,9 +254,10 @@ function initMaxkbStyle(root){
#maxkb .maxkb-chat-button{
position: fixed;
- bottom: 30px;
- right: 0;
+ {{x_type}}: {{x_value}}px;
+ {{y_type}}: {{y_value}}px;
cursor: pointer;
+ z-index:10000;
}
#maxkb #maxkb-chat-container{
z-index:10000;position: relative;
@@ -250,15 +265,16 @@ function initMaxkbStyle(root){
border: 1px solid #ffffff;
background: linear-gradient(188deg, rgba(235, 241, 255, 0.20) 39.6%, rgba(231, 249, 255, 0.20) 94.3%), #EFF0F1;
box-shadow: 0px 4px 8px 0px rgba(31, 35, 41, 0.10);
- position: fixed;bottom: 20px;right: 45px;overflow: hidden;
+ position: fixed;bottom: 16px;right: 16px;overflow: hidden;
}
#maxkb #maxkb-chat-container .maxkb-operate{
- top: 15px;
- right: 10px;
+ top: 18px;
+ right: 15px;
position: absolute;
display: flex;
align-items: center;
+ line-height: 18px;
}
#maxkb #maxkb-chat-container .maxkb-operate .maxkb-chat-close{
margin-left:15px;
@@ -292,6 +308,7 @@ function initMaxkbStyle(root){
height: 600px;
}
}`
+ .replaceAll('#maxkb ',`#${maxkbId} `)
root.appendChild(style)
}
@@ -304,4 +321,5 @@ function embedChatbot() {
initMaxkb()
} else console.error('invalid parameter')
}
-window.onload = embedChatbot
+window.addEventListener('load',embedChatbot)
+})();
\ No newline at end of file
diff --git a/apps/application/urls.py b/apps/application/urls.py
index 30866c81a22..b294289541e 100644
--- a/apps/application/urls.py
+++ b/apps/application/urls.py
@@ -5,10 +5,14 @@
app_name = "application"
urlpatterns = [
path('application', views.Application.as_view(), name="application"),
- path('application/profile', views.Application.Profile.as_view()),
+ path('application/import', views.Application.Import.as_view()),
+ path('application/profile', views.Application.Profile.as_view(), name='application/profile'),
path('application/embed', views.Application.Embed.as_view()),
path('application/authentication', views.Application.Authentication.as_view()),
+ path('application/mcp_servers', views.Application.McpServers.as_view()),
+    path('application/<str:application_id>/publish', views.Application.Publish.as_view()),
    path('application/<str:application_id>/edit_icon', views.Application.EditIcon.as_view()),
+    path('application/<str:application_id>/export', views.Application.Export.as_view()),
    path('application/<str:application_id>/statistics/customer_count',
         views.ApplicationStatistics.CustomerCount.as_view()),
    path('application/<str:application_id>/statistics/customer_count_trend',
@@ -18,6 +22,14 @@
    path('application/<str:application_id>/statistics/chat_record_aggregate_trend',
         views.ApplicationStatistics.ChatRecordAggregateTrend.as_view()),
    path('application/<str:application_id>/model', views.Application.Model.as_view()),
+    path('application/<str:application_id>/function_lib', views.Application.FunctionLib.as_view()),
+    path('application/<str:application_id>/function_lib/<str:function_lib_id>',
+         views.Application.FunctionLib.Operate.as_view()),
+    path('application/<str:application_id>/application', views.Application.Application.as_view()),
+    path('application/<str:application_id>/application/<str:app_id>',
+         views.Application.Application.Operate.as_view()),
+    path('application/<str:application_id>/model_params_form/<str:model_id>',
+         views.Application.ModelParamsForm.as_view()),
    path('application/<str:application_id>/hit_test', views.Application.HitTest.as_view()),
    path('application/<str:application_id>/api_key', views.Application.ApplicationKey.as_view()),
    path("application/<str:application_id>/api_key/<str:api_key_id>",
@@ -28,13 +40,21 @@
    path('application/<str:application_id>/access_token', views.Application.AccessToken.as_view(),
         name='application/access_token'),
    path('application/<int:current_page>/<int:page_size>', views.Application.Page.as_view(), name='application_page'),
-    path('application/<str:application_id>/chat/open', views.ChatView.Open.as_view()),
+    path('application/<str:application_id>/chat/open', views.ChatView.Open.as_view(), name='application/open'),
    path("application/chat/open", views.ChatView.OpenTemp.as_view()),
+    path("application/chat_workflow/open", views.ChatView.OpenWorkFlowTemp.as_view()),
+    path("application/<str:application_id>/chat/client/<int:current_page>/<int:page_size>",
+         views.ChatView.ClientChatHistoryPage.as_view()),
+    path("application/<str:application_id>/chat/client/<str:chat_id>",
+         views.ChatView.ClientChatHistoryPage.Operate.as_view()),
    path('application/<str:application_id>/chat/export', views.ChatView.Export.as_view(), name='export'),
+    path('application/<str:application_id>/chat/completions', views.Openai.as_view(),
+         name='application/chat_completions'),
    path('application/<str:application_id>/chat', views.ChatView.as_view(), name='chats'),
    path('application/<str:application_id>/chat/<int:current_page>/<int:page_size>', views.ChatView.Page.as_view()),
    path('application/<str:application_id>/chat/<str:chat_id>', views.ChatView.Operate.as_view()),
    path('application/<str:application_id>/chat/<str:chat_id>/chat_record/', views.ChatView.ChatRecord.as_view()),
+    path('application/<str:application_id>/chat/<str:chat_id>/upload_file', views.ChatView.UploadFile.as_view()),
    path('application/<str:application_id>/chat/<str:chat_id>/chat_record/<int:current_page>/<int:page_size>',
         views.ChatView.ChatRecord.Page.as_view()),
    path('application/<str:application_id>/chat/<str:chat_id>/chat_record/<str:chat_record_id>',
@@ -46,11 +66,27 @@
        'application/<str:application_id>/chat/<str:chat_id>/chat_record/<str:chat_record_id>/dataset/<str:dataset_id>/document_id/<str:document_id>/improve',
        views.ChatView.ChatRecord.Improve.as_view(),
        name=''),
+    path(
+        'application/<str:application_id>/dataset/<str:dataset_id>/improve',
+        views.ChatView.ChatRecord.Improve.as_view(),
+        name=''),
    path('application/<str:application_id>/chat/<str:chat_id>/chat_record/<str:chat_record_id>/improve',
         views.ChatView.ChatRecord.ChatRecordImprove.as_view()),
-    path('application/chat_message/<str:chat_id>', views.ChatView.Message.as_view()),
+    path('application/chat_message/<str:chat_id>', views.ChatView.Message.as_view(), name='application/message'),
    path(
        'application/<str:application_id>/chat/<str:chat_id>/chat_record/<str:chat_record_id>/dataset/<str:dataset_id>/document_id/<str:document_id>/improve/<str:paragraph_id>',
        views.ChatView.ChatRecord.Improve.Operate.as_view(),
-        name='')
+        name=''),
+    path('application/<str:application_id>/speech_to_text', views.Application.SpeechToText.as_view(),
+         name='application/audio'),
+    path('application/<str:application_id>/text_to_speech', views.Application.TextToSpeech.as_view(),
+         name='application/audio'),
+    path('application/<str:application_id>/work_flow_version', views.ApplicationVersionView.as_view()),
+    path('application/<str:application_id>/work_flow_version/<int:current_page>/<int:page_size>',
+         views.ApplicationVersionView.Page.as_view()),
+    path('application/<str:application_id>/work_flow_version/<str:work_flow_version_id>',
+         views.ApplicationVersionView.Operate.as_view()),
+    path('application/<str:application_id>/play_demo_text', views.Application.PlayDemoText.as_view(),
+         name='application/audio')
+ name='application/audio')
+
]
diff --git a/apps/application/views/__init__.py b/apps/application/views/__init__.py
index 52d004041eb..24569c17e3b 100644
--- a/apps/application/views/__init__.py
+++ b/apps/application/views/__init__.py
@@ -8,3 +8,4 @@
"""
from .application_views import *
from .chat_views import *
+from .application_version_views import *
diff --git a/apps/application/views/application_version_views.py b/apps/application/views/application_version_views.py
new file mode 100644
index 00000000000..de900936268
--- /dev/null
+++ b/apps/application/views/application_version_views.py
@@ -0,0 +1,94 @@
+# coding=utf-8
+"""
+ @project: MaxKB
+ @Author:虎
+ @file: application_version_views.py
+ @date:2024/10/15 16:49
+ @desc:
+"""
+from drf_yasg.utils import swagger_auto_schema
+from rest_framework.decorators import action
+from rest_framework.request import Request
+from rest_framework.views import APIView
+
+from application.serializers.application_version_serializers import ApplicationVersionSerializer
+from application.swagger_api.application_version_api import ApplicationVersionApi
+from application.views import get_application_operation_object
+from common.auth import has_permissions, TokenAuth
+from common.constants.permission_constants import PermissionConstants, CompareConstants, ViewPermission, RoleConstants, \
+ Permission, Group, Operate
+from common.log.log import log
+from common.response import result
+from django.utils.translation import gettext_lazy as _
+
+
+class ApplicationVersionView(APIView):
+ authentication_classes = [TokenAuth]
+
+ @action(methods=['GET'], detail=False)
+ @swagger_auto_schema(operation_summary=_("Get the application list"),
+ operation_id=_("Get the application list"),
+ manual_parameters=ApplicationVersionApi.Query.get_request_params_api(),
+ responses=result.get_api_array_response(ApplicationVersionApi.get_response_body_api()),
+ tags=[_('Application/Version')])
+ @has_permissions(PermissionConstants.APPLICATION_READ, compare=CompareConstants.AND)
+ def get(self, request: Request, application_id: str):
+ return result.success(
+ ApplicationVersionSerializer.Query(
+ data={'name': request.query_params.get('name'), 'user_id': request.user.id,
+ 'application_id': application_id}).list())
+
+ class Page(APIView):
+ authentication_classes = [TokenAuth]
+
+ @action(methods=['GET'], detail=False)
+ @swagger_auto_schema(operation_summary=_("Get the list of application versions by page"),
+ operation_id=_("Get the list of application versions by page"),
+ manual_parameters=result.get_page_request_params(
+ ApplicationVersionApi.Query.get_request_params_api()),
+ responses=result.get_page_api_response(ApplicationVersionApi.get_response_body_api()),
+ tags=[_('Application/Version')])
+ @has_permissions(PermissionConstants.APPLICATION_READ, compare=CompareConstants.AND)
+ def get(self, request: Request, application_id: str, current_page: int, page_size: int):
+ return result.success(
+ ApplicationVersionSerializer.Query(
+ data={'name': request.query_params.get('name'), 'user_id': request.user,
+ 'application_id': application_id}).page(
+ current_page, page_size))
+
+ class Operate(APIView):
+ authentication_classes = [TokenAuth]
+
+ @action(methods=['GET'], detail=False)
+ @swagger_auto_schema(operation_summary=_("Get application version details"),
+ operation_id=_("Get application version details"),
+ manual_parameters=ApplicationVersionApi.Operate.get_request_params_api(),
+ responses=result.get_api_response(ApplicationVersionApi.get_response_body_api()),
+ tags=[_('Application/Version')])
+ @has_permissions(PermissionConstants.APPLICATION_READ, compare=CompareConstants.AND)
+ def get(self, request: Request, application_id: str, work_flow_version_id: str):
+ return result.success(
+ ApplicationVersionSerializer.Operate(
+ data={'user_id': request.user,
+ 'application_id': application_id, 'work_flow_version_id': work_flow_version_id}).one())
+
+ @action(methods=['PUT'], detail=False)
+ @swagger_auto_schema(operation_summary=_("Modify application version information"),
+ operation_id=_("Modify application version information"),
+ manual_parameters=ApplicationVersionApi.Operate.get_request_params_api(),
+ request_body=ApplicationVersionApi.Edit.get_request_body_api(),
+ responses=result.get_api_response(ApplicationVersionApi.get_response_body_api()),
+ tags=[_('Application/Version')])
+ @has_permissions(ViewPermission(
+ [RoleConstants.ADMIN, RoleConstants.USER],
+ [lambda r, keywords: Permission(group=Group.APPLICATION, operate=Operate.MANAGE,
+ dynamic_tag=keywords.get('application_id'))],
+ compare=CompareConstants.AND))
+ @log(menu='Application', operate="Modify application version information",
+ get_operation_object=lambda r, k: get_application_operation_object(k.get('application_id')))
+ def put(self, request: Request, application_id: str, work_flow_version_id: str):
+ return result.success(
+ ApplicationVersionSerializer.Operate(
+ data={'application_id': application_id, 'work_flow_version_id': work_flow_version_id,
+ 'user_id': request.user.id}).edit(
+ request.data))
diff --git a/apps/application/views/application_views.py b/apps/application/views/application_views.py
index 3ebed08995c..2628644f1d1 100644
--- a/apps/application/views/application_views.py
+++ b/apps/application/views/application_views.py
@@ -7,8 +7,11 @@
@desc:
"""
+from django.core import cache
from django.http import HttpResponse
+from django.utils.translation import gettext_lazy as _, gettext
from drf_yasg.utils import swagger_auto_schema
+from langchain_core.prompts import PromptTemplate
from rest_framework.decorators import action
from rest_framework.parsers import MultiPartParser
from rest_framework.request import Request
@@ -18,24 +21,28 @@
from application.serializers.application_statistics_serializers import ApplicationStatisticsSerializer
from application.swagger_api.application_api import ApplicationApi
from application.swagger_api.application_statistics_api import ApplicationStatisticsApi
+from application.views.common import get_application_operation_object
from common.auth import TokenAuth, has_permissions
from common.constants.permission_constants import CompareConstants, PermissionConstants, Permission, Group, Operate, \
ViewPermission, RoleConstants
from common.exception.app_exception import AppAuthenticationFailed
+from common.log.log import log
from common.response import result
from common.swagger_api.common_api import CommonApi
from common.util.common import query_params_to_single_dict
from dataset.serializers.dataset_serializers import DataSetSerializers
+chat_cache = cache.caches['chat_cache']
+
class ApplicationStatistics(APIView):
class CustomerCount(APIView):
authentication_classes = [TokenAuth]
@action(methods=["GET"], detail=False)
- @swagger_auto_schema(operation_summary="用户统计",
- operation_id="用户统计",
- tags=["应用/统计"],
+ @swagger_auto_schema(operation_summary=_("User Statistics"),
+ operation_id=_("User Statistics"),
+ tags=[_("Application/Statistics")],
manual_parameters=ApplicationStatisticsApi.get_request_params_api(),
responses=result.get_api_response(
ApplicationStatisticsApi.CustomerCount.get_response_body_api())
@@ -58,9 +65,9 @@ class CustomerCountTrend(APIView):
authentication_classes = [TokenAuth]
@action(methods=["GET"], detail=False)
- @swagger_auto_schema(operation_summary="用户统计趋势",
- operation_id="用户统计趋势",
- tags=["应用/统计"],
+ @swagger_auto_schema(operation_summary=_("User demographic trends"),
+ operation_id=_("User demographic trends"),
+ tags=[_("Application/Statistics")],
manual_parameters=ApplicationStatisticsApi.get_request_params_api(),
responses=result.get_api_array_response(
ApplicationStatisticsApi.CustomerCountTrend.get_response_body_api()))
@@ -82,9 +89,9 @@ class ChatRecordAggregate(APIView):
authentication_classes = [TokenAuth]
@action(methods=["GET"], detail=False)
- @swagger_auto_schema(operation_summary="对话相关统计",
- operation_id="对话相关统计",
- tags=["应用/统计"],
+ @swagger_auto_schema(operation_summary=_("Conversation statistics"),
+ operation_id=_("Conversation statistics"),
+ tags=[_("Application/Statistics")],
manual_parameters=ApplicationStatisticsApi.get_request_params_api(),
responses=result.get_api_response(
ApplicationStatisticsApi.ChatRecordAggregate.get_response_body_api())
@@ -107,9 +114,9 @@ class ChatRecordAggregateTrend(APIView):
authentication_classes = [TokenAuth]
@action(methods=["GET"], detail=False)
- @swagger_auto_schema(operation_summary="对话相关统计趋势",
- operation_id="对话相关统计趋势",
- tags=["应用/统计"],
+ @swagger_auto_schema(operation_summary=_("Dialogue-related statistical trends"),
+ operation_id=_("Dialogue-related statistical trends"),
+ tags=[_("Application/Statistics")],
manual_parameters=ApplicationStatisticsApi.get_request_params_api(),
responses=result.get_api_array_response(
ApplicationStatisticsApi.ChatRecordAggregate.get_response_body_api())
@@ -137,9 +144,9 @@ class EditIcon(APIView):
parser_classes = [MultiPartParser]
@action(methods=['PUT'], detail=False)
- @swagger_auto_schema(operation_summary="修改应用icon",
- operation_id="修改应用icon",
- tags=['应用'],
+ @swagger_auto_schema(operation_summary=_("Modify application icon"),
+ operation_id=_("Modify application icon"),
+ tags=[_('Application')],
manual_parameters=ApplicationApi.EditApplicationIcon.get_request_params_api(),
request_body=ApplicationApi.Operate.get_request_body_api())
@has_permissions(ViewPermission(
@@ -148,31 +155,137 @@ class EditIcon(APIView):
dynamic_tag=keywords.get('application_id'))],
compare=CompareConstants.AND), PermissionConstants.APPLICATION_EDIT,
compare=CompareConstants.AND)
+ @log(menu='Application', operate="Modify application icon",
+ get_operation_object=lambda r, k: get_application_operation_object(k.get('application_id')))
def put(self, request: Request, application_id: str):
return result.success(
ApplicationSerializer.IconOperate(
data={'application_id': application_id, 'user_id': request.user.id,
'image': request.FILES.get('file')}).edit(request.data))
+ class Import(APIView):
+ authentication_classes = [TokenAuth]
+ parser_classes = [MultiPartParser]
+
+ @action(methods="POST", detail=False)
+ @swagger_auto_schema(operation_summary=_("Import Application"), operation_id=_("Import Application"),
+ manual_parameters=ApplicationApi.Import.get_request_params_api(),
+ tags=[_("Application")]
+ )
+ @has_permissions(RoleConstants.ADMIN, RoleConstants.USER)
+ @log(menu='Application', operate="Import Application")
+ def post(self, request: Request):
+ return result.success(ApplicationSerializer.Import(
+ data={'user_id': request.user.id, 'file': request.FILES.get('file')}).import_())
+
+ class Export(APIView):
+ authentication_classes = [TokenAuth]
+
+ @action(methods="GET", detail=False)
+ @swagger_auto_schema(operation_summary=_("Export Application"), operation_id=_("Export Application"),
+ manual_parameters=ApplicationApi.Export.get_request_params_api(),
+ tags=[_("Application")]
+ )
+ @has_permissions(lambda r, keywords: Permission(group=Group.APPLICATION, operate=Operate.MANAGE,
+ dynamic_tag=keywords.get('application_id')))
+ @log(menu='Application', operate="Export Application",
+ get_operation_object=lambda r, k: get_application_operation_object(k.get('application_id')))
+ def get(self, request: Request, application_id: str):
+ return ApplicationSerializer.Operate(
+ data={'application_id': application_id, 'user_id': request.user.id}).export()
+
class Embed(APIView):
@action(methods=["GET"], detail=False)
- @swagger_auto_schema(operation_summary="获取嵌入js",
- operation_id="获取嵌入js",
- tags=["应用"],
+ @swagger_auto_schema(operation_summary=_("Get embedded js"),
+ operation_id=_("Get embedded js"),
+ tags=[_("Application")],
manual_parameters=ApplicationApi.ApiKey.get_request_params_api())
def get(self, request: Request):
return ApplicationSerializer.Embed(
data={'protocol': request.query_params.get('protocol'), 'token': request.query_params.get('token'),
- 'host': request.query_params.get('host'), }).get_embed()
+ 'host': request.query_params.get('host'), }).get_embed(params=request.query_params)
class Model(APIView):
authentication_classes = [TokenAuth]
@action(methods=["GET"], detail=False)
- @swagger_auto_schema(operation_summary="获取模型列表",
- operation_id="获取模型列表",
- tags=["应用"],
- manual_parameters=ApplicationApi.ApiKey.get_request_params_api())
+ @swagger_auto_schema(operation_summary=_("Get a list of models"),
+ operation_id=_("Get a list of models"),
+ tags=[_("Application")],
+ manual_parameters=ApplicationApi.Model.get_request_params_api())
+ @has_permissions(ViewPermission(
+ [RoleConstants.ADMIN, RoleConstants.USER],
+ [lambda r, keywords: Permission(group=Group.APPLICATION, operate=Operate.USE,
+ dynamic_tag=keywords.get('application_id'))],
+ compare=CompareConstants.AND))
+ def get(self, request: Request, application_id: str):
+ return result.success(
+ ApplicationSerializer.Operate(
+ data={'application_id': application_id,
+ 'user_id': request.user.id}).list_model(request.query_params.get('model_type')))
+
+ class ModelParamsForm(APIView):
+ authentication_classes = [TokenAuth]
+
+ @action(methods=['GET'], detail=False)
+ @swagger_auto_schema(operation_summary=_("Get model parameter form"),
+ operation_id=_("Get model parameter form"),
+ tags=[_("Application")])
+ @has_permissions(ViewPermission(
+ [RoleConstants.ADMIN, RoleConstants.USER],
+ [lambda r, keywords: Permission(group=Group.APPLICATION, operate=Operate.USE,
+ dynamic_tag=keywords.get('application_id'))],
+ compare=CompareConstants.AND))
+ def get(self, request: Request, application_id: str, model_id: str):
+ return result.success(
+ ApplicationSerializer.Operate(
+ data={'application_id': application_id,
+ 'user_id': request.user.id}).get_model_params_form(model_id))
+
+ class FunctionLib(APIView):
+ authentication_classes = [TokenAuth]
+
+ @action(methods=["GET"], detail=False)
+ @swagger_auto_schema(operation_summary=_("Get a list of function libraries"),
+ operation_id=_("Get a list of function libraries"),
+ tags=[_("Application")])
+ @has_permissions(ViewPermission(
+ [RoleConstants.ADMIN, RoleConstants.USER],
+ [lambda r, keywords: Permission(group=Group.APPLICATION, operate=Operate.USE,
+ dynamic_tag=keywords.get('application_id'))],
+ compare=CompareConstants.AND))
+ def get(self, request: Request, application_id: str):
+ return result.success(
+ ApplicationSerializer.Operate(
+ data={'application_id': application_id,
+ 'user_id': request.user.id}).list_function_lib())
+
+ class Operate(APIView):
+ authentication_classes = [TokenAuth]
+
+ @action(methods=["GET"], detail=False)
+ @swagger_auto_schema(operation_summary=_("Get library details"),
+ operation_id=_("Get library details"),
+ tags=[_("Application")],
+ )
+ @has_permissions(ViewPermission(
+ [RoleConstants.ADMIN, RoleConstants.USER],
+ [lambda r, keywords: Permission(group=Group.APPLICATION, operate=Operate.USE,
+ dynamic_tag=keywords.get('application_id'))],
+ compare=CompareConstants.AND))
+ def get(self, request: Request, application_id: str, function_lib_id: str):
+ return result.success(
+ ApplicationSerializer.Operate(
+ data={'application_id': application_id,
+ 'user_id': request.user.id}).get_function_lib(function_lib_id))
+
+ class Application(APIView):
+ authentication_classes = [TokenAuth]
+
+ @action(methods=['GET'], detail=False)
+ @swagger_auto_schema(operation_summary=_("Get the list of apps created by the current user"),
+ operation_id=_("Get the list of apps created by the current user"),
+ tags=[_("Application/Chat")])
@has_permissions(ViewPermission(
[RoleConstants.ADMIN, RoleConstants.USER],
[lambda r, keywords: Permission(group=Group.APPLICATION, operate=Operate.USE,
@@ -182,45 +295,65 @@ def get(self, request: Request, application_id: str):
return result.success(
ApplicationSerializer.Operate(
data={'application_id': application_id,
- 'user_id': request.user.id}).list_model())
+ 'user_id': request.user.id}).application_list())
+
+ class Operate(APIView):
+ authentication_classes = [TokenAuth]
+
+ @action(methods=["GET"], detail=False)
+ @swagger_auto_schema(operation_summary=_("Get application data"),
+ operation_id=_("Get application data"),
+ tags=[_("Application")],
+ )
+ @has_permissions(ViewPermission(
+ [RoleConstants.ADMIN, RoleConstants.USER],
+ [lambda r, keywords: Permission(group=Group.APPLICATION, operate=Operate.USE,
+ dynamic_tag=keywords.get('application_id'))],
+ compare=CompareConstants.AND))
+ def get(self, request: Request, application_id: str, app_id: str):
+ return result.success(
+ ApplicationSerializer.Operate(
+ data={'application_id': application_id,
+ 'user_id': request.user.id}).get_application(app_id))
class Profile(APIView):
authentication_classes = [TokenAuth]
@action(methods=['GET'], detail=False)
- @swagger_auto_schema(operation_summary="获取应用相关信息",
- operation_id="获取应用相关信息",
- tags=["应用/会话"])
+ @swagger_auto_schema(operation_summary=_("Get application related information"),
+ operation_id=_("Get application related information"),
+ tags=[_("Application/Chat")])
def get(self, request: Request):
if 'application_id' in request.auth.keywords:
return result.success(ApplicationSerializer.Operate(
data={'application_id': request.auth.keywords.get('application_id'),
'user_id': request.user.id}).profile())
- else:
- raise AppAuthenticationFailed(401, "身份异常")
+ raise AppAuthenticationFailed(401, "身份异常")
class ApplicationKey(APIView):
authentication_classes = [TokenAuth]
@action(methods=['POST'], detail=False)
- @swagger_auto_schema(operation_summary="新增ApiKey",
- operation_id="新增ApiKey",
- tags=['应用/API_KEY'],
+ @swagger_auto_schema(operation_summary=_("Add ApiKey"),
+ operation_id=_("Add ApiKey"),
+ tags=[_('Application/API_KEY')],
manual_parameters=ApplicationApi.ApiKey.get_request_params_api())
@has_permissions(ViewPermission(
[RoleConstants.ADMIN, RoleConstants.USER],
[lambda r, keywords: Permission(group=Group.APPLICATION, operate=Operate.MANAGE,
dynamic_tag=keywords.get('application_id'))],
compare=CompareConstants.AND))
+ @log(menu='Application', operate="Add ApiKey",
+ get_operation_object=lambda r, k: get_application_operation_object(k.get('application_id')))
def post(self, request: Request, application_id: str):
return result.success(
ApplicationSerializer.ApplicationKeySerializer(
data={'application_id': application_id, 'user_id': request.user.id}).generate())
@action(methods=['GET'], detail=False)
- @swagger_auto_schema(operation_summary="获取应用API_KEY列表",
- operation_id="获取应用API_KEY列表",
- tags=['应用/API_KEY'],
+ @swagger_auto_schema(operation_summary=_("Get the application API_KEY list"),
+ operation_id=_("Get the application API_KEY list"),
+ tags=[_('Application/API_KEY')],
manual_parameters=ApplicationApi.ApiKey.get_request_params_api()
)
@has_permissions(ViewPermission(
@@ -236,17 +369,20 @@ class Operate(APIView):
authentication_classes = [TokenAuth]
@action(methods=['PUT'], detail=False)
- @swagger_auto_schema(operation_summary="修改应用API_KEY",
- operation_id="修改应用API_KEY",
- tags=['应用/API_KEY'],
+ @swagger_auto_schema(operation_summary=_("Modify application API_KEY"),
+ operation_id=_("Modify application API_KEY"),
+ tags=[_('Application/API_KEY')],
manual_parameters=ApplicationApi.ApiKey.Operate.get_request_params_api(),
- request_body=ApplicationApi.ApiKey.Operate.get_request_body_api())
+ request_body=ApplicationApi.ApiKey.Operate.get_request_body_api(),
+ responses=result.get_api_response(ApplicationApi.ApiKey.Operate.get_response_body_api()))
@has_permissions(ViewPermission(
[RoleConstants.ADMIN, RoleConstants.USER],
[lambda r, keywords: Permission(group=Group.APPLICATION, operate=Operate.MANAGE,
dynamic_tag=keywords.get('application_id'))],
compare=CompareConstants.AND), PermissionConstants.APPLICATION_EDIT,
compare=CompareConstants.AND)
+ @log(menu='Application', operate="Modify application API_KEY",
+ get_operation_object=lambda r, k: get_application_operation_object(k.get('application_id')))
def put(self, request: Request, application_id: str, api_key_id: str):
return result.success(
ApplicationSerializer.ApplicationKeySerializer.Operate(
@@ -254,9 +390,9 @@ def put(self, request: Request, application_id: str, api_key_id: str):
'api_key_id': api_key_id}).edit(request.data))
@action(methods=['DELETE'], detail=False)
- @swagger_auto_schema(operation_summary="删除应用API_KEY",
- operation_id="删除应用API_KEY",
- tags=['应用/API_KEY'],
+ @swagger_auto_schema(operation_summary=_("Delete Application API_KEY"),
+ operation_id=_("Delete Application API_KEY"),
+ tags=[_('Application/API_KEY')],
manual_parameters=ApplicationApi.ApiKey.Operate.get_request_params_api())
@has_permissions(ViewPermission(
[RoleConstants.ADMIN, RoleConstants.USER],
@@ -264,6 +400,8 @@ def put(self, request: Request, application_id: str, api_key_id: str):
dynamic_tag=keywords.get('application_id'))],
compare=CompareConstants.AND), PermissionConstants.APPLICATION_DELETE,
compare=CompareConstants.AND)
+ @log(menu='Application', operate="Delete Application API_KEY",
+ get_operation_object=lambda r, k: get_application_operation_object(k.get('application_id')))
def delete(self, request: Request, application_id: str, api_key_id: str):
return result.success(
ApplicationSerializer.ApplicationKeySerializer.Operate(
@@ -274,25 +412,29 @@ class AccessToken(APIView):
authentication_classes = [TokenAuth]
@action(methods=['PUT'], detail=False)
- @swagger_auto_schema(operation_summary="修改 应用AccessToken",
- operation_id="修改 应用AccessToken",
- tags=['应用/公开访问'],
+ @swagger_auto_schema(operation_summary=_("Modify Application AccessToken"),
+ operation_id=_("Modify Application AccessToken"),
+ tags=[_('Application/Public Access')],
manual_parameters=ApplicationApi.AccessToken.get_request_params_api(),
- request_body=ApplicationApi.AccessToken.get_request_body_api())
+ request_body=ApplicationApi.AccessToken.get_request_body_api(),
+ responses=result.get_api_response(ApplicationApi.AccessToken.get_response_body_api()))
@has_permissions(ViewPermission(
[RoleConstants.ADMIN, RoleConstants.USER],
[lambda r, keywords: Permission(group=Group.APPLICATION, operate=Operate.MANAGE,
dynamic_tag=keywords.get('application_id'))],
compare=CompareConstants.AND))
+ @log(menu='Application', operate="Modify Application AccessToken",
+ get_operation_object=lambda r, k: get_application_operation_object(k.get('application_id')))
def put(self, request: Request, application_id: str):
return result.success(
- ApplicationSerializer.AccessTokenSerializer(data={'application_id': application_id}).edit(request.data))
+ ApplicationSerializer.AccessTokenSerializer(data={'application_id': application_id}).edit(
+ request.data))
@action(methods=['GET'], detail=False)
- @swagger_auto_schema(operation_summary="获取应用 AccessToken信息",
- operation_id="获取应用 AccessToken信息",
+ @swagger_auto_schema(operation_summary=_("Get the application AccessToken information"),
+ operation_id=_("Get the application AccessToken information"),
manual_parameters=ApplicationApi.AccessToken.get_request_params_api(),
- tags=['应用/公开访问'],
+ tags=[_('Application/Public Access')],
)
@has_permissions(ViewPermission(
[RoleConstants.ADMIN, RoleConstants.USER],
@@ -306,19 +448,23 @@ def get(self, request: Request, application_id: str):
class Authentication(APIView):
@action(methods=['OPTIONS'], detail=False)
def options(self, request, *args, **kwargs):
- return HttpResponse(headers={"Access-Control-Allow-Origin": "*", "Access-Control-Allow-Credentials": "true",
- "Access-Control-Allow-Methods": "POST",
- "Access-Control-Allow-Headers": "Origin,Content-Type,Cookie,Accept,Token"}, )
+ return HttpResponse(
+ headers={"Access-Control-Allow-Origin": "*", "Access-Control-Allow-Credentials": "true",
+ "Access-Control-Allow-Methods": "POST",
+ "Access-Control-Allow-Headers": "Origin,Content-Type,Cookie,Accept,Token"}, )
@action(methods=['POST'], detail=False)
- @swagger_auto_schema(operation_summary="应用认证",
- operation_id="应用认证",
+ @swagger_auto_schema(operation_summary=_("Application Certification"),
+ operation_id=_("Application Certification"),
request_body=ApplicationApi.Authentication.get_request_body_api(),
- tags=["应用/认证"],
+ responses=result.get_api_response(ApplicationApi.Authentication.get_response_body_api()),
+ tags=[_("Application/Certification")],
security=[])
def post(self, request: Request):
return result.success(
- ApplicationSerializer.Authentication(data={'access_token': request.data.get("access_token")}).auth(
+ ApplicationSerializer.Authentication(data={'access_token': request.data.get("access_token"),
+ 'authentication_value': request.data.get(
+ 'authentication_value')}).auth(
request),
headers={"Access-Control-Allow-Origin": "*", "Access-Control-Allow-Credentials": "true",
"Access-Control-Allow-Methods": "POST",
@@ -326,21 +472,23 @@ def post(self, request: Request):
)
@action(methods=['POST'], detail=False)
- @swagger_auto_schema(operation_summary="创建应用",
- operation_id="创建应用",
+ @swagger_auto_schema(operation_summary=_("Create an application"),
+ operation_id=_("Create an application"),
request_body=ApplicationApi.Create.get_request_body_api(),
- tags=['应用'])
+ responses=result.get_api_response(ApplicationApi.Create.get_response_body_api()),
+ tags=[_('Application')])
@has_permissions(PermissionConstants.APPLICATION_CREATE, compare=CompareConstants.AND)
+ @log(menu='Application', operate="Create an application",
+ get_operation_object=lambda r, k: {'name': r.data.get('name')})
def post(self, request: Request):
- ApplicationSerializer.Create(data={'user_id': request.user.id}).insert(request.data)
- return result.success(True)
+ return result.success(ApplicationSerializer.Create(data={'user_id': request.user.id}).insert(request.data))
@action(methods=['GET'], detail=False)
- @swagger_auto_schema(operation_summary="获取应用列表",
- operation_id="获取应用列表",
+ @swagger_auto_schema(operation_summary=_("Get the application list"),
+ operation_id=_("Get the application list"),
manual_parameters=ApplicationApi.Query.get_request_params_api(),
responses=result.get_api_array_response(ApplicationApi.get_response_body_api()),
- tags=['应用'])
+ tags=[_('Application')])
@has_permissions(PermissionConstants.APPLICATION_READ, compare=CompareConstants.AND)
def get(self, request: Request):
return result.success(
@@ -351,10 +499,10 @@ class HitTest(APIView):
authentication_classes = [TokenAuth]
@action(methods="GET", detail=False)
- @swagger_auto_schema(operation_summary="命中测试列表", operation_id="命中测试列表",
+ @swagger_auto_schema(operation_summary=_("Hit Test List"), operation_id=_("Hit Test List"),
manual_parameters=CommonApi.HitTestApi.get_request_params_api(),
responses=result.get_api_array_response(CommonApi.HitTestApi.get_response_body_api()),
- tags=["应用"])
+ tags=[_("Application")])
@has_permissions(ViewPermission(
[RoleConstants.ADMIN, RoleConstants.USER, RoleConstants.APPLICATION_ACCESS_TOKEN,
RoleConstants.APPLICATION_KEY],
@@ -367,18 +515,41 @@ def get(self, request: Request, application_id: str):
"query_text": request.query_params.get("query_text"),
"top_number": request.query_params.get("top_number"),
'similarity': request.query_params.get('similarity'),
- 'search_mode': request.query_params.get('search_mode')}).hit_test(
+ 'search_mode': request.query_params.get(
+ 'search_mode')}).hit_test(
))
+ class Publish(APIView):
+ authentication_classes = [TokenAuth]
+
+ @action(methods=['PUT'], detail=False)
+ @swagger_auto_schema(operation_summary=_("Publishing an application"),
+ operation_id=_("Publishing an application"),
+ manual_parameters=ApplicationApi.Operate.get_request_params_api(),
+ request_body=ApplicationApi.Publish.get_request_body_api(),
+ responses=result.get_default_response(),
+ tags=[_('Application')])
+ @has_permissions(ViewPermission(
+ [RoleConstants.ADMIN, RoleConstants.USER],
+ [lambda r, keywords: Permission(group=Group.APPLICATION, operate=Operate.MANAGE,
+ dynamic_tag=keywords.get('application_id'))],
+ compare=CompareConstants.AND))
+ @log(menu='Application', operate="Publishing an application",
+ get_operation_object=lambda r, k: get_application_operation_object(k.get('application_id')))
+ def put(self, request: Request, application_id: str):
+ return result.success(
+ ApplicationSerializer.Operate(
+ data={'application_id': application_id, 'user_id': request.user.id}).publish(request.data))
+
class Operate(APIView):
authentication_classes = [TokenAuth]
@action(methods=['DELETE'], detail=False)
- @swagger_auto_schema(operation_summary="删除应用",
- operation_id="删除应用",
+ @swagger_auto_schema(operation_summary=_("Deleting application"),
+ operation_id=_("Deleting application"),
manual_parameters=ApplicationApi.Operate.get_request_params_api(),
responses=result.get_default_response(),
- tags=['应用'])
+ tags=[_('Application')])
@has_permissions(ViewPermission(
[RoleConstants.ADMIN, RoleConstants.USER],
[lambda r, keywords: Permission(group=Group.APPLICATION, operate=Operate.MANAGE,
@@ -386,34 +557,39 @@ class Operate(APIView):
compare=CompareConstants.AND),
lambda r, k: Permission(group=Group.APPLICATION, operate=Operate.DELETE,
dynamic_tag=k.get('application_id')), compare=CompareConstants.AND)
+ @log(menu='Application', operate="Deleting application",
+ get_operation_object=lambda r, k: get_application_operation_object(k.get('application_id')))
def delete(self, request: Request, application_id: str):
return result.success(ApplicationSerializer.Operate(
data={'application_id': application_id, 'user_id': request.user.id}).delete(
with_valid=True))
@action(methods=['PUT'], detail=False)
- @swagger_auto_schema(operation_summary="修改应用",
- operation_id="修改应用",
+ @swagger_auto_schema(operation_summary=_("Modify the application"),
+ operation_id=_("Modify the application"),
manual_parameters=ApplicationApi.Operate.get_request_params_api(),
request_body=ApplicationApi.Edit.get_request_body_api(),
responses=result.get_api_array_response(ApplicationApi.get_response_body_api()),
- tags=['应用'])
+ tags=[_('Application')])
@has_permissions(ViewPermission(
[RoleConstants.ADMIN, RoleConstants.USER],
[lambda r, keywords: Permission(group=Group.APPLICATION, operate=Operate.MANAGE,
dynamic_tag=keywords.get('application_id'))],
compare=CompareConstants.AND))
+ @log(menu='Application', operate="Modify the application",
+ get_operation_object=lambda r, k: get_application_operation_object(k.get('application_id')))
def put(self, request: Request, application_id: str):
return result.success(
- ApplicationSerializer.Operate(data={'application_id': application_id, 'user_id': request.user.id}).edit(
+ ApplicationSerializer.Operate(
+ data={'application_id': application_id, 'user_id': request.user.id}).edit(
request.data))
@action(methods=['GET'], detail=False)
- @swagger_auto_schema(operation_summary="获取应用详情",
- operation_id="获取应用详情",
+ @swagger_auto_schema(operation_summary=_("Get application details"),
+ operation_id=_("Get application details"),
manual_parameters=ApplicationApi.Operate.get_request_params_api(),
responses=result.get_api_array_response(ApplicationApi.get_response_body_api()),
- tags=['应用'])
+ tags=[_('Application')])
@has_permissions(ViewPermission(
[RoleConstants.ADMIN, RoleConstants.USER, RoleConstants.APPLICATION_ACCESS_TOKEN,
RoleConstants.APPLICATION_KEY],
@@ -428,14 +604,17 @@ class ListApplicationDataSet(APIView):
authentication_classes = [TokenAuth]
@action(methods=['GET'], detail=False)
- @swagger_auto_schema(operation_summary="获取当前应用可使用的知识库",
- operation_id="获取当前应用可使用的知识库",
+ @swagger_auto_schema(operation_summary=_("Get the knowledge base available to the current application"),
+ operation_id=_("Get the knowledge base available to the current application"),
manual_parameters=ApplicationApi.Operate.get_request_params_api(),
- responses=result.get_api_array_response(DataSetSerializers.Query.get_response_body_api()),
- tags=['应用'])
+ responses=result.get_api_array_response(
+ DataSetSerializers.Query.get_response_body_api()),
+ tags=[_('Application')])
@has_permissions(ViewPermission([RoleConstants.ADMIN, RoleConstants.USER],
- [lambda r, keywords: Permission(group=Group.APPLICATION, operate=Operate.USE,
- dynamic_tag=keywords.get('application_id'))],
+ [lambda r, keywords: Permission(group=Group.APPLICATION,
+ operate=Operate.USE,
+ dynamic_tag=keywords.get(
+ 'application_id'))],
compare=CompareConstants.AND))
def get(self, request: Request, application_id: str):
return result.success(ApplicationSerializer.Operate(
@@ -445,15 +624,83 @@ class Page(APIView):
authentication_classes = [TokenAuth]
@action(methods=['GET'], detail=False)
- @swagger_auto_schema(operation_summary="分页获取应用列表",
- operation_id="分页获取应用列表",
+ @swagger_auto_schema(operation_summary=_("Get the application list by page"),
+ operation_id=_("Get the application list by page"),
manual_parameters=result.get_page_request_params(
ApplicationApi.Query.get_request_params_api()),
responses=result.get_page_api_response(ApplicationApi.get_response_body_api()),
- tags=['应用'])
+ tags=[_('Application')])
@has_permissions(PermissionConstants.APPLICATION_READ, compare=CompareConstants.AND)
def get(self, request: Request, current_page: int, page_size: int):
return result.success(
ApplicationSerializer.Query(
data={**query_params_to_single_dict(request.query_params), 'user_id': request.user.id}).page(
current_page, page_size))
+
+ class SpeechToText(APIView):
+ authentication_classes = [TokenAuth]
+
+ @action(methods=['POST'], detail=False)
+ @has_permissions(
+ ViewPermission([RoleConstants.ADMIN, RoleConstants.USER, RoleConstants.APPLICATION_ACCESS_TOKEN],
+ [lambda r, keywords: Permission(group=Group.APPLICATION,
+ operate=Operate.USE,
+ dynamic_tag=keywords.get(
+ 'application_id'))],
+ compare=CompareConstants.AND))
+ def post(self, request: Request, application_id: str):
+ return result.success(
+ ApplicationSerializer.Operate(data={'application_id': application_id, 'user_id': request.user.id})
+ .speech_to_text(request.FILES.getlist('file')[0]))
+
+ class TextToSpeech(APIView):
+ authentication_classes = [TokenAuth]
+
+ @action(methods=['POST'], detail=False)
+ @swagger_auto_schema(operation_summary=_("text to speech"),
+ operation_id=_("text to speech"),
+ manual_parameters=ApplicationApi.TextToSpeech.get_request_params_api(),
+ request_body=ApplicationApi.TextToSpeech.get_request_body_api(),
+ responses=result.get_default_response(),
+ tags=[_('Application')])
+ @has_permissions(
+ ViewPermission([RoleConstants.ADMIN, RoleConstants.USER, RoleConstants.APPLICATION_ACCESS_TOKEN],
+ [lambda r, keywords: Permission(group=Group.APPLICATION,
+ operate=Operate.USE,
+ dynamic_tag=keywords.get(
+ 'application_id'))],
+ compare=CompareConstants.AND))
+ def post(self, request: Request, application_id: str):
+ byte_data = ApplicationSerializer.Operate(
+ data={'application_id': application_id, 'user_id': request.user.id}).text_to_speech(
+ request.data.get('text'))
+ return HttpResponse(byte_data, status=200, headers={'Content-Type': 'audio/mp3',
+ 'Content-Disposition': 'attachment; filename="abc.mp3"'})
+
+ class PlayDemoText(APIView):
+ authentication_classes = [TokenAuth]
+
+ @action(methods=['POST'], detail=False)
+ @has_permissions(
+ ViewPermission([RoleConstants.ADMIN, RoleConstants.USER, RoleConstants.APPLICATION_ACCESS_TOKEN],
+ [lambda r, keywords: Permission(group=Group.APPLICATION,
+ operate=Operate.USE,
+ dynamic_tag=keywords.get(
+ 'application_id'))],
+ compare=CompareConstants.AND))
+ @log(menu='Application', operate="trial listening",
+ get_operation_object=lambda r, k: get_application_operation_object(k.get('application_id')))
+ def post(self, request: Request, application_id: str):
+ byte_data = ApplicationSerializer.Operate(
+ data={'application_id': application_id, 'user_id': request.user.id}).play_demo_text(request.data)
+ return HttpResponse(byte_data, status=200, headers={'Content-Type': 'audio/mp3',
+ 'Content-Disposition': 'attachment; filename="abc.mp3"'})
+
+ class McpServers(APIView):
+ authentication_classes = [TokenAuth]
+
+ @action(methods=['GET'], detail=False)
+ @has_permissions(PermissionConstants.APPLICATION_READ, compare=CompareConstants.AND)
+ def get(self, request: Request):
+ return result.success(ApplicationSerializer.McpServers(
+ data={'mcp_servers': request.query_params.get('mcp_servers')}).get_mcp_servers())
diff --git a/apps/application/views/chat_views.py b/apps/application/views/chat_views.py
index 2d6ef10f11e..e8d402b9ae9 100644
--- a/apps/application/views/chat_views.py
+++ b/apps/application/views/chat_views.py
@@ -6,20 +6,42 @@
@date:2023/11/14 9:53
@desc:
"""
+
+from django.utils.translation import gettext_lazy as _
+from drf_yasg import openapi
from drf_yasg.utils import swagger_auto_schema
from rest_framework.decorators import action
+from rest_framework.parsers import MultiPartParser
from rest_framework.request import Request
from rest_framework.views import APIView
-from application.serializers.chat_message_serializers import ChatMessageSerializer
+from application.serializers.chat_message_serializers import ChatMessageSerializer, OpenAIChatSerializer
from application.serializers.chat_serializers import ChatSerializers, ChatRecordSerializer
-from application.swagger_api.chat_api import ChatApi, VoteApi, ChatRecordApi, ImproveApi, ChatRecordImproveApi
-from common.auth import TokenAuth, has_permissions
+from application.swagger_api.chat_api import ChatApi, VoteApi, ChatRecordApi, ImproveApi, ChatRecordImproveApi, \
+ ChatClientHistoryApi, OpenAIChatApi
+from application.views import get_application_operation_object
+from common.auth import TokenAuth, has_permissions, OpenAIKeyAuth
from common.constants.authentication_type import AuthenticationType
from common.constants.permission_constants import Permission, Group, Operate, \
RoleConstants, ViewPermission, CompareConstants
+from common.log.log import log
from common.response import result
from common.util.common import query_params_to_single_dict
+from dataset.serializers.file_serializers import FileSerializer
+
+
+class Openai(APIView):
+ authentication_classes = [OpenAIKeyAuth]
+
+ @action(methods=['POST'], detail=False)
+ @swagger_auto_schema(operation_summary=_("OpenAI Interface Dialogue"),
+ operation_id=_("OpenAI Interface Dialogue"),
+ request_body=OpenAIChatApi.get_request_body_api(),
+ responses=OpenAIChatApi.get_response_body_api(),
+ tags=[_("OpenAI Dialogue")])
+ def post(self, request: Request, application_id: str):
+ return OpenAIChatSerializer(data={'application_id': application_id, 'client_id': request.auth.client_id,
+ 'client_type': request.auth.client_type}).chat(request.data)
class ChatView(APIView):
@@ -28,30 +50,32 @@ class ChatView(APIView):
class Export(APIView):
authentication_classes = [TokenAuth]
- @action(methods=['GET'], detail=False)
- @swagger_auto_schema(operation_summary="导出对话",
- operation_id="导出对话",
+ @action(methods=['POST'], detail=False)
+ @swagger_auto_schema(operation_summary=_("Export conversation"),
+ operation_id=_("Export conversation"),
manual_parameters=ChatApi.get_request_params_api(),
- tags=["应用/对话日志"]
+ tags=[_("Application/Conversation Log")]
)
@has_permissions(
ViewPermission([RoleConstants.ADMIN, RoleConstants.USER, RoleConstants.APPLICATION_KEY],
[lambda r, keywords: Permission(group=Group.APPLICATION, operate=Operate.USE,
dynamic_tag=keywords.get('application_id'))])
)
- def get(self, request: Request, application_id: str):
+ @log(menu='Conversation Log', operate="Export conversation",
+ get_operation_object=lambda r, k: get_application_operation_object(k.get('application_id')))
+ def post(self, request: Request, application_id: str):
return ChatSerializers.Query(
data={**query_params_to_single_dict(request.query_params), 'application_id': application_id,
- 'user_id': request.user.id}).export()
+ 'user_id': request.user.id}).export(request.data)
class Open(APIView):
authentication_classes = [TokenAuth]
@action(methods=['GET'], detail=False)
- @swagger_auto_schema(operation_summary="获取会话id,根据应用id",
- operation_id="获取会话id,根据应用id",
+ @swagger_auto_schema(operation_summary=_("Get the session id according to the application id"),
+ operation_id=_("Get the session id according to the application id"),
manual_parameters=ChatApi.OpenChat.get_request_params_api(),
- tags=["应用/会话"])
+ tags=[_("Application/Chat")])
@has_permissions(
ViewPermission([RoleConstants.ADMIN, RoleConstants.USER, RoleConstants.APPLICATION_ACCESS_TOKEN,
RoleConstants.APPLICATION_KEY],
@@ -63,14 +87,28 @@ def get(self, request: Request, application_id: str):
return result.success(ChatSerializers.OpenChat(
data={'user_id': request.user.id, 'application_id': application_id}).open())
+ class OpenWorkFlowTemp(APIView):
+ authentication_classes = [TokenAuth]
+
+ @action(methods=['POST'], detail=False)
+ @swagger_auto_schema(operation_summary=_("Get the workflow temporary session id"),
+ operation_id=_("Get the workflow temporary session id"),
+ request_body=ChatApi.OpenWorkFlowTemp.get_request_body_api(),
+ responses=result.get_api_response(ChatApi.OpenTempChat.get_response_body_api()),
+ tags=[_("Application/Chat")])
+ def post(self, request: Request):
+ return result.success(ChatSerializers.OpenWorkFlowChat(
+ data={'user_id': request.user.id, **request.data}).open())
+
class OpenTemp(APIView):
authentication_classes = [TokenAuth]
@action(methods=['POST'], detail=False)
- @swagger_auto_schema(operation_summary="获取会话id(根据模型id,知识库列表,是否多轮会话)",
- operation_id="获取会话id",
+ @swagger_auto_schema(operation_summary=_("Get a temporary session id"),
+ operation_id=_("Get a temporary session id"),
request_body=ChatApi.OpenTempChat.get_request_body_api(),
- tags=["应用/会话"])
+ responses=result.get_api_response(ChatApi.OpenTempChat.get_response_body_api()),
+ tags=[_("Application/Chat")])
@has_permissions(RoleConstants.ADMIN, RoleConstants.USER)
def post(self, request: Request):
return result.success(ChatSerializers.OpenTempChat(
@@ -80,10 +118,10 @@ class Message(APIView):
authentication_classes = [TokenAuth]
@action(methods=['POST'], detail=False)
- @swagger_auto_schema(operation_summary="对话",
- operation_id="对话",
+ @swagger_auto_schema(operation_summary=_("dialogue"),
+ operation_id=_("dialogue"),
request_body=ChatApi.get_request_body_api(),
- tags=["应用/会话"])
+ tags=[_("Application/Chat")])
@has_permissions(
ViewPermission([RoleConstants.ADMIN, RoleConstants.USER, RoleConstants.APPLICATION_KEY,
RoleConstants.APPLICATION_ACCESS_TOKEN],
@@ -99,14 +137,31 @@ def post(self, request: Request, chat_id: str):
'application_id': (request.auth.keywords.get(
'application_id') if request.auth.client_type == AuthenticationType.APPLICATION_ACCESS_TOKEN.value else None),
'client_id': request.auth.client_id,
- 'client_type': request.auth.client_type}).chat()
+ 'form_data': (request.data.get(
+ 'form_data') if 'form_data' in request.data else {}),
+
+ 'image_list': request.data.get(
+ 'image_list') if 'image_list' in request.data else [],
+ 'document_list': request.data.get(
+ 'document_list') if 'document_list' in request.data else [],
+ 'audio_list': request.data.get(
+ 'audio_list') if 'audio_list' in request.data else [],
+ 'other_list': request.data.get(
+ 'other_list') if 'other_list' in request.data else [],
+ 'client_type': request.auth.client_type,
+ 'node_id': request.data.get('node_id', None),
+ 'runtime_node_id': request.data.get('runtime_node_id', None),
+ 'node_data': request.data.get('node_data', {}),
+ 'chat_record_id': request.data.get('chat_record_id'),
+ 'child_node': request.data.get('child_node')}
+ ).chat()
@action(methods=['GET'], detail=False)
- @swagger_auto_schema(operation_summary="获取对话列表",
- operation_id="获取对话列表",
+ @swagger_auto_schema(operation_summary=_("Get the conversation list"),
+ operation_id=_("Get the conversation list"),
manual_parameters=ChatApi.get_request_params_api(),
responses=result.get_api_array_response(ChatApi.get_response_body_api()),
- tags=["应用/对话日志"]
+ tags=[_("Application/Conversation Log")]
)
@has_permissions(
ViewPermission([RoleConstants.ADMIN, RoleConstants.USER, RoleConstants.APPLICATION_KEY],
@@ -122,30 +177,95 @@ class Operate(APIView):
authentication_classes = [TokenAuth]
@action(methods=['DELETE'], detail=False)
- @swagger_auto_schema(operation_summary="删除对话",
- operation_id="删除对话",
- tags=["应用/对话日志"])
+ @swagger_auto_schema(operation_summary=_("Delete a conversation"),
+ operation_id=_("Delete a conversation"),
+ tags=[_("Application/Conversation Log")])
@has_permissions(ViewPermission(
[RoleConstants.ADMIN, RoleConstants.USER],
[lambda r, keywords: Permission(group=Group.APPLICATION, operate=Operate.MANAGE,
dynamic_tag=keywords.get('application_id'))],
compare=CompareConstants.AND),
compare=CompareConstants.AND)
+ @log(menu='Conversation Log', operate="Delete a conversation",
+ get_operation_object=lambda r, k: get_application_operation_object(k.get('application_id')))
def delete(self, request: Request, application_id: str, chat_id: str):
return result.success(
ChatSerializers.Operate(
data={'application_id': application_id, 'user_id': request.user.id,
'chat_id': chat_id}).delete())
+ class ClientChatHistoryPage(APIView):
+ authentication_classes = [TokenAuth]
+
+ @action(methods=['GET'], detail=False)
+ @swagger_auto_schema(operation_summary=_("Get client conversation list by paging"),
+ operation_id=_("Get client conversation list by paging"),
+ manual_parameters=result.get_page_request_params(
+ ChatClientHistoryApi.get_request_params_api()),
+ responses=result.get_page_api_response(ChatApi.get_response_body_api()),
+ tags=[_("Application/Conversation Log")]
+ )
+ @has_permissions(
+ ViewPermission([RoleConstants.APPLICATION_ACCESS_TOKEN],
+ [lambda r, keywords: Permission(group=Group.APPLICATION, operate=Operate.USE,
+ dynamic_tag=keywords.get('application_id'))])
+ )
+ def get(self, request: Request, application_id: str, current_page: int, page_size: int):
+ return result.success(ChatSerializers.ClientChatHistory(
+ data={'client_id': request.auth.client_id, 'application_id': application_id}).page(
+ current_page=current_page,
+ page_size=page_size))
+
+ class Operate(APIView):
+ authentication_classes = [TokenAuth]
+
+ @action(methods=['DELETE'], detail=False)
+ @swagger_auto_schema(operation_summary=_("Client deletes conversation"),
+ operation_id=_("Client deletes conversation"),
+ tags=[_("Application/Conversation Log")])
+ @has_permissions(ViewPermission(
+ [RoleConstants.APPLICATION_ACCESS_TOKEN],
+ [lambda r, keywords: Permission(group=Group.APPLICATION, operate=Operate.USE,
+ dynamic_tag=keywords.get('application_id'))],
+ compare=CompareConstants.AND),
+ compare=CompareConstants.AND)
+ @log(menu='Conversation Log', operate="Client deletes conversation",
+ get_operation_object=lambda r, k: get_application_operation_object(k.get('application_id')))
+ def delete(self, request: Request, application_id: str, chat_id: str):
+ return result.success(
+ ChatSerializers.Operate(
+ data={'application_id': application_id, 'user_id': request.user.id,
+ 'chat_id': chat_id}).logic_delete())
+
+ @action(methods=['PUT'], detail=False)
+ @swagger_auto_schema(operation_summary=_("Client modifies dialogue summary"),
+ operation_id=_("Client modifies dialogue summary"),
+ request_body=ChatClientHistoryApi.Operate.ReAbstract.get_request_body_api(),
+ responses=result.get_default_response(),
+ tags=[_("Application/Conversation Log")])
+ @has_permissions(ViewPermission(
+ [RoleConstants.APPLICATION_ACCESS_TOKEN, RoleConstants.ADMIN, RoleConstants.USER],
+ [lambda r, keywords: Permission(group=Group.APPLICATION, operate=Operate.USE,
+ dynamic_tag=keywords.get('application_id'))],
+ compare=CompareConstants.AND),
+ compare=CompareConstants.AND)
+ @log(menu='Conversation Log', operate="Client modifies dialogue summary",
+ get_operation_object=lambda r, k: get_application_operation_object(k.get('application_id')))
+ def put(self, request: Request, application_id: str, chat_id: str):
+ return result.success(
+ ChatSerializers.Operate(
+ data={'application_id': application_id, 'user_id': request.user.id,
+ 'chat_id': chat_id}).re_abstract(request.data))
+
class Page(APIView):
authentication_classes = [TokenAuth]
@action(methods=['GET'], detail=False)
- @swagger_auto_schema(operation_summary="分页获取对话列表",
- operation_id="分页获取对话列表",
+ @swagger_auto_schema(operation_summary=_("Get the conversation list by page"),
+ operation_id=_("Get the conversation list by page"),
manual_parameters=result.get_page_request_params(ChatApi.get_request_params_api()),
responses=result.get_page_api_response(ChatApi.get_response_body_api()),
- tags=["应用/对话日志"]
+ tags=[_("Application/Conversation Log")]
)
@has_permissions(
ViewPermission([RoleConstants.ADMIN, RoleConstants.USER, RoleConstants.APPLICATION_KEY],
@@ -165,11 +285,11 @@ class Operate(APIView):
authentication_classes = [TokenAuth]
@action(methods=['GET'], detail=False)
- @swagger_auto_schema(operation_summary="获取对话记录详情",
- operation_id="获取对话记录详情",
+ @swagger_auto_schema(operation_summary=_("Get conversation record details"),
+ operation_id=_("Get conversation record details"),
manual_parameters=ChatRecordApi.get_request_params_api(),
responses=result.get_api_array_response(ChatRecordApi.get_response_body_api()),
- tags=["应用/对话日志"]
+ tags=[_("Application/Conversation Log")]
)
@has_permissions(
ViewPermission([RoleConstants.ADMIN, RoleConstants.USER, RoleConstants.APPLICATION_KEY,
@@ -184,11 +304,11 @@ def get(self, request: Request, application_id: str, chat_id: str, chat_record_i
'chat_record_id': chat_record_id}).one(request.auth.current_role))
@action(methods=['GET'], detail=False)
- @swagger_auto_schema(operation_summary="获取对话记录列表",
- operation_id="获取对话记录列表",
+ @swagger_auto_schema(operation_summary=_("Get a list of conversation records"),
+ operation_id=_("Get a list of conversation records"),
manual_parameters=ChatRecordApi.get_request_params_api(),
responses=result.get_api_array_response(ChatRecordApi.get_response_body_api()),
- tags=["应用/对话日志"]
+ tags=[_("Application/Conversation Log")]
)
@has_permissions(
ViewPermission([RoleConstants.ADMIN, RoleConstants.USER, RoleConstants.APPLICATION_KEY],
@@ -198,18 +318,18 @@ def get(self, request: Request, application_id: str, chat_id: str, chat_record_i
def get(self, request: Request, application_id: str, chat_id: str):
return result.success(ChatRecordSerializer.Query(
data={'application_id': application_id,
- 'chat_id': chat_id}).list())
+ 'chat_id': chat_id, 'order_asc': request.query_params.get('order_asc')}).list())
class Page(APIView):
authentication_classes = [TokenAuth]
@action(methods=['GET'], detail=False)
- @swagger_auto_schema(operation_summary="获取对话记录列表",
- operation_id="获取对话记录列表",
+ @swagger_auto_schema(operation_summary=_("Get the conversation history list by page"),
+ operation_id=_("Get the conversation history list by page"),
manual_parameters=result.get_page_request_params(
ChatRecordApi.get_request_params_api()),
responses=result.get_page_api_response(ChatRecordApi.get_response_body_api()),
- tags=["应用/对话日志"]
+ tags=[_("Application/Conversation Log")]
)
@has_permissions(
ViewPermission([RoleConstants.ADMIN, RoleConstants.USER, RoleConstants.APPLICATION_KEY],
@@ -219,18 +339,19 @@ class Page(APIView):
def get(self, request: Request, application_id: str, chat_id: str, current_page: int, page_size: int):
return result.success(ChatRecordSerializer.Query(
data={'application_id': application_id,
- 'chat_id': chat_id}).page(current_page, page_size))
+ 'chat_id': chat_id, 'order_asc': request.query_params.get('order_asc')}).page(current_page,
+ page_size))
class Vote(APIView):
authentication_classes = [TokenAuth]
@action(methods=['PUT'], detail=False)
- @swagger_auto_schema(operation_summary="点赞,点踩",
- operation_id="点赞,点踩",
+ @swagger_auto_schema(operation_summary=_("Like, Dislike"),
+ operation_id=_("Like, Dislike"),
manual_parameters=VoteApi.get_request_params_api(),
request_body=VoteApi.get_request_body_api(),
responses=result.get_default_response(),
- tags=["应用/会话"]
+ tags=[_("Application/Chat")]
)
@has_permissions(
ViewPermission([RoleConstants.ADMIN, RoleConstants.USER, RoleConstants.APPLICATION_KEY,
@@ -238,6 +359,8 @@ class Vote(APIView):
[lambda r, keywords: Permission(group=Group.APPLICATION, operate=Operate.USE,
dynamic_tag=keywords.get('application_id'))])
)
+ @log(menu='Conversation Log', operate="Like, Dislike",
+ get_operation_object=lambda r, k: get_application_operation_object(k.get('application_id')))
def put(self, request: Request, application_id: str, chat_id: str, chat_record_id: str):
return result.success(ChatRecordSerializer.Vote(
data={'vote_status': request.data.get('vote_status'), 'chat_id': chat_id,
@@ -247,11 +370,11 @@ class ChatRecordImprove(APIView):
authentication_classes = [TokenAuth]
@action(methods=['GET'], detail=False)
- @swagger_auto_schema(operation_summary="获取标注段落列表信息",
- operation_id="获取标注段落列表信息",
+ @swagger_auto_schema(operation_summary=_("Get the list of marked paragraphs"),
+ operation_id=_("Get the list of marked paragraphs"),
manual_parameters=ChatRecordImproveApi.get_request_params_api(),
responses=result.get_api_response(ChatRecordImproveApi.get_response_body_api()),
- tags=["应用/对话日志/标注"]
+ tags=[_("Application/Conversation Log/Annotation")]
)
@has_permissions(
ViewPermission([RoleConstants.ADMIN, RoleConstants.USER],
@@ -266,12 +389,12 @@ class Improve(APIView):
authentication_classes = [TokenAuth]
@action(methods=['PUT'], detail=False)
- @swagger_auto_schema(operation_summary="标注",
- operation_id="标注",
+ @swagger_auto_schema(operation_summary=_("Annotation"),
+ operation_id=_("Annotation"),
manual_parameters=ImproveApi.get_request_params_api(),
request_body=ImproveApi.get_request_body_api(),
responses=result.get_api_response(ChatRecordApi.get_response_body_api()),
- tags=["应用/对话日志/标注"]
+ tags=[_("Application/Conversation Log/Annotation")]
)
@has_permissions(
ViewPermission([RoleConstants.ADMIN, RoleConstants.USER],
@@ -285,21 +408,48 @@ class Improve(APIView):
'dataset_id'))],
compare=CompareConstants.AND
), compare=CompareConstants.AND)
+ @log(menu='Conversation Log', operate="Annotation",
+ get_operation_object=lambda r, k: get_application_operation_object(k.get('application_id')))
def put(self, request: Request, application_id: str, chat_id: str, chat_record_id: str, dataset_id: str,
document_id: str):
return result.success(ChatRecordSerializer.Improve(
data={'chat_id': chat_id, 'chat_record_id': chat_record_id,
'dataset_id': dataset_id, 'document_id': document_id}).improve(request.data))
+ @action(methods=['POST'], detail=False)
+ @swagger_auto_schema(operation_summary=_("Add to Knowledge Base"),
+ operation_id=_("Add to Knowledge Base"),
+ manual_parameters=ImproveApi.get_request_params_api_post(),
+ request_body=ImproveApi.get_request_body_api_post(),
+ responses=result.get_default_response(),
+ tags=[_("Application/Conversation Log/Add to Knowledge Base")]
+ )
+ @has_permissions(
+ ViewPermission([RoleConstants.ADMIN, RoleConstants.USER],
+ [lambda r, keywords: Permission(group=Group.APPLICATION, operate=Operate.USE,
+ dynamic_tag=keywords.get('application_id'))],
+
+ ), ViewPermission([RoleConstants.ADMIN, RoleConstants.USER],
+ [lambda r, keywords: Permission(group=Group.DATASET,
+ operate=Operate.MANAGE,
+ dynamic_tag=keywords.get(
+ 'dataset_id'))],
+ compare=CompareConstants.AND
+ ), compare=CompareConstants.AND)
+ @log(menu='Conversation Log', operate="Add to Knowledge Base",
+ get_operation_object=lambda r, k: get_application_operation_object(k.get('application_id')))
+ def post(self, request: Request, application_id: str, dataset_id: str):
+ return result.success(ChatRecordSerializer.PostImprove().post_improve(request.data))
+
class Operate(APIView):
authentication_classes = [TokenAuth]
@action(methods=['DELETE'], detail=False)
- @swagger_auto_schema(operation_summary="标注",
- operation_id="标注",
+ @swagger_auto_schema(operation_summary=_("Delete a Annotation"),
+ operation_id=_("Delete a Annotation"),
manual_parameters=ImproveApi.get_request_params_api(),
responses=result.get_api_response(ChatRecordApi.get_response_body_api()),
- tags=["应用/对话日志/标注"]
+ tags=[_("Application/Conversation Log/Annotation")]
)
@has_permissions(
ViewPermission([RoleConstants.ADMIN, RoleConstants.USER],
@@ -313,6 +463,8 @@ class Operate(APIView):
'dataset_id'))],
compare=CompareConstants.AND
), compare=CompareConstants.AND)
+ @log(menu='Conversation Log', operate="Delete a Annotation",
+ get_operation_object=lambda r, k: get_application_operation_object(k.get('application_id')))
def delete(self, request: Request, application_id: str, chat_id: str, chat_record_id: str,
dataset_id: str,
document_id: str, paragraph_id: str):
@@ -320,3 +472,45 @@ def delete(self, request: Request, application_id: str, chat_id: str, chat_recor
data={'chat_id': chat_id, 'chat_record_id': chat_record_id,
'dataset_id': dataset_id, 'document_id': document_id,
'paragraph_id': paragraph_id}).delete())
+
+ class UploadFile(APIView):
+ authentication_classes = [TokenAuth]
+ parser_classes = [MultiPartParser]
+
+ @action(methods=['POST'], detail=False)
+ @swagger_auto_schema(operation_summary=_("Upload files"),
+ operation_id=_("Upload files"),
+ manual_parameters=[
+ openapi.Parameter(name='application_id',
+ in_=openapi.IN_PATH,
+ type=openapi.TYPE_STRING,
+ required=True,
+ description=_('Application ID')),
+ openapi.Parameter(name='chat_id',
+ in_=openapi.IN_PATH,
+ type=openapi.TYPE_STRING,
+ required=True,
+ description=_('Conversation ID')),
+ openapi.Parameter(name='file',
+ in_=openapi.IN_FORM,
+ type=openapi.TYPE_FILE,
+ required=True,
+ description=_('Upload file'))
+ ],
+ tags=[_("Application/Conversation Log")]
+ )
+ @has_permissions(
+ ViewPermission([RoleConstants.ADMIN, RoleConstants.USER, RoleConstants.APPLICATION_KEY,
+ RoleConstants.APPLICATION_ACCESS_TOKEN],
+ [lambda r, keywords: Permission(group=Group.APPLICATION, operate=Operate.USE,
+ dynamic_tag=keywords.get('application_id'))])
+ )
+ def post(self, request: Request, application_id: str, chat_id: str):
+ files = request.FILES.getlist('file')
+ file_ids = []
+ debug = request.data.get("debug", "false").lower() == "true"
+ meta = {'application_id': application_id, 'chat_id': chat_id, 'debug': debug}
+ for file in files:
+ file_url = FileSerializer(data={'file': file, 'meta': meta}).upload()
+ file_ids.append({'name': file.name, 'url': file_url, 'file_id': file_url.split('/')[-1]})
+ return result.success(file_ids)
diff --git a/apps/application/views/common.py b/apps/application/views/common.py
new file mode 100644
index 00000000000..7773039c23b
--- /dev/null
+++ b/apps/application/views/common.py
@@ -0,0 +1,21 @@
+# coding=utf-8
+"""
+ @project: MaxKB
+ @Author:虎
+ @file: common.py
+ @date:2025/3/25 16:56
+ @desc:
+"""
+
+from django.db.models import QuerySet
+
+from application.models import Application
+
+
+def get_application_operation_object(application_id):
+ application_model = QuerySet(model=Application).filter(id=application_id).first()
+ if application_model is not None:
+ return {
+ "name": application_model.name
+ }
+ return {}
diff --git a/apps/common/auth/authenticate.py b/apps/common/auth/authenticate.py
index 3d2a2258ea0..6eddb76b4c3 100644
--- a/apps/common/auth/authenticate.py
+++ b/apps/common/auth/authenticate.py
@@ -7,16 +7,16 @@
@desc: 认证类
"""
import traceback
+from importlib import import_module
+from django.conf import settings
from django.core import cache
from django.core import signing
from rest_framework.authentication import TokenAuthentication
-from common.auth.handle.impl.application_key import ApplicationKey
-from common.auth.handle.impl.public_access_token import PublicAccessToken
-from common.auth.handle.impl.user_token import UserToken
-from common.exception.app_exception import AppAuthenticationFailed, AppEmbedIdentityFailed, AppChatNumOutOfBoundsFailed
-
+from common.exception.app_exception import AppAuthenticationFailed, AppEmbedIdentityFailed, AppChatNumOutOfBoundsFailed, \
+ ChatException, AppApiException
+from django.utils.translation import gettext_lazy as _
token_cache = cache.caches['token_cache']
@@ -25,7 +25,16 @@ def authenticate(self, request):
return None, None
-handles = [UserToken(), PublicAccessToken(), ApplicationKey()]
+def new_instance_by_class_path(class_path: str):
+ parts = class_path.rpartition('.')
+ package_path = parts[0]
+ class_name = parts[2]
+ module = import_module(package_path)
+ HandlerClass = getattr(module, class_name)
+ return HandlerClass()
+
+
+handles = [new_instance_by_class_path(class_path) for class_path in settings.AUTH_HANDLES]
class TokenDetails:
@@ -44,21 +53,43 @@ def get_token_details(self):
return self.token_details
+class OpenAIKeyAuth(TokenAuthentication):
+ def authenticate(self, request):
+        auth = request.META.get('HTTP_AUTHORIZATION')
+        # Not authenticated: reject a missing header before stripping the Bearer prefix
+        if auth is None:
+            raise AppAuthenticationFailed(1003, _('Not logged in, please log in first'))
+        auth = auth.replace('Bearer ', '')
+ try:
+ token_details = TokenDetails(auth)
+ for handle in handles:
+ if handle.support(request, auth, token_details.get_token_details):
+ return handle.handle(request, auth, token_details.get_token_details)
+ raise AppAuthenticationFailed(1002, _('Authentication information is incorrect! illegal user'))
+ except Exception as e:
+ traceback.format_exc()
+ if isinstance(e, AppEmbedIdentityFailed) or isinstance(e, AppChatNumOutOfBoundsFailed) or isinstance(e,
+ AppApiException):
+ raise e
+ raise AppAuthenticationFailed(1002, _('Authentication information is incorrect! illegal user'))
+
+
class TokenAuth(TokenAuthentication):
# 重新 authenticate 方法,自定义认证规则
def authenticate(self, request):
auth = request.META.get('HTTP_AUTHORIZATION')
# 未认证
if auth is None:
- raise AppAuthenticationFailed(1003, '未登录,请先登录')
+ raise AppAuthenticationFailed(1003, _('Not logged in, please log in first'))
try:
token_details = TokenDetails(auth)
for handle in handles:
if handle.support(request, auth, token_details.get_token_details):
return handle.handle(request, auth, token_details.get_token_details)
- raise AppAuthenticationFailed(1002, "身份验证信息不正确!非法用户")
+ raise AppAuthenticationFailed(1002, _('Authentication information is incorrect! illegal user'))
except Exception as e:
traceback.format_exc()
- if isinstance(e, AppEmbedIdentityFailed) or isinstance(e, AppChatNumOutOfBoundsFailed):
+ if isinstance(e, AppEmbedIdentityFailed) or isinstance(e, AppChatNumOutOfBoundsFailed) or isinstance(e,
+ AppApiException):
raise e
- raise AppAuthenticationFailed(1002, "身份验证信息不正确!非法用户")
+ raise AppAuthenticationFailed(1002, _('Authentication information is incorrect! illegal user'))
diff --git a/apps/common/auth/authentication.py b/apps/common/auth/authentication.py
index b27e1d1eabc..e11c9d552fb 100644
--- a/apps/common/auth/authentication.py
+++ b/apps/common/auth/authentication.py
@@ -11,7 +11,7 @@
from common.constants.permission_constants import ViewPermission, CompareConstants, RoleConstants, PermissionConstants, \
Permission
from common.exception.app_exception import AppUnauthorizedFailed
-
+from django.utils.translation import gettext_lazy as _
def exist_permissions_by_permission_constants(user_permission: List[PermissionConstants],
permission_list: List[PermissionConstants]):
@@ -59,11 +59,11 @@ def exist_permissions(user_role: List[RoleConstants], user_permission: List[Perm
**kwargs):
if isinstance(permission, ViewPermission):
return exist_permissions_by_view_permission(user_role, user_permission, permission, request, **kwargs)
- elif isinstance(permission, RoleConstants):
+ if isinstance(permission, RoleConstants):
return exist_role_by_role_constants(user_role, [permission])
- elif isinstance(permission, PermissionConstants):
+ if isinstance(permission, PermissionConstants):
return exist_permissions_by_permission_constants(user_permission, [permission])
- elif isinstance(permission, Permission):
+ if isinstance(permission, Permission):
return user_permission.__contains__(permission)
return False
@@ -72,8 +72,7 @@ def exist(user_role: List[RoleConstants], user_permission: List[PermissionConsta
if callable(permission):
p = permission(request, kwargs)
return exist_permissions(user_role, user_permission, p, request)
- else:
- return exist_permissions(user_role, user_permission, permission, request, **kwargs)
+ return exist_permissions(user_role, user_permission, permission, request, **kwargs)
def has_permissions(*permission, compare=CompareConstants.OR):
@@ -92,8 +91,7 @@ def run(view, request, **kwargs):
# 判断是否有权限
if any(exit_list) if compare == CompareConstants.OR else all(exit_list):
return func(view, request, **kwargs)
- else:
- raise AppUnauthorizedFailed(403, "没有权限访问")
+ raise AppUnauthorizedFailed(403, _('No permission to access'))
return run
diff --git a/apps/common/auth/handle/impl/application_key.py b/apps/common/auth/handle/impl/application_key.py
index b35ef80fc80..bddcfd43a09 100644
--- a/apps/common/auth/handle/impl/application_key.py
+++ b/apps/common/auth/handle/impl/application_key.py
@@ -13,15 +13,16 @@
from common.constants.authentication_type import AuthenticationType
from common.constants.permission_constants import Permission, Group, Operate, RoleConstants, Auth
from common.exception.app_exception import AppAuthenticationFailed
+from django.utils.translation import gettext_lazy as _
class ApplicationKey(AuthBaseHandle):
def handle(self, request, token: str, get_token_details):
application_api_key = QuerySet(ApplicationApiKey).filter(secret_key=token).first()
if application_api_key is None:
- raise AppAuthenticationFailed(500, "secret_key 无效")
+ raise AppAuthenticationFailed(500, _('Secret key is invalid'))
if not application_api_key.is_active:
- raise AppAuthenticationFailed(500, "secret_key 无效")
+ raise AppAuthenticationFailed(500, _('Secret key is invalid'))
permission_list = [Permission(group=Group.APPLICATION,
operate=Operate.USE,
dynamic_tag=str(
diff --git a/apps/common/auth/handle/impl/public_access_token.py b/apps/common/auth/handle/impl/public_access_token.py
index 1655187a83b..fdcff4021fe 100644
--- a/apps/common/auth/handle/impl/public_access_token.py
+++ b/apps/common/auth/handle/impl/public_access_token.py
@@ -12,8 +12,10 @@
from common.auth.handle.auth_base_handle import AuthBaseHandle
from common.constants.authentication_type import AuthenticationType
from common.constants.permission_constants import RoleConstants, Permission, Group, Operate, Auth
-from common.exception.app_exception import AppAuthenticationFailed
-
+from common.exception.app_exception import AppAuthenticationFailed, ChatException
+from common.models.db_model_manage import DBModelManage
+from common.util.common import password_encrypt
+from django.utils.translation import gettext_lazy as _
class PublicAccessToken(AuthBaseHandle):
def support(self, request, token: str, get_token_details):
@@ -29,12 +31,27 @@ def handle(self, request, token: str, get_token_details):
auth_details = get_token_details()
application_access_token = QuerySet(ApplicationAccessToken).filter(
application_id=auth_details.get('application_id')).first()
+ if request.path != '/api/application/profile':
+ application_setting_model = DBModelManage.get_model('application_setting')
+ xpack_cache = DBModelManage.get_model('xpack_cache')
+ X_PACK_LICENSE_IS_VALID = False if xpack_cache is None else xpack_cache.get('XPACK_LICENSE_IS_VALID', False)
+ if application_setting_model is not None and X_PACK_LICENSE_IS_VALID:
+ application_setting = QuerySet(application_setting_model).filter(application_id=str(
+ application_access_token.application_id)).first()
+                if application_setting is not None and application_setting.authentication:
+ authentication = auth_details.get('authentication', {})
+ if authentication is None:
+ authentication = {}
+ if application_setting.authentication_value.get('type') != authentication.get(
+ 'type') or password_encrypt(
+ application_setting.authentication_value.get('value')) != authentication.get('value'):
+ raise ChatException(1002, _('Authentication information is incorrect'))
if application_access_token is None:
- raise AppAuthenticationFailed(1002, "身份验证信息不正确")
+ raise AppAuthenticationFailed(1002, _('Authentication information is incorrect'))
if not application_access_token.is_active:
- raise AppAuthenticationFailed(1002, "身份验证信息不正确")
+ raise AppAuthenticationFailed(1002, _('Authentication information is incorrect'))
if not application_access_token.access_token == auth_details.get('access_token'):
- raise AppAuthenticationFailed(1002, "身份验证信息不正确")
+ raise AppAuthenticationFailed(1002, _('Authentication information is incorrect'))
return application_access_token.application.user, Auth(
role_list=[RoleConstants.APPLICATION_ACCESS_TOKEN],
diff --git a/apps/common/auth/handle/impl/user_token.py b/apps/common/auth/handle/impl/user_token.py
index 6559797ba3f..dbb6bd2b51a 100644
--- a/apps/common/auth/handle/impl/user_token.py
+++ b/apps/common/auth/handle/impl/user_token.py
@@ -17,7 +17,7 @@
from django.core import cache
from users.models.user import get_user_dynamics_permission
-
+from django.utils.translation import gettext_lazy as _
token_cache = cache.caches['token_cache']
@@ -31,7 +31,7 @@ def support(self, request, token: str, get_token_details):
def handle(self, request, token: str, get_token_details):
cache_token = token_cache.get(token)
if cache_token is None:
- raise AppAuthenticationFailed(1002, "登录过期")
+ raise AppAuthenticationFailed(1002, _('Login expired'))
auth_details = get_token_details()
user = QuerySet(User).get(id=auth_details['id'])
# 续期
diff --git a/apps/common/cache/file_cache.py b/apps/common/cache/file_cache.py
index 72b1201d104..45b5a73497e 100644
--- a/apps/common/cache/file_cache.py
+++ b/apps/common/cache/file_cache.py
@@ -12,7 +12,7 @@
import time
from diskcache import Cache
-from django.core.cache.backends.base import DEFAULT_TIMEOUT, BaseCache
+from django.core.cache.backends.base import BaseCache
class FileCache(BaseCache):
@@ -29,35 +29,58 @@ def _createdir(self):
finally:
os.umask(old_umask)
- def add(self, key, value, timeout=DEFAULT_TIMEOUT, version=None):
+ def add(self, key, value, timeout=None, version=None):
expire = timeout if isinstance(timeout, int) or isinstance(timeout,
- float) else timeout.total_seconds()
- return self.cache.add(key, value=value, expire=expire)
+ float) or timeout is None else timeout.total_seconds()
+ return self.cache.add(self.get_key(key, version), value=value, expire=expire)
- def set(self, key, value, timeout=DEFAULT_TIMEOUT, version=None):
+ def set(self, key, value, timeout=None, version=None):
expire = timeout if isinstance(timeout, int) or isinstance(timeout,
- float) else timeout.total_seconds()
- return self.cache.set(key, value=value, expire=expire)
+ float) or timeout is None else timeout.total_seconds()
+ return self.cache.set(self.get_key(key, version), value=value, expire=expire)
def get(self, key, default=None, version=None):
- return self.cache.get(key, default=default)
+ return self.cache.get(self.get_key(key, version), default=default)
+
+ @staticmethod
+ def get_key(key, version):
+ if version is None:
+ return f"default:{key}"
+ return f"{version}:{key}"
def delete(self, key, version=None):
- return self.cache.delete(key)
+ return self.cache.delete(self.get_key(key, version))
- def touch(self, key, timeout=DEFAULT_TIMEOUT, version=None):
+ def touch(self, key, timeout=None, version=None):
expire = timeout if isinstance(timeout, int) or isinstance(timeout,
float) else timeout.total_seconds()
- return self.cache.touch(key, expire=expire)
+ return self.cache.touch(self.get_key(key, version), expire=expire)
- def ttl(self, key):
+ def ttl(self, key, version=None):
"""
获取key的剩余时间
:param key: key
:return: 剩余时间
+ @param version:
"""
- value, expire_time = self.cache.get(key, expire_time=True)
+ value, expire_time = self.cache.get(self.get_key(key, version), expire_time=True)
if value is None:
return None
return datetime.timedelta(seconds=math.ceil(expire_time - time.time()))
+
+ def clear_by_application_id(self, application_id):
+ delete_keys = []
+ for key in self.cache.iterkeys():
+ value = self.cache.get(key)
+ if (hasattr(value,
+ 'application') and value.application is not None and value.application.id is not None and
+ str(
+ value.application.id) == application_id):
+ delete_keys.append(key)
+ for key in delete_keys:
+ self.cache.delete(key)
+
+ def clear_timeout_data(self):
+ for key in self.cache.iterkeys():
+ self.get(key)
diff --git a/apps/common/cache/mem_cache.py b/apps/common/cache/mem_cache.py
index 9bb6c45ba1b..5afb1e56265 100644
--- a/apps/common/cache/mem_cache.py
+++ b/apps/common/cache/mem_cache.py
@@ -29,3 +29,19 @@ def get(self, key, default=None, version=None):
pickled = self._cache[key]
self._cache.move_to_end(key, last=False)
return pickled
+
+ def clear_by_application_id(self, application_id):
+ delete_keys = []
+ for key in self._cache.keys():
+ value = self._cache.get(key)
+ if (hasattr(value,
+ 'application') and value.application is not None and value.application.id is not None and
+ str(
+ value.application.id) == application_id):
+ delete_keys.append(key)
+ for key in delete_keys:
+ self._delete(key)
+
+ def clear_timeout_data(self):
+        for key in list(self._cache.keys()):
+ self.get(key)
diff --git a/apps/common/cache_data/application_access_token_cache.py b/apps/common/cache_data/application_access_token_cache.py
new file mode 100644
index 00000000000..54f2a7e5405
--- /dev/null
+++ b/apps/common/cache_data/application_access_token_cache.py
@@ -0,0 +1,31 @@
+# coding=utf-8
+"""
+ @project: MaxKB
+ @Author:虎
+ @file: application_access_token_cache.py
+ @date:2024/7/25 11:34
+ @desc:
+"""
+from django.core.cache import cache
+from django.db.models import QuerySet
+
+from application.models.api_key_model import ApplicationAccessToken
+from common.constants.cache_code_constants import CacheCodeConstants
+from common.util.cache_util import get_cache
+
+
+@get_cache(cache_key=lambda access_token, use_get_data: access_token,
+ use_get_data=lambda access_token, use_get_data: use_get_data,
+ version=CacheCodeConstants.APPLICATION_ACCESS_TOKEN_CACHE.value)
+def get_application_access_token(access_token, use_get_data):
+ application_access_token = QuerySet(ApplicationAccessToken).filter(access_token=access_token).first()
+ if application_access_token is None:
+ return None
+ return {'white_active': application_access_token.white_active,
+ 'white_list': application_access_token.white_list,
+ 'application_icon': application_access_token.application.icon,
+ 'application_name': application_access_token.application.name}
+
+
+def del_application_access_token(access_token):
+ cache.delete(access_token, version=CacheCodeConstants.APPLICATION_ACCESS_TOKEN_CACHE.value)
diff --git a/apps/common/cache_data/application_api_key_cache.py b/apps/common/cache_data/application_api_key_cache.py
new file mode 100644
index 00000000000..a7d810cee38
--- /dev/null
+++ b/apps/common/cache_data/application_api_key_cache.py
@@ -0,0 +1,27 @@
+# coding=utf-8
+"""
+ @project: MaxKB
+ @Author:虎
+ @file: application_api_key_cache.py
+ @date:2024/7/25 11:30
+ @desc:
+"""
+from django.core.cache import cache
+from django.db.models import QuerySet
+
+from application.models.api_key_model import ApplicationApiKey
+from common.constants.cache_code_constants import CacheCodeConstants
+from common.util.cache_util import get_cache
+
+
+@get_cache(cache_key=lambda secret_key, use_get_data: secret_key,
+ use_get_data=lambda secret_key, use_get_data: use_get_data,
+ version=CacheCodeConstants.APPLICATION_API_KEY_CACHE.value)
+def get_application_api_key(secret_key, use_get_data):
+    application_api_key = QuerySet(ApplicationApiKey).filter(secret_key=secret_key).first()
+    if application_api_key is None: return None
+    return {'allow_cross_domain': application_api_key.allow_cross_domain, 'cross_domain_list': application_api_key.cross_domain_list}
+
+
+def del_application_api_key(secret_key):
+ cache.delete(secret_key, version=CacheCodeConstants.APPLICATION_API_KEY_CACHE.value)
diff --git a/apps/common/cache_data/static_resource_cache.py b/apps/common/cache_data/static_resource_cache.py
new file mode 100644
index 00000000000..1bb84e96729
--- /dev/null
+++ b/apps/common/cache_data/static_resource_cache.py
@@ -0,0 +1,19 @@
+# coding=utf-8
+"""
+ @project: MaxKB
+ @Author:虎
+ @file: static_resource_cache.py
+ @date:2024/7/25 11:30
+ @desc:
+"""
+from common.constants.cache_code_constants import CacheCodeConstants
+from common.util.cache_util import get_cache
+
+
+@get_cache(cache_key=lambda index_path: index_path,
+ version=CacheCodeConstants.STATIC_RESOURCE_CACHE.value)
+def get_index_html(index_path):
+ file = open(index_path, "r", encoding='utf-8')
+ content = file.read()
+ file.close()
+ return content
diff --git a/apps/common/chunk/__init__.py b/apps/common/chunk/__init__.py
new file mode 100644
index 00000000000..a4babde7680
--- /dev/null
+++ b/apps/common/chunk/__init__.py
@@ -0,0 +1,18 @@
+# coding=utf-8
+"""
+ @project: MaxKB
+ @Author:虎
+ @file: __init__.py
+ @date:2024/7/23 17:03
+ @desc:
+"""
+from common.chunk.impl.mark_chunk_handle import MarkChunkHandle
+
+handles = [MarkChunkHandle()]
+
+
+def text_to_chunk(text: str):
+ chunk_list = [text]
+ for handle in handles:
+ chunk_list = handle.handle(chunk_list)
+ return chunk_list
diff --git a/apps/common/chunk/i_chunk_handle.py b/apps/common/chunk/i_chunk_handle.py
new file mode 100644
index 00000000000..d53575d11a8
--- /dev/null
+++ b/apps/common/chunk/i_chunk_handle.py
@@ -0,0 +1,16 @@
+# coding=utf-8
+"""
+ @project: MaxKB
+ @Author:虎
+ @file: i_chunk_handle.py
+ @date:2024/7/23 16:51
+ @desc:
+"""
+from abc import ABC, abstractmethod
+from typing import List
+
+
+class IChunkHandle(ABC):
+ @abstractmethod
+ def handle(self, chunk_list: List[str]):
+ pass
diff --git a/apps/common/chunk/impl/mark_chunk_handle.py b/apps/common/chunk/impl/mark_chunk_handle.py
new file mode 100644
index 00000000000..5bca2f4450b
--- /dev/null
+++ b/apps/common/chunk/impl/mark_chunk_handle.py
@@ -0,0 +1,40 @@
+# coding=utf-8
+"""
+ @project: MaxKB
+ @Author:虎
+ @file: mark_chunk_handle.py
+ @date:2024/7/23 16:52
+ @desc:
+"""
+import re
+from typing import List
+
+from common.chunk.i_chunk_handle import IChunkHandle
+
+max_chunk_len = 256
+split_chunk_pattern = r'.{1,%d}[。| |\\.|!|;|;|!|\n]' % max_chunk_len
+max_chunk_pattern = r'.{1,%d}' % max_chunk_len
+
+
+class MarkChunkHandle(IChunkHandle):
+ def handle(self, chunk_list: List[str]):
+ result = []
+ for chunk in chunk_list:
+ chunk_result = re.findall(split_chunk_pattern, chunk, flags=re.DOTALL)
+ for c_r in chunk_result:
+ if len(c_r.strip()) > 0:
+ result.append(c_r.strip())
+
+ other_chunk_list = re.split(split_chunk_pattern, chunk, flags=re.DOTALL)
+ for other_chunk in other_chunk_list:
+ if len(other_chunk) > 0:
+ if len(other_chunk) < max_chunk_len:
+ if len(other_chunk.strip()) > 0:
+ result.append(other_chunk.strip())
+ else:
+ max_chunk_list = re.findall(max_chunk_pattern, other_chunk, flags=re.DOTALL)
+ for m_c in max_chunk_list:
+ if len(m_c.strip()) > 0:
+ result.append(m_c.strip())
+
+ return result
diff --git a/apps/common/config/embedding_config.py b/apps/common/config/embedding_config.py
index 367dce7f877..69081be055d 100644
--- a/apps/common/config/embedding_config.py
+++ b/apps/common/config/embedding_config.py
@@ -6,30 +6,60 @@
@date:2023/10/23 16:03
@desc:
"""
-from langchain_community.embeddings import HuggingFaceEmbeddings
+import threading
+import time
-from smartdoc.const import CONFIG
+from common.cache.mem_cache import MemCache
+_lock = threading.Lock()
+locks = {}
-class EmbeddingModel:
- instance = None
+
+class ModelManage:
+ cache = MemCache('model', {})
+ up_clear_time = time.time()
+
+ @staticmethod
+ def _get_lock(_id):
+ lock = locks.get(_id)
+ if lock is None:
+ with _lock:
+ lock = locks.get(_id)
+ if lock is None:
+ lock = threading.Lock()
+ locks[_id] = lock
+
+ return lock
+
+ @staticmethod
+ def get_model(_id, get_model):
+ model_instance = ModelManage.cache.get(_id)
+ if model_instance is None:
+ lock = ModelManage._get_lock(_id)
+ with lock:
+ model_instance = ModelManage.cache.get(_id)
+ if model_instance is None:
+ model_instance = get_model(_id)
+ ModelManage.cache.set(_id, model_instance, timeout=60 * 60 * 8)
+ else:
+ if model_instance.is_cache_model():
+ ModelManage.cache.touch(_id, timeout=60 * 60 * 8)
+ else:
+ model_instance = get_model(_id)
+ ModelManage.cache.set(_id, model_instance, timeout=60 * 60 * 8)
+ ModelManage.clear_timeout_cache()
+ return model_instance
+
+ @staticmethod
+ def clear_timeout_cache():
+ if time.time() - ModelManage.up_clear_time > 60 * 60:
+ threading.Thread(target=lambda: ModelManage.cache.clear_timeout_data()).start()
+ ModelManage.up_clear_time = time.time()
@staticmethod
- def get_embedding_model():
- """
- 获取向量化模型
- :return:
- """
- if EmbeddingModel.instance is None:
- model_name = CONFIG.get('EMBEDDING_MODEL_NAME')
- cache_folder = CONFIG.get('EMBEDDING_MODEL_PATH')
- device = CONFIG.get('EMBEDDING_DEVICE')
- e = HuggingFaceEmbeddings(
- model_name=model_name,
- cache_folder=cache_folder,
- model_kwargs={'device': device})
- EmbeddingModel.instance = e
- return EmbeddingModel.instance
+ def delete_key(_id):
+ if ModelManage.cache.has_key(_id):
+ ModelManage.cache.delete(_id)
class VectorStore:
diff --git a/apps/common/config/swagger_conf.py b/apps/common/config/swagger_conf.py
index b3ba2720de5..d17486d532d 100644
--- a/apps/common/config/swagger_conf.py
+++ b/apps/common/config/swagger_conf.py
@@ -6,7 +6,7 @@
@date:2023/9/5 14:01
@desc: 用于swagger 分组
"""
-
+from drf_yasg.generators import OpenAPISchemaGenerator
from drf_yasg.inspectors import SwaggerAutoSchema
tags_dict = {
@@ -20,3 +20,10 @@ def get_tags(self, operation_keys=None):
if "api" in tags and operation_keys:
return [tags_dict.get(operation_keys[1]) if operation_keys[1] in tags_dict else operation_keys[1]]
return tags
+
+
+class CustomOpenAPISchemaGenerator(OpenAPISchemaGenerator):
+ def get_schema(self, request=None, public=False):
+ schema = super().get_schema(request, public)
+ schema.schemes = ['https', 'http']
+ return schema
diff --git a/apps/common/constants/authentication_type.py b/apps/common/constants/authentication_type.py
index 33163003efa..83586ee3b84 100644
--- a/apps/common/constants/authentication_type.py
+++ b/apps/common/constants/authentication_type.py
@@ -16,3 +16,5 @@ class AuthenticationType(Enum):
APPLICATION_ACCESS_TOKEN = "APPLICATION_ACCESS_TOKEN"
# key API
API_KEY = "API_KEY"
+    # Third-party integration
+ PLATFORM = 'PLATFORM'
diff --git a/apps/common/constants/cache_code_constants.py b/apps/common/constants/cache_code_constants.py
new file mode 100644
index 00000000000..dd64805f0fd
--- /dev/null
+++ b/apps/common/constants/cache_code_constants.py
@@ -0,0 +1,18 @@
+# coding=utf-8
+"""
+ @project: MaxKB
+ @Author:虎
+ @file: cache_code_constants.py
+ @date:2024/7/24 18:20
+ @desc:
+"""
+from enum import Enum
+
+
+class CacheCodeConstants(Enum):
+    # Application ACCESS_TOKEN cache
+ APPLICATION_ACCESS_TOKEN_CACHE = 'APPLICATION_ACCESS_TOKEN_CACHE'
+    # Static resource cache
+ STATIC_RESOURCE_CACHE = 'STATIC_RESOURCE_CACHE'
+    # Application API_KEY cache
+ APPLICATION_API_KEY_CACHE = 'APPLICATION_API_KEY_CACHE'
diff --git a/apps/common/constants/exception_code_constants.py b/apps/common/constants/exception_code_constants.py
index ba7a8105f14..821318d239e 100644
--- a/apps/common/constants/exception_code_constants.py
+++ b/apps/common/constants/exception_code_constants.py
@@ -9,6 +9,7 @@
from enum import Enum
from common.exception.app_exception import AppApiException
+from django.utils.translation import gettext_lazy as _
class ExceptionCodeConstantsValue:
@@ -27,13 +28,16 @@ def to_app_api_exception(self):
class ExceptionCodeConstants(Enum):
- INCORRECT_USERNAME_AND_PASSWORD = ExceptionCodeConstantsValue(1000, "用户名或者密码不正确")
- NOT_AUTHENTICATION = ExceptionCodeConstantsValue(1001, "请先登录,并携带用户Token")
- EMAIL_SEND_ERROR = ExceptionCodeConstantsValue(1002, "邮件发送失败")
- EMAIL_FORMAT_ERROR = ExceptionCodeConstantsValue(1003, "邮箱格式错误")
- EMAIL_IS_EXIST = ExceptionCodeConstantsValue(1004, "邮箱已经被注册,请勿重复注册")
- EMAIL_IS_NOT_EXIST = ExceptionCodeConstantsValue(1005, "邮箱尚未注册,请先注册")
- CODE_ERROR = ExceptionCodeConstantsValue(1005, "验证码不正确,或者验证码过期")
- USERNAME_IS_EXIST = ExceptionCodeConstantsValue(1006, "用户名已被使用,请使用其他用户名")
- USERNAME_ERROR = ExceptionCodeConstantsValue(1006, "用户名不能为空,并且长度在6-20")
- PASSWORD_NOT_EQ_RE_PASSWORD = ExceptionCodeConstantsValue(1007, "密码与确认密码不一致")
+ INCORRECT_USERNAME_AND_PASSWORD = ExceptionCodeConstantsValue(1000, _('The username or password is incorrect'))
+ NOT_AUTHENTICATION = ExceptionCodeConstantsValue(1001, _('Please log in first and bring the user Token'))
+ EMAIL_SEND_ERROR = ExceptionCodeConstantsValue(1002, _('Email sending failed'))
+ EMAIL_FORMAT_ERROR = ExceptionCodeConstantsValue(1003, _('Email format error'))
+ EMAIL_IS_EXIST = ExceptionCodeConstantsValue(1004, _('The email has been registered, please log in directly'))
+ EMAIL_IS_NOT_EXIST = ExceptionCodeConstantsValue(1005, _('The email is not registered, please register first'))
+ CODE_ERROR = ExceptionCodeConstantsValue(1005,
+ _('The verification code is incorrect or the verification code has expired'))
+ USERNAME_IS_EXIST = ExceptionCodeConstantsValue(1006, _('The username has been registered, please log in directly'))
+ USERNAME_ERROR = ExceptionCodeConstantsValue(1006,
+ _('The username cannot be empty and must be between 6 and 20 characters long.'))
+ PASSWORD_NOT_EQ_RE_PASSWORD = ExceptionCodeConstantsValue(1007,
+ _('Password and confirmation password are inconsistent'))
diff --git a/apps/common/constants/permission_constants.py b/apps/common/constants/permission_constants.py
index 04f86bbc796..a5c198da7e5 100644
--- a/apps/common/constants/permission_constants.py
+++ b/apps/common/constants/permission_constants.py
@@ -7,7 +7,7 @@
"""
from enum import Enum
from typing import List
-
+from django.utils.translation import gettext_lazy as _
class Group(Enum):
"""
@@ -58,10 +58,10 @@ def __init__(self, name: str, decs: str, group: RoleGroup):
class RoleConstants(Enum):
- ADMIN = Role("管理员", "管理员,预制目前不会使用", RoleGroup.USER)
- USER = Role("用户", "用户所有权限", RoleGroup.USER)
- APPLICATION_ACCESS_TOKEN = Role("会话", "只拥有应用会话框接口权限", RoleGroup.APPLICATION_ACCESS_TOKEN),
- APPLICATION_KEY = Role("应用私钥", "应用私钥", RoleGroup.APPLICATION_KEY)
+ ADMIN = Role(_("ADMIN"), _('Admin, prefabs are not currently used'), RoleGroup.USER)
+ USER = Role(_("USER"), _('All user permissions'), RoleGroup.USER)
+ APPLICATION_ACCESS_TOKEN = Role(_('chat'), _('Only has application dialog interface permissions'), RoleGroup.APPLICATION_ACCESS_TOKEN),
+ APPLICATION_KEY = Role(_('Apply private key'), _('Apply private key'), RoleGroup.APPLICATION_KEY)
class Permission:
diff --git a/apps/common/db/compiler.py b/apps/common/db/compiler.py
index 9a65f93e1b7..69640c8a082 100644
--- a/apps/common/db/compiler.py
+++ b/apps/common/db/compiler.py
@@ -7,9 +7,10 @@
@desc:
"""
-from django.core.exceptions import EmptyResultSet
+from django.core.exceptions import EmptyResultSet, FullResultSet
from django.db import NotSupportedError
from django.db.models.sql.compiler import SQLCompiler
+from django.db.transaction import TransactionManagementError
class AppSQLCompiler(SQLCompiler):
@@ -19,15 +20,16 @@ def __init__(self, query, connection, using, elide_empty=True, field_replace_dic
field_replace_dict = {}
self.field_replace_dict = field_replace_dict
- def get_query_str(self, with_limits=True, with_table_name=False):
+ def get_query_str(self, with_limits=True, with_table_name=False, with_col_aliases=False):
refcounts_before = self.query.alias_refcount.copy()
try:
- extra_select, order_by, group_by = self.pre_sql_setup()
+ combinator = self.query.combinator
+ extra_select, order_by, group_by = self.pre_sql_setup(
+ with_col_aliases=with_col_aliases or bool(combinator),
+ )
for_update_part = None
# Is a LIMIT/OFFSET clause needed?
- with_limit_offset = with_limits and (
- self.query.high_mark is not None or self.query.low_mark
- )
+ with_limit_offset = with_limits and self.query.is_sliced
combinator = self.query.combinator
features = self.connection.features
if combinator:
@@ -40,8 +42,14 @@ def get_query_str(self, with_limits=True, with_table_name=False):
result, params = self.get_combinator_sql(
combinator, self.query.combinator_all
)
+ elif self.qualify:
+ result, params = self.get_qualify_sql()
+ order_by = None
else:
distinct_fields, distinct_params = self.get_distinct()
+ # This must come after 'select', 'ordering', and 'distinct'
+ # (see docstring of get_from_clause() for details).
+ from_, f_params = self.get_from_clause()
try:
where, w_params = (
self.compile(self.where) if self.where is not None else ("", [])
@@ -51,11 +59,92 @@ def get_query_str(self, with_limits=True, with_table_name=False):
raise
# Use a predicate that's always False.
where, w_params = "0 = 1", []
- having, h_params = (
- self.compile(self.having) if self.having is not None else ("", [])
- )
+ except FullResultSet:
+ where, w_params = "", []
+ try:
+ having, h_params = (
+ self.compile(self.having)
+ if self.having is not None
+ else ("", [])
+ )
+ except FullResultSet:
+ having, h_params = "", []
result = []
params = []
+
+ if self.query.distinct:
+ distinct_result, distinct_params = self.connection.ops.distinct_sql(
+ distinct_fields,
+ distinct_params,
+ )
+ result += distinct_result
+ params += distinct_params
+
+ out_cols = []
+ for _, (s_sql, s_params), alias in self.select + extra_select:
+ if alias:
+ s_sql = "%s AS %s" % (
+ s_sql,
+ self.connection.ops.quote_name(alias),
+ )
+ params.extend(s_params)
+ out_cols.append(s_sql)
+
+ params.extend(f_params)
+
+ if self.query.select_for_update and features.has_select_for_update:
+ if (
+ self.connection.get_autocommit()
+ # Don't raise an exception when database doesn't
+ # support transactions, as it's a noop.
+ and features.supports_transactions
+ ):
+ raise TransactionManagementError(
+ "select_for_update cannot be used outside of a transaction."
+ )
+
+ if (
+ with_limit_offset
+ and not features.supports_select_for_update_with_limit
+ ):
+ raise NotSupportedError(
+ "LIMIT/OFFSET is not supported with "
+ "select_for_update on this database backend."
+ )
+ nowait = self.query.select_for_update_nowait
+ skip_locked = self.query.select_for_update_skip_locked
+ of = self.query.select_for_update_of
+ no_key = self.query.select_for_no_key_update
+ # If it's a NOWAIT/SKIP LOCKED/OF/NO KEY query but the
+ # backend doesn't support it, raise NotSupportedError to
+ # prevent a possible deadlock.
+ if nowait and not features.has_select_for_update_nowait:
+ raise NotSupportedError(
+ "NOWAIT is not supported on this database backend."
+ )
+ elif skip_locked and not features.has_select_for_update_skip_locked:
+ raise NotSupportedError(
+ "SKIP LOCKED is not supported on this database backend."
+ )
+ elif of and not features.has_select_for_update_of:
+ raise NotSupportedError(
+ "FOR UPDATE OF is not supported on this database backend."
+ )
+ elif no_key and not features.has_select_for_no_key_update:
+ raise NotSupportedError(
+ "FOR NO KEY UPDATE is not supported on this "
+ "database backend."
+ )
+ for_update_part = self.connection.ops.for_update_sql(
+ nowait=nowait,
+ skip_locked=skip_locked,
+ of=self.get_select_for_update_of_arguments(),
+ no_key=no_key,
+ )
+
+ if for_update_part and features.for_update_after_from:
+ result.append(for_update_part)
+
if where:
result.append("WHERE %s" % where)
params.extend(w_params)
@@ -91,7 +180,11 @@ def get_query_str(self, with_limits=True, with_table_name=False):
for _, (o_sql, o_params, _) in order_by:
ordering.append(o_sql)
params.extend(o_params)
- result.append("ORDER BY %s" % ", ".join(ordering))
+ order_by_sql = "ORDER BY %s" % ", ".join(ordering)
+ if combinator and features.requires_compound_order_by_subquery:
+ result = ["SELECT * FROM (", *result, ")", order_by_sql]
+ else:
+ result.append(order_by_sql)
if with_limit_offset:
result.append(
@@ -102,6 +195,7 @@ def get_query_str(self, with_limits=True, with_table_name=False):
if for_update_part and not features.for_update_after_from:
result.append(for_update_part)
+
from_, f_params = self.get_from_clause()
sql = " ".join(result)
if not with_table_name:
diff --git a/apps/common/db/search.py b/apps/common/db/search.py
index 76366715439..bef42a1414a 100644
--- a/apps/common/db/search.py
+++ b/apps/common/db/search.py
@@ -12,7 +12,7 @@
from django.db.models import QuerySet
from common.db.compiler import AppSQLCompiler
-from common.db.sql_execute import select_one, select_list
+from common.db.sql_execute import select_one, select_list, update_execute
from common.response.result import Page
@@ -109,6 +109,24 @@ def native_search(queryset: QuerySet | Dict[str, QuerySet], select_string: str,
return select_list(exec_sql, exec_params)
+def native_update(queryset: QuerySet | Dict[str, QuerySet], select_string: str,
+ field_replace_dict: None | Dict[str, Dict[str, str]] | Dict[str, str] = None,
+ with_table_name=False):
+ """
+    复杂更新:按查询条件构造器生成 where 条件并执行原生更新语句
+ :param with_table_name: 生成sql是否包含表名
+ :param queryset: 查询条件构造器
+    :param select_string: 更新语句前缀 不包括 where limit 等信息
+ :param field_replace_dict: 需要替换的字段
+    :return: 受影响的行数
+ """
+ if isinstance(queryset, Dict):
+ exec_sql, exec_params = generate_sql_by_query_dict(queryset, select_string, field_replace_dict, with_table_name)
+ else:
+ exec_sql, exec_params = generate_sql_by_query(queryset, select_string, field_replace_dict, with_table_name)
+ return update_execute(exec_sql, exec_params)
+
+
def page_search(current_page: int, page_size: int, queryset: QuerySet, post_records_handler):
"""
分页查询
diff --git a/apps/common/db/sql_execute.py b/apps/common/db/sql_execute.py
index 79e7de46a18..b12297e1f9a 100644
--- a/apps/common/db/sql_execute.py
+++ b/apps/common/db/sql_execute.py
@@ -36,8 +36,9 @@ def update_execute(sql: str, params):
"""
with connection.cursor() as cursor:
cursor.execute(sql, params)
+ affected_rows = cursor.rowcount
cursor.close()
- return None
+ return affected_rows
def select_list(sql: str, params: List):
diff --git a/apps/common/encoder/encoder.py b/apps/common/encoder/encoder.py
new file mode 100644
index 00000000000..02d0ec88cc9
--- /dev/null
+++ b/apps/common/encoder/encoder.py
@@ -0,0 +1,30 @@
+# coding=utf-8
+"""
+ @project: MaxKB
+ @Author:虎
+ @file: encoder.py
+ @date:2025/3/17 16:38
+ @desc:
+"""
+import datetime
+import decimal
+import json
+import uuid
+
+from django.core.files.uploadedfile import InMemoryUploadedFile, TemporaryUploadedFile
+
+
+class SystemEncoder(json.JSONEncoder):
+ def default(self, obj):
+ if isinstance(obj, uuid.UUID):
+ return str(obj)
+ if isinstance(obj, datetime.datetime):
+ return obj.strftime("%Y-%m-%d %H:%M:%S")
+ if isinstance(obj, decimal.Decimal):
+ return float(obj)
+ if isinstance(obj, InMemoryUploadedFile):
+ return {'name': obj.name, 'size': obj.size}
+ if isinstance(obj, TemporaryUploadedFile):
+ return {'name': obj.name, 'size': obj.size}
+ else:
+ return json.JSONEncoder.default(self, obj)
diff --git a/apps/common/event/__init__.py b/apps/common/event/__init__.py
index 909740b7ab2..ddf6dd6f9f2 100644
--- a/apps/common/event/__init__.py
+++ b/apps/common/event/__init__.py
@@ -9,10 +9,25 @@
import setting.models
from setting.models import Model
from .listener_manage import *
+from django.utils.translation import gettext as _
+
+from ..db.sql_execute import update_execute
+from common.lock.impl.file_lock import FileLock
+
+lock = FileLock()
+update_document_status_sql = """
+UPDATE "public"."document"
+SET status ="replace"("replace"("replace"(status, '1', '3'), '0', '3'), '4', '3')
+WHERE status ~ '1|0|4'
+"""
def run():
- listener_manage.ListenerManagement().run()
- QuerySet(Document).filter(status=Status.embedding).update(**{'status': Status.error})
- QuerySet(Model).filter(status=setting.models.Status.DOWNLOAD).update(status=setting.models.Status.ERROR,
- meta={'message': "下载程序被中断,请重试"})
+ if lock.try_lock('event_init', 30 * 30):
+ try:
+ QuerySet(Model).filter(status=setting.models.Status.DOWNLOAD).update(status=setting.models.Status.ERROR,
+ meta={'message': _(
+ 'The download process was interrupted, please try again')})
+ update_execute(update_document_status_sql, [])
+ finally:
+ lock.un_lock('event_init')
diff --git a/apps/common/event/common.py b/apps/common/event/common.py
index e35123758cf..a54d24df23c 100644
--- a/apps/common/event/common.py
+++ b/apps/common/event/common.py
@@ -8,20 +8,43 @@
"""
from concurrent.futures import ThreadPoolExecutor
+from django.core.cache.backends.locmem import LocMemCache
+
work_thread_pool = ThreadPoolExecutor(5)
embedding_thread_pool = ThreadPoolExecutor(3)
+memory_cache = LocMemCache('task', {"OPTIONS": {"MAX_ENTRIES": 1000}})
+
def poxy(poxy_function):
- def inner(args):
- work_thread_pool.submit(poxy_function, args)
+ def inner(args, **keywords):
+ work_thread_pool.submit(poxy_function, args, **keywords)
return inner
+def get_cache_key(poxy_function, args):
+ return poxy_function.__name__ + str(args)
+
+
+def get_cache_poxy_function(poxy_function, cache_key):
+ def fun(args, **keywords):
+ try:
+ poxy_function(args, **keywords)
+ finally:
+ memory_cache.delete(cache_key)
+
+ return fun
+
+
def embedding_poxy(poxy_function):
- def inner(args):
- embedding_thread_pool.submit(poxy_function, args)
+ def inner(*args, **keywords):
+ key = get_cache_key(poxy_function, args)
+ if memory_cache.has_key(key):
+ return
+ memory_cache.add(key, None)
+ f = get_cache_poxy_function(poxy_function, key)
+ embedding_thread_pool.submit(f, args, **keywords)
return inner
diff --git a/apps/common/event/listener_manage.py b/apps/common/event/listener_manage.py
index 415d20562ec..dd2a54a0c67 100644
--- a/apps/common/event/listener_manage.py
+++ b/apps/common/event/listener_manage.py
@@ -8,25 +8,29 @@
"""
import logging
import os
+import threading
+import datetime
import traceback
from typing import List
import django.db.models
-from blinker import signal
from django.db.models import QuerySet
+from django.db.models.functions import Substr, Reverse
+from langchain_core.embeddings import Embeddings
-from common.config.embedding_config import VectorStore, EmbeddingModel
-from common.db.search import native_search, get_dynamics_model
-from common.event.common import poxy, embedding_poxy
+from common.config.embedding_config import VectorStore
+from common.db.search import native_search, get_dynamics_model, native_update
from common.util.file_util import get_file_content
-from common.util.fork import ForkManage, Fork
from common.util.lock import try_lock, un_lock
-from dataset.models import Paragraph, Status, Document, ProblemParagraphMapping
-from embedding.models import SourceType
+from common.util.page_utils import page_desc
+from dataset.models import Paragraph, Status, Document, ProblemParagraphMapping, TaskType, State
+from embedding.models import SourceType, SearchMode
from smartdoc.conf import PROJECT_DIR
+from django.utils.translation import gettext_lazy as _
-max_kb_error = logging.getLogger("max_kb_error")
-max_kb = logging.getLogger("max_kb")
+max_kb_error = logging.getLogger(__file__)
+max_kb = logging.getLogger(__file__)
+lock = threading.Lock()
class SyncWebDatasetArgs:
@@ -45,9 +49,10 @@ def __init__(self, source_url_list: List[str], selector: str, handler):
class UpdateProblemArgs:
- def __init__(self, problem_id: str, problem_content: str):
+ def __init__(self, problem_id: str, problem_content: str, embedding_model: Embeddings):
self.problem_id = problem_id
self.problem_content = problem_content
+ self.embedding_model = embedding_model
class UpdateEmbeddingDatasetIdArgs:
@@ -57,45 +62,67 @@ def __init__(self, paragraph_id_list: List[str], target_dataset_id: str):
class UpdateEmbeddingDocumentIdArgs:
- def __init__(self, paragraph_id_list: List[str], target_document_id: str, target_dataset_id: str):
+ def __init__(self, paragraph_id_list: List[str], target_document_id: str, target_dataset_id: str,
+ target_embedding_model: Embeddings = None):
self.paragraph_id_list = paragraph_id_list
self.target_document_id = target_document_id
self.target_dataset_id = target_dataset_id
+ self.target_embedding_model = target_embedding_model
class ListenerManagement:
- embedding_by_problem_signal = signal("embedding_by_problem")
- embedding_by_paragraph_signal = signal("embedding_by_paragraph")
- embedding_by_dataset_signal = signal("embedding_by_dataset")
- embedding_by_document_signal = signal("embedding_by_document")
- delete_embedding_by_document_signal = signal("delete_embedding_by_document")
- delete_embedding_by_document_list_signal = signal("delete_embedding_by_document_list")
- delete_embedding_by_dataset_signal = signal("delete_embedding_by_dataset")
- delete_embedding_by_paragraph_signal = signal("delete_embedding_by_paragraph")
- delete_embedding_by_source_signal = signal("delete_embedding_by_source")
- enable_embedding_by_paragraph_signal = signal('enable_embedding_by_paragraph')
- disable_embedding_by_paragraph_signal = signal('disable_embedding_by_paragraph')
- init_embedding_model_signal = signal('init_embedding_model')
- sync_web_dataset_signal = signal('sync_web_dataset')
- sync_web_document_signal = signal('sync_web_document')
- update_problem_signal = signal('update_problem')
- delete_embedding_by_source_ids_signal = signal('delete_embedding_by_source_ids')
- delete_embedding_by_dataset_id_list_signal = signal("delete_embedding_by_dataset_id_list")
@staticmethod
- def embedding_by_problem(args):
- VectorStore.get_embedding_vector().save(**args)
+ def embedding_by_problem(args, embedding_model: Embeddings):
+ VectorStore.get_embedding_vector().save(**args, embedding=embedding_model)
@staticmethod
- @embedding_poxy
- def embedding_by_paragraph(paragraph_id):
+ def embedding_by_paragraph_list(paragraph_id_list, embedding_model: Embeddings):
+ try:
+ data_list = native_search(
+ {'problem': QuerySet(get_dynamics_model({'paragraph.id': django.db.models.CharField()})).filter(
+ **{'paragraph.id__in': paragraph_id_list}),
+ 'paragraph': QuerySet(Paragraph).filter(id__in=paragraph_id_list)},
+ select_string=get_file_content(
+ os.path.join(PROJECT_DIR, "apps", "common", 'sql', 'list_embedding_text.sql')))
+ ListenerManagement.embedding_by_paragraph_data_list(data_list, paragraph_id_list=paragraph_id_list,
+ embedding_model=embedding_model)
+ except Exception as e:
+ max_kb_error.error(_('Query vector data: {paragraph_id_list} error {error} {traceback}').format(
+ paragraph_id_list=paragraph_id_list, error=str(e), traceback=traceback.format_exc()))
+
+ @staticmethod
+ def embedding_by_paragraph_data_list(data_list, paragraph_id_list, embedding_model: Embeddings):
+ max_kb.info(_('Start--->Embedding paragraph: {paragraph_id_list}').format(paragraph_id_list=paragraph_id_list))
+ status = Status.success
+ try:
+            # 先删除该批段落已有的向量数据
+ VectorStore.get_embedding_vector().delete_by_paragraph_ids(paragraph_id_list)
+
+ def is_save_function():
+ return QuerySet(Paragraph).filter(id__in=paragraph_id_list).exists()
+
+ # 批量向量化
+ VectorStore.get_embedding_vector().batch_save(data_list, embedding_model, is_save_function)
+ except Exception as e:
+ max_kb_error.error(_('Vectorized paragraph: {paragraph_id_list} error {error} {traceback}').format(
+ paragraph_id_list=paragraph_id_list, error=str(e), traceback=traceback.format_exc()))
+ status = Status.error
+ finally:
+ QuerySet(Paragraph).filter(id__in=paragraph_id_list).update(**{'status': status})
+ max_kb.info(
+ _('End--->Embedding paragraph: {paragraph_id_list}').format(paragraph_id_list=paragraph_id_list))
+
+ @staticmethod
+ def embedding_by_paragraph(paragraph_id, embedding_model: Embeddings):
"""
向量化段落 根据段落id
- :param paragraph_id: 段落id
- :return: None
+ @param paragraph_id: 段落id
+ @param embedding_model: 向量模型
"""
- max_kb.info(f"开始--->向量化段落:{paragraph_id}")
- status = Status.success
+ max_kb.info(_('Start--->Embedding paragraph: {paragraph_id}').format(paragraph_id=paragraph_id))
+ # 更新到开始状态
+ ListenerManagement.update_status(QuerySet(Paragraph).filter(id=paragraph_id), TaskType.EMBEDDING, State.STARTED)
try:
data_list = native_search(
{'problem': QuerySet(get_dynamics_model({'paragraph.id': django.db.models.CharField()})).filter(
@@ -105,64 +132,184 @@ def embedding_by_paragraph(paragraph_id):
os.path.join(PROJECT_DIR, "apps", "common", 'sql', 'list_embedding_text.sql')))
# 删除段落
VectorStore.get_embedding_vector().delete_by_paragraph_id(paragraph_id)
+
+ def is_the_task_interrupted():
+ _paragraph = QuerySet(Paragraph).filter(id=paragraph_id).first()
+ if _paragraph is None or Status(_paragraph.status)[TaskType.EMBEDDING] == State.REVOKE:
+ return True
+ return False
+
# 批量向量化
- VectorStore.get_embedding_vector().batch_save(data_list)
+ VectorStore.get_embedding_vector().batch_save(data_list, embedding_model, is_the_task_interrupted)
+            # 更新到成功状态
+ ListenerManagement.update_status(QuerySet(Paragraph).filter(id=paragraph_id), TaskType.EMBEDDING,
+ State.SUCCESS)
except Exception as e:
- max_kb_error.error(f'向量化段落:{paragraph_id}出现错误{str(e)}{traceback.format_exc()}')
- status = Status.error
+ max_kb_error.error(_('Vectorized paragraph: {paragraph_id} error {error} {traceback}').format(
+ paragraph_id=paragraph_id, error=str(e), traceback=traceback.format_exc()))
+ ListenerManagement.update_status(QuerySet(Paragraph).filter(id=paragraph_id), TaskType.EMBEDDING,
+ State.FAILURE)
finally:
- QuerySet(Paragraph).filter(id=paragraph_id).update(**{'status': status})
- max_kb.info(f'结束--->向量化段落:{paragraph_id}')
+ max_kb.info(_('End--->Embedding paragraph: {paragraph_id}').format(paragraph_id=paragraph_id))
@staticmethod
- @embedding_poxy
- def embedding_by_document(document_id):
+ def embedding_by_data_list(data_list: List, embedding_model: Embeddings):
+ # 批量向量化
+ VectorStore.get_embedding_vector().batch_save(data_list, embedding_model, lambda: True)
+
+ @staticmethod
+ def get_embedding_paragraph_apply(embedding_model, is_the_task_interrupted, post_apply=lambda: None):
+ def embedding_paragraph_apply(paragraph_list):
+ for paragraph in paragraph_list:
+ if is_the_task_interrupted():
+ break
+ ListenerManagement.embedding_by_paragraph(str(paragraph.get('id')), embedding_model)
+ post_apply()
+
+ return embedding_paragraph_apply
+
+ @staticmethod
+ def get_aggregation_document_status(document_id):
+ def aggregation_document_status():
+ pass
+ sql = get_file_content(
+ os.path.join(PROJECT_DIR, "apps", "dataset", 'sql', 'update_document_status_meta.sql'))
+ native_update({'document_custom_sql': QuerySet(Document).filter(id=document_id)}, sql, with_table_name=True)
+
+ return aggregation_document_status
+
+ @staticmethod
+ def get_aggregation_document_status_by_dataset_id(dataset_id):
+ def aggregation_document_status():
+ sql = get_file_content(
+ os.path.join(PROJECT_DIR, "apps", "dataset", 'sql', 'update_document_status_meta.sql'))
+ native_update({'document_custom_sql': QuerySet(Document).filter(dataset_id=dataset_id)}, sql,
+ with_table_name=True)
+
+ return aggregation_document_status
+
+ @staticmethod
+ def get_aggregation_document_status_by_query_set(queryset):
+ def aggregation_document_status():
+ sql = get_file_content(
+ os.path.join(PROJECT_DIR, "apps", "dataset", 'sql', 'update_document_status_meta.sql'))
+ native_update({'document_custom_sql': queryset}, sql, with_table_name=True)
+
+ return aggregation_document_status
+
+ @staticmethod
+ def post_update_document_status(document_id, task_type: TaskType):
+ _document = QuerySet(Document).filter(id=document_id).first()
+
+ status = Status(_document.status)
+ if status[task_type] == State.REVOKE:
+ status[task_type] = State.REVOKED
+ else:
+ status[task_type] = State.SUCCESS
+ for item in _document.status_meta.get('aggs', []):
+ agg_status = item.get('status')
+ agg_count = item.get('count')
+ if Status(agg_status)[task_type] == State.FAILURE and agg_count > 0:
+ status[task_type] = State.FAILURE
+ ListenerManagement.update_status(QuerySet(Document).filter(id=document_id), task_type, status[task_type])
+
+ ListenerManagement.update_status(QuerySet(Paragraph).annotate(
+ reversed_status=Reverse('status'),
+ task_type_status=Substr('reversed_status', task_type.value,
+ task_type.value),
+ ).filter(task_type_status=State.REVOKE.value).filter(document_id=document_id).values('id'),
+ task_type,
+ State.REVOKED)
+
+ @staticmethod
+ def update_status(query_set: QuerySet, taskType: TaskType, state: State):
+ exec_sql = get_file_content(
+ os.path.join(PROJECT_DIR, "apps", "dataset", 'sql', 'update_paragraph_status.sql'))
+ bit_number = len(TaskType)
+ up_index = taskType.value - 1
+ next_index = taskType.value + 1
+ current_index = taskType.value
+ status_number = state.value
+ current_time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f') + '+00'
+ params_dict = {'${bit_number}': bit_number, '${up_index}': up_index,
+ '${status_number}': status_number, '${next_index}': next_index,
+ '${table_name}': query_set.model._meta.db_table, '${current_index}': current_index,
+ '${current_time}': current_time}
+ for key in params_dict:
+ _value_ = params_dict[key]
+ exec_sql = exec_sql.replace(key, str(_value_))
+ with lock:
+ native_update(query_set, exec_sql)
+
+ @staticmethod
+ def embedding_by_document(document_id, embedding_model: Embeddings, state_list=None):
"""
向量化文档
- :param document_id: 文档id
+        @param state_list: 需要重新向量化的段落任务状态列表
+ @param document_id: 文档id
+ @param embedding_model 向量模型
:return: None
"""
- max_kb.info(f"开始--->向量化文档:{document_id}")
- status = Status.success
+ if state_list is None:
+ state_list = [State.PENDING, State.SUCCESS, State.FAILURE, State.REVOKE, State.REVOKED]
+ if not try_lock('embedding' + str(document_id)):
+ return
try:
- data_list = native_search(
- {'problem': QuerySet(
- get_dynamics_model({'paragraph.document_id': django.db.models.CharField()})).filter(
- **{'paragraph.document_id': document_id}),
- 'paragraph': QuerySet(Paragraph).filter(document_id=document_id)},
- select_string=get_file_content(
- os.path.join(PROJECT_DIR, "apps", "common", 'sql', 'list_embedding_text.sql')))
- # 删除文档向量数据
- VectorStore.get_embedding_vector().delete_by_document_id(document_id)
- # 批量向量化
- VectorStore.get_embedding_vector().batch_save(data_list)
+ def is_the_task_interrupted():
+ document = QuerySet(Document).filter(id=document_id).first()
+ if document is None or Status(document.status)[TaskType.EMBEDDING] == State.REVOKE:
+ return True
+ return False
+
+ if is_the_task_interrupted():
+ return
+ max_kb.info(_('Start--->Embedding document: {document_id}').format(document_id=document_id)
+ )
+            # 将文档向量化任务状态置为 STARTED(已开始)
+ ListenerManagement.update_status(QuerySet(Document).filter(id=document_id), TaskType.EMBEDDING,
+ State.STARTED)
+
+ # 根据段落进行向量化处理
+ page_desc(QuerySet(Paragraph)
+ .annotate(
+ reversed_status=Reverse('status'),
+ task_type_status=Substr('reversed_status', TaskType.EMBEDDING.value,
+ 1),
+ ).filter(task_type_status__in=state_list, document_id=document_id)
+ .values('id'), 5,
+ ListenerManagement.get_embedding_paragraph_apply(embedding_model, is_the_task_interrupted,
+ ListenerManagement.get_aggregation_document_status(
+ document_id)),
+ is_the_task_interrupted)
except Exception as e:
- max_kb_error.error(f'向量化文档:{document_id}出现错误{str(e)}{traceback.format_exc()}')
- status = Status.error
+ max_kb_error.error(_('Vectorized document: {document_id} error {error} {traceback}').format(
+ document_id=document_id, error=str(e), traceback=traceback.format_exc()))
finally:
- # 修改状态
- QuerySet(Document).filter(id=document_id).update(**{'status': status})
- QuerySet(Paragraph).filter(document_id=document_id).update(**{'status': status})
- max_kb.info(f"结束--->向量化文档:{document_id}")
+ ListenerManagement.post_update_document_status(document_id, TaskType.EMBEDDING)
+ ListenerManagement.get_aggregation_document_status(document_id)()
+ max_kb.info(_('End--->Embedding document: {document_id}').format(document_id=document_id))
+ un_lock('embedding' + str(document_id))
@staticmethod
- @embedding_poxy
- def embedding_by_dataset(dataset_id):
+ def embedding_by_dataset(dataset_id, embedding_model: Embeddings):
"""
向量化知识库
- :param dataset_id: 知识库id
+ @param dataset_id: 知识库id
+ @param embedding_model 向量模型
:return: None
"""
- max_kb.info(f"开始--->向量化数据集:{dataset_id}")
+ max_kb.info(_('Start--->Embedding dataset: {dataset_id}').format(dataset_id=dataset_id))
try:
+ ListenerManagement.delete_embedding_by_dataset(dataset_id)
document_list = QuerySet(Document).filter(dataset_id=dataset_id)
- max_kb.info(f"数据集文档:{[d.name for d in document_list]}")
+ max_kb.info(_('Start--->Embedding document: {document_list}').format(document_list=document_list))
for document in document_list:
- ListenerManagement.embedding_by_document(document.id)
+ ListenerManagement.embedding_by_document(document.id, embedding_model=embedding_model)
except Exception as e:
- max_kb_error.error(f'向量化数据集:{dataset_id}出现错误{str(e)}{traceback.format_exc()}')
+ max_kb_error.error(_('Vectorized dataset: {dataset_id} error {error} {traceback}').format(
+ dataset_id=dataset_id, error=str(e), traceback=traceback.format_exc()))
finally:
- max_kb.info(f"结束--->向量化数据集:{dataset_id}")
+ max_kb.info(_('End--->Embedding dataset: {dataset_id}').format(dataset_id=dataset_id))
@staticmethod
def delete_embedding_by_document(document_id):
@@ -170,7 +317,7 @@ def delete_embedding_by_document(document_id):
@staticmethod
def delete_embedding_by_document_list(document_id_list: List[str]):
- VectorStore.get_embedding_vector().delete_bu_document_id_list(document_id_list)
+ VectorStore.get_embedding_vector().delete_by_document_id_list(document_id_list)
@staticmethod
def delete_embedding_by_dataset(dataset_id):
@@ -192,29 +339,10 @@ def disable_embedding_by_paragraph(paragraph_id):
def enable_embedding_by_paragraph(paragraph_id):
VectorStore.get_embedding_vector().update_by_paragraph_id(paragraph_id, {'is_active': True})
- @staticmethod
- @poxy
- def sync_web_document(args: SyncWebDocumentArgs):
- for source_url in args.source_url_list:
- result = Fork(base_fork_url=source_url, selector_list=args.selector.split(' ')).fork()
- args.handler(source_url, args.selector, result)
-
- @staticmethod
- @poxy
- def sync_web_dataset(args: SyncWebDatasetArgs):
- if try_lock('sync_web_dataset' + args.lock_key):
- try:
- ForkManage(args.url, args.selector.split(" ") if args.selector is not None else []).fork(2, set(),
- args.handler)
- except Exception as e:
- logging.getLogger("max_kb_error").error(f'{str(e)}:{traceback.format_exc()}')
- finally:
- un_lock('sync_web_dataset' + args.lock_key)
-
@staticmethod
def update_problem(args: UpdateProblemArgs):
problem_paragraph_mapping_list = QuerySet(ProblemParagraphMapping).filter(problem_id=args.problem_id)
- embed_value = VectorStore.get_embedding_vector().embed_query(args.problem_content)
+ embed_value = args.embedding_model.embed_query(args.problem_content)
VectorStore.get_embedding_vector().update_by_source_ids([v.id for v in problem_paragraph_mapping_list],
{'embedding': embed_value})
@@ -225,9 +353,13 @@ def update_embedding_dataset_id(args: UpdateEmbeddingDatasetIdArgs):
@staticmethod
def update_embedding_document_id(args: UpdateEmbeddingDocumentIdArgs):
- VectorStore.get_embedding_vector().update_by_paragraph_ids(args.paragraph_id_list,
- {'document_id': args.target_document_id,
- 'dataset_id': args.target_dataset_id})
+ if args.target_embedding_model is None:
+ VectorStore.get_embedding_vector().update_by_paragraph_ids(args.paragraph_id_list,
+ {'document_id': args.target_document_id,
+ 'dataset_id': args.target_dataset_id})
+ else:
+ ListenerManagement.embedding_by_paragraph_list(args.paragraph_id_list,
+ embedding_model=args.target_embedding_model)
@staticmethod
def delete_embedding_by_source_ids(source_ids: List[str]):
@@ -242,43 +374,9 @@ def delete_embedding_by_dataset_id_list(source_ids: List[str]):
VectorStore.get_embedding_vector().delete_by_dataset_id_list(source_ids)
@staticmethod
- @poxy
- def init_embedding_model(ages):
- EmbeddingModel.get_embedding_model()
-
- def run(self):
- # 添加向量 根据问题id
- ListenerManagement.embedding_by_problem_signal.connect(self.embedding_by_problem)
- # 添加向量 根据段落id
- ListenerManagement.embedding_by_paragraph_signal.connect(self.embedding_by_paragraph)
- # 添加向量 根据知识库id
- ListenerManagement.embedding_by_dataset_signal.connect(
- self.embedding_by_dataset)
- # 添加向量 根据文档id
- ListenerManagement.embedding_by_document_signal.connect(
- self.embedding_by_document)
- # 删除 向量 根据文档
- ListenerManagement.delete_embedding_by_document_signal.connect(self.delete_embedding_by_document)
- # 删除 向量 根据文档id列表
- ListenerManagement.delete_embedding_by_document_list_signal.connect(self.delete_embedding_by_document_list)
- # 删除 向量 根据知识库id
- ListenerManagement.delete_embedding_by_dataset_signal.connect(self.delete_embedding_by_dataset)
- # 删除向量 根据段落id
- ListenerManagement.delete_embedding_by_paragraph_signal.connect(
- self.delete_embedding_by_paragraph)
- # 删除向量 根据资源id
- ListenerManagement.delete_embedding_by_source_signal.connect(self.delete_embedding_by_source)
- # 禁用段落
- ListenerManagement.disable_embedding_by_paragraph_signal.connect(self.disable_embedding_by_paragraph)
- # 启动段落向量
- ListenerManagement.enable_embedding_by_paragraph_signal.connect(self.enable_embedding_by_paragraph)
- # 初始化向量化模型
- ListenerManagement.init_embedding_model_signal.connect(self.init_embedding_model)
- # 同步web站点知识库
- ListenerManagement.sync_web_dataset_signal.connect(self.sync_web_dataset)
- # 同步web站点 文档
- ListenerManagement.sync_web_document_signal.connect(self.sync_web_document)
- # 更新问题向量
- ListenerManagement.update_problem_signal.connect(self.update_problem)
- ListenerManagement.delete_embedding_by_source_ids_signal.connect(self.delete_embedding_by_source_ids)
- ListenerManagement.delete_embedding_by_dataset_id_list_signal.connect(self.delete_embedding_by_dataset_id_list)
+ def hit_test(query_text, dataset_id: list[str], exclude_document_id_list: list[str], top_number: int,
+ similarity: float,
+ search_mode: SearchMode,
+ embedding: Embeddings):
+ return VectorStore.get_embedding_vector().hit_test(query_text, dataset_id, exclude_document_id_list, top_number,
+ similarity, search_mode, embedding)
diff --git a/apps/common/exception/app_exception.py b/apps/common/exception/app_exception.py
index 3646efb0cbc..b8f5602e705 100644
--- a/apps/common/exception/app_exception.py
+++ b/apps/common/exception/app_exception.py
@@ -73,3 +73,11 @@ class AppChatNumOutOfBoundsFailed(AppApiException):
def __init__(self, code, message):
self.code = code
self.message = message
+
+
+class ChatException(AppApiException):
+ status_code = 500
+
+ def __init__(self, code, message):
+ self.code = code
+ self.message = message
diff --git a/apps/common/field/common.py b/apps/common/field/common.py
index c615e587a1a..61c852d357d 100644
--- a/apps/common/field/common.py
+++ b/apps/common/field/common.py
@@ -7,6 +7,21 @@
@desc:
"""
from rest_framework import serializers
+from django.utils.translation import gettext_lazy as _
+
+class ObjectField(serializers.Field):
+ def __init__(self, model_type_list, **kwargs):
+ self.model_type_list = model_type_list
+ super().__init__(**kwargs)
+
+ def to_internal_value(self, data):
+ for model_type in self.model_type_list:
+ if isinstance(data, model_type):
+ return data
+ self.fail(_('Message type error'), value=data)
+
+ def to_representation(self, value):
+ return value
class InstanceField(serializers.Field):
@@ -16,7 +31,7 @@ def __init__(self, model_type, **kwargs):
def to_internal_value(self, data):
if not isinstance(data, self.model_type):
- self.fail('message类型错误', value=data)
+ self.fail(_('Message type error'), value=data)
return data
def to_representation(self, value):
@@ -27,7 +42,7 @@ class FunctionField(serializers.Field):
def to_internal_value(self, data):
if not callable(data):
- self.fail('不是一个函數', value=data)
+ self.fail(_('not a function'), value=data)
return data
def to_representation(self, value):
@@ -40,3 +55,11 @@ def __init__(self, **kwargs):
def to_representation(self, value):
return value
+
+
+class UploadedFileField(serializers.FileField):
+ def __init__(self, **kwargs):
+ super().__init__(**kwargs)
+
+ def to_representation(self, value):
+ return value
diff --git a/apps/common/forms/__init__.py b/apps/common/forms/__init__.py
index cda6fe04046..251f01df092 100644
--- a/apps/common/forms/__init__.py
+++ b/apps/common/forms/__init__.py
@@ -20,3 +20,6 @@
from .radio_button_field import *
from .table_checkbox import *
from .radio_card_field import *
+from .label import *
+from .slider_field import *
+from .switch_field import *
diff --git a/apps/common/forms/base_field.py b/apps/common/forms/base_field.py
index d12ae77a723..b0cf0f20240 100644
--- a/apps/common/forms/base_field.py
+++ b/apps/common/forms/base_field.py
@@ -9,6 +9,10 @@
from enum import Enum
from typing import List, Dict
+from common.exception.app_exception import AppApiException
+from common.forms.label.base_label import BaseLabel
+from django.utils.translation import gettext_lazy as _
+
class TriggerType(Enum):
# 执行函数获取 OptionList数据
@@ -20,7 +24,7 @@ class TriggerType(Enum):
class BaseField:
def __init__(self,
input_type: str,
- label: str,
+                 label: str | BaseLabel,
required: bool = False,
default_value: object = None,
relation_show_field_dict: Dict = None,
@@ -53,10 +57,16 @@ def __init__(self,
self.required = required
self.trigger_type = trigger_type
- def to_dict(self):
+ def is_valid(self, value):
+ field_label = self.label.label if hasattr(self.label, 'to_dict') else self.label
+ if self.required and value is None:
+ raise AppApiException(500,
+ _('The field {field_label} is required').format(field_label=field_label))
+
+ def to_dict(self, **kwargs):
return {
'input_type': self.input_type,
- 'label': self.label,
+ 'label': self.label.to_dict(**kwargs) if hasattr(self.label, 'to_dict') else self.label,
'required': self.required,
'default_value': self.default_value,
'relation_show_field_dict': self.relation_show_field_dict,
@@ -64,6 +74,7 @@ def to_dict(self):
'trigger_type': self.trigger_type.value,
'attrs': self.attrs,
'props_info': self.props_info,
+ **kwargs
}
@@ -97,8 +108,8 @@ def __init__(self, input_type: str,
self.value_field = value_field
self.option_list = option_list
- def to_dict(self):
- return {**super().to_dict(), 'text_field': self.text_field, 'value_field': self.value_field,
+ def to_dict(self, **kwargs):
+ return {**super().to_dict(**kwargs), 'text_field': self.text_field, 'value_field': self.value_field,
'option_list': self.option_list}
@@ -141,6 +152,6 @@ def __init__(self,
self.provider = provider
self.method = method
- def to_dict(self):
- return {**super().to_dict(), 'text_field': self.text_field, 'value_field': self.value_field,
+ def to_dict(self, **kwargs):
+ return {**super().to_dict(**kwargs), 'text_field': self.text_field, 'value_field': self.value_field,
'provider': self.provider, 'method': self.method}
diff --git a/apps/common/forms/base_form.py b/apps/common/forms/base_form.py
index 93984b8c61b..5ef92c5c1e6 100644
--- a/apps/common/forms/base_form.py
+++ b/apps/common/forms/base_form.py
@@ -6,11 +6,25 @@
@date:2023/11/1 16:04
@desc:
"""
+from typing import Dict
+
from common.forms import BaseField
class BaseForm:
- def to_form_list(self):
- return [{**self.__getattribute__(key).to_dict(), 'field': key} for key in
+ def to_form_list(self, **kwargs):
+ return [{**self.__getattribute__(key).to_dict(**kwargs), 'field': key} for key in
list(filter(lambda key: isinstance(self.__getattribute__(key), BaseField),
[attr for attr in vars(self.__class__) if not attr.startswith("__")]))]
+
+ def valid_form(self, form_data):
+ field_keys = list(filter(lambda key: isinstance(self.__getattribute__(key), BaseField),
+ [attr for attr in vars(self.__class__) if not attr.startswith("__")]))
+ for field_key in field_keys:
+ self.__getattribute__(field_key).is_valid(form_data.get(field_key))
+
+ def get_default_form_data(self):
+ return {key: self.__getattribute__(key).default_value for key in
+ [attr for attr in vars(self.__class__) if not attr.startswith("__")] if
+ isinstance(self.__getattribute__(key), BaseField) and self.__getattribute__(
+ key).default_value is not None}
diff --git a/apps/common/forms/label/__init__.py b/apps/common/forms/label/__init__.py
new file mode 100644
index 00000000000..81c1b329874
--- /dev/null
+++ b/apps/common/forms/label/__init__.py
@@ -0,0 +1,10 @@
+# coding=utf-8
+"""
+ @project: MaxKB
+ @Author:虎
+ @file: __init__.py.py
+ @date:2024/8/22 17:19
+ @desc:
+"""
+from .base_label import *
+from .tooltip_label import *
diff --git a/apps/common/forms/label/base_label.py b/apps/common/forms/label/base_label.py
new file mode 100644
index 00000000000..59e4d372267
--- /dev/null
+++ b/apps/common/forms/label/base_label.py
@@ -0,0 +1,28 @@
+# coding=utf-8
+"""
+ @project: MaxKB
+ @Author:虎
+ @file: base_label.py
+ @date:2024/8/22 17:11
+ @desc:
+"""
+
+
+class BaseLabel:
+ def __init__(self,
+ input_type: str,
+ label: str,
+ attrs=None,
+ props_info=None):
+ self.input_type = input_type
+ self.label = label
+ self.attrs = attrs
+ self.props_info = props_info
+
+ def to_dict(self, **kwargs):
+ return {
+ 'input_type': self.input_type,
+ 'label': self.label,
+ 'attrs': {} if self.attrs is None else self.attrs,
+ 'props_info': {} if self.props_info is None else self.props_info,
+ }
diff --git a/apps/common/forms/label/tooltip_label.py b/apps/common/forms/label/tooltip_label.py
new file mode 100644
index 00000000000..885345dafbc
--- /dev/null
+++ b/apps/common/forms/label/tooltip_label.py
@@ -0,0 +1,14 @@
+# coding=utf-8
+"""
+ @project: MaxKB
+ @Author:虎
+ @file: tooltip_label.py
+ @date:2024/8/22 17:19
+ @desc:
+"""
+from common.forms.label.base_label import BaseLabel
+
+
+class TooltipLabel(BaseLabel):
+ def __init__(self, label, tooltip):
+ super().__init__('TooltipLabel', label, attrs={'tooltip': tooltip}, props_info={})
diff --git a/apps/common/forms/single_select_field.py b/apps/common/forms/single_select_field.py
index cf3d5040965..21bd5de5750 100644
--- a/apps/common/forms/single_select_field.py
+++ b/apps/common/forms/single_select_field.py
@@ -8,6 +8,7 @@
"""
from typing import List, Dict
+from common.forms import BaseLabel
from common.forms.base_field import TriggerType, BaseExecField
@@ -17,7 +18,7 @@ class SingleSelect(BaseExecField):
"""
def __init__(self,
- label: str,
+                 label: str | BaseLabel,
text_field: str,
value_field: str,
option_list: List[str:object],
diff --git a/apps/common/forms/slider_field.py b/apps/common/forms/slider_field.py
new file mode 100644
index 00000000000..3919891fda4
--- /dev/null
+++ b/apps/common/forms/slider_field.py
@@ -0,0 +1,65 @@
+# coding=utf-8
+"""
+ @project: MaxKB
+ @Author:虎
+ @file: slider_field.py
+ @date:2024/8/22 17:06
+ @desc:
+"""
+from typing import Dict
+
+from common.exception.app_exception import AppApiException
+from common.forms import BaseField, TriggerType, BaseLabel
+from django.utils.translation import gettext_lazy as _
+
+
+class SliderField(BaseField):
+ """
+ 滑块输入框
+ """
+
+    def __init__(self, label: str | BaseLabel,
+ _min,
+ _max,
+ _step,
+ precision,
+ required: bool = False,
+ default_value=None,
+ relation_show_field_dict: Dict = None,
+ attrs=None, props_info=None):
+ """
+ @param label: 提示
+ @param _min: 最小值
+ @param _max: 最大值
+ @param _step: 步长
+ @param precision: 保留多少小数
+ @param required: 是否必填
+ @param default_value: 默认值
+ @param relation_show_field_dict:
+ @param attrs:
+ @param props_info:
+ """
+ _attrs = {'min': _min, 'max': _max, 'step': _step,
+ 'precision': precision, 'show-input-controls': False, 'show-input': True}
+ if attrs is not None:
+ _attrs.update(attrs)
+ super().__init__('Slider', label, required, default_value, relation_show_field_dict,
+ {},
+ TriggerType.OPTION_LIST, _attrs, props_info)
+
+ def is_valid(self, value):
+ super().is_valid(value)
+ field_label = self.label.label if hasattr(self.label, 'to_dict') else self.label
+ if value is not None:
+ if value < self.attrs.get('min'):
+ raise AppApiException(500,
+ _("The {field_label} cannot be less than {min}").format(field_label=field_label,
+ min=self.attrs.get(
+ 'min')))
+
+ if value > self.attrs.get('max'):
+ raise AppApiException(500,
+ _("The {field_label} cannot be greater than {max}").format(
+ field_label=field_label,
+ max=self.attrs.get(
+ 'max')))
diff --git a/apps/common/forms/switch_field.py b/apps/common/forms/switch_field.py
new file mode 100644
index 00000000000..ea119c3ecfb
--- /dev/null
+++ b/apps/common/forms/switch_field.py
@@ -0,0 +1,33 @@
+"""
+ @project: MaxKB
+ @Author:虎
+ @file: switch_field.py
+ @date:2024/10/13 19:43
+ @desc:
+"""
+from typing import Dict
+from common.forms import BaseField, TriggerType, BaseLabel
+
+
+class SwitchField(BaseField):
+ """
+ 滑块输入框
+ """
+
+    def __init__(self, label: str | BaseLabel,
+ required: bool = False,
+ default_value=None,
+ relation_show_field_dict: Dict = None,
+
+ attrs=None, props_info=None):
+ """
+ @param required: 是否必填
+ @param default_value: 默认值
+ @param relation_show_field_dict:
+ @param attrs:
+ @param props_info:
+ """
+
+ super().__init__('SwitchInput', label, required, default_value, relation_show_field_dict,
+ {},
+ TriggerType.OPTION_LIST, attrs, props_info)
diff --git a/apps/common/forms/text_input_field.py b/apps/common/forms/text_input_field.py
index 28a821e1570..2b8b2ce04a5 100644
--- a/apps/common/forms/text_input_field.py
+++ b/apps/common/forms/text_input_field.py
@@ -8,6 +8,7 @@
"""
from typing import Dict
+from common.forms import BaseLabel
from common.forms.base_field import BaseField, TriggerType
@@ -16,7 +17,7 @@ class TextInputField(BaseField):
文本输入框
"""
- def __init__(self, label: str,
+    def __init__(self, label: str | BaseLabel,
required: bool = False,
default_value=None,
relation_show_field_dict: Dict = None,
diff --git a/apps/common/handle/base_parse_qa_handle.py b/apps/common/handle/base_parse_qa_handle.py
new file mode 100644
index 00000000000..8cd1cd1cdb2
--- /dev/null
+++ b/apps/common/handle/base_parse_qa_handle.py
@@ -0,0 +1,52 @@
+# coding=utf-8
+"""
+ @project: maxkb
+ @Author:虎
+ @file: base_parse_qa_handle.py
+ @date:2024/5/21 14:56
+ @desc:
+"""
+from abc import ABC, abstractmethod
+
+
+def get_row_value(row, title_row_index_dict, field):
+ index = title_row_index_dict.get(field)
+ if index is None:
+ return None
+ if (len(row) - 1) >= index:
+ return row[index]
+ return None
+
+
+def get_title_row_index_dict(title_row_list):
+ title_row_index_dict = {}
+ if len(title_row_list) == 1:
+ title_row_index_dict['content'] = 0
+    elif len(title_row_list) == 2:
+ title_row_index_dict['title'] = 0
+ title_row_index_dict['content'] = 1
+ else:
+ title_row_index_dict['title'] = 0
+ title_row_index_dict['content'] = 1
+ title_row_index_dict['problem_list'] = 2
+ for index in range(len(title_row_list)):
+ title_row = title_row_list[index]
+ if title_row is None:
+ title_row = ''
+ if title_row.startswith('分段标题'):
+ title_row_index_dict['title'] = index
+ if title_row.startswith('分段内容'):
+ title_row_index_dict['content'] = index
+ if title_row.startswith('问题'):
+ title_row_index_dict['problem_list'] = index
+ return title_row_index_dict
+
+
+class BaseParseQAHandle(ABC):
+ @abstractmethod
+ def support(self, file, get_buffer):
+ pass
+
+ @abstractmethod
+ def handle(self, file, get_buffer, save_image):
+ pass
diff --git a/apps/common/handle/base_parse_table_handle.py b/apps/common/handle/base_parse_table_handle.py
new file mode 100644
index 00000000000..65eaf897f1f
--- /dev/null
+++ b/apps/common/handle/base_parse_table_handle.py
@@ -0,0 +1,23 @@
+# coding=utf-8
+"""
+ @project: maxkb
+ @Author:虎
+    @file: base_parse_table_handle.py
+ @date:2024/5/21 14:56
+ @desc:
+"""
+from abc import ABC, abstractmethod
+
+
+class BaseParseTableHandle(ABC):
+ @abstractmethod
+ def support(self, file, get_buffer):
+ pass
+
+ @abstractmethod
+    def handle(self, file, get_buffer, save_image):
+ pass
+
+ @abstractmethod
+ def get_content(self, file, save_image):
+ pass
\ No newline at end of file
diff --git a/apps/common/handle/base_split_handle.py b/apps/common/handle/base_split_handle.py
index f9b573f0f79..bedaad5e10c 100644
--- a/apps/common/handle/base_split_handle.py
+++ b/apps/common/handle/base_split_handle.py
@@ -18,3 +18,7 @@ def support(self, file, get_buffer):
@abstractmethod
def handle(self, file, pattern_list: List, with_filter: bool, limit: int, get_buffer, save_image):
pass
+
+ @abstractmethod
+ def get_content(self, file, save_image):
+ pass
diff --git a/apps/common/handle/base_to_response.py b/apps/common/handle/base_to_response.py
new file mode 100644
index 00000000000..376d1a9ddd7
--- /dev/null
+++ b/apps/common/handle/base_to_response.py
@@ -0,0 +1,30 @@
+# coding=utf-8
+"""
+ @project: MaxKB
+ @Author:虎
+ @file: base_to_response.py
+ @date:2024/9/6 16:04
+ @desc:
+"""
+from abc import ABC, abstractmethod
+
+from rest_framework import status
+
+
+class BaseToResponse(ABC):
+
+ @abstractmethod
+ def to_block_response(self, chat_id, chat_record_id, content, is_end, completion_tokens,
+ prompt_tokens, other_params: dict = None,
+ _status=status.HTTP_200_OK):
+ pass
+
+ @abstractmethod
+ def to_stream_chunk_response(self, chat_id, chat_record_id, node_id, up_node_id_list, content, is_end,
+ completion_tokens,
+ prompt_tokens, other_params: dict = None):
+ pass
+
+ @staticmethod
+ def format_stream_chunk(response_str):
+ return 'data: ' + response_str + '\n\n'
diff --git a/apps/common/handle/handle_exception.py b/apps/common/handle/handle_exception.py
index d3e86401bc2..21e8c8ef1e9 100644
--- a/apps/common/handle/handle_exception.py
+++ b/apps/common/handle/handle_exception.py
@@ -14,7 +14,7 @@
from common.exception.app_exception import AppApiException
from common.response import result
-
+from django.utils.translation import gettext_lazy as _
def to_result(key, args, parent_key=None):
"""
@@ -27,7 +27,7 @@ def to_result(key, args, parent_key=None):
error_detail = list(filter(
lambda d: True if isinstance(d, ErrorDetail) else True if isinstance(d, dict) and len(
d.keys()) > 0 else False,
- (args[0] if len(args) > 0 else {key: [ErrorDetail('未知异常', code='unknown')]}).get(key)))[0]
+ (args[0] if len(args) > 0 else {key: [ErrorDetail(_('Unknown exception'), code='unknown')]}).get(key)))[0]
if isinstance(error_detail, dict):
return list(map(lambda k: to_result(k, args=[error_detail],
@@ -63,13 +63,15 @@ def find_err_detail(exc_detail):
_value = exc_detail[key]
if isinstance(_value, list):
return find_err_detail(_value)
- elif isinstance(_value, ErrorDetail):
+ if isinstance(_value, ErrorDetail):
return _value
- elif isinstance(_value, dict):
+ if isinstance(_value, dict) and len(_value.keys()) > 0:
return find_err_detail(_value)
if isinstance(exc_detail, list):
for v in exc_detail:
- return find_err_detail(v)
+ r = find_err_detail(v)
+ if r is not None:
+ return r
def handle_exception(exc, context):
diff --git a/apps/common/handle/impl/csv_split_handle.py b/apps/common/handle/impl/csv_split_handle.py
new file mode 100644
index 00000000000..3ea690e0e65
--- /dev/null
+++ b/apps/common/handle/impl/csv_split_handle.py
@@ -0,0 +1,72 @@
+# coding=utf-8
+"""
+ @project: maxkb
+ @Author:虎
+    @file: csv_split_handle.py
+ @date:2024/5/21 14:59
+ @desc:
+"""
+import csv
+import io
+import os
+from typing import List
+
+from charset_normalizer import detect
+
+from common.handle.base_split_handle import BaseSplitHandle
+
+
+def post_cell(cell_value):
+    return cell_value.replace('\n', ' ').replace('|', '\\|')
+
+
+def row_to_md(row):
+ return '| ' + ' | '.join(
+ [post_cell(cell) if cell is not None else '' for cell in row]) + ' |\n'
+
+
+class CsvSplitHandle(BaseSplitHandle):
+ def handle(self, file, pattern_list: List, with_filter: bool, limit: int, get_buffer, save_image):
+ buffer = get_buffer(file)
+ paragraphs = []
+ file_name = os.path.basename(file.name)
+ result = {'name': file_name, 'content': paragraphs}
+ try:
+ reader = csv.reader(io.TextIOWrapper(io.BytesIO(buffer), encoding=detect(buffer)['encoding']))
+ try:
+ title_row_list = reader.__next__()
+ title_md_content = row_to_md(title_row_list)
+ title_md_content += '| ' + ' | '.join(
+ ['---' if cell is not None else '' for cell in title_row_list]) + ' |\n'
+ except Exception as e:
+ return result
+ if len(title_row_list) == 0:
+ return result
+ result_item_content = ''
+ for row in reader:
+ next_md_content = row_to_md(row)
+ next_md_content_len = len(next_md_content)
+ result_item_content_len = len(result_item_content)
+ if len(result_item_content) == 0:
+ result_item_content += title_md_content
+ result_item_content += next_md_content
+ else:
+ if result_item_content_len + next_md_content_len < limit:
+ result_item_content += next_md_content
+ else:
+ paragraphs.append({'content': result_item_content, 'title': ''})
+ result_item_content = title_md_content + next_md_content
+ if len(result_item_content) > 0:
+ paragraphs.append({'content': result_item_content, 'title': ''})
+ return result
+ except Exception as e:
+ return result
+
+ def get_content(self, file, save_image):
+ pass
+
+ def support(self, file, get_buffer):
+ file_name: str = file.name.lower()
+ if file_name.endswith(".csv"):
+ return True
+ return False
diff --git a/apps/common/handle/impl/doc_split_handle.py b/apps/common/handle/impl/doc_split_handle.py
index 25f5d694af9..4161f13a19d 100644
--- a/apps/common/handle/impl/doc_split_handle.py
+++ b/apps/common/handle/impl/doc_split_handle.py
@@ -7,18 +7,22 @@
@desc:
"""
import io
+import os
import re
import traceback
import uuid
+from functools import reduce
from typing import List
from docx import Document, ImagePart
+from docx.oxml import ns
from docx.table import Table
from docx.text.paragraph import Paragraph
from common.handle.base_split_handle import BaseSplitHandle
from common.util.split_model import SplitModel
from dataset.models import Image
+from django.utils.translation import gettext_lazy as _
default_pattern_list = [re.compile('(?<=^)# .*|(?<=\\n)# .*'),
re.compile('(?<=\\n)(? 0:
+ for image in _images:
+ images.append({'image': image, 'get_image_id_handle': get_image_id_handle})
+ except Exception as e:
+ pass
+ return images
+
+
+def images_to_string(images, doc: Document, images_list, get_image_id):
+ return "".join(
+ [item for item in [image_to_mode(image, doc, images_list, get_image_id) for image in images] if
+ item is not None])
+
+
def get_paragraph_element_txt(paragraph_element, doc: Document, images_list, get_image_id):
try:
- images = paragraph_element.xpath(".//pic:pic")
+ images = get_paragraph_element_images(paragraph_element, doc, images_list, get_image_id)
if len(images) > 0:
- return "".join(
- [item for item in [image_to_mode(image, doc, images_list, get_image_id) for image in images] if
- item is not None])
+ return images_to_string(images, doc, images_list, get_image_id)
elif paragraph_element.text is not None:
return paragraph_element.text
return ""
@@ -83,14 +110,49 @@ def get_image_id(image_id):
return get_image_id
+title_font_list = [
+ [36, 100],
+ [30, 36]
+]
+
+
+def get_title_level(paragraph: Paragraph):
+ try:
+ if paragraph.style is not None:
+ psn = paragraph.style.name
+ if psn.startswith('Heading') or psn.startswith('TOC 标题') or psn.startswith('标题'):
+ return int(psn.replace("Heading ", '').replace('TOC 标题', '').replace('标题',
+ ''))
+ if len(paragraph.runs) == 1:
+ font_size = paragraph.runs[0].font.size
+ pt = font_size.pt
+ if pt >= 30:
+ for _value, index in zip(title_font_list, range(len(title_font_list))):
+ if pt >= _value[0] and pt < _value[1]:
+ return index + 1
+ except Exception as e:
+ pass
+ return None
+
+
class DocSplitHandle(BaseSplitHandle):
@staticmethod
def paragraph_to_md(paragraph: Paragraph, doc: Document, images_list, get_image_id):
try:
- psn = paragraph.style.name
- if psn.startswith('Heading'):
- return "".join(["#" for i in range(int(psn.replace("Heading ", '')))]) + " " + paragraph.text
+ title_level = get_title_level(paragraph)
+ if title_level is not None:
+ title = "".join(["#" for i in range(title_level)]) + " " + paragraph.text
+ images = reduce(lambda x, y: [*x, *y],
+ [get_paragraph_element_images(e, doc, images_list, get_image_id) for e in
+ paragraph._element],
+ [])
+ if len(images) > 0:
+ return title + '\n' + images_to_string(images, doc, images_list, get_image_id) if len(
+ paragraph.text) > 0 else images_to_string(images, doc, images_list, get_image_id)
+ return title
+
except Exception as e:
+ traceback.print_exc()
return paragraph.text
return get_paragraph_txt(paragraph, doc, images_list, get_image_id)
@@ -110,11 +172,12 @@ def table_to_md(table, doc: Document, images_list, get_image_id):
def to_md(self, doc, images_list, get_image_id):
elements = []
for element in doc.element.body:
- if element.tag.endswith('tbl'):
+ tag = str(element.tag)
+ if tag.endswith('tbl'):
# 处理表格
table = Table(element, doc)
elements.append(table)
- elif element.tag.endswith('p'):
+ elif tag.endswith('p'):
# 处理段落
paragraph = Paragraph(element, doc)
elements.append(paragraph)
@@ -128,6 +191,7 @@ def to_md(self, doc, images_list, get_image_id):
in elements])
def handle(self, file, pattern_list: List, with_filter: bool, limit: int, get_buffer, save_image):
+ file_name = os.path.basename(file.name)
try:
image_list = []
buffer = get_buffer(file)
@@ -141,14 +205,29 @@ def handle(self, file, pattern_list: List, with_filter: bool, limit: int, get_bu
split_model = SplitModel(default_pattern_list, with_filter=with_filter, limit=limit)
except BaseException as e:
traceback.print_exception(e)
- return {'name': file.name,
+ return {'name': file_name,
'content': []}
- return {'name': file.name,
+ return {'name': file_name,
'content': split_model.parse(content)
}
def support(self, file, get_buffer):
file_name: str = file.name.lower()
- if file_name.endswith(".docx") or file_name.endswith(".doc"):
+        # file_name is already lower-cased above, so uppercase-extension checks are redundant
+        if file_name.endswith(".docx") or file_name.endswith(".doc"):
return True
return False
+
+ def get_content(self, file, save_image):
+ try:
+ image_list = []
+ buffer = file.read()
+ doc = Document(io.BytesIO(buffer))
+ content = self.to_md(doc, image_list, get_image_id_func())
+ if len(image_list) > 0:
+ content = content.replace('/api/image/', '/api/file/')
+ save_image(image_list)
+ return content
+ except BaseException as e:
+ traceback.print_exception(e)
+ return f'{e}'
diff --git a/apps/common/handle/impl/html_split_handle.py b/apps/common/handle/impl/html_split_handle.py
new file mode 100644
index 00000000000..90e59ebcb5c
--- /dev/null
+++ b/apps/common/handle/impl/html_split_handle.py
@@ -0,0 +1,73 @@
+# coding=utf-8
+"""
+ @project: maxkb
+ @Author:虎
+ @file: html_split_handle.py
+ @date:2024/5/23 10:58
+ @desc:
+"""
+import re
+import traceback
+from typing import List
+
+from bs4 import BeautifulSoup
+from charset_normalizer import detect
+from html2text import html2text
+
+from common.handle.base_split_handle import BaseSplitHandle
+from common.util.split_model import SplitModel
+
+default_pattern_list = [re.compile('(?<=^)# .*|(?<=\\n)# .*'),
+ re.compile('(?<=\\n)(? 0:
+ charset = charset_list[0]
+ return charset
+ return detect(buffer)['encoding']
+
+
+class HTMLSplitHandle(BaseSplitHandle):
+ def support(self, file, get_buffer):
+ file_name: str = file.name.lower()
+        if file_name.endswith(".html"):
+ return True
+ return False
+
+ def handle(self, file, pattern_list: List, with_filter: bool, limit: int, get_buffer, save_image):
+ buffer = get_buffer(file)
+
+ if pattern_list is not None and len(pattern_list) > 0:
+ split_model = SplitModel(pattern_list, with_filter, limit)
+ else:
+ split_model = SplitModel(default_pattern_list, with_filter=with_filter, limit=limit)
+ try:
+ encoding = get_encoding(buffer)
+ content = buffer.decode(encoding)
+ content = html2text(content)
+ except BaseException as e:
+ return {'name': file.name,
+ 'content': []}
+ return {'name': file.name,
+ 'content': split_model.parse(content)
+ }
+
+ def get_content(self, file, save_image):
+ buffer = file.read()
+
+ try:
+ encoding = get_encoding(buffer)
+ content = buffer.decode(encoding)
+ return html2text(content)
+ except BaseException as e:
+ traceback.print_exception(e)
+ return f'{e}'
\ No newline at end of file
diff --git a/apps/common/handle/impl/pdf_split_handle.py b/apps/common/handle/impl/pdf_split_handle.py
index 4d835ca11a4..abdac5e19c9 100644
--- a/apps/common/handle/impl/pdf_split_handle.py
+++ b/apps/common/handle/impl/pdf_split_handle.py
@@ -6,13 +6,20 @@
@date:2024/3/27 18:19
@desc:
"""
+import logging
+import os
import re
+import tempfile
+import time
+import traceback
from typing import List
import fitz
+from langchain_community.document_loaders import PyPDFLoader
from common.handle.base_split_handle import BaseSplitHandle
from common.util.split_model import SplitModel
+from django.utils.translation import gettext_lazy as _
default_pattern_list = [re.compile('(?<=^)# .*|(?<=\\n)# .*'),
re.compile('(?<=\\n)(? 0:
+ return {'name': file.name, 'content': result}
+
+ # 没有目录的pdf
+ content = self.handle_pdf_content(file, pdf_document)
+
if pattern_list is not None and len(pattern_list) > 0:
split_model = SplitModel(pattern_list, with_filter, limit)
else:
split_model = SplitModel(default_pattern_list, with_filter=with_filter, limit=limit)
except BaseException as e:
+ max_kb.error(f"File: {file.name}, error: {e}")
return {'name': file.name,
'content': []}
+ finally:
+ pdf_document.close()
+ # 处理完后可以删除临时文件
+ os.remove(temp_file_path)
+
return {'name': file.name,
'content': split_model.parse(content)
}
+ @staticmethod
+ def handle_pdf_content(file, pdf_document):
+ content = ""
+ for page_num in range(len(pdf_document)):
+ start_time = time.time()
+ page = pdf_document.load_page(page_num)
+ text = page.get_text()
+
+ if text and text.strip(): # 如果页面中有文本内容
+ page_content = text
+            else:
+                page_num_pdf = None
+                try:
+                    new_doc = fitz.open()
+                    new_doc.insert_pdf(pdf_document, from_page=page_num, to_page=page_num)
+                    page_num_pdf = tempfile.gettempdir() + f"/{file.name}_{page_num}.pdf"
+                    new_doc.save(page_num_pdf)
+                    new_doc.close()
+
+                    loader = PyPDFLoader(page_num_pdf, extract_images=True)
+                    page_content = "\n" + loader.load()[0].page_content
+                except NotImplementedError as e:
+                    # 文件格式不支持,直接退出
+                    raise e
+                except BaseException as e:
+                    # 当页出错继续进行下一页,防止一个页面出错导致整个文件解析失败
+                    max_kb.error(f"File: {file.name}, Page: {page_num + 1}, error: {e}")
+                    continue
+                finally:
+                    # page_num_pdf may be unset (open/insert failed) or never written (save failed);
+                    # guard so cleanup cannot raise NameError/FileNotFoundError
+                    if page_num_pdf is not None and os.path.exists(page_num_pdf):
+                        os.remove(page_num_pdf)
+
+ content += page_content
+
+ # Null characters are not allowed.
+ content = content.replace('\0', '')
+
+ elapsed_time = time.time() - start_time
+ max_kb.debug(
+ f"File: {file.name}, Page: {page_num + 1}, Time : {elapsed_time: .3f}s, content-length: {len(page_content)}")
+
+ return content
+
+ @staticmethod
+ def handle_toc(doc, limit):
+ # 找到目录
+ toc = doc.get_toc()
+ if toc is None or len(toc) == 0:
+ return None
+
+ # 创建存储章节内容的数组
+ chapters = []
+
+ # 遍历目录并按章节提取文本
+ for i, entry in enumerate(toc):
+ level, title, start_page = entry
+ start_page -= 1 # PyMuPDF 页码从 0 开始,书签页码从 1 开始
+ chapter_title = title
+ # 确定结束页码,如果是最后一个章节则到文档末尾
+ if i + 1 < len(toc):
+ end_page = toc[i + 1][2] - 1
+ else:
+ end_page = doc.page_count - 1
+
+ # 去掉标题中的符号
+ title = PdfSplitHandle.handle_chapter_title(title)
+
+ # 提取该章节的文本内容
+ chapter_text = ""
+ for page_num in range(start_page, end_page + 1):
+ page = doc.load_page(page_num) # 加载页面
+ text = page.get_text("text")
+ text = re.sub(r'(? -1:
+ text = text[idx + len(title):]
+
+ if i + 1 < len(toc):
+ l, next_title, next_start_page = toc[i + 1]
+ next_title = PdfSplitHandle.handle_chapter_title(next_title)
+ # print(f'next_title: {next_title}')
+ idx = text.find(next_title)
+ if idx > -1:
+ text = text[:idx]
+
+ chapter_text += text # 提取文本
+
+ # Null characters are not allowed.
+ chapter_text = chapter_text.replace('\0', '')
+ # 限制标题长度
+ real_chapter_title = chapter_title[:256]
+ # 限制章节内容长度
+ if 0 < limit < len(chapter_text):
+ split_text = PdfSplitHandle.split_text(chapter_text, limit)
+ for text in split_text:
+ chapters.append({"title": real_chapter_title, "content": text})
+ else:
+ chapters.append({"title": real_chapter_title, "content": chapter_text if chapter_text else real_chapter_title})
+ # 保存章节内容和章节标题
+ return chapters
+
+ @staticmethod
+ def handle_links(doc, pattern_list, with_filter, limit):
+ # 检查文档是否包含内部链接
+ if not check_links_in_pdf(doc):
+ return
+ # 创建存储章节内容的数组
+ chapters = []
+ toc_start_page = -1
+ page_content = ""
+ handle_pre_toc = True
+ # 遍历 PDF 的每一页,查找带有目录链接的页
+ for page_num in range(doc.page_count):
+ page = doc.load_page(page_num)
+ links = page.get_links()
+ # 如果目录开始页码未设置,则设置为当前页码
+ if len(links) > 0:
+ toc_start_page = page_num
+ if toc_start_page < 0:
+ page_content += page.get_text('text')
+ # 检查该页是否包含内部链接(即指向文档内部的页面)
+ for num in range(len(links)):
+ link = links[num]
+ if link['kind'] == 1: # 'kind' 为 1 表示内部链接
+ # 获取链接目标的页面
+ dest_page = link['page']
+ rect = link['from'] # 获取链接的矩形区域
+ # 如果目录开始页码包括前言部分,则不处理前言部分
+ if dest_page < toc_start_page:
+ handle_pre_toc = False
+
+ # 提取链接区域的文本作为标题
+ link_title = page.get_text("text", clip=rect).strip().split("\n")[0].replace('.', '').strip()
+ # print(f'link_title: {link_title}')
+ # 提取目标页面内容作为章节开始
+ start_page = dest_page
+ end_page = dest_page
+ # 下一个link
+ next_link = links[num + 1] if num + 1 < len(links) else None
+ next_link_title = None
+ if next_link is not None and next_link['kind'] == 1:
+ rect = next_link['from']
+ next_link_title = page.get_text("text", clip=rect).strip() \
+ .split("\n")[0].replace('.', '').strip()
+ # print(f'next_link_title: {next_link_title}')
+ end_page = next_link['page']
+
+ # 提取章节内容
+ chapter_text = ""
+ for p_num in range(start_page, end_page + 1):
+ p = doc.load_page(p_num)
+ text = p.get_text("text")
+ text = re.sub(r'(? -1:
+ text = text[idx + len(link_title):]
+
+ if next_link_title is not None:
+ idx = text.find(next_link_title)
+ if idx > -1:
+ text = text[:idx]
+ chapter_text += text
+
+ # Null characters are not allowed.
+ chapter_text = chapter_text.replace('\0', '')
+
+ # 限制章节内容长度
+ if 0 < limit < len(chapter_text):
+ split_text = PdfSplitHandle.split_text(chapter_text, limit)
+ for text in split_text:
+ chapters.append({"title": link_title, "content": text})
+ else:
+ # 保存章节信息
+ chapters.append({"title": link_title, "content": chapter_text})
+
+ # 目录中没有前言部分,手动处理
+ if handle_pre_toc:
+ pre_toc = []
+ lines = page_content.strip().split('\n')
+ try:
+ for line in lines:
+ if re.match(r'^前\s*言', line):
+ pre_toc.append({'title': line, 'content': ''})
+ else:
+ pre_toc[-1]['content'] += line
+ for i in range(len(pre_toc)):
+ pre_toc[i]['content'] = re.sub(r'(? 0:
+ split_model = SplitModel(pattern_list, with_filter, limit)
+ else:
+ split_model = SplitModel(default_pattern_list, with_filter=with_filter, limit=limit)
+ # 插入目录前的部分
+ page_content = re.sub(r'(?= length:
+ # 查找最近的句号
+ last_period_index = current_segment.rfind('.')
+ if last_period_index != -1:
+ segments.append(current_segment[:last_period_index + 1])
+ current_segment = current_segment[last_period_index + 1:] # 更新当前段落
+ else:
+ segments.append(current_segment)
+ current_segment = ""
+
+ # 处理剩余的部分
+ if current_segment:
+ segments.append(current_segment)
+
+ return segments
+
+ @staticmethod
+ def handle_chapter_title(title):
+ title = re.sub(r'[一二三四五六七八九十\s*]、\s*', '', title)
+ title = re.sub(r'第[一二三四五六七八九十]章\s*', '', title)
+ return title
+
def support(self, file, get_buffer):
file_name: str = file.name.lower()
- if file_name.endswith(".pdf"):
+ if file_name.endswith(".pdf") or file_name.endswith(".PDF"):
return True
return False
+
+ def get_content(self, file, save_image):
+ with tempfile.NamedTemporaryFile(delete=False) as temp_file:
+ # 将上传的文件保存到临时文件中
+ temp_file.write(file.read())
+ # 获取临时文件的路径
+ temp_file_path = temp_file.name
+
+ pdf_document = fitz.open(temp_file_path)
+ try:
+ return self.handle_pdf_content(file, pdf_document)
+ except BaseException as e:
+ traceback.print_exception(e)
+ return f'{e}'
\ No newline at end of file
diff --git a/apps/common/handle/impl/qa/csv_parse_qa_handle.py b/apps/common/handle/impl/qa/csv_parse_qa_handle.py
new file mode 100644
index 00000000000..75c22cbdafd
--- /dev/null
+++ b/apps/common/handle/impl/qa/csv_parse_qa_handle.py
@@ -0,0 +1,59 @@
+# coding=utf-8
+"""
+ @project: maxkb
+ @Author:虎
+ @file: csv_parse_qa_handle.py
+ @date:2024/5/21 14:59
+ @desc:
+"""
+import csv
+import io
+
+from charset_normalizer import detect
+
+from common.handle.base_parse_qa_handle import BaseParseQAHandle, get_title_row_index_dict, get_row_value
+
+
+def read_csv_standard(file_path):
+ data = []
+ with open(file_path, 'r') as file:
+ reader = csv.reader(file)
+ for row in reader:
+ data.append(row)
+ return data
+
+
+class CsvParseQAHandle(BaseParseQAHandle):
+ def support(self, file, get_buffer):
+ file_name: str = file.name.lower()
+ if file_name.endswith(".csv"):
+ return True
+ return False
+
+ def handle(self, file, get_buffer, save_image):
+ buffer = get_buffer(file)
+ try:
+ reader = csv.reader(io.TextIOWrapper(io.BytesIO(buffer), encoding=detect(buffer)['encoding']))
+ try:
+ title_row_list = reader.__next__()
+ except Exception as e:
+ return [{'name': file.name, 'paragraphs': []}]
+ if len(title_row_list) == 0:
+ return [{'name': file.name, 'paragraphs': []}]
+ title_row_index_dict = get_title_row_index_dict(title_row_list)
+ paragraph_list = []
+ for row in reader:
+ content = get_row_value(row, title_row_index_dict, 'content')
+ if content is None:
+ continue
+ problem = get_row_value(row, title_row_index_dict, 'problem_list')
+ problem = str(problem) if problem is not None else ''
+ problem_list = [{'content': p[0:255]} for p in problem.split('\n') if len(p.strip()) > 0]
+ title = get_row_value(row, title_row_index_dict, 'title')
+ title = str(title) if title is not None else ''
+ paragraph_list.append({'title': title[0:255],
+ 'content': content[0:102400],
+ 'problem_list': problem_list})
+ return [{'name': file.name, 'paragraphs': paragraph_list}]
+ except Exception as e:
+ return [{'name': file.name, 'paragraphs': []}]
diff --git a/apps/common/handle/impl/qa/xls_parse_qa_handle.py b/apps/common/handle/impl/qa/xls_parse_qa_handle.py
new file mode 100644
index 00000000000..06edb1fb300
--- /dev/null
+++ b/apps/common/handle/impl/qa/xls_parse_qa_handle.py
@@ -0,0 +1,61 @@
+# coding=utf-8
+"""
+ @project: maxkb
+ @Author:虎
+ @file: xls_parse_qa_handle.py
+ @date:2024/5/21 14:59
+ @desc:
+"""
+
+import xlrd
+
+from common.handle.base_parse_qa_handle import BaseParseQAHandle, get_title_row_index_dict, get_row_value
+
+
+def handle_sheet(file_name, sheet):
+ rows = iter([sheet.row_values(i) for i in range(sheet.nrows)])
+ try:
+ title_row_list = next(rows)
+ except Exception as e:
+ return {'name': file_name, 'paragraphs': []}
+ if len(title_row_list) == 0:
+ return {'name': file_name, 'paragraphs': []}
+ title_row_index_dict = get_title_row_index_dict(title_row_list)
+ paragraph_list = []
+ for row in rows:
+ content = get_row_value(row, title_row_index_dict, 'content')
+ if content is None:
+ continue
+ problem = get_row_value(row, title_row_index_dict, 'problem_list')
+ problem = str(problem) if problem is not None else ''
+ problem_list = [{'content': p[0:255]} for p in problem.split('\n') if len(p.strip()) > 0]
+ title = get_row_value(row, title_row_index_dict, 'title')
+ title = str(title) if title is not None else ''
+ content = str(content)
+ paragraph_list.append({'title': title[0:255],
+ 'content': content[0:102400],
+ 'problem_list': problem_list})
+ return {'name': file_name, 'paragraphs': paragraph_list}
+
+
+class XlsParseQAHandle(BaseParseQAHandle):
+ def support(self, file, get_buffer):
+ file_name: str = file.name.lower()
+ buffer = get_buffer(file)
+ if file_name.endswith(".xls") and xlrd.inspect_format(content=buffer):
+ return True
+ return False
+
+ def handle(self, file, get_buffer, save_image):
+ buffer = get_buffer(file)
+ try:
+ workbook = xlrd.open_workbook(file_contents=buffer)
+ worksheets = workbook.sheets()
+ worksheets_size = len(worksheets)
+ return [row for row in
+ [handle_sheet(file.name,
+ sheet) if worksheets_size == 1 and sheet.name == 'Sheet1' else handle_sheet(
+ sheet.name, sheet) for sheet
+ in worksheets] if row is not None]
+ except Exception as e:
+ return [{'name': file.name, 'paragraphs': []}]
diff --git a/apps/common/handle/impl/qa/xlsx_parse_qa_handle.py b/apps/common/handle/impl/qa/xlsx_parse_qa_handle.py
new file mode 100644
index 00000000000..c3ee40d5360
--- /dev/null
+++ b/apps/common/handle/impl/qa/xlsx_parse_qa_handle.py
@@ -0,0 +1,72 @@
+# coding=utf-8
+"""
+ @project: maxkb
+ @Author:虎
+ @file: xlsx_parse_qa_handle.py
+ @date:2024/5/21 14:59
+ @desc:
+"""
+import io
+
+import openpyxl
+
+from common.handle.base_parse_qa_handle import BaseParseQAHandle, get_title_row_index_dict, get_row_value
+from common.handle.impl.tools import xlsx_embed_cells_images
+
+
+def handle_sheet(file_name, sheet, image_dict):
+ rows = sheet.rows
+ try:
+ title_row_list = next(rows)
+ title_row_list = [row.value for row in title_row_list]
+ except Exception as e:
+ return {'name': file_name, 'paragraphs': []}
+ if len(title_row_list) == 0:
+ return {'name': file_name, 'paragraphs': []}
+ title_row_index_dict = get_title_row_index_dict(title_row_list)
+ paragraph_list = []
+ for row in rows:
+ content = get_row_value(row, title_row_index_dict, 'content')
+ if content is None or content.value is None:
+ continue
+ problem = get_row_value(row, title_row_index_dict, 'problem_list')
+ problem = str(problem.value) if problem is not None and problem.value is not None else ''
+ problem_list = [{'content': p[0:255]} for p in problem.split('\n') if len(p.strip()) > 0]
+ title = get_row_value(row, title_row_index_dict, 'title')
+ title = str(title.value) if title is not None and title.value is not None else ''
+ content = str(content.value)
+ image = image_dict.get(content, None)
+ if image is not None:
+ content = f''
+ paragraph_list.append({'title': title[0:255],
+ 'content': content[0:102400],
+ 'problem_list': problem_list})
+ return {'name': file_name, 'paragraphs': paragraph_list}
+
+
+class XlsxParseQAHandle(BaseParseQAHandle):
+ def support(self, file, get_buffer):
+ file_name: str = file.name.lower()
+ if file_name.endswith(".xlsx"):
+ return True
+ return False
+
+ def handle(self, file, get_buffer, save_image):
+ buffer = get_buffer(file)
+ try:
+ workbook = openpyxl.load_workbook(io.BytesIO(buffer))
+ try:
+ image_dict: dict = xlsx_embed_cells_images(io.BytesIO(buffer))
+ save_image([item for item in image_dict.values()])
+ except Exception as e:
+ image_dict = {}
+ worksheets = workbook.worksheets
+ worksheets_size = len(worksheets)
+ return [row for row in
+ [handle_sheet(file.name,
+ sheet,
+ image_dict) if worksheets_size == 1 and sheet.title == 'Sheet1' else handle_sheet(
+ sheet.title, sheet, image_dict) for sheet
+ in worksheets] if row is not None]
+ except Exception as e:
+ return [{'name': file.name, 'paragraphs': []}]
diff --git a/apps/common/handle/impl/qa/zip_parse_qa_handle.py b/apps/common/handle/impl/qa/zip_parse_qa_handle.py
new file mode 100644
index 00000000000..6f2763516fb
--- /dev/null
+++ b/apps/common/handle/impl/qa/zip_parse_qa_handle.py
@@ -0,0 +1,163 @@
+# coding=utf-8
+"""
+ @project: maxkb
+ @Author:虎
+ @file: text_split_handle.py
+ @date:2024/3/27 18:19
+ @desc:
+"""
+import io
+import os
+import re
+import uuid
+import zipfile
+from typing import List
+from urllib.parse import urljoin
+
+from django.db.models import QuerySet
+
+from common.handle.base_parse_qa_handle import BaseParseQAHandle
+from common.handle.impl.qa.csv_parse_qa_handle import CsvParseQAHandle
+from common.handle.impl.qa.xls_parse_qa_handle import XlsParseQAHandle
+from common.handle.impl.qa.xlsx_parse_qa_handle import XlsxParseQAHandle
+from common.util.common import parse_md_image
+from dataset.models import Image
+from django.utils.translation import gettext_lazy as _
+
+class FileBufferHandle:
+ buffer = None
+
+ def get_buffer(self, file):
+ if self.buffer is None:
+ self.buffer = file.read()
+ return self.buffer
+
+
+split_handles = [XlsParseQAHandle(), XlsxParseQAHandle(), CsvParseQAHandle()]
+
+
+def save_inner_image(image_list):
+ """
+ 子模块插入图片逻辑
+ @param image_list:
+ @return:
+ """
+ if image_list is not None and len(image_list) > 0:
+ QuerySet(Image).bulk_create(image_list)
+
+
+def file_to_paragraph(file):
+ """
+ 文件转换为段落列表
+ @param file: 文件
+ @return: {
+ name:文件名
+ paragraphs:段落列表
+ }
+ """
+ get_buffer = FileBufferHandle().get_buffer
+ for split_handle in split_handles:
+ if split_handle.support(file, get_buffer):
+ return split_handle.handle(file, get_buffer, save_inner_image)
+ raise Exception(_("Unsupported file format"))
+
+
+def is_valid_uuid(uuid_str: str):
+ """
+ 校验字符串是否是uuid
+ @param uuid_str: 需要校验的字符串
+ @return: bool
+ """
+ try:
+ uuid.UUID(uuid_str)
+ except ValueError:
+ return False
+ return True
+
+
+def get_image_list(result_list: list, zip_files: List[str]):
+ """
+ 获取图片文件列表
+ @param result_list:
+ @param zip_files:
+ @return:
+ """
+ image_file_list = []
+ for result in result_list:
+ for p in result.get('paragraphs', []):
+ content: str = p.get('content', '')
+ image_list = parse_md_image(content)
+ for image in image_list:
+ search = re.search("\(.*\)", image)
+ if search:
+ new_image_id = str(uuid.uuid1())
+ source_image_path = search.group().replace('(', '').replace(')', '')
+ image_path = urljoin(result.get('name'), '.' + source_image_path if source_image_path.startswith(
+ '/') else source_image_path)
+ if not zip_files.__contains__(image_path):
+ continue
+ if image_path.startswith('api/file/') or image_path.startswith('api/image/'):
+ image_id = image_path.replace('api/file/', '').replace('api/image/', '')
+ if is_valid_uuid(image_id):
+ image_file_list.append({'source_file': image_path,
+ 'image_id': image_id})
+ else:
+ image_file_list.append({'source_file': image_path,
+ 'image_id': new_image_id})
+ content = content.replace(source_image_path, f'/api/image/{new_image_id}')
+ p['content'] = content
+ else:
+ image_file_list.append({'source_file': image_path,
+ 'image_id': new_image_id})
+ content = content.replace(source_image_path, f'/api/image/{new_image_id}')
+ p['content'] = content
+
+ return image_file_list
+
+
+def filter_image_file(result_list: list, image_list):
+ image_source_file_list = [image.get('source_file') for image in image_list]
+ return [r for r in result_list if not image_source_file_list.__contains__(r.get('name', ''))]
+
+
+class ZipParseQAHandle(BaseParseQAHandle):
+
+ def handle(self, file, get_buffer, save_image):
+ buffer = get_buffer(file)
+ bytes_io = io.BytesIO(buffer)
+ result = []
+ # 打开zip文件
+ with zipfile.ZipFile(bytes_io, 'r') as zip_ref:
+ # 获取压缩包中的文件名列表
+ files = zip_ref.namelist()
+ # 读取压缩包中的文件内容
+ for file in files:
+ # 跳过 macOS 特有的元数据目录和文件
+ if file.endswith('/') or file.startswith('__MACOSX'):
+ continue
+ with zip_ref.open(file) as f:
+ # 对文件内容进行处理
+ try:
+ value = file_to_paragraph(f)
+ if isinstance(value, list):
+ result = [*result, *value]
+ else:
+ result.append(value)
+ except Exception:
+ pass
+ image_list = get_image_list(result, files)
+ result = filter_image_file(result, image_list)
+ image_mode_list = []
+ for image in image_list:
+ with zip_ref.open(image.get('source_file')) as f:
+ i = Image(id=image.get('image_id'), image=f.read(),
+ image_name=os.path.basename(image.get('source_file')))
+ image_mode_list.append(i)
+ save_image(image_mode_list)
+ return result
+
+ def support(self, file, get_buffer):
+ file_name: str = file.name.lower()
+ if file_name.endswith(".zip") or file_name.endswith(".ZIP"):
+ return True
+ return False
diff --git a/apps/common/handle/impl/response/openai_to_response.py b/apps/common/handle/impl/response/openai_to_response.py
new file mode 100644
index 00000000000..f2b69384e50
--- /dev/null
+++ b/apps/common/handle/impl/response/openai_to_response.py
@@ -0,0 +1,52 @@
+# coding=utf-8
+"""
+ @project: MaxKB
+ @Author:虎
+ @file: openai_to_response.py
+ @date:2024/9/6 16:08
+ @desc:
+"""
+import datetime
+
+from django.http import JsonResponse
+from openai.types import CompletionUsage
+from openai.types.chat import ChatCompletionChunk, ChatCompletionMessage, ChatCompletion
+from openai.types.chat.chat_completion import Choice as BlockChoice
+from openai.types.chat.chat_completion_chunk import Choice, ChoiceDelta
+from rest_framework import status
+
+from common.handle.base_to_response import BaseToResponse
+
+
+class OpenaiToResponse(BaseToResponse):
+ def to_block_response(self, chat_id, chat_record_id, content, is_end, completion_tokens, prompt_tokens,
+ other_params: dict = None,
+ _status=status.HTTP_200_OK):
+ if other_params is None:
+ other_params = {}
+ data = ChatCompletion(id=chat_record_id, choices=[
+ BlockChoice(finish_reason='stop', index=0, chat_id=chat_id,
+ answer_list=other_params.get('answer_list', ""),
+ message=ChatCompletionMessage(role='assistant', content=content))],
+ created=datetime.datetime.now().second, model='', object='chat.completion',
+ usage=CompletionUsage(completion_tokens=completion_tokens,
+ prompt_tokens=prompt_tokens,
+ total_tokens=completion_tokens + prompt_tokens)
+ ).dict()
+ return JsonResponse(data=data, status=_status)
+
+ def to_stream_chunk_response(self, chat_id, chat_record_id, node_id, up_node_id_list, content, is_end,
+ completion_tokens,
+ prompt_tokens, other_params: dict = None):
+ if other_params is None:
+ other_params = {}
+ chunk = ChatCompletionChunk(id=chat_record_id, model='', object='chat.completion.chunk',
+ created=datetime.datetime.now().second, choices=[
+ Choice(delta=ChoiceDelta(content=content, reasoning_content=other_params.get('reasoning_content', ""),
+ chat_id=chat_id),
+ finish_reason='stop' if is_end else None,
+ index=0)],
+ usage=CompletionUsage(completion_tokens=completion_tokens,
+ prompt_tokens=prompt_tokens,
+ total_tokens=completion_tokens + prompt_tokens)).json()
+ return super().format_stream_chunk(chunk)
diff --git a/apps/common/handle/impl/response/system_to_response.py b/apps/common/handle/impl/response/system_to_response.py
new file mode 100644
index 00000000000..8df5ce1394b
--- /dev/null
+++ b/apps/common/handle/impl/response/system_to_response.py
@@ -0,0 +1,41 @@
+# coding=utf-8
+"""
+ @project: MaxKB
+ @Author:虎
+ @file: system_to_response.py
+ @date:2024/9/6 18:03
+ @desc:
+"""
+import json
+
+from rest_framework import status
+
+from common.handle.base_to_response import BaseToResponse
+from common.response import result
+
+
+class SystemToResponse(BaseToResponse):
+ def to_block_response(self, chat_id, chat_record_id, content, is_end, completion_tokens,
+ prompt_tokens, other_params: dict = None,
+ _status=status.HTTP_200_OK):
+ if other_params is None:
+ other_params = {}
+ return result.success({'chat_id': str(chat_id), 'id': str(chat_record_id), 'operate': True,
+ 'content': content, 'is_end': is_end, **other_params,
+ 'completion_tokens': completion_tokens, 'prompt_tokens': prompt_tokens},
+ response_status=_status,
+ code=_status)
+
+ def to_stream_chunk_response(self, chat_id, chat_record_id, node_id, up_node_id_list, content, is_end,
+ completion_tokens,
+ prompt_tokens, other_params: dict = None):
+ if other_params is None:
+ other_params = {}
+ chunk = json.dumps({'chat_id': str(chat_id), 'chat_record_id': str(chat_record_id), 'operate': True,
+ 'content': content, 'node_id': node_id, 'up_node_id_list': up_node_id_list,
+ 'is_end': is_end,
+ 'usage': {'completion_tokens': completion_tokens,
+ 'prompt_tokens': prompt_tokens,
+ 'total_tokens': completion_tokens + prompt_tokens},
+ **other_params})
+ return super().format_stream_chunk(chunk)
diff --git a/apps/common/handle/impl/table/csv_parse_table_handle.py b/apps/common/handle/impl/table/csv_parse_table_handle.py
new file mode 100644
index 00000000000..e2fc7ce863e
--- /dev/null
+++ b/apps/common/handle/impl/table/csv_parse_table_handle.py
@@ -0,0 +1,44 @@
+# coding=utf-8
+import logging
+
+from charset_normalizer import detect
+
+from common.handle.base_parse_table_handle import BaseParseTableHandle
+
+max_kb = logging.getLogger("max_kb")
+
+
+class CsvSplitHandle(BaseParseTableHandle):
+ def support(self, file, get_buffer):
+ file_name: str = file.name.lower()
+ if file_name.endswith(".csv"):
+ return True
+ return False
+
+ def handle(self, file, get_buffer,save_image):
+ buffer = get_buffer(file)
+ try:
+ content = buffer.decode(detect(buffer)['encoding'])
+ except BaseException as e:
+ max_kb.error(f'csv split handle error: {e}')
+ return [{'name': file.name, 'paragraphs': []}]
+
+ csv_model = content.split('\n')
+ paragraphs = []
+ # 第一行为标题
+ title = csv_model[0].split(',')
+ for row in csv_model[1:]:
+ if not row:
+ continue
+ line = '; '.join([f'{key}:{value}' for key, value in zip(title, row.split(','))])
+ paragraphs.append({'title': '', 'content': line})
+
+ return [{'name': file.name, 'paragraphs': paragraphs}]
+
+ def get_content(self, file, save_image):
+ buffer = file.read()
+ try:
+ return buffer.decode(detect(buffer)['encoding'])
+ except BaseException as e:
+ max_kb.error(f'csv split handle error: {e}')
+ return f'error: {e}'
\ No newline at end of file
diff --git a/apps/common/handle/impl/table/xls_parse_table_handle.py b/apps/common/handle/impl/table/xls_parse_table_handle.py
new file mode 100644
index 00000000000..897e347e8a8
--- /dev/null
+++ b/apps/common/handle/impl/table/xls_parse_table_handle.py
@@ -0,0 +1,94 @@
+# coding=utf-8
+import logging
+
+import xlrd
+
+from common.handle.base_parse_table_handle import BaseParseTableHandle
+
+max_kb = logging.getLogger("max_kb")
+
+
+class XlsSplitHandle(BaseParseTableHandle):
+ def support(self, file, get_buffer):
+ file_name: str = file.name.lower()
+ buffer = get_buffer(file)
+ if file_name.endswith(".xls") and xlrd.inspect_format(content=buffer):
+ return True
+ return False
+
+ def handle(self, file, get_buffer, save_image):
+ buffer = get_buffer(file)
+ try:
+ wb = xlrd.open_workbook(file_contents=buffer, formatting_info=True)
+ result = []
+ sheets = wb.sheets()
+ for sheet in sheets:
+ # 获取合并单元格的范围信息
+ merged_cells = sheet.merged_cells
+ print(merged_cells)
+ data = []
+ paragraphs = []
+ # 获取第一行作为标题行
+ headers = [sheet.cell_value(0, col_idx) for col_idx in range(sheet.ncols)]
+ # 从第二行开始遍历每一行(跳过标题行)
+ for row_idx in range(1, sheet.nrows):
+ row_data = {}
+ for col_idx in range(sheet.ncols):
+ cell_value = sheet.cell_value(row_idx, col_idx)
+
+ # 检查是否为空单元格,如果为空检查是否在合并区域中
+ if cell_value == "":
+ # 检查当前单元格是否在合并区域
+ for (rlo, rhi, clo, chi) in merged_cells:
+ if rlo <= row_idx < rhi and clo <= col_idx < chi:
+ # 使用合并区域的左上角单元格的值
+ cell_value = sheet.cell_value(rlo, clo)
+ break
+
+ # 将标题作为键,单元格的值作为值存入字典
+ row_data[headers[col_idx]] = cell_value
+ data.append(row_data)
+
+ for row in data:
+ row_output = "; ".join([f"{key}: {value}" for key, value in row.items()])
+ # print(row_output)
+ paragraphs.append({'title': '', 'content': row_output})
+
+ result.append({'name': sheet.name, 'paragraphs': paragraphs})
+
+ except BaseException as e:
+ max_kb.error(f'excel split handle error: {e}')
+ return [{'name': file.name, 'paragraphs': []}]
+ return result
+
+ def get_content(self, file, save_image):
+ # 打开 .xls 文件
+ try:
+ workbook = xlrd.open_workbook(file_contents=file.read(), formatting_info=True)
+ sheets = workbook.sheets()
+ md_tables = ''
+ for sheet in sheets:
+ # 过滤空白的sheet
+ if sheet.nrows == 0 or sheet.ncols == 0:
+ continue
+
+ # 获取表头和内容
+ headers = sheet.row_values(0)
+ data = [sheet.row_values(row_idx) for row_idx in range(1, sheet.nrows)]
+
+ # 构建 Markdown 表格
+ md_table = '| ' + ' | '.join(headers) + ' |\n'
+ md_table += '| ' + ' | '.join(['---'] * len(headers)) + ' |\n'
+ for row in data:
+ # 将每个单元格中的内容替换换行符为 以保留原始格式
+ md_table += '| ' + ' | '.join(
+ [str(cell)
+ .replace('\r\n', ' ')
+ .replace('\n', ' ')
+ if cell else '' for cell in row]) + ' |\n'
+ md_tables += md_table + '\n\n'
+
+ return md_tables
+ except Exception as e:
+ max_kb.error(f'excel split handle error: {e}')
+ return f'error: {e}'
diff --git a/apps/common/handle/impl/table/xlsx_parse_table_handle.py b/apps/common/handle/impl/table/xlsx_parse_table_handle.py
new file mode 100644
index 00000000000..a68eb14f1a1
--- /dev/null
+++ b/apps/common/handle/impl/table/xlsx_parse_table_handle.py
@@ -0,0 +1,107 @@
+# coding=utf-8
+import io
+import logging
+
+from openpyxl import load_workbook
+
+from common.handle.base_parse_table_handle import BaseParseTableHandle
+from common.handle.impl.tools import xlsx_embed_cells_images
+
+max_kb = logging.getLogger("max_kb")
+
+
+class XlsxSplitHandle(BaseParseTableHandle):
+ def support(self, file, get_buffer):
+ file_name: str = file.name.lower()
+ if file_name.endswith('.xlsx'):
+ return True
+ return False
+
+ def fill_merged_cells(self, sheet, image_dict):
+ data = []
+ # 从第二行开始遍历每一行
+ for row in sheet.iter_rows(values_only=False):
+ row_data = []
+ for col_idx, cell in enumerate(row):
+ cell_value = cell.value
+ image = image_dict.get(cell_value, None)
+ if image is not None:
+ cell_value = f''
+
+ # 使用标题作为键,单元格的值作为值存入字典
+ row_data.insert(col_idx, cell_value)
+ data.append(row_data)
+
+ for merged_range in sheet.merged_cells.ranges:
+ cell_value = data[merged_range.min_row - 1][merged_range.min_col - 1]
+ for row_index in range(merged_range.min_row, merged_range.max_row + 1):
+ for col_index in range(merged_range.min_col, merged_range.max_col + 1):
+ data[row_index - 1][col_index - 1] = cell_value
+ return data
+
+ def handle(self, file, get_buffer, save_image):
+ buffer = get_buffer(file)
+ try:
+ wb = load_workbook(io.BytesIO(buffer))
+ try:
+ image_dict: dict = xlsx_embed_cells_images(io.BytesIO(buffer))
+ save_image([item for item in image_dict.values()])
+ except Exception as e:
+ image_dict = {}
+ result = []
+ for sheetname in wb.sheetnames:
+ paragraphs = []
+ ws = wb[sheetname]
+ data = self.fill_merged_cells(ws, image_dict)
+ if len(data) >= 2:
+ head_list = data[0]
+ for row_index in range(1, len(data)):
+ row_output = "; ".join(
+ [f"{head_list[col_index]}: {data[row_index][col_index]}" for col_index in
+ range(0, len(data[row_index]))])
+ paragraphs.append({'title': '', 'content': row_output})
+
+ result.append({'name': sheetname, 'paragraphs': paragraphs})
+
+ except BaseException as e:
+ max_kb.error(f'excel split handle error: {e}')
+ return [{'name': file.name, 'paragraphs': []}]
+ return result
+
+ def get_content(self, file, save_image):
+ try:
+ # 加载 Excel 文件
+ workbook = load_workbook(file)
+ try:
+ image_dict: dict = xlsx_embed_cells_images(file)
+ if len(image_dict) > 0:
+ save_image(image_dict.values())
+ except Exception as e:
+ print(f'{e}')
+ image_dict = {}
+ md_tables = ''
+ # 如果未指定 sheet_name,则使用第一个工作表
+ for sheetname in workbook.sheetnames:
+ sheet = workbook[sheetname] if sheetname else workbook.active
+ data = self.fill_merged_cells(sheet, image_dict)
+ if len(data) == 0:
+ continue
+ # 提取表头和内容
+
+ headers = [f"{value}" for value in data[0]]
+
+ # 构建 Markdown 表格
+ md_table = '| ' + ' | '.join(headers) + ' |\n'
+ md_table += '| ' + ' | '.join(['---'] * len(headers)) + ' |\n'
+ for row_index in range(1, len(data)):
+ r = [f'{value}' for value in data[row_index]]
+ md_table += '| ' + ' | '.join(
+ [str(cell).replace('\n', ' ') if cell is not None else '' for cell in r]) + ' |\n'
+
+ md_tables += md_table + '\n\n'
+
+ md_tables = md_tables.replace('/api/image/', '/api/file/')
+ return md_tables
+ except Exception as e:
+ max_kb.error(f'excel split handle error: {e}')
+ return f'error: {e}'
diff --git a/apps/common/handle/impl/text_split_handle.py b/apps/common/handle/impl/text_split_handle.py
index a773b3bbb56..9d91d874d3d 100644
--- a/apps/common/handle/impl/text_split_handle.py
+++ b/apps/common/handle/impl/text_split_handle.py
@@ -7,6 +7,7 @@
@desc:
"""
import re
+import traceback
from typing import List
from charset_normalizer import detect
@@ -26,7 +27,8 @@ class TextSplitHandle(BaseSplitHandle):
def support(self, file, get_buffer):
buffer = get_buffer(file)
file_name: str = file.name.lower()
- if file_name.endswith(".md") or file_name.endswith('.txt'):
+ if file_name.endswith(".md") or file_name.endswith('.txt') or file_name.endswith('.TXT') or file_name.endswith(
+ '.MD'):
return True
result = detect(buffer)
if result['encoding'] is not None and result['confidence'] is not None and result['encoding'] != 'ascii' and \
@@ -48,3 +50,11 @@ def handle(self, file, pattern_list: List, with_filter: bool, limit: int, get_bu
return {'name': file.name,
'content': split_model.parse(content)
}
+
+ def get_content(self, file, save_image):
+ buffer = file.read()
+ try:
+ return buffer.decode(detect(buffer)['encoding'])
+ except BaseException as e:
+ traceback.print_exception(e)
+ return f'{e}'
\ No newline at end of file
diff --git a/apps/common/handle/impl/tools.py b/apps/common/handle/impl/tools.py
new file mode 100644
index 00000000000..d041397a7ee
--- /dev/null
+++ b/apps/common/handle/impl/tools.py
@@ -0,0 +1,118 @@
+# coding=utf-8
+"""
+ @project: MaxKB
+ @Author:虎
+ @file: tools.py
+ @date:2024/9/11 16:41
+ @desc:
+"""
+import io
+import uuid
+from functools import reduce
+from io import BytesIO
+from xml.etree.ElementTree import fromstring
+from zipfile import ZipFile
+
+from PIL import Image as PILImage
+from openpyxl.drawing.image import Image as openpyxl_Image
+from openpyxl.packaging.relationship import get_rels_path, get_dependents
+from openpyxl.xml.constants import SHEET_DRAWING_NS, REL_NS, SHEET_MAIN_NS
+
+from common.handle.base_parse_qa_handle import get_title_row_index_dict, get_row_value
+from dataset.models import Image
+
+
+def parse_element(element) -> {}:
+ data = {}
+ xdr_namespace = "{%s}" % SHEET_DRAWING_NS
+ targets = level_order_traversal(element, xdr_namespace + "nvPicPr")
+ for target in targets:
+ cNvPr = embed = ""
+ for child in target:
+ if child.tag == xdr_namespace + "nvPicPr":
+ cNvPr = child[0].attrib["name"]
+ elif child.tag == xdr_namespace + "blipFill":
+ _rel_embed = "{%s}embed" % REL_NS
+ embed = child[0].attrib[_rel_embed]
+ if cNvPr:
+ data[cNvPr] = embed
+ return data
+
+
+def parse_element_sheet_xml(element) -> []:
+ data = []
+ xdr_namespace = "{%s}" % SHEET_MAIN_NS
+ targets = level_order_traversal(element, xdr_namespace + "f")
+ for target in targets:
+ for child in target:
+ if child.tag == xdr_namespace + "f":
+ data.append(child.text)
+ return data
+
+
+def level_order_traversal(root, flag: str) -> []:
+ queue = [root]
+ targets = []
+ while queue:
+ node = queue.pop(0)
+ children = [child.tag for child in node]
+ if flag in children:
+ targets.append(node)
+ continue
+ for child in node:
+ queue.append(child)
+ return targets
+
+
+def handle_images(deps, archive: ZipFile) -> []:
+ images = []
+ if not PILImage: # Pillow not installed, drop images
+ return images
+ for dep in deps:
+ try:
+ image_io = archive.read(dep.target)
+ image = openpyxl_Image(BytesIO(image_io))
+ except Exception as e:
+ print(e)
+ continue
+ image.embed = dep.id # 文件rId
+ image.target = dep.target # 文件地址
+ images.append(image)
+ return images
+
+
+def xlsx_embed_cells_images(buffer) -> {}:
+ archive = ZipFile(buffer)
+ # 解析cellImage.xml文件
+ deps = get_dependents(archive, get_rels_path("xl/cellimages.xml"))
+ image_rel = handle_images(deps=deps, archive=archive)
+ # 工作表及其中图片ID
+ sheet_list = {}
+ for item in archive.namelist():
+ if not item.startswith('xl/worksheets/sheet'):
+ continue
+ key = item.split('/')[-1].split('.')[0].split('sheet')[-1]
+ sheet_list[key] = parse_element_sheet_xml(fromstring(archive.read(item)))
+ cell_images_xml = parse_element(fromstring(archive.read("xl/cellimages.xml")))
+ cell_images_rel = {}
+ for image in image_rel:
+ cell_images_rel[image.embed] = image
+ for cnv, embed in cell_images_xml.items():
+ cell_images_xml[cnv] = cell_images_rel.get(embed)
+ result = {}
+ for key, img in cell_images_xml.items():
+ image_excel_id_list = [_xl for _xl in
+ reduce(lambda x, y: [*x, *y], [sheet for sheet_id, sheet in sheet_list.items()], []) if
+ key in _xl]
+ if len(image_excel_id_list) > 0:
+ image_excel_id = image_excel_id_list[-1]
+ f = archive.open(img.target)
+ img_byte = io.BytesIO()
+ im = PILImage.open(f).convert('RGB')
+ im.save(img_byte, format='JPEG')
+ image = Image(id=uuid.uuid1(), image=img_byte.getvalue(), image_name=img.path)
+ result['=' + image_excel_id] = image
+ archive.close()
+ return result
+
+
diff --git a/apps/common/handle/impl/xls_split_handle.py b/apps/common/handle/impl/xls_split_handle.py
new file mode 100644
index 00000000000..dbdcc95506d
--- /dev/null
+++ b/apps/common/handle/impl/xls_split_handle.py
@@ -0,0 +1,80 @@
+# coding=utf-8
+"""
+ @project: maxkb
+ @Author:虎
+ @file: xls_parse_qa_handle.py
+ @date:2024/5/21 14:59
+ @desc:
+"""
+from typing import List
+
+import xlrd
+
+from common.handle.base_split_handle import BaseSplitHandle
+
+
+def post_cell(cell_value):
+ return cell_value.replace('\r\n', ' ').replace('\n', ' ').replace('|', '|')
+
+
+def row_to_md(row):
+ return '| ' + ' | '.join(
+ [post_cell(str(cell)) if cell is not None else '' for cell in row]) + ' |\n'
+
+
+def handle_sheet(file_name, sheet, limit: int):
+ rows = iter([sheet.row_values(i) for i in range(sheet.nrows)])
+ paragraphs = []
+ result = {'name': file_name, 'content': paragraphs}
+ try:
+ title_row_list = next(rows)
+ title_md_content = row_to_md(title_row_list)
+ title_md_content += '| ' + ' | '.join(
+ ['---' if cell is not None else '' for cell in title_row_list]) + ' |\n'
+ except Exception as e:
+ return result
+ if len(title_row_list) == 0:
+ return result
+ result_item_content = ''
+ for row in rows:
+ next_md_content = row_to_md(row)
+ next_md_content_len = len(next_md_content)
+ result_item_content_len = len(result_item_content)
+ if len(result_item_content) == 0:
+ result_item_content += title_md_content
+ result_item_content += next_md_content
+ else:
+ if result_item_content_len + next_md_content_len < limit:
+ result_item_content += next_md_content
+ else:
+ paragraphs.append({'content': result_item_content, 'title': ''})
+ result_item_content = title_md_content + next_md_content
+ if len(result_item_content) > 0:
+ paragraphs.append({'content': result_item_content, 'title': ''})
+ return result
+
+
+class XlsSplitHandle(BaseSplitHandle):
+ def handle(self, file, pattern_list: List, with_filter: bool, limit: int, get_buffer, save_image):
+ buffer = get_buffer(file)
+ try:
+ workbook = xlrd.open_workbook(file_contents=buffer)
+ worksheets = workbook.sheets()
+ worksheets_size = len(worksheets)
+ return [row for row in
+ [handle_sheet(file.name,
+ sheet, limit) if worksheets_size == 1 and sheet.name == 'Sheet1' else handle_sheet(
+ sheet.name, sheet, limit) for sheet
+ in worksheets] if row is not None]
+ except Exception as e:
+ return [{'name': file.name, 'content': []}]
+
+ def get_content(self, file, save_image):
+ pass
+
+ def support(self, file, get_buffer):
+ file_name: str = file.name.lower()
+ buffer = get_buffer(file)
+ if file_name.endswith(".xls") and xlrd.inspect_format(content=buffer):
+ return True
+ return False
diff --git a/apps/common/handle/impl/xlsx_split_handle.py b/apps/common/handle/impl/xlsx_split_handle.py
new file mode 100644
index 00000000000..22ad23146f4
--- /dev/null
+++ b/apps/common/handle/impl/xlsx_split_handle.py
@@ -0,0 +1,92 @@
+# coding=utf-8
+"""
+ @project: maxkb
+ @Author:虎
+ @file: xlsx_parse_qa_handle.py
+ @date:2024/5/21 14:59
+ @desc:
+"""
+import io
+from typing import List
+
+import openpyxl
+
+from common.handle.base_split_handle import BaseSplitHandle
+from common.handle.impl.tools import xlsx_embed_cells_images
+
+
def post_cell(image_dict, cell_value):
    """Render one worksheet cell as markdown-safe text.

    If the raw cell value matches an extracted embedded image id, emit a
    markdown image link pointing at the stored image; otherwise strip
    newlines and escape pipe characters so the cell cannot break the
    generated markdown table.
    """
    image = image_dict.get(cell_value, None)
    if image is not None:
        # Cell held an embedded image: reference it through the image API.
        # (The original line had the link text stripped to an empty f-string.)
        return f'![](/api/image/{image.id})'
    # A literal '|' would terminate the table cell; '&#124;' renders as a pipe.
    return cell_value.replace('\n', ' ').replace('|', '&#124;')
+
+
def row_to_md(row, image_dict):
    """Render one worksheet row as a single markdown table line."""
    cells = []
    for cell in row:
        if cell is None:
            cells.append('')
        else:
            raw = cell.value if cell.value is not None else ''
            cells.append(post_cell(image_dict, str(raw)))
    return '| ' + ' | '.join(cells) + ' |\n'
+
+
def handle_sheet(file_name, sheet, image_dict, limit: int):
    """Split one worksheet into markdown-table paragraphs bounded by `limit`.

    The header row plus a `---` separator line is repeated at the top of each
    paragraph so every chunk is a self-contained markdown table.  Returns
    {'name': file_name, 'content': [...]}; an unreadable or empty sheet
    yields an empty content list.
    """
    row_iter = sheet.rows
    paragraphs = []
    result = {'name': file_name, 'content': paragraphs}
    try:
        header_row = next(row_iter)
        header_md = row_to_md(header_row, image_dict)
        header_md += '| ' + ' | '.join(
            '---' if cell is not None else '' for cell in header_row) + ' |\n'
    except Exception as e:
        # No rows at all (or unreadable header): nothing to split.
        return result
    if len(header_row) == 0:
        return result
    chunk = ''
    for row in row_iter:
        row_md = row_to_md(row, image_dict)
        if chunk == '':
            # Start a new chunk with the repeated header.
            chunk = header_md + row_md
        elif len(chunk) + len(row_md) < limit:
            chunk += row_md
        else:
            paragraphs.append({'content': chunk, 'title': ''})
            chunk = header_md + row_md
    if chunk:
        paragraphs.append({'content': chunk, 'title': ''})
    return result
+
+
class XlsxSplitHandle(BaseSplitHandle):
    """Split handler for .xlsx workbooks (parsed with openpyxl)."""

    def handle(self, file, pattern_list: List, with_filter: bool, limit: int, get_buffer, save_image):
        """Convert each worksheet into markdown paragraphs, resolving embedded
        cell images through the image store when extraction succeeds."""
        data = get_buffer(file)
        try:
            workbook = openpyxl.load_workbook(io.BytesIO(data))
            # Embedded images are optional; extraction failure falls back to
            # plain text cells.
            try:
                image_dict: dict = xlsx_embed_cells_images(io.BytesIO(data))
                save_image(list(image_dict.values()))
            except Exception as e:
                image_dict = {}
            sheets = workbook.worksheets
            single_default = len(sheets) == 1
            results = []
            for sheet in sheets:
                if single_default and sheet.title == 'Sheet1':
                    item = handle_sheet(file.name, sheet, image_dict, limit)
                else:
                    item = handle_sheet(sheet.title, sheet, image_dict, limit)
                if item is not None:
                    results.append(item)
            return results
        except Exception as e:
            return [{'name': file.name, 'content': []}]

    def get_content(self, file, save_image):
        pass

    def support(self, file, get_buffer):
        """Accept any file whose name ends with .xlsx (case-insensitive)."""
        return file.name.lower().endswith(".xlsx")
diff --git a/apps/common/handle/impl/zip_split_handle.py b/apps/common/handle/impl/zip_split_handle.py
new file mode 100644
index 00000000000..8d931c9e78e
--- /dev/null
+++ b/apps/common/handle/impl/zip_split_handle.py
@@ -0,0 +1,161 @@
+# coding=utf-8
+"""
+ @project: maxkb
+ @Author:虎
+ @file: text_split_handle.py
+ @date:2024/3/27 18:19
+ @desc:
+"""
+import io
+import os
+import re
+import uuid
+import zipfile
+from typing import List
+from urllib.parse import urljoin
+
+from charset_normalizer import detect
+from django.db.models import QuerySet
+
+from common.handle.base_split_handle import BaseSplitHandle
+from common.handle.impl.csv_split_handle import CsvSplitHandle
+from common.handle.impl.doc_split_handle import DocSplitHandle
+from common.handle.impl.html_split_handle import HTMLSplitHandle
+from common.handle.impl.pdf_split_handle import PdfSplitHandle
+from common.handle.impl.text_split_handle import TextSplitHandle
+from common.handle.impl.xls_split_handle import XlsSplitHandle
+from common.handle.impl.xlsx_split_handle import XlsxSplitHandle
+from common.util.common import parse_md_image
+from dataset.models import Image
+from django.utils.translation import gettext_lazy as _
+
+
class FileBufferHandle:
    """Memoize a file's raw bytes so several handlers share a single read."""

    buffer = None  # cached content; None until the first get_buffer call

    def get_buffer(self, file):
        """Read `file` once; later calls return the cached bytes unchanged."""
        if self.buffer is not None:
            return self.buffer
        self.buffer = file.read()
        return self.buffer
+
+
# Text is the catch-all handler, so it must come last in the chain.
default_split_handle = TextSplitHandle()
# Probe order matters: each handler's support() is asked in sequence and the
# first match wins.
split_handles = [HTMLSplitHandle(), DocSplitHandle(), PdfSplitHandle(), XlsxSplitHandle(), XlsSplitHandle(),
                 CsvSplitHandle(),
                 default_split_handle]
+
+
def save_inner_image(image_list):
    """Bulk-insert extracted images; None or an empty list is a no-op."""
    if image_list:
        QuerySet(Image).bulk_create(image_list)
+
+
def file_to_paragraph(file, pattern_list: List, with_filter: bool, limit: int):
    """Dispatch `file` to the first split handler whose support() accepts it.

    Raises:
        Exception: when no registered handler supports the file format.
    """
    get_buffer = FileBufferHandle().get_buffer
    handler = next((h for h in split_handles if h.support(file, get_buffer)), None)
    if handler is None:
        raise Exception(_('Unsupported file format'))
    return handler.handle(file, pattern_list, with_filter, limit, get_buffer, save_inner_image)
+
+
def is_valid_uuid(uuid_str: str):
    """Return True when `uuid_str` parses as a UUID (any version)."""
    try:
        uuid.UUID(uuid_str)
        return True
    except ValueError:
        return False
+
+
def get_image_list(result_list: list, zip_files: List[str]):
    """Collect image references from parsed zip-member paragraphs.

    Scans every paragraph for markdown image links, resolves each link
    relative to the paragraph's source file and keeps only links that point
    inside the archive.  Links already shaped like api/file/<uuid> or
    api/image/<uuid> keep their embedded id; every other link gets a fresh
    id and the paragraph content is rewritten to /api/image/<new id>.

    @param result_list: [{'name': source file, 'content': [{'content': md}]}]
    @param zip_files: member names present in the archive
    @return: [{'source_file': path-in-zip, 'image_id': id}, ...]
    """
    image_file_list = []
    for result in result_list:
        for p in result.get('content', []):
            content: str = p.get('content', '')
            for image in parse_md_image(content):
                # Raw string: '\(' in a plain literal is an invalid escape
                # sequence (SyntaxWarning on modern Python).
                search = re.search(r"\(.*\)", image)
                if not search:
                    continue
                source_image_path = search.group().replace('(', '').replace(')', '')
                source_image_path = source_image_path.strip().split(" ")[0]
                # Resolve (possibly absolute) links against the source file.
                image_path = urljoin(result.get('name'),
                                     '.' + source_image_path if source_image_path.startswith('/')
                                     else source_image_path)
                if image_path not in zip_files:
                    continue
                embedded_id = None
                if image_path.startswith('api/file/') or image_path.startswith('api/image/'):
                    candidate = image_path.replace('api/file/', '').replace('api/image/', '')
                    if is_valid_uuid(candidate):
                        embedded_id = candidate
                if embedded_id is not None:
                    # Reuse the id already encoded in the link; no rewrite.
                    image_file_list.append({'source_file': image_path,
                                            'image_id': embedded_id})
                else:
                    new_image_id = str(uuid.uuid1())
                    image_file_list.append({'source_file': image_path,
                                            'image_id': new_image_id})
                    content = content.replace(source_image_path, f'/api/image/{new_image_id}')
                    p['content'] = content

    return image_file_list
+
+
def get_file_name(file_name):
    """Best-effort repair of zip member names mis-decoded as cp437.

    Archives without the UTF-8 flag get their names decoded as cp437; this
    re-encodes the name and lets charset detection pick the real encoding.
    Any failure returns the name unchanged.
    """
    try:
        raw = file_name.encode('cp437')
        encoding = detect(raw)['encoding']
        return raw.decode(encoding)
    except Exception as e:
        return file_name
+
+
def filter_image_file(result_list: list, image_list):
    """Drop parsed results that are themselves extracted image files."""
    image_sources = {image.get('source_file') for image in image_list}
    return [r for r in result_list if r.get('name', '') not in image_sources]
+
+
class ZipSplitHandle(BaseSplitHandle):
    """Split handler for zip archives: unpacks members, delegates each to the
    regular handler chain and stores any images the paragraphs reference."""

    def handle(self, file, pattern_list: List, with_filter: bool, limit: int, get_buffer, save_image):
        buffer = get_buffer(file)
        bytes_io = io.BytesIO(buffer)
        result = []
        # Open the zip archive
        with zipfile.ZipFile(bytes_io, 'r') as zip_ref:
            # List the member names in the archive
            files = zip_ref.namelist()
            # Parse each member's content.  Renamed loop variable: the
            # original shadowed the `file` parameter.
            for member in files:
                if member.endswith('/') or member.startswith('__MACOSX'):
                    continue
                with zip_ref.open(member) as f:
                    try:
                        # Repair cp437-mangled member names before dispatch.
                        f.name = get_file_name(f.name)
                        value = file_to_paragraph(f, pattern_list, with_filter, limit)
                        if isinstance(value, list):
                            result = [*result, *value]
                        else:
                            result.append(value)
                    except Exception:
                        # Best-effort: skip members no handler can parse.
                        pass
            image_list = get_image_list(result, files)
            result = filter_image_file(result, image_list)
            image_mode_list = []
            for image in image_list:
                with zip_ref.open(image.get('source_file')) as f:
                    i = Image(id=image.get('image_id'), image=f.read(),
                              image_name=os.path.basename(image.get('source_file')))
                    image_mode_list.append(i)
            save_image(image_mode_list)
        return result

    def support(self, file, get_buffer):
        # The name is lower-cased first, so the old extra ".ZIP" check was
        # dead code.
        return file.name.lower().endswith(".zip")

    def get_content(self, file, save_image):
        return ""
diff --git a/apps/common/init/init_doc.py b/apps/common/init/init_doc.py
new file mode 100644
index 00000000000..d66b0666370
--- /dev/null
+++ b/apps/common/init/init_doc.py
@@ -0,0 +1,93 @@
+# coding=utf-8
+"""
+ @project: maxkb
+ @Author:虎
+ @file: init_doc.py
+ @date:2024/5/24 14:11
+ @desc:
+"""
+import hashlib
+
+from django.urls import re_path, path, URLPattern
+from drf_yasg import openapi
+from drf_yasg.views import get_schema_view
+from rest_framework import permissions
+
+from common.auth import AnonymousAuthentication
+from smartdoc.const import CONFIG
+from django.utils.translation import gettext_lazy as _
+
+
def init_app_doc(application_urlpatterns):
    """Register the Swagger/Redoc documentation routes for the full API.

    Only invoked when the DOC_PASSWORD guard in init_list passes.
    """
    schema_view = get_schema_view(
        openapi.Info(
            title="Python API",
            default_version='v1',
            description=_('Intelligent customer service platform'),
        ),
        public=True,
        permission_classes=[permissions.AllowAny],
        authentication_classes=[AnonymousAuthentication]
    )
    application_urlpatterns += [
        # The capture group must be NAMED 'format' for drf-yasg's without_ui
        # view; the bare '(?P...)' form was an invalid regex.
        re_path(r'^doc(?P<format>\.json|\.yaml)$', schema_view.without_ui(cache_timeout=0),
                name='schema-json'),  # export schema as json/yaml
        path('doc/', schema_view.with_ui('swagger', cache_timeout=0), name='schema-swagger-ui'),
        path('redoc/', schema_view.with_ui('redoc', cache_timeout=0), name='schema-redoc'),
    ]
+
+
def init_chat_doc(application_urlpatterns, patterns):
    """Register a second Swagger/Redoc doc site restricted to the chat API.

    @param application_urlpatterns: root urlpatterns list to append to
    @param patterns: full url pattern list; only the application message/open/
                     profile endpoints are exposed on this doc site
    """
    chat_schema_view = get_schema_view(
        openapi.Info(
            title="Python API",
            default_version='/chat',
            description=_('Intelligent customer service platform'),
        ),
        public=True,
        permission_classes=[permissions.AllowAny],
        authentication_classes=[AnonymousAuthentication],
        # Rebuild each selected pattern with the 'api/' prefix so the
        # generated doc paths match the real mount point.
        patterns=[
            URLPattern(pattern='api/' + str(url.pattern), callback=url.callback, default_args=url.default_args,
                       name=url.name)
            for url in patterns if
            url.name is not None and ['application/message', 'application/open',
                                      'application/profile'].__contains__(
                url.name)]
    )

    application_urlpatterns += [
        path('doc/chat/', chat_schema_view.with_ui('swagger', cache_timeout=0), name='schema-swagger-ui'),
        path('redoc/chat/', chat_schema_view.with_ui('redoc', cache_timeout=0), name='schema-redoc'),
    ]
+
+
def encrypt(text):
    """Return the hex MD5 digest of `text` (used as a password fingerprint)."""
    return hashlib.md5(text.encode()).hexdigest()
+
+
def get_call(application_urlpatterns, patterns, params, func):
    """Wrap `func` so it only runs when params['valid']() holds, fed with the
    arguments produced by params['get_params']."""

    def run():
        if not params['valid']():
            return
        func(*params['get_params'](application_urlpatterns, patterns))

    return run
+
+
# Registration table: (init function, guard/params).  'valid' gates each doc
# site behind a hashed DOC_PASSWORD; the literal is the md5 of the expected
# password.  NOTE(review): the second entry ends with 'or True', which makes
# its guard always pass (the chat doc is always exposed) — confirm intended.
init_list = [(init_app_doc, {'valid': lambda: CONFIG.get('DOC_PASSWORD') is not None and encrypt(
    CONFIG.get('DOC_PASSWORD')) == 'd4fc097197b4b90a122b92cbd5bbe867',
                             'get_call': get_call,
                             'get_params': lambda application_urlpatterns, patterns: (application_urlpatterns,)}),
             (init_chat_doc, {'valid': lambda: CONFIG.get('DOC_PASSWORD') is not None and encrypt(
                 CONFIG.get('DOC_PASSWORD')) == 'd4fc097197b4b90a122b92cbd5bbe867' or True, 'get_call': get_call,
                              'get_params': lambda application_urlpatterns, patterns: (
                                  application_urlpatterns, patterns)})]
+
+
def init_doc(application_urlpatterns, patterns):
    """Run every registered doc initializer whose guard currently passes."""
    for init, params in init_list:
        if not params['valid']():
            continue
        get_call(application_urlpatterns, patterns, params, init)()
diff --git a/apps/common/job/__init__.py b/apps/common/job/__init__.py
index 895bf7f5d97..286c81cae74 100644
--- a/apps/common/job/__init__.py
+++ b/apps/common/job/__init__.py
@@ -7,7 +7,11 @@
@desc:
"""
from .client_access_num_job import *
+from .clean_chat_job import *
+from .clean_debug_file_job import *
def run():
    # Start every background job; each job's run() guards itself with a file
    # lock (see the individual modules), so repeated calls are safe.
    client_access_num_job.run()
    clean_chat_job.run()
    clean_debug_file_job.run()
diff --git a/apps/common/job/clean_chat_job.py b/apps/common/job/clean_chat_job.py
new file mode 100644
index 00000000000..fb95c3a9fd0
--- /dev/null
+++ b/apps/common/job/clean_chat_job.py
@@ -0,0 +1,83 @@
+# coding=utf-8
+
+import logging
+import datetime
+
+from django.db import transaction
+from django.utils import timezone
+from apscheduler.schedulers.background import BackgroundScheduler
+from django_apscheduler.jobstores import DjangoJobStore
+from application.models import Application, Chat, ChatRecord
+from django.db.models import Q, Max
+from common.lock.impl.file_lock import FileLock
+from dataset.models import File
+
+
+from django.db import connection
+
# Scheduler persists its jobs in the database via django-apscheduler.
scheduler = BackgroundScheduler()
scheduler.add_jobstore(DjangoJobStore(), "default")
# Cross-process guard so only one worker registers the cleanup job.
lock = FileLock()
+
+
def clean_chat_log_job():
    """Purge chat records older than each application's retention window.

    Every application carries a `clean_time` in days (180 when unset);
    records older than that cutoff are deleted in batches of 500, together
    with the files they reference and any Chat rows left without records.
    """
    from django.utils.translation import gettext_lazy as _
    logging.getLogger("max_kb").info(_('start clean chat log'))
    now = timezone.now()

    applications = Application.objects.all().values('id', 'clean_time')
    # Per-application cutoff timestamp.
    cutoff_dates = {
        app['id']: now - datetime.timedelta(days=app['clean_time'] or 180)
        for app in applications
    }

    # One OR-ed condition per application: its records older than its cutoff.
    query_conditions = Q()
    for app_id, cutoff_date in cutoff_dates.items():
        query_conditions |= Q(chat__application_id=app_id, create_time__lt=cutoff_date)
    batch_size = 500
    while True:
        with transaction.atomic():
            chat_records = ChatRecord.objects.filter(query_conditions).select_related('chat').only('id', 'chat_id',
                                                                                                   'create_time')[
                           :batch_size]
            if not chat_records:
                break
            chat_record_ids = [record.id for record in chat_records]
            chat_ids = {record.chat_id for record in chat_records}

            # Max create_time per chat_id within this batch.
            max_create_times = ChatRecord.objects.filter(id__in=chat_record_ids).values('chat_id').annotate(
                max_create_time=Max('create_time'))

            # Collect files referenced by the records being deleted.
            # NOTE(review): this issues one File query per record (N+1);
            # consider batching if these tables grow large.
            files_to_delete = []
            for record in chat_records:
                max_create_time = next(
                    (item['max_create_time'] for item in max_create_times if item['chat_id'] == record.chat_id), None)
                if max_create_time:
                    files_to_delete.extend(
                        File.objects.filter(meta__chat_id=str(record.chat_id), create_time__lt=max_create_time)
                    )
            # Delete the chat records themselves.
            deleted_count = ChatRecord.objects.filter(id__in=chat_record_ids).delete()[0]

            # Remove chats that no longer have any records, then the files.
            Chat.objects.filter(chatrecord__isnull=True, id__in=chat_ids).delete()
            File.objects.filter(loid__in=[file.loid for file in files_to_delete]).delete()

            # A short batch means the backlog is exhausted.
            if deleted_count < batch_size:
                break

    logging.getLogger("max_kb").info(_('end clean chat log'))
+
+
def run():
    """Schedule the nightly chat-log cleanup (00:05), once per host."""
    if not lock.try_lock('clean_chat_log_job', 30 * 30):
        return
    try:
        scheduler.start()
        previous = scheduler.get_job(job_id='clean_chat_log')
        if previous is not None:
            previous.remove()
        scheduler.add_job(clean_chat_log_job, 'cron', hour='0', minute='5', id='clean_chat_log')
    finally:
        lock.un_lock('clean_chat_log_job')
diff --git a/apps/common/job/clean_debug_file_job.py b/apps/common/job/clean_debug_file_job.py
new file mode 100644
index 00000000000..c701dd7d2f0
--- /dev/null
+++ b/apps/common/job/clean_debug_file_job.py
@@ -0,0 +1,37 @@
+# coding=utf-8
+
+import logging
+from datetime import timedelta
+
+from apscheduler.schedulers.background import BackgroundScheduler
+from django.db.models import Q
+from django.utils import timezone
+from django_apscheduler.jobstores import DjangoJobStore
+
+from common.lock.impl.file_lock import FileLock
+from dataset.models import File
+
# Job store backed by the database so schedules survive restarts.
scheduler = BackgroundScheduler()
scheduler.add_jobstore(DjangoJobStore(), "default")
# Cross-process guard so only one worker registers the cleanup job.
lock = FileLock()
+
+
def clean_debug_file():
    """Delete debug-upload files older than two hours."""
    from django.utils.translation import gettext_lazy as _
    logger = logging.getLogger("max_kb")
    logger.info(_('start clean debug file'))
    cutoff = timezone.now() - timedelta(hours=2)
    # Remove the matching files
    File.objects.filter(Q(create_time__lt=cutoff) & Q(meta__debug=True)).delete()
    logger.info(_('end clean debug file'))
+
+
def run():
    """Schedule the 02:00 debug-file cleanup, once per host."""
    if not lock.try_lock('clean_debug_file', 30 * 30):
        return
    try:
        scheduler.start()
        previous = scheduler.get_job(job_id='clean_debug_file')
        if previous is not None:
            previous.remove()
        scheduler.add_job(clean_debug_file, 'cron', hour='2', minute='0', second='0', id='clean_debug_file')
    finally:
        lock.un_lock('clean_debug_file')
diff --git a/apps/common/job/client_access_num_job.py b/apps/common/job/client_access_num_job.py
index 4c03fd2100a..6488a602555 100644
--- a/apps/common/job/client_access_num_job.py
+++ b/apps/common/job/client_access_num_job.py
@@ -13,21 +13,28 @@
from django_apscheduler.jobstores import DjangoJobStore
from application.models.api_key_model import ApplicationPublicAccessClient
+from common.lock.impl.file_lock import FileLock
scheduler = BackgroundScheduler()
scheduler.add_jobstore(DjangoJobStore(), "default")
+lock = FileLock()
def client_access_num_reset_job():
    # Reset every public client's per-day access counter back to zero.
    from django.utils.translation import gettext_lazy as _
    logging.getLogger("max_kb").info(_('start reset access_num'))
    QuerySet(ApplicationPublicAccessClient).update(intraday_access_num=0)
    logging.getLogger("max_kb").info(_('end reset access_num'))
def run():
    # Register the midnight counter reset; the file lock ensures only one
    # process (re)schedules the job.
    if lock.try_lock('client_access_num_reset_job', 30 * 30):
        try:
            scheduler.start()
            access_num_reset = scheduler.get_job(job_id='access_num_reset')
            if access_num_reset is not None:
                access_num_reset.remove()
            scheduler.add_job(client_access_num_reset_job, 'cron', hour='0', minute='0', second='0',
                              id='access_num_reset')
        finally:
            lock.un_lock('client_access_num_reset_job')
diff --git a/apps/common/lock/base_lock.py b/apps/common/lock/base_lock.py
new file mode 100644
index 00000000000..2ca5b21dada
--- /dev/null
+++ b/apps/common/lock/base_lock.py
@@ -0,0 +1,20 @@
+# coding=utf-8
+"""
+ @project: MaxKB
+ @Author:虎
+ @file: base_lock.py
+ @date:2024/8/20 10:33
+ @desc:
+"""
+
+from abc import ABC, abstractmethod
+
+
class BaseLock(ABC):
    """Minimal lock contract: non-blocking acquire with timeout + release."""

    @abstractmethod
    def try_lock(self, key, timeout):
        """Attempt to take the lock named `key`; True on success."""

    @abstractmethod
    def un_lock(self, key):
        """Release the lock named `key`."""
diff --git a/apps/common/lock/impl/file_lock.py b/apps/common/lock/impl/file_lock.py
new file mode 100644
index 00000000000..f8ea6396cf5
--- /dev/null
+++ b/apps/common/lock/impl/file_lock.py
@@ -0,0 +1,77 @@
+# coding=utf-8
+"""
+ @project: MaxKB
+ @Author:虎
+ @file: file_lock.py
+ @date:2024/8/20 10:48
+ @desc:
+"""
+import errno
+import hashlib
+import os
+import time
+
+import six
+
+from common.lock.base_lock import BaseLock
+from smartdoc.const import PROJECT_DIR
+
+
def key_to_lock_name(key):
    """
    Combine part of a key with its hash to prevent very long filenames.

    The result is capped at 50 characters: a prefix of the key plus an md5
    hex digest, which keeps distinct keys distinct even after truncation.
    """
    MAX_LENGTH = 50
    # latin-1 mirrors six.b()'s behaviour exactly; the six dependency is
    # unnecessary on a Python-3-only codebase.
    key_hash = hashlib.md5(key.encode('latin-1')).hexdigest()
    lock_name = key[:MAX_LENGTH - len(key_hash) - 1] + '_' + key_hash
    return lock_name
+
+
class FileLock(BaseLock):
    """
    File locking backend.

    A lock is an exclusively-created file under the lock directory; the
    file's mtime plus `timeout` decides when a stale lock may be stolen.
    Guards processes on one host — not a distributed lock.
    """

    def __init__(self, settings=None):
        # settings: optional {'location': dir} override for the lock directory;
        # defaults to <PROJECT_DIR>/data/lock.
        if settings is None:
            settings = {}
        self.location = settings.get('location')
        if self.location is None:
            self.location = os.path.join(PROJECT_DIR, 'data', 'lock')
        try:
            os.makedirs(self.location)
        except OSError as error:
            # Directory exists?
            if error.errno != errno.EEXIST:
                # Re-raise unexpected OSError
                raise

    def _get_lock_path(self, key):
        # One file per key; the name is shortened+hashed by key_to_lock_name.
        lock_name = key_to_lock_name(key)
        return os.path.join(self.location, lock_name)

    def try_lock(self, key, timeout):
        # Try to acquire `key`; a holder older than `timeout` seconds is
        # considered dead and the lock is stolen.  Returns True on success.
        lock_path = self._get_lock_path(key)
        try:
            # Create the lock file exclusively; failure means a holder exists.
            fd = os.open(lock_path, os.O_CREAT | os.O_EXCL)
        except OSError as error:
            if error.errno == errno.EEXIST:
                # File already exists, check its modification time
                mtime = os.path.getmtime(lock_path)
                ttl = mtime + timeout - time.time()
                if ttl > 0:
                    return False
                else:
                    # Holder timed out: refresh mtime and take the lock over.
                    os.utime(lock_path, None)
                    return True
            else:
                return False
        else:
            os.close(fd)
            return True

    def un_lock(self, key):
        # NOTE(review): raises FileNotFoundError when the lock file is gone —
        # confirm callers always hold the lock when releasing.
        lock_path = self._get_lock_path(key)
        os.remove(lock_path)
diff --git a/apps/common/log/log.py b/apps/common/log/log.py
new file mode 100644
index 00000000000..1942b4f994e
--- /dev/null
+++ b/apps/common/log/log.py
@@ -0,0 +1,100 @@
+# coding=utf-8
+"""
+ @project: MaxKB
+ @Author:虎
+ @file: log.py
+ @date:2025/3/14 16:09
+ @desc:
+"""
+from gettext import gettext
+
+from setting.models.log_management import Log
+
+
+def _get_ip_address(request):
+ """
+ 获取ip地址
+ @param request:
+ @return:
+ """
+ x_forwarded_for = request.META.get('HTTP_X_FORWARDED_FOR')
+ if x_forwarded_for:
+ ip = x_forwarded_for.split(',')[0]
+ else:
+ ip = request.META.get('REMOTE_ADDR')
+ return ip
+
+
+def _get_user(request):
+ """
+ 获取用户
+ @param request:
+ @return:
+ """
+ user = request.user
+ if user is None:
+ return {
+
+ }
+ return {
+ "id": str(user.id),
+ "email": user.email,
+ "phone": user.phone,
+ "nick_name": user.nick_name,
+ "username": user.username,
+ "role": user.role,
+ }
+
+
+def _get_details(request):
+ path = request.path
+ body = request.data
+ query = request.query_params
+ return {
+ 'path': path,
+ 'body': body,
+ 'query': query
+ }
+
+
def log(menu: str, operate, get_user=_get_user, get_ip_address=_get_ip_address, get_details=_get_details,
        get_operation_object=None):
    """
    Decorator that writes an audit-log row around a view method.

    @param menu: menu/category the operation belongs to (str)
    @param operate: operation label, either a str or a callable taking the
        request and returning a str, e.g. def operate(request): return "..."
    @param get_user: extracts the acting user from the request
    @param get_ip_address: extracts the client IP from the request
    @param get_details: extracts request details (path/body/query)
    @param get_operation_object: optional; builds the operated-on object from
        (request, view kwargs)
    @return: decorator for view methods with signature (view, request, **kwargs)
    """

    def inner(func):
        def run(view, request, **kwargs):
            status = 200
            operation_object = {}
            try:
                # Building the operation object is best-effort only.
                if get_operation_object is not None:
                    operation_object = get_operation_object(request, kwargs)
            except Exception as e:
                pass
            try:
                return func(view, request, **kwargs)
            except Exception as e:
                # Record the failure status, then let the error propagate.
                status = 500
                raise e
            finally:
                # Audit row is written whether the view succeeded or raised.
                ip = get_ip_address(request)
                user = get_user(request)
                details = get_details(request)
                _operate = operate
                if callable(operate):
                    _operate = operate(request)
                # Persist the audit log entry
                Log(menu=menu, operate=_operate, user=user, status=status, ip_address=ip, details=details,
                    operation_object=operation_object).save()

        return run

    return inner
diff --git a/apps/common/management/__init__.py b/apps/common/management/__init__.py
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/apps/common/management/commands/__init__.py b/apps/common/management/commands/__init__.py
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/apps/common/management/commands/celery.py b/apps/common/management/commands/celery.py
new file mode 100644
index 00000000000..a26b43597de
--- /dev/null
+++ b/apps/common/management/commands/celery.py
@@ -0,0 +1,46 @@
+# coding=utf-8
+"""
+ @project: MaxKB
+ @Author:虎
+ @file: celery.py
+ @date:2024/8/19 11:57
+ @desc:
+"""
+import os
+import subprocess
+
+from django.core.management.base import BaseCommand
+
+from smartdoc.const import BASE_DIR
+
+
class Command(BaseCommand):
    """Management command that launches a celery worker for the given queues."""

    help = 'celery'

    def add_arguments(self, parser):
        parser.add_argument(
            'service', nargs='+', type=str, choices=("celery", "model"), help='Service',
        )

    def handle(self, *args, **options):
        """Spawn `celery worker` listening on the requested queues."""
        queues = ','.join(options.get('service'))
        os.environ.setdefault('CELERY_NAME', queues)
        server_hostname = os.environ.get("SERVER_HOSTNAME")
        # Celery refuses to run as root unless forced.
        if hasattr(os, 'getuid') and os.getuid() == 0:
            os.environ.setdefault('C_FORCE_ROOT', '1')
        if not server_hostname:
            server_hostname = '%h'
        worker_cmd = [
            'celery',
            '-A', 'ops',
            'worker',
            '-P', 'threads',
            '-l', 'info',
            '-c', '10',
            '-Q', queues,
            '--heartbeat-interval', '10',
            '-n', f'{queues}@{server_hostname}',
            '--without-mingle',
        ]
        subprocess.run(worker_cmd, cwd=BASE_DIR)
diff --git a/apps/common/management/commands/restart.py b/apps/common/management/commands/restart.py
new file mode 100644
index 00000000000..57285f9c993
--- /dev/null
+++ b/apps/common/management/commands/restart.py
@@ -0,0 +1,6 @@
+from .services.command import BaseActionCommand, Action
+
+
class Command(BaseActionCommand):
    # Thin wrapper: reuses BaseActionCommand with the 'restart' action.
    help = 'Restart services'
    action = Action.restart.value
diff --git a/apps/common/management/commands/services/__init__.py b/apps/common/management/commands/services/__init__.py
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/apps/common/management/commands/services/command.py b/apps/common/management/commands/services/command.py
new file mode 100644
index 00000000000..0c97d4af378
--- /dev/null
+++ b/apps/common/management/commands/services/command.py
@@ -0,0 +1,134 @@
+import math
+
+from django.core.management.base import BaseCommand
+from django.db.models import TextChoices
+
+from .hands import *
+from .utils import ServicesUtil
+import os
+
+
class Services(TextChoices):
    """Logical service names accepted on the CLI, plus grouping helpers."""
    gunicorn = 'gunicorn', 'gunicorn'
    celery_default = 'celery_default', 'celery_default'
    local_model = 'local_model', 'local_model'
    web = 'web', 'web'
    celery = 'celery', 'celery'
    celery_model = 'celery_model', 'celery_model'
    task = 'task', 'task'
    all = 'all', 'all'

    @classmethod
    def get_service_object_class(cls, name):
        # Map service value -> concrete service class.  Keys normalized to
        # .value (the original mixed enum members and plain strings, which
        # only worked because TextChoices members are str subclasses).
        # Services without an entry (e.g. celery_model) are skipped by callers.
        from . import services
        services_map = {
            cls.gunicorn.value: services.GunicornService,
            cls.celery_default.value: services.CeleryDefaultService,
            cls.local_model.value: services.GunicornLocalModelService
        }
        return services_map.get(name)

    @classmethod
    def web_services(cls):
        # Services expanded for the 'web' alias.
        return [cls.gunicorn, cls.local_model]

    @classmethod
    def celery_services(cls):
        # Services expanded for the celery-based aliases.
        return [cls.celery_default, cls.celery_model]

    @classmethod
    def task_services(cls):
        return cls.celery_services()

    @classmethod
    def all_services(cls):
        return cls.web_services() + cls.task_services()

    @classmethod
    def export_services_values(cls):
        # Values accepted as CLI choices: group aliases + concrete services.
        return [cls.all.value, cls.web.value, cls.task.value] + [s.value for s in cls.all_services()]

    @classmethod
    def get_service_objects(cls, service_names, **kwargs):
        """Instantiate one service object per requested name, expanding group
        aliases ('web', 'task', 'all') via the *_services class methods."""
        services = set()
        for name in service_names:
            method_name = f'{name}_services'
            if hasattr(cls, method_name):
                _services = getattr(cls, method_name)()
            elif hasattr(cls, name):
                _services = [getattr(cls, name)]
            else:
                continue
            services.update(set(_services))

        service_objects = []
        for s in services:
            service_class = cls.get_service_object_class(s.value)
            if not service_class:
                continue
            kwargs.update({
                'name': s.value
            })
            service_object = service_class(**kwargs)
            service_objects.append(service_object)
        return service_objects
+
+
class Action(TextChoices):
    # Lifecycle actions a service command may perform.
    start = 'start', 'start'
    status = 'status', 'status'
    stop = 'stop', 'stop'
    restart = 'restart', 'restart'
+
+
class BaseActionCommand(BaseCommand):
    """Shared implementation for the start/stop/restart/status management
    commands; subclasses only set `action`."""

    help = 'Service Base Command'

    action = None  # set by subclasses to an Action value
    util = None    # ServicesUtil built in initial_util()

    def add_arguments(self, parser):
        parser.add_argument(
            'services', nargs='+', choices=Services.export_services_values(), help='Service',
        )
        parser.add_argument('-d', '--daemon', nargs="?", const=True)
        # Default worker count: 3 on large machines, else half the cores.
        # os.cpu_count() may return None; treat that as a single core.
        cpu_count = os.cpu_count() or 1
        parser.add_argument('-w', '--worker', type=int, nargs="?",
                            default=3 if cpu_count > 6 else math.floor(cpu_count / 2))
        parser.add_argument('-f', '--force', nargs="?", const=True)

    def initial_util(self, *args, **options):
        # Build the ServicesUtil driving the requested service objects.
        service_names = options.get('services')
        service_kwargs = {
            'worker_gunicorn': options.get('worker')
        }
        services = Services.get_service_objects(service_names=service_names, **service_kwargs)

        kwargs = {
            'services': services,
            'run_daemon': options.get('daemon', False),
            # Only stopping 'all' tears down the daemon itself.
            'stop_daemon': self.action == Action.stop.value and Services.all.value in service_names,
            'force_stop': options.get('force') or False,
        }
        self.util = ServicesUtil(**kwargs)

    def handle(self, *args, **options):
        self.initial_util(*args, **options)
        assert self.action in Action.values, f'The action {self.action} is not in the optional list'
        _handle = getattr(self, f'_handle_{self.action}', lambda: None)
        _handle()

    def _handle_start(self):
        self.util.start_and_watch()
        # Hard-exit so watcher threads cannot keep the process alive.
        os._exit(0)

    def _handle_stop(self):
        self.util.stop()

    def _handle_restart(self):
        self.util.restart()

    def _handle_status(self):
        self.util.show_status()
diff --git a/apps/common/management/commands/services/hands.py b/apps/common/management/commands/services/hands.py
new file mode 100644
index 00000000000..82447024ef2
--- /dev/null
+++ b/apps/common/management/commands/services/hands.py
@@ -0,0 +1,26 @@
import logging
import os
import sys

from smartdoc.const import CONFIG, PROJECT_DIR

# Resolve the application version; services refuse to start without it.
try:
    from apps.smartdoc import const

    __version__ = const.VERSION
except ImportError as e:
    print("Not found __version__: {}".format(e))
    print("Python is: ")
    logging.info(sys.executable)
    __version__ = 'Unknown'
    # NOTE(review): exiting on a missing version import is aggressive —
    # confirm 'apps.smartdoc' resolves in every deployment layout.
    sys.exit(1)

# Network defaults for the web services.
HTTP_HOST = '0.0.0.0'
HTTP_PORT = CONFIG.HTTP_LISTEN_PORT or 8080
DEBUG = CONFIG.DEBUG or False

# Shared filesystem layout used by the service wrappers.
LOG_DIR = os.path.join(PROJECT_DIR, 'data', 'logs')
APPS_DIR = os.path.join(PROJECT_DIR, 'apps')
TMP_DIR = os.path.join(PROJECT_DIR, 'tmp')
if not os.path.exists(TMP_DIR):
    os.makedirs(TMP_DIR)
diff --git a/apps/common/management/commands/services/services/__init__.py b/apps/common/management/commands/services/services/__init__.py
new file mode 100644
index 00000000000..1027392060c
--- /dev/null
+++ b/apps/common/management/commands/services/services/__init__.py
@@ -0,0 +1,3 @@
+from .celery_default import *
+from .gunicorn import *
+from .local_model import *
\ No newline at end of file
diff --git a/apps/common/management/commands/services/services/base.py b/apps/common/management/commands/services/services/base.py
new file mode 100644
index 00000000000..ddcb4feca3b
--- /dev/null
+++ b/apps/common/management/commands/services/services/base.py
@@ -0,0 +1,207 @@
+import abc
+import time
+import shutil
+import psutil
+import datetime
+import threading
+import subprocess
+from ..hands import *
+
+
class BaseService(object):
    """Wrap one child process (gunicorn/celery): start/stop it, track its pid
    file, watch it, and rotate its log file."""

    def __init__(self, **kwargs):
        self.name = kwargs['name']
        self._process = None
        self.STOP_TIMEOUT = 10
        # NOTE(review): retry (3) starts above max_retry (0), so _restart()
        # always gives up immediately — confirm that is intended.
        self.max_retry = 0
        self.retry = 3
        self.LOG_KEEP_DAYS = 7
        self.EXIT_EVENT = threading.Event()

    @property
    @abc.abstractmethod
    def cmd(self):
        # Command line used to launch the service.
        return []

    @property
    @abc.abstractmethod
    def cwd(self):
        # Working directory the service runs in.
        return ''

    @property
    def is_running(self):
        # Probe the recorded pid with signal 0 (existence check, no side effect).
        if self.pid == 0:
            return False
        try:
            os.kill(self.pid, 0)
        except (OSError, ProcessLookupError):
            return False
        else:
            return True

    def show_status(self):
        if self.is_running:
            msg = f'{self.name} is running: {self.pid}.'
        else:
            msg = f'{self.name} is stopped.'
            # Bug fix: this DEBUG hint used to overwrite the "running"
            # message as well; it only applies to a stopped service.
            if DEBUG:
                msg = '\033[31m{} is stopped.\033[0m\nYou can manual start it to find the error: \n' \
                      '  $ cd {}\n' \
                      '  $ {}'.format(self.name, self.cwd, ' '.join(self.cmd))

        print(msg)

    # -- log --
    @property
    def log_filename(self):
        return f'{self.name}.log'

    @property
    def log_filepath(self):
        return os.path.join(LOG_DIR, self.log_filename)

    @property
    def log_file(self):
        # Opened in append mode; handed to the child as stdout/stderr.
        return open(self.log_filepath, 'a')

    @property
    def log_dir(self):
        return os.path.dirname(self.log_filepath)
    # -- end log --

    # -- pid --
    @property
    def pid_filepath(self):
        return os.path.join(TMP_DIR, f'{self.name}.pid')

    @property
    def pid(self):
        # 0 means "no known pid" (missing or corrupt pid file).
        if not os.path.isfile(self.pid_filepath):
            return 0
        with open(self.pid_filepath) as f:
            try:
                pid = int(f.read().strip())
            except ValueError:
                pid = 0
        return pid

    def write_pid(self):
        with open(self.pid_filepath, 'w') as f:
            f.write(str(self.process.pid))

    def remove_pid(self):
        if os.path.isfile(self.pid_filepath):
            os.unlink(self.pid_filepath)
    # -- end pid --

    # -- process --
    @property
    def process(self):
        # Lazily attach to the recorded pid; stays None if it cannot be found.
        if not self._process:
            try:
                self._process = psutil.Process(self.pid)
            except:
                pass
        return self._process

    # -- end process --

    # -- action --
    def open_subprocess(self):
        kwargs = {'cwd': self.cwd, 'stderr': self.log_file, 'stdout': self.log_file}
        self._process = subprocess.Popen(self.cmd, **kwargs)

    def start(self):
        if self.is_running:
            self.show_status()
            return
        self.remove_pid()
        self.open_subprocess()
        self.write_pid()
        self.start_other()

    def start_other(self):
        # Hook for subclasses needing extra startup work.
        pass

    def stop(self, force=False):
        if not self.is_running:
            self.show_status()
            # self.remove_pid()
            return

        print(f'Stop service: {self.name}', end='')
        sig = 9 if force else 15
        os.kill(self.pid, sig)

        if self.process is None:
            print("\033[31m No process found\033[0m")
            return
        try:
            self.process.wait(1)
        except:
            pass

        for i in range(self.STOP_TIMEOUT):
            if i == self.STOP_TIMEOUT - 1:
                print("\033[31m Error\033[0m")
            if not self.is_running:
                print("\033[32m Ok\033[0m")
                self.remove_pid()
                break
            else:
                continue

    def watch(self):
        # One watchdog tick: report status, restart if dead, rotate logs.
        self._check()
        if not self.is_running:
            self._restart()
        self._rotate_log()

    def _check(self):
        now = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        print(f"{now} Check service status: {self.name} -> ", end='')
        if self.process:
            try:
                # Reap the child so it is not left as a zombie.
                self.process.wait(1)
            except:
                pass

        if self.is_running:
            print(f'running at {self.pid}')
        else:
            print(f'stopped at {self.pid}')

    def _restart(self):
        if self.retry > self.max_retry:
            logging.info("Service start failed, exit: {}".format(self.name))
            self.EXIT_EVENT.set()
            return
        self.retry += 1
        logging.info(f'> Find {self.name} stopped, retry {self.retry}, {self.pid}')
        self.start()

    def _rotate_log(self):
        # Only rotate in the 23:59 watchdog tick.
        now = datetime.datetime.now()
        _time = now.strftime('%H:%M')
        if _time != '23:59':
            return

        backup_date = now.strftime('%Y-%m-%d')
        backup_log_dir = os.path.join(self.log_dir, backup_date)
        if not os.path.exists(backup_log_dir):
            os.mkdir(backup_log_dir)

        backup_log_path = os.path.join(backup_log_dir, self.log_filename)
        if os.path.isfile(self.log_filepath) and not os.path.isfile(backup_log_path):
            logging.info(f'Rotate log file: {self.log_filepath} => {backup_log_path}')
            shutil.copy(self.log_filepath, backup_log_path)
            # Truncate the live log after copying.
            with open(self.log_filepath, 'w') as f:
                pass

        to_delete_date = now - datetime.timedelta(days=self.LOG_KEEP_DAYS)
        to_delete_dir = os.path.join(LOG_DIR, to_delete_date.strftime('%Y-%m-%d'))
        if os.path.exists(to_delete_dir):
            logging.info(f'Remove old log: {to_delete_dir}')
            shutil.rmtree(to_delete_dir, ignore_errors=True)
    # -- end action --
diff --git a/apps/common/management/commands/services/services/celery_base.py b/apps/common/management/commands/services/services/celery_base.py
new file mode 100644
index 00000000000..0ae219bd5eb
--- /dev/null
+++ b/apps/common/management/commands/services/services/celery_base.py
@@ -0,0 +1,45 @@
+from django.conf import settings
+
+from .base import BaseService
+from ..hands import *
+
+
+class CeleryBaseService(BaseService):
+
+ def __init__(self, queue, num=10, **kwargs):
+ super().__init__(**kwargs)
+ self.queue = queue
+ self.num = num
+
+ @property
+ def cmd(self):
+ print('\n- Start Celery as Distributed Task Queue: {}'.format(self.queue.capitalize()))
+
+ os.environ.setdefault('LC_ALL', 'C.UTF-8')
+ os.environ.setdefault('PYTHONOPTIMIZE', '1')
+ os.environ.setdefault('ANSIBLE_FORCE_COLOR', 'True')
+ os.environ.setdefault('PYTHONPATH', settings.APPS_DIR)
+
+ if os.getuid() == 0:
+ os.environ.setdefault('C_FORCE_ROOT', '1')
+ server_hostname = os.environ.get("SERVER_HOSTNAME")
+ if not server_hostname:
+ server_hostname = '%h'
+
+ cmd = [
+ 'celery',
+ '-A', 'ops',
+ 'worker',
+ '-P', 'threads',
+ '-l', 'error',
+ '-c', str(self.num),
+ '-Q', self.queue,
+ '--heartbeat-interval', '10',
+ '-n', f'{self.queue}@{server_hostname}',
+ '--without-mingle',
+ ]
+ return cmd
+
+ @property
+ def cwd(self):
+ return APPS_DIR
diff --git a/apps/common/management/commands/services/services/celery_default.py b/apps/common/management/commands/services/services/celery_default.py
new file mode 100644
index 00000000000..5d3e6d7b8a4
--- /dev/null
+++ b/apps/common/management/commands/services/services/celery_default.py
@@ -0,0 +1,10 @@
+from .celery_base import CeleryBaseService
+
+__all__ = ['CeleryDefaultService']
+
+
+class CeleryDefaultService(CeleryBaseService):
+
+ def __init__(self, **kwargs):
+ kwargs['queue'] = 'celery'
+ super().__init__(**kwargs)
diff --git a/apps/common/management/commands/services/services/gunicorn.py b/apps/common/management/commands/services/services/gunicorn.py
new file mode 100644
index 00000000000..cc42c4f7cb3
--- /dev/null
+++ b/apps/common/management/commands/services/services/gunicorn.py
@@ -0,0 +1,36 @@
+from .base import BaseService
+from ..hands import *
+
+__all__ = ['GunicornService']
+
+
+class GunicornService(BaseService):
+
+ def __init__(self, **kwargs):
+ self.worker = kwargs['worker_gunicorn']
+ super().__init__(**kwargs)
+
+ @property
+ def cmd(self):
+ print("\n- Start Gunicorn WSGI HTTP Server")
+
+ log_format = '%(h)s %(t)s %(L)ss "%(r)s" %(s)s %(b)s '
+ bind = f'{HTTP_HOST}:{HTTP_PORT}'
+ cmd = [
+ 'gunicorn', 'smartdoc.wsgi:application',
+ '-b', bind,
+ '-k', 'gthread',
+ '--threads', '200',
+ '-w', str(self.worker),
+ '--max-requests', '10240',
+ '--max-requests-jitter', '2048',
+ '--access-logformat', log_format,
+ '--access-logfile', '-'
+ ]
+ if DEBUG:
+ cmd.append('--reload')
+ return cmd
+
+ @property
+ def cwd(self):
+ return APPS_DIR
diff --git a/apps/common/management/commands/services/services/local_model.py b/apps/common/management/commands/services/services/local_model.py
new file mode 100644
index 00000000000..05f4f561009
--- /dev/null
+++ b/apps/common/management/commands/services/services/local_model.py
@@ -0,0 +1,45 @@
+# coding=utf-8
+"""
+ @project: MaxKB
+ @Author:虎
+ @file: local_model.py
+ @date:2024/8/21 13:28
+ @desc:
+"""
+from .base import BaseService
+from ..hands import *
+
+__all__ = ['GunicornLocalModelService']
+
+
+class GunicornLocalModelService(BaseService):
+
+ def __init__(self, **kwargs):
+ self.worker = kwargs['worker_gunicorn']
+ super().__init__(**kwargs)
+
+ @property
+ def cmd(self):
+ print("\n- Start Gunicorn Local Model WSGI HTTP Server")
+ os.environ.setdefault('SERVER_NAME', 'local_model')
+ log_format = '%(h)s %(t)s %(L)ss "%(r)s" %(s)s %(b)s '
+ bind = f'{CONFIG.get("LOCAL_MODEL_HOST")}:{CONFIG.get("LOCAL_MODEL_PORT")}'
+ worker = CONFIG.get("LOCAL_MODEL_HOST_WORKER", 1)
+ cmd = [
+ 'gunicorn', 'smartdoc.wsgi:application',
+ '-b', bind,
+ '-k', 'gthread',
+ '--threads', '200',
+ '-w', str(worker),
+ '--max-requests', '10240',
+ '--max-requests-jitter', '2048',
+ '--access-logformat', log_format,
+ '--access-logfile', '-'
+ ]
+ if DEBUG:
+ cmd.append('--reload')
+ return cmd
+
+ @property
+ def cwd(self):
+ return APPS_DIR
diff --git a/apps/common/management/commands/services/utils.py b/apps/common/management/commands/services/utils.py
new file mode 100644
index 00000000000..2426758b8de
--- /dev/null
+++ b/apps/common/management/commands/services/utils.py
@@ -0,0 +1,140 @@
+import threading
+import signal
+import time
+import daemon
+from daemon import pidfile
+from .hands import *
+from .hands import __version__
+from .services.base import BaseService
+
+
+class ServicesUtil(object):
+
+ def __init__(self, services, run_daemon=False, force_stop=False, stop_daemon=False):
+ self._services = services
+ self.run_daemon = run_daemon
+ self.force_stop = force_stop
+ self.stop_daemon = stop_daemon
+ self.EXIT_EVENT = threading.Event()
+ self.check_interval = 30
+ self.files_preserve_map = {}
+
+ def restart(self):
+ self.stop()
+ time.sleep(5)
+ self.start_and_watch()
+
+ def start_and_watch(self):
+ logging.info(time.ctime())
+ logging.info(f'MaxKB version {__version__}, more see https://www.jumpserver.org')
+ self.start()
+ if self.run_daemon:
+ self.show_status()
+ with self.daemon_context:
+ self.watch()
+ else:
+ self.watch()
+
+ def start(self):
+ for service in self._services:
+ service: BaseService
+ service.start()
+ self.files_preserve_map[service.name] = service.log_file
+
+ time.sleep(1)
+
+ def stop(self):
+ for service in self._services:
+ service: BaseService
+ service.stop(force=self.force_stop)
+
+ if self.stop_daemon:
+ self._stop_daemon()
+
+ # -- watch --
+ def watch(self):
+ while not self.EXIT_EVENT.is_set():
+ try:
+ _exit = self._watch()
+ if _exit:
+ break
+ time.sleep(self.check_interval)
+ except KeyboardInterrupt:
+ print('Start stop services')
+ break
+ self.clean_up()
+
+ def _watch(self):
+ for service in self._services:
+ service: BaseService
+ service.watch()
+ if service.EXIT_EVENT.is_set():
+ self.EXIT_EVENT.set()
+ return True
+ return False
+ # -- end watch --
+
+ def clean_up(self):
+ if not self.EXIT_EVENT.is_set():
+ self.EXIT_EVENT.set()
+ self.stop()
+
+ def show_status(self):
+ for service in self._services:
+ service: BaseService
+ service.show_status()
+
+ # -- daemon --
+ def _stop_daemon(self):
+ if self.daemon_pid and self.daemon_is_running:
+ os.kill(self.daemon_pid, 15)
+ self.remove_daemon_pid()
+
+ def remove_daemon_pid(self):
+ if os.path.isfile(self.daemon_pid_filepath):
+ os.unlink(self.daemon_pid_filepath)
+
+ @property
+ def daemon_pid(self):
+ if not os.path.isfile(self.daemon_pid_filepath):
+ return 0
+ with open(self.daemon_pid_filepath) as f:
+ try:
+ pid = int(f.read().strip())
+ except ValueError:
+ pid = 0
+ return pid
+
+ @property
+ def daemon_is_running(self):
+ try:
+ os.kill(self.daemon_pid, 0)
+ except (OSError, ProcessLookupError):
+ return False
+ else:
+ return True
+
+ @property
+ def daemon_pid_filepath(self):
+ return os.path.join(TMP_DIR, 'mk.pid')
+
+ @property
+ def daemon_log_filepath(self):
+ return os.path.join(LOG_DIR, 'mk.log')
+
+ @property
+ def daemon_context(self):
+ daemon_log_file = open(self.daemon_log_filepath, 'a')
+ context = daemon.DaemonContext(
+ pidfile=pidfile.TimeoutPIDLockFile(self.daemon_pid_filepath),
+ signal_map={
+ signal.SIGTERM: lambda x, y: self.clean_up(),
+ signal.SIGHUP: 'terminate',
+ },
+ stdout=daemon_log_file,
+ stderr=daemon_log_file,
+ files_preserve=list(self.files_preserve_map.values()),
+ detach_process=True,
+ )
+ return context
+ # -- end daemon --
diff --git a/apps/common/management/commands/start.py b/apps/common/management/commands/start.py
new file mode 100644
index 00000000000..4c078a8769a
--- /dev/null
+++ b/apps/common/management/commands/start.py
@@ -0,0 +1,6 @@
+from .services.command import BaseActionCommand, Action
+
+
+class Command(BaseActionCommand):
+ help = 'Start services'
+ action = Action.start.value
diff --git a/apps/common/management/commands/status.py b/apps/common/management/commands/status.py
new file mode 100644
index 00000000000..36f0d36080e
--- /dev/null
+++ b/apps/common/management/commands/status.py
@@ -0,0 +1,6 @@
+from .services.command import BaseActionCommand, Action
+
+
+class Command(BaseActionCommand):
+ help = 'Show services status'
+ action = Action.status.value
diff --git a/apps/common/management/commands/stop.py b/apps/common/management/commands/stop.py
new file mode 100644
index 00000000000..a79a5335c8f
--- /dev/null
+++ b/apps/common/management/commands/stop.py
@@ -0,0 +1,6 @@
+from .services.command import BaseActionCommand, Action
+
+
+class Command(BaseActionCommand):
+ help = 'Stop services'
+ action = Action.stop.value
diff --git a/apps/common/middleware/cross_domain_middleware.py b/apps/common/middleware/cross_domain_middleware.py
index d116dd7b7b8..06c0a6aba71 100644
--- a/apps/common/middleware/cross_domain_middleware.py
+++ b/apps/common/middleware/cross_domain_middleware.py
@@ -6,11 +6,10 @@
@date:2024/5/8 13:36
@desc:
"""
-from django.db.models import QuerySet
from django.http import HttpResponse
from django.utils.deprecation import MiddlewareMixin
-from application.models.api_key_model import ApplicationApiKey
+from common.cache_data.application_api_key_cache import get_application_api_key
class CrossDomainMiddleware(MiddlewareMixin):
@@ -27,13 +26,15 @@ def process_response(self, request, response):
auth = request.META.get('HTTP_AUTHORIZATION')
origin = request.META.get('HTTP_ORIGIN')
if auth is not None and str(auth).startswith("application-") and origin is not None:
- application_api_key = QuerySet(ApplicationApiKey).filter(secret_key=auth).first()
- if application_api_key.allow_cross_domain:
+ application_api_key = get_application_api_key(str(auth), True)
+ cross_domain_list = application_api_key.get('cross_domain_list', [])
+ allow_cross_domain = application_api_key.get('allow_cross_domain', False)
+ if allow_cross_domain:
response['Access-Control-Allow-Methods'] = 'GET,POST,DELETE,PUT'
response[
'Access-Control-Allow-Headers'] = "Origin,X-Requested-With,Content-Type,Accept,Authorization,token"
- if application_api_key.cross_domain_list is None or len(application_api_key.cross_domain_list) == 0:
+ if cross_domain_list is None or len(cross_domain_list) == 0:
response['Access-Control-Allow-Origin'] = "*"
- elif application_api_key.cross_domain_list.__contains__(origin):
+ elif cross_domain_list.__contains__(origin):
response['Access-Control-Allow-Origin'] = origin
return response
diff --git a/apps/common/middleware/doc_headers_middleware.py b/apps/common/middleware/doc_headers_middleware.py
new file mode 100644
index 00000000000..d818b842ca5
--- /dev/null
+++ b/apps/common/middleware/doc_headers_middleware.py
@@ -0,0 +1,62 @@
+# coding=utf-8
+"""
+ @project: maxkb
+ @Author:虎
+ @file: static_headers_middleware.py
+ @date:2024/3/13 18:26
+ @desc:
+"""
+from django.http import HttpResponse
+from django.utils.deprecation import MiddlewareMixin
+
+content = """
+
+
+
+
+
+
+ Document
+
+
+
+
+
+"""
+
+
+class DocHeadersMiddleware(MiddlewareMixin):
+ def process_response(self, request, response):
+ if request.path.startswith('/doc/') or request.path.startswith('/doc/chat/'):
+ HTTP_REFERER = request.META.get('HTTP_REFERER')
+ if HTTP_REFERER is None:
+ return HttpResponse(content)
+ if HTTP_REFERER == request._current_scheme_host + request.path:
+ return response
+ return response
diff --git a/apps/common/middleware/gzip.py b/apps/common/middleware/gzip.py
new file mode 100644
index 00000000000..92c7cea3829
--- /dev/null
+++ b/apps/common/middleware/gzip.py
@@ -0,0 +1,84 @@
+# coding=utf-8
+"""
+ @project: MaxKB
+ @Author:虎
+ @file: gzip.py
+ @date:2025/2/27 10:03
+ @desc:
+"""
+from django.utils.cache import patch_vary_headers
+from django.utils.deprecation import MiddlewareMixin
+from django.utils.regex_helper import _lazy_re_compile
+from django.utils.text import compress_sequence, compress_string
+
+re_accepts_gzip = _lazy_re_compile(r"\bgzip\b")
+
+
+class GZipMiddleware(MiddlewareMixin):
+ """
+ Compress content if the browser allows gzip compression.
+ Set the Vary header accordingly, so that caches will base their storage
+ on the Accept-Encoding header.
+ """
+
+ max_random_bytes = 100
+
+ def process_response(self, request, response):
+ if request.method != 'GET' or request.path.startswith('/api'):
+ return response
+ # It's not worth attempting to compress really short responses.
+ if not response.streaming and len(response.content) < 200:
+ return response
+
+ # Avoid gzipping if we've already got a content-encoding.
+ if response.has_header("Content-Encoding"):
+ return response
+
+ patch_vary_headers(response, ("Accept-Encoding",))
+
+ ae = request.META.get("HTTP_ACCEPT_ENCODING", "")
+ if not re_accepts_gzip.search(ae):
+ return response
+
+ if response.streaming:
+ if response.is_async:
+ # pull to lexical scope to capture fixed reference in case
+ # streaming_content is set again later.
+ original_iterator = response.streaming_content
+
+ async def gzip_wrapper():
+ async for chunk in original_iterator:
+ yield compress_string(
+ chunk,
+ max_random_bytes=self.max_random_bytes,
+ )
+
+ response.streaming_content = gzip_wrapper()
+ else:
+ response.streaming_content = compress_sequence(
+ response.streaming_content,
+ max_random_bytes=self.max_random_bytes,
+ )
+ # Delete the `Content-Length` header for streaming content, because
+ # we won't know the compressed size until we stream it.
+ del response.headers["Content-Length"]
+ else:
+ # Return the compressed content only if it's actually shorter.
+ compressed_content = compress_string(
+ response.content,
+ max_random_bytes=self.max_random_bytes,
+ )
+ if len(compressed_content) >= len(response.content):
+ return response
+ response.content = compressed_content
+ response.headers["Content-Length"] = str(len(response.content))
+
+ # If there is a strong ETag, make it weak to fulfill the requirements
+ # of RFC 9110 Section 8.8.1 while also allowing conditional request
+ # matches on ETags.
+ etag = response.get("ETag")
+ if etag and etag.startswith('"'):
+ response.headers["ETag"] = "W/" + etag
+ response.headers["Content-Encoding"] = "gzip"
+
+ return response
diff --git a/apps/common/middleware/static_headers_middleware.py b/apps/common/middleware/static_headers_middleware.py
index 79b799a70a0..f5afcfb7c93 100644
--- a/apps/common/middleware/static_headers_middleware.py
+++ b/apps/common/middleware/static_headers_middleware.py
@@ -6,25 +6,28 @@
@date:2024/3/13 18:26
@desc:
"""
-from django.db.models import QuerySet
from django.utils.deprecation import MiddlewareMixin
-from application.models.api_key_model import ApplicationAccessToken
+from common.cache_data.application_access_token_cache import get_application_access_token
class StaticHeadersMiddleware(MiddlewareMixin):
def process_response(self, request, response):
if request.path.startswith('/ui/chat/'):
access_token = request.path.replace('/ui/chat/', '')
- application_access_token = QuerySet(ApplicationAccessToken).filter(access_token=access_token).first()
+ application_access_token = get_application_access_token(access_token, True)
if application_access_token is not None:
- if application_access_token.white_active:
+ white_active = application_access_token.get('white_active', False)
+ white_list = application_access_token.get('white_list', [])
+ application_icon = application_access_token.get('application_icon')
+ application_name = application_access_token.get('application_name')
+ if white_active:
# 添加自定义的响应头
response[
- 'Content-Security-Policy'] = f'frame-ancestors {" ".join(application_access_token.white_list)}'
+ 'Content-Security-Policy'] = f'frame-ancestors {" ".join(white_list)}'
response.content = (response.content.decode('utf-8').replace(
' ',
- f' ')
- .replace('MaxKB ', f'{application_access_token.application.name} ').encode(
+ f' ')
+ .replace('MaxKB ', f'{application_name} ').encode(
"utf-8"))
return response
diff --git a/apps/common/models/db_model_manage.py b/apps/common/models/db_model_manage.py
new file mode 100644
index 00000000000..80ce0f55b54
--- /dev/null
+++ b/apps/common/models/db_model_manage.py
@@ -0,0 +1,35 @@
+# coding=utf-8
+"""
+ @project: MaxKB
+ @Author:虎
+ @file: db_model_manage.py
+ @date:2024/7/22 17:00
+ @desc:
+"""
+from importlib import import_module
+from django.conf import settings
+
+
+def new_instance_by_class_path(class_path: str):
+ parts = class_path.rpartition('.')
+ package_path = parts[0]
+ class_name = parts[2]
+ module = import_module(package_path)
+ HandlerClass = getattr(module, class_name)
+ return HandlerClass()
+
+
+class DBModelManage:
+ model_dict = {}
+
+ @staticmethod
+ def get_model(model_name):
+ return DBModelManage.model_dict.get(model_name)
+
+ @staticmethod
+ def init():
+ handles = [new_instance_by_class_path(class_path) for class_path in
+ (settings.MODEL_HANDLES if hasattr(settings, 'MODEL_HANDLES') else [])]
+ for h in handles:
+ model_dict = h.get_model_dict()
+ DBModelManage.model_dict = {**DBModelManage.model_dict, **model_dict}
diff --git a/apps/common/models/handle/base_handle.py b/apps/common/models/handle/base_handle.py
new file mode 100644
index 00000000000..17389673e2b
--- /dev/null
+++ b/apps/common/models/handle/base_handle.py
@@ -0,0 +1,15 @@
+# coding=utf-8
+"""
+ @project: MaxKB
+ @Author:虎
+ @file: base_handle.py
+ @date:2024/7/22 17:02
+ @desc:
+"""
+from abc import ABC, abstractmethod
+
+
+class IBaseModelHandle(ABC):
+ @abstractmethod
+ def get_model_dict(self):
+ pass
diff --git a/apps/common/models/handle/impl/default_base_model_handle.py b/apps/common/models/handle/impl/default_base_model_handle.py
new file mode 100644
index 00000000000..b1ed7051a53
--- /dev/null
+++ b/apps/common/models/handle/impl/default_base_model_handle.py
@@ -0,0 +1,14 @@
+# coding=utf-8
+"""
+ @project: MaxKB
+ @Author:虎
+ @file: default_base_model_handle.py
+ @date:2024/7/22 17:06
+ @desc:
+"""
+from common.models.handle.base_handle import IBaseModelHandle
+
+
+class DefaultBaseModelHandle(IBaseModelHandle):
+ def get_model_dict(self):
+ return {}
diff --git a/apps/common/response/result.py b/apps/common/response/result.py
index d1cf6a3ad5b..d2cc37e7755 100644
--- a/apps/common/response/result.py
+++ b/apps/common/response/result.py
@@ -3,6 +3,7 @@
from django.http import JsonResponse
from drf_yasg import openapi
from rest_framework import status
+from django.utils.translation import gettext_lazy as _
class Page(dict):
@@ -15,11 +16,12 @@ def __init__(self, total: int, records: List, current_page: int, page_size: int,
class Result(JsonResponse):
+ charset = 'utf-8'
"""
接口统一返回对象
"""
- def __init__(self, code=200, message="成功", data=None, response_status=status.HTTP_200_OK, **kwargs):
+ def __init__(self, code=200, message=_('Success'), data=None, response_status=status.HTTP_200_OK, **kwargs):
back_info_dict = {"code": code, "message": message, 'data': data}
super().__init__(data=back_info_dict, status=response_status, **kwargs)
@@ -31,13 +33,13 @@ def get_page_request_params(other_request_params=None):
in_=openapi.IN_PATH,
type=openapi.TYPE_INTEGER,
required=True,
- description='当前页')
+ description=_('current page'))
page_size = openapi.Parameter(name='page_size',
in_=openapi.IN_PATH,
type=openapi.TYPE_INTEGER,
required=True,
- description='每页大小')
+ description=_('page size'))
result = [current_page, page_size]
for other_request_param in other_request_params:
result.append(other_request_param)
@@ -48,41 +50,41 @@ def get_page_api_response(response_data_schema: openapi.Schema):
"""
获取统一返回 响应Api
"""
- return openapi.Responses(responses={200: openapi.Response(description="响应参数",
+ return openapi.Responses(responses={200: openapi.Response(description=_('response parameters'),
schema=openapi.Schema(
type=openapi.TYPE_OBJECT,
properties={
'code': openapi.Schema(
type=openapi.TYPE_INTEGER,
- title="响应码",
+ title=_('response code'),
default=200,
- description="成功:200 失败:其他"),
+ description=_('success:200 fail:other')),
"message": openapi.Schema(
type=openapi.TYPE_STRING,
- title="提示",
- default='成功',
- description="错误提示"),
+ title=_('prompt'),
+ default=_('success'),
+ description=_('error prompt')),
"data": openapi.Schema(
type=openapi.TYPE_OBJECT,
properties={
'total': openapi.Schema(
type=openapi.TYPE_INTEGER,
- title="总条数",
+ title=_('total number of data'),
default=1,
- description="数据总条数"),
+ description=_('total number of data')),
"records": openapi.Schema(
type=openapi.TYPE_ARRAY,
items=response_data_schema),
"current": openapi.Schema(
type=openapi.TYPE_INTEGER,
- title="当前页",
+ title=_('current page'),
default=1,
- description="当前页"),
+ description=_('current page')),
"size": openapi.Schema(
type=openapi.TYPE_INTEGER,
- title="每页大小",
+ title=_('page size'),
default=10,
- description="每页大小")
+ description=_('page size'))
}
)
@@ -96,20 +98,20 @@ def get_api_response(response_data_schema: openapi.Schema):
"""
获取统一返回 响应Api
"""
- return openapi.Responses(responses={200: openapi.Response(description="响应参数",
+ return openapi.Responses(responses={200: openapi.Response(description=_('response parameters'),
schema=openapi.Schema(
type=openapi.TYPE_OBJECT,
properties={
'code': openapi.Schema(
type=openapi.TYPE_INTEGER,
- title="响应码",
+ title=_('response code'),
default=200,
- description="成功:200 失败:其他"),
+ description=_('success:200 fail:other')),
"message": openapi.Schema(
type=openapi.TYPE_STRING,
- title="提示",
- default='成功',
- description="错误提示"),
+ title=_('prompt'),
+ default=_('success'),
+ description=_('error prompt')),
"data": response_data_schema
}
@@ -125,20 +127,20 @@ def get_api_array_response(response_data_schema: openapi.Schema):
"""
获取统一返回 响应Api
"""
- return openapi.Responses(responses={200: openapi.Response(description="响应参数",
+ return openapi.Responses(responses={200: openapi.Response(description=_('response parameters'),
schema=openapi.Schema(
type=openapi.TYPE_OBJECT,
properties={
'code': openapi.Schema(
type=openapi.TYPE_INTEGER,
- title="响应码",
+ title=_('response code'),
default=200,
- description="成功:200 失败:其他"),
+ description=_('success:200 fail:other')),
"message": openapi.Schema(
type=openapi.TYPE_STRING,
- title="提示",
- default='成功',
- description="错误提示"),
+ title=_('prompt'),
+ default=_('success'),
+ description=_('error prompt')),
"data": openapi.Schema(type=openapi.TYPE_ARRAY,
items=response_data_schema)
@@ -156,10 +158,10 @@ def success(data, **kwargs):
return Result(data=data, **kwargs)
-def error(message):
+def error(message, **kwargs):
"""
获取一个失败的响应对象
:param message: 错误提示
:return: 接口响应对象
"""
- return Result(code=500, message=message)
+ return Result(code=500, message=message, **kwargs)
diff --git a/apps/common/sql/list_embedding_text.sql b/apps/common/sql/list_embedding_text.sql
index 74f3b224b42..ac0dc7b311e 100644
--- a/apps/common/sql/list_embedding_text.sql
+++ b/apps/common/sql/list_embedding_text.sql
@@ -19,9 +19,7 @@ SELECT
paragraph."id" AS paragraph_id,
paragraph.dataset_id AS dataset_id,
1 AS source_type,
- concat_ws('
-',concat_ws('
-',paragraph.title,paragraph."content"),paragraph.title) AS "text",
+ concat_ws(E'\n',paragraph.title,paragraph."content") AS "text",
paragraph.is_active AS is_active
FROM
paragraph paragraph
diff --git a/apps/common/swagger_api/common_api.py b/apps/common/swagger_api/common_api.py
index c3d8be6ca6f..3134db0d083 100644
--- a/apps/common/swagger_api/common_api.py
+++ b/apps/common/swagger_api/common_api.py
@@ -9,6 +9,7 @@
from drf_yasg import openapi
from common.mixins.api_mixin import ApiMixin
+from django.utils.translation import gettext_lazy as _
class CommonApi:
@@ -16,31 +17,31 @@ class HitTestApi(ApiMixin):
@staticmethod
def get_request_params_api():
return [
- openapi.Parameter(name='query_text',
- in_=openapi.IN_QUERY,
- type=openapi.TYPE_STRING,
- required=True,
- description='问题文本'),
- openapi.Parameter(name='top_number',
- in_=openapi.IN_QUERY,
- type=openapi.TYPE_NUMBER,
- default=10,
- required=True,
- description='topN'),
- openapi.Parameter(name='similarity',
- in_=openapi.IN_QUERY,
- type=openapi.TYPE_NUMBER,
- default=0.6,
- required=True,
- description='相关性'),
- openapi.Parameter(name='search_mode',
- in_=openapi.IN_QUERY,
- type=openapi.TYPE_STRING,
- default="embedding",
- required=True,
- description='检索模式embedding|keywords|blend'
- )
- ]
+ openapi.Parameter(name='query_text',
+ in_=openapi.IN_QUERY,
+ type=openapi.TYPE_STRING,
+ required=True,
+ description=_('query text')),
+ openapi.Parameter(name='top_number',
+ in_=openapi.IN_QUERY,
+ type=openapi.TYPE_NUMBER,
+ default=10,
+ required=True,
+ description='topN'),
+ openapi.Parameter(name='similarity',
+ in_=openapi.IN_QUERY,
+ type=openapi.TYPE_NUMBER,
+ default=0.6,
+ required=True,
+ description=_('similarity')),
+ openapi.Parameter(name='search_mode',
+ in_=openapi.IN_QUERY,
+ type=openapi.TYPE_STRING,
+ default="embedding",
+ required=True,
+ description=_('Retrieval pattern embedding|keywords|blend')
+ )
+ ]
@staticmethod
def get_response_body_api():
@@ -53,31 +54,32 @@ def get_response_body_api():
properties={
'id': openapi.Schema(type=openapi.TYPE_STRING, title="id",
description="id", default="xx"),
- 'content': openapi.Schema(type=openapi.TYPE_STRING, title="段落内容",
- description="段落内容", default='段落内容'),
- 'title': openapi.Schema(type=openapi.TYPE_STRING, title="标题",
- description="标题", default="xxx的描述"),
- 'hit_num': openapi.Schema(type=openapi.TYPE_INTEGER, title="命中数量", description="命中数量",
+ 'content': openapi.Schema(type=openapi.TYPE_STRING, title=_('Paragraph content'),
+ description=_('Paragraph content'), default=_('Paragraph content')),
+ 'title': openapi.Schema(type=openapi.TYPE_STRING, title=_('title'),
+ description=_('title'), default=_('Description of xxx')),
+ 'hit_num': openapi.Schema(type=openapi.TYPE_INTEGER, title=_('Number of hits'),
+ description=_('Number of hits'),
default=1),
- 'star_num': openapi.Schema(type=openapi.TYPE_INTEGER, title="点赞数量",
- description="点赞数量", default=1),
- 'trample_num': openapi.Schema(type=openapi.TYPE_INTEGER, title="点踩数量",
- description="点踩数", default=1),
- 'dataset_id': openapi.Schema(type=openapi.TYPE_STRING, title="知识库id",
- description="知识库id", default='xxx'),
- 'document_id': openapi.Schema(type=openapi.TYPE_STRING, title="文档id",
- description="文档id", default='xxx'),
- 'is_active': openapi.Schema(type=openapi.TYPE_BOOLEAN, title="是否可用",
- description="是否可用", default=True),
- 'similarity': openapi.Schema(type=openapi.TYPE_NUMBER, title="相关性得分",
- description="相关性得分", default=True),
- 'comprehensive_score': openapi.Schema(type=openapi.TYPE_NUMBER, title="综合得分,用于排序",
- description="综合得分,用于排序", default=True),
- 'update_time': openapi.Schema(type=openapi.TYPE_STRING, title="修改时间",
- description="修改时间",
+ 'star_num': openapi.Schema(type=openapi.TYPE_INTEGER, title=_('Number of likes'),
+ description=_('Number of likes'), default=1),
+ 'trample_num': openapi.Schema(type=openapi.TYPE_INTEGER, title=_('Number of clicks and dislikes'),
+ description=_('Number of clicks and dislikes'), default=1),
+ 'dataset_id': openapi.Schema(type=openapi.TYPE_STRING, title=_('dataset id'),
+ description=_('dataset id'), default='xxx'),
+ 'document_id': openapi.Schema(type=openapi.TYPE_STRING, title=_('document id'),
+ description=_('document id'), default='xxx'),
+ 'is_active': openapi.Schema(type=openapi.TYPE_BOOLEAN, title=_('Is active'),
+ description=_('Is active'), default=True),
+ 'similarity': openapi.Schema(type=openapi.TYPE_NUMBER, title=_('relevance score'),
+ description=_('relevance score'), default=True),
+ 'comprehensive_score': openapi.Schema(type=openapi.TYPE_NUMBER, title=_('Comprehensive score, used for ranking'),
+ description=_('Comprehensive score, used for ranking'), default=True),
+ 'update_time': openapi.Schema(type=openapi.TYPE_STRING, title=_('Update time'),
+ description=_('Update time'),
default="1970-01-01 00:00:00"),
- 'create_time': openapi.Schema(type=openapi.TYPE_STRING, title="创建时间",
- description="创建时间",
+ 'create_time': openapi.Schema(type=openapi.TYPE_STRING, title=_('Create time'),
+ description=_('Create time'),
default="1970-01-01 00:00:00"
),
diff --git a/apps/common/task/__init__.py b/apps/common/task/__init__.py
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/apps/common/template/email_template_en.html b/apps/common/template/email_template_en.html
new file mode 100644
index 00000000000..5d5515c449b
--- /dev/null
+++ b/apps/common/template/email_template_en.html
@@ -0,0 +1,122 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ Intelligent Knowledge Q&A System
+
+
+
+
+
+
+
+
+
+
+ Dear user:
+
+
+
+
+ ${code} This is your dynamic verification code. Please fill it in within 30 minutes. To protect the security of your account, please do not provide this verification code to anyone.
+
+
+
+
+
+
Intelligent knowledge base project team
+
+
+ Please do not reply to this system email
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/apps/common/template/email_template.html b/apps/common/template/email_template_zh.html
similarity index 92%
rename from apps/common/template/email_template.html
rename to apps/common/template/email_template_zh.html
index 4b4f913ef9c..29c1fc65dad 100644
--- a/apps/common/template/email_template.html
+++ b/apps/common/template/email_template_zh.html
@@ -22,7 +22,6 @@
border-bottom-right-radius: 5px;
border-bottom-left-radius: 5px;
font-size: 14px;
- font-family: 微软雅黑, 黑体;
line-height: 1.5;
box-shadow: rgb(153, 153, 153) 0px 0px 5px;
border-collapse: collapse;
@@ -56,17 +55,16 @@
- MaxKB 智能知识库
+ 智能知识库问答系统
@@ -105,11 +103,10 @@
text-align: right;
"
>
- 飞致云 - MaxKB 项目组
+ 智能知识库项目组
此为系统邮件,请勿回复
- Please do not reply to this system email
diff --git a/apps/common/template/email_template_zh_Hant.html b/apps/common/template/email_template_zh_Hant.html
new file mode 100644
index 00000000000..d54d4023138
--- /dev/null
+++ b/apps/common/template/email_template_zh_Hant.html
@@ -0,0 +1,123 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ 智慧知識庫問答系統
+
+
+
+
+
+
+
+
+
+
+ 尊敬的用戶:
+
+
+
+
+ ${code} 為您的動態驗證碼,請於30分鐘內填寫,為保障帳戶安全,請勿向任何人提供此驗證碼。
+
+
+
+
+
+
智慧知識庫專案組
+
+
+ 此為系統郵件,請勿回覆
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/apps/common/util/cache_util.py b/apps/common/util/cache_util.py
new file mode 100644
index 00000000000..3d97a47cd14
--- /dev/null
+++ b/apps/common/util/cache_util.py
@@ -0,0 +1,68 @@
+# coding=utf-8
+"""
+ @project: MaxKB
+ @Author:虎
+ @file: cache_util.py
+ @date:2024/7/24 19:23
+ @desc:
+"""
+from django.core.cache import caches
+
+cache = caches['default_file']
+
+
+def get_data_by_default_cache(key: str, get_data, cache_instance=cache, version=None, kwargs=None):
+ """
+ 获取数据, 先从缓存中获取,如果获取不到再调用get_data 获取数据
+ @param kwargs: get_data所需参数
+ @param key: key
+ @param get_data: 获取数据函数
+ @param cache_instance: cache实例
+ @param version: 版本用于隔离
+ @return:
+ """
+ if kwargs is None:
+ kwargs = {}
+ if cache_instance.has_key(key, version=version):
+ return cache_instance.get(key, version=version)
+ data = get_data(**kwargs)
+ cache_instance.add(key, data, version=version)
+ return data
+
+
+def set_data_by_default_cache(key: str, get_data, cache_instance=cache, version=None):
+ data = get_data()
+ cache_instance.set(key, data, version=version)
+ return data
+
+
+def get_cache(cache_key, use_get_data: any = True, cache_instance=cache, version=None):
+ def inner(get_data):
+ def run(*args, **kwargs):
+ key = cache_key(*args, **kwargs) if callable(cache_key) else cache_key
+ is_use_get_data = use_get_data(*args, **kwargs) if callable(use_get_data) else use_get_data
+ if is_use_get_data:
+ if cache_instance.has_key(key, version=version):
+ return cache_instance.get(key, version=version)
+ data = get_data(*args, **kwargs)
+ cache_instance.add(key, data, timeout=None, version=version)
+ return data
+ data = get_data(*args, **kwargs)
+ cache_instance.set(key, data, timeout=None, version=version)
+ return data
+
+ return run
+
+ return inner
+
+
+def del_cache(cache_key, cache_instance=cache, version=None):
+ def inner(func):
+ def run(*args, **kwargs):
+ key = cache_key(*args, **kwargs) if callable(cache_key) else cache_key
+ func(*args, **kwargs)
+ cache_instance.delete(key, version=version)
+
+ return run
+
+ return inner
diff --git a/apps/common/util/common.py b/apps/common/util/common.py
index 52d90ec8548..8583a1c989f 100644
--- a/apps/common/util/common.py
+++ b/apps/common/util/common.py
@@ -6,10 +6,70 @@
@date:2023/10/16 16:42
@desc:
"""
+import hashlib
import importlib
+import io
+import mimetypes
+import pickle
+import random
+import re
+import shutil
from functools import reduce
from typing import Dict, List
+from django.core.files.uploadedfile import InMemoryUploadedFile
+from django.db.models import QuerySet
+from django.utils.translation import gettext as _
+from pydub import AudioSegment
+
+from ..exception.app_exception import AppApiException
+from ..models.db_model_manage import DBModelManage
+
+# NOTE(review): safe_builtins appears unused by RestrictedUnpickler below —
+# confirm whether it is dead code or consumed elsewhere.
+safe_builtins = {
+    'MKInstance'
+}
+
+# (module, qualified name) pairs the restricted unpickler may resolve.
+ALLOWED_CLASSES = {
+    ("builtins", "dict"),
+    ('uuid', 'UUID'),
+    ("application.serializers.application_serializers", "MKInstance"),
+    ("function_lib.serializers.function_lib_serializer", "FlibInstance")
+}
+
+
+class RestrictedUnpickler(pickle.Unpickler):
+    """Unpickler that only resolves globals listed in ALLOWED_CLASSES,
+    mitigating arbitrary-code-execution via crafted pickle payloads."""
+
+    def find_class(self, module, name):
+        # Whitelist check: anything outside ALLOWED_CLASSES is rejected.
+        if (module, name) in ALLOWED_CLASSES:
+            return super().find_class(module, name)
+        raise pickle.UnpicklingError("global '%s.%s' is forbidden" %
+                                     (module, name))
+
+
+def restricted_loads(s):
+    """Helper function analogous to pickle.loads(), but routed through the
+    whitelisting RestrictedUnpickler above."""
+    return RestrictedUnpickler(io.BytesIO(s)).load()
+
+
+def encryption(message: str):
+    """
+    Mask a sensitive string for display: keep a short prefix and suffix and
+    replace the middle with a fixed run of asterisks, e.g. '1234567890' ->
+    '12' + '***************' + '89' (kept lengths scale with input length).
+    :param message: plaintext to mask
+    :return: masked string
+    """
+    max_pre_len = 8   # at most 8 leading characters kept
+    max_post_len = 4  # at most 4 trailing characters kept
+    message_len = len(message)
+    pre_len = int(message_len / 5 * 2)   # keep ~2/5 of the head (capped)
+    post_len = int(message_len / 5 * 1)  # keep ~1/5 of the tail (capped)
+    pre_str = "".join([message[index] for index in
+                       range(0, max_pre_len if pre_len > max_pre_len else 1 if pre_len <= 0 else int(pre_len))])
+    # NOTE(review): the guard compares pre_len (not post_len) against
+    # max_post_len — verify this is intended.
+    end_str = "".join(
+        [message[index] for index in
+         range(message_len - (int(post_len) if pre_len < max_post_len else max_post_len), message_len)])
+    # Fixed-width filler so the mask does not reveal the original length.
+    content = "***************"
+    return pre_str + content + end_str
+
def sub_array(array: List, item_num=10):
result = []
@@ -45,6 +105,30 @@ def get_exec_method(clazz_: str, method_: str):
return getattr(getattr(package_model, clazz_name), method_)
+def flat_map(array: List[List]):
+    """
+    Flatten a 2-D list into a 1-D list.
+    :param array: list of lists
+    :return: single flattened list
+    """
+    result = []
+    for e in array:
+        result += e
+    return result
+
+
+def password_encrypt(raw_password):
+    """
+    Hash a password with MD5 and return the hex digest.
+    NOTE(review): MD5 is unsalted and fast to brute-force, so it is not a
+    secure password hash — flagged for migration to a KDF (PBKDF2/bcrypt).
+    :param raw_password: plaintext password
+    :return: 32-character lowercase hex digest
+    """
+    md5 = hashlib.md5()  # instantiate the md5 hasher
+    md5.update(raw_password.encode())  # hash the UTF-8 encoded bytes
+    result = md5.hexdigest()  # hex-encode the digest
+    return result
+
+
def post(post_function):
def inner(func):
def run(*args, **kwargs):
@@ -54,3 +138,174 @@ def run(*args, **kwargs):
return run
return inner
+
+
+def valid_license(model=None, count=None, message=None):
+    """
+    Decorator factory: before running the wrapped function, enforce a
+    record-count quota on ``model`` unless a valid xpack license is present.
+    @param model: Django model whose rows are counted
+    @param count: maximum allowed rows without a valid license
+    @param message: custom error text (defaults to a generic limit message)
+    """
+    def inner(func):
+        def run(*args, **kwargs):
+            xpack_cache = DBModelManage.get_model('xpack_cache')
+            # No xpack cache registered (community edition): license invalid.
+            is_license_valid = xpack_cache.get('XPACK_LICENSE_IS_VALID', False) if xpack_cache is not None else False
+            record_count = QuerySet(model).count()
+
+            if not is_license_valid and record_count >= count:
+                error_message = message or f'超出限制{count}, 请联系我们(https://fit2cloud.com/)。'
+                raise AppApiException(400, error_message)
+
+            return func(*args, **kwargs)
+
+        return run
+
+    return inner
+
+
+def parse_image(content: str):
+    """Extract markdown image/file references that point at /api/image or /api/file."""
+    matches = re.finditer("!\[.*?\]\(\/api\/(image|file)\/.*?\)", content)
+    image_list = [match.group() for match in matches]
+    return image_list
+
+
+def parse_md_image(content: str):
+    """Extract all markdown image references ``![alt](url)`` from ``content``."""
+    matches = re.finditer("!\[.*?\]\(.*?\)", content)
+    image_list = [match.group() for match in matches]
+    return image_list
+
+
+def bulk_create_in_batches(model, data, batch_size=1000):
+    """Insert ``data`` via model.objects.bulk_create in chunks of ``batch_size``
+    to bound per-query size."""
+    if len(data) == 0:
+        return
+    for i in range(0, len(data), batch_size):
+        batch = data[i:i + batch_size]
+        model.objects.bulk_create(batch)
+
+
+def bytes_to_uploaded_file(file_bytes, file_name="file.txt"):
+    """
+    Wrap raw bytes in a Django InMemoryUploadedFile so they can flow through
+    code paths that expect an uploaded file.
+    :param file_bytes: raw file content
+    :param file_name: name used to guess the content type
+    :return: InMemoryUploadedFile backed by an in-memory stream
+    """
+    content_type, _ = mimetypes.guess_type(file_name)
+    if content_type is None:
+        # Unknown extension: fall back to the generic binary content type.
+        content_type = "application/octet-stream"
+    # In-memory byte stream backing the uploaded file
+    file_stream = io.BytesIO(file_bytes)
+
+    # File size in bytes
+    file_size = len(file_bytes)
+
+    # Build the InMemoryUploadedFile object
+    uploaded_file = InMemoryUploadedFile(
+        file=file_stream,
+        field_name=None,
+        name=file_name,
+        content_type=content_type,
+        size=file_size,
+        charset=None,
+    )
+    return uploaded_file
+
+
+def any_to_amr(any_path, amr_path):
+    """
+    Convert any audio file to AMR (resampled to 8 kHz, the only rate AMR
+    supports) and return the duration in milliseconds.
+    NOTE(review): the copy branch returns None while the convert branch
+    returns a duration — confirm callers handle both.
+    """
+    if any_path.endswith(".amr"):
+        shutil.copy2(any_path, amr_path)
+        return
+    if any_path.endswith(".sil") or any_path.endswith(".silk") or any_path.endswith(".slk"):
+        raise NotImplementedError("Not support file type: {}".format(any_path))
+    audio = AudioSegment.from_file(any_path)
+    audio = audio.set_frame_rate(8000)  # only support 8000
+    audio.export(amr_path, format="amr")
+    return audio.duration_seconds * 1000
+
+
+def any_to_mp3(any_path, mp3_path):
+    """
+    Convert any audio file to 16 kHz MP3; silk inputs are first decoded to WAV.
+    """
+    if any_path.endswith(".mp3"):
+        shutil.copy2(any_path, mp3_path)
+        return
+    if any_path.endswith(".sil") or any_path.endswith(".silk") or any_path.endswith(".slk"):
+        # Decode silk in place: overwrites any_path with WAV data.
+        sil_to_wav(any_path, any_path)
+        # NOTE(review): reassigning any_path to mp3_path looks wrong — the
+        # decoded WAV was just written to the original any_path and mp3_path
+        # does not exist yet; verify this branch against a real silk input.
+        any_path = mp3_path
+    audio = AudioSegment.from_file(any_path)
+    audio = audio.set_frame_rate(16000)
+    audio.export(mp3_path, format="mp3")
+
+
+def sil_to_wav(silk_path, wav_path, rate: int = 24000):
+    """
+    Decode a silk audio file to WAV using pysilk.
+    :param silk_path: input .silk/.sil file
+    :param wav_path: output .wav path
+    :param rate: output sample rate in Hz
+    :raises AppApiException: when pysilk is not installed
+    """
+    try:
+        # pysilk is optional; imported lazily so the module loads without it.
+        import pysilk
+    except ImportError:
+        # NOTE(review): other call sites raise AppApiException(code, message) —
+        # confirm this single-argument form is valid.
+        raise AppApiException("import pysilk failed, wechaty voice message will not be supported.")
+    wav_data = pysilk.decode_file(silk_path, to_wav=True, sample_rate=rate)
+    with open(wav_path, "wb") as f:
+        f.write(wav_data)
+
+
+def split_and_transcribe(file_path, model, max_segment_length_ms=59000, audio_format="mp3"):
+    """
+    Transcribe an audio file with ``model.speech_to_text``. Files longer than
+    ``max_segment_length_ms`` are split into segments (the 59 s default keeps
+    each clip under typical ~60 s STT API limits) and the per-segment texts
+    are joined with spaces.
+    """
+    audio_data = AudioSegment.from_file(file_path, format=audio_format)
+    audio_length_ms = len(audio_data)  # pydub lengths are in milliseconds
+
+    if audio_length_ms <= max_segment_length_ms:
+        return model.speech_to_text(io.BytesIO(audio_data.export(format=audio_format).read()))
+
+    full_text = []
+    for start_ms in range(0, audio_length_ms, max_segment_length_ms):
+        end_ms = min(audio_length_ms, start_ms + max_segment_length_ms)
+        segment = audio_data[start_ms:end_ms]
+        text = model.speech_to_text(io.BytesIO(segment.export(format=audio_format).read()))
+        # Non-string results (e.g. error payloads) are silently skipped.
+        if isinstance(text, str):
+            full_text.append(text)
+    return ' '.join(full_text)
+
+
+def _remove_empty_lines(text):
+    """Validate text-to-speech input, drop blank lines, then strip
+    markdown/HTML markup via markdown_to_plain_text.
+    :raises AppApiException: when ``text`` is not a non-empty string
+    """
+    if not isinstance(text, str):
+        raise AppApiException(500, _('Text-to-speech node, the text content must be of string type'))
+    if not text:
+        raise AppApiException(500, _('Text-to-speech node, the text content cannot be empty'))
+    result = '\n'.join(line for line in text.split('\n') if line.strip())
+    return markdown_to_plain_text(result)
+
+
+def markdown_to_plain_text(md: str) -> str:
+ # 移除图片 
+ text = re.sub(r'!\[.*?\]\(.*?\)', '', md)
+ # 移除链接 [text](url)
+ text = re.sub(r'\[([^\]]+)\]\([^)]+\)', r'\1', text)
+ # 移除 Markdown 标题符号 (#, ##, ###)
+ text = re.sub(r'^#{1,6}\s+', '', text, flags=re.MULTILINE)
+ # 移除加粗 **text** 或 __text__
+ text = re.sub(r'\*\*(.*?)\*\*', r'\1', text)
+ text = re.sub(r'__(.*?)__', r'\1', text)
+ # 移除斜体 *text* 或 _text_
+ text = re.sub(r'\*(.*?)\*', r'\1', text)
+ text = re.sub(r'_(.*?)_', r'\1', text)
+ # 移除行内代码 `code`
+ text = re.sub(r'`(.*?)`', r'\1', text)
+ # 移除代码块 ```code```
+ text = re.sub(r'```[\s\S]*?```', '', text)
+ # 移除多余的换行符
+ text = re.sub(r'\n{2,}', '\n', text)
+ # 使用正则表达式去除所有 HTML 标签
+ text = re.sub(r'<[^>]+>', '', text)
+ # 去除多余的空白字符(包括换行符、制表符等)
+ text = re.sub(r'\s+', ' ', text)
+ # 去除表单渲染
+    re.sub(r'<form_rander>[\s\S]*?<\/form_rander>', '', text)
+ # 去除首尾空格
+ text = text.strip()
+ return text
+
+
+# Characters safe for human-readable codes: visually ambiguous glyphs
+# (I/O, i/l/o, 0/1/7) are excluded to avoid transcription mistakes.
+SAFE_CHAR_SET = (
+        [chr(i) for i in range(65, 91) if chr(i) not in {'I', 'O'}] +  # uppercase A-H, J-N, P-Z
+        [chr(i) for i in range(97, 123) if chr(i) not in {'i', 'l', 'o'}] +  # lowercase a-h, j-k, m-n, p-z
+        [str(i) for i in range(10) if str(i) not in {'0', '1', '7'}]  # digits 2-6, 8-9
+)
+
+
+def get_random_chars(number=4):
+    """Return ``number`` random characters from SAFE_CHAR_SET.
+    random.choices is not cryptographically secure — do not use for secrets."""
+    return ''.join(random.choices(SAFE_CHAR_SET, k=number))
diff --git a/apps/common/util/field_message.py b/apps/common/util/field_message.py
index 93b51b92097..409100b485b 100644
--- a/apps/common/util/field_message.py
+++ b/apps/common/util/field_message.py
@@ -6,101 +6,67 @@
@date:2024/3/1 14:30
@desc:
"""
-from django.utils.translation import gettext_lazy
+from django.utils.functional import lazy
+from rest_framework import serializers
+
+
+def value_(field, value):
+    # Render one error message as "【field】 message".
+    return f"【{field}】 {value}"
+
+
+def reset_messages(field, messages):
+    # Lazily prefix every error message with the field label so the string is
+    # built (and translated) at render time rather than at import time.
+    return {key: lazy(value_, str)(field, messages.get(key)) for key in messages}
+
+
+def reset_message_by_field(field_text, field):
+    # Merge the field's own default messages with its direct base class's.
+    # NOTE(review): __bases__[0] climbs only one level of the hierarchy —
+    # confirm that is sufficient for the serializer fields used here.
+    return reset_messages(field_text, {**field.default_error_messages, **field.__bases__[0].default_error_messages})
class ErrMessage:
@staticmethod
def char(field: str):
- return {
- 'invalid': gettext_lazy("【%s】不是有效的字符串。" % field),
- 'blank': gettext_lazy("【%s】此字段不能为空字符串。" % field),
- 'max_length': gettext_lazy("【%s】请确保此字段的字符数不超过 {max_length} 个。" % field),
- 'min_length': gettext_lazy("【%s】请确保此字段至少包含 {min_length} 个字符。" % field),
- 'required': gettext_lazy('【%s】此字段必填。' % field),
- 'null': gettext_lazy('【%s】此字段不能为null。' % field)
- }
+ return reset_message_by_field(field, serializers.CharField)
@staticmethod
def uuid(field: str):
- return {'required': gettext_lazy('【%s】此字段必填。' % field),
- 'null': gettext_lazy('【%s】此字段不能为null。' % field),
- 'invalid': gettext_lazy("【%s】必须是有效的UUID。" % field),
- }
+ return reset_messages(field, serializers.UUIDField.default_error_messages)
@staticmethod
def integer(field: str):
- return {'invalid': gettext_lazy('【%s】必须是有效的integer。' % field),
- 'max_value': gettext_lazy('【%s】请确保此值小于或等于 {max_value} 。' % field),
- 'min_value': gettext_lazy('【%s】请确保此值大于或等于 {min_value} 。' % field),
- 'max_string_length': gettext_lazy('【%s】字符串值太大。') % field,
- 'required': gettext_lazy('【%s】此字段必填。' % field),
- 'null': gettext_lazy('【%s】此字段不能为null。' % field),
- }
+ return reset_messages(field, serializers.IntegerField.default_error_messages)
@staticmethod
def list(field: str):
- return {'not_a_list': gettext_lazy('【%s】应为列表,但得到的类型为 "{input_type}".' % field),
- 'empty': gettext_lazy('【%s】此列表不能为空。' % field),
- 'min_length': gettext_lazy('【%s】请确保此字段至少包含 {min_length} 个元素。' % field),
- 'max_length': gettext_lazy('【%s】请确保此字段的元素不超过 {max_length} 个。' % field),
- 'required': gettext_lazy('【%s】此字段必填。' % field),
- 'null': gettext_lazy('【%s】此字段不能为null。' % field),
- }
+ return reset_messages(field, serializers.ListField.default_error_messages)
@staticmethod
def boolean(field: str):
- return {'invalid': gettext_lazy('【%s】必须是有效的布尔值。' % field),
- 'required': gettext_lazy('【%s】此字段必填。' % field),
- 'null': gettext_lazy('【%s】此字段不能为null。' % field)}
+ return reset_messages(field, serializers.BooleanField.default_error_messages)
@staticmethod
def dict(field: str):
- return {'not_a_dict': gettext_lazy('【%s】应为字典,但得到的类型为 "{input_type}' % field),
- 'empty': gettext_lazy('【%s】能是空的。' % field),
- 'required': gettext_lazy('【%s】此字段必填。' % field),
- 'null': gettext_lazy('【%s】此字段不能为null。' % field),
- }
+ return reset_messages(field, serializers.DictField.default_error_messages)
@staticmethod
def float(field: str):
- return {'invalid': gettext_lazy('【%s】需要一个有效的数字。' % field),
- 'max_value': gettext_lazy('【%s】请确保此值小于或等于 {max_value}。' % field),
- 'min_value': gettext_lazy('【%s】请确保此值大于或等于 {min_value}。' % field),
- 'max_string_length': gettext_lazy('【%s】字符串值太大。' % field),
- 'required': gettext_lazy('【%s】此字段必填。' % field),
- 'null': gettext_lazy('【%s】此字段不能为null。' % field),
- }
+ return reset_messages(field, serializers.FloatField.default_error_messages)
@staticmethod
def json(field: str):
- return {
- 'invalid': gettext_lazy('【%s】值必须是有效的JSON。' % field),
- 'required': gettext_lazy('【%s】此字段必填。' % field),
- 'null': gettext_lazy('【%s】此字段不能为null。' % field),
- }
+ return reset_messages(field, serializers.JSONField.default_error_messages)
@staticmethod
def base(field: str):
- return {
- 'required': gettext_lazy('【%s】此字段必填。' % field),
- 'null': gettext_lazy('【%s】此字段不能为null。' % field),
- }
+ return reset_messages(field, serializers.Field.default_error_messages)
@staticmethod
def date(field: str):
- return {
- 'required': gettext_lazy('【%s】此字段必填。' % field),
- 'null': gettext_lazy('【%s】此字段不能为null。' % field),
- 'invalid': gettext_lazy('【%s】日期格式错误。请改用以下格式之一: {format}。'),
- 'datetime': gettext_lazy('【%s】应为日期,但得到的是日期时间。')
- }
+ return reset_messages(field, serializers.DateField.default_error_messages)
@staticmethod
def image(field: str):
- return {
- 'required': gettext_lazy('【%s】此字段必填。' % field),
- 'null': gettext_lazy('【%s】此字段不能为null。' % field),
- 'invalid_image': gettext_lazy('【%s】上载有效的图像。您上载的文件不是图像或图像已损坏。' % field),
- 'max_length': gettext_lazy('请确保此文件名最多包含 {max_length} 个字符(长度为 {length})。')
- }
+ return reset_messages(field, serializers.ImageField.default_error_messages)
+
+ @staticmethod
+ def file(field: str):
+ return reset_messages(field, serializers.FileField.default_error_messages)
diff --git a/apps/common/util/fork.py b/apps/common/util/fork.py
index ee30f696e82..4405b9b76e4 100644
--- a/apps/common/util/fork.py
+++ b/apps/common/util/fork.py
@@ -142,7 +142,10 @@ def get_beautiful_soup(response):
if len(charset_list) > 0:
charset = charset_list[0]
if charset != encoding:
- html_content = response.content.decode(charset)
+ try:
+ html_content = response.content.decode(charset)
+ except Exception as e:
+ logging.getLogger("max_kb").error(f'{e}')
return BeautifulSoup(html_content, "html.parser")
return beautiful_soup
diff --git a/apps/common/util/function_code.py b/apps/common/util/function_code.py
new file mode 100644
index 00000000000..3a877a62367
--- /dev/null
+++ b/apps/common/util/function_code.py
@@ -0,0 +1,99 @@
+# coding=utf-8
+"""
+ @project: MaxKB
+ @Author:虎
+ @file: function_code.py
+ @date:2024/8/7 16:11
+ @desc:
+"""
+import os
+import pickle
+import subprocess
+import sys
+import uuid
+from textwrap import dedent
+
+from smartdoc.const import BASE_DIR
+from smartdoc.const import PROJECT_DIR
+
+python_directory = sys.executable
+
+
+class FunctionExecutor:
+    """
+    Executes user-supplied Python function code in a child process, optionally
+    as an unprivileged 'sandbox' OS user. The child writes its outcome (a dict
+    with code/msg/data) to a pickle result file that the parent reads back.
+    """
+
+    def __init__(self, sandbox=False):
+        # sandbox=True: run under the dedicated 'sandbox' user at a fixed path;
+        # sandbox=False: run as the current user under PROJECT_DIR/data.
+        self.sandbox = sandbox
+        if sandbox:
+            self.sandbox_path = '/opt/maxkb/app/sandbox'
+            self.user = 'sandbox'
+        else:
+            self.sandbox_path = os.path.join(PROJECT_DIR, 'data', 'sandbox')
+            self.user = None
+        self._createdir()
+        if self.sandbox:
+            # Hand the work directories to the sandbox user.
+            os.system(f"chown -R {self.user}:root {self.sandbox_path}")
+
+    def _createdir(self):
+        # Force a restrictive umask so the 0o700 modes below are honored.
+        old_mask = os.umask(0o077)
+        try:
+            os.makedirs(self.sandbox_path, 0o700, exist_ok=True)
+            os.makedirs(os.path.join(self.sandbox_path, 'execute'), 0o700, exist_ok=True)
+            os.makedirs(os.path.join(self.sandbox_path, 'result'), 0o700, exist_ok=True)
+        finally:
+            os.umask(old_mask)
+
+    def exec_code(self, code_str, keywords):
+        """
+        Run ``code_str`` (expected to define a function as its last binding)
+        with ``keywords`` as that function's arguments and return its result.
+        :raises Exception: when the child process fails or reports an error
+        """
+        _id = str(uuid.uuid1())
+        # Literal dict expressions the generated script pickles as its outcome.
+        success = '{"code":200,"msg":"成功","data":exec_result}'
+        err = '{"code":500,"msg":str(e),"data":None}'
+        result_path = f'{self.sandbox_path}/result/{_id}.result'
+        # Generated script: scrub MaxKB/Postgres env vars, exec() the user
+        # code, call the last function it defined, and pickle the outcome.
+        _exec_code = f"""
+try:
+    import os
+    import pickle
+    env = dict(os.environ)
+    for key in list(env.keys()):
+        if key in os.environ and (key.startswith('MAXKB') or key.startswith('POSTGRES') or key.startswith('PG')):
+            del os.environ[key]
+    locals_v={'{}'}
+    keywords={keywords}
+    globals_v=globals()
+    exec({dedent(code_str)!a}, globals_v, locals_v)
+    f_name, f = locals_v.popitem()
+    for local in locals_v:
+        globals_v[local] = locals_v[local]
+    exec_result=f(**keywords)
+    with open({result_path!a}, 'wb') as file:
+        file.write(pickle.dumps({success}))
+except Exception as e:
+    with open({result_path!a}, 'wb') as file:
+        file.write(pickle.dumps({err}))
+"""
+        if self.sandbox:
+            subprocess_result = self._exec_sandbox(_exec_code, _id)
+        else:
+            subprocess_result = self._exec(_exec_code)
+        if subprocess_result.returncode == 1:
+            raise Exception(subprocess_result.stderr)
+        # NOTE(review): plain pickle.loads on a file the child wrote — safe
+        # only while the result directory is writable solely by trusted users.
+        with open(result_path, 'rb') as file:
+            result = pickle.loads(file.read())
+        os.remove(result_path)
+        if result.get('code') == 200:
+            return result.get('data')
+        raise Exception(result.get('msg'))
+
+    def _exec_sandbox(self, _code, _id):
+        # Persist the script, chown it to the sandbox user, and run it via su.
+        exec_python_file = f'{self.sandbox_path}/execute/{_id}.py'
+        with open(exec_python_file, 'w') as file:
+            file.write(_code)
+        os.system(f"chown {self.user}:root {exec_python_file}")
+        kwargs = {'cwd': BASE_DIR}
+        subprocess_result = subprocess.run(
+            ['su', '-s', python_directory, '-c', "exec(open('" + exec_python_file + "').read())", self.user],
+            text=True,
+            capture_output=True, **kwargs)
+        os.remove(exec_python_file)
+        return subprocess_result
+
+    @staticmethod
+    def _exec(_code):
+        # Non-sandbox mode: run the generated script with the current interpreter.
+        return subprocess.run([python_directory, '-c', _code], text=True, capture_output=True)
diff --git a/apps/common/util/page_utils.py b/apps/common/util/page_utils.py
new file mode 100644
index 00000000000..61c52920d9a
--- /dev/null
+++ b/apps/common/util/page_utils.py
@@ -0,0 +1,47 @@
+# coding=utf-8
+"""
+ @project: MaxKB
+ @Author:虎
+ @file: page_utils.py
+ @date:2024/11/21 10:32
+ @desc:
+"""
+from math import ceil
+
+
+def page(query_set, page_size, handler, is_the_task_interrupted=lambda: False):
+    """
+    Iterate ``query_set`` in ascending id order, ``page_size`` rows at a time,
+    passing each batch to ``handler``.
+    @param query_set: queryset to page over
+    @param page_size: rows fetched per batch
+    @param handler: callable invoked with each batch
+    @param is_the_task_interrupted: polled before each batch; True stops early
+    @return: None
+    """
+    query = query_set.order_by("id")
+    count = query_set.count()
+    for i in range(0, ceil(count / page_size)):
+        if is_the_task_interrupted():
+            return
+        offset = i * page_size
+        paragraph_list = query.all()[offset: offset + page_size]
+        handler(paragraph_list)
+
+
+def page_desc(query_set, page_size, handler, is_the_task_interrupted=lambda: False):
+    """
+    Like ``page`` but visits the batches from the last page down to the first
+    (rows within each batch stay in ascending id order) — useful when the
+    handler deletes rows, so earlier offsets are not shifted.
+    @param query_set: queryset to page over
+    @param page_size: rows fetched per batch
+    @param handler: callable invoked with each batch
+    @param is_the_task_interrupted: polled before each batch; True stops early
+    @return: None
+    """
+    query = query_set.order_by("id")
+    count = query_set.count()
+    for i in sorted(range(0, ceil(count / page_size)), reverse=True):
+        if is_the_task_interrupted():
+            return
+        offset = i * page_size
+        paragraph_list = query.all()[offset: offset + page_size]
+        handler(paragraph_list)
diff --git a/apps/common/util/rsa_util.py b/apps/common/util/rsa_util.py
index 00301867208..452ca678d9e 100644
--- a/apps/common/util/rsa_util.py
+++ b/apps/common/util/rsa_util.py
@@ -40,15 +40,12 @@ def generate():
def get_key_pair():
rsa_value = rsa_cache.get(cache_key)
if rsa_value is None:
- lock.acquire()
- rsa_value = rsa_cache.get(cache_key)
- if rsa_value is not None:
- return rsa_value
- try:
+ with lock:
+ rsa_value = rsa_cache.get(cache_key)
+ if rsa_value is not None:
+ return rsa_value
rsa_value = get_key_pair_by_sql()
rsa_cache.set(cache_key, rsa_value)
- finally:
- lock.release()
return rsa_value
diff --git a/apps/common/util/split_model.py b/apps/common/util/split_model.py
index 19b265fc6ea..81b253531c9 100644
--- a/apps/common/util/split_model.py
+++ b/apps/common/util/split_model.py
@@ -19,6 +19,7 @@ def get_level_block(text, level_content_list, level_content_index, cursor):
:param text: 文本
:param level_content_list: 拆分的title数组
:param level_content_index: 指定的下标
+ :param cursor: 开始的下标位置
:return: 拆分后的文本数据
"""
start_content: str = level_content_list[level_content_index].get('content')
@@ -26,7 +27,7 @@ def get_level_block(text, level_content_list, level_content_index, cursor):
level_content_list) else None
start_index = text.index(start_content, cursor)
end_index = text.index(next_content, start_index + 1) if next_content is not None else len(text)
- return text[start_index:end_index].lstrip(level_content_list[level_content_index]['content']), end_index
+ return text[start_index + len(start_content):end_index], end_index
def to_tree_obj(content, state='title'):
@@ -148,10 +149,10 @@ def to_block_paragraph(tree_data_list: List[dict]):
def parse_title_level(text, content_level_pattern: List, index):
- if len(content_level_pattern) == index:
+ if index >= len(content_level_pattern):
return []
result = parse_level(text, content_level_pattern[index])
- if len(result) == 0 and len(content_level_pattern) > index + 1:
+ if len(result) == 0 and len(content_level_pattern) > index:
return parse_title_level(text, content_level_pattern, index + 1)
return result
@@ -163,7 +164,7 @@ def parse_level(text, pattern: str):
:param pattern: 正则
:return: 符合正则的文本
"""
- level_content_list = list(map(to_tree_obj, re_findall(pattern, text)))
+ level_content_list = list(map(to_tree_obj, [r[0:255] for r in re_findall(pattern, text) if r is not None]))
return list(map(filter_special_symbol, level_content_list))
@@ -213,51 +214,57 @@ def group_by(list_source: List, key):
return result
-def result_tree_to_paragraph(result_tree: List[dict], result, parent_chain):
+def result_tree_to_paragraph(result_tree: List[dict], result, parent_chain, with_filter: bool):
"""
转换为分段对象
:param result_tree: 解析文本的树
:param result: 传[] 用于递归
:param parent_chain: 传[] 用户递归存储数据
+ :param with_filter: 是否过滤block
:return: List[{'problem':'xx','content':'xx'}]
"""
for item in result_tree:
if item.get('state') == 'block':
- result.append({'title': " ".join(parent_chain), 'content': item.get("content")})
+ result.append({'title': " ".join(parent_chain),
+ 'content': filter_special_char(item.get("content")) if with_filter else item.get("content")})
children = item.get("children")
if children is not None and len(children) > 0:
- result_tree_to_paragraph(children, result, [*parent_chain, item.get('content')])
+ result_tree_to_paragraph(children, result,
+ [*parent_chain, remove_special_symbol(item.get('content'))], with_filter)
return result
-def post_handler_paragraph(content: str, limit: int, with_filter: bool):
+def post_handler_paragraph(content: str, limit: int):
+ """
+ 根据文本的最大字符分段
+ :param content: 需要分段的文本字段
+ :param limit: 最大分段字符
+ :return: 分段后数据
"""
- 根据文本的最大字符分段
- :param with_filter: 是否过滤特殊字符
- :param content: 需要分段的文本字段
- :param limit: 最大分段字符
- :return: 分段后数据
- """
- split_list = content.split('\n')
result = []
- temp_char = ''
- for split in split_list:
+ temp_char, start = '', 0
+ while (pos := content.find("\n", start)) != -1:
+ split, start = content[start:pos + 1], pos + 1
if len(temp_char + split) > limit:
+ if len(temp_char) > 4096:
+ pass
result.append(temp_char)
temp_char = ''
- temp_char = temp_char + split + '\n'
+ temp_char = temp_char + split
+ temp_char = temp_char + content[start:]
if len(temp_char) > 0:
+ if len(temp_char) > 4096:
+ pass
result.append(temp_char)
+
pattern = "[\\S\\s]{1," + str(limit) + '}'
# 如果\n 单段超过限制,则继续拆分
- s = list(map(lambda row: filter_special_char(row) if with_filter else row, list(
- reduce(lambda x, y: [*x, *y], list(map(lambda row: list(re.findall(pattern, row)), result)), []))))
- return s
+ return reduce(lambda x, y: [*x, *y], map(lambda row: re.findall(pattern, row), result), [])
replace_map = {
re.compile('\n+'): '\n',
- re.compile('\\s+'): ' ',
+ re.compile(' +'): ' ',
re.compile('#+'): "",
re.compile("\t+"): ''
}
@@ -277,11 +284,11 @@ def filter_special_char(content: str):
class SplitModel:
- def __init__(self, content_level_pattern, with_filter=True, limit=4096):
+ def __init__(self, content_level_pattern, with_filter=True, limit=100000):
self.content_level_pattern = content_level_pattern
self.with_filter = with_filter
- if limit is None or limit > 4096:
- limit = 4096
+ if limit is None or limit > 100000:
+ limit = 100000
if limit < 50:
limit = 50
self.limit = limit
@@ -293,40 +300,29 @@ def parse_to_tree(self, text: str, index=0):
:param index: 从那个正则开始解析
:return: 解析后的树形结果数据
"""
- if len(self.content_level_pattern) == index:
- return
- level_content_list = parse_title_level(text, self.content_level_pattern, 0)
+ level_content_list = parse_title_level(text, self.content_level_pattern, index)
+ if len(level_content_list) == 0:
+ return [to_tree_obj(row, 'block') for row in post_handler_paragraph(text, limit=self.limit)]
+ if index == 0 and text.lstrip().index(level_content_list[0]["content"].lstrip()) != 0:
+ level_content_list.insert(0, to_tree_obj(""))
+
cursor = 0
- for i in range(len(level_content_list)):
- block, cursor = get_level_block(text, level_content_list, i, cursor)
- children = self.parse_to_tree(text=block,
- index=index + 1)
- if children is not None and len(children) > 0:
- level_content_list[i]['children'] = children
- else:
- if len(block) > 0:
- level_content_list[i]['children'] = list(
- map(lambda row: to_tree_obj(row, 'block'),
- post_handler_paragraph(block, with_filter=self.with_filter, limit=self.limit)))
- if len(level_content_list) > 0:
- end_index = text.index(level_content_list[0].get('content'))
- if end_index == 0:
- return level_content_list
- other_content = text[0:end_index]
- children = self.parse_to_tree(text=other_content,
- index=index)
- if len(children) > 0:
- level_content_list = [*level_content_list, *children]
- else:
- if len(other_content.strip()) > 0:
- level_content_list = [*level_content_list, *list(
- map(lambda row: to_tree_obj(row, 'block'),
- post_handler_paragraph(other_content, with_filter=self.with_filter, limit=self.limit)))]
- else:
- if len(text.strip()) > 0:
- level_content_list = [*level_content_list, *list(
- map(lambda row: to_tree_obj(row, 'block'),
- post_handler_paragraph(text, with_filter=self.with_filter, limit=self.limit)))]
+ level_title_content_list = [item for item in level_content_list if item.get('state') == 'title']
+ for i in range(len(level_title_content_list)):
+ start_content: str = level_title_content_list[i].get('content')
+ if cursor < text.index(start_content, cursor):
+ for row in post_handler_paragraph(text[cursor: text.index(start_content, cursor)], limit=self.limit):
+ level_content_list.insert(0, to_tree_obj(row, 'block'))
+
+ block, cursor = get_level_block(text, level_title_content_list, i, cursor)
+ if len(block) == 0:
+ continue
+ children = self.parse_to_tree(text=block, index=index + 1)
+ level_title_content_list[i]['children'] = children
+ first_child_idx_in_block = block.lstrip().index(children[0]["content"].lstrip())
+ if first_child_idx_in_block != 0:
+ inner_children = self.parse_to_tree(block[:first_child_idx_in_block], index + 1)
+ level_title_content_list[i]['children'].extend(inner_children)
return level_content_list
def parse(self, text: str):
@@ -335,17 +331,22 @@ def parse(self, text: str):
:param text: 文本数据
:return: 解析后数据 {content:段落数据,keywords:[‘段落关键词’],parent_chain:['段落父级链路']}
"""
+ text = text.replace('\r\n', '\n')
text = text.replace('\r', '\n')
text = text.replace("\0", '')
result_tree = self.parse_to_tree(text, 0)
- result = result_tree_to_paragraph(result_tree, [], [])
- return [item for item in [self.post_reset_paragraph(row) for row in result] if
+ result = result_tree_to_paragraph(result_tree, [], [], self.with_filter)
+ for e in result:
+ if len(e['content']) > 4096:
+ pass
+ title_list = list(set([row.get('title') for row in result]))
+ return [item for item in [self.post_reset_paragraph(row, title_list) for row in result] if
'content' in item and len(item.get('content').strip()) > 0]
- def post_reset_paragraph(self, paragraph: Dict):
- result = self.filter_title_special_characters(paragraph)
+ def post_reset_paragraph(self, paragraph: Dict, title_list: List[str]):
+ result = self.content_is_null(paragraph, title_list)
+ result = self.filter_title_special_characters(result)
result = self.sub_title(result)
- result = self.content_is_null(result)
return result
@staticmethod
@@ -357,11 +358,14 @@ def sub_title(paragraph: Dict):
return paragraph
@staticmethod
- def content_is_null(paragraph: Dict):
+ def content_is_null(paragraph: Dict, title_list: List[str]):
if 'title' in paragraph:
title = paragraph.get('title')
content = paragraph.get('content')
if (content is None or len(content.strip()) == 0) and (title is not None and len(title) > 0):
+ find = [t for t in title_list if t.__contains__(title) and t != title]
+ if find:
+ return {'title': '', 'content': ''}
return {'title': '', 'content': title}
return paragraph
@@ -387,7 +391,7 @@ def filter_title_special_characters(paragraph: Dict):
}
-def get_split_model(filename: str, with_filter: bool = False, limit: int = 4096):
+def get_split_model(filename: str, with_filter: bool = False, limit: int = 100000):
"""
根据文件名称获取分段模型
:param limit: 每段大小
diff --git a/apps/common/util/test.py b/apps/common/util/test.py
index a9536ba9ca7..bfcab42d4f1 100644
--- a/apps/common/util/test.py
+++ b/apps/common/util/test.py
@@ -13,11 +13,12 @@
from django.core.cache import cache
# alg使用的算法
-HEADER = {'typ': 'JWP', 'alg': 'default'}
+HEADER = {'type': 'JWP', 'alg': 'default'}
TOKEN_KEY = 'solomon_world_token'
TOKEN_SALT = 'solomonwanc@gmail.com'
TIME_OUT = 30 * 60
+
# 加密
def encrypt(obj):
value = signing.dumps(obj, key=TOKEN_KEY, salt=TOKEN_SALT)
@@ -29,7 +30,6 @@ def encrypt(obj):
def decrypt(src):
src = signing.b64_decode(src.encode()).decode()
raw = signing.loads(src, key=TOKEN_KEY, salt=TOKEN_SALT)
- print(type(raw))
return raw
@@ -74,5 +74,3 @@ def check_token(token):
if last_token:
return last_token == token
return False
-
-
diff --git a/apps/common/util/ts_vecto_util.py b/apps/common/util/ts_vecto_util.py
index 451d87bf870..37d03d4b544 100644
--- a/apps/common/util/ts_vecto_util.py
+++ b/apps/common/util/ts_vecto_util.py
@@ -12,9 +12,6 @@
import jieba
import jieba.posseg
-from jieba import analyse
-
-from common.util.split_model import group_by
jieba_word_list_cache = [chr(item) for item in range(38, 84)]
@@ -49,14 +46,16 @@ def get_word_list(text: str):
def replace_word(word_dict, text: str):
for key in word_dict:
- text = re.sub('(?= 0])
+ result = jieba.lcut(text, cut_all=True)
+ return " ".join(result)
def to_query(text: str):
- # 获取不分词的数据
- word_list = get_word_list(text)
- # 获取关键词关系
- word_dict = to_word_dict(word_list, text)
- # 替换字符串
- text = replace_word(word_dict, text)
- extract_tags = analyse.extract_tags(text, topK=5, withWeight=True, allowPOS=('ns', 'n', 'vn', 'v', 'eng'))
- result = " ".join([get_key_by_word_dict(word, word_dict) for word, score in extract_tags if
- not remove_chars.__contains__(word)])
- # 删除词库
- for word in word_list:
- jieba.del_word(word)
+ extract_tags = jieba.lcut(text, cut_all=True)
+ result = " ".join(extract_tags)
return result
diff --git a/apps/dataset/migrations/0005_file.py b/apps/dataset/migrations/0005_file.py
new file mode 100644
index 00000000000..3c74fc8dbbe
--- /dev/null
+++ b/apps/dataset/migrations/0005_file.py
@@ -0,0 +1,30 @@
+# Generated by Django 4.2.13 on 2024-07-05 18:59
+
+from django.db import migrations, models
+import uuid
+
+from smartdoc.const import CONFIG
+
+
+class Migration(migrations.Migration):
+ dependencies = [
+ ('dataset', '0004_document_directly_return_similarity'),
+ ]
+
+ operations = [
+ migrations.RunSQL(f"grant execute on function lo_from_bytea to {CONFIG.get('DB_USER')}"),
+ migrations.CreateModel(
+ name='File',
+ fields=[
+ ('create_time', models.DateTimeField(auto_now_add=True, verbose_name='创建时间')),
+ ('update_time', models.DateTimeField(auto_now=True, verbose_name='修改时间')),
+ ('id', models.UUIDField(default=uuid.uuid1, editable=False, primary_key=True, serialize=False,
+ verbose_name='主键id')),
+ ('file_name', models.CharField(default='', max_length=256, verbose_name='文件名称')),
+ ('loid', models.IntegerField(verbose_name='loid')),
+ ],
+ options={
+ 'db_table': 'file',
+ },
+ ),
+ ]
diff --git a/apps/dataset/migrations/0006_dataset_embedding_mode.py b/apps/dataset/migrations/0006_dataset_embedding_mode.py
new file mode 100644
index 00000000000..2248d8e3634
--- /dev/null
+++ b/apps/dataset/migrations/0006_dataset_embedding_mode.py
@@ -0,0 +1,21 @@
+# Generated by Django 4.2.13 on 2024-07-17 13:56
+
+import dataset.models.data_set
+from django.db import migrations, models
+import django.db.models.deletion
+
+
+class Migration(migrations.Migration):
+
+ dependencies = [
+ ('setting', '0005_model_permission_type'),
+ ('dataset', '0005_file'),
+ ]
+
+ operations = [
+ migrations.AddField(
+ model_name='dataset',
+ name='embedding_mode',
+ field=models.ForeignKey(default=dataset.models.data_set.default_model, on_delete=django.db.models.deletion.DO_NOTHING, to='setting.model', verbose_name='向量模型'),
+ ),
+ ]
diff --git a/apps/dataset/migrations/0007_alter_paragraph_content.py b/apps/dataset/migrations/0007_alter_paragraph_content.py
new file mode 100644
index 00000000000..ab654b1a1e3
--- /dev/null
+++ b/apps/dataset/migrations/0007_alter_paragraph_content.py
@@ -0,0 +1,18 @@
+# Generated by Django 4.2.14 on 2024-07-24 14:35
+
+from django.db import migrations, models
+
+
+class Migration(migrations.Migration):
+
+ dependencies = [
+ ('dataset', '0006_dataset_embedding_mode'),
+ ]
+
+ operations = [
+ migrations.AlterField(
+ model_name='paragraph',
+ name='content',
+ field=models.CharField(max_length=102400, verbose_name='段落内容'),
+ ),
+ ]
diff --git a/apps/dataset/migrations/0008_alter_document_status_alter_paragraph_status.py b/apps/dataset/migrations/0008_alter_document_status_alter_paragraph_status.py
new file mode 100644
index 00000000000..3380d7b928a
--- /dev/null
+++ b/apps/dataset/migrations/0008_alter_document_status_alter_paragraph_status.py
@@ -0,0 +1,23 @@
+# Generated by Django 4.2.14 on 2024-07-29 15:37
+
+from django.db import migrations, models
+
+
+class Migration(migrations.Migration):
+
+ dependencies = [
+ ('dataset', '0007_alter_paragraph_content'),
+ ]
+
+ operations = [
+ migrations.AlterField(
+ model_name='document',
+ name='status',
+ field=models.CharField(choices=[('0', '导入中'), ('1', '已完成'), ('2', '导入失败'), ('3', '排队中')], default='3', max_length=1, verbose_name='状态'),
+ ),
+ migrations.AlterField(
+ model_name='paragraph',
+ name='status',
+ field=models.CharField(choices=[('0', '导入中'), ('1', '已完成'), ('2', '导入失败'), ('3', '排队中')], default='0', max_length=1, verbose_name='状态'),
+ ),
+ ]
diff --git a/apps/dataset/migrations/0009_alter_document_status_alter_paragraph_status.py b/apps/dataset/migrations/0009_alter_document_status_alter_paragraph_status.py
new file mode 100644
index 00000000000..7c138a609e0
--- /dev/null
+++ b/apps/dataset/migrations/0009_alter_document_status_alter_paragraph_status.py
@@ -0,0 +1,23 @@
+# Generated by Django 4.2.15 on 2024-10-15 14:49
+
+from django.db import migrations, models
+
+
+class Migration(migrations.Migration):
+
+ dependencies = [
+ ('dataset', '0008_alter_document_status_alter_paragraph_status'),
+ ]
+
+ operations = [
+ migrations.AlterField(
+ model_name='document',
+ name='status',
+ field=models.CharField(choices=[('0', '导入中'), ('1', '已完成'), ('2', '导入失败'), ('3', '排队中'), ('4', '生成问题中')], default='3', max_length=1, verbose_name='状态'),
+ ),
+ migrations.AlterField(
+ model_name='paragraph',
+ name='status',
+ field=models.CharField(choices=[('0', '导入中'), ('1', '已完成'), ('2', '导入失败'), ('3', '排队中'), ('4', '生成问题中')], default='0', max_length=1, verbose_name='状态'),
+ ),
+ ]
diff --git a/apps/dataset/migrations/0010_file_meta.py b/apps/dataset/migrations/0010_file_meta.py
new file mode 100644
index 00000000000..6e28e3eecc3
--- /dev/null
+++ b/apps/dataset/migrations/0010_file_meta.py
@@ -0,0 +1,18 @@
+# Generated by Django 4.2.15 on 2024-11-07 15:32
+
+from django.db import migrations, models
+
+
+class Migration(migrations.Migration):
+
+ dependencies = [
+ ('dataset', '0009_alter_document_status_alter_paragraph_status'),
+ ]
+
+ operations = [
+ migrations.AddField(
+ model_name='file',
+ name='meta',
+ field=models.JSONField(default=dict, verbose_name='文件关联数据'),
+ ),
+ ]
diff --git a/apps/dataset/migrations/0011_document_status_meta_paragraph_status_meta_and_more.py b/apps/dataset/migrations/0011_document_status_meta_paragraph_status_meta_and_more.py
new file mode 100644
index 00000000000..7c5d6375506
--- /dev/null
+++ b/apps/dataset/migrations/0011_document_status_meta_paragraph_status_meta_and_more.py
@@ -0,0 +1,54 @@
+# Generated by Django 4.2.15 on 2024-11-22 14:44
+from django.db.models import QuerySet
+
+from django.db import migrations, models
+
+import dataset
+from common.event import ListenerManagement
+from dataset.models import State, TaskType
+
+sql = """
+UPDATE "document"
+SET status ="replace"("replace"("replace"(status, '2', '3'),'0','3'),'1','2')
+"""
+sql_paragraph = """
+UPDATE "paragraph"
+SET status ="replace"("replace"("replace"(status, '2', '3'),'0','3'),'1','2')
+"""
+
+
+def updateDocumentStatus(apps, schema_editor):
+ DocumentModel = apps.get_model('dataset', 'Document')
+ ListenerManagement.get_aggregation_document_status_by_query_set(QuerySet(DocumentModel))()
+
+
+class Migration(migrations.Migration):
+ dependencies = [
+ ('dataset', '0010_file_meta'),
+ ]
+
+ operations = [
+ migrations.AddField(
+ model_name='document',
+ name='status_meta',
+ field=models.JSONField(default=dataset.models.data_set.default_status_meta, verbose_name='状态统计数据'),
+ ),
+ migrations.AddField(
+ model_name='paragraph',
+ name='status_meta',
+ field=models.JSONField(default=dataset.models.data_set.default_status_meta, verbose_name='状态数据'),
+ ),
+ migrations.AlterField(
+ model_name='document',
+ name='status',
+ field=models.CharField(default=dataset.models.data_set.Status.__str__, max_length=20, verbose_name='状态'),
+ ),
+ migrations.AlterField(
+ model_name='paragraph',
+ name='status',
+ field=models.CharField(default=dataset.models.data_set.Status.__str__, max_length=20, verbose_name='状态'),
+ ),
+ migrations.RunSQL(sql_paragraph),
+ migrations.RunSQL(sql),
+ migrations.RunPython(updateDocumentStatus)
+ ]
diff --git a/apps/dataset/models/data_set.py b/apps/dataset/models/data_set.py
index d0f56a017fc..dd2f1b7c9a7 100644
--- a/apps/dataset/models/data_set.py
+++ b/apps/dataset/models/data_set.py
@@ -7,18 +7,74 @@
@desc: 数据集
"""
import uuid
+from enum import Enum
from django.db import models
+from django.db.models.signals import pre_delete
+from django.dispatch import receiver
+from common.db.sql_execute import select_one
from common.mixins.app_model_mixin import AppModelMixin
+from setting.models import Model
from users.models import User
-class Status(models.TextChoices):
- """订单类型"""
- embedding = 0, '导入中'
- success = 1, '已完成'
- error = 2, '导入失败'
+class TaskType(Enum):
+ # 向量
+ EMBEDDING = 1
+ # 生成问题
+ GENERATE_PROBLEM = 2
+ # 同步
+ SYNC = 3
+
+
+class State(Enum):
+ # 等待
+ PENDING = '0'
+ # 执行中
+ STARTED = '1'
+ # 成功
+ SUCCESS = '2'
+ # 失败
+ FAILURE = '3'
+ # 取消任务
+ REVOKE = '4'
+ # 取消成功
+ REVOKED = '5'
+ # 忽略
+ IGNORED = 'n'
+
+
+class Status:
+ type_cls = TaskType
+ state_cls = State
+
+ def __init__(self, status: str = None):
+ self.task_status = {}
+ status_list = list(status[::-1] if status is not None else '')
+ for _type in self.type_cls:
+ index = _type.value - 1
+ _state = self.state_cls(status_list[index] if len(status_list) > index else 'n')
+ self.task_status[_type] = _state
+
+ @staticmethod
+ def of(status: str):
+ return Status(status)
+
+ def __str__(self):
+ result = []
+ for _type in sorted(self.type_cls, key=lambda item: item.value, reverse=True):
+ result.insert(len(self.type_cls) - _type.value, self.task_status[_type].value)
+ return ''.join(result)
+
+ def __setitem__(self, key, value):
+ self.task_status[key] = value
+
+ def __getitem__(self, item):
+ return self.task_status[item]
+
+ def update_status(self, task_type: TaskType, state: State):
+ self.task_status[task_type] = state
class Type(models.TextChoices):
@@ -26,12 +82,23 @@ class Type(models.TextChoices):
web = 1, 'web站点类型'
+ lark = 2, '飞书类型'
+ yuque = 3, '语雀类型'
+
class HitHandlingMethod(models.TextChoices):
optimization = 'optimization', '模型优化'
directly_return = 'directly_return', '直接返回'
+def default_model():
+ return uuid.UUID('42f63a3d-427e-11ef-b3ec-a8a1595801ab')
+
+
+def default_status_meta():
+ return {"state_time": {}}
+
+
class DataSet(AppModelMixin):
"""
数据集表
@@ -42,7 +109,8 @@ class DataSet(AppModelMixin):
user = models.ForeignKey(User, on_delete=models.DO_NOTHING, verbose_name="所属用户")
type = models.CharField(verbose_name='类型', max_length=1, choices=Type.choices,
default=Type.base)
-
+ embedding_mode = models.ForeignKey(Model, on_delete=models.DO_NOTHING, verbose_name="向量模型",
+ default=default_model)
meta = models.JSONField(verbose_name="元数据", default=dict)
class Meta:
@@ -57,8 +125,8 @@ class Document(AppModelMixin):
dataset = models.ForeignKey(DataSet, on_delete=models.DO_NOTHING)
name = models.CharField(max_length=150, verbose_name="文档名称")
char_length = models.IntegerField(verbose_name="文档字符数 冗余字段")
- status = models.CharField(verbose_name='状态', max_length=1, choices=Status.choices,
- default=Status.embedding)
+ status = models.CharField(verbose_name='状态', max_length=20, default=Status('').__str__)
+ status_meta = models.JSONField(verbose_name="状态统计数据", default=default_status_meta)
is_active = models.BooleanField(default=True)
type = models.CharField(verbose_name='类型', max_length=1, choices=Type.choices,
@@ -81,10 +149,10 @@ class Paragraph(AppModelMixin):
id = models.UUIDField(primary_key=True, max_length=128, default=uuid.uuid1, editable=False, verbose_name="主键id")
document = models.ForeignKey(Document, on_delete=models.DO_NOTHING, db_constraint=False)
dataset = models.ForeignKey(DataSet, on_delete=models.DO_NOTHING)
- content = models.CharField(max_length=4096, verbose_name="段落内容")
+ content = models.CharField(max_length=102400, verbose_name="段落内容")
title = models.CharField(max_length=256, verbose_name="标题", default="")
- status = models.CharField(verbose_name='状态', max_length=1, choices=Status.choices,
- default=Status.embedding)
+ status = models.CharField(verbose_name='状态', max_length=20, default=Status('').__str__)
+ status_meta = models.JSONField(verbose_name="状态数据", default=default_status_meta)
hit_num = models.IntegerField(verbose_name="命中次数", default=0)
is_active = models.BooleanField(default=True)
@@ -123,3 +191,32 @@ class Image(AppModelMixin):
class Meta:
db_table = "image"
+
+
+class File(AppModelMixin):
+ id = models.UUIDField(primary_key=True, max_length=128, default=uuid.uuid1, editable=False, verbose_name="主键id")
+
+ file_name = models.CharField(max_length=256, verbose_name="文件名称", default="")
+
+ loid = models.IntegerField(verbose_name="loid")
+
+ meta = models.JSONField(verbose_name="文件关联数据", default=dict)
+
+ class Meta:
+ db_table = "file"
+
+ def save(
+ self, bytea=None, force_insert=False, force_update=False, using=None, update_fields=None
+ ):
+ result = select_one("SELECT lo_from_bytea(%s, %s::bytea) as loid", [0, bytea])
+ self.loid = result['loid']
+        super().save(force_insert, force_update, using, update_fields)
+
+ def get_byte(self):
+ result = select_one(f'SELECT lo_get({self.loid}) as "data"', [])
+ return result['data']
+
+
+@receiver(pre_delete, sender=File)
+def on_delete_file(sender, instance, **kwargs):
+ select_one(f'SELECT lo_unlink({instance.loid})', [])
diff --git a/apps/dataset/serializers/common_serializers.py b/apps/dataset/serializers/common_serializers.py
index 16d33e66202..856f3da1584 100644
--- a/apps/dataset/serializers/common_serializers.py
+++ b/apps/dataset/serializers/common_serializers.py
@@ -7,12 +7,16 @@
@desc:
"""
import os
+import re
+import uuid
+import zipfile
from typing import List
from django.db.models import QuerySet
from drf_yasg import openapi
from rest_framework import serializers
+from common.config.embedding_config import ModelManage
from common.db.search import native_search
from common.db.sql_execute import update_execute
from common.exception.app_exception import AppApiException
@@ -20,8 +24,63 @@
from common.util.field_message import ErrMessage
from common.util.file_util import get_file_content
from common.util.fork import Fork
-from dataset.models import Paragraph
+from dataset.models import Paragraph, Problem, ProblemParagraphMapping, DataSet, File, Image
+from setting.models_provider import get_model
from smartdoc.conf import PROJECT_DIR
+from django.utils.translation import gettext_lazy as _
+
+
+def zip_dir(zip_path, output=None):
+ output = output or os.path.basename(zip_path) + '.zip'
+ zip = zipfile.ZipFile(output, 'w', zipfile.ZIP_DEFLATED)
+ for root, dirs, files in os.walk(zip_path):
+ relative_root = '' if root == zip_path else root.replace(zip_path, '') + os.sep
+ for filename in files:
+ zip.write(os.path.join(root, filename), relative_root + filename)
+ zip.close()
+
+
+def is_valid_uuid(s):
+ try:
+ uuid.UUID(s)
+ return True
+ except ValueError:
+ return False
+
+
+def write_image(zip_path: str, image_list: List[str]):
+ for image in image_list:
+        search = re.search(r"\(.*\)", image)
+ if search:
+ text = search.group()
+ if text.startswith('(/api/file/'):
+ r = text.replace('(/api/file/', '').replace(')', '')
+ r = r.strip().split(" ")[0]
+ if not is_valid_uuid(r):
+                    continue
+ file = QuerySet(File).filter(id=r).first()
+ if file is None:
+                    continue
+ zip_inner_path = os.path.join('api', 'file', r)
+ file_path = os.path.join(zip_path, zip_inner_path)
+ if not os.path.exists(os.path.dirname(file_path)):
+ os.makedirs(os.path.dirname(file_path))
+                with open(file_path, 'wb') as f:
+ f.write(file.get_byte())
+ else:
+ r = text.replace('(/api/image/', '').replace(')', '')
+ r = r.strip().split(" ")[0]
+ if not is_valid_uuid(r):
+                    continue
+ image_model = QuerySet(Image).filter(id=r).first()
+ if image_model is None:
+                    continue
+ zip_inner_path = os.path.join('api', 'image', r)
+ file_path = os.path.join(zip_path, zip_inner_path)
+ if not os.path.exists(os.path.dirname(file_path)):
+ os.makedirs(os.path.dirname(file_path))
+ with open(file_path, 'wb') as f:
+ f.write(image_model.image)
def update_document_char_length(document_id: str):
@@ -39,16 +98,16 @@ def list_paragraph(paragraph_list: List[str]):
class MetaSerializer(serializers.Serializer):
class WebMeta(serializers.Serializer):
- source_url = serializers.CharField(required=True, error_messages=ErrMessage.char("文档地址"))
+ source_url = serializers.CharField(required=True, error_messages=ErrMessage.char(_('source url')))
selector = serializers.CharField(required=False, allow_null=True, allow_blank=True,
- error_messages=ErrMessage.char("选择器"))
+ error_messages=ErrMessage.char(_('selector')))
def is_valid(self, *, raise_exception=False):
super().is_valid(raise_exception=True)
source_url = self.data.get('source_url')
response = Fork(source_url, []).fork()
if response.status == 500:
- raise AppApiException(500, f"url错误,无法解析【{source_url}】")
+ raise AppApiException(500, _('URL error, cannot parse [{source_url}]').format(source_url=source_url))
class BaseMeta(serializers.Serializer):
def is_valid(self, *, raise_exception=False):
@@ -57,7 +116,7 @@ def is_valid(self, *, raise_exception=False):
class BatchSerializer(ApiMixin, serializers.Serializer):
id_list = serializers.ListField(required=True, child=serializers.UUIDField(required=True),
- error_messages=ErrMessage.char("id列表"))
+ error_messages=ErrMessage.char(_('id list')))
def is_valid(self, *, model=None, raise_exception=False):
super().is_valid(raise_exception=True)
@@ -67,7 +126,8 @@ def is_valid(self, *, model=None, raise_exception=False):
if len(model_list) != len(id_list):
model_id_list = [str(m.id) for m in model_list]
error_id_list = list(filter(lambda row_id: not model_id_list.__contains__(row_id), id_list))
- raise AppApiException(500, f"id不正确:{error_id_list}")
+ raise AppApiException(500, _('The following id does not exist: {error_id_list}').format(
+ error_id_list=error_id_list))
@staticmethod
def get_request_body_api():
@@ -75,7 +135,113 @@ def get_request_body_api():
type=openapi.TYPE_OBJECT,
properties={
'id_list': openapi.Schema(type=openapi.TYPE_ARRAY, items=openapi.Schema(type=openapi.TYPE_STRING),
- title="主键id列表",
- description="主键id列表")
+ title=_('id list'),
+ description=_('id list'))
+ }
+ )
+
+
+class ProblemParagraphObject:
+ def __init__(self, dataset_id: str, document_id: str, paragraph_id: str, problem_content: str):
+ self.dataset_id = dataset_id
+ self.document_id = document_id
+ self.paragraph_id = paragraph_id
+ self.problem_content = problem_content
+
+
+def or_get(exists_problem_list, content, dataset_id, document_id, paragraph_id, problem_content_dict):
+ if content in problem_content_dict:
+ return problem_content_dict.get(content)[0], document_id, paragraph_id
+ exists = [row for row in exists_problem_list if row.content == content]
+ if len(exists) > 0:
+ problem_content_dict[content] = exists[0], False
+ return exists[0], document_id, paragraph_id
+ else:
+ problem = Problem(id=uuid.uuid1(), content=content, dataset_id=dataset_id)
+ problem_content_dict[content] = problem, True
+ return problem, document_id, paragraph_id
+
+
+class ProblemParagraphManage:
+ def __init__(self, problemParagraphObjectList: [ProblemParagraphObject], dataset_id):
+ self.dataset_id = dataset_id
+ self.problemParagraphObjectList = problemParagraphObjectList
+
+ def to_problem_model_list(self):
+ problem_list = [item.problem_content for item in self.problemParagraphObjectList]
+ exists_problem_list = []
+ if len(self.problemParagraphObjectList) > 0:
+ # 查询到已存在的问题列表
+ exists_problem_list = QuerySet(Problem).filter(dataset_id=self.dataset_id,
+ content__in=problem_list).all()
+ problem_content_dict = {}
+ problem_model_list = [
+ or_get(exists_problem_list, problemParagraphObject.problem_content, problemParagraphObject.dataset_id,
+ problemParagraphObject.document_id, problemParagraphObject.paragraph_id, problem_content_dict) for
+ problemParagraphObject in self.problemParagraphObjectList]
+
+ problem_paragraph_mapping_list = [
+ ProblemParagraphMapping(id=uuid.uuid1(), document_id=document_id, problem_id=problem_model.id,
+ paragraph_id=paragraph_id,
+ dataset_id=self.dataset_id) for
+ problem_model, document_id, paragraph_id in problem_model_list]
+
+ result = [problem_model for problem_model, is_create in problem_content_dict.values() if
+ is_create], problem_paragraph_mapping_list
+ return result
+
+
+def get_embedding_model_by_dataset_id_list(dataset_id_list: List):
+ dataset_list = QuerySet(DataSet).filter(id__in=dataset_id_list)
+ if len(set([dataset.embedding_mode_id for dataset in dataset_list])) > 1:
+ raise Exception(_('The knowledge base is inconsistent with the vector model'))
+ if len(dataset_list) == 0:
+ raise Exception(_('Knowledge base setting error, please reset the knowledge base'))
+ return ModelManage.get_model(str(dataset_list[0].embedding_mode_id),
+ lambda _id: get_model(dataset_list[0].embedding_mode))
+
+
+def get_embedding_model_by_dataset_id(dataset_id: str):
+ dataset = QuerySet(DataSet).select_related('embedding_mode').filter(id=dataset_id).first()
+ return ModelManage.get_model(str(dataset.embedding_mode_id), lambda _id: get_model(dataset.embedding_mode))
+
+
+def get_embedding_model_by_dataset(dataset):
+ return ModelManage.get_model(str(dataset.embedding_mode_id), lambda _id: get_model(dataset.embedding_mode))
+
+
+def get_embedding_model_id_by_dataset_id(dataset_id):
+ dataset = QuerySet(DataSet).select_related('embedding_mode').filter(id=dataset_id).first()
+ return str(dataset.embedding_mode_id)
+
+
+def get_embedding_model_id_by_dataset_id_list(dataset_id_list: List):
+ dataset_list = QuerySet(DataSet).filter(id__in=dataset_id_list)
+ if len(set([dataset.embedding_mode_id for dataset in dataset_list])) > 1:
+ raise Exception(_('The knowledge base is inconsistent with the vector model'))
+ if len(dataset_list) == 0:
+ raise Exception(_('Knowledge base setting error, please reset the knowledge base'))
+ return str(dataset_list[0].embedding_mode_id)
+
+
+class GenerateRelatedSerializer(ApiMixin, serializers.Serializer):
+ model_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid(_('Model id')))
+ prompt = serializers.CharField(required=True, error_messages=ErrMessage.uuid(_('Prompt word')))
+ state_list = serializers.ListField(required=False, child=serializers.CharField(required=True),
+                                       error_messages=ErrMessage.list(_('state list')))
+
+ @staticmethod
+ def get_request_body_api():
+ return openapi.Schema(
+ type=openapi.TYPE_OBJECT,
+ properties={
+ 'model_id': openapi.Schema(type=openapi.TYPE_STRING,
+ title=_('Model id'),
+ description=_('Model id')),
+ 'prompt': openapi.Schema(type=openapi.TYPE_STRING, title=_('Prompt word'),
+ description=_("Prompt word")),
+ 'state_list': openapi.Schema(type=openapi.TYPE_ARRAY,
+ items=openapi.Schema(type=openapi.TYPE_STRING),
+ title=_('state list'))
}
)
diff --git a/apps/dataset/serializers/dataset_serializers.py b/apps/dataset/serializers/dataset_serializers.py
index d3f5af73a4c..895443d997f 100644
--- a/apps/dataset/serializers/dataset_serializers.py
+++ b/apps/dataset/serializers/dataset_serializers.py
@@ -6,40 +6,52 @@
@date:2023/9/21 16:14
@desc:
"""
+import io
import logging
import os.path
import re
import traceback
import uuid
+import zipfile
from functools import reduce
-from typing import Dict
+from tempfile import TemporaryDirectory
+from typing import Dict, List
from urllib.parse import urlparse
+from celery_once import AlreadyQueued
from django.contrib.postgres.fields import ArrayField
from django.core import validators
from django.db import transaction, models
-from django.db.models import QuerySet, Q
+from django.db.models import QuerySet
+from django.db.models.functions import Reverse, Substr
+from django.http import HttpResponse
from drf_yasg import openapi
from rest_framework import serializers
from application.models import ApplicationDatasetMapping
-from common.config.embedding_config import VectorStore, EmbeddingModel
+from common.config.embedding_config import VectorStore
from common.db.search import get_dynamics_model, native_page_search, native_search
from common.db.sql_execute import select_list
-from common.event import ListenerManagement, SyncWebDatasetArgs
+from common.event import ListenerManagement
from common.exception.app_exception import AppApiException
from common.mixins.api_mixin import ApiMixin
-from common.util.common import post
+from common.util.common import post, flat_map, valid_license, parse_image
from common.util.field_message import ErrMessage
from common.util.file_util import get_file_content
from common.util.fork import ChildLink, Fork
from common.util.split_model import get_split_model
-from dataset.models.data_set import DataSet, Document, Paragraph, Problem, Type, ProblemParagraphMapping
-from dataset.serializers.common_serializers import list_paragraph, MetaSerializer
+from dataset.models.data_set import DataSet, Document, Paragraph, Problem, Type, ProblemParagraphMapping, TaskType, \
+ State, File, Image
+from dataset.serializers.common_serializers import list_paragraph, MetaSerializer, ProblemParagraphManage, \
+ get_embedding_model_by_dataset_id, get_embedding_model_id_by_dataset_id, write_image, zip_dir, \
+ GenerateRelatedSerializer
from dataset.serializers.document_serializers import DocumentSerializers, DocumentInstanceSerializer
+from dataset.task import sync_web_dataset, sync_replace_web_dataset, generate_related_by_dataset_id
from embedding.models import SearchMode
-from setting.models import AuthOperate
+from embedding.task import embedding_by_dataset, delete_embedding_by_dataset
+from setting.models import AuthOperate, Model
from smartdoc.conf import PROJECT_DIR
+from django.utils.translation import gettext_lazy as _
"""
# __exact 精确等于 like ‘aaa’
@@ -69,9 +81,9 @@ class Meta:
fields = ['id', 'name', 'desc', 'meta', 'create_time', 'update_time']
class Application(ApiMixin, serializers.Serializer):
- user_id = serializers.UUIDField(required=True, error_messages=ErrMessage.char("用户id"))
+ user_id = serializers.UUIDField(required=True, error_messages=ErrMessage.char(_('user id')))
- dataset_id = serializers.UUIDField(required=True, error_messages=ErrMessage.char("数据集id"))
+ dataset_id = serializers.UUIDField(required=True, error_messages=ErrMessage.char(_('dataset id')))
@staticmethod
def get_request_params_api():
@@ -80,7 +92,7 @@ def get_request_params_api():
in_=openapi.IN_PATH,
type=openapi.TYPE_STRING,
required=True,
- description='知识库id')
+ description=_('dataset id')),
]
@staticmethod
@@ -91,22 +103,31 @@ def get_response_body_api():
'create_time',
'update_time'],
properties={
- 'id': openapi.Schema(type=openapi.TYPE_STRING, title="", description="主键id"),
- 'name': openapi.Schema(type=openapi.TYPE_STRING, title="应用名称", description="应用名称"),
- 'desc': openapi.Schema(type=openapi.TYPE_STRING, title="应用描述", description="应用描述"),
- 'model_id': openapi.Schema(type=openapi.TYPE_STRING, title="模型id", description="模型id"),
- "multiple_rounds_dialogue": openapi.Schema(type=openapi.TYPE_BOOLEAN, title="是否开启多轮对话",
- description="是否开启多轮对话"),
- 'prologue': openapi.Schema(type=openapi.TYPE_STRING, title="开场白", description="开场白"),
+ 'id': openapi.Schema(type=openapi.TYPE_STRING, title="", description=_('id')),
+ 'name': openapi.Schema(type=openapi.TYPE_STRING, title=_('application name'),
+ description=_('application name')),
+                    'desc': openapi.Schema(type=openapi.TYPE_STRING, title=_('application description'),
+                                           description=_('application description')),
+ 'model_id': openapi.Schema(type=openapi.TYPE_STRING, title=_('model id'),
+ description=_('model id')),
+ "multiple_rounds_dialogue": openapi.Schema(type=openapi.TYPE_BOOLEAN,
+ title=_('Whether to start multiple rounds of dialogue'),
+ description=_(
+ 'Whether to start multiple rounds of dialogue')),
+ 'prologue': openapi.Schema(type=openapi.TYPE_STRING, title=_('opening remarks'),
+ description=_('opening remarks')),
'example': openapi.Schema(type=openapi.TYPE_ARRAY, items=openapi.Schema(type=openapi.TYPE_STRING),
- title="示例列表", description="示例列表"),
- 'user_id': openapi.Schema(type=openapi.TYPE_STRING, title="所属用户", description="所属用户"),
+ title=_('example'), description=_('example')),
+ 'user_id': openapi.Schema(type=openapi.TYPE_STRING, title=_('User id'), description=_('User id')),
- 'status': openapi.Schema(type=openapi.TYPE_BOOLEAN, title="是否发布", description='是否发布'),
+ 'status': openapi.Schema(type=openapi.TYPE_BOOLEAN, title=_('Whether to publish'),
+ description=_('Whether to publish')),
- 'create_time': openapi.Schema(type=openapi.TYPE_STRING, title="创建时间", description='创建时间'),
+ 'create_time': openapi.Schema(type=openapi.TYPE_STRING, title=_('create time'),
+ description=_('create time')),
- 'update_time': openapi.Schema(type=openapi.TYPE_STRING, title="修改时间", description='修改时间')
+ 'update_time': openapi.Schema(type=openapi.TYPE_STRING, title=_('update time'),
+ description=_('update time'))
}
)
@@ -115,29 +136,33 @@ class Query(ApiMixin, serializers.Serializer):
查询对象
"""
name = serializers.CharField(required=False,
- error_messages=ErrMessage.char("知识库名称"),
+ error_messages=ErrMessage.char(_('dataset name')),
max_length=64,
min_length=1)
desc = serializers.CharField(required=False,
- error_messages=ErrMessage.char("知识库描述"),
+ error_messages=ErrMessage.char(_('dataset description')),
max_length=256,
min_length=1,
)
user_id = serializers.CharField(required=True)
+ select_user_id = serializers.CharField(required=False)
def get_query_set(self):
user_id = self.data.get("user_id")
query_set_dict = {}
query_set = QuerySet(model=get_dynamics_model(
{'temp.name': models.CharField(), 'temp.desc': models.CharField(),
- "document_temp.char_length": models.IntegerField(), 'temp.create_time': models.DateTimeField()}))
+ "document_temp.char_length": models.IntegerField(), 'temp.create_time': models.DateTimeField(),
+ 'temp.user_id': models.CharField(), 'temp.id': models.CharField()}))
if "desc" in self.data and self.data.get('desc') is not None:
query_set = query_set.filter(**{'temp.desc__icontains': self.data.get("desc")})
if "name" in self.data and self.data.get('name') is not None:
query_set = query_set.filter(**{'temp.name__icontains': self.data.get("name")})
- query_set = query_set.order_by("-temp.create_time")
+ if "select_user_id" in self.data and self.data.get('select_user_id') is not None:
+ query_set = query_set.filter(**{'temp.user_id__exact': self.data.get("select_user_id")})
+ query_set = query_set.order_by("-temp.create_time", "temp.id")
query_set_dict['default_sql'] = query_set
query_set_dict['dataset_custom_sql'] = QuerySet(model=get_dynamics_model(
@@ -149,7 +174,7 @@ def get_query_set(self):
query_set_dict['team_member_permission_custom_sql'] = QuerySet(model=get_dynamics_model(
{'user_id': models.CharField(),
'team_member_permission.auth_target_type': models.CharField(),
- 'team_member_permission.operate': ArrayField(verbose_name="权限操作列表",
+ 'team_member_permission.operate': ArrayField(verbose_name=_('permission'),
base_field=models.CharField(max_length=256,
blank=True,
choices=AuthOperate.choices,
@@ -175,12 +200,12 @@ def get_request_params_api():
in_=openapi.IN_QUERY,
type=openapi.TYPE_STRING,
required=False,
- description='知识库名称'),
+ description=_('dataset name')),
openapi.Parameter(name='desc',
in_=openapi.IN_QUERY,
type=openapi.TYPE_STRING,
required=False,
- description='知识库描述')
+ description=_('dataset description'))
]
@staticmethod
@@ -188,52 +213,134 @@ def get_response_body_api():
return DataSetSerializers.Operate.get_response_body_api()
class Create(ApiMixin, serializers.Serializer):
- user_id = serializers.UUIDField(required=True, error_messages=ErrMessage.char("用户id"), )
+ user_id = serializers.UUIDField(required=True, error_messages=ErrMessage.char(_('user id')), )
class CreateBaseSerializers(ApiMixin, serializers.Serializer):
"""
创建通用数据集序列化对象
"""
name = serializers.CharField(required=True,
- error_messages=ErrMessage.char("知识库名称"),
+ error_messages=ErrMessage.char(_('dataset name')),
max_length=64,
min_length=1)
desc = serializers.CharField(required=True,
- error_messages=ErrMessage.char("知识库描述"),
+ error_messages=ErrMessage.char(_('dataset description')),
max_length=256,
min_length=1)
+ embedding_mode_id = serializers.UUIDField(required=True,
+ error_messages=ErrMessage.uuid(_('embedding mode')))
+
documents = DocumentInstanceSerializer(required=False, many=True)
def is_valid(self, *, raise_exception=False):
super().is_valid(raise_exception=True)
return True
+ class CreateQASerializers(serializers.Serializer):
+ """
+ 创建web站点序列化对象
+ """
+ name = serializers.CharField(required=True,
+ error_messages=ErrMessage.char(_('dataset name')),
+ max_length=64,
+ min_length=1)
+
+ desc = serializers.CharField(required=True,
+ error_messages=ErrMessage.char(_('dataset description')),
+ max_length=256,
+ min_length=1)
+
+ embedding_mode_id = serializers.UUIDField(required=True,
+ error_messages=ErrMessage.uuid(_('embedding mode')))
+
+ file_list = serializers.ListSerializer(required=True,
+ error_messages=ErrMessage.list(_('file list')),
+ child=serializers.FileField(required=True,
+ error_messages=ErrMessage.file(
+ _('file list'))))
+
+ @staticmethod
+ def get_request_params_api():
+ return [openapi.Parameter(name='file',
+ in_=openapi.IN_FORM,
+ type=openapi.TYPE_ARRAY,
+ items=openapi.Items(type=openapi.TYPE_FILE),
+ required=True,
+ description=_('upload files ')),
+ openapi.Parameter(name='name',
+ in_=openapi.IN_FORM,
+ required=True,
+ type=openapi.TYPE_STRING, title=_('dataset name'),
+ description=_('dataset name')),
+ openapi.Parameter(name='desc',
+ in_=openapi.IN_FORM,
+ required=True,
+ type=openapi.TYPE_STRING, title=_('dataset description'),
+ description=_('dataset description')),
+ ]
+
+ @staticmethod
+ def get_response_body_api():
+ return openapi.Schema(
+ type=openapi.TYPE_OBJECT,
+ required=['id', 'name', 'desc', 'user_id', 'char_length', 'document_count',
+ 'update_time', 'create_time', 'document_list'],
+ properties={
+ 'id': openapi.Schema(type=openapi.TYPE_STRING, title="id",
+ description="id", default="xx"),
+ 'name': openapi.Schema(type=openapi.TYPE_STRING, title=_('dataset name'),
+ description=_('dataset name'), default=_('dataset name')),
+ 'desc': openapi.Schema(type=openapi.TYPE_STRING, title=_('dataset description'),
+ description=_('dataset description'), default=_('dataset description')),
+ 'user_id': openapi.Schema(type=openapi.TYPE_STRING, title=_('user id'),
+ description=_('user id'), default="user_xxxx"),
+ 'char_length': openapi.Schema(type=openapi.TYPE_STRING, title=_('char length'),
+ description=_('char length'), default=10),
+ 'document_count': openapi.Schema(type=openapi.TYPE_STRING, title=_('document count'),
+ description=_('document count'), default=1),
+ 'update_time': openapi.Schema(type=openapi.TYPE_STRING, title=_('update time'),
+ description=_('update time'),
+ default="1970-01-01 00:00:00"),
+ 'create_time': openapi.Schema(type=openapi.TYPE_STRING, title=_('create time'),
+ description=_('create time'),
+ default="1970-01-01 00:00:00"
+ ),
+ 'document_list': openapi.Schema(type=openapi.TYPE_ARRAY, title=_('document list'),
+ description=_('document list'),
+ items=DocumentSerializers.Operate.get_response_body_api())
+ }
+ )
+
class CreateWebSerializers(serializers.Serializer):
"""
创建web站点序列化对象
"""
name = serializers.CharField(required=True,
- error_messages=ErrMessage.char("知识库名称"),
+ error_messages=ErrMessage.char(_('dataset name')),
max_length=64,
min_length=1)
desc = serializers.CharField(required=True,
- error_messages=ErrMessage.char("知识库描述"),
+ error_messages=ErrMessage.char(_('dataset description')),
max_length=256,
min_length=1)
- source_url = serializers.CharField(required=True, error_messages=ErrMessage.char("Web 根地址"), )
+ source_url = serializers.CharField(required=True, error_messages=ErrMessage.char(_('web source url')), )
+
+ embedding_mode_id = serializers.UUIDField(required=True,
+ error_messages=ErrMessage.uuid(_('embedding mode')))
selector = serializers.CharField(required=False, allow_null=True, allow_blank=True,
- error_messages=ErrMessage.char("选择器"))
+ error_messages=ErrMessage.char(_('selector')))
def is_valid(self, *, raise_exception=False):
super().is_valid(raise_exception=True)
source_url = self.data.get('source_url')
response = Fork(source_url, []).fork()
if response.status == 500:
- raise AppApiException(500, f"url错误,无法解析【{source_url}】")
+ raise AppApiException(500,
+ _('URL error, cannot parse [{source_url}]').format(source_url=source_url))
return True
@staticmethod
@@ -245,25 +352,25 @@ def get_response_body_api():
properties={
'id': openapi.Schema(type=openapi.TYPE_STRING, title="id",
description="id", default="xx"),
- 'name': openapi.Schema(type=openapi.TYPE_STRING, title="名称",
- description="名称", default="测试知识库"),
- 'desc': openapi.Schema(type=openapi.TYPE_STRING, title="描述",
- description="描述", default="测试知识库描述"),
- 'user_id': openapi.Schema(type=openapi.TYPE_STRING, title="所属用户id",
- description="所属用户id", default="user_xxxx"),
- 'char_length': openapi.Schema(type=openapi.TYPE_STRING, title="字符数",
- description="字符数", default=10),
- 'document_count': openapi.Schema(type=openapi.TYPE_STRING, title="文档数量",
- description="文档数量", default=1),
- 'update_time': openapi.Schema(type=openapi.TYPE_STRING, title="修改时间",
- description="修改时间",
+ 'name': openapi.Schema(type=openapi.TYPE_STRING, title=_('dataset name'),
+ description=_('dataset name'), default=_('dataset name')),
+ 'desc': openapi.Schema(type=openapi.TYPE_STRING, title=_('dataset description'),
+ description=_('dataset description'), default=_('dataset description')),
+ 'user_id': openapi.Schema(type=openapi.TYPE_STRING, title=_('user id'),
+ description=_('user id'), default="user_xxxx"),
+ 'char_length': openapi.Schema(type=openapi.TYPE_STRING, title=_('char length'),
+ description=_('char length'), default=10),
+ 'document_count': openapi.Schema(type=openapi.TYPE_STRING, title=_('document count'),
+ description=_('document count'), default=1),
+ 'update_time': openapi.Schema(type=openapi.TYPE_STRING, title=_('update time'),
+ description=_('update time'),
default="1970-01-01 00:00:00"),
- 'create_time': openapi.Schema(type=openapi.TYPE_STRING, title="创建时间",
- description="创建时间",
+ 'create_time': openapi.Schema(type=openapi.TYPE_STRING, title=_('create time'),
+ description=_('create time'),
default="1970-01-01 00:00:00"
),
- 'document_list': openapi.Schema(type=openapi.TYPE_ARRAY, title="文档列表",
- description="文档列表",
+ 'document_list': openapi.Schema(type=openapi.TYPE_ARRAY, title=_('document list'),
+ description=_('document list'),
items=DocumentSerializers.Operate.get_response_body_api())
}
)
@@ -274,20 +381,39 @@ def get_request_body_api():
type=openapi.TYPE_OBJECT,
required=['name', 'desc', 'url'],
properties={
- 'name': openapi.Schema(type=openapi.TYPE_STRING, title="知识库名称", description="知识库名称"),
- 'desc': openapi.Schema(type=openapi.TYPE_STRING, title="知识库描述", description="知识库描述"),
- 'source_url': openapi.Schema(type=openapi.TYPE_STRING, title="web站点url",
- description="web站点url"),
- 'selector': openapi.Schema(type=openapi.TYPE_STRING, title="选择器", description="选择器")
+ 'name': openapi.Schema(type=openapi.TYPE_STRING, title=_('dataset name'),
+ description=_('dataset name')),
+ 'desc': openapi.Schema(type=openapi.TYPE_STRING, title=_('dataset description'),
+ description=_('dataset description')),
+ 'embedding_mode_id': openapi.Schema(type=openapi.TYPE_STRING, title=_('embedding mode'),
+ description=_('embedding mode')),
+ 'source_url': openapi.Schema(type=openapi.TYPE_STRING, title=_('web source url'),
+ description=_('web source url')),
+ 'selector': openapi.Schema(type=openapi.TYPE_STRING, title=_('selector'),
+ description=_('selector'))
}
)
@staticmethod
def post_embedding_dataset(document_list, dataset_id):
+ model_id = get_embedding_model_id_by_dataset_id(dataset_id)
# 发送向量化事件
- ListenerManagement.embedding_by_dataset_signal.send(dataset_id)
+ embedding_by_dataset.delay(dataset_id, model_id)
return document_list
+ def save_qa(self, instance: Dict, with_valid=True):
+ if with_valid:
+ self.is_valid(raise_exception=True)
+            self.CreateQASerializers(data=instance).is_valid(raise_exception=True)
+ file_list = instance.get('file_list')
+ document_list = flat_map([DocumentSerializers.Create.parse_qa_file(file) for file in file_list])
+ dataset_instance = {'name': instance.get('name'), 'desc': instance.get('desc'), 'documents': document_list,
+ 'embedding_mode_id': instance.get('embedding_mode_id')}
+ return self.save(dataset_instance, with_valid=True)
+
+ @valid_license(model=DataSet, count=50,
+ message=_(
+ 'The community version supports up to 50 knowledge bases. If you need more knowledge bases, please contact us (https://fit2cloud.com/).'))
@post(post_function=post_embedding_dataset)
@transaction.atomic
def save(self, instance: Dict, with_valid=True):
@@ -297,14 +423,14 @@ def save(self, instance: Dict, with_valid=True):
dataset_id = uuid.uuid1()
user_id = self.data.get('user_id')
if QuerySet(DataSet).filter(user_id=user_id, name=instance.get('name')).exists():
- raise AppApiException(500, "知识库名称重复!")
+ raise AppApiException(500, _('Knowledge base name duplicate!'))
dataset = DataSet(
- **{'id': dataset_id, 'name': instance.get("name"), 'desc': instance.get('desc'), 'user_id': user_id})
+ **{'id': dataset_id, 'name': instance.get("name"), 'desc': instance.get('desc'), 'user_id': user_id,
+ 'embedding_mode_id': instance.get('embedding_mode_id')})
document_model_list = []
paragraph_model_list = []
- problem_model_list = []
- problem_paragraph_mapping_list = []
+ problem_paragraph_object_list = []
# 插入文档
for document in instance.get('documents') if 'documents' in instance else []:
document_paragraph_dict_model = DocumentSerializers.Create.get_document_paragraph_model(dataset_id,
@@ -312,12 +438,12 @@ def save(self, instance: Dict, with_valid=True):
document_model_list.append(document_paragraph_dict_model.get('document'))
for paragraph in document_paragraph_dict_model.get('paragraph_model_list'):
paragraph_model_list.append(paragraph)
- for problem in document_paragraph_dict_model.get('problem_model_list'):
- problem_model_list.append(problem)
- for problem_paragraph_mapping in document_paragraph_dict_model.get('problem_paragraph_mapping_list'):
- problem_paragraph_mapping_list.append(problem_paragraph_mapping)
- problem_model_list, problem_paragraph_mapping_list = DocumentSerializers.Create.reset_problem_model(
- problem_model_list, problem_paragraph_mapping_list)
+ for problem_paragraph_object in document_paragraph_dict_model.get('problem_paragraph_object_list'):
+ problem_paragraph_object_list.append(problem_paragraph_object)
+
+ problem_model_list, problem_paragraph_mapping_list = (ProblemParagraphManage(problem_paragraph_object_list,
+ dataset_id)
+ .to_problem_model_list())
# 插入知识库
dataset.save()
# 插入文档
@@ -329,11 +455,13 @@ def save(self, instance: Dict, with_valid=True):
# 批量插入关联问题
QuerySet(ProblemParagraphMapping).bulk_create(problem_paragraph_mapping_list) if len(
problem_paragraph_mapping_list) > 0 else None
-
# 响应数据
return {**DataSetSerializers(dataset).data,
- 'document_list': DocumentSerializers.Query(data={'dataset_id': dataset_id}).list(
- with_valid=True)}, dataset_id
+ 'user_id': user_id,
+ 'document_list': document_model_list,
+ "document_count": len(document_model_list),
+ "char_length": reduce(lambda x, y: x + y, [d.char_length for d in document_model_list],
+ 0)}, dataset_id
@staticmethod
def get_last_url_path(url):
@@ -343,39 +471,22 @@ def get_last_url_path(url):
else:
return parsed_url.path.split("/")[-1]
- @staticmethod
- def get_save_handler(dataset_id, selector):
- def handler(child_link: ChildLink, response: Fork.Response):
- if response.status == 200:
- try:
- document_name = child_link.tag.text if child_link.tag is not None and len(
- child_link.tag.text.strip()) > 0 else child_link.url
- paragraphs = get_split_model('web.md').parse(response.content)
- DocumentSerializers.Create(data={'dataset_id': dataset_id}).save(
- {'name': document_name, 'paragraphs': paragraphs,
- 'meta': {'source_url': child_link.url, 'selector': selector},
- 'type': Type.web}, with_valid=True)
- except Exception as e:
- logging.getLogger("max_kb_error").error(f'{str(e)}:{traceback.format_exc()}')
-
- return handler
-
def save_web(self, instance: Dict, with_valid=True):
if with_valid:
self.is_valid(raise_exception=True)
self.CreateWebSerializers(data=instance).is_valid(raise_exception=True)
user_id = self.data.get('user_id')
if QuerySet(DataSet).filter(user_id=user_id, name=instance.get('name')).exists():
- raise AppApiException(500, "知识库名称重复!")
+ raise AppApiException(500, _('Knowledge base name duplicate!'))
dataset_id = uuid.uuid1()
dataset = DataSet(
**{'id': dataset_id, 'name': instance.get("name"), 'desc': instance.get('desc'), 'user_id': user_id,
'type': Type.web,
- 'meta': {'source_url': instance.get('source_url'), 'selector': instance.get('selector')}})
+ 'embedding_mode_id': instance.get('embedding_mode_id'),
+ 'meta': {'source_url': instance.get('source_url'), 'selector': instance.get('selector'),
+ 'embedding_mode_id': instance.get('embedding_mode_id')}})
dataset.save()
- ListenerManagement.sync_web_dataset_signal.send(
- SyncWebDatasetArgs(str(dataset_id), instance.get('source_url'), instance.get('selector'),
- self.get_save_handler(dataset_id, instance.get('selector'))))
+ sync_web_dataset.delay(str(dataset_id), instance.get('source_url'), instance.get('selector'))
return {**DataSetSerializers(dataset).data,
'document_list': []}
@@ -388,25 +499,25 @@ def get_response_body_api():
properties={
'id': openapi.Schema(type=openapi.TYPE_STRING, title="id",
description="id", default="xx"),
- 'name': openapi.Schema(type=openapi.TYPE_STRING, title="名称",
- description="名称", default="测试知识库"),
- 'desc': openapi.Schema(type=openapi.TYPE_STRING, title="描述",
- description="描述", default="测试知识库描述"),
- 'user_id': openapi.Schema(type=openapi.TYPE_STRING, title="所属用户id",
- description="所属用户id", default="user_xxxx"),
- 'char_length': openapi.Schema(type=openapi.TYPE_STRING, title="字符数",
- description="字符数", default=10),
- 'document_count': openapi.Schema(type=openapi.TYPE_STRING, title="文档数量",
- description="文档数量", default=1),
- 'update_time': openapi.Schema(type=openapi.TYPE_STRING, title="修改时间",
- description="修改时间",
+ 'name': openapi.Schema(type=openapi.TYPE_STRING, title=_('dataset name'),
+ description=_('dataset name'), default=_('dataset name')),
+ 'desc': openapi.Schema(type=openapi.TYPE_STRING, title=_('dataset description'),
+ description=_('dataset description'), default=_('dataset description')),
+ 'user_id': openapi.Schema(type=openapi.TYPE_STRING, title=_('user id'),
+ description=_('user id'), default="user_xxxx"),
+ 'char_length': openapi.Schema(type=openapi.TYPE_STRING, title=_('char length'),
+ description=_('char length'), default=10),
+ 'document_count': openapi.Schema(type=openapi.TYPE_STRING, title=_('document count'),
+ description=_('document count'), default=1),
+ 'update_time': openapi.Schema(type=openapi.TYPE_STRING, title=_('update time'),
+ description=_('update time'),
default="1970-01-01 00:00:00"),
- 'create_time': openapi.Schema(type=openapi.TYPE_STRING, title="创建时间",
- description="创建时间",
+ 'create_time': openapi.Schema(type=openapi.TYPE_STRING, title=_('create time'),
+ description=_('create time'),
default="1970-01-01 00:00:00"
),
- 'document_list': openapi.Schema(type=openapi.TYPE_ARRAY, title="文档列表",
- description="文档列表",
+ 'document_list': openapi.Schema(type=openapi.TYPE_ARRAY, title=_('document list'),
+ description=_('document list'),
items=DocumentSerializers.Operate.get_response_body_api())
}
)
@@ -417,9 +528,14 @@ def get_request_body_api():
type=openapi.TYPE_OBJECT,
required=['name', 'desc'],
properties={
- 'name': openapi.Schema(type=openapi.TYPE_STRING, title="知识库名称", description="知识库名称"),
- 'desc': openapi.Schema(type=openapi.TYPE_STRING, title="知识库描述", description="知识库描述"),
- 'documents': openapi.Schema(type=openapi.TYPE_ARRAY, title="文档数据", description="文档数据",
+ 'name': openapi.Schema(type=openapi.TYPE_STRING, title=_('dataset name'),
+ description=_('dataset name')),
+ 'desc': openapi.Schema(type=openapi.TYPE_STRING, title=_('dataset description'),
+ description=_('dataset description')),
+ 'embedding_mode_id': openapi.Schema(type=openapi.TYPE_STRING, title=_('embedding mode'),
+ description=_('embedding mode')),
+ 'documents': openapi.Schema(type=openapi.TYPE_ARRAY, title=_('documents'),
+ description=_('documents'),
items=DocumentSerializers().Create.get_request_body_api()
)
}
@@ -427,14 +543,14 @@ def get_request_body_api():
class Edit(serializers.Serializer):
name = serializers.CharField(required=False, max_length=64, min_length=1,
- error_messages=ErrMessage.char("知识库名称"))
+ error_messages=ErrMessage.char(_('dataset name')))
desc = serializers.CharField(required=False, max_length=256, min_length=1,
- error_messages=ErrMessage.char("知识库描述"))
+ error_messages=ErrMessage.char(_('dataset description')))
meta = serializers.DictField(required=False)
application_id_list = serializers.ListSerializer(required=False, child=serializers.UUIDField(required=True,
error_messages=ErrMessage.char(
- "应用id")),
- error_messages=ErrMessage.char("应用列表"))
+ _('application id'))),
+ error_messages=ErrMessage.char(_('application id list')))
@staticmethod
def get_dataset_meta_valid_map():
@@ -453,21 +569,21 @@ def is_valid(self, *, dataset: DataSet = None):
class HitTest(ApiMixin, serializers.Serializer):
id = serializers.CharField(required=True, error_messages=ErrMessage.char("id"))
- user_id = serializers.UUIDField(required=False, error_messages=ErrMessage.char("用户id"))
- query_text = serializers.CharField(required=True, error_messages=ErrMessage.char("查询文本"))
- top_number = serializers.IntegerField(required=True, max_value=10, min_value=1,
- error_messages=ErrMessage.char("响应Top"))
- similarity = serializers.FloatField(required=True, max_value=1, min_value=0,
- error_messages=ErrMessage.char("相似度"))
+ user_id = serializers.UUIDField(required=False, error_messages=ErrMessage.char(_('user id')))
+ query_text = serializers.CharField(required=True, error_messages=ErrMessage.char(_('query text')))
+ top_number = serializers.IntegerField(required=True, max_value=10000, min_value=1,
+                                              error_messages=ErrMessage.char(_('top number')))
+ similarity = serializers.FloatField(required=True, max_value=2, min_value=0,
+ error_messages=ErrMessage.char(_('similarity')))
search_mode = serializers.CharField(required=True, validators=[
validators.RegexValidator(regex=re.compile("^embedding|keywords|blend$"),
- message="类型只支持register|reset_password", code=500)
- ], error_messages=ErrMessage.char("检索模式"))
+ message=_('The type only supports embedding|keywords|blend'), code=500)
+ ], error_messages=ErrMessage.char(_('search mode')))
def is_valid(self, *, raise_exception=True):
super().is_valid(raise_exception=True)
if not QuerySet(DataSet).filter(id=self.data.get("id")).exists():
- raise AppApiException(300, "id不存在")
+ raise AppApiException(300, _('id does not exist'))
def hit_test(self):
self.is_valid()
@@ -476,12 +592,13 @@ def hit_test(self):
QuerySet(Document).filter(
dataset_id=self.data.get('id'),
is_active=False)]
+ model = get_embedding_model_by_dataset_id(self.data.get('id'))
# 向量库检索
hit_list = vector.hit_test(self.data.get('query_text'), [self.data.get('id')], exclude_document_id_list,
self.data.get('top_number'),
self.data.get('similarity'),
SearchMode(self.data.get('search_mode')),
- EmbeddingModel.get_embedding_model())
+ model)
hit_dict = reduce(lambda x, y: {**x, **y}, [{hit.get('paragraph_id'): hit} for hit in hit_list], {})
p_list = list_paragraph([h.get('paragraph_id') for h in hit_list])
return [{**p, 'similarity': hit_dict.get(p.get('id')).get('similarity'),
@@ -489,22 +606,22 @@ def hit_test(self):
class SyncWeb(ApiMixin, serializers.Serializer):
id = serializers.CharField(required=True, error_messages=ErrMessage.char(
- "知识库id"))
+ _('dataset id')))
user_id = serializers.UUIDField(required=False, error_messages=ErrMessage.char(
- "用户id"))
+ _('user id')))
sync_type = serializers.CharField(required=True, error_messages=ErrMessage.char(
- "同步类型"), validators=[
+            _('sync type')), validators=[
validators.RegexValidator(regex=re.compile("^replace|complete$"),
- message="同步类型只支持:replace|complete", code=500)
+ message=_('The synchronization type only supports:replace|complete'), code=500)
])
def is_valid(self, *, raise_exception=False):
super().is_valid(raise_exception=True)
first = QuerySet(DataSet).filter(id=self.data.get("id")).first()
if first is None:
- raise AppApiException(300, "id不存在")
+ raise AppApiException(300, _('id does not exist'))
if first.type != Type.web:
- raise AppApiException(500, "只有web站点类型才支持同步")
+ raise AppApiException(500, _('Synchronization is only supported for web site types'))
def sync(self, with_valid=True):
if with_valid:
@@ -523,7 +640,9 @@ def handler(child_link: ChildLink, response: Fork.Response):
document_name = child_link.tag.text if child_link.tag is not None and len(
child_link.tag.text.strip()) > 0 else child_link.url
paragraphs = get_split_model('web.md').parse(response.content)
- first = QuerySet(Document).filter(meta__source_url=child_link.url, dataset=dataset).first()
+                        # normalize the crawled URL (strip whitespace) before looking up the document
+ first = QuerySet(Document).filter(meta__source_url=child_link.url.strip(),
+ dataset=dataset).first()
if first is not None:
# 如果存在,使用文档同步
DocumentSerializers.Sync(data={'document_id': first.id}).sync()
@@ -531,7 +650,8 @@ def handler(child_link: ChildLink, response: Fork.Response):
# 插入
DocumentSerializers.Create(data={'dataset_id': dataset.id}).save(
{'name': document_name, 'paragraphs': paragraphs,
- 'meta': {'source_url': child_link.url, 'selector': dataset.meta.get('selector')},
+ 'meta': {'source_url': child_link.url.strip(),
+ 'selector': dataset.meta.get('selector')},
'type': Type.web}, with_valid=True)
except Exception as e:
logging.getLogger("max_kb_error").error(f'{str(e)}:{traceback.format_exc()}')
@@ -545,9 +665,7 @@ def replace_sync(self, dataset):
"""
url = dataset.meta.get('source_url')
selector = dataset.meta.get('selector') if 'selector' in dataset.meta else None
- ListenerManagement.sync_web_dataset_signal.send(
- SyncWebDatasetArgs(str(dataset.id), url, selector,
- self.get_sync_handler(dataset)))
+ sync_replace_web_dataset.delay(str(dataset.id), url, selector)
def complete_sync(self, dataset):
"""
@@ -561,7 +679,7 @@ def complete_sync(self, dataset):
# 删除段落
QuerySet(Paragraph).filter(dataset=dataset).delete()
# 删除向量
- ListenerManagement.delete_embedding_by_dataset_signal.send(self.data.get('id'))
+ delete_embedding_by_dataset(self.data.get('id'))
# 同步
self.replace_sync(dataset)
@@ -571,24 +689,96 @@ def get_request_params_api():
in_=openapi.IN_PATH,
type=openapi.TYPE_STRING,
required=True,
- description='知识库id'),
+ description=_('dataset id')),
openapi.Parameter(name='sync_type',
in_=openapi.IN_QUERY,
type=openapi.TYPE_STRING,
required=True,
- description='同步类型->replace:替换同步,complete:完整同步')
+ description=_(
+ 'Synchronization type->replace: replacement synchronization, complete: complete synchronization'))
]
class Operate(ApiMixin, serializers.Serializer):
id = serializers.CharField(required=True, error_messages=ErrMessage.char(
- "知识库id"))
+ _('dataset id')))
user_id = serializers.UUIDField(required=False, error_messages=ErrMessage.char(
- "用户id"))
+ _('user id')))
def is_valid(self, *, raise_exception=True):
super().is_valid(raise_exception=True)
if not QuerySet(DataSet).filter(id=self.data.get("id")).exists():
- raise AppApiException(300, "id不存在")
+ raise AppApiException(300, _('id does not exist'))
+
+ def export_excel(self, with_valid=True):
+ if with_valid:
+ self.is_valid(raise_exception=True)
+ document_list = QuerySet(Document).filter(dataset_id=self.data.get('id'))
+ paragraph_list = native_search(QuerySet(Paragraph).filter(dataset_id=self.data.get("id")), get_file_content(
+ os.path.join(PROJECT_DIR, "apps", "dataset", 'sql', 'list_paragraph_document_name.sql')))
+ problem_mapping_list = native_search(
+ QuerySet(ProblemParagraphMapping).filter(dataset_id=self.data.get("id")), get_file_content(
+ os.path.join(PROJECT_DIR, "apps", "dataset", 'sql', 'list_problem_mapping.sql')),
+ with_table_name=True)
+ data_dict, document_dict = DocumentSerializers.Operate.merge_problem(paragraph_list, problem_mapping_list,
+ document_list)
+ workbook = DocumentSerializers.Operate.get_workbook(data_dict, document_dict)
+            response = HttpResponse(content_type='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet')
+ response['Content-Disposition'] = 'attachment; filename="dataset.xlsx"'
+ workbook.save(response)
+ return response
+
+ def export_zip(self, with_valid=True):
+ if with_valid:
+ self.is_valid(raise_exception=True)
+ document_list = QuerySet(Document).filter(dataset_id=self.data.get('id'))
+ paragraph_list = native_search(QuerySet(Paragraph).filter(dataset_id=self.data.get("id")), get_file_content(
+ os.path.join(PROJECT_DIR, "apps", "dataset", 'sql', 'list_paragraph_document_name.sql')))
+ problem_mapping_list = native_search(
+ QuerySet(ProblemParagraphMapping).filter(dataset_id=self.data.get("id")), get_file_content(
+ os.path.join(PROJECT_DIR, "apps", "dataset", 'sql', 'list_problem_mapping.sql')),
+ with_table_name=True)
+ data_dict, document_dict = DocumentSerializers.Operate.merge_problem(paragraph_list, problem_mapping_list,
+ document_list)
+ res = [parse_image(paragraph.get('content')) for paragraph in paragraph_list]
+
+ workbook = DocumentSerializers.Operate.get_workbook(data_dict, document_dict)
+ response = HttpResponse(content_type='application/zip')
+ response['Content-Disposition'] = 'attachment; filename="archive.zip"'
+ zip_buffer = io.BytesIO()
+ with TemporaryDirectory() as tempdir:
+ dataset_file = os.path.join(tempdir, 'dataset.xlsx')
+ workbook.save(dataset_file)
+ for r in res:
+ write_image(tempdir, r)
+ zip_dir(tempdir, zip_buffer)
+ response.write(zip_buffer.getvalue())
+ return response
+
+ @staticmethod
+ def merge_problem(paragraph_list: List[Dict], problem_mapping_list: List[Dict]):
+ result = {}
+ document_dict = {}
+
+ for paragraph in paragraph_list:
+ problem_list = [problem_mapping.get('content') for problem_mapping in problem_mapping_list if
+ problem_mapping.get('paragraph_id') == paragraph.get('id')]
+ document_sheet = result.get(paragraph.get('document_id'))
+ d = document_dict.get(paragraph.get('document_name'))
+ if d is None:
+ document_dict[paragraph.get('document_name')] = {paragraph.get('document_id')}
+ else:
+ d.add(paragraph.get('document_id'))
+
+ if document_sheet is None:
+ result[paragraph.get('document_id')] = [[paragraph.get('title'), paragraph.get('content'),
+ '\n'.join(problem_list)]]
+ else:
+ document_sheet.append([paragraph.get('title'), paragraph.get('content'), '\n'.join(problem_list)])
+ result_document_dict = {}
+ for d_name in document_dict:
+ for index, d_id in enumerate(document_dict.get(d_name)):
+ result_document_dict[d_id] = d_name if index == 0 else d_name + str(index)
+ return result, result_document_dict
@transaction.atomic
def delete(self):
@@ -599,9 +789,60 @@ def delete(self):
QuerySet(Paragraph).filter(dataset=dataset).delete()
QuerySet(Problem).filter(dataset=dataset).delete()
dataset.delete()
- ListenerManagement.delete_embedding_by_dataset_signal.send(self.data.get('id'))
+ delete_embedding_by_dataset(self.data.get('id'))
return True
+ @transaction.atomic
+ def re_embedding(self, with_valid=True):
+ if with_valid:
+ self.is_valid(raise_exception=True)
+ dataset_id = self.data.get('id')
+ dataset = QuerySet(DataSet).filter(id=dataset_id).first()
+ embedding_model_id = dataset.embedding_mode_id
+ dataset_user_id = dataset.user_id
+ embedding_model = QuerySet(Model).filter(id=embedding_model_id).first()
+ if embedding_model is None:
+ raise AppApiException(500, _('Model does not exist'))
+ if embedding_model.permission_type == 'PRIVATE' and dataset_user_id != embedding_model.user_id:
+ raise AppApiException(500, _('No permission to use this model') + f"{embedding_model.name}")
+ ListenerManagement.update_status(QuerySet(Document).filter(dataset_id=self.data.get('id')),
+ TaskType.EMBEDDING,
+ State.PENDING)
+ ListenerManagement.update_status(QuerySet(Paragraph).filter(dataset_id=self.data.get('id')),
+ TaskType.EMBEDDING,
+ State.PENDING)
+ ListenerManagement.get_aggregation_document_status_by_dataset_id(self.data.get('id'))()
+ embedding_model_id = get_embedding_model_id_by_dataset_id(self.data.get('id'))
+ try:
+ embedding_by_dataset.delay(dataset_id, embedding_model_id)
+ except AlreadyQueued as e:
+ raise AppApiException(500, _('Failed to send the vectorization task, please try again later!'))
+
+ def generate_related(self, instance: Dict, with_valid=True):
+ if with_valid:
+ self.is_valid(raise_exception=True)
+ GenerateRelatedSerializer(data=instance).is_valid(raise_exception=True)
+ dataset_id = self.data.get('id')
+ model_id = instance.get("model_id")
+ prompt = instance.get("prompt")
+ state_list = instance.get('state_list')
+ ListenerManagement.update_status(QuerySet(Document).filter(dataset_id=dataset_id),
+ TaskType.GENERATE_PROBLEM,
+ State.PENDING)
+ ListenerManagement.update_status(QuerySet(Paragraph).annotate(
+ reversed_status=Reverse('status'),
+ task_type_status=Substr('reversed_status', TaskType.GENERATE_PROBLEM.value,
+ 1),
+ ).filter(task_type_status__in=state_list, dataset_id=dataset_id)
+ .values('id'),
+ TaskType.GENERATE_PROBLEM,
+ State.PENDING)
+ ListenerManagement.get_aggregation_document_status_by_dataset_id(dataset_id)()
+ try:
+ generate_related_by_dataset_id.delay(dataset_id, model_id, prompt, state_list)
+ except AlreadyQueued as e:
+                raise AppApiException(500, _('Failed to send the generate related task, please try again later!'))
+
def list_application(self, with_valid=True):
if with_valid:
self.is_valid(raise_exception=True)
@@ -622,7 +863,7 @@ def one(self, user_id, with_valid=True):
), 'team_member_permission_custom_sql': QuerySet(
model=get_dynamics_model({'user_id': models.CharField(),
'team_member_permission.operate': ArrayField(
- verbose_name="权限操作列表",
+ verbose_name=_('permission'),
base_field=models.CharField(max_length=256,
blank=True,
choices=AuthOperate.choices,
@@ -639,6 +880,7 @@ def one(self, user_id, with_valid=True):
QuerySet(ApplicationDatasetMapping).filter(
dataset_id=self.data.get('id'))]))}
+ @transaction.atomic
def edit(self, dataset: Dict, user_id: str):
"""
修改知识库
@@ -649,9 +891,11 @@ def edit(self, dataset: Dict, user_id: str):
self.is_valid()
if QuerySet(DataSet).filter(user_id=user_id, name=dataset.get('name')).exclude(
id=self.data.get('id')).exists():
- raise AppApiException(500, "知识库名称重复!")
+ raise AppApiException(500, _('Knowledge base name duplicate!'))
_dataset = QuerySet(DataSet).get(id=self.data.get("id"))
DataSetSerializers.Edit(data=dataset).is_valid(dataset=_dataset)
+ if 'embedding_mode_id' in dataset:
+ _dataset.embedding_mode_id = dataset.get('embedding_mode_id')
if "name" in dataset:
_dataset.name = dataset.get("name")
if 'desc' in dataset:
@@ -665,7 +909,9 @@ def edit(self, dataset: Dict, user_id: str):
self.list_application(with_valid=False)]
for dataset_id in application_id_list:
if not application_dataset_id_list.__contains__(dataset_id):
- raise AppApiException(500, f"未知的应用id${dataset_id},无法关联")
+ raise AppApiException(500,
+ _('Unknown application id {dataset_id}, cannot be associated').format(
+ dataset_id=dataset_id))
# 删除已经关联的id
QuerySet(ApplicationDatasetMapping).filter(application_id__in=application_dataset_id_list,
@@ -687,12 +933,15 @@ def get_request_body_api():
type=openapi.TYPE_OBJECT,
required=['name', 'desc'],
properties={
- 'name': openapi.Schema(type=openapi.TYPE_STRING, title="知识库名称", description="知识库名称"),
- 'desc': openapi.Schema(type=openapi.TYPE_STRING, title="知识库描述", description="知识库描述"),
- 'meta': openapi.Schema(type=openapi.TYPE_OBJECT, title="知识库元数据",
- description="知识库元数据->web:{source_url:xxx,selector:'xxx'},base:{}"),
- 'application_id_list': openapi.Schema(type=openapi.TYPE_ARRAY, title="应用id列表",
- description="应用id列表",
+ 'name': openapi.Schema(type=openapi.TYPE_STRING, title=_('dataset name'),
+ description=_('dataset name')),
+ 'desc': openapi.Schema(type=openapi.TYPE_STRING, title=_('dataset description'),
+ description=_('dataset description')),
+ 'meta': openapi.Schema(type=openapi.TYPE_OBJECT, title=_('meta'),
+ description=_(
+ 'Knowledge base metadata->web:{source_url:xxx,selector:\'xxx\'},base:{}')),
+ 'application_id_list': openapi.Schema(type=openapi.TYPE_ARRAY, title=_('application id list'),
+ description=_('application id list'),
items=openapi.Schema(type=openapi.TYPE_STRING))
}
)
@@ -706,21 +955,21 @@ def get_response_body_api():
properties={
'id': openapi.Schema(type=openapi.TYPE_STRING, title="id",
description="id", default="xx"),
- 'name': openapi.Schema(type=openapi.TYPE_STRING, title="名称",
- description="名称", default="测试知识库"),
- 'desc': openapi.Schema(type=openapi.TYPE_STRING, title="描述",
- description="描述", default="测试知识库描述"),
- 'user_id': openapi.Schema(type=openapi.TYPE_STRING, title="所属用户id",
- description="所属用户id", default="user_xxxx"),
- 'char_length': openapi.Schema(type=openapi.TYPE_STRING, title="字符数",
- description="字符数", default=10),
- 'document_count': openapi.Schema(type=openapi.TYPE_STRING, title="文档数量",
- description="文档数量", default=1),
- 'update_time': openapi.Schema(type=openapi.TYPE_STRING, title="修改时间",
- description="修改时间",
+ 'name': openapi.Schema(type=openapi.TYPE_STRING, title=_('dataset name'),
+ description=_('dataset name'), default=_('dataset name')),
+ 'desc': openapi.Schema(type=openapi.TYPE_STRING, title=_('dataset description'),
+ description=_('dataset description'), default=_('dataset description')),
+ 'user_id': openapi.Schema(type=openapi.TYPE_STRING, title=_('user id'),
+ description=_('user id'), default="user_xxxx"),
+ 'char_length': openapi.Schema(type=openapi.TYPE_STRING, title=_('char length'),
+ description=_('char length'), default=10),
+ 'document_count': openapi.Schema(type=openapi.TYPE_STRING, title=_('document count'),
+ description=_('document count'), default=1),
+ 'update_time': openapi.Schema(type=openapi.TYPE_STRING, title=_('update time'),
+ description=_('update time'),
default="1970-01-01 00:00:00"),
- 'create_time': openapi.Schema(type=openapi.TYPE_STRING, title="创建时间",
- description="创建时间",
+ 'create_time': openapi.Schema(type=openapi.TYPE_STRING, title=_('create time'),
+ description=_('create time'),
default="1970-01-01 00:00:00"
)
}
@@ -732,5 +981,5 @@ def get_request_params_api():
in_=openapi.IN_PATH,
type=openapi.TYPE_STRING,
required=True,
- description='知识库id')
+ description=_('dataset id')),
]
diff --git a/apps/dataset/serializers/document_serializers.py b/apps/dataset/serializers/document_serializers.py
index b659ea9781a..3b92a7e60fa 100644
--- a/apps/dataset/serializers/document_serializers.py
+++ b/apps/dataset/serializers/document_serializers.py
@@ -6,58 +6,127 @@
@date:2023/9/22 13:43
@desc:
"""
+import io
import logging
import os
import re
import traceback
import uuid
from functools import reduce
+from tempfile import TemporaryDirectory
from typing import List, Dict
+import openpyxl
+from celery_once import AlreadyQueued
from django.core import validators
-from django.db import transaction
-from django.db.models import QuerySet
+from django.db import transaction, models
+from django.db.models import QuerySet, Count
+from django.db.models.functions import Substr, Reverse
+from django.http import HttpResponse
+from django.utils.translation import get_language
+from django.utils.translation import gettext_lazy as _, gettext, to_locale
from drf_yasg import openapi
+from openpyxl.cell.cell import ILLEGAL_CHARACTERS_RE
from rest_framework import serializers
+from xlwt import Utils
-from common.db.search import native_search, native_page_search
+from common.db.search import native_search, native_page_search, get_dynamics_model
+from common.event import ListenerManagement
from common.event.common import work_thread_pool
-from common.event.listener_manage import ListenerManagement, SyncWebDocumentArgs, UpdateEmbeddingDatasetIdArgs
from common.exception.app_exception import AppApiException
+from common.handle.impl.csv_split_handle import CsvSplitHandle
from common.handle.impl.doc_split_handle import DocSplitHandle
+from common.handle.impl.html_split_handle import HTMLSplitHandle
from common.handle.impl.pdf_split_handle import PdfSplitHandle
+from common.handle.impl.qa.csv_parse_qa_handle import CsvParseQAHandle
+from common.handle.impl.qa.xls_parse_qa_handle import XlsParseQAHandle
+from common.handle.impl.qa.xlsx_parse_qa_handle import XlsxParseQAHandle
+from common.handle.impl.qa.zip_parse_qa_handle import ZipParseQAHandle
+from common.handle.impl.table.csv_parse_table_handle import CsvSplitHandle as CsvSplitTableHandle
+from common.handle.impl.table.xls_parse_table_handle import XlsSplitHandle as XlsSplitTableHandle
+from common.handle.impl.table.xlsx_parse_table_handle import XlsxSplitHandle as XlsxSplitTableHandle
from common.handle.impl.text_split_handle import TextSplitHandle
+from common.handle.impl.xls_split_handle import XlsSplitHandle
+from common.handle.impl.xlsx_split_handle import XlsxSplitHandle
+from common.handle.impl.zip_split_handle import ZipSplitHandle
from common.mixins.api_mixin import ApiMixin
-from common.util.common import post
+from common.util.common import post, flat_map, bulk_create_in_batches, parse_image
from common.util.field_message import ErrMessage
from common.util.file_util import get_file_content
from common.util.fork import Fork
from common.util.split_model import get_split_model
-from dataset.models.data_set import DataSet, Document, Paragraph, Problem, Type, Status, ProblemParagraphMapping, Image
-from dataset.serializers.common_serializers import BatchSerializer, MetaSerializer
+from dataset.models.data_set import DataSet, Document, Paragraph, Problem, Type, ProblemParagraphMapping, Image, \
+ TaskType, State
+from dataset.serializers.common_serializers import BatchSerializer, MetaSerializer, ProblemParagraphManage, \
+ get_embedding_model_id_by_dataset_id, write_image, zip_dir
from dataset.serializers.paragraph_serializers import ParagraphSerializers, ParagraphInstanceSerializer
+from dataset.task import sync_web_document, generate_related_by_document_id
+from embedding.task.embedding import embedding_by_document, delete_embedding_by_document_list, \
+ delete_embedding_by_document, update_embedding_dataset_id, delete_embedding_by_paragraph_ids, \
+ embedding_by_document_list
+from setting.models import Model
from smartdoc.conf import PROJECT_DIR
+parse_qa_handle_list = [XlsParseQAHandle(), CsvParseQAHandle(), XlsxParseQAHandle(), ZipParseQAHandle()]
+parse_table_handle_list = [CsvSplitTableHandle(), XlsSplitTableHandle(), XlsxSplitTableHandle()]
+
+
+class FileBufferHandle:
+ buffer = None
+
+ def get_buffer(self, file):
+ if self.buffer is None:
+ self.buffer = file.read()
+ return self.buffer
+
+
+class BatchCancelInstanceSerializer(serializers.Serializer):
+ id_list = serializers.ListField(required=True, child=serializers.UUIDField(required=True),
+ error_messages=ErrMessage.char(_('id list')))
+ type = serializers.IntegerField(required=True, error_messages=ErrMessage.integer(
+ _('task type')))
+
+ def is_valid(self, *, raise_exception=False):
+ super().is_valid(raise_exception=True)
+ _type = self.data.get('type')
+ try:
+ TaskType(_type)
+ except Exception as e:
+ raise AppApiException(500, _('task type not support'))
+
+
+class CancelInstanceSerializer(serializers.Serializer):
+ type = serializers.IntegerField(required=True, error_messages=ErrMessage.integer(
+ _('task type')))
+
+ def is_valid(self, *, raise_exception=False):
+ super().is_valid(raise_exception=True)
+ _type = self.data.get('type')
+ try:
+ TaskType(_type)
+ except Exception as e:
+ raise AppApiException(500, _('task type not support'))
+
class DocumentEditInstanceSerializer(ApiMixin, serializers.Serializer):
meta = serializers.DictField(required=False)
name = serializers.CharField(required=False, max_length=128, min_length=1,
error_messages=ErrMessage.char(
- "文档名称"))
+ _('document name')))
hit_handling_method = serializers.CharField(required=False, validators=[
validators.RegexValidator(regex=re.compile("^optimization|directly_return$"),
- message="类型只支持optimization|directly_return",
+ message=_('The type only supports optimization|directly_return'),
code=500)
- ], error_messages=ErrMessage.char("命中处理方式"))
+ ], error_messages=ErrMessage.char(_('hit handling method')))
directly_return_similarity = serializers.FloatField(required=False,
max_value=2,
min_value=0,
error_messages=ErrMessage.float(
- "直接返回分数"))
+ _('directly return similarity')))
is_active = serializers.BooleanField(required=False, error_messages=ErrMessage.boolean(
- "文档是否可用"))
+ _('document is active')))
@staticmethod
def get_meta_valid_map():
@@ -78,12 +147,27 @@ def is_valid(self, *, document: Document = None):
class DocumentWebInstanceSerializer(ApiMixin, serializers.Serializer):
source_url_list = serializers.ListField(required=True,
child=serializers.CharField(required=True, error_messages=ErrMessage.char(
- "文档地址")),
+ _('document url list'))),
error_messages=ErrMessage.char(
- "文档地址列表"))
+ _('document url list')))
selector = serializers.CharField(required=False, allow_null=True, allow_blank=True,
error_messages=ErrMessage.char(
- "选择器"))
+ _('selector')))
+
+ @staticmethod
+ def get_request_params_api():
+ return [openapi.Parameter(name='file',
+ in_=openapi.IN_FORM,
+ type=openapi.TYPE_ARRAY,
+ items=openapi.Items(type=openapi.TYPE_FILE),
+ required=True,
+ description=_('file')),
+ openapi.Parameter(name='dataset_id',
+ in_=openapi.IN_PATH,
+ type=openapi.TYPE_STRING,
+ required=True,
+ description=_('dataset id')),
+ ]
@staticmethod
def get_request_body_api():
@@ -91,16 +175,17 @@ def get_request_body_api():
type=openapi.TYPE_OBJECT,
required=['source_url_list'],
properties={
- 'source_url_list': openapi.Schema(type=openapi.TYPE_ARRAY, title="段落列表", description="段落列表",
+ 'source_url_list': openapi.Schema(type=openapi.TYPE_ARRAY, title=_('source url list'),
+ description=_('source url list'),
items=openapi.Schema(type=openapi.TYPE_STRING)),
- 'selector': openapi.Schema(type=openapi.TYPE_STRING, title="文档名称", description="文档名称")
+ 'selector': openapi.Schema(type=openapi.TYPE_STRING, title=_('selector'), description=_('selector'))
}
)
class DocumentInstanceSerializer(ApiMixin, serializers.Serializer):
name = serializers.CharField(required=True,
- error_messages=ErrMessage.char("文档名称"),
+ error_messages=ErrMessage.char(_('document name')),
max_length=128,
min_length=1)
@@ -112,24 +197,100 @@ def get_request_body_api():
type=openapi.TYPE_OBJECT,
required=['name', 'paragraphs'],
properties={
- 'name': openapi.Schema(type=openapi.TYPE_STRING, title="文档名称", description="文档名称"),
- 'paragraphs': openapi.Schema(type=openapi.TYPE_ARRAY, title="段落列表", description="段落列表",
+ 'name': openapi.Schema(type=openapi.TYPE_STRING, title=_('document name'),
+ description=_('document name')),
+ 'paragraphs': openapi.Schema(type=openapi.TYPE_ARRAY, title=_('paragraphs'),
+ description=_('paragraphs'),
items=ParagraphSerializers.Create.get_request_body_api())
}
)
+class DocumentInstanceQASerializer(ApiMixin, serializers.Serializer):
+ file_list = serializers.ListSerializer(required=True,
+ error_messages=ErrMessage.list(_('file list')),
+ child=serializers.FileField(required=True,
+ error_messages=ErrMessage.file(_('file'))))
+
+
+class DocumentInstanceTableSerializer(ApiMixin, serializers.Serializer):
+ file_list = serializers.ListSerializer(required=True,
+ error_messages=ErrMessage.list(_('file list')),
+ child=serializers.FileField(required=True,
+ error_messages=ErrMessage.file(_('file'))))
+
+
class DocumentSerializers(ApiMixin, serializers.Serializer):
+ class Export(ApiMixin, serializers.Serializer):
+ type = serializers.CharField(required=True, validators=[
+ validators.RegexValidator(regex=re.compile("^csv|excel$"),
+ message=_('The template type only supports excel|csv'),
+ code=500)
+ ], error_messages=ErrMessage.char(_('type')))
+
+ @staticmethod
+ def get_request_params_api():
+ return [openapi.Parameter(name='type',
+ in_=openapi.IN_QUERY,
+ type=openapi.TYPE_STRING,
+ required=True,
+ description=_('Export template type csv|excel')),
+
+ ]
+
+ def export(self, with_valid=True):
+ if with_valid:
+ self.is_valid(raise_exception=True)
+ language = get_language()
+ if self.data.get('type') == 'csv':
+ file = open(
+ os.path.join(PROJECT_DIR, "apps", "dataset", 'template', f'csv_template_{to_locale(language)}.csv'),
+ "rb")
+ content = file.read()
+ file.close()
+ return HttpResponse(content, status=200, headers={'Content-Type': 'text/csv',
+ 'Content-Disposition': 'attachment; filename="csv_template.csv"'})
+ elif self.data.get('type') == 'excel':
+ file = open(os.path.join(PROJECT_DIR, "apps", "dataset", 'template',
+ f'excel_template_{to_locale(language)}.xlsx'), "rb")
+ content = file.read()
+ file.close()
+ return HttpResponse(content, status=200, headers={'Content-Type': 'application/vnd.ms-excel',
+ 'Content-Disposition': 'attachment; filename="excel_template.xlsx"'})
+
+ def table_export(self, with_valid=True):
+ if with_valid:
+ self.is_valid(raise_exception=True)
+ language = get_language()
+ if self.data.get('type') == 'csv':
+ file = open(
+ os.path.join(PROJECT_DIR, "apps", "dataset", 'template',
+ f'table_template_{to_locale(language)}.csv'),
+ "rb")
+ content = file.read()
+ file.close()
+ return HttpResponse(content, status=200, headers={'Content-Type': 'text/csv',
+ 'Content-Disposition': 'attachment; filename="csv_template.csv"'})
+ elif self.data.get('type') == 'excel':
+ file = open(os.path.join(PROJECT_DIR, "apps", "dataset", 'template',
+ f'table_template_{to_locale(language)}.xlsx'),
+ "rb")
+ content = file.read()
+ file.close()
+ return HttpResponse(content, status=200, headers={'Content-Type': 'application/vnd.ms-excel',
+ 'Content-Disposition': 'attachment; filename="excel_template.xlsx"'})
+
class Migrate(ApiMixin, serializers.Serializer):
dataset_id = serializers.UUIDField(required=True,
error_messages=ErrMessage.char(
- "知识库id"))
+ _('dataset id')))
target_dataset_id = serializers.UUIDField(required=True,
error_messages=ErrMessage.char(
- "目标知识库id"))
- document_id_list = serializers.ListField(required=True, error_messages=ErrMessage.char("文档列表"),
+ _('target dataset id')))
+ document_id_list = serializers.ListField(required=True, error_messages=ErrMessage.char(_('document list')),
child=serializers.UUIDField(required=True,
- error_messages=ErrMessage.uuid("文档id")))
+ error_messages=ErrMessage.uuid(
+ _('document id'))))
@transaction.atomic
def migrate(self, with_valid=True):
@@ -172,12 +333,27 @@ def migrate(self, with_valid=True):
meta={})
else:
document_list.update(dataset_id=target_dataset_id)
- # 修改向量信息
- ListenerManagement.update_embedding_dataset_id(UpdateEmbeddingDatasetIdArgs(
- [paragraph.id for paragraph in paragraph_list],
- target_dataset_id))
+ model_id = None
+ if dataset.embedding_mode_id != target_dataset.embedding_mode_id:
+ model_id = get_embedding_model_id_by_dataset_id(target_dataset_id)
+
+ pid_list = [paragraph.id for paragraph in paragraph_list]
# 修改段落信息
paragraph_list.update(dataset_id=target_dataset_id)
+ # 修改向量信息
+ if model_id:
+ delete_embedding_by_paragraph_ids(pid_list)
+ ListenerManagement.update_status(QuerySet(Document).filter(id__in=document_id_list),
+ TaskType.EMBEDDING,
+ State.PENDING)
+ ListenerManagement.update_status(QuerySet(Paragraph).filter(document_id__in=document_id_list),
+ TaskType.EMBEDDING,
+ State.PENDING)
+ ListenerManagement.get_aggregation_document_status_by_query_set(
+ QuerySet(Document).filter(id__in=document_id_list))()
+ embedding_by_document_list.delay(document_id_list, model_id)
+ else:
+ update_embedding_dataset_id(pid_list, target_dataset_id)
@staticmethod
def get_target_dataset_problem(target_dataset_id: str,
@@ -207,12 +383,12 @@ def get_request_params_api():
in_=openapi.IN_PATH,
type=openapi.TYPE_STRING,
required=True,
- description='知识库id'),
+ description=_('dataset id')),
openapi.Parameter(name='target_dataset_id',
in_=openapi.IN_PATH,
type=openapi.TYPE_STRING,
required=True,
- description='目标知识库id')
+ description=_('target dataset id'))
]
@staticmethod
@@ -220,21 +396,26 @@ def get_request_body_api():
return openapi.Schema(
type=openapi.TYPE_ARRAY,
items=openapi.Schema(type=openapi.TYPE_STRING),
- title='文档id列表',
- description="文档id列表"
+ title=_('document id list'),
+ description=_('document id list')
)
class Query(ApiMixin, serializers.Serializer):
# 知识库id
dataset_id = serializers.UUIDField(required=True,
error_messages=ErrMessage.char(
- "知识库id"))
+ _('dataset id')))
name = serializers.CharField(required=False, max_length=128,
min_length=1,
error_messages=ErrMessage.char(
- "文档名称"))
- hit_handling_method = serializers.CharField(required=False, error_messages=ErrMessage.char("命中处理方式"))
+ _('document name')))
+ hit_handling_method = serializers.CharField(required=False,
+ error_messages=ErrMessage.char(_('hit handling method')))
+ is_active = serializers.BooleanField(required=False, error_messages=ErrMessage.boolean(_('document is active')))
+ task_type = serializers.IntegerField(required=False, error_messages=ErrMessage.integer(_('task type')))
+ status = serializers.CharField(required=False, error_messages=ErrMessage.char(_('status')))
+ order_by = serializers.CharField(required=False, error_messages=ErrMessage.char(_('order by')))
def get_query_set(self):
query_set = QuerySet(model=Document)
@@ -243,8 +424,36 @@ def get_query_set(self):
query_set = query_set.filter(**{'name__icontains': self.data.get('name')})
if 'hit_handling_method' in self.data and self.data.get('hit_handling_method') is not None:
query_set = query_set.filter(**{'hit_handling_method': self.data.get('hit_handling_method')})
- query_set = query_set.order_by('-create_time')
- return query_set
+ if 'is_active' in self.data and self.data.get('is_active') is not None:
+ query_set = query_set.filter(**{'is_active': self.data.get('is_active')})
+ if 'status' in self.data and self.data.get(
+ 'status') is not None:
+ task_type = self.data.get('task_type')
+ status = self.data.get(
+ 'status')
+ if task_type is not None:
+ query_set = query_set.annotate(
+ reversed_status=Reverse('status'),
+ task_type_status=Substr('reversed_status', TaskType(task_type).value,
+ 1),
+ ).filter(task_type_status=State(status).value).values('id')
+ else:
+ if status != State.SUCCESS.value:
+ query_set = query_set.filter(status__icontains=status)
+ else:
+ query_set = query_set.filter(status__iregex='^[2n]*$')
+ order_by = self.data.get('order_by', '')
+ order_by_query_set = QuerySet(model=get_dynamics_model(
+ {'char_length': models.CharField(), 'paragraph_count': models.IntegerField(),
+ "update_time": models.IntegerField(), 'create_time': models.DateTimeField()}))
+ if order_by:
+ order_by_query_set = order_by_query_set.order_by(order_by)
+ else:
+ order_by_query_set = order_by_query_set.order_by('-create_time', 'id')
+ return {
+ 'document_custom_sql': query_set,
+ 'order_by_query': order_by_query_set
+ }
def list(self, with_valid=False):
if with_valid:
@@ -264,41 +473,44 @@ def get_request_params_api():
in_=openapi.IN_QUERY,
type=openapi.TYPE_STRING,
required=False,
- description='文档名称'),
+ description=_('document name')),
openapi.Parameter(name='hit_handling_method', in_=openapi.IN_QUERY,
type=openapi.TYPE_STRING,
required=False,
- description='文档命中处理方式')]
+ description=_('hit handling method')), ]
@staticmethod
def get_response_body_api():
return openapi.Schema(type=openapi.TYPE_ARRAY,
- title="文档列表", description="文档列表",
+ title=_('document list'), description=_('document list'),
items=DocumentSerializers.Operate.get_response_body_api())
class Sync(ApiMixin, serializers.Serializer):
document_id = serializers.UUIDField(required=True, error_messages=ErrMessage.char(
- "文档id"))
+ _('document id')))
def is_valid(self, *, raise_exception=False):
super().is_valid(raise_exception=True)
document_id = self.data.get('document_id')
first = QuerySet(Document).filter(id=document_id).first()
if first is None:
- raise AppApiException(500, "文档id不存在")
+ raise AppApiException(500, _('document id not exist'))
if first.type != Type.web:
- raise AppApiException(500, "只有web站点类型才支持同步")
+ raise AppApiException(500, _('Synchronization is only supported for web site types'))
def sync(self, with_valid=True, with_embedding=True):
if with_valid:
self.is_valid(raise_exception=True)
document_id = self.data.get('document_id')
document = QuerySet(Document).filter(id=document_id).first()
+ state = State.SUCCESS
if document.type != Type.web:
return True
try:
- document.status = Status.embedding
- document.save()
+ ListenerManagement.update_status(QuerySet(Document).filter(id=document_id),
+ TaskType.SYNC,
+ State.PENDING)
+ ListenerManagement.get_aggregation_document_status(document_id)()
source_url = document.meta.get('source_url')
selector_list = document.meta.get('selector').split(
" ") if 'selector' in document.meta and document.meta.get('selector') is not None else []
@@ -308,18 +520,20 @@ def sync(self, with_valid=True, with_embedding=True):
QuerySet(model=Paragraph).filter(document_id=document_id).delete()
# 删除问题
QuerySet(model=ProblemParagraphMapping).filter(document_id=document_id).delete()
+ delete_problems_and_mappings([document_id])
# 删除向量库
- ListenerManagement.delete_embedding_by_document_signal.send(document_id)
+ delete_embedding_by_document(document_id)
paragraphs = get_split_model('web.md').parse(result.content)
- document.char_length = reduce(lambda x, y: x + y,
- [len(p.get('content')) for p in paragraphs],
- 0)
- document.save()
+ char_length = reduce(lambda x, y: x + y,
+ [len(p.get('content')) for p in paragraphs],
+ 0)
+ QuerySet(Document).filter(id=document_id).update(char_length=char_length)
document_paragraph_model = DocumentSerializers.Create.get_paragraph_model(document, paragraphs)
paragraph_model_list = document_paragraph_model.get('paragraph_model_list')
- problem_model_list = document_paragraph_model.get('problem_model_list')
- problem_paragraph_mapping_list = document_paragraph_model.get('problem_paragraph_mapping_list')
+ problem_paragraph_object_list = document_paragraph_model.get('problem_paragraph_object_list')
+ problem_model_list, problem_paragraph_mapping_list = ProblemParagraphManage(
+ problem_paragraph_object_list, document.dataset_id).to_problem_model_list()
# 批量插入段落
QuerySet(Paragraph).bulk_create(paragraph_model_list) if len(paragraph_model_list) > 0 else None
# 批量插入问题
@@ -329,19 +543,34 @@ def sync(self, with_valid=True, with_embedding=True):
problem_paragraph_mapping_list) > 0 else None
# 向量化
if with_embedding:
- ListenerManagement.embedding_by_document_signal.send(document_id)
+ embedding_model_id = get_embedding_model_id_by_dataset_id(document.dataset_id)
+ ListenerManagement.update_status(QuerySet(Document).filter(id=document_id),
+ TaskType.EMBEDDING,
+ State.PENDING)
+ ListenerManagement.update_status(QuerySet(Paragraph).filter(document_id=document_id),
+ TaskType.EMBEDDING,
+ State.PENDING)
+ ListenerManagement.get_aggregation_document_status(document_id)()
+ embedding_by_document.delay(document_id, embedding_model_id)
+
else:
- document.status = Status.error
- document.save()
+ state = State.FAILURE
except Exception as e:
logging.getLogger("max_kb_error").error(f'{str(e)}:{traceback.format_exc()}')
- document.status = Status.error
- document.save()
+ state = State.FAILURE
+ ListenerManagement.update_status(QuerySet(Document).filter(id=document_id),
+ TaskType.SYNC,
+ state)
+ ListenerManagement.update_status(QuerySet(Paragraph).filter(document_id=document_id),
+ TaskType.SYNC,
+ state)
+ ListenerManagement.get_aggregation_document_status(document_id)()
return True
class Operate(ApiMixin, serializers.Serializer):
document_id = serializers.UUIDField(required=True, error_messages=ErrMessage.char(
- "文档id"))
+ _('document id')))
+ dataset_id = serializers.UUIDField(required=True, error_messages=ErrMessage.char(_('dataset id')))
@staticmethod
def get_request_params_api():
@@ -349,26 +578,148 @@ def get_request_params_api():
in_=openapi.IN_PATH,
type=openapi.TYPE_STRING,
required=True,
- description='知识库id'),
+ description=_('dataset id')),
openapi.Parameter(name='document_id',
in_=openapi.IN_PATH,
type=openapi.TYPE_STRING,
required=True,
- description='文档id')
+ description=_('document id'))
]
def is_valid(self, *, raise_exception=False):
super().is_valid(raise_exception=True)
document_id = self.data.get('document_id')
if not QuerySet(Document).filter(id=document_id).exists():
- raise AppApiException(500, "文档id不存在")
+ raise AppApiException(500, _('document id not exist'))
+
+ def export(self, with_valid=True):
+ if with_valid:
+ self.is_valid(raise_exception=True)
+ document = QuerySet(Document).filter(id=self.data.get("document_id")).first()
+ paragraph_list = native_search(QuerySet(Paragraph).filter(document_id=self.data.get("document_id")),
+ get_file_content(
+ os.path.join(PROJECT_DIR, "apps", "dataset", 'sql',
+ 'list_paragraph_document_name.sql')))
+ problem_mapping_list = native_search(
+ QuerySet(ProblemParagraphMapping).filter(document_id=self.data.get("document_id")), get_file_content(
+ os.path.join(PROJECT_DIR, "apps", "dataset", 'sql', 'list_problem_mapping.sql')),
+ with_table_name=True)
+ data_dict, document_dict = self.merge_problem(paragraph_list, problem_mapping_list, [document])
+ workbook = self.get_workbook(data_dict, document_dict)
+ response = HttpResponse(content_type='application/vnd.ms-excel')
+ response['Content-Disposition'] = f'attachment; filename="data.xlsx"'
+ workbook.save(response)
+ return response
+
+ def export_zip(self, with_valid=True):
+ if with_valid:
+ self.is_valid(raise_exception=True)
+ document = QuerySet(Document).filter(id=self.data.get("document_id")).first()
+ paragraph_list = native_search(QuerySet(Paragraph).filter(document_id=self.data.get("document_id")),
+ get_file_content(
+ os.path.join(PROJECT_DIR, "apps", "dataset", 'sql',
+ 'list_paragraph_document_name.sql')))
+ problem_mapping_list = native_search(
+ QuerySet(ProblemParagraphMapping).filter(document_id=self.data.get("document_id")), get_file_content(
+ os.path.join(PROJECT_DIR, "apps", "dataset", 'sql', 'list_problem_mapping.sql')),
+ with_table_name=True)
+ data_dict, document_dict = self.merge_problem(paragraph_list, problem_mapping_list, [document])
+ res = [parse_image(paragraph.get('content')) for paragraph in paragraph_list]
+
+ workbook = DocumentSerializers.Operate.get_workbook(data_dict, document_dict)
+ response = HttpResponse(content_type='application/zip')
+ response['Content-Disposition'] = 'attachment; filename="archive.zip"'
+ zip_buffer = io.BytesIO()
+ with TemporaryDirectory() as tempdir:
+ dataset_file = os.path.join(tempdir, 'dataset.xlsx')
+ workbook.save(dataset_file)
+ for r in res:
+ write_image(tempdir, r)
+ zip_dir(tempdir, zip_buffer)
+ response.write(zip_buffer.getvalue())
+ return response
+
+ @staticmethod
+ def get_workbook(data_dict, document_dict):
+ # 创建工作簿对象
+ workbook = openpyxl.Workbook()
+ workbook.remove(workbook.active)
+ if len(data_dict.keys()) == 0:
+ data_dict['sheet'] = []
+ for sheet_id in data_dict:
+ # 添加工作表
+ worksheet = workbook.create_sheet(document_dict.get(sheet_id))
+ data = [
+ [gettext('Section title (optional)'),
+ gettext('Section content (required, question answer, no more than 4096 characters)'),
+ gettext('Question (optional, one per line in the cell)')],
+ *data_dict.get(sheet_id, [])
+ ]
+ # 写入数据到工作表
+ for row_idx, row in enumerate(data):
+ for col_idx, col in enumerate(row):
+ cell = worksheet.cell(row=row_idx + 1, column=col_idx + 1)
+ if isinstance(col, str):
+ col = re.sub(ILLEGAL_CHARACTERS_RE, '', col)
+ if col.startswith(('=', '+', '-', '@')):
+ col = '\ufeff' + col
+ cell.value = col
+ # 创建HttpResponse对象返回Excel文件
+ return workbook
+
+ @staticmethod
+ def merge_problem(paragraph_list: List[Dict], problem_mapping_list: List[Dict], document_list):
+ result = {}
+ document_dict = {}
+
+ for paragraph in paragraph_list:
+ problem_list = [problem_mapping.get('content') for problem_mapping in problem_mapping_list if
+ problem_mapping.get('paragraph_id') == paragraph.get('id')]
+ document_sheet = result.get(paragraph.get('document_id'))
+ document_name = DocumentSerializers.Operate.reset_document_name(paragraph.get('document_name'))
+ d = document_dict.get(document_name)
+ if d is None:
+ document_dict[document_name] = {paragraph.get('document_id')}
+ else:
+ d.add(paragraph.get('document_id'))
+
+ if document_sheet is None:
+ result[paragraph.get('document_id')] = [[paragraph.get('title'), paragraph.get('content'),
+ '\n'.join(problem_list)]]
+ else:
+ document_sheet.append([paragraph.get('title'), paragraph.get('content'), '\n'.join(problem_list)])
+ for document in document_list:
+ if document.id not in result:
+ document_name = DocumentSerializers.Operate.reset_document_name(document.name)
+ result[document.id] = [[]]
+ d = document_dict.get(document_name)
+ if d is None:
+ document_dict[document_name] = {document.id}
+ else:
+ d.add(document.id)
+ result_document_dict = {}
+ for d_name in document_dict:
+ for index, d_id in enumerate(document_dict.get(d_name)):
+ result_document_dict[d_id] = d_name if index == 0 else d_name + str(index)
+ return result, result_document_dict
+
+ @staticmethod
+ def reset_document_name(document_name):
+ if document_name is not None:
+ document_name = document_name.strip()[0:29]
+ if document_name is None or not Utils.valid_sheet_name(document_name):
+ return "Sheet"
+ return document_name.strip()
def one(self, with_valid=False):
if with_valid:
self.is_valid(raise_exception=True)
query_set = QuerySet(model=Document)
query_set = query_set.filter(**{'id': self.data.get("document_id")})
- return native_search(query_set, select_string=get_file_content(
+ return native_search({
+ 'document_custom_sql': query_set,
+ 'order_by_query': QuerySet(Document).order_by('-create_time', 'id')
+ }, select_string=get_file_content(
os.path.join(PROJECT_DIR, "apps", "dataset", 'sql', 'list_document.sql')), with_search_one=True)
def edit(self, instance: Dict, with_valid=False):
@@ -384,21 +735,61 @@ def edit(self, instance: Dict, with_valid=False):
_document.save()
return self.one()
- def refresh(self, with_valid=True):
+ def refresh(self, state_list=None, with_valid=True):
+ if state_list is None:
+ state_list = [State.PENDING.value, State.STARTED.value, State.SUCCESS.value, State.FAILURE.value,
+ State.REVOKE.value,
+ State.REVOKED.value, State.IGNORED.value]
if with_valid:
self.is_valid(raise_exception=True)
+ dataset = QuerySet(DataSet).filter(id=self.data.get('dataset_id')).first()
+ embedding_model_id = dataset.embedding_mode_id
+ dataset_user_id = dataset.user_id
+ embedding_model = QuerySet(Model).filter(id=embedding_model_id).first()
+ if embedding_model is None:
+ raise AppApiException(500, _('Model does not exist'))
+ if embedding_model.permission_type == 'PRIVATE' and dataset_user_id != embedding_model.user_id:
+ raise AppApiException(500, _('No permission to use this model') + f"{embedding_model.name}")
document_id = self.data.get("document_id")
- document = QuerySet(Document).filter(id=document_id).first()
- if document.type == Type.web:
- # 异步同步
- work_thread_pool.submit(lambda x: DocumentSerializers.Sync(data={'document_id': document_id}).sync(),
- {})
+ ListenerManagement.update_status(QuerySet(Document).filter(id=document_id), TaskType.EMBEDDING,
+ State.PENDING)
+ ListenerManagement.update_status(QuerySet(Paragraph).annotate(
+ reversed_status=Reverse('status'),
+ task_type_status=Substr('reversed_status', TaskType.EMBEDDING.value,
+ 1),
+ ).filter(task_type_status__in=state_list, document_id=document_id)
+ .values('id'),
+ TaskType.EMBEDDING,
+ State.PENDING)
+ ListenerManagement.get_aggregation_document_status(document_id)()
+
+ try:
+ embedding_by_document.delay(document_id, embedding_model_id, state_list)
+ except AlreadyQueued as e:
+ raise AppApiException(500, _('The task is being executed, please do not send it repeatedly.'))
+
+ def cancel(self, instance, with_valid=True):
+ if with_valid:
+ self.is_valid(raise_exception=True)
+ CancelInstanceSerializer(data=instance).is_valid()
+ document_id = self.data.get("document_id")
+ ListenerManagement.update_status(QuerySet(Paragraph).annotate(
+ reversed_status=Reverse('status'),
+ task_type_status=Substr('reversed_status', TaskType(instance.get('type')).value,
+ 1),
+ ).filter(task_type_status__in=[State.PENDING.value, State.STARTED.value]).filter(
+ document_id=document_id).values('id'),
+ TaskType(instance.get('type')),
+ State.REVOKE)
+ ListenerManagement.update_status(QuerySet(Document).annotate(
+ reversed_status=Reverse('status'),
+ task_type_status=Substr('reversed_status', TaskType(instance.get('type')).value,
+ 1),
+ ).filter(task_type_status__in=[State.PENDING.value, State.STARTED.value]).filter(
+ id=document_id).values('id'),
+ TaskType(instance.get('type')),
+ State.REVOKE)
- else:
- if document.status != Status.embedding.value:
- document.status = Status.embedding
- document.save()
- ListenerManagement.embedding_by_document_signal.send(document_id)
return True
@transaction.atomic
@@ -408,9 +799,9 @@ def delete(self):
# 删除段落
QuerySet(model=Paragraph).filter(document_id=document_id).delete()
# 删除问题
- QuerySet(model=ProblemParagraphMapping).filter(document_id=document_id).delete()
+ delete_problems_and_mappings([document_id])
# 删除向量库
- ListenerManagement.delete_embedding_by_document_signal.send(document_id)
+ delete_embedding_by_document(document_id)
return True
@staticmethod
@@ -422,20 +813,20 @@ def get_response_body_api():
properties={
'id': openapi.Schema(type=openapi.TYPE_STRING, title="id",
description="id", default="xx"),
- 'name': openapi.Schema(type=openapi.TYPE_STRING, title="名称",
- description="名称", default="测试知识库"),
- 'char_length': openapi.Schema(type=openapi.TYPE_INTEGER, title="字符数",
- description="字符数", default=10),
- 'user_id': openapi.Schema(type=openapi.TYPE_STRING, title="用户id", description="用户id"),
- 'paragraph_count': openapi.Schema(type=openapi.TYPE_INTEGER, title="文档数量",
- description="文档数量", default=1),
- 'is_active': openapi.Schema(type=openapi.TYPE_BOOLEAN, title="是否可用",
- description="是否可用", default=True),
- 'update_time': openapi.Schema(type=openapi.TYPE_STRING, title="修改时间",
- description="修改时间",
+ 'name': openapi.Schema(type=openapi.TYPE_STRING, title=_('name'),
+ description=_('name'), default="xx"),
+ 'char_length': openapi.Schema(type=openapi.TYPE_INTEGER, title=_('char length'),
+ description=_('char length'), default=10),
+ 'user_id': openapi.Schema(type=openapi.TYPE_STRING, title=_('user id'), description=_('user id')),
+ 'paragraph_count': openapi.Schema(type=openapi.TYPE_INTEGER, title=_('document count'),
+ description=_('document count'), default=1),
+ 'is_active': openapi.Schema(type=openapi.TYPE_BOOLEAN, title=_('Is active'),
+ description=_('Is active'), default=True),
+ 'update_time': openapi.Schema(type=openapi.TYPE_STRING, title=_('update time'),
+ description=_('update time'),
default="1970-01-01 00:00:00"),
- 'create_time': openapi.Schema(type=openapi.TYPE_STRING, title="创建时间",
- description="创建时间",
+ 'create_time': openapi.Schema(type=openapi.TYPE_STRING, title=_('create time'),
+ description=_('create time'),
default="1970-01-01 00:00:00"
)
}
@@ -446,32 +837,70 @@ def get_request_body_api():
return openapi.Schema(
type=openapi.TYPE_OBJECT,
properties={
- 'name': openapi.Schema(type=openapi.TYPE_STRING, title="文档名称", description="文档名称"),
- 'is_active': openapi.Schema(type=openapi.TYPE_BOOLEAN, title="是否可用", description="是否可用"),
- 'hit_handling_method': openapi.Schema(type=openapi.TYPE_STRING, title="命中处理方式",
- description="ai优化:optimization,直接返回:directly_return"),
- 'directly_return_similarity': openapi.Schema(type=openapi.TYPE_NUMBER, title="直接返回分数",
+ 'name': openapi.Schema(type=openapi.TYPE_STRING, title=_('document name'),
+ description=_('document name')),
+ 'is_active': openapi.Schema(type=openapi.TYPE_BOOLEAN, title=_('Is active'),
+ description=_('Is active')),
+ 'hit_handling_method': openapi.Schema(type=openapi.TYPE_STRING, title=_('hit handling method'),
+ description=_(
+ 'ai optimization: optimization, direct return: directly_return')),
+ 'directly_return_similarity': openapi.Schema(type=openapi.TYPE_NUMBER,
+ title=_('directly return similarity'),
default=0.9),
- 'meta': openapi.Schema(type=openapi.TYPE_OBJECT, title="文档元数据",
- description="文档元数据->web:{source_url:xxx,selector:'xxx'},base:{}"),
+ 'meta': openapi.Schema(type=openapi.TYPE_OBJECT, title=_('meta'),
+ description=_(
+ 'Document metadata->web:{source_url:xxx,selector:\'xxx\'},base:{}')),
}
)
class Create(ApiMixin, serializers.Serializer):
dataset_id = serializers.UUIDField(required=True, error_messages=ErrMessage.char(
- "文档id"))
+ _('document id')))
def is_valid(self, *, raise_exception=False):
super().is_valid(raise_exception=True)
if not QuerySet(DataSet).filter(id=self.data.get('dataset_id')).exists():
- raise AppApiException(10000, "知识库id不存在")
+ raise AppApiException(10000, _('dataset id not exist'))
return True
@staticmethod
- def post_embedding(result, document_id):
- ListenerManagement.embedding_by_document_signal.send(document_id)
+ def post_embedding(result, document_id, dataset_id):
+ DocumentSerializers.Operate(
+ data={'dataset_id': dataset_id, 'document_id': document_id}).refresh()
return result
+ @staticmethod
+ def parse_qa_file(file):
+ get_buffer = FileBufferHandle().get_buffer
+ for parse_qa_handle in parse_qa_handle_list:
+ if parse_qa_handle.support(file, get_buffer):
+ return parse_qa_handle.handle(file, get_buffer, save_image)
+ raise AppApiException(500, _('Unsupported file format'))
+
+ @staticmethod
+ def parse_table_file(file):
+ get_buffer = FileBufferHandle().get_buffer
+ for parse_table_handle in parse_table_handle_list:
+ if parse_table_handle.support(file, get_buffer):
+ return parse_table_handle.handle(file, get_buffer, save_image)
+ raise AppApiException(500, _('Unsupported file format'))
+
+ def save_qa(self, instance: Dict, with_valid=True):
+ if with_valid:
+ DocumentInstanceQASerializer(data=instance).is_valid(raise_exception=True)
+ self.is_valid(raise_exception=True)
+ file_list = instance.get('file_list')
+ document_list = flat_map([self.parse_qa_file(file) for file in file_list])
+ return DocumentSerializers.Batch(data={'dataset_id': self.data.get('dataset_id')}).batch_save(document_list)
+
+ def save_table(self, instance: Dict, with_valid=True):
+ if with_valid:
+ DocumentInstanceTableSerializer(data=instance).is_valid(raise_exception=True)
+ self.is_valid(raise_exception=True)
+ file_list = instance.get('file_list')
+ document_list = flat_map([self.parse_table_file(file) for file in file_list])
+ return DocumentSerializers.Batch(data={'dataset_id': self.data.get('dataset_id')}).batch_save(document_list)
+
@post(post_function=post_embedding)
@transaction.atomic
def save(self, instance: Dict, with_valid=False, **kwargs):
@@ -480,11 +909,13 @@ def save(self, instance: Dict, with_valid=False, **kwargs):
self.is_valid(raise_exception=True)
dataset_id = self.data.get('dataset_id')
document_paragraph_model = self.get_document_paragraph_model(dataset_id, instance)
+
document_model = document_paragraph_model.get('document')
paragraph_model_list = document_paragraph_model.get('paragraph_model_list')
- problem_model_list = document_paragraph_model.get('problem_model_list')
- problem_paragraph_mapping_list = document_paragraph_model.get('problem_paragraph_mapping_list')
-
+ problem_paragraph_object_list = document_paragraph_model.get('problem_paragraph_object_list')
+ problem_model_list, problem_paragraph_mapping_list = (ProblemParagraphManage(problem_paragraph_object_list,
+ dataset_id)
+ .to_problem_model_list())
# 插入文档
document_model.save()
# 批量插入段落
@@ -497,29 +928,7 @@ def save(self, instance: Dict, with_valid=False, **kwargs):
document_id = str(document_model.id)
return DocumentSerializers.Operate(
data={'dataset_id': dataset_id, 'document_id': document_id}).one(
- with_valid=True), document_id
-
- @staticmethod
- def get_sync_handler(dataset_id):
- def handler(source_url: str, selector, response: Fork.Response):
- if response.status == 200:
- try:
- paragraphs = get_split_model('web.md').parse(response.content)
- # 插入
- DocumentSerializers.Create(data={'dataset_id': dataset_id}).save(
- {'name': source_url, 'paragraphs': paragraphs,
- 'meta': {'source_url': source_url, 'selector': selector},
- 'type': Type.web}, with_valid=True)
- except Exception as e:
- logging.getLogger("max_kb_error").error(f'{str(e)}:{traceback.format_exc()}')
- else:
- Document(name=source_url,
- meta={'source_url': source_url, 'selector': selector},
- type=Type.web,
- char_length=0,
- status=Status.error).save()
-
- return handler
+ with_valid=True), document_id, dataset_id
def save_web(self, instance: Dict, with_valid=True):
if with_valid:
@@ -528,8 +937,7 @@ def save_web(self, instance: Dict, with_valid=True):
dataset_id = self.data.get('dataset_id')
source_url_list = instance.get('source_url_list')
selector = instance.get('selector')
- args = SyncWebDocumentArgs(source_url_list, selector, self.get_sync_handler(dataset_id))
- ListenerManagement.sync_web_document_signal.send(args)
+ sync_web_document.delay(dataset_id, source_url_list, selector)
@staticmethod
def get_paragraph_model(document_model, paragraph_list: List):
@@ -539,35 +947,15 @@ def get_paragraph_model(document_model, paragraph_list: List):
dataset_id, document_model.id, paragraph) for paragraph in paragraph_list]
paragraph_model_list = []
- problem_model_list = []
- problem_paragraph_mapping_list = []
+ problem_paragraph_object_list = []
for paragraphs in paragraph_model_dict_list:
paragraph = paragraphs.get('paragraph')
- for problem_model in paragraphs.get('problem_model_list'):
- problem_model_list.append(problem_model)
- for problem_paragraph_mapping in paragraphs.get('problem_paragraph_mapping_list'):
- problem_paragraph_mapping_list.append(problem_paragraph_mapping)
+ for problem_model in paragraphs.get('problem_paragraph_object_list'):
+ problem_paragraph_object_list.append(problem_model)
paragraph_model_list.append(paragraph)
- problem_model_list, problem_paragraph_mapping_list = DocumentSerializers.Create.reset_problem_model(
- problem_model_list, problem_paragraph_mapping_list)
-
return {'document': document_model, 'paragraph_model_list': paragraph_model_list,
- 'problem_model_list': problem_model_list,
- 'problem_paragraph_mapping_list': problem_paragraph_mapping_list}
-
- @staticmethod
- def reset_problem_model(problem_model_list, problem_paragraph_mapping_list):
- new_problem_model_list = [x for i, x in enumerate(problem_model_list) if
- len([item for item in problem_model_list[:i] if item.content == x.content]) <= 0]
-
- for new_problem_model in new_problem_model_list:
- old_model_list = [problem.id for problem in problem_model_list if
- problem.content == new_problem_model.content]
- for problem_paragraph_mapping in problem_paragraph_mapping_list:
- if old_model_list.__contains__(problem_paragraph_mapping.problem_id):
- problem_paragraph_mapping.problem_id = new_problem_model.id
- return new_problem_model_list, problem_paragraph_mapping_list
+ 'problem_paragraph_object_list': problem_paragraph_object_list}
@staticmethod
def get_document_paragraph_model(dataset_id, instance: Dict):
@@ -595,31 +983,31 @@ def get_request_params_api():
in_=openapi.IN_PATH,
type=openapi.TYPE_STRING,
required=True,
- description='知识库id')
+ description=_('document id'))
]
class Split(ApiMixin, serializers.Serializer):
file = serializers.ListField(required=True, error_messages=ErrMessage.list(
- "文件列表"))
+ _('file list')))
limit = serializers.IntegerField(required=False, error_messages=ErrMessage.integer(
- "分段长度"))
+ _('limit')))
patterns = serializers.ListField(required=False,
child=serializers.CharField(required=True, error_messages=ErrMessage.char(
- "分段标识")),
- error_messages=ErrMessage.uuid(
- "分段标识列表"))
+ _('patterns'))),
+ error_messages=ErrMessage.list(
+ _('patterns')))
with_filter = serializers.BooleanField(required=False, error_messages=ErrMessage.boolean(
- "自动清洗"))
+ _('Auto Clean')))
def is_valid(self, *, raise_exception=True):
super().is_valid(raise_exception=True)
files = self.data.get('file')
for f in files:
if f.size > 1024 * 1024 * 100:
- raise AppApiException(500, "上传文件最大不能超过100MB")
+ raise AppApiException(500, _('The maximum size of the uploaded file cannot exceed 100MB'))
@staticmethod
def get_request_params_api():
@@ -629,27 +1017,28 @@ def get_request_params_api():
type=openapi.TYPE_ARRAY,
items=openapi.Items(type=openapi.TYPE_FILE),
required=True,
- description='上传文件'),
+ description=_('file list')),
openapi.Parameter(name='limit',
in_=openapi.IN_FORM,
required=False,
- type=openapi.TYPE_INTEGER, title="分段长度", description="分段长度"),
+ type=openapi.TYPE_INTEGER, title=_('limit'), description=_('limit')),
openapi.Parameter(name='patterns',
in_=openapi.IN_FORM,
required=False,
type=openapi.TYPE_ARRAY, items=openapi.Items(type=openapi.TYPE_STRING),
- title="分段正则列表", description="分段正则列表"),
+ title=_('Segmented regular list'), description=_('Segmented regular list')),
openapi.Parameter(name='with_filter',
in_=openapi.IN_FORM,
required=False,
- type=openapi.TYPE_BOOLEAN, title="是否清除特殊字符", description="是否清除特殊字符"),
+ type=openapi.TYPE_BOOLEAN, title=_('Whether to clear special characters'),
+ description=_('Whether to clear special characters')),
]
def parse(self):
file_list = self.data.get("file")
- return list(
- map(lambda f: file_to_paragraph(f, self.data.get("patterns", None), self.data.get("with_filter", None),
- self.data.get("limit", None)), file_list))
+ return reduce(lambda x, y: [*x, *y],
+ [file_to_paragraph(f, self.data.get("patterns", None), self.data.get("with_filter", None),
+ self.data.get("limit", 4096)) for f in file_list], [])
class SplitPattern(ApiMixin, serializers.Serializer):
@staticmethod
@@ -661,22 +1050,23 @@ def list():
{'key': '#####', 'value': "(?<=\\n)(? 0 else None
# 批量插入段落
- QuerySet(Paragraph).bulk_create(paragraph_model_list) if len(paragraph_model_list) > 0 else None
+ bulk_create_in_batches(Paragraph, paragraph_model_list, batch_size=1000)
# 批量插入问题
- QuerySet(Problem).bulk_create(problem_model_list) if len(problem_model_list) > 0 else None
+ bulk_create_in_batches(Problem, problem_model_list, batch_size=1000)
# 批量插入关联问题
- QuerySet(ProblemParagraphMapping).bulk_create(problem_paragraph_mapping_list) if len(
- problem_paragraph_mapping_list) > 0 else None
+ bulk_create_in_batches(ProblemParagraphMapping, problem_paragraph_mapping_list, batch_size=1000)
# 查询文档
query_set = QuerySet(model=Document)
+ if len(document_model_list) == 0:
+ return [], dataset_id
query_set = query_set.filter(**{'id__in': [d.id for d in document_model_list]})
- return native_search(query_set, select_string=get_file_content(
- os.path.join(PROJECT_DIR, "apps", "dataset", 'sql', 'list_document.sql')), with_search_one=False),
+ return native_search({
+ 'document_custom_sql': query_set,
+ 'order_by_query': QuerySet(Document).order_by('-create_time', 'id')
+ }, select_string=get_file_content(
+ os.path.join(PROJECT_DIR, "apps", "dataset", 'sql', 'list_document.sql')),
+ with_search_one=False), dataset_id
@staticmethod
def _batch_sync(document_id_list: List[str]):
@@ -739,19 +1134,41 @@ def batch_delete(self, instance: Dict, with_valid=True):
document_id_list = instance.get("id_list")
QuerySet(Document).filter(id__in=document_id_list).delete()
QuerySet(Paragraph).filter(document_id__in=document_id_list).delete()
- QuerySet(ProblemParagraphMapping).filter(document_id__in=document_id_list).delete()
+ delete_problems_and_mappings(document_id_list)
# 删除向量库
- ListenerManagement.delete_embedding_by_document_list_signal.send(document_id_list)
+ delete_embedding_by_document_list(document_id_list)
return True
+ def batch_cancel(self, instance: Dict, with_valid=True):
+ if with_valid:
+ self.is_valid(raise_exception=True)
+ BatchCancelInstanceSerializer(data=instance).is_valid(raise_exception=True)
+ document_id_list = instance.get("id_list")
+ ListenerManagement.update_status(QuerySet(Paragraph).annotate(
+ reversed_status=Reverse('status'),
+ task_type_status=Substr('reversed_status', TaskType(instance.get('type')).value,
+ 1),
+ ).filter(task_type_status__in=[State.PENDING.value, State.STARTED.value]).filter(
+ document_id__in=document_id_list).values('id'),
+ TaskType(instance.get('type')),
+ State.REVOKE)
+ ListenerManagement.update_status(QuerySet(Document).annotate(
+ reversed_status=Reverse('status'),
+ task_type_status=Substr('reversed_status', TaskType(instance.get('type')).value,
+ 1),
+ ).filter(task_type_status__in=[State.PENDING.value, State.STARTED.value]).filter(
+ id__in=document_id_list).values('id'),
+ TaskType(instance.get('type')),
+ State.REVOKE)
+
def batch_edit_hit_handling(self, instance: Dict, with_valid=True):
if with_valid:
BatchSerializer(data=instance).is_valid(model=Document, raise_exception=True)
hit_handling_method = instance.get('hit_handling_method')
if hit_handling_method is None:
- raise AppApiException(500, '命中处理方式必填')
+ raise AppApiException(500, _('Hit handling method is required'))
if hit_handling_method != 'optimization' and hit_handling_method != 'directly_return':
- raise AppApiException(500, '命中处理方式必须为directly_return|optimization')
+ raise AppApiException(500, _('The hit processing method must be directly_return|optimization'))
self.is_valid(raise_exception=True)
document_id_list = instance.get("id_list")
hit_handling_method = instance.get('hit_handling_method')
@@ -761,6 +1178,73 @@ def batch_edit_hit_handling(self, instance: Dict, with_valid=True):
update_dict['directly_return_similarity'] = directly_return_similarity
QuerySet(Document).filter(id__in=document_id_list).update(**update_dict)
+ def batch_refresh(self, instance: Dict, with_valid=True):
+ if with_valid:
+ self.is_valid(raise_exception=True)
+ document_id_list = instance.get("id_list")
+ state_list = instance.get("state_list")
+ dataset_id = self.data.get('dataset_id')
+ for document_id in document_id_list:
+ try:
+ DocumentSerializers.Operate(
+ data={'dataset_id': dataset_id, 'document_id': document_id}).refresh(state_list)
+ except AlreadyQueued as e:
+ pass
+
+ class GenerateRelated(ApiMixin, serializers.Serializer):
+ document_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid(_('document id')))
+
+ def is_valid(self, *, raise_exception=False):
+ super().is_valid(raise_exception=True)
+ document_id = self.data.get('document_id')
+ if not QuerySet(Document).filter(id=document_id).exists():
+ raise AppApiException(500, _('document id not exist'))
+
+ def generate_related(self, model_id, prompt, state_list=None, with_valid=True):
+ if with_valid:
+ self.is_valid(raise_exception=True)
+ document_id = self.data.get('document_id')
+ ListenerManagement.update_status(QuerySet(Document).filter(id=document_id),
+ TaskType.GENERATE_PROBLEM,
+ State.PENDING)
+ ListenerManagement.update_status(QuerySet(Paragraph).filter(document_id=document_id),
+ TaskType.GENERATE_PROBLEM,
+ State.PENDING)
+ ListenerManagement.get_aggregation_document_status(document_id)()
+ try:
+ generate_related_by_document_id.delay(document_id, model_id, prompt, state_list)
+ except AlreadyQueued as e:
+ raise AppApiException(500, _('The task is being executed, please do not send it again.'))
+
+ class BatchGenerateRelated(ApiMixin, serializers.Serializer):
+ dataset_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid(_('dataset id')))
+
+ def batch_generate_related(self, instance: Dict, with_valid=True):
+ if with_valid:
+ self.is_valid(raise_exception=True)
+ document_id_list = instance.get("document_id_list")
+ model_id = instance.get("model_id")
+ prompt = instance.get("prompt")
+ state_list = instance.get('state_list')
+ ListenerManagement.update_status(QuerySet(Document).filter(id__in=document_id_list),
+ TaskType.GENERATE_PROBLEM,
+ State.PENDING)
+ ListenerManagement.update_status(QuerySet(Paragraph).annotate(
+ reversed_status=Reverse('status'),
+ task_type_status=Substr('reversed_status', TaskType.GENERATE_PROBLEM.value,
+ 1),
+ ).filter(task_type_status__in=state_list, document_id__in=document_id_list)
+ .values('id'),
+ TaskType.GENERATE_PROBLEM,
+ State.PENDING)
+ ListenerManagement.get_aggregation_document_status_by_query_set(
+ QuerySet(Document).filter(id__in=document_id_list))()
+ try:
+ for document_id in document_id_list:
+ generate_related_by_document_id.delay(document_id, model_id, prompt, state_list)
+ except AlreadyQueued as e:
+ pass
+
class FileBufferHandle:
buffer = None
@@ -772,16 +1256,46 @@ def get_buffer(self, file):
default_split_handle = TextSplitHandle()
-split_handles = [DocSplitHandle(), PdfSplitHandle(), default_split_handle]
+split_handles = [HTMLSplitHandle(), DocSplitHandle(), PdfSplitHandle(), XlsxSplitHandle(), XlsSplitHandle(),
+ CsvSplitHandle(),
+ ZipSplitHandle(),
+ default_split_handle]
def save_image(image_list):
- QuerySet(Image).bulk_create(image_list)
+ if image_list is not None and len(image_list) > 0:
+ exist_image_list = [str(i.get('id')) for i in
+ QuerySet(Image).filter(id__in=[i.id for i in image_list]).values('id')]
+ save_image_list = [image for image in image_list if not exist_image_list.__contains__(str(image.id))]
+ save_image_list = list({img.id: img for img in save_image_list}.values())
+ if len(save_image_list) > 0:
+ QuerySet(Image).bulk_create(save_image_list)
def file_to_paragraph(file, pattern_list: List, with_filter: bool, limit: int):
get_buffer = FileBufferHandle().get_buffer
for split_handle in split_handles:
if split_handle.support(file, get_buffer):
- return split_handle.handle(file, pattern_list, with_filter, limit, get_buffer, save_image)
- return default_split_handle.handle(file, pattern_list, with_filter, limit, get_buffer, save_image)
+ result = split_handle.handle(file, pattern_list, with_filter, limit, get_buffer, save_image)
+ if isinstance(result, list):
+ return result
+ return [result]
+ result = default_split_handle.handle(file, pattern_list, with_filter, limit, get_buffer, save_image)
+ if isinstance(result, list):
+ return result
+ return [result]
+
+
+def delete_problems_and_mappings(document_ids):
+ problem_paragraph_mappings = ProblemParagraphMapping.objects.filter(document_id__in=document_ids)
+ problem_ids = set(problem_paragraph_mappings.values_list('problem_id', flat=True))
+
+ if problem_ids:
+ problem_paragraph_mappings.delete()
+ remaining_problem_counts = ProblemParagraphMapping.objects.filter(problem_id__in=problem_ids).values(
+ 'problem_id').annotate(count=Count('problem_id'))
+ remaining_problem_ids = {pc['problem_id'] for pc in remaining_problem_counts}
+ problem_ids_to_delete = problem_ids - remaining_problem_ids
+ Problem.objects.filter(id__in=problem_ids_to_delete).delete()
+ else:
+ problem_paragraph_mappings.delete()
diff --git a/apps/dataset/serializers/file_serializers.py b/apps/dataset/serializers/file_serializers.py
new file mode 100644
index 00000000000..899c8a088de
--- /dev/null
+++ b/apps/dataset/serializers/file_serializers.py
@@ -0,0 +1,93 @@
+# coding=utf-8
+"""
+ @project: maxkb
+ @Author:虎
+ @file: image_serializers.py
+ @date:2024/4/22 16:36
+ @desc:
+"""
+import uuid
+
+from django.db.models import QuerySet
+from django.http import HttpResponse
+from rest_framework import serializers
+
+from common.exception.app_exception import NotFound404
+from common.field.common import UploadedFileField
+from common.util.field_message import ErrMessage
+from dataset.models import File
+from django.utils.translation import gettext_lazy as _
+
+mime_types = {"html": "text/html", "htm": "text/html", "shtml": "text/html", "css": "text/css", "xml": "text/xml",
+ "gif": "image/gif", "jpeg": "image/jpeg", "jpg": "image/jpeg", "js": "application/javascript",
+ "atom": "application/atom+xml", "rss": "application/rss+xml", "mml": "text/mathml", "txt": "text/plain",
+ "jad": "text/vnd.sun.j2me.app-descriptor", "wml": "text/vnd.wap.wml", "htc": "text/x-component",
+ "avif": "image/avif", "png": "image/png", "svg": "image/svg+xml", "svgz": "image/svg+xml",
+ "tif": "image/tiff", "tiff": "image/tiff", "wbmp": "image/vnd.wap.wbmp", "webp": "image/webp",
+ "ico": "image/x-icon", "jng": "image/x-jng", "bmp": "image/x-ms-bmp", "woff": "font/woff",
+ "woff2": "font/woff2", "jar": "application/java-archive", "war": "application/java-archive",
+ "ear": "application/java-archive", "json": "application/json", "hqx": "application/mac-binhex40",
+ "doc": "application/msword", "pdf": "application/pdf", "ps": "application/postscript",
+ "docx": "application/vnd.openxmlformats-officedocument.wordprocessingml.document",
+ "xlsx": "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
+ "pptx": "application/vnd.openxmlformats-officedocument.presentationml.presentation",
+ "eps": "application/postscript", "ai": "application/postscript", "rtf": "application/rtf",
+ "m3u8": "application/vnd.apple.mpegurl", "kml": "application/vnd.google-earth.kml+xml",
+ "kmz": "application/vnd.google-earth.kmz", "xls": "application/vnd.ms-excel",
+ "eot": "application/vnd.ms-fontobject", "ppt": "application/vnd.ms-powerpoint",
+ "odg": "application/vnd.oasis.opendocument.graphics",
+ "odp": "application/vnd.oasis.opendocument.presentation",
+ "ods": "application/vnd.oasis.opendocument.spreadsheet", "odt": "application/vnd.oasis.opendocument.text",
+ "wmlc": "application/vnd.wap.wmlc", "wasm": "application/wasm", "7z": "application/x-7z-compressed",
+ "cco": "application/x-cocoa", "jardiff": "application/x-java-archive-diff",
+ "jnlp": "application/x-java-jnlp-file", "run": "application/x-makeself", "pl": "application/x-perl",
+ "pm": "application/x-perl", "prc": "application/x-pilot", "pdb": "application/x-pilot",
+ "rar": "application/x-rar-compressed", "rpm": "application/x-redhat-package-manager",
+ "sea": "application/x-sea", "swf": "application/x-shockwave-flash", "sit": "application/x-stuffit",
+ "tcl": "application/x-tcl", "tk": "application/x-tcl", "der": "application/x-x509-ca-cert",
+ "pem": "application/x-x509-ca-cert", "crt": "application/x-x509-ca-cert",
+ "xpi": "application/x-xpinstall", "xhtml": "application/xhtml+xml", "xspf": "application/xspf+xml",
+ "zip": "application/zip", "bin": "application/octet-stream", "exe": "application/octet-stream",
+ "dll": "application/octet-stream", "deb": "application/octet-stream", "dmg": "application/octet-stream",
+ "iso": "application/octet-stream", "img": "application/octet-stream", "msi": "application/octet-stream",
+ "msp": "application/octet-stream", "msm": "application/octet-stream", "mid": "audio/midi",
+ "midi": "audio/midi", "kar": "audio/midi", "mp3": "audio/mpeg", "ogg": "audio/ogg", "m4a": "audio/x-m4a",
+ "ra": "audio/x-realaudio", "3gpp": "video/3gpp", "3gp": "video/3gpp", "ts": "video/mp2t",
+ "mp4": "video/mp4", "mpeg": "video/mpeg", "mpg": "video/mpeg", "mov": "video/quicktime",
+ "webm": "video/webm", "flv": "video/x-flv", "m4v": "video/x-m4v", "mng": "video/x-mng",
+ "asx": "video/x-ms-asf", "asf": "video/x-ms-asf", "wmv": "video/x-ms-wmv", "avi": "video/x-msvideo"}
+
+
+class FileSerializer(serializers.Serializer):
+ file = UploadedFileField(required=True, error_messages=ErrMessage.image(_('file')))
+ meta = serializers.JSONField(required=False, allow_null=True)
+
+ def upload(self, with_valid=True):
+ if with_valid:
+ self.is_valid(raise_exception=True)
+ meta = self.data.get('meta', None)
+ if not meta:
+ meta = {'debug': True}
+ file_id = meta.get('file_id', uuid.uuid1())
+ file = File(id=file_id, file_name=self.data.get('file').name, meta=meta)
+ file.save(self.data.get('file').read())
+ return f'/api/file/{file_id}'
+
+ class Operate(serializers.Serializer):
+ id = serializers.UUIDField(required=True)
+
+ def get(self, with_valid=True):
+ if with_valid:
+ self.is_valid(raise_exception=True)
+ file_id = self.data.get('id')
+ file = QuerySet(File).filter(id=file_id).first()
+ if file is None:
+ raise NotFound404(404, _('File not found'))
+ # 如果是音频文件,直接返回文件流
+ file_type = file.file_name.split(".")[-1]
+ if file_type in ['mp3', 'wav', 'ogg', 'aac']:
+ return HttpResponse(file.get_byte(), status=200, headers={'Content-Type': f'audio/{file_type}',
+ 'Content-Disposition': 'attachment; filename="{}"'.format(
+ file.file_name)})
+ return HttpResponse(file.get_byte(), status=200,
+ headers={'Content-Type': mime_types.get(file_type, 'text/plain')})
diff --git a/apps/dataset/serializers/image_serializers.py b/apps/dataset/serializers/image_serializers.py
index 46a1d72bc7a..a5ac289b07f 100644
--- a/apps/dataset/serializers/image_serializers.py
+++ b/apps/dataset/serializers/image_serializers.py
@@ -16,10 +16,11 @@
from common.field.common import UploadedImageField
from common.util.field_message import ErrMessage
from dataset.models import Image
+from django.utils.translation import gettext_lazy as _
class ImageSerializer(serializers.Serializer):
- image = UploadedImageField(required=True, error_messages=ErrMessage.image("图片"))
+ image = UploadedImageField(required=True, error_messages=ErrMessage.image(_('image')))
def upload(self, with_valid=True):
if with_valid:
@@ -38,5 +39,10 @@ def get(self, with_valid=True):
image_id = self.data.get('id')
image = QuerySet(Image).filter(id=image_id).first()
if image is None:
- raise NotFound404(404, "不存在的图片")
+ raise NotFound404(404, _('Image not found'))
+ if image.image_name.endswith('.svg'):
+ return HttpResponse(image.image, status=200, headers={'Content-Type': 'image/svg+xml'})
+ # gif
+ elif image.image_name.endswith('.gif'):
+ return HttpResponse(image.image, status=200, headers={'Content-Type': 'image/gif'})
return HttpResponse(image.image, status=200, headers={'Content-Type': 'image/png'})
diff --git a/apps/dataset/serializers/paragraph_serializers.py b/apps/dataset/serializers/paragraph_serializers.py
index 3188766a782..3a63fd95cd0 100644
--- a/apps/dataset/serializers/paragraph_serializers.py
+++ b/apps/dataset/serializers/paragraph_serializers.py
@@ -9,21 +9,28 @@
import uuid
from typing import Dict
+from celery_once import AlreadyQueued
from django.db import transaction
-from django.db.models import QuerySet
+from django.db.models import QuerySet, Count
from drf_yasg import openapi
from rest_framework import serializers
from common.db.search import page_search
-from common.event.listener_manage import ListenerManagement, UpdateEmbeddingDocumentIdArgs
+from common.event import ListenerManagement
from common.exception.app_exception import AppApiException
from common.mixins.api_mixin import ApiMixin
from common.util.common import post
from common.util.field_message import ErrMessage
-from dataset.models import Paragraph, Problem, Document, ProblemParagraphMapping
-from dataset.serializers.common_serializers import update_document_char_length, BatchSerializer
+from dataset.models import Paragraph, Problem, Document, ProblemParagraphMapping, DataSet, TaskType, State
+from dataset.serializers.common_serializers import update_document_char_length, BatchSerializer, ProblemParagraphObject, \
+ ProblemParagraphManage, get_embedding_model_id_by_dataset_id
from dataset.serializers.problem_serializers import ProblemInstanceSerializer, ProblemSerializer, ProblemSerializers
from embedding.models import SourceType
+from embedding.task.embedding import embedding_by_problem as embedding_by_problem_task, embedding_by_problem, \
+ delete_embedding_by_source, enable_embedding_by_paragraph, disable_embedding_by_paragraph, embedding_by_paragraph, \
+ delete_embedding_by_paragraph, delete_embedding_by_paragraph_ids, update_embedding_document_id
+from dataset.task import generate_related_by_paragraph_id_list
+from django.utils.translation import gettext_lazy as _
class ParagraphSerializer(serializers.ModelSerializer):
@@ -37,17 +44,17 @@ class ParagraphInstanceSerializer(ApiMixin, serializers.Serializer):
"""
段落实例对象
"""
- content = serializers.CharField(required=True, error_messages=ErrMessage.char("段落内容"),
- max_length=4096,
+ content = serializers.CharField(required=True, error_messages=ErrMessage.char(_('content')),
+ max_length=102400,
min_length=1,
allow_null=True, allow_blank=True)
- title = serializers.CharField(required=False, max_length=256, error_messages=ErrMessage.char("段落标题"),
+ title = serializers.CharField(required=False, max_length=256, error_messages=ErrMessage.char(_('section title')),
allow_null=True, allow_blank=True)
problem_list = ProblemInstanceSerializer(required=False, many=True)
- is_active = serializers.BooleanField(required=False, error_messages=ErrMessage.char("段落是否可用"))
+ is_active = serializers.BooleanField(required=False, error_messages=ErrMessage.char(_('Is active')))
@staticmethod
def get_request_body_api():
@@ -55,16 +62,16 @@ def get_request_body_api():
type=openapi.TYPE_OBJECT,
required=['content'],
properties={
- 'content': openapi.Schema(type=openapi.TYPE_STRING, max_length=4096, title="分段内容",
- description="分段内容"),
+ 'content': openapi.Schema(type=openapi.TYPE_STRING, max_length=4096, title=_('section content'),
+ description=_('section content')),
- 'title': openapi.Schema(type=openapi.TYPE_STRING, max_length=256, title="分段标题",
- description="分段标题"),
+ 'title': openapi.Schema(type=openapi.TYPE_STRING, max_length=256, title=_('section title'),
+ description=_('section title')),
- 'is_active': openapi.Schema(type=openapi.TYPE_BOOLEAN, title="是否可用", description="是否可用"),
+ 'is_active': openapi.Schema(type=openapi.TYPE_BOOLEAN, title=_('Is active'), description=_('Is active')),
- 'problem_list': openapi.Schema(type=openapi.TYPE_ARRAY, title='问题列表',
- description="问题列表",
+ 'problem_list': openapi.Schema(type=openapi.TYPE_ARRAY, title=_('problem list'),
+ description=_('problem list'),
items=ProblemInstanceSerializer.get_request_body_api())
}
)
@@ -72,30 +79,30 @@ def get_request_body_api():
class EditParagraphSerializers(serializers.Serializer):
title = serializers.CharField(required=False, max_length=256, error_messages=ErrMessage.char(
- "分段标题"), allow_null=True, allow_blank=True)
- content = serializers.CharField(required=False, max_length=4096, allow_null=True, allow_blank=True,
+ _('section title')), allow_null=True, allow_blank=True)
+ content = serializers.CharField(required=False, max_length=102400, allow_null=True, allow_blank=True,
error_messages=ErrMessage.char(
- "分段内容"))
+ _('section title')))
problem_list = ProblemInstanceSerializer(required=False, many=True)
class ParagraphSerializers(ApiMixin, serializers.Serializer):
title = serializers.CharField(required=False, max_length=256, error_messages=ErrMessage.char(
- "分段标题"), allow_null=True, allow_blank=True)
- content = serializers.CharField(required=True, max_length=4096, error_messages=ErrMessage.char(
- "分段内容"))
+ _('section title')), allow_null=True, allow_blank=True)
+ content = serializers.CharField(required=True, max_length=102400, error_messages=ErrMessage.char(
+ _('section title')))
class Problem(ApiMixin, serializers.Serializer):
- dataset_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid("知识库id"))
+ dataset_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid(_('dataset id')))
- document_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid("文档id"))
+ document_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid(_('document id')))
- paragraph_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid("段落id"))
+ paragraph_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid(_('paragraph id')))
def is_valid(self, *, raise_exception=False):
super().is_valid(raise_exception=True)
if not QuerySet(Paragraph).filter(id=self.data.get('paragraph_id')).exists():
- raise AppApiException(500, "段落id不存在")
+ raise AppApiException(500, _('Paragraph id does not exist'))
def list(self, with_valid=False):
"""
@@ -112,7 +119,7 @@ def list(self, with_valid=False):
QuerySet(Problem).filter(id__in=[row.problem_id for row in problem_paragraph_mapping])]
@transaction.atomic
- def save(self, instance: Dict, with_valid=True, with_embedding=True):
+ def save(self, instance: Dict, with_valid=True, with_embedding=True, embedding_by_problem=None):
if with_valid:
self.is_valid()
ProblemInstanceSerializer(data=instance).is_valid(raise_exception=True)
@@ -124,22 +131,23 @@ def save(self, instance: Dict, with_valid=True, with_embedding=True):
problem.save()
if QuerySet(ProblemParagraphMapping).filter(dataset_id=self.data.get('dataset_id'), problem_id=problem.id,
paragraph_id=self.data.get('paragraph_id')).exists():
- raise AppApiException(500, "已经关联,请勿重复关联")
+ raise AppApiException(500, _('Already associated, please do not associate again'))
problem_paragraph_mapping = ProblemParagraphMapping(id=uuid.uuid1(),
problem_id=problem.id,
document_id=self.data.get('document_id'),
paragraph_id=self.data.get('paragraph_id'),
dataset_id=self.data.get('dataset_id'))
problem_paragraph_mapping.save()
+ model_id = get_embedding_model_id_by_dataset_id(self.data.get('dataset_id'))
if with_embedding:
- ListenerManagement.embedding_by_problem_signal.send({'text': problem.content,
- 'is_active': True,
- 'source_type': SourceType.PROBLEM,
- 'source_id': problem_paragraph_mapping.id,
- 'document_id': self.data.get('document_id'),
- 'paragraph_id': self.data.get('paragraph_id'),
- 'dataset_id': self.data.get('dataset_id'),
- })
+ embedding_by_problem_task({'text': problem.content,
+ 'is_active': True,
+ 'source_type': SourceType.PROBLEM,
+ 'source_id': problem_paragraph_mapping.id,
+ 'document_id': self.data.get('document_id'),
+ 'paragraph_id': self.data.get('paragraph_id'),
+ 'dataset_id': self.data.get('dataset_id'),
+ }, model_id)
return ProblemSerializers.Operate(
data={'dataset_id': self.data.get('dataset_id'),
@@ -151,17 +159,17 @@ def get_request_params_api():
in_=openapi.IN_PATH,
type=openapi.TYPE_STRING,
required=True,
- description='知识库id'),
+ description=_('dataset id')),
openapi.Parameter(name='document_id',
in_=openapi.IN_PATH,
type=openapi.TYPE_STRING,
required=True,
- description='文档id'),
+ description=_('document id')),
openapi.Parameter(name='paragraph_id',
in_=openapi.IN_PATH,
type=openapi.TYPE_STRING,
required=True,
- description='段落id')]
+ description=_('paragraph id'))]
@staticmethod
def get_request_body_api():
@@ -169,7 +177,7 @@ def get_request_body_api():
required=["content"],
properties={
'content': openapi.Schema(
- type=openapi.TYPE_STRING, title="内容")
+ type=openapi.TYPE_STRING, title=_('content'),)
})
@staticmethod
@@ -180,30 +188,30 @@ def get_response_body_api():
properties={
'id': openapi.Schema(type=openapi.TYPE_STRING, title="id",
description="id", default="xx"),
- 'content': openapi.Schema(type=openapi.TYPE_STRING, title="问题内容",
- description="问题内容", default='问题内容'),
- 'hit_num': openapi.Schema(type=openapi.TYPE_INTEGER, title="命中数量", description="命中数量",
+ 'content': openapi.Schema(type=openapi.TYPE_STRING, title=_('question content'),
+ description=_('question content'), default=_('question content')),
+ 'hit_num': openapi.Schema(type=openapi.TYPE_INTEGER, title=_('hit num'), description=_('hit num'),
default=1),
- 'dataset_id': openapi.Schema(type=openapi.TYPE_STRING, title="知识库id",
- description="知识库id", default='xxx'),
- 'update_time': openapi.Schema(type=openapi.TYPE_STRING, title="修改时间",
- description="修改时间",
+ 'dataset_id': openapi.Schema(type=openapi.TYPE_STRING, title=_('dataset id'),
+ description=_('dataset id'), default='xxx'),
+ 'update_time': openapi.Schema(type=openapi.TYPE_STRING, title=_('update time'),
+ description=_('update time'),
default="1970-01-01 00:00:00"),
- 'create_time': openapi.Schema(type=openapi.TYPE_STRING, title="创建时间",
- description="创建时间",
+ 'create_time': openapi.Schema(type=openapi.TYPE_STRING, title=_('create time'),
+ description=_('create time'),
default="1970-01-01 00:00:00"
)
}
)
class Association(ApiMixin, serializers.Serializer):
- dataset_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid("知识库id"))
+ dataset_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid(_('dataset id')))
- problem_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid("问题id"))
+ problem_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid(_('problem id')))
- document_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid("文档id"))
+ document_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid(_('document id')))
- paragraph_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid("段落id"))
+ paragraph_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid(_('paragraph id')))
def is_valid(self, *, raise_exception=True):
super().is_valid(raise_exception=True)
@@ -211,9 +219,9 @@ def is_valid(self, *, raise_exception=True):
paragraph_id = self.data.get('paragraph_id')
problem_id = self.data.get("problem_id")
if not QuerySet(Paragraph).filter(dataset_id=dataset_id, id=paragraph_id).exists():
- raise AppApiException(500, "段落不存在")
+ raise AppApiException(500, _('Paragraph does not exist'))
if not QuerySet(Problem).filter(dataset_id=dataset_id, id=problem_id).exists():
- raise AppApiException(500, "问题不存在")
+ raise AppApiException(500, _('Problem does not exist'))
def association(self, with_valid=True, with_embedding=True):
if with_valid:
@@ -226,14 +234,15 @@ def association(self, with_valid=True, with_embedding=True):
problem_id=problem.id)
problem_paragraph_mapping.save()
if with_embedding:
- ListenerManagement.embedding_by_problem_signal.send({'text': problem.content,
- 'is_active': True,
- 'source_type': SourceType.PROBLEM,
- 'source_id': problem_paragraph_mapping.id,
- 'document_id': self.data.get('document_id'),
- 'paragraph_id': self.data.get('paragraph_id'),
- 'dataset_id': self.data.get('dataset_id'),
- })
+ model_id = get_embedding_model_id_by_dataset_id(self.data.get('dataset_id'))
+ embedding_by_problem({'text': problem.content,
+ 'is_active': True,
+ 'source_type': SourceType.PROBLEM,
+ 'source_id': problem_paragraph_mapping.id,
+ 'document_id': self.data.get('document_id'),
+ 'paragraph_id': self.data.get('paragraph_id'),
+ 'dataset_id': self.data.get('dataset_id'),
+ }, model_id)
def un_association(self, with_valid=True):
if with_valid:
@@ -245,7 +254,7 @@ def un_association(self, with_valid=True):
'problem_id')).first()
problem_paragraph_mapping_id = problem_paragraph_mapping.id
problem_paragraph_mapping.delete()
- ListenerManagement.delete_embedding_by_source_signal.send(problem_paragraph_mapping_id)
+ delete_embedding_by_source(problem_paragraph_mapping_id)
return True
@staticmethod
@@ -254,27 +263,27 @@ def get_request_params_api():
in_=openapi.IN_PATH,
type=openapi.TYPE_STRING,
required=True,
- description='知识库id'),
+ description=_('dataset id')),
openapi.Parameter(name='document_id',
in_=openapi.IN_PATH,
type=openapi.TYPE_STRING,
required=True,
- description='文档id')
+ description=_('document id'))
, openapi.Parameter(name='paragraph_id',
in_=openapi.IN_PATH,
type=openapi.TYPE_STRING,
required=True,
- description='段落id'),
+ description=_('paragraph id')),
openapi.Parameter(name='problem_id',
in_=openapi.IN_PATH,
type=openapi.TYPE_STRING,
required=True,
- description='问题id')
+ description=_('problem id'))
]
class Batch(serializers.Serializer):
- dataset_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid("知识库id"))
- document_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid("文档id"))
+ dataset_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid(_('dataset id')))
+ document_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid(_('document id')))
@transaction.atomic
def batch_delete(self, instance: Dict, with_valid=True):
@@ -283,20 +292,20 @@ def batch_delete(self, instance: Dict, with_valid=True):
self.is_valid(raise_exception=True)
paragraph_id_list = instance.get("id_list")
QuerySet(Paragraph).filter(id__in=paragraph_id_list).delete()
- QuerySet(ProblemParagraphMapping).filter(paragraph_id__in=paragraph_id_list).delete()
+ delete_problems_and_mappings(paragraph_id_list)
update_document_char_length(self.data.get('document_id'))
# 删除向量库
- ListenerManagement.delete_embedding_by_paragraph_ids(paragraph_id_list)
+ delete_embedding_by_paragraph_ids(paragraph_id_list)
return True
class Migrate(ApiMixin, serializers.Serializer):
- dataset_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid("知识库id"))
- document_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid("文档id"))
- target_dataset_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid("目标知识库id"))
- target_document_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid("目标文档id"))
- paragraph_id_list = serializers.ListField(required=True, error_messages=ErrMessage.char("段落列表"),
+ dataset_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid(_('dataset id')))
+ document_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid(_('document id')))
+ target_dataset_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid(_('target dataset id')))
+ target_document_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid(_('target document id')))
+ paragraph_id_list = serializers.ListField(required=True, error_messages=ErrMessage.char(_('paragraph id list')),
child=serializers.UUIDField(required=True,
- error_messages=ErrMessage.uuid("段落id")))
+ error_messages=ErrMessage.uuid(_('paragraph id'))))
def is_valid(self, *, raise_exception=False):
super().is_valid(raise_exception=True)
@@ -305,12 +314,14 @@ def is_valid(self, *, raise_exception=False):
document_id = self.data.get('document_id')
target_document_id = self.data.get('target_document_id')
if document_id == target_document_id:
- raise AppApiException(5000, "需要迁移的文档和目标文档一致")
+ raise AppApiException(5000, _('The document to be migrated is consistent with the target document'))
if len([document for document in document_list if str(document.id) == self.data.get('document_id')]) < 1:
- raise AppApiException(5000, f"文档id不存在【{self.data.get('document_id')}】")
+ raise AppApiException(5000, _('The document id does not exist [{document_id}]').format(
+ document_id=self.data.get('document_id')))
if len([document for document in document_list if
str(document.id) == self.data.get('target_document_id')]) < 1:
- raise AppApiException(5000, f"目标文档id不存在【{self.data.get('target_document_id')}】")
+ raise AppApiException(5000, _('The target document id does not exist [{document_id}]').format(
+ document_id=self.data.get('target_document_id')))
@transaction.atomic
def migrate(self, with_valid=True):
@@ -335,10 +346,8 @@ def migrate(self, with_valid=True):
# 修改mapping
QuerySet(ProblemParagraphMapping).bulk_update(problem_paragraph_mapping_list,
['document_id'])
- # 修改向量段落信息
- ListenerManagement.update_embedding_document_id(UpdateEmbeddingDocumentIdArgs(
- [paragraph.id for paragraph in paragraph_list],
- target_document_id, target_dataset_id))
+ update_embedding_document_id([paragraph.id for paragraph in paragraph_list],
+ target_document_id, target_dataset_id, None)
# 修改段落信息
paragraph_list.update(document_id=target_document_id)
# 不同数据集迁移
@@ -365,12 +374,17 @@ def migrate(self, with_valid=True):
# 修改mapping
QuerySet(ProblemParagraphMapping).bulk_update(problem_paragraph_mapping_list,
['problem_id', 'dataset_id', 'document_id'])
- # 修改向量段落信息
- ListenerManagement.update_embedding_document_id(UpdateEmbeddingDocumentIdArgs(
- [paragraph.id for paragraph in paragraph_list],
- target_document_id, target_dataset_id))
+ target_dataset = QuerySet(DataSet).filter(id=target_dataset_id).first()
+ dataset = QuerySet(DataSet).filter(id=dataset_id).first()
+ embedding_model_id = None
+ if target_dataset.embedding_mode_id != dataset.embedding_mode_id:
+ embedding_model_id = str(target_dataset.embedding_mode_id)
+ pid_list = [paragraph.id for paragraph in paragraph_list]
# 修改段落信息
paragraph_list.update(dataset_id=target_dataset_id, document_id=target_document_id)
+ # 修改向量段落信息
+ update_embedding_document_id(pid_list, target_document_id, target_dataset_id, embedding_model_id)
+
update_document_char_length(document_id)
update_document_char_length(target_document_id)
@@ -409,22 +423,22 @@ def get_request_params_api():
in_=openapi.IN_PATH,
type=openapi.TYPE_STRING,
required=True,
- description='文档id'),
+ description=_('document id')),
openapi.Parameter(name='document_id',
in_=openapi.IN_PATH,
type=openapi.TYPE_STRING,
required=True,
- description='文档id'),
+ description=_('document id')),
openapi.Parameter(name='target_dataset_id',
in_=openapi.IN_PATH,
type=openapi.TYPE_STRING,
required=True,
- description='目标知识库id'),
+ description=_('target dataset id')),
openapi.Parameter(name='target_document_id',
in_=openapi.IN_PATH,
type=openapi.TYPE_STRING,
required=True,
- description='目标知识库id')
+ description=_('target document id')),
]
@staticmethod
@@ -432,34 +446,35 @@ def get_request_body_api():
return openapi.Schema(
type=openapi.TYPE_ARRAY,
items=openapi.Schema(type=openapi.TYPE_STRING),
- title='段落id列表',
- description="段落id列表"
+ title=_('paragraph id list'),
+ description=_('paragraph id list')
)
class Operate(ApiMixin, serializers.Serializer):
# 段落id
paragraph_id = serializers.UUIDField(required=True, error_messages=ErrMessage.char(
- "段落id"))
+ _('paragraph id')))
# 知识库id
dataset_id = serializers.UUIDField(required=True, error_messages=ErrMessage.char(
- "知识库id"))
+ _('dataset id')))
# 文档id
document_id = serializers.UUIDField(required=True, error_messages=ErrMessage.char(
- "文档id"))
+ _('document id')))
def is_valid(self, *, raise_exception=True):
super().is_valid(raise_exception=True)
if not QuerySet(Paragraph).filter(id=self.data.get('paragraph_id')).exists():
- raise AppApiException(500, "段落id不存在")
+ raise AppApiException(500, _('Paragraph id does not exist'))
@staticmethod
- def post_embedding(paragraph, instance):
+ def post_embedding(paragraph, instance, dataset_id):
if 'is_active' in instance and instance.get('is_active') is not None:
- s = (ListenerManagement.enable_embedding_by_paragraph_signal if instance.get(
- 'is_active') else ListenerManagement.disable_embedding_by_paragraph_signal)
- s.send(paragraph.get('id'))
+ (enable_embedding_by_paragraph if instance.get(
+ 'is_active') else disable_embedding_by_paragraph)(paragraph.get('id'))
+
else:
- ListenerManagement.embedding_by_paragraph_signal.send(paragraph.get('id'))
+ model_id = get_embedding_model_id_by_dataset_id(dataset_id)
+ embedding_by_paragraph(paragraph.get('id'), model_id)
return paragraph
@post(post_embedding)
@@ -485,7 +500,7 @@ def edit(self, instance: Dict):
# 校验前端 携带过来的id
for update_problem in update_problem_list:
if not set([str(row.id) for row in problem_list]).__contains__(update_problem.get('id')):
- raise AppApiException(500, update_problem.get('id') + '问题id不存在')
+ raise AppApiException(500, _('Problem id does not exist [{problem_id}]').format(problem_id=update_problem.get('id')))
# 对比需要删除的问题
delete_problem_list = list(filter(
lambda row: not [str(update_row.get('id')) for update_row in update_problem_list].__contains__(
@@ -507,7 +522,7 @@ def edit(self, instance: Dict):
_paragraph.save()
update_document_char_length(self.data.get('document_id'))
- return self.one(), instance
+ return self.one(), instance, self.data.get('dataset_id')
def get_problem_list(self):
ProblemParagraphMapping(ProblemParagraphMapping)
@@ -528,10 +543,11 @@ def delete(self, with_valid=False):
if with_valid:
self.is_valid(raise_exception=True)
paragraph_id = self.data.get('paragraph_id')
- QuerySet(Paragraph).filter(id=paragraph_id).delete()
- QuerySet(ProblemParagraphMapping).filter(paragraph_id=paragraph_id).delete()
+ Paragraph.objects.filter(id=paragraph_id).delete()
+ delete_problems_and_mappings([paragraph_id])
+
update_document_char_length(self.data.get('document_id'))
- ListenerManagement.delete_embedding_by_paragraph_signal.send(paragraph_id)
+ delete_embedding_by_paragraph(paragraph_id)
@staticmethod
def get_request_body_api():
@@ -544,20 +560,20 @@ def get_response_body_api():
@staticmethod
def get_request_params_api():
return [openapi.Parameter(type=openapi.TYPE_STRING, in_=openapi.IN_PATH, name='paragraph_id',
- description="段落id")]
+ description=_('paragraph id'))]
class Create(ApiMixin, serializers.Serializer):
dataset_id = serializers.UUIDField(required=True, error_messages=ErrMessage.char(
- "知识库id"))
+ _('dataset id')))
document_id = serializers.UUIDField(required=True, error_messages=ErrMessage.char(
- "文档id"))
+ _('document id')))
def is_valid(self, *, raise_exception=False):
super().is_valid(raise_exception=True)
if not QuerySet(Document).filter(id=self.data.get('document_id'),
dataset_id=self.data.get('dataset_id')).exists():
- raise AppApiException(500, "文档id不正确")
+ raise AppApiException(500, _('The document id is incorrect'))
def save(self, instance: Dict, with_valid=True, with_embedding=True):
if with_valid:
@@ -567,8 +583,10 @@ def save(self, instance: Dict, with_valid=True, with_embedding=True):
document_id = self.data.get('document_id')
paragraph_problem_model = self.get_paragraph_problem_model(dataset_id, document_id, instance)
paragraph = paragraph_problem_model.get('paragraph')
- problem_model_list = paragraph_problem_model.get('problem_model_list')
- problem_paragraph_mapping_list = paragraph_problem_model.get('problem_paragraph_mapping_list')
+ problem_paragraph_object_list = paragraph_problem_model.get('problem_paragraph_object_list')
+ problem_model_list, problem_paragraph_mapping_list = (ProblemParagraphManage(problem_paragraph_object_list,
+ dataset_id).
+ to_problem_model_list())
# 插入段落
paragraph_problem_model.get('paragraph').save()
# 插入問題
@@ -579,7 +597,8 @@ def save(self, instance: Dict, with_valid=True, with_embedding=True):
# 修改长度
update_document_char_length(document_id)
if with_embedding:
- ListenerManagement.embedding_by_paragraph_signal.send(str(paragraph.id))
+ model_id = get_embedding_model_id_by_dataset_id(dataset_id)
+ embedding_by_paragraph(str(paragraph.id), model_id)
return ParagraphSerializers.Operate(
data={'paragraph_id': str(paragraph.id), 'dataset_id': dataset_id, 'document_id': document_id}).one(
with_valid=True)
@@ -591,30 +610,12 @@ def get_paragraph_problem_model(dataset_id: str, document_id: str, instance: Dic
content=instance.get("content"),
dataset_id=dataset_id,
title=instance.get("title") if 'title' in instance else '')
- problem_list = instance.get('problem_list')
- exists_problem_list = []
- if 'problem_list' in instance and len(problem_list) > 0:
- exists_problem_list = QuerySet(Problem).filter(dataset_id=dataset_id,
- content__in=[p.get('content') for p in
- problem_list]).all()
-
- problem_model_list = [
- ParagraphSerializers.Create.or_get(exists_problem_list, problem.get('content'), dataset_id) for
- problem in (
- instance.get('problem_list') if 'problem_list' in instance else [])]
- # 问题去重
- problem_model_list = [x for i, x in enumerate(problem_model_list) if
- len([item for item in problem_model_list[:i] if item.content == x.content]) <= 0]
-
- problem_paragraph_mapping_list = [
- ProblemParagraphMapping(id=uuid.uuid1(), document_id=document_id, problem_id=problem_model.id,
- paragraph_id=paragraph.id,
- dataset_id=dataset_id) for
- problem_model in problem_model_list]
+ problem_paragraph_object_list = [
+ ProblemParagraphObject(dataset_id, document_id, paragraph.id, problem.get('content')) for problem in
+ (instance.get('problem_list') if 'problem_list' in instance else [])]
+
return {'paragraph': paragraph,
- 'problem_model_list': [problem_model for problem_model in problem_model_list if
- not list(exists_problem_list).__contains__(problem_model)],
- 'problem_paragraph_mapping_list': problem_paragraph_mapping_list}
+ 'problem_paragraph_object_list': problem_paragraph_object_list}
@staticmethod
def or_get(exists_problem_list, content, dataset_id):
@@ -634,22 +635,22 @@ def get_request_params_api():
in_=openapi.IN_PATH,
type=openapi.TYPE_STRING,
required=True,
- description='知识库id'),
+ description=_('dataset id')),
openapi.Parameter(name='document_id', in_=openapi.IN_PATH,
type=openapi.TYPE_STRING,
required=True,
- description="文档id")
+ description=_('document id'))
]
class Query(ApiMixin, serializers.Serializer):
dataset_id = serializers.UUIDField(required=True, error_messages=ErrMessage.char(
- "知识库id"))
+ _('dataset id')))
document_id = serializers.UUIDField(required=True, error_messages=ErrMessage.char(
- "文档id"))
+ _('document id')))
title = serializers.CharField(required=False, error_messages=ErrMessage.char(
- "段落标题"))
+ _('section title')))
content = serializers.CharField(required=False)
@@ -662,6 +663,7 @@ def get_query_set(self):
**{'title__icontains': self.data.get('title')})
if 'content' in self.data:
query_set = query_set.filter(**{'content__icontains': self.data.get('content')})
+ query_set = query_set.order_by('-create_time', 'id')
return query_set
def list(self):
@@ -677,17 +679,17 @@ def get_request_params_api():
in_=openapi.IN_PATH,
type=openapi.TYPE_STRING,
required=True,
- description='文档id'),
+ description=_('document id')),
openapi.Parameter(name='title',
in_=openapi.IN_QUERY,
type=openapi.TYPE_STRING,
required=False,
- description='标题'),
+ description=_('title')),
openapi.Parameter(name='content',
in_=openapi.IN_QUERY,
type=openapi.TYPE_STRING,
required=False,
- description='内容')
+ description=_('content'))
]
@staticmethod
@@ -700,28 +702,67 @@ def get_response_body_api():
properties={
'id': openapi.Schema(type=openapi.TYPE_STRING, title="id",
description="id", default="xx"),
- 'content': openapi.Schema(type=openapi.TYPE_STRING, title="段落内容",
- description="段落内容", default='段落内容'),
- 'title': openapi.Schema(type=openapi.TYPE_STRING, title="标题",
- description="标题", default="xxx的描述"),
- 'hit_num': openapi.Schema(type=openapi.TYPE_INTEGER, title="命中数量", description="命中数量",
+ 'content': openapi.Schema(type=openapi.TYPE_STRING, title=_('content'),
+ description=_('content'), default=_('content')),
+ 'title': openapi.Schema(type=openapi.TYPE_STRING, title=_('title'),
+ description=_('title'), default="xxx"),
+ 'hit_num': openapi.Schema(type=openapi.TYPE_INTEGER, title=_('hit num'), description=_('hit num'),
default=1),
- 'star_num': openapi.Schema(type=openapi.TYPE_INTEGER, title="点赞数量",
- description="点赞数量", default=1),
- 'trample_num': openapi.Schema(type=openapi.TYPE_INTEGER, title="点踩数量",
- description="点踩数", default=1),
- 'dataset_id': openapi.Schema(type=openapi.TYPE_STRING, title="知识库id",
- description="知识库id", default='xxx'),
- 'document_id': openapi.Schema(type=openapi.TYPE_STRING, title="文档id",
- description="文档id", default='xxx'),
- 'is_active': openapi.Schema(type=openapi.TYPE_BOOLEAN, title="是否可用",
- description="是否可用", default=True),
- 'update_time': openapi.Schema(type=openapi.TYPE_STRING, title="修改时间",
- description="修改时间",
+ 'star_num': openapi.Schema(type=openapi.TYPE_INTEGER, title=_('Number of likes'),
+ description=_('Number of likes'), default=1),
+ 'trample_num': openapi.Schema(type=openapi.TYPE_INTEGER, title=_('Number of dislikes'),
+ description=_('Number of dislikes'), default=1),
+ 'dataset_id': openapi.Schema(type=openapi.TYPE_STRING, title=_('dataset id'),
+ description=_('dataset id'), default='xxx'),
+ 'document_id': openapi.Schema(type=openapi.TYPE_STRING, title=_('document id'),
+ description=_('document id'), default='xxx'),
+ 'is_active': openapi.Schema(type=openapi.TYPE_BOOLEAN, title=_('Is active'),
+ description=_('Is active'), default=True),
+ 'update_time': openapi.Schema(type=openapi.TYPE_STRING, title=_('update time'),
+ description=_('update time'),
default="1970-01-01 00:00:00"),
- 'create_time': openapi.Schema(type=openapi.TYPE_STRING, title="创建时间",
- description="创建时间",
+ 'create_time': openapi.Schema(type=openapi.TYPE_STRING, title=_('create time'),
+ description=_('create time'),
default="1970-01-01 00:00:00"
)
}
)
+
+ class BatchGenerateRelated(ApiMixin, serializers.Serializer):
+ dataset_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid(_('dataset id')))
+ document_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid(_('document id')))
+
+ def batch_generate_related(self, instance: Dict, with_valid=True):
+ if with_valid:
+ self.is_valid(raise_exception=True)
+ paragraph_id_list = instance.get("paragraph_id_list")
+ model_id = instance.get("model_id")
+ prompt = instance.get("prompt")
+ document_id = self.data.get('document_id')
+ ListenerManagement.update_status(QuerySet(Document).filter(id=document_id),
+ TaskType.GENERATE_PROBLEM,
+ State.PENDING)
+ ListenerManagement.update_status(QuerySet(Paragraph).filter(id__in=paragraph_id_list),
+ TaskType.GENERATE_PROBLEM,
+ State.PENDING)
+ ListenerManagement.get_aggregation_document_status(document_id)()
+ try:
+ generate_related_by_paragraph_id_list.delay(document_id, paragraph_id_list, model_id,
+ prompt)
+ except AlreadyQueued:
+ raise AppApiException(500, _('The task is being executed, please do not send it again.'))
+
+
+def delete_problems_and_mappings(paragraph_ids):
+ problem_paragraph_mappings = ProblemParagraphMapping.objects.filter(paragraph_id__in=paragraph_ids)
+ problem_ids = set(problem_paragraph_mappings.values_list('problem_id', flat=True))
+
+ if problem_ids:
+ problem_paragraph_mappings.delete()
+ remaining_problem_counts = ProblemParagraphMapping.objects.filter(problem_id__in=problem_ids).values(
+ 'problem_id').annotate(count=Count('problem_id'))
+ remaining_problem_ids = {pc['problem_id'] for pc in remaining_problem_counts}
+ problem_ids_to_delete = problem_ids - remaining_problem_ids
+ Problem.objects.filter(id__in=problem_ids_to_delete).delete()
+ else:
+ problem_paragraph_mappings.delete()
diff --git a/apps/dataset/serializers/problem_serializers.py b/apps/dataset/serializers/problem_serializers.py
index 5d00d5be4cc..c4b7a3c14cd 100644
--- a/apps/dataset/serializers/problem_serializers.py
+++ b/apps/dataset/serializers/problem_serializers.py
@@ -8,6 +8,7 @@
"""
import os
import uuid
+from functools import reduce
from typing import Dict, List
from django.db import transaction
@@ -16,12 +17,15 @@
from rest_framework import serializers
from common.db.search import native_search, native_page_search
-from common.event import ListenerManagement, UpdateProblemArgs
from common.mixins.api_mixin import ApiMixin
from common.util.field_message import ErrMessage
from common.util.file_util import get_file_content
-from dataset.models import Problem, Paragraph, ProblemParagraphMapping
+from dataset.models import Problem, Paragraph, ProblemParagraphMapping, DataSet
+from dataset.serializers.common_serializers import get_embedding_model_id_by_dataset_id
+from embedding.models import SourceType
+from embedding.task import delete_embedding_by_source_ids, update_problem_embedding, embedding_by_data_list
from smartdoc.conf import PROJECT_DIR
+from django.utils.translation import gettext_lazy as _
class ProblemSerializer(serializers.ModelSerializer):
@@ -32,9 +36,9 @@ class Meta:
class ProblemInstanceSerializer(ApiMixin, serializers.Serializer):
- id = serializers.CharField(required=False, error_messages=ErrMessage.char("问题id"))
+ id = serializers.CharField(required=False, error_messages=ErrMessage.char(_('problem id')))
- content = serializers.CharField(required=True, max_length=256, error_messages=ErrMessage.char("问题内容"))
+ content = serializers.CharField(required=True, max_length=256, error_messages=ErrMessage.char(_('content')))
@staticmethod
def get_request_body_api():
@@ -43,18 +47,48 @@ def get_request_body_api():
properties={
'id': openapi.Schema(
type=openapi.TYPE_STRING,
- title="问题id,修改的时候传递,创建的时候不传"),
+ title=_('Issue ID is passed when modifying, not when creating.')),
'content': openapi.Schema(
- type=openapi.TYPE_STRING, title="内容")
+ type=openapi.TYPE_STRING, title=_('content'),)
})
+class AssociationParagraph(serializers.Serializer):
+ paragraph_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid(_('paragraph id')))
+ document_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid(_('document id')))
+
+
+class BatchAssociation(serializers.Serializer):
+ problem_id_list = serializers.ListField(required=True, error_messages=ErrMessage.list(_('problem id list')),
+ child=serializers.UUIDField(required=True,
+ error_messages=ErrMessage.uuid(_('problem id'))))
+ paragraph_list = AssociationParagraph(many=True)
+
+
+def is_exits(exits_problem_paragraph_mapping_list, new_paragraph_mapping):
+ filter_list = [exits_problem_paragraph_mapping for exits_problem_paragraph_mapping in
+ exits_problem_paragraph_mapping_list if
+ str(exits_problem_paragraph_mapping.paragraph_id) == new_paragraph_mapping.paragraph_id
+ and str(exits_problem_paragraph_mapping.problem_id) == new_paragraph_mapping.problem_id
+ and str(exits_problem_paragraph_mapping.dataset_id) == new_paragraph_mapping.dataset_id]
+ return len(filter_list) > 0
+
+
+def to_problem_paragraph_mapping(problem, document_id: str, paragraph_id: str, dataset_id: str):
+ return ProblemParagraphMapping(id=uuid.uuid1(),
+ document_id=document_id,
+ paragraph_id=paragraph_id,
+ dataset_id=dataset_id,
+ problem_id=str(problem.id)), problem
+
+
class ProblemSerializers(ApiMixin, serializers.Serializer):
class Create(serializers.Serializer):
- dataset_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid("知识库id"))
- problem_list = serializers.ListField(required=True, error_messages=ErrMessage.list("问题列表"),
+ dataset_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid(_('dataset id')))
+ problem_list = serializers.ListField(required=True, error_messages=ErrMessage.list(_('problem list')),
child=serializers.CharField(required=True,
- error_messages=ErrMessage.char("问题")))
+ max_length=256,
+ error_messages=ErrMessage.char(_('problem'))))
def batch(self, with_valid=True):
if with_valid:
@@ -75,8 +109,8 @@ def batch(self, with_valid=True):
return [ProblemSerializer(problem_instance).data for problem_instance in problem_instance_list]
class Query(serializers.Serializer):
- dataset_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid("知识库id"))
- content = serializers.CharField(required=False, error_messages=ErrMessage.char("问题"))
+ dataset_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid(_('dataset id')))
+ content = serializers.CharField(required=False, error_messages=ErrMessage.char(_('content')))
def get_query_set(self):
query_set = QuerySet(model=Problem)
@@ -98,7 +132,7 @@ def page(self, current_page, page_size):
os.path.join(PROJECT_DIR, "apps", "dataset", 'sql', 'list_problem.sql')))
class BatchOperate(serializers.Serializer):
- dataset_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid("知识库id"))
+ dataset_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid(_('dataset id')))
def delete(self, problem_id_list: List, with_valid=True):
if with_valid:
@@ -110,13 +144,54 @@ def delete(self, problem_id_list: List, with_valid=True):
source_ids = [row.id for row in problem_paragraph_mapping_list]
problem_paragraph_mapping_list.delete()
QuerySet(Problem).filter(id__in=problem_id_list).delete()
- ListenerManagement.delete_embedding_by_source_ids_signal.send(source_ids)
+ delete_embedding_by_source_ids(source_ids)
return True
+ def association(self, instance: Dict, with_valid=True):
+ if with_valid:
+ self.is_valid(raise_exception=True)
+ BatchAssociation(data=instance).is_valid(raise_exception=True)
+ dataset_id = self.data.get('dataset_id')
+ paragraph_list = instance.get('paragraph_list')
+ problem_id_list = instance.get('problem_id_list')
+ problem_list = QuerySet(Problem).filter(id__in=problem_id_list)
+ exits_problem_paragraph_mapping = QuerySet(ProblemParagraphMapping).filter(problem_id__in=problem_id_list,
+ paragraph_id__in=[
+ p.get('paragraph_id')
+ for p in
+ paragraph_list])
+ problem_paragraph_mapping_list = [(problem_paragraph_mapping, problem) for
+ problem_paragraph_mapping, problem in reduce(lambda x, y: [*x, *y],
+ [[
+ to_problem_paragraph_mapping(
+ problem,
+ paragraph.get(
+ 'document_id'),
+ paragraph.get(
+ 'paragraph_id'),
+ dataset_id) for
+ paragraph in
+ paragraph_list]
+ for problem in
+ problem_list], []) if
+ not is_exits(exits_problem_paragraph_mapping, problem_paragraph_mapping)]
+ QuerySet(ProblemParagraphMapping).bulk_create(
+ [problem_paragraph_mapping for problem_paragraph_mapping, problem in problem_paragraph_mapping_list])
+ data_list = [{'text': problem.content,
+ 'is_active': True,
+ 'source_type': SourceType.PROBLEM,
+ 'source_id': str(problem_paragraph_mapping.id),
+ 'document_id': str(problem_paragraph_mapping.document_id),
+ 'paragraph_id': str(problem_paragraph_mapping.paragraph_id),
+ 'dataset_id': dataset_id,
+ } for problem_paragraph_mapping, problem in problem_paragraph_mapping_list]
+ model_id = get_embedding_model_id_by_dataset_id(self.data.get('dataset_id'))
+ embedding_by_data_list(data_list, model_id=model_id)
+
class Operate(serializers.Serializer):
- dataset_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid("知识库id"))
+ dataset_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid(_('dataset id')))
- problem_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid("问题id"))
+ problem_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid(_('problem id')))
def list_paragraph(self, with_valid=True):
if with_valid:
@@ -145,7 +220,7 @@ def delete(self, with_valid=True):
source_ids = [row.id for row in problem_paragraph_mapping_list]
problem_paragraph_mapping_list.delete()
QuerySet(Problem).filter(id=self.data.get('problem_id')).delete()
- ListenerManagement.delete_embedding_by_source_ids_signal.send(source_ids)
+ delete_embedding_by_source_ids(source_ids)
return True
@transaction.atomic
@@ -157,6 +232,8 @@ def edit(self, instance: Dict, with_valid=True):
content = instance.get('content')
problem = QuerySet(Problem).filter(id=problem_id,
dataset_id=dataset_id).first()
+ QuerySet(DataSet).filter(id=dataset_id)
problem.content = content
problem.save()
- ListenerManagement.update_problem_signal.send(UpdateProblemArgs(problem_id, content))
+ model_id = get_embedding_model_id_by_dataset_id(dataset_id)
+ update_problem_embedding(problem_id, content, model_id)
diff --git a/apps/dataset/sql/list_document.sql b/apps/dataset/sql/list_document.sql
index 818d783c834..8b7891bf6e6 100644
--- a/apps/dataset/sql/list_document.sql
+++ b/apps/dataset/sql/list_document.sql
@@ -1,6 +1,11 @@
+SELECT * from (
SELECT
"document".* ,
to_json("document"."meta") as meta,
+ to_json("document"."status_meta") as status_meta,
(SELECT "count"("id") FROM "paragraph" WHERE document_id="document"."id") as "paragraph_count"
FROM
"document" "document"
+${document_custom_sql}
+) temp
+${order_by_query}
\ No newline at end of file
diff --git a/apps/dataset/sql/list_paragraph_document_name.sql b/apps/dataset/sql/list_paragraph_document_name.sql
new file mode 100644
index 00000000000..a95209bf5b8
--- /dev/null
+++ b/apps/dataset/sql/list_paragraph_document_name.sql
@@ -0,0 +1,5 @@
+SELECT
+ (SELECT "name" FROM "document" WHERE "id"=document_id) as document_name,
+ *
+FROM
+ "paragraph"
diff --git a/apps/dataset/sql/list_problem_mapping.sql b/apps/dataset/sql/list_problem_mapping.sql
new file mode 100644
index 00000000000..8c8ac3c3005
--- /dev/null
+++ b/apps/dataset/sql/list_problem_mapping.sql
@@ -0,0 +1,2 @@
+SELECT "problem"."content",problem_paragraph_mapping.paragraph_id FROM problem problem
+LEFT JOIN problem_paragraph_mapping problem_paragraph_mapping ON problem_paragraph_mapping.problem_id=problem."id"
\ No newline at end of file
diff --git a/apps/dataset/sql/update_document_char_length.sql b/apps/dataset/sql/update_document_char_length.sql
index a09c8cabba1..2781809b23d 100644
--- a/apps/dataset/sql/update_document_char_length.sql
+++ b/apps/dataset/sql/update_document_char_length.sql
@@ -1,4 +1,8 @@
UPDATE "document"
-SET "char_length" = ( SELECT "sum" ( "char_length" ( "content" ) ) FROM paragraph WHERE "document_id" = %s )
+SET "char_length" = ( SELECT CASE WHEN
+ "sum" ( "char_length" ( "content" ) ) IS NULL THEN
+ 0 ELSE "sum" ( "char_length" ( "content" ) )
+ END FROM paragraph WHERE "document_id" = %s ),
+ "update_time" = CURRENT_TIMESTAMP
WHERE
"id" = %s
\ No newline at end of file
diff --git a/apps/dataset/sql/update_document_status_meta.sql b/apps/dataset/sql/update_document_status_meta.sql
new file mode 100644
index 00000000000..6065931ff75
--- /dev/null
+++ b/apps/dataset/sql/update_document_status_meta.sql
@@ -0,0 +1,25 @@
+UPDATE "document" "document"
+SET status_meta = jsonb_set ( "document".status_meta, '{aggs}', tmp.status_meta )
+FROM
+ (
+ SELECT COALESCE
+ ( jsonb_agg ( jsonb_delete ( ( row_to_json ( record ) :: JSONB ), 'document_id' ) ), '[]' :: JSONB ) AS status_meta,
+ document_id AS document_id
+ FROM
+ (
+ SELECT
+ "paragraph".status,
+ "count" ( "paragraph"."id" ),
+ "document"."id" AS document_id
+ FROM
+ "document" "document"
+ LEFT JOIN "paragraph" "paragraph" ON "document"."id" = paragraph.document_id
+ ${document_custom_sql}
+ GROUP BY
+ "paragraph".status,
+ "document"."id"
+ ) record
+ GROUP BY
+ document_id
+ ) tmp
+WHERE "document".id="tmp".document_id
\ No newline at end of file
diff --git a/apps/dataset/sql/update_paragraph_status.sql b/apps/dataset/sql/update_paragraph_status.sql
new file mode 100644
index 00000000000..1e2fc6f0864
--- /dev/null
+++ b/apps/dataset/sql/update_paragraph_status.sql
@@ -0,0 +1,13 @@
+UPDATE "${table_name}"
+SET status = reverse (
+ SUBSTRING ( reverse ( LPAD( status, ${bit_number}, 'n' ) ) :: TEXT FROM 1 FOR ${up_index} ) || ${status_number} || SUBSTRING ( reverse ( LPAD( status, ${bit_number}, 'n' ) ) :: TEXT FROM ${next_index} )
+),
+status_meta = jsonb_set (
+ "${table_name}".status_meta,
+ '{state_time,${current_index}}',
+ jsonb_set (
+ COALESCE ( "${table_name}".status_meta #> '{state_time,${current_index}}', jsonb_build_object ( '${status_number}', '${current_time}' ) ),
+ '{${status_number}}',
+ CONCAT ( '"', '${current_time}', '"' ) :: JSONB
+ )
+ )
\ No newline at end of file
diff --git a/apps/dataset/swagger_api/document_api.py b/apps/dataset/swagger_api/document_api.py
index 637a7e5098a..eeac9cdc8c6 100644
--- a/apps/dataset/swagger_api/document_api.py
+++ b/apps/dataset/swagger_api/document_api.py
@@ -9,6 +9,7 @@
from drf_yasg import openapi
from common.mixins.api_mixin import ApiMixin
+from django.utils.translation import gettext_lazy as _
class DocumentApi(ApiMixin):
@@ -19,10 +20,48 @@ def get_request_body_api():
type=openapi.TYPE_OBJECT,
properties={
'id_list': openapi.Schema(type=openapi.TYPE_ARRAY, items=openapi.Schema(type=openapi.TYPE_STRING),
- title="主键id列表",
- description="主键id列表"),
- 'hit_handling_method': openapi.Schema(type=openapi.TYPE_STRING, title="命中处理方式",
+ title=_('id list'),
+ description=_('id list')),
+ 'hit_handling_method': openapi.Schema(type=openapi.TYPE_STRING, title=_('hit handling method'),
description="directly_return|optimization"),
- 'directly_return_similarity': openapi.Schema(type=openapi.TYPE_NUMBER, title="直接返回相似度")
+ 'directly_return_similarity': openapi.Schema(type=openapi.TYPE_NUMBER, title=_('directly return similarity'))
+ }
+ )
+
+ class Cancel(ApiMixin):
+ @staticmethod
+ def get_request_body_api():
+ return openapi.Schema(
+ type=openapi.TYPE_OBJECT,
+ properties={
+ 'type': openapi.Schema(type=openapi.TYPE_INTEGER, title=_('task type'),
+ description=_('1|2|3 1:Vectorization|2:Generate issues|3:Synchronize documents'))
+ }
+ )
+
+ class BatchCancel(ApiMixin):
+ @staticmethod
+ def get_request_body_api():
+ return openapi.Schema(
+ type=openapi.TYPE_OBJECT,
+ properties={
+ 'id_list': openapi.Schema(type=openapi.TYPE_ARRAY, items=openapi.Schema(type=openapi.TYPE_STRING),
+ title=_('id list'),
+ description=_('id list')),
+ 'type': openapi.Schema(type=openapi.TYPE_INTEGER, title=_('task type'),
+ description=_('1|2|3 1:Vectorization|2:Generate issues|3:Synchronize documents'), default=1)
+ }
+ )
+
+ class EmbeddingState(ApiMixin):
+ @staticmethod
+ def get_request_body_api():
+ return openapi.Schema(
+ type=openapi.TYPE_OBJECT,
+ properties={
+ 'state_list': openapi.Schema(type=openapi.TYPE_ARRAY,
+ items=openapi.Schema(type=openapi.TYPE_STRING),
+ title=_('state list'),
+ description=_('state list'))
}
)
diff --git a/apps/dataset/swagger_api/image_api.py b/apps/dataset/swagger_api/image_api.py
index f69b94719f2..f2124cced88 100644
--- a/apps/dataset/swagger_api/image_api.py
+++ b/apps/dataset/swagger_api/image_api.py
@@ -9,6 +9,7 @@
from drf_yasg import openapi
from common.mixins.api_mixin import ApiMixin
+from django.utils.translation import gettext_lazy as _
class ImageApi(ApiMixin):
@@ -18,5 +19,5 @@ def get_request_params_api():
in_=openapi.IN_FORM,
type=openapi.TYPE_FILE,
required=True,
- description='上传图片文件')
+ description=_('image file'))
]
diff --git a/apps/dataset/swagger_api/problem_api.py b/apps/dataset/swagger_api/problem_api.py
index a7397aaaff5..da0256b35b6 100644
--- a/apps/dataset/swagger_api/problem_api.py
+++ b/apps/dataset/swagger_api/problem_api.py
@@ -9,6 +9,7 @@
from drf_yasg import openapi
from common.mixins.api_mixin import ApiMixin
+from django.utils.translation import gettext_lazy as _
class ProblemApi(ApiMixin):
@@ -20,22 +21,52 @@ def get_response_body_api():
properties={
'id': openapi.Schema(type=openapi.TYPE_STRING, title="id",
description="id", default="xx"),
- 'content': openapi.Schema(type=openapi.TYPE_STRING, title="问题内容",
- description="问题内容", default='问题内容'),
- 'hit_num': openapi.Schema(type=openapi.TYPE_INTEGER, title="命中数量", description="命中数量",
+ 'content': openapi.Schema(type=openapi.TYPE_STRING, title=_('content'),
+ description=_('content'), default=_('content')),
+ 'hit_num': openapi.Schema(type=openapi.TYPE_INTEGER, title=_('hit num'), description=_('hit num'),
default=1),
- 'dataset_id': openapi.Schema(type=openapi.TYPE_STRING, title="知识库id",
- description="知识库id", default='xxx'),
- 'update_time': openapi.Schema(type=openapi.TYPE_STRING, title="修改时间",
- description="修改时间",
+ 'dataset_id': openapi.Schema(type=openapi.TYPE_STRING, title=_('dataset id'),
+ description=_('dataset id'), default='xxx'),
+ 'update_time': openapi.Schema(type=openapi.TYPE_STRING, title=_('update time'),
+ description=_('update time'),
default="1970-01-01 00:00:00"),
- 'create_time': openapi.Schema(type=openapi.TYPE_STRING, title="创建时间",
- description="创建时间",
+ 'create_time': openapi.Schema(type=openapi.TYPE_STRING, title=_('create time'),
+ description=_('create time'),
default="1970-01-01 00:00:00"
)
}
)
+ class BatchAssociation(ApiMixin):
+ @staticmethod
+ def get_request_params_api():
+ return ProblemApi.BatchOperate.get_request_params_api()
+
+ @staticmethod
+ def get_request_body_api():
+ return openapi.Schema(
+ type=openapi.TYPE_OBJECT,
+ required=['problem_id_list'],
+ properties={
+ 'problem_id_list': openapi.Schema(type=openapi.TYPE_ARRAY, title=_('problem id list'),
+ description=_('problem id list'),
+ items=openapi.Schema(type=openapi.TYPE_STRING)),
+ 'paragraph_list': openapi.Schema(type=openapi.TYPE_ARRAY, title=_('Associated paragraph information list'),
+ description=_('Associated paragraph information list'),
+ items=openapi.Schema(type=openapi.TYPE_OBJECT,
+ required=['paragraph_id', 'document_id'],
+ properties={
+ 'paragraph_id': openapi.Schema(
+ type=openapi.TYPE_STRING,
+ title=_('paragraph id')),
+ 'document_id': openapi.Schema(
+ type=openapi.TYPE_STRING,
+ title=_('document id'))
+ }))
+
+ }
+ )
+
class BatchOperate(ApiMixin):
@staticmethod
def get_request_params_api():
@@ -43,14 +74,14 @@ def get_request_params_api():
in_=openapi.IN_PATH,
type=openapi.TYPE_STRING,
required=True,
- description='知识库id'),
+ description=_('dataset id')),
]
@staticmethod
def get_request_body_api():
return openapi.Schema(
- title="问题id列表",
- description="问题id列表",
+ title=_('problem id list'),
+ description=_('problem id list'),
type=openapi.TYPE_ARRAY,
items=openapi.Schema(type=openapi.TYPE_STRING)
)
@@ -62,12 +93,12 @@ def get_request_params_api():
in_=openapi.IN_PATH,
type=openapi.TYPE_STRING,
required=True,
- description='知识库id'),
+ description=_('dataset id')),
openapi.Parameter(name='problem_id',
in_=openapi.IN_PATH,
type=openapi.TYPE_STRING,
required=True,
- description='问题id')]
+ description=_('problem id'))]
@staticmethod
def get_request_body_api():
@@ -75,8 +106,8 @@ def get_request_body_api():
type=openapi.TYPE_OBJECT,
required=['content'],
properties={
- 'content': openapi.Schema(type=openapi.TYPE_STRING, title="问题内容",
- description="问题内容"),
+ 'content': openapi.Schema(type=openapi.TYPE_STRING, title=_('content'),
+ description=_('content')),
}
)
@@ -92,17 +123,17 @@ def get_response_body_api():
type=openapi.TYPE_OBJECT,
required=['content'],
properties={
- 'content': openapi.Schema(type=openapi.TYPE_STRING, max_length=4096, title="分段内容",
- description="分段内容"),
- 'title': openapi.Schema(type=openapi.TYPE_STRING, max_length=256, title="分段标题",
- description="分段标题"),
- 'is_active': openapi.Schema(type=openapi.TYPE_BOOLEAN, title="是否可用", description="是否可用"),
- 'hit_num': openapi.Schema(type=openapi.TYPE_NUMBER, title="命中次数", description="命中次数"),
- 'update_time': openapi.Schema(type=openapi.TYPE_STRING, title="修改时间",
- description="修改时间",
+ 'content': openapi.Schema(type=openapi.TYPE_STRING, max_length=4096, title=_('content'),
+ description=_('content')),
+ 'title': openapi.Schema(type=openapi.TYPE_STRING, max_length=256, title=_('Section title'),
+ description=_('Section title')),
+ 'is_active': openapi.Schema(type=openapi.TYPE_BOOLEAN, title=_('Is active'), description=_('Is active')),
+ 'hit_num': openapi.Schema(type=openapi.TYPE_NUMBER, title=_('Hit num'), description=_('Hit num')),
+ 'update_time': openapi.Schema(type=openapi.TYPE_STRING, title=_('update time'),
+ description=_('update time'),
default="1970-01-01 00:00:00"),
- 'create_time': openapi.Schema(type=openapi.TYPE_STRING, title="创建时间",
- description="创建时间",
+ 'create_time': openapi.Schema(type=openapi.TYPE_STRING, title=_('create time'),
+ description=_('create time'),
default="1970-01-01 00:00:00"
),
}
@@ -115,12 +146,12 @@ def get_request_params_api():
in_=openapi.IN_PATH,
type=openapi.TYPE_STRING,
required=True,
- description='知识库id'),
+ description=_('dataset id')),
openapi.Parameter(name='content',
in_=openapi.IN_QUERY,
type=openapi.TYPE_STRING,
required=False,
- description='问题')]
+ description=_('content')),]
class BatchCreate(ApiMixin):
@staticmethod
@@ -135,7 +166,7 @@ def get_request_params_api():
class Create(ApiMixin):
@staticmethod
def get_request_body_api():
- return openapi.Schema(type=openapi.TYPE_STRING, description="问题文本")
+ return openapi.Schema(type=openapi.TYPE_STRING, description=_('content'), title=_('content'))
@staticmethod
def get_request_params_api():
@@ -143,4 +174,4 @@ def get_request_params_api():
in_=openapi.IN_PATH,
type=openapi.TYPE_STRING,
required=True,
- description='知识库id')]
+ description=_('dataset id'))]
diff --git a/apps/dataset/task/__init__.py b/apps/dataset/task/__init__.py
new file mode 100644
index 00000000000..7bb1839d3aa
--- /dev/null
+++ b/apps/dataset/task/__init__.py
@@ -0,0 +1,10 @@
+# coding=utf-8
+"""
+ @project: MaxKB
+ @Author:虎
+ @file: __init__.py
+ @date:2024/8/21 9:57
+ @desc:
+"""
+from .sync import *
+from .generate import *
diff --git a/apps/dataset/task/generate.py b/apps/dataset/task/generate.py
new file mode 100644
index 00000000000..53b0c71ff06
--- /dev/null
+++ b/apps/dataset/task/generate.py
@@ -0,0 +1,139 @@
+import logging
+import traceback
+
+from celery_once import QueueOnce
+from django.db.models import QuerySet
+from django.db.models.functions import Reverse, Substr
+from langchain_core.messages import HumanMessage
+
+from common.config.embedding_config import ModelManage
+from common.event import ListenerManagement
+from common.util.page_utils import page, page_desc
+from dataset.models import Paragraph, Document, Status, TaskType, State
+from dataset.task.tools import save_problem
+from ops import celery_app
+from setting.models import Model
+from setting.models_provider import get_model
+from django.utils.translation import gettext_lazy as _
+
+max_kb_error = logging.getLogger("max_kb_error")
+max_kb = logging.getLogger("max_kb")
+
+
+def get_llm_model(model_id):
+ model = QuerySet(Model).filter(id=model_id).first()
+ return ModelManage.get_model(model_id, lambda _id: get_model(model))
+
+
+def generate_problem_by_paragraph(paragraph, llm_model, prompt):
+ try:
+ ListenerManagement.update_status(QuerySet(Paragraph).filter(id=paragraph.id), TaskType.GENERATE_PROBLEM,
+ State.STARTED)
+ res = llm_model.invoke(
+ [HumanMessage(content=prompt.replace('{data}', paragraph.content).replace('{title}', paragraph.title))])
+ if (res.content is None) or (len(res.content) == 0):
+ return
+ problems = res.content.split('\n')
+ for problem in problems:
+ save_problem(paragraph.dataset_id, paragraph.document_id, paragraph.id, problem)
+ ListenerManagement.update_status(QuerySet(Paragraph).filter(id=paragraph.id), TaskType.GENERATE_PROBLEM,
+ State.SUCCESS)
+ except Exception as e:
+ ListenerManagement.update_status(QuerySet(Paragraph).filter(id=paragraph.id), TaskType.GENERATE_PROBLEM,
+ State.FAILURE)
+
+
+def get_generate_problem(llm_model, prompt, post_apply=lambda: None, is_the_task_interrupted=lambda: False):
+ def generate_problem(paragraph_list):
+ for paragraph in paragraph_list:
+ if is_the_task_interrupted():
+ return
+ generate_problem_by_paragraph(paragraph, llm_model, prompt)
+ post_apply()
+
+ return generate_problem
+
+
+def get_is_the_task_interrupted(document_id):
+ def is_the_task_interrupted():
+ document = QuerySet(Document).filter(id=document_id).first()
+ if document is None or Status(document.status)[TaskType.GENERATE_PROBLEM] == State.REVOKE:
+ return True
+ return False
+
+ return is_the_task_interrupted
+
+
+@celery_app.task(base=QueueOnce, once={'keys': ['dataset_id']},
+ name='celery:generate_related_by_dataset')
+def generate_related_by_dataset_id(dataset_id, model_id, prompt, state_list=None):
+ document_list = QuerySet(Document).filter(dataset_id=dataset_id)
+ for document in document_list:
+ try:
+ generate_related_by_document_id.delay(document.id, model_id, prompt, state_list)
+ except Exception as e:
+ pass
+
+
+@celery_app.task(base=QueueOnce, once={'keys': ['document_id']},
+ name='celery:generate_related_by_document')
+def generate_related_by_document_id(document_id, model_id, prompt, state_list=None):
+ if state_list is None:
+ state_list = [State.PENDING.value, State.STARTED.value, State.SUCCESS.value, State.FAILURE.value,
+ State.REVOKE.value,
+ State.REVOKED.value, State.IGNORED.value]
+ try:
+ is_the_task_interrupted = get_is_the_task_interrupted(document_id)
+ if is_the_task_interrupted():
+ return
+ ListenerManagement.update_status(QuerySet(Document).filter(id=document_id),
+ TaskType.GENERATE_PROBLEM,
+ State.STARTED)
+ llm_model = get_llm_model(model_id)
+
+ # 生成问题函数
+ generate_problem = get_generate_problem(llm_model, prompt,
+ ListenerManagement.get_aggregation_document_status(
+ document_id), is_the_task_interrupted)
+ query_set = QuerySet(Paragraph).annotate(
+ reversed_status=Reverse('status'),
+ task_type_status=Substr('reversed_status', TaskType.GENERATE_PROBLEM.value,
+ 1),
+ ).filter(task_type_status__in=state_list, document_id=document_id)
+ page_desc(query_set, 10, generate_problem, is_the_task_interrupted)
+ except Exception as e:
+ # removed duplicate untranslated log; the translated message below logs the same error
+ max_kb_error.error(_('Generate issue based on document: {document_id} error {error}{traceback}').format(
+ document_id=document_id, error=str(e), traceback=traceback.format_exc()))
+ finally:
+ ListenerManagement.post_update_document_status(document_id, TaskType.GENERATE_PROBLEM)
+ max_kb.info(_('End--->Generate problem: {document_id}').format(document_id=document_id))
+
+
+@celery_app.task(base=QueueOnce, once={'keys': ['paragraph_id_list']},
+ name='celery:generate_related_by_paragraph_list')
+def generate_related_by_paragraph_id_list(document_id, paragraph_id_list, model_id, prompt):
+ try:
+ is_the_task_interrupted = get_is_the_task_interrupted(document_id)
+ if is_the_task_interrupted():
+ ListenerManagement.update_status(QuerySet(Document).filter(id=document_id),
+ TaskType.GENERATE_PROBLEM,
+ State.REVOKED)
+ return
+ ListenerManagement.update_status(QuerySet(Document).filter(id=document_id),
+ TaskType.GENERATE_PROBLEM,
+ State.STARTED)
+ llm_model = get_llm_model(model_id)
+ # 生成问题函数
+ generate_problem = get_generate_problem(llm_model, prompt, ListenerManagement.get_aggregation_document_status(
+ document_id))
+
+ def is_the_task_interrupted():
+ document = QuerySet(Document).filter(id=document_id).first()
+ if document is None or Status(document.status)[TaskType.GENERATE_PROBLEM] == State.REVOKE:
+ return True
+ return False
+
+ page(QuerySet(Paragraph).filter(id__in=paragraph_id_list), 10, generate_problem, is_the_task_interrupted)
+ finally:
+ ListenerManagement.post_update_document_status(document_id, TaskType.GENERATE_PROBLEM)
diff --git a/apps/dataset/task/sync.py b/apps/dataset/task/sync.py
new file mode 100644
index 00000000000..16add2db1e4
--- /dev/null
+++ b/apps/dataset/task/sync.py
@@ -0,0 +1,61 @@
+# coding=utf-8
+"""
+ @project: MaxKB
+ @Author:虎
+ @file: sync.py
+ @date:2024/8/20 21:37
+ @desc:
+"""
+
+import logging
+import traceback
+from typing import List
+
+from celery_once import QueueOnce
+
+from common.util.fork import ForkManage, Fork
+from dataset.task.tools import get_save_handler, get_sync_web_document_handler, get_sync_handler
+
+from ops import celery_app
+from django.utils.translation import gettext_lazy as _
+
+max_kb_error = logging.getLogger("max_kb_error")
+max_kb = logging.getLogger("max_kb")
+
+
+@celery_app.task(base=QueueOnce, once={'keys': ['dataset_id']}, name='celery:sync_web_dataset')
+def sync_web_dataset(dataset_id: str, url: str, selector: str):
+ try:
+ max_kb.info(_('Start--->Start synchronization web knowledge base:{dataset_id}').format(dataset_id=dataset_id))
+ ForkManage(url, selector.split(" ") if selector is not None else []).fork(2, set(),
+ get_save_handler(dataset_id,
+ selector))
+
+ max_kb.info(_('End--->End synchronization web knowledge base:{dataset_id}').format(dataset_id=dataset_id))
+ except Exception as e:
+ max_kb_error.error(_('Synchronize web knowledge base:{dataset_id} error{error}{traceback}').format(
+ dataset_id=dataset_id, error=str(e), traceback=traceback.format_exc()))
+
+
+@celery_app.task(base=QueueOnce, once={'keys': ['dataset_id']}, name='celery:sync_replace_web_dataset')
+def sync_replace_web_dataset(dataset_id: str, url: str, selector: str):
+ try:
+ max_kb.info(_('Start--->Start synchronization web knowledge base:{dataset_id}').format(dataset_id=dataset_id))
+ ForkManage(url, selector.split(" ") if selector is not None else []).fork(2, set(),
+ get_sync_handler(dataset_id
+ ))
+ max_kb.info(_('End--->End synchronization web knowledge base:{dataset_id}').format(dataset_id=dataset_id))
+ except Exception as e:
+ max_kb_error.error(_('Synchronize web knowledge base:{dataset_id} error{error}{traceback}').format(
+ dataset_id=dataset_id, error=str(e), traceback=traceback.format_exc()))
+
+
+@celery_app.task(name='celery:sync_web_document')
+def sync_web_document(dataset_id, source_url_list: List[str], selector: str):
+ handler = get_sync_web_document_handler(dataset_id)
+ for source_url in source_url_list:
+ try:
+ result = Fork(base_fork_url=source_url, selector_list=selector.split(' ')).fork()
+ handler(source_url, selector, result)
+ except Exception as e:
+ pass
diff --git a/apps/dataset/task/tools.py b/apps/dataset/task/tools.py
new file mode 100644
index 00000000000..84d3ac8d35f
--- /dev/null
+++ b/apps/dataset/task/tools.py
@@ -0,0 +1,114 @@
+# coding=utf-8
+"""
+ @project: MaxKB
+ @Author:虎
+ @file: tools.py
+ @date:2024/8/20 21:48
+ @desc:
+"""
+
+import logging
+import re
+import traceback
+
+from django.db.models import QuerySet
+
+from common.util.fork import ChildLink, Fork
+from common.util.split_model import get_split_model
+from dataset.models import Type, Document, DataSet, Status
+from django.utils.translation import gettext_lazy as _
+
+max_kb_error = logging.getLogger("max_kb_error")
+max_kb = logging.getLogger("max_kb")
+
+
+def get_save_handler(dataset_id, selector):
+ from dataset.serializers.document_serializers import DocumentSerializers
+
+ def handler(child_link: ChildLink, response: Fork.Response):
+ if response.status == 200:
+ try:
+ document_name = child_link.tag.text if child_link.tag is not None and len(
+ child_link.tag.text.strip()) > 0 else child_link.url
+ paragraphs = get_split_model('web.md').parse(response.content)
+ DocumentSerializers.Create(data={'dataset_id': dataset_id}).save(
+ {'name': document_name, 'paragraphs': paragraphs,
+ 'meta': {'source_url': child_link.url, 'selector': selector},
+ 'type': Type.web}, with_valid=True)
+ except Exception as e:
+ logging.getLogger("max_kb_error").error(f'{str(e)}:{traceback.format_exc()}')
+
+ return handler
+
+
+def get_sync_handler(dataset_id):
+ from dataset.serializers.document_serializers import DocumentSerializers
+ dataset = QuerySet(DataSet).filter(id=dataset_id).first()
+
+ def handler(child_link: ChildLink, response: Fork.Response):
+ if response.status == 200:
+ try:
+
+ document_name = child_link.tag.text if child_link.tag is not None and len(
+ child_link.tag.text.strip()) > 0 else child_link.url
+ paragraphs = get_split_model('web.md').parse(response.content)
+ first = QuerySet(Document).filter(meta__source_url=child_link.url.strip(),
+ dataset=dataset).first()
+ if first is not None:
+ # 如果存在,使用文档同步
+ DocumentSerializers.Sync(data={'document_id': first.id}).sync()
+ else:
+ # 插入
+ DocumentSerializers.Create(data={'dataset_id': dataset.id}).save(
+ {'name': document_name, 'paragraphs': paragraphs,
+ 'meta': {'source_url': child_link.url.strip(), 'selector': dataset.meta.get('selector')},
+ 'type': Type.web}, with_valid=True)
+ except Exception as e:
+ logging.getLogger("max_kb_error").error(f'{str(e)}:{traceback.format_exc()}')
+
+ return handler
+
+
+def get_sync_web_document_handler(dataset_id):
+ from dataset.serializers.document_serializers import DocumentSerializers
+
+ def handler(source_url: str, selector, response: Fork.Response):
+ if response.status == 200:
+ try:
+ paragraphs = get_split_model('web.md').parse(response.content)
+ # 插入
+ DocumentSerializers.Create(data={'dataset_id': dataset_id}).save(
+ {'name': source_url[0:128], 'paragraphs': paragraphs,
+ 'meta': {'source_url': source_url, 'selector': selector},
+ 'type': Type.web}, with_valid=True)
+ except Exception as e:
+ logging.getLogger("max_kb_error").error(f'{str(e)}:{traceback.format_exc()}')
+ else:
+ Document(name=source_url[0:128],
+ dataset_id=dataset_id,
+ meta={'source_url': source_url, 'selector': selector},
+ type=Type.web,
+ char_length=0,
+ status=Status.error).save()
+
+ return handler
+
+
+def save_problem(dataset_id, document_id, paragraph_id, problem):
+ from dataset.serializers.paragraph_serializers import ParagraphSerializers
+ # print(f"dataset_id: {dataset_id}")
+ # print(f"document_id: {document_id}")
+ # print(f"paragraph_id: {paragraph_id}")
+ # print(f"problem: {problem}")
+ problem = re.sub(r"^\d+\.\s*", "", problem)
+ pattern = r"(.*?) "
+ match = re.search(pattern, problem)
+ problem = match.group(1) if match else None
+ if problem is None or len(problem) == 0:
+ return
+ try:
+ ParagraphSerializers.Problem(
+ data={"dataset_id": dataset_id, 'document_id': document_id,
+ 'paragraph_id': paragraph_id}).save(instance={"content": problem}, with_valid=True)
+ except Exception as e:
+ max_kb_error.error(_('Association problem failed {error}').format(error=str(e)))
diff --git a/apps/dataset/template/csv_template_en.csv b/apps/dataset/template/csv_template_en.csv
new file mode 100644
index 00000000000..7a036c37e6d
--- /dev/null
+++ b/apps/dataset/template/csv_template_en.csv
@@ -0,0 +1,5 @@
+Section title (optional),"Section content (required, question answer)","Question (optional, one per line in the cell)"
+MaxKB product introduction,"MaxKB is a knowledge base question-answering system based on the LLM large language model. MaxKB = Max Knowledge Base, aims to become the most powerful brain of the enterprise. Out-of-the-box: supports direct document upload, automatic crawling of online documents, automatic text splitting and vectorization, and good intelligent question-answering interactive experience; Seamless embedding: supports zero-coding and rapid embedding into third-party business systems; Multi-model support: supports docking with mainstream large models, including Ollama local private large models (such as Llama 2, Llama 3, qwen), Tongyi Qianwen, OpenAI, Azure OpenAI, Kimi, Zhipu AI, iFlytek Spark and Baidu Qianfan large models, etc.","What is MaxKB?
+MaxKB product introduction
+Large language model supported by MaxKB
+MaxKB advantages"
\ No newline at end of file
diff --git a/apps/dataset/template/csv_template_zh.csv b/apps/dataset/template/csv_template_zh.csv
new file mode 100644
index 00000000000..e9d9d8c3d32
--- /dev/null
+++ b/apps/dataset/template/csv_template_zh.csv
@@ -0,0 +1,8 @@
+分段标题(选填),分段内容(必填,问题答案),问题(选填,单元格内一行一个)
+MaxKB产品介绍,"MaxKB 是一款基于 LLM 大语言模型的知识库问答系统。MaxKB = Max Knowledge Base,旨在成为企业的最强大脑。
+开箱即用:支持直接上传文档、自动爬取在线文档,支持文本自动拆分、向量化,智能问答交互体验好;
+无缝嵌入:支持零编码快速嵌入到第三方业务系统;
+多模型支持:支持对接主流的大模型,包括 Ollama 本地私有大模型(如 Llama 2、Llama 3、qwen)、通义千问、OpenAI、Azure OpenAI、Kimi、智谱 AI、讯飞星火和百度千帆大模型等。","MaxKB是什么?
+MaxKB产品介绍
+MaxKB支持的大语言模型
+MaxKB优势"
diff --git a/apps/dataset/template/csv_template_zh_Hant.csv b/apps/dataset/template/csv_template_zh_Hant.csv
new file mode 100644
index 00000000000..62eadb9947c
--- /dev/null
+++ b/apps/dataset/template/csv_template_zh_Hant.csv
@@ -0,0 +1,8 @@
+分段標題(選填),分段內容(必填,問題答案),問題(選填,單元格內一行一個)
+MaxKB產品介紹,"MaxKB 是一款基於 LLM 大語言模型的知識庫問答系統。MaxKB = Max Knowledge Base,旨在成為企業的最強大腦。
+開箱即用:支援直接上傳文檔、自動爬取線上文檔,支援文字自動分割、向量化,智慧問答互動體驗好;
+無縫嵌入:支援零編碼快速嵌入到第三方業務系統;
+多模型支援:支持對接主流的大模型,包括Ollama 本地私有大模型(如Llama 2、Llama 3、qwen)、通義千問、OpenAI、Azure OpenAI、Kimi、智譜AI、訊飛星火和百度千帆大模型等。","MaxKB是什麼?
+MaxKB產品介紹
+MaxKB支援的大語言模型
+MaxKB優勢"
\ No newline at end of file
diff --git a/apps/dataset/template/excel_template_en.xlsx b/apps/dataset/template/excel_template_en.xlsx
new file mode 100644
index 00000000000..26800ea0692
Binary files /dev/null and b/apps/dataset/template/excel_template_en.xlsx differ
diff --git a/apps/dataset/template/excel_template_zh.xlsx b/apps/dataset/template/excel_template_zh.xlsx
new file mode 100644
index 00000000000..fd896c18b67
Binary files /dev/null and b/apps/dataset/template/excel_template_zh.xlsx differ
diff --git a/apps/dataset/template/excel_template_zh_Hant.xlsx b/apps/dataset/template/excel_template_zh_Hant.xlsx
new file mode 100644
index 00000000000..5227b6963d0
Binary files /dev/null and b/apps/dataset/template/excel_template_zh_Hant.xlsx differ
diff --git a/apps/dataset/template/table_template_en.csv b/apps/dataset/template/table_template_en.csv
new file mode 100644
index 00000000000..bce374cd736
--- /dev/null
+++ b/apps/dataset/template/table_template_en.csv
@@ -0,0 +1,13 @@
+Position, Reimbursement type, First-tier city reimbursement standard (yuan), Second-tier city reimbursement standard (yuan), Third-tier city reimbursement standard (yuan)
+Ordinary employees, Accommodation expenses, 500, 400, 300
+Department head, Accommodation fee, 600, 500, 400
+Department director, Accommodation fee, 700, 600, 500
+Regional general manager, Accommodation fee, 800, 700, 600
+Ordinary employees, Food expenses, 50, 40, 30
+Department head, Food expenses, 50, 40, 30
+Department director, Food expenses, 50, 40, 30
+Regional general manager, Food expenses, 50, 40, 30
+Ordinary employees, Transportation expenses, 50, 40, 30
+Department head, Transportation expenses, 50, 40, 30
+Department director, Transportation expenses, 50, 40, 30
+Regional general manager, Transportation expenses, 50, 40, 30
\ No newline at end of file
diff --git a/apps/dataset/template/table_template_en.xlsx b/apps/dataset/template/table_template_en.xlsx
new file mode 100644
index 00000000000..1e445822664
Binary files /dev/null and b/apps/dataset/template/table_template_en.xlsx differ
diff --git a/apps/dataset/template/table_template_zh.csv b/apps/dataset/template/table_template_zh.csv
new file mode 100644
index 00000000000..7cf0f6306e0
--- /dev/null
+++ b/apps/dataset/template/table_template_zh.csv
@@ -0,0 +1,13 @@
+职务,报销类型,一线城市报销标准(元),二线城市报销标准(元),三线城市报销标准(元)
+普通员工,住宿费,500,400,300
+部门主管,住宿费,600,500,400
+部门总监,住宿费,700,600,500
+区域总经理,住宿费,800,700,600
+普通员工,伙食费,50,40,30
+部门主管,伙食费,50,40,30
+部门总监,伙食费,50,40,30
+区域总经理,伙食费,50,40,30
+普通员工,交通费,50,40,30
+部门主管,交通费,50,40,30
+部门总监,交通费,50,40,30
+区域总经理,交通费,50,40,30
diff --git a/apps/dataset/template/table_template_zh.xlsx b/apps/dataset/template/table_template_zh.xlsx
new file mode 100644
index 00000000000..2bc94a5b80d
Binary files /dev/null and b/apps/dataset/template/table_template_zh.xlsx differ
diff --git a/apps/dataset/template/table_template_zh_Hant.csv b/apps/dataset/template/table_template_zh_Hant.csv
new file mode 100644
index 00000000000..2e30ab49c33
--- /dev/null
+++ b/apps/dataset/template/table_template_zh_Hant.csv
@@ -0,0 +1,13 @@
+職務,報銷類型,一線城市報銷標準(元),二線城市報銷標準(元),三線城市報銷標準(元)
+普通員工,住宿費,500,400,300
+部門主管,住宿費,600,500,400
+部門總監,住宿費,700,600,500
+區域總經理,住宿費,800,700,600
+普通員工,伙食費,50,40,30
+部門主管,伙食費,50,40,30
+部門總監,伙食費,50,40,30
+區域總經理,伙食費,50,40,30
+普通員工,交通費,50,40,30
+部門主管,交通費,50,40,30
+部門總監,交通費,50,40,30
+區域總經理,交通費,50,40,30
\ No newline at end of file
diff --git a/apps/dataset/template/table_template_zh_Hant.xlsx b/apps/dataset/template/table_template_zh_Hant.xlsx
new file mode 100644
index 00000000000..53f34e4ed9d
Binary files /dev/null and b/apps/dataset/template/table_template_zh_Hant.xlsx differ
diff --git a/apps/dataset/urls.py b/apps/dataset/urls.py
index 237a81f5976..302b953ec36 100644
--- a/apps/dataset/urls.py
+++ b/apps/dataset/urls.py
@@ -6,16 +6,29 @@
urlpatterns = [
path('dataset', views.Dataset.as_view(), name="dataset"),
path('dataset/web', views.Dataset.CreateWebDataset.as_view()),
+ path('dataset/qa', views.Dataset.CreateQADataset.as_view()),
path('dataset/<str:dataset_id>', views.Dataset.Operate.as_view(), name="dataset_key"),
+ path('dataset/<str:dataset_id>/export', views.Dataset.Export.as_view(), name="export"),
+ path('dataset/<str:dataset_id>/export_zip', views.Dataset.ExportZip.as_view(), name="export_zip"),
+ path('dataset/<str:dataset_id>/re_embedding', views.Dataset.Embedding.as_view(), name="dataset_key"),
+ path('dataset/<str:dataset_id>/generate_related', views.Dataset.GenerateRelated.as_view(),
+ name="dataset_generate_related"),
path('dataset/<str:dataset_id>/application', views.Dataset.Application.as_view()),
path('dataset/<int:current_page>/<int:page_size>', views.Dataset.Page.as_view(), name="dataset"),
path('dataset/<str:dataset_id>/sync_web', views.Dataset.SyncWeb.as_view()),
path('dataset/<str:dataset_id>/hit_test', views.Dataset.HitTest.as_view()),
path('dataset/<str:dataset_id>/document', views.Document.as_view(), name='document'),
+ path('dataset/<str:dataset_id>/model', views.Dataset.Model.as_view()),
+ path('dataset/document/template/export', views.Template.as_view()),
+ path('dataset/document/table_template/export', views.TableTemplate.as_view()),
path('dataset/<str:dataset_id>/document/web', views.WebDocument.as_view()),
+ path('dataset/<str:dataset_id>/document/qa', views.QaDocument.as_view()),
+ path('dataset/<str:dataset_id>/document/table', views.TableDocument.as_view()),
path('dataset/<str:dataset_id>/document/_bach', views.Document.Batch.as_view()),
path('dataset/<str:dataset_id>/document/batch_hit_handling', views.Document.BatchEditHitHandling.as_view()),
path('dataset/<str:dataset_id>/document/<int:current_page>/<int:page_size>', views.Document.Page.as_view()),
+ path('dataset/<str:dataset_id>/document/batch_refresh', views.Document.BatchRefresh.as_view()),
+ path('dataset/<str:dataset_id>/document/batch_generate_related', views.Document.BatchGenerateRelated.as_view()),
path('dataset/<str:dataset_id>/document/<str:document_id>', views.Document.Operate.as_view(),
name="document_operate"),
path('dataset/document/split', views.Document.Split.as_view(),
@@ -23,14 +36,25 @@
path('dataset/document/split_pattern', views.Document.SplitPattern.as_view(),
name="document_operate"),
path('dataset/<str:dataset_id>/document/migrate/<str:target_dataset_id>', views.Document.Migrate.as_view()),
+ path('dataset/<str:dataset_id>/document/<str:document_id>/export', views.Document.Export.as_view(),
+ name="document_export"),
+ path('dataset/<str:dataset_id>/document/<str:document_id>/export_zip', views.Document.ExportZip.as_view(),
+ name="document_export"),
+ path('dataset/<str:dataset_id>/document/<str:document_id>/sync', views.Document.SyncWeb.as_view()),
path('dataset/<str:dataset_id>/document/<str:document_id>/refresh', views.Document.Refresh.as_view()),
+ path('dataset/<str:dataset_id>/document/<str:document_id>/cancel_task', views.Document.CancelTask.as_view()),
+ path('dataset/<str:dataset_id>/document/cancel_task/_batch',
+ views.Document.CancelTask.Batch.as_view()),
path('dataset/<str:dataset_id>/document/<str:document_id>/paragraph', views.Paragraph.as_view()),
+ path('dataset/<str:dataset_id>/document/batch_generate_related', views.Document.BatchGenerateRelated.as_view()),
path(
'dataset/<str:dataset_id>/document/<str:document_id>/paragraph/migrate/dataset/<str:target_dataset_id>/document/<str:target_document_id>',
views.Paragraph.BatchMigrate.as_view()),
path('dataset/<str:dataset_id>/document/<str:document_id>/paragraph/_batch', views.Paragraph.Batch.as_view()),
path('dataset/<str:dataset_id>/document/<str:document_id>/paragraph/<int:current_page>/<int:page_size>',
views.Paragraph.Page.as_view(), name='paragraph_page'),
+ path('dataset/<str:dataset_id>/document/<str:document_id>/paragraph/batch_generate_related',
+ views.Paragraph.BatchGenerateRelated.as_view()),
path('dataset/<str:dataset_id>/document/<str:document_id>/paragraph/<str:paragraph_id>',
views.Paragraph.Operate.as_view()),
path('dataset/<str:dataset_id>/document/<str:document_id>/paragraph/<str:paragraph_id>/problem',
@@ -47,5 +71,7 @@
path('dataset/<str:dataset_id>/problem/<str:problem_id>', views.Problem.Operate.as_view()),
path('dataset/<str:dataset_id>/problem/<str:problem_id>/paragraph', views.Problem.Paragraph.as_view()),
path('image/<str:image_id>', views.Image.Operate.as_view()),
- path('image', views.Image.as_view())
+ path('image', views.Image.as_view()),
+ path('file/<str:file_id>', views.FileView.Operate.as_view()),
+ path('file', views.FileView.as_view())
]
diff --git a/apps/dataset/views/__init__.py b/apps/dataset/views/__init__.py
index 6b2abcfb16c..e434cec8622 100644
--- a/apps/dataset/views/__init__.py
+++ b/apps/dataset/views/__init__.py
@@ -11,3 +11,4 @@
from .paragraph import *
from .problem import *
from .image import *
+from .file import *
diff --git a/apps/dataset/views/common.py b/apps/dataset/views/common.py
new file mode 100644
index 00000000000..6637426e349
--- /dev/null
+++ b/apps/dataset/views/common.py
@@ -0,0 +1,56 @@
+# coding=utf-8
+"""
+ @project: MaxKB
+ @Author:虎
+ @file: common.py.py
+ @date:2025/3/25 15:43
+ @desc:
+"""
+from django.db.models import QuerySet
+
+from dataset.models import DataSet, Document
+
+
+def get_dataset_operation_object(dataset_id: str):
+ dataset_model = QuerySet(model=DataSet).filter(id=dataset_id).first()
+ if dataset_model is not None:
+ return {
+ "name": dataset_model.name,
+ "desc": dataset_model.desc,
+ "type": dataset_model.type,
+ "create_time": dataset_model.create_time,
+ "update_time": dataset_model.update_time
+ }
+ return {}
+
+
+def get_document_operation_object(document_id: str):
+ document_model = QuerySet(model=Document).filter(id=document_id).first()
+ if document_model is not None:
+ return {
+ "name": document_model.name,
+ "type": document_model.type,
+ }
+ return {}
+
+
+def get_document_operation_object_batch(document_id_list: str):
+ document_model_list = QuerySet(model=Document).filter(id__in=document_id_list)
+ if document_model_list is not None:
+ return {
+ "name": f'[{",".join([document_model.name for document_model in document_model_list])}]',
+ 'document_list': [{'name': document_model.name, 'type': document_model.type} for document_model in
+ document_model_list]
+ }
+ return {}
+
+
+def get_dataset_document_operation_object(dataset_dict: dict, document_dict: dict):
+ return {
+ 'name': f'{dataset_dict.get("name", "")}/{document_dict.get("name", "")}',
+ 'dataset_name': dataset_dict.get("name", ""),
+ 'dataset_desc': dataset_dict.get("desc", ""),
+ 'dataset_type': dataset_dict.get("type", ""),
+ 'document_name': document_dict.get("name", ""),
+ 'document_type': document_dict.get("type", ""),
+ }
diff --git a/apps/dataset/views/dataset.py b/apps/dataset/views/dataset.py
index d3720977b2f..40d9a0c6514 100644
--- a/apps/dataset/views/dataset.py
+++ b/apps/dataset/views/dataset.py
@@ -9,16 +9,23 @@
from drf_yasg.utils import swagger_auto_schema
from rest_framework.decorators import action
+from rest_framework.parsers import MultiPartParser
from rest_framework.views import APIView
from rest_framework.views import Request
+import dataset.models
from common.auth import TokenAuth, has_permissions
from common.constants.permission_constants import PermissionConstants, CompareConstants, Permission, Group, Operate, \
ViewPermission, RoleConstants
+from common.log.log import log
from common.response import result
from common.response.result import get_page_request_params, get_page_api_response, get_api_response
from common.swagger_api.common_api import CommonApi
+from dataset.serializers.common_serializers import GenerateRelatedSerializer
from dataset.serializers.dataset_serializers import DataSetSerializers
+from dataset.views.common import get_dataset_operation_object
+from setting.serializers.provider_serializers import ModelSerializer
+from django.utils.translation import gettext_lazy as _
class Dataset(APIView):
@@ -28,34 +35,66 @@ class SyncWeb(APIView):
authentication_classes = [TokenAuth]
@action(methods=['PUT'], detail=False)
- @swagger_auto_schema(operation_summary="同步Web站点知识库",
- operation_id="同步Web站点知识库",
+ @swagger_auto_schema(operation_summary=_("Synchronize the knowledge base of the website"),
+ operation_id=_("Synchronize the knowledge base of the website"),
manual_parameters=DataSetSerializers.SyncWeb.get_request_params_api(),
responses=result.get_default_response(),
- tags=["知识库"])
+ tags=[_('Knowledge Base')])
@has_permissions(ViewPermission(
[RoleConstants.ADMIN, RoleConstants.USER],
[lambda r, keywords: Permission(group=Group.DATASET, operate=Operate.MANAGE,
dynamic_tag=keywords.get('dataset_id'))],
compare=CompareConstants.AND), PermissionConstants.DATASET_EDIT,
compare=CompareConstants.AND)
+ @log(menu='Knowledge Base', operate="Synchronize the knowledge base of the website",
+ get_operation_object=lambda r, keywords: get_dataset_operation_object(keywords.get('dataset_id')))
def put(self, request: Request, dataset_id: str):
return result.success(DataSetSerializers.SyncWeb(
data={'sync_type': request.query_params.get('sync_type'), 'id': dataset_id,
'user_id': str(request.user.id)}).sync())
+ class CreateQADataset(APIView):
+ authentication_classes = [TokenAuth]
+ parser_classes = [MultiPartParser]
+
+ @action(methods=['POST'], detail=False)
+ @swagger_auto_schema(operation_summary=_("Create QA knowledge base"),
+ operation_id=_("Create QA knowledge base"),
+ manual_parameters=DataSetSerializers.Create.CreateQASerializers.get_request_params_api(),
+ responses=get_api_response(
+ DataSetSerializers.Create.CreateQASerializers.get_response_body_api()),
+ tags=[_('Knowledge Base')]
+ )
+ @has_permissions(PermissionConstants.DATASET_CREATE, compare=CompareConstants.AND)
+ @log(menu='Knowledge Base', operate="Create QA knowledge base",
+ get_operation_object=lambda r, keywords: {'name': r.data.get('name'), 'desc': r.data.get('desc'),
+ 'file_list': r.FILES.getlist('file')})
+ def post(self, request: Request):
+ return result.success(DataSetSerializers.Create(data={'user_id': request.user.id}).save_qa({
+ 'file_list': request.FILES.getlist('file'),
+ 'name': request.data.get('name'),
+ 'desc': request.data.get('desc')
+ }))
+
class CreateWebDataset(APIView):
authentication_classes = [TokenAuth]
@action(methods=['POST'], detail=False)
- @swagger_auto_schema(operation_summary="创建web站点知识库",
- operation_id="创建web站点知识库",
+ @swagger_auto_schema(operation_summary=_('Create a web site knowledge base'),
+ operation_id=_('Create a web site knowledge base'),
request_body=DataSetSerializers.Create.CreateWebSerializers.get_request_body_api(),
responses=get_api_response(
DataSetSerializers.Create.CreateWebSerializers.get_response_body_api()),
- tags=["知识库"]
+ tags=[_('Knowledge Base')]
)
@has_permissions(PermissionConstants.DATASET_CREATE, compare=CompareConstants.AND)
+ @log(menu='Knowledge Base', operate="Create a web site knowledge base",
+ get_operation_object=lambda r, keywords: {'name': r.data.get('name'), 'desc': r.data.get('desc'),
+ 'file_list': r.FILES.getlist('file'),
+ 'meta': {'source_url': r.data.get('source_url'),
+ 'selector': r.data.get('selector'),
+ 'embedding_mode_id': r.data.get('embedding_mode_id')}}
+ )
def post(self, request: Request):
return result.success(DataSetSerializers.Create(data={'user_id': request.user.id}).save_web(request.data))
@@ -63,36 +102,39 @@ class Application(APIView):
authentication_classes = [TokenAuth]
@action(methods=['GET'], detail=False)
- @swagger_auto_schema(operation_summary="获取知识库可用应用列表",
- operation_id="获取知识库可用应用列表",
+ @swagger_auto_schema(operation_summary=_('Get a list of applications available in the knowledge base'),
+ operation_id=_('Get a list of applications available in the knowledge base'),
manual_parameters=DataSetSerializers.Application.get_request_params_api(),
responses=result.get_api_array_response(
DataSetSerializers.Application.get_response_body_api()),
- tags=["知识库"])
+ tags=[_('Knowledge Base')])
def get(self, request: Request, dataset_id: str):
return result.success(DataSetSerializers.Operate(
data={'id': dataset_id, 'user_id': str(request.user.id)}).list_application())
@action(methods=['GET'], detail=False)
- @swagger_auto_schema(operation_summary="获取知识库列表",
- operation_id="获取知识库列表",
+ @swagger_auto_schema(operation_summary=_('Get a list of knowledge bases'),
+ operation_id=_('Get a list of knowledge bases'),
manual_parameters=DataSetSerializers.Query.get_request_params_api(),
responses=result.get_api_array_response(DataSetSerializers.Query.get_response_body_api()),
- tags=["知识库"])
+ tags=[_('Knowledge Base')])
@has_permissions(PermissionConstants.DATASET_READ, compare=CompareConstants.AND)
def get(self, request: Request):
- d = DataSetSerializers.Query(data={**request.query_params, 'user_id': str(request.user.id)})
+ data = {key: str(value) for key, value in request.query_params.items()}
+ d = DataSetSerializers.Query(data={**data, 'user_id': str(request.user.id)})
d.is_valid()
return result.success(d.list())
@action(methods=['POST'], detail=False)
- @swagger_auto_schema(operation_summary="创建知识库",
- operation_id="创建知识库",
+ @swagger_auto_schema(operation_summary=_('Create a knowledge base'),
+ operation_id=_('Create a knowledge base'),
request_body=DataSetSerializers.Create.get_request_body_api(),
responses=get_api_response(DataSetSerializers.Create.get_response_body_api()),
- tags=["知识库"]
+ tags=[_('Knowledge Base')]
)
@has_permissions(PermissionConstants.DATASET_CREATE, compare=CompareConstants.AND)
+ @log(menu='Knowledge Base', operate="Create a knowledge base",
+ get_operation_object=lambda r, keywords: {'name': r.data.get('name'), 'desc': r.data.get('desc')})
def post(self, request: Request):
return result.success(DataSetSerializers.Create(data={'user_id': request.user.id}).save(request.data))
@@ -100,10 +142,10 @@ class HitTest(APIView):
authentication_classes = [TokenAuth]
@action(methods="GET", detail=False)
- @swagger_auto_schema(operation_summary="命中测试列表", operation_id="命中测试列表",
+ @swagger_auto_schema(operation_summary=_('Hit test list'), operation_id=_('Hit test list'),
manual_parameters=CommonApi.HitTestApi.get_request_params_api(),
responses=result.get_api_array_response(CommonApi.HitTestApi.get_response_body_api()),
- tags=["知识库"])
+ tags=[_('Knowledge Base')])
@has_permissions(lambda r, keywords: Permission(group=Group.DATASET, operate=Operate.USE,
dynamic_tag=keywords.get('dataset_id')))
def get(self, request: Request, dataset_id: str):
@@ -115,27 +157,96 @@ def get(self, request: Request, dataset_id: str):
'search_mode': request.query_params.get('search_mode')}).hit_test(
))
+ class Embedding(APIView):
+ authentication_classes = [TokenAuth]
+
+ @action(methods="PUT", detail=False)
+ @swagger_auto_schema(operation_summary=_('Re-vectorize'), operation_id=_('Re-vectorize'),
+ manual_parameters=DataSetSerializers.Operate.get_request_params_api(),
+ responses=result.get_default_response(),
+ tags=[_('Knowledge Base')]
+ )
+ @has_permissions(lambda r, keywords: Permission(group=Group.DATASET, operate=Operate.MANAGE,
+ dynamic_tag=keywords.get('dataset_id')))
+ @log(menu='Knowledge Base', operate="Re-vectorize",
+ get_operation_object=lambda r, keywords: get_dataset_operation_object(keywords.get('dataset_id')))
+ def put(self, request: Request, dataset_id: str):
+ return result.success(
+ DataSetSerializers.Operate(data={'id': dataset_id, 'user_id': request.user.id}).re_embedding())
+
+ class GenerateRelated(APIView):
+ authentication_classes = [TokenAuth]
+
+ @action(methods=['PUT'], detail=False)
+ @swagger_auto_schema(operation_summary=_('Generate related'), operation_id=_('Generate related'),
+ manual_parameters=DataSetSerializers.Operate.get_request_params_api(),
+ request_body=GenerateRelatedSerializer.get_request_body_api(),
+ responses=result.get_default_response(),
+ tags=[_('Knowledge Base')]
+ )
+ @log(menu='document', operate="Generate related documents",
+ get_operation_object=lambda r, keywords: get_dataset_operation_object(keywords.get('dataset_id'))
+ )
+ def put(self, request: Request, dataset_id: str):
+ return result.success(
+ DataSetSerializers.Operate(data={'id': dataset_id, 'user_id': request.user.id}).generate_related(
+ request.data))
+
+ class Export(APIView):
+ authentication_classes = [TokenAuth]
+
+ @action(methods="GET", detail=False)
+ @swagger_auto_schema(operation_summary=_('Export knowledge base'), operation_id=_('Export knowledge base'),
+ manual_parameters=DataSetSerializers.Operate.get_request_params_api(),
+ tags=[_('Knowledge Base')]
+ )
+ @has_permissions(lambda r, keywords: Permission(group=Group.DATASET, operate=Operate.MANAGE,
+ dynamic_tag=keywords.get('dataset_id')))
+ @log(menu='Knowledge Base', operate="Export knowledge base",
+ get_operation_object=lambda r, keywords: get_dataset_operation_object(keywords.get('dataset_id')))
+ def get(self, request: Request, dataset_id: str):
+ return DataSetSerializers.Operate(data={'id': dataset_id, 'user_id': request.user.id}).export_excel()
+
+ class ExportZip(APIView):
+ authentication_classes = [TokenAuth]
+
+ @action(methods="GET", detail=False)
+ @swagger_auto_schema(operation_summary=_('Export knowledge base containing images'),
+ operation_id=_('Export knowledge base containing images'),
+ manual_parameters=DataSetSerializers.Operate.get_request_params_api(),
+ tags=[_('Knowledge Base')]
+ )
+ @has_permissions(lambda r, keywords: Permission(group=Group.DATASET, operate=Operate.MANAGE,
+ dynamic_tag=keywords.get('dataset_id')))
+ @log(menu='Knowledge Base', operate="Export knowledge base containing images",
+ get_operation_object=lambda r, keywords: get_dataset_operation_object(keywords.get('dataset_id')))
+ def get(self, request: Request, dataset_id: str):
+ return DataSetSerializers.Operate(data={'id': dataset_id, 'user_id': request.user.id}).export_zip()
+
class Operate(APIView):
authentication_classes = [TokenAuth]
@action(methods="DELETE", detail=False)
- @swagger_auto_schema(operation_summary="删除知识库", operation_id="删除知识库",
+ @swagger_auto_schema(operation_summary=_('Delete knowledge base'), operation_id=_('Delete knowledge base'),
manual_parameters=DataSetSerializers.Operate.get_request_params_api(),
responses=result.get_default_response(),
- tags=["知识库"])
+ tags=[_('Knowledge Base')])
@has_permissions(lambda r, keywords: Permission(group=Group.DATASET, operate=Operate.MANAGE,
dynamic_tag=keywords.get('dataset_id')),
lambda r, k: Permission(group=Group.DATASET, operate=Operate.DELETE,
dynamic_tag=k.get('dataset_id')), compare=CompareConstants.AND)
+ @log(menu='Knowledge Base', operate="Delete knowledge base",
+ get_operation_object=lambda r, keywords: get_dataset_operation_object(keywords.get('dataset_id')))
def delete(self, request: Request, dataset_id: str):
operate = DataSetSerializers.Operate(data={'id': dataset_id})
return result.success(operate.delete())
@action(methods="GET", detail=False)
- @swagger_auto_schema(operation_summary="查询知识库详情根据知识库id", operation_id="查询知识库详情根据知识库id",
+ @swagger_auto_schema(operation_summary=_('Query knowledge base details based on knowledge base id'),
+ operation_id=_('Query knowledge base details based on knowledge base id'),
manual_parameters=DataSetSerializers.Operate.get_request_params_api(),
responses=get_api_response(DataSetSerializers.Operate.get_response_body_api()),
- tags=["知识库"])
+ tags=[_('Knowledge Base')])
@has_permissions(lambda r, keywords: Permission(group=Group.DATASET, operate=Operate.USE,
dynamic_tag=keywords.get('dataset_id')))
def get(self, request: Request, dataset_id: str):
@@ -143,14 +254,17 @@ def get(self, request: Request, dataset_id: str):
user_id=request.user.id))
@action(methods="PUT", detail=False)
- @swagger_auto_schema(operation_summary="修改知识库信息", operation_id="修改知识库信息",
+ @swagger_auto_schema(operation_summary=_('Modify knowledge base information'),
+ operation_id=_('Modify knowledge base information'),
manual_parameters=DataSetSerializers.Operate.get_request_params_api(),
request_body=DataSetSerializers.Operate.get_request_body_api(),
responses=get_api_response(DataSetSerializers.Operate.get_response_body_api()),
- tags=["知识库"]
+ tags=[_('Knowledge Base')]
)
@has_permissions(lambda r, keywords: Permission(group=Group.DATASET, operate=Operate.MANAGE,
dynamic_tag=keywords.get('dataset_id')))
+ @log(menu='Knowledge Base', operate="Modify knowledge base information",
+ get_operation_object=lambda r, keywords: get_dataset_operation_object(keywords.get('dataset_id')))
def put(self, request: Request, dataset_id: str):
return result.success(
DataSetSerializers.Operate(data={'id': dataset_id, 'user_id': request.user.id}).edit(request.data,
@@ -160,17 +274,34 @@ class Page(APIView):
authentication_classes = [TokenAuth]
@action(methods=['GET'], detail=False)
- @swagger_auto_schema(operation_summary="获取知识库分页列表",
- operation_id="获取知识库分页列表",
+ @swagger_auto_schema(operation_summary=_('Get the knowledge base paginated list'),
+ operation_id=_('Get the knowledge base paginated list'),
manual_parameters=get_page_request_params(
DataSetSerializers.Query.get_request_params_api()),
responses=get_page_api_response(DataSetSerializers.Query.get_response_body_api()),
- tags=["知识库"]
+ tags=[_('Knowledge Base')]
)
@has_permissions(PermissionConstants.DATASET_READ, compare=CompareConstants.AND)
def get(self, request: Request, current_page, page_size):
d = DataSetSerializers.Query(
data={'name': request.query_params.get('name', None), 'desc': request.query_params.get("desc", None),
- 'user_id': str(request.user.id)})
+ 'user_id': str(request.user.id),
+ 'select_user_id': request.query_params.get('select_user_id', None)})
d.is_valid()
return result.success(d.page(current_page, page_size))
+
+ class Model(APIView):
+ authentication_classes = [TokenAuth]
+
+ @action(methods=["GET"], detail=False)
+ @has_permissions(ViewPermission(
+ [RoleConstants.ADMIN, RoleConstants.USER],
+ [lambda r, keywords: Permission(group=Group.DATASET, operate=Operate.MANAGE,
+ dynamic_tag=keywords.get('dataset_id'))],
+ compare=CompareConstants.AND))
+ def get(self, request: Request, dataset_id: str):
+ return result.success(
+ ModelSerializer.Query(
+ data={'user_id': request.user.id, 'model_type': 'LLM'}).list(
+ with_valid=True)
+ )
diff --git a/apps/dataset/views/document.py b/apps/dataset/views/document.py
index a727a31fac7..05e741ad87a 100644
--- a/apps/dataset/views/document.py
+++ b/apps/dataset/views/document.py
@@ -7,6 +7,7 @@
@desc:
"""
+from django.utils.translation import gettext_lazy as _
from drf_yasg.utils import swagger_auto_schema
from rest_framework.decorators import action
from rest_framework.parsers import MultiPartParser
@@ -15,54 +16,140 @@
from common.auth import TokenAuth, has_permissions
from common.constants.permission_constants import Permission, Group, Operate, CompareConstants
+from common.log.log import log
from common.response import result
from common.util.common import query_params_to_single_dict
from dataset.serializers.common_serializers import BatchSerializer
from dataset.serializers.document_serializers import DocumentSerializers, DocumentWebInstanceSerializer
from dataset.swagger_api.document_api import DocumentApi
+from dataset.views.common import get_dataset_document_operation_object, get_dataset_operation_object, \
+ get_document_operation_object_batch, get_document_operation_object
+
+
+class Template(APIView):
+ authentication_classes = [TokenAuth]
+
+ @action(methods=['GET'], detail=False)
+ @swagger_auto_schema(operation_summary=_('Get QA template'),
+ operation_id=_('Get QA template'),
+ manual_parameters=DocumentSerializers.Export.get_request_params_api(),
+ tags=[_('Knowledge Base/Documentation')])
+ def get(self, request: Request):
+ return DocumentSerializers.Export(data={'type': request.query_params.get('type')}).export(with_valid=True)
+
+
+class TableTemplate(APIView):
+ authentication_classes = [TokenAuth]
+
+ @action(methods=['GET'], detail=False)
+ @swagger_auto_schema(operation_summary=_('Get form template'),
+ operation_id=_('Get form template'),
+ manual_parameters=DocumentSerializers.Export.get_request_params_api(),
+ tags=[_('Knowledge Base/Documentation')])
+ def get(self, request: Request):
+ return DocumentSerializers.Export(data={'type': request.query_params.get('type')}).table_export(with_valid=True)
class WebDocument(APIView):
authentication_classes = [TokenAuth]
@action(methods=['POST'], detail=False)
- @swagger_auto_schema(operation_summary="创建Web站点文档",
- operation_id="创建Web站点文档",
+ @swagger_auto_schema(operation_summary=_('Create Web site documents'),
+ operation_id=_('Create Web site documents'),
request_body=DocumentWebInstanceSerializer.get_request_body_api(),
manual_parameters=DocumentSerializers.Create.get_request_params_api(),
responses=result.get_api_response(DocumentSerializers.Operate.get_response_body_api()),
- tags=["知识库/文档"])
+ tags=[_('Knowledge Base/Documentation')])
@has_permissions(
lambda r, k: Permission(group=Group.DATASET, operate=Operate.MANAGE,
dynamic_tag=k.get('dataset_id')))
+ @log(menu='document', operate="Create Web site documents",
+ get_operation_object=lambda r, keywords: get_dataset_document_operation_object(
+ get_dataset_operation_object(keywords.get('dataset_id')),
+ {'name': f'[{",".join([url for url in r.data.get("source_url_list", [])])}]',
+ 'document_list': [{'name': url} for url in r.data.get("source_url_list", [])]}))
def post(self, request: Request, dataset_id: str):
return result.success(
DocumentSerializers.Create(data={'dataset_id': dataset_id}).save_web(request.data, with_valid=True))
+class QaDocument(APIView):
+ authentication_classes = [TokenAuth]
+ parser_classes = [MultiPartParser]
+
+ @action(methods=['POST'], detail=False)
+ @swagger_auto_schema(operation_summary=_('Import QA and create documentation'),
+ operation_id=_('Import QA and create documentation'),
+ manual_parameters=DocumentWebInstanceSerializer.get_request_params_api(),
+ responses=result.get_api_response(DocumentSerializers.Create.get_response_body_api()),
+ tags=[_('Knowledge Base/Documentation')])
+ @has_permissions(
+ lambda r, k: Permission(group=Group.DATASET, operate=Operate.MANAGE,
+ dynamic_tag=k.get('dataset_id')))
+ @log(menu='document', operate="Import QA and create documentation",
+ get_operation_object=lambda r, keywords: get_dataset_document_operation_object(
+ get_dataset_operation_object(keywords.get('dataset_id')),
+ {'name': f'[{",".join([file.name for file in r.FILES.getlist("file")])}]',
+ 'document_list': [{'name': file.name} for file in r.FILES.getlist("file")]}))
+ def post(self, request: Request, dataset_id: str):
+ return result.success(
+ DocumentSerializers.Create(data={'dataset_id': dataset_id}).save_qa(
+ {'file_list': request.FILES.getlist('file')},
+ with_valid=True))
+
+
+class TableDocument(APIView):
+ authentication_classes = [TokenAuth]
+ parser_classes = [MultiPartParser]
+
+ @action(methods=['POST'], detail=False)
+ @swagger_auto_schema(operation_summary=_('Import tables and create documents'),
+ operation_id=_('Import tables and create documents'),
+ manual_parameters=DocumentWebInstanceSerializer.get_request_params_api(),
+ responses=result.get_api_response(DocumentSerializers.Create.get_response_body_api()),
+ tags=[_('Knowledge Base/Documentation')])
+ @has_permissions(
+ lambda r, k: Permission(group=Group.DATASET, operate=Operate.MANAGE,
+ dynamic_tag=k.get('dataset_id')))
+ @log(menu='document', operate="Import tables and create documents",
+ get_operation_object=lambda r, keywords: get_dataset_document_operation_object(
+ get_dataset_operation_object(keywords.get('dataset_id')),
+ {'name': f'[{",".join([file.name for file in r.FILES.getlist("file")])}]',
+ 'document_list': [{'name': file.name} for file in r.FILES.getlist("file")]}))
+ def post(self, request: Request, dataset_id: str):
+ return result.success(
+ DocumentSerializers.Create(data={'dataset_id': dataset_id}).save_table(
+ {'file_list': request.FILES.getlist('file')},
+ with_valid=True))
+
+
class Document(APIView):
authentication_classes = [TokenAuth]
@action(methods=['POST'], detail=False)
- @swagger_auto_schema(operation_summary="创建文档",
- operation_id="创建文档",
+ @swagger_auto_schema(operation_summary=_('Create document'),
+ operation_id=_('Create document'),
request_body=DocumentSerializers.Create.get_request_body_api(),
manual_parameters=DocumentSerializers.Create.get_request_params_api(),
responses=result.get_api_response(DocumentSerializers.Operate.get_response_body_api()),
- tags=["知识库/文档"])
+ tags=[_('Knowledge Base/Documentation')])
@has_permissions(
lambda r, k: Permission(group=Group.DATASET, operate=Operate.MANAGE,
dynamic_tag=k.get('dataset_id')))
+ @log(menu='document', operate="Create document",
+ get_operation_object=lambda r, keywords: get_dataset_document_operation_object(
+ get_dataset_operation_object(keywords.get('dataset_id')),
+ {'name': r.data.get('name')}))
def post(self, request: Request, dataset_id: str):
return result.success(
DocumentSerializers.Create(data={'dataset_id': dataset_id}).save(request.data, with_valid=True))
@action(methods=['GET'], detail=False)
- @swagger_auto_schema(operation_summary="文档列表",
- operation_id="文档列表",
+ @swagger_auto_schema(operation_summary=_('Document list'),
+ operation_id=_('Document list'),
manual_parameters=DocumentSerializers.Query.get_request_params_api(),
responses=result.get_api_response(DocumentSerializers.Query.get_response_body_api()),
- tags=["知识库/文档"])
+ tags=[_('Knowledge Base/Documentation')])
@has_permissions(
lambda r, k: Permission(group=Group.DATASET, operate=Operate.USE,
dynamic_tag=k.get('dataset_id')))
@@ -76,16 +163,20 @@ class BatchEditHitHandling(APIView):
authentication_classes = [TokenAuth]
@action(methods=['POST'], detail=False)
- @swagger_auto_schema(operation_summary="批量修改文档命中处理方式",
- operation_id="批量修改文档命中处理方式",
+ @swagger_auto_schema(operation_summary=_('Modify document hit processing methods in batches'),
+ operation_id=_('Modify document hit processing methods in batches'),
request_body=
DocumentApi.BatchEditHitHandlingApi.get_request_body_api(),
manual_parameters=DocumentSerializers.Create.get_request_params_api(),
responses=result.get_default_response(),
- tags=["知识库/文档"])
+ tags=[_('Knowledge Base/Documentation')])
@has_permissions(
lambda r, k: Permission(group=Group.DATASET, operate=Operate.MANAGE,
dynamic_tag=k.get('dataset_id')))
+ @log(menu='document', operate="Modify document hit processing methods in batches",
+ get_operation_object=lambda r, keywords: get_dataset_document_operation_object(
+ get_dataset_operation_object(keywords.get('dataset_id')),
+ get_document_operation_object_batch(r.data.get('id_list'))))
def put(self, request: Request, dataset_id: str):
return result.success(
DocumentSerializers.Batch(data={'dataset_id': dataset_id}).batch_edit_hit_handling(request.data))
@@ -94,76 +185,195 @@ class Batch(APIView):
authentication_classes = [TokenAuth]
@action(methods=['POST'], detail=False)
- @swagger_auto_schema(operation_summary="批量创建文档",
- operation_id="批量创建文档",
+ @swagger_auto_schema(operation_summary=_('Create documents in batches'),
+ operation_id=_('Create documents in batches'),
request_body=
DocumentSerializers.Batch.get_request_body_api(),
manual_parameters=DocumentSerializers.Create.get_request_params_api(),
responses=result.get_api_array_response(
DocumentSerializers.Operate.get_response_body_api()),
- tags=["知识库/文档"])
+ tags=[_('Knowledge Base/Documentation')])
@has_permissions(
lambda r, k: Permission(group=Group.DATASET, operate=Operate.MANAGE,
dynamic_tag=k.get('dataset_id')))
+ @log(menu='document', operate="Create documents in batches",
+ get_operation_object=lambda r, keywords: get_dataset_document_operation_object(
+ get_dataset_operation_object(keywords.get('dataset_id')),
+ {'name': f'[{",".join([document.get("name") for document in r.data])}]',
+ 'document_list': r.data})
+ )
def post(self, request: Request, dataset_id: str):
return result.success(DocumentSerializers.Batch(data={'dataset_id': dataset_id}).batch_save(request.data))
@action(methods=['POST'], detail=False)
- @swagger_auto_schema(operation_summary="批量同步文档",
- operation_id="批量同步文档",
+ @swagger_auto_schema(operation_summary=_('Batch sync documents'),
+ operation_id=_('Batch sync documents'),
request_body=
BatchSerializer.get_request_body_api(),
manual_parameters=DocumentSerializers.Create.get_request_params_api(),
responses=result.get_default_response(),
- tags=["知识库/文档"])
+ tags=[_('Knowledge Base/Documentation')])
@has_permissions(
lambda r, k: Permission(group=Group.DATASET, operate=Operate.MANAGE,
dynamic_tag=k.get('dataset_id')))
+ @log(menu='document', operate="Batch sync documents",
+ get_operation_object=lambda r, keywords: get_dataset_document_operation_object(
+ get_dataset_operation_object(keywords.get('dataset_id')),
+ get_document_operation_object_batch(r.data.get('id_list')))
+ )
def put(self, request: Request, dataset_id: str):
return result.success(DocumentSerializers.Batch(data={'dataset_id': dataset_id}).batch_sync(request.data))
@action(methods=['DELETE'], detail=False)
- @swagger_auto_schema(operation_summary="批量删除文档",
- operation_id="批量删除文档",
+ @swagger_auto_schema(operation_summary=_('Delete documents in batches'),
+ operation_id=_('Delete documents in batches'),
request_body=
BatchSerializer.get_request_body_api(),
manual_parameters=DocumentSerializers.Create.get_request_params_api(),
responses=result.get_default_response(),
- tags=["知识库/文档"])
+ tags=[_('Knowledge Base/Documentation')])
@has_permissions(
lambda r, k: Permission(group=Group.DATASET, operate=Operate.MANAGE,
dynamic_tag=k.get('dataset_id')))
+ @log(menu='document', operate="Delete documents in batches",
+ get_operation_object=lambda r, keywords: get_dataset_document_operation_object(
+ get_dataset_operation_object(keywords.get('dataset_id')),
+ get_document_operation_object_batch(r.data.get('id_list'))))
def delete(self, request: Request, dataset_id: str):
return result.success(DocumentSerializers.Batch(data={'dataset_id': dataset_id}).batch_delete(request.data))
+ class SyncWeb(APIView):
+ authentication_classes = [TokenAuth]
+
+ @action(methods=['PUT'], detail=False)
+ @swagger_auto_schema(operation_summary=_('Synchronize web site types'),
+ operation_id=_('Synchronize web site types'),
+ manual_parameters=DocumentSerializers.Operate.get_request_params_api(),
+ responses=result.get_default_response(),
+ tags=[_('Knowledge Base/Documentation')]
+ )
+ @has_permissions(
+ lambda r, k: Permission(group=Group.DATASET, operate=Operate.MANAGE,
+ dynamic_tag=k.get('dataset_id')))
+ @log(menu='document', operate="Synchronize web site types",
+ get_operation_object=lambda r, keywords: get_dataset_document_operation_object(
+ get_dataset_operation_object(keywords.get('dataset_id')),
+ get_document_operation_object(keywords.get('document_id'))
+ ))
+ def put(self, request: Request, dataset_id: str, document_id: str):
+ return result.success(
+ DocumentSerializers.Sync(data={'document_id': document_id, 'dataset_id': dataset_id}).sync(
+ ))
+
+ class CancelTask(APIView):
+ authentication_classes = [TokenAuth]
+
+ @action(methods=['PUT'], detail=False)
+ @swagger_auto_schema(operation_summary=_('Cancel task'),
+ operation_id=_('Cancel task'),
+ manual_parameters=DocumentSerializers.Operate.get_request_params_api(),
+ request_body=DocumentApi.Cancel.get_request_body_api(),
+ responses=result.get_default_response(),
+ tags=[_('Knowledge Base/Documentation')]
+ )
+ @has_permissions(
+ lambda r, k: Permission(group=Group.DATASET, operate=Operate.MANAGE,
+ dynamic_tag=k.get('dataset_id')))
+ @log(menu='document', operate="Cancel task",
+ get_operation_object=lambda r, keywords: get_dataset_document_operation_object(
+ get_dataset_operation_object(keywords.get('dataset_id')),
+ get_document_operation_object(keywords.get('document_id'))
+ ))
+ def put(self, request: Request, dataset_id: str, document_id: str):
+ return result.success(
+ DocumentSerializers.Operate(data={'document_id': document_id, 'dataset_id': dataset_id}).cancel(
+ request.data
+ ))
+
+ class Batch(APIView):
+ authentication_classes = [TokenAuth]
+
+ @action(methods=['PUT'], detail=False)
+ @swagger_auto_schema(operation_summary=_('Cancel tasks in batches'),
+ operation_id=_('Cancel tasks in batches'),
+ request_body=DocumentApi.BatchCancel.get_request_body_api(),
+ manual_parameters=DocumentSerializers.Create.get_request_params_api(),
+ responses=result.get_default_response(),
+ tags=[_('Knowledge Base/Documentation')]
+ )
+ @has_permissions(
+ lambda r, k: Permission(group=Group.DATASET, operate=Operate.MANAGE,
+ dynamic_tag=k.get('dataset_id')))
+ @log(menu='document', operate="Cancel tasks in batches",
+ get_operation_object=lambda r, keywords: get_dataset_document_operation_object(
+ get_dataset_operation_object(keywords.get('dataset_id')),
+ get_document_operation_object_batch(r.data.get('id_list'))
+ )
+ )
+ def put(self, request: Request, dataset_id: str):
+ return result.success(
+ DocumentSerializers.Batch(data={'dataset_id': dataset_id}).batch_cancel(request.data))
+
class Refresh(APIView):
authentication_classes = [TokenAuth]
@action(methods=['PUT'], detail=False)
- @swagger_auto_schema(operation_summary="刷新文档向量库",
- operation_id="刷新文档向量库",
+ @swagger_auto_schema(operation_summary=_('Refresh document vector library'),
+ operation_id=_('Refresh document vector library'),
+ request_body=DocumentApi.EmbeddingState.get_request_body_api(),
manual_parameters=DocumentSerializers.Operate.get_request_params_api(),
responses=result.get_default_response(),
- tags=["知识库/文档"]
+ tags=[_('Knowledge Base/Documentation')]
)
@has_permissions(
lambda r, k: Permission(group=Group.DATASET, operate=Operate.MANAGE,
dynamic_tag=k.get('dataset_id')))
+ @log(menu='document', operate="Refresh document vector library",
+ get_operation_object=lambda r, keywords: get_dataset_document_operation_object(
+ get_dataset_operation_object(keywords.get('dataset_id')),
+ get_document_operation_object(keywords.get('document_id'))
+ )
+ )
def put(self, request: Request, dataset_id: str, document_id: str):
return result.success(
DocumentSerializers.Operate(data={'document_id': document_id, 'dataset_id': dataset_id}).refresh(
+ request.data.get('state_list')
))
+ class BatchRefresh(APIView):
+ authentication_classes = [TokenAuth]
+
+ @action(methods=['PUT'], detail=False)
+ @swagger_auto_schema(operation_summary=_('Batch refresh document vector library'),
+ operation_id=_('Batch refresh document vector library'),
+ request_body=
+ DocumentApi.BatchEditHitHandlingApi.get_request_body_api(),
+ manual_parameters=DocumentSerializers.Create.get_request_params_api(),
+ responses=result.get_default_response(),
+ tags=[_('Knowledge Base/Documentation')])
+ @has_permissions(
+ lambda r, k: Permission(group=Group.DATASET, operate=Operate.MANAGE,
+ dynamic_tag=k.get('dataset_id')))
+ @log(menu='document', operate="Batch refresh document vector library",
+ get_operation_object=lambda r, keywords: get_dataset_document_operation_object(
+ get_dataset_operation_object(keywords.get('dataset_id')),
+ get_document_operation_object_batch(r.data.get('id_list'))
+ )
+ )
+ def put(self, request: Request, dataset_id: str):
+ return result.success(
+ DocumentSerializers.Batch(data={'dataset_id': dataset_id}).batch_refresh(request.data))
+
class Migrate(APIView):
authentication_classes = [TokenAuth]
@action(methods=['PUT'], detail=False)
- @swagger_auto_schema(operation_summary="批量迁移文档",
- operation_id="批量迁移文档",
+ @swagger_auto_schema(operation_summary=_('Migrate documents in batches'),
+ operation_id=_('Migrate documents in batches'),
manual_parameters=DocumentSerializers.Migrate.get_request_params_api(),
request_body=DocumentSerializers.Migrate.get_request_body_api(),
responses=result.get_api_response(DocumentSerializers.Operate.get_response_body_api()),
- tags=["知识库/文档"]
+ tags=[_('Knowledge Base/Documentation')]
)
@has_permissions(
lambda r, k: Permission(group=Group.DATASET, operate=Operate.MANAGE,
@@ -172,6 +382,12 @@ class Migrate(APIView):
dynamic_tag=k.get('target_dataset_id')),
compare=CompareConstants.AND
)
+ @log(menu='document', operate="Migrate documents in batches",
+ get_operation_object=lambda r, keywords: get_dataset_document_operation_object(
+ get_dataset_operation_object(keywords.get('dataset_id')),
+ get_document_operation_object_batch(r.data)
+ )
+ )
def put(self, request: Request, dataset_id: str, target_dataset_id: str):
return result.success(
DocumentSerializers.Migrate(
@@ -180,15 +396,55 @@ def put(self, request: Request, dataset_id: str, target_dataset_id: str):
))
+ class Export(APIView):
+ authentication_classes = [TokenAuth]
+
+ @action(methods=['GET'], detail=False)
+ @swagger_auto_schema(operation_summary=_('Export document'),
+ operation_id=_('Export document'),
+ manual_parameters=DocumentSerializers.Operate.get_request_params_api(),
+ tags=[_('Knowledge Base/Documentation')])
+ @has_permissions(
+ lambda r, k: Permission(group=Group.DATASET, operate=Operate.MANAGE,
+ dynamic_tag=k.get('dataset_id')))
+ @log(menu='document', operate="Export document",
+ get_operation_object=lambda r, keywords: get_dataset_document_operation_object(
+ get_dataset_operation_object(keywords.get('dataset_id')),
+ get_document_operation_object(keywords.get('document_id'))
+ )
+ )
+ def get(self, request: Request, dataset_id: str, document_id: str):
+ return DocumentSerializers.Operate(data={'document_id': document_id, 'dataset_id': dataset_id}).export()
+
+ class ExportZip(APIView):
+ authentication_classes = [TokenAuth]
+
+ @action(methods=['GET'], detail=False)
+ @swagger_auto_schema(operation_summary=_('Export Zip document'),
+ operation_id=_('Export Zip document'),
+ manual_parameters=DocumentSerializers.Operate.get_request_params_api(),
+ tags=[_('Knowledge Base/Documentation')])
+ @has_permissions(
+ lambda r, k: Permission(group=Group.DATASET, operate=Operate.MANAGE,
+ dynamic_tag=k.get('dataset_id')))
+ @log(menu='document', operate="Export Zip document",
+ get_operation_object=lambda r, keywords: get_dataset_document_operation_object(
+ get_dataset_operation_object(keywords.get('dataset_id')),
+ get_document_operation_object(keywords.get('document_id'))
+ )
+ )
+ def get(self, request: Request, dataset_id: str, document_id: str):
+ return DocumentSerializers.Operate(data={'document_id': document_id, 'dataset_id': dataset_id}).export_zip()
+
class Operate(APIView):
authentication_classes = [TokenAuth]
@action(methods=['GET'], detail=False)
- @swagger_auto_schema(operation_summary="获取文档详情",
- operation_id="获取文档详情",
+ @swagger_auto_schema(operation_summary=_('Get document details'),
+ operation_id=_('Get document details'),
manual_parameters=DocumentSerializers.Operate.get_request_params_api(),
responses=result.get_api_response(DocumentSerializers.Operate.get_response_body_api()),
- tags=["知识库/文档"])
+ tags=[_('Knowledge Base/Documentation')])
@has_permissions(
lambda r, k: Permission(group=Group.DATASET, operate=Operate.USE,
dynamic_tag=k.get('dataset_id')))
@@ -198,16 +454,22 @@ def get(self, request: Request, dataset_id: str, document_id: str):
return result.success(operate.one())
@action(methods=['PUT'], detail=False)
- @swagger_auto_schema(operation_summary="修改文档",
- operation_id="修改文档",
+ @swagger_auto_schema(operation_summary=_('Modify document'),
+ operation_id=_('Modify document'),
manual_parameters=DocumentSerializers.Operate.get_request_params_api(),
request_body=DocumentSerializers.Operate.get_request_body_api(),
responses=result.get_api_response(DocumentSerializers.Operate.get_response_body_api()),
- tags=["知识库/文档"]
+ tags=[_('Knowledge Base/Documentation')]
)
@has_permissions(
lambda r, k: Permission(group=Group.DATASET, operate=Operate.MANAGE,
dynamic_tag=k.get('dataset_id')))
+ @log(menu='document', operate="Modify document",
+ get_operation_object=lambda r, keywords: get_dataset_document_operation_object(
+ get_dataset_operation_object(keywords.get('dataset_id')),
+ get_document_operation_object(keywords.get('document_id'))
+ )
+ )
def put(self, request: Request, dataset_id: str, document_id: str):
return result.success(
DocumentSerializers.Operate(data={'document_id': document_id, 'dataset_id': dataset_id}).edit(
@@ -215,37 +477,44 @@ def put(self, request: Request, dataset_id: str, document_id: str):
with_valid=True))
@action(methods=['DELETE'], detail=False)
- @swagger_auto_schema(operation_summary="删除文档",
- operation_id="删除文档",
+ @swagger_auto_schema(operation_summary=_('Delete document'),
+ operation_id=_('Delete document'),
manual_parameters=DocumentSerializers.Operate.get_request_params_api(),
responses=result.get_default_response(),
- tags=["知识库/文档"])
+ tags=[_('Knowledge Base/Documentation')])
@has_permissions(
lambda r, k: Permission(group=Group.DATASET, operate=Operate.MANAGE,
dynamic_tag=k.get('dataset_id')))
+ @log(menu='document', operate="Delete document",
+ get_operation_object=lambda r, keywords: get_dataset_document_operation_object(
+ get_dataset_operation_object(keywords.get('dataset_id')),
+ get_document_operation_object(keywords.get('document_id'))
+ )
+ )
def delete(self, request: Request, dataset_id: str, document_id: str):
operate = DocumentSerializers.Operate(data={'document_id': document_id, 'dataset_id': dataset_id})
operate.is_valid(raise_exception=True)
return result.success(operate.delete())
class SplitPattern(APIView):
+ authentication_classes = [TokenAuth]
+
@action(methods=['GET'], detail=False)
- @swagger_auto_schema(operation_summary="获取分段标识列表",
- operation_id="获取分段标识列表",
- tags=["知识库/文档"],
- security=[])
+ @swagger_auto_schema(operation_summary=_('Get a list of segment IDs'),
+ operation_id=_('Get a list of segment IDs'),
+ tags=[_('Knowledge Base/Documentation')])
def get(self, request: Request):
return result.success(DocumentSerializers.SplitPattern.list())
class Split(APIView):
+ authentication_classes = [TokenAuth]
parser_classes = [MultiPartParser]
@action(methods=['POST'], detail=False)
- @swagger_auto_schema(operation_summary="分段文档",
- operation_id="分段文档",
+ @swagger_auto_schema(operation_summary=_('Segmented document'),
+ operation_id=_('Segmented document'),
manual_parameters=DocumentSerializers.Split.get_request_params_api(),
- tags=["知识库/文档"],
- security=[])
+ tags=[_('Knowledge Base/Documentation')])
def post(self, request: Request):
split_data = {'file': request.FILES.getlist('file')}
request_data = request.data
@@ -265,11 +534,11 @@ class Page(APIView):
authentication_classes = [TokenAuth]
@action(methods=['GET'], detail=False)
- @swagger_auto_schema(operation_summary="获取知识库分页列表",
- operation_id="获取知识库分页列表",
+ @swagger_auto_schema(operation_summary=_('Get the knowledge base paginated list'),
+ operation_id=_('Get the knowledge base paginated list'),
manual_parameters=DocumentSerializers.Query.get_request_params_api(),
responses=result.get_page_api_response(DocumentSerializers.Query.get_response_body_api()),
- tags=["知识库/文档"])
+ tags=[_('Knowledge Base/Documentation')])
@has_permissions(
lambda r, k: Permission(group=Group.DATASET, operate=Operate.USE,
dynamic_tag=k.get('dataset_id')))
@@ -278,3 +547,20 @@ def get(self, request: Request, dataset_id: str, current_page, page_size):
data={**query_params_to_single_dict(request.query_params), 'dataset_id': dataset_id})
d.is_valid(raise_exception=True)
return result.success(d.page(current_page, page_size))
+
+ class BatchGenerateRelated(APIView):
+ authentication_classes = [TokenAuth]
+
+ @action(methods=['PUT'], detail=False)
+ @has_permissions(
+ lambda r, k: Permission(group=Group.DATASET, operate=Operate.MANAGE,
+ dynamic_tag=k.get('dataset_id')))
+ @log(menu='document', operate="Batch generate related documents",
+ get_operation_object=lambda r, keywords: get_dataset_document_operation_object(
+ get_dataset_operation_object(keywords.get('dataset_id')),
+ get_document_operation_object_batch(r.data.get('document_id_list'))
+ )
+ )
+ def put(self, request: Request, dataset_id: str):
+ return result.success(DocumentSerializers.BatchGenerateRelated(data={'dataset_id': dataset_id})
+ .batch_generate_related(request.data))
diff --git a/apps/dataset/views/file.py b/apps/dataset/views/file.py
new file mode 100644
index 00000000000..f2e1454bb6b
--- /dev/null
+++ b/apps/dataset/views/file.py
@@ -0,0 +1,46 @@
+# coding=utf-8
+"""
+ @project: maxkb
+ @Author:虎
+ @file: image.py
+ @date:2024/4/22 16:23
+ @desc:
+"""
+from drf_yasg import openapi
+from drf_yasg.utils import swagger_auto_schema
+from rest_framework.decorators import action
+from rest_framework.parsers import MultiPartParser
+from rest_framework.views import APIView
+from rest_framework.views import Request
+
+from common.auth import TokenAuth
+from common.log.log import log
+from common.response import result
+from dataset.serializers.file_serializers import FileSerializer
+from django.utils.translation import gettext_lazy as _
+
+
+class FileView(APIView):
+ authentication_classes = [TokenAuth]
+ parser_classes = [MultiPartParser]
+
+ @action(methods=['POST'], detail=False)
+ @swagger_auto_schema(operation_summary=_('Upload file'),
+ operation_id=_('Upload file'),
+ manual_parameters=[openapi.Parameter(name='file',
+ in_=openapi.IN_FORM,
+ type=openapi.TYPE_FILE,
+ required=True,
+ description=_('Upload file'))],
+ tags=[_('file')])
+ @log(menu='file', operate='Upload file')
+ def post(self, request: Request):
+ return result.success(FileSerializer(data={'file': request.FILES.get('file')}).upload())
+
+ class Operate(APIView):
+ @action(methods=['GET'], detail=False)
+ @swagger_auto_schema(operation_summary=_('Get file'),
+ operation_id=_('Get file'),
+ tags=[_('file')])
+ def get(self, request: Request, file_id: str):
+ return FileSerializer.Operate(data={'id': file_id}).get()
diff --git a/apps/dataset/views/image.py b/apps/dataset/views/image.py
index 124336f877b..a165dbc1bc5 100644
--- a/apps/dataset/views/image.py
+++ b/apps/dataset/views/image.py
@@ -14,8 +14,10 @@
from rest_framework.views import Request
from common.auth import TokenAuth
+from common.log.log import log
from common.response import result
from dataset.serializers.image_serializers import ImageSerializer
+from django.utils.translation import gettext_lazy as _
class Image(APIView):
@@ -23,21 +25,21 @@ class Image(APIView):
parser_classes = [MultiPartParser]
@action(methods=['POST'], detail=False)
- @swagger_auto_schema(operation_summary="上传图片",
- operation_id="上传图片",
+ @swagger_auto_schema(operation_summary=_('Upload image'),
+ operation_id=_('Upload image'),
manual_parameters=[openapi.Parameter(name='file',
in_=openapi.IN_FORM,
type=openapi.TYPE_FILE,
required=True,
- description='上传文件')],
- tags=["图片"])
+ description=_('Upload image'))],
+ tags=[_('Image')])
def post(self, request: Request):
return result.success(ImageSerializer(data={'image': request.FILES.get('file')}).upload())
class Operate(APIView):
@action(methods=['GET'], detail=False)
- @swagger_auto_schema(operation_summary="获取图片",
- operation_id="获取图片",
- tags=["图片"])
+ @swagger_auto_schema(operation_summary=_('Get Image'),
+ operation_id=_('Get Image'),
+ tags=[_('Image')])
def get(self, request: Request, image_id: str):
return ImageSerializer.Operate(data={'id': image_id}).get()
diff --git a/apps/dataset/views/paragraph.py b/apps/dataset/views/paragraph.py
index af968b8ab27..965f22f18bb 100644
--- a/apps/dataset/views/paragraph.py
+++ b/apps/dataset/views/paragraph.py
@@ -13,21 +13,26 @@
from common.auth import TokenAuth, has_permissions
from common.constants.permission_constants import Permission, Group, Operate, CompareConstants
+from common.log.log import log
from common.response import result
from common.util.common import query_params_to_single_dict
from dataset.serializers.common_serializers import BatchSerializer
from dataset.serializers.paragraph_serializers import ParagraphSerializers
+from django.utils.translation import gettext_lazy as _
+
+from dataset.views import get_dataset_document_operation_object, get_dataset_operation_object, \
+ get_document_operation_object
class Paragraph(APIView):
authentication_classes = [TokenAuth]
@action(methods=['GET'], detail=False)
- @swagger_auto_schema(operation_summary="段落列表",
- operation_id="段落列表",
+ @swagger_auto_schema(operation_summary=_('Paragraph list'),
+ operation_id=_('Paragraph list'),
manual_parameters=ParagraphSerializers.Query.get_request_params_api(),
responses=result.get_api_array_response(ParagraphSerializers.Query.get_response_body_api()),
- tags=["知识库/文档/段落"]
+ tags=[_('Knowledge Base/Documentation/Paragraph')]
)
@has_permissions(
lambda r, k: Permission(group=Group.DATASET, operate=Operate.USE,
@@ -40,15 +45,21 @@ def get(self, request: Request, dataset_id: str, document_id: str):
return result.success(q.list())
@action(methods=['POST'], detail=False)
- @swagger_auto_schema(operation_summary="创建段落",
- operation_id="创建段落",
+ @swagger_auto_schema(operation_summary=_('Create Paragraph'),
+ operation_id=_('Create Paragraph'),
manual_parameters=ParagraphSerializers.Create.get_request_params_api(),
request_body=ParagraphSerializers.Create.get_request_body_api(),
responses=result.get_api_response(ParagraphSerializers.Query.get_response_body_api()),
- tags=["知识库/文档/段落"])
+ tags=[_('Knowledge Base/Documentation/Paragraph')])
@has_permissions(
lambda r, k: Permission(group=Group.DATASET, operate=Operate.MANAGE,
dynamic_tag=k.get('dataset_id')))
+ @log(menu='Paragraph', operate='Create Paragraph',
+ get_operation_object=lambda r, keywords: get_dataset_document_operation_object(
+ get_dataset_operation_object(keywords.get('dataset_id')),
+ get_document_operation_object(keywords.get('document_id'))
+ )
+ )
def post(self, request: Request, dataset_id: str, document_id: str):
return result.success(
ParagraphSerializers.Create(data={'dataset_id': dataset_id, 'document_id': document_id}).save(request.data))
@@ -57,27 +68,33 @@ class Problem(APIView):
authentication_classes = [TokenAuth]
@action(methods=['POST'], detail=False)
- @swagger_auto_schema(operation_summary="添加关联问题",
- operation_id="添加段落关联问题",
+ @swagger_auto_schema(operation_summary=_('Add associated questions'),
+ operation_id=_('Add associated questions'),
manual_parameters=ParagraphSerializers.Problem.get_request_params_api(),
request_body=ParagraphSerializers.Problem.get_request_body_api(),
responses=result.get_api_response(ParagraphSerializers.Problem.get_response_body_api()),
- tags=["知识库/文档/段落"])
+ tags=[_('Knowledge Base/Documentation/Paragraph')])
@has_permissions(
lambda r, k: Permission(group=Group.DATASET, operate=Operate.MANAGE,
dynamic_tag=k.get('dataset_id')))
+ @log(menu='Paragraph', operate='Add associated questions',
+ get_operation_object=lambda r, keywords: get_dataset_document_operation_object(
+ get_dataset_operation_object(keywords.get('dataset_id')),
+ get_document_operation_object(keywords.get('document_id'))
+ )
+ )
def post(self, request: Request, dataset_id: str, document_id: str, paragraph_id: str):
return result.success(ParagraphSerializers.Problem(
data={"dataset_id": dataset_id, 'document_id': document_id, 'paragraph_id': paragraph_id}).save(
request.data, with_valid=True))
@action(methods=['GET'], detail=False)
- @swagger_auto_schema(operation_summary="获取段落问题列表",
- operation_id="获取段落问题列表",
+ @swagger_auto_schema(operation_summary=_('Get a list of paragraph questions'),
+ operation_id=_('Get a list of paragraph questions'),
manual_parameters=ParagraphSerializers.Problem.get_request_params_api(),
responses=result.get_api_array_response(
ParagraphSerializers.Problem.get_response_body_api()),
- tags=["知识库/文档/段落"])
+ tags=[_('Knowledge Base/Documentation/Paragraph')])
@has_permissions(
lambda r, k: Permission(group=Group.DATASET, operate=Operate.USE,
dynamic_tag=k.get('dataset_id')))
@@ -90,14 +107,20 @@ class UnAssociation(APIView):
authentication_classes = [TokenAuth]
@action(methods=['PUT'], detail=False)
- @swagger_auto_schema(operation_summary="解除关联问题",
- operation_id="解除关联问题",
+ @swagger_auto_schema(operation_summary=_('Disassociation issue'),
+ operation_id=_('Disassociation issue'),
manual_parameters=ParagraphSerializers.Association.get_request_params_api(),
responses=result.get_default_response(),
- tags=["知识库/文档/段落"])
+ tags=[_('Knowledge Base/Documentation/Paragraph')])
@has_permissions(
lambda r, k: Permission(group=Group.DATASET, operate=Operate.MANAGE,
dynamic_tag=k.get('dataset_id')))
+ @log(menu='Paragraph', operate='Disassociation issue',
+ get_operation_object=lambda r, keywords: get_dataset_document_operation_object(
+ get_dataset_operation_object(keywords.get('dataset_id')),
+ get_document_operation_object(keywords.get('document_id'))
+ )
+ )
def put(self, request: Request, dataset_id: str, document_id: str, paragraph_id: str, problem_id: str):
return result.success(ParagraphSerializers.Association(
data={'dataset_id': dataset_id, 'document_id': document_id, 'paragraph_id': paragraph_id,
@@ -107,14 +130,20 @@ class Association(APIView):
authentication_classes = [TokenAuth]
@action(methods=['PUT'], detail=False)
- @swagger_auto_schema(operation_summary="关联问题",
- operation_id="关联问题",
+ @swagger_auto_schema(operation_summary=_('Related questions'),
+ operation_id=_('Related questions'),
manual_parameters=ParagraphSerializers.Association.get_request_params_api(),
responses=result.get_default_response(),
- tags=["知识库/文档/段落"])
+ tags=[_('Knowledge Base/Documentation/Paragraph')])
@has_permissions(
lambda r, k: Permission(group=Group.DATASET, operate=Operate.MANAGE,
dynamic_tag=k.get('dataset_id')))
+ @log(menu='Paragraph', operate='Related questions',
+ get_operation_object=lambda r, keywords: get_dataset_document_operation_object(
+ get_dataset_operation_object(keywords.get('dataset_id')),
+ get_document_operation_object(keywords.get('document_id'))
+ )
+ )
def put(self, request: Request, dataset_id: str, document_id: str, paragraph_id: str, problem_id: str):
return result.success(ParagraphSerializers.Association(
data={'dataset_id': dataset_id, 'document_id': document_id, 'paragraph_id': paragraph_id,
@@ -124,15 +153,21 @@ class Operate(APIView):
authentication_classes = [TokenAuth]
@action(methods=['UPDATE'], detail=False)
- @swagger_auto_schema(operation_summary="修改段落数据",
- operation_id="修改段落数据",
+ @swagger_auto_schema(operation_summary=_('Modify paragraph data'),
+ operation_id=_('Modify paragraph data'),
manual_parameters=ParagraphSerializers.Operate.get_request_params_api(),
request_body=ParagraphSerializers.Operate.get_request_body_api(),
responses=result.get_api_response(ParagraphSerializers.Operate.get_response_body_api())
- , tags=["知识库/文档/段落"])
+ , tags=[_('Knowledge Base/Documentation/Paragraph')])
@has_permissions(
lambda r, k: Permission(group=Group.DATASET, operate=Operate.MANAGE,
dynamic_tag=k.get('dataset_id')))
+ @log(menu='Paragraph', operate='Modify paragraph data',
+ get_operation_object=lambda r, keywords: get_dataset_document_operation_object(
+ get_dataset_operation_object(keywords.get('dataset_id')),
+ get_document_operation_object(keywords.get('document_id'))
+ )
+ )
def put(self, request: Request, dataset_id: str, document_id: str, paragraph_id: str):
o = ParagraphSerializers.Operate(
data={"paragraph_id": paragraph_id, 'dataset_id': dataset_id, 'document_id': document_id})
@@ -140,11 +175,11 @@ def put(self, request: Request, dataset_id: str, document_id: str, paragraph_id:
return result.success(o.edit(request.data))
@action(methods=['UPDATE'], detail=False)
- @swagger_auto_schema(operation_summary="获取段落详情",
- operation_id="获取段落详情",
+ @swagger_auto_schema(operation_summary=_('Get paragraph details'),
+ operation_id=_('Get paragraph details'),
manual_parameters=ParagraphSerializers.Operate.get_request_params_api(),
responses=result.get_api_response(ParagraphSerializers.Operate.get_response_body_api()),
- tags=["知识库/文档/段落"])
+ tags=[_('Knowledge Base/Documentation/Paragraph')])
@has_permissions(
lambda r, k: Permission(group=Group.DATASET, operate=Operate.USE,
dynamic_tag=k.get('dataset_id')))
@@ -155,14 +190,20 @@ def get(self, request: Request, dataset_id: str, document_id: str, paragraph_id:
return result.success(o.one())
@action(methods=['DELETE'], detail=False)
- @swagger_auto_schema(operation_summary="删除段落",
- operation_id="删除段落",
+ @swagger_auto_schema(operation_summary=_('Delete paragraph'),
+ operation_id=_('Delete paragraph'),
manual_parameters=ParagraphSerializers.Operate.get_request_params_api(),
responses=result.get_default_response(),
- tags=["知识库/文档/段落"])
+ tags=[_('Knowledge Base/Documentation/Paragraph')])
@has_permissions(
lambda r, k: Permission(group=Group.DATASET, operate=Operate.MANAGE,
dynamic_tag=k.get('dataset_id')))
+ @log(menu='Paragraph', operate='Delete paragraph',
+ get_operation_object=lambda r, keywords: get_dataset_document_operation_object(
+ get_dataset_operation_object(keywords.get('dataset_id')),
+ get_document_operation_object(keywords.get('document_id'))
+ )
+ )
def delete(self, request: Request, dataset_id: str, document_id: str, paragraph_id: str):
o = ParagraphSerializers.Operate(
data={"dataset_id": dataset_id, 'document_id': document_id, "paragraph_id": paragraph_id})
@@ -173,16 +214,22 @@ class Batch(APIView):
authentication_classes = [TokenAuth]
@action(methods=['DELETE'], detail=False)
- @swagger_auto_schema(operation_summary="批量删除段落",
- operation_id="批量删除段落",
+ @swagger_auto_schema(operation_summary=_('Delete paragraphs in batches'),
+ operation_id=_('Delete paragraphs in batches'),
request_body=
BatchSerializer.get_request_body_api(),
manual_parameters=ParagraphSerializers.Create.get_request_params_api(),
responses=result.get_default_response(),
- tags=["知识库/文档/段落"])
+ tags=[_('Knowledge Base/Documentation/Paragraph')])
@has_permissions(
lambda r, k: Permission(group=Group.DATASET, operate=Operate.MANAGE,
dynamic_tag=k.get('dataset_id')))
+ @log(menu='Paragraph', operate='Delete paragraphs in batches',
+ get_operation_object=lambda r, keywords: get_dataset_document_operation_object(
+ get_dataset_operation_object(keywords.get('dataset_id')),
+ get_document_operation_object(keywords.get('document_id'))
+ )
+ )
def delete(self, request: Request, dataset_id: str, document_id: str):
return result.success(ParagraphSerializers.Batch(
data={"dataset_id": dataset_id, 'document_id': document_id}).batch_delete(request.data))
@@ -191,12 +238,12 @@ class BatchMigrate(APIView):
authentication_classes = [TokenAuth]
@action(methods=['PUT'], detail=False)
- @swagger_auto_schema(operation_summary="批量迁移段落",
- operation_id="批量迁移段落",
+ @swagger_auto_schema(operation_summary=_('Migrate paragraphs in batches'),
+ operation_id=_('Migrate paragraphs in batches'),
manual_parameters=ParagraphSerializers.Migrate.get_request_params_api(),
request_body=ParagraphSerializers.Migrate.get_request_body_api(),
responses=result.get_default_response(),
- tags=["知识库/文档/段落"]
+ tags=[_('Knowledge Base/Documentation/Paragraph')]
)
@has_permissions(
lambda r, k: Permission(group=Group.DATASET, operate=Operate.MANAGE,
@@ -205,6 +252,12 @@ class BatchMigrate(APIView):
dynamic_tag=k.get('target_dataset_id')),
compare=CompareConstants.AND
)
+ @log(menu='Paragraph', operate='Migrate paragraphs in batches',
+ get_operation_object=lambda r, keywords: get_dataset_document_operation_object(
+ get_dataset_operation_object(keywords.get('dataset_id')),
+ get_document_operation_object(keywords.get('document_id'))
+ )
+ )
def put(self, request: Request, dataset_id: str, target_dataset_id: str, document_id: str, target_document_id):
return result.success(
ParagraphSerializers.Migrate(
@@ -217,12 +270,12 @@ class Page(APIView):
authentication_classes = [TokenAuth]
@action(methods=['GET'], detail=False)
- @swagger_auto_schema(operation_summary="分页获取段落列表",
- operation_id="分页获取段落列表",
+ @swagger_auto_schema(operation_summary=_('Get paragraph list by pagination'),
+ operation_id=_('Get paragraph list by pagination'),
manual_parameters=result.get_page_request_params(
ParagraphSerializers.Query.get_request_params_api()),
responses=result.get_page_api_response(ParagraphSerializers.Query.get_response_body_api()),
- tags=["知识库/文档/段落"])
+ tags=[_('Knowledge Base/Documentation/Paragraph')])
@has_permissions(
lambda r, k: Permission(group=Group.DATASET, operate=Operate.USE,
dynamic_tag=k.get('dataset_id')))
@@ -232,3 +285,21 @@ def get(self, request: Request, dataset_id: str, document_id: str, current_page,
'document_id': document_id})
d.is_valid(raise_exception=True)
return result.success(d.page(current_page, page_size))
+
+ class BatchGenerateRelated(APIView):
+ authentication_classes = [TokenAuth]
+
+ @action(methods=['PUT'], detail=False)
+ @has_permissions(
+ lambda r, k: Permission(group=Group.DATASET, operate=Operate.MANAGE,
+ dynamic_tag=k.get('dataset_id')))
+ @log(menu='Paragraph', operate='Batch generate related',
+ get_operation_object=lambda r, keywords: get_dataset_document_operation_object(
+ get_dataset_operation_object(keywords.get('dataset_id')),
+ get_document_operation_object(keywords.get('document_id'))
+ )
+ )
+ def put(self, request: Request, dataset_id: str, document_id: str):
+ return result.success(
+ ParagraphSerializers.BatchGenerateRelated(data={'dataset_id': dataset_id, 'document_id': document_id})
+ .batch_generate_related(request.data))
diff --git a/apps/dataset/views/problem.py b/apps/dataset/views/problem.py
index beebcc67318..3619530f15f 100644
--- a/apps/dataset/views/problem.py
+++ b/apps/dataset/views/problem.py
@@ -13,21 +13,25 @@
from common.auth import TokenAuth, has_permissions
from common.constants.permission_constants import Permission, Group, Operate
+from common.log.log import log
from common.response import result
from common.util.common import query_params_to_single_dict
from dataset.serializers.problem_serializers import ProblemSerializers
from dataset.swagger_api.problem_api import ProblemApi
+from django.utils.translation import gettext_lazy as _
+
+from dataset.views import get_dataset_operation_object
class Problem(APIView):
authentication_classes = [TokenAuth]
@action(methods=['GET'], detail=False)
- @swagger_auto_schema(operation_summary="问题列表",
- operation_id="问题列表",
+ @swagger_auto_schema(operation_summary=_('Question list'),
+ operation_id=_('Question list'),
manual_parameters=ProblemApi.Query.get_request_params_api(),
responses=result.get_api_array_response(ProblemApi.get_response_body_api()),
- tags=["知识库/文档/段落/问题"]
+ tags=[_('Knowledge Base/Documentation/Paragraph/Question')]
)
@has_permissions(
lambda r, k: Permission(group=Group.DATASET, operate=Operate.USE,
@@ -39,15 +43,18 @@ def get(self, request: Request, dataset_id: str):
return result.success(q.list())
@action(methods=['POST'], detail=False)
- @swagger_auto_schema(operation_summary="创建问题",
- operation_id="创建问题",
+ @swagger_auto_schema(operation_summary=_('Create question'),
+ operation_id=_('Create question'),
manual_parameters=ProblemApi.BatchCreate.get_request_params_api(),
request_body=ProblemApi.BatchCreate.get_request_body_api(),
responses=result.get_api_response(ProblemApi.Query.get_response_body_api()),
- tags=["知识库/文档/段落/问题"])
+ tags=[_('Knowledge Base/Documentation/Paragraph/Question')])
@has_permissions(
lambda r, k: Permission(group=Group.DATASET, operate=Operate.MANAGE,
dynamic_tag=k.get('dataset_id')))
+ @log(menu='problem', operate='Create question',
+ get_operation_object=lambda r, keywords: get_dataset_operation_object(keywords.get('dataset_id'))
+ )
def post(self, request: Request, dataset_id: str):
return result.success(
ProblemSerializers.Create(
@@ -57,11 +64,11 @@ class Paragraph(APIView):
authentication_classes = [TokenAuth]
@action(methods=['GET'], detail=False)
- @swagger_auto_schema(operation_summary="获取关联段落列表",
- operation_id="获取关联段落列表",
+ @swagger_auto_schema(operation_summary=_('Get a list of associated paragraphs'),
+ operation_id=_('Get a list of associated paragraphs'),
manual_parameters=ProblemApi.Paragraph.get_request_params_api(),
responses=result.get_api_array_response(ProblemApi.Paragraph.get_response_body_api()),
- tags=["知识库/文档/段落/问题"])
+ tags=[_('Knowledge Base/Documentation/Paragraph/Question')])
@has_permissions(
lambda r, k: Permission(group=Group.DATASET, operate=Operate.USE,
dynamic_tag=k.get('dataset_id')))
@@ -74,47 +81,69 @@ class OperateBatch(APIView):
authentication_classes = [TokenAuth]
@action(methods=['DELETE'], detail=False)
- @swagger_auto_schema(operation_summary="批量删除问题",
- operation_id="批量删除问题",
+ @swagger_auto_schema(operation_summary=_('Batch deletion issues'),
+ operation_id=_('Batch deletion issues'),
request_body=
ProblemApi.BatchOperate.get_request_body_api(),
manual_parameters=ProblemApi.BatchOperate.get_request_params_api(),
responses=result.get_default_response(),
- tags=["知识库/文档/段落/问题"])
+ tags=[_('Knowledge Base/Documentation/Paragraph/Question')])
@has_permissions(
lambda r, k: Permission(group=Group.DATASET, operate=Operate.MANAGE,
dynamic_tag=k.get('dataset_id')))
+ @log(menu='problem', operate='Batch deletion issues',
+ get_operation_object=lambda r, keywords: get_dataset_operation_object(keywords.get('dataset_id')))
def delete(self, request: Request, dataset_id: str):
return result.success(
ProblemSerializers.BatchOperate(data={'dataset_id': dataset_id}).delete(request.data))
+ @action(methods=['POST'], detail=False)
+ @swagger_auto_schema(operation_summary=_('Batch associated paragraphs'),
+ operation_id=_('Batch associated paragraphs'),
+ request_body=ProblemApi.BatchAssociation.get_request_body_api(),
+ manual_parameters=ProblemApi.BatchOperate.get_request_params_api(),
+ responses=result.get_default_response(),
+ tags=[_('Knowledge Base/Documentation/Paragraph/Question')])
+ @has_permissions(
+ lambda r, k: Permission(group=Group.DATASET, operate=Operate.MANAGE,
+ dynamic_tag=k.get('dataset_id')))
+ @log(menu='problem', operate='Batch associated paragraphs',
+ get_operation_object=lambda r, keywords: get_dataset_operation_object(keywords.get('dataset_id')))
+ def post(self, request: Request, dataset_id: str):
+ return result.success(
+ ProblemSerializers.BatchOperate(data={'dataset_id': dataset_id}).association(request.data))
+
class Operate(APIView):
authentication_classes = [TokenAuth]
@action(methods=['DELETE'], detail=False)
- @swagger_auto_schema(operation_summary="删除问题",
- operation_id="删除问题",
+ @swagger_auto_schema(operation_summary=_('Delete question'),
+ operation_id=_('Delete question'),
manual_parameters=ProblemApi.Operate.get_request_params_api(),
responses=result.get_default_response(),
- tags=["知识库/文档/段落/问题"])
+ tags=[_('Knowledge Base/Documentation/Paragraph/Question')])
@has_permissions(
lambda r, k: Permission(group=Group.DATASET, operate=Operate.MANAGE,
dynamic_tag=k.get('dataset_id')))
+ @log(menu='problem', operate='Delete question',
+ get_operation_object=lambda r, keywords: get_dataset_operation_object(keywords.get('dataset_id')))
def delete(self, request: Request, dataset_id: str, problem_id: str):
return result.success(ProblemSerializers.Operate(
data={**query_params_to_single_dict(request.query_params), 'dataset_id': dataset_id,
'problem_id': problem_id}).delete())
@action(methods=['PUT'], detail=False)
- @swagger_auto_schema(operation_summary="修改问题",
- operation_id="修改问题",
+ @swagger_auto_schema(operation_summary=_('Modify question'),
+ operation_id=_('Modify question'),
manual_parameters=ProblemApi.Operate.get_request_params_api(),
request_body=ProblemApi.Operate.get_request_body_api(),
responses=result.get_api_response(ProblemApi.get_response_body_api()),
- tags=["知识库/文档/段落/问题"])
+ tags=[_('Knowledge Base/Documentation/Paragraph/Question')])
@has_permissions(
lambda r, k: Permission(group=Group.DATASET, operate=Operate.MANAGE,
dynamic_tag=k.get('dataset_id')))
+ @log(menu='problem', operate='Modify question',
+ get_operation_object=lambda r, keywords: get_dataset_operation_object(keywords.get('dataset_id')))
def put(self, request: Request, dataset_id: str, problem_id: str):
return result.success(ProblemSerializers.Operate(
data={**query_params_to_single_dict(request.query_params), 'dataset_id': dataset_id,
@@ -124,12 +153,12 @@ class Page(APIView):
authentication_classes = [TokenAuth]
@action(methods=['GET'], detail=False)
- @swagger_auto_schema(operation_summary="分页获取问题列表",
- operation_id="分页获取问题列表",
+ @swagger_auto_schema(operation_summary=_('Get the list of questions by page'),
+ operation_id=_('Get the list of questions by page'),
manual_parameters=result.get_page_request_params(
ProblemApi.Query.get_request_params_api()),
responses=result.get_page_api_response(ProblemApi.get_response_body_api()),
- tags=["知识库/文档/段落/问题"])
+ tags=[_('Knowledge Base/Documentation/Paragraph/Question')])
@has_permissions(
lambda r, k: Permission(group=Group.DATASET, operate=Operate.USE,
dynamic_tag=k.get('dataset_id')))
diff --git a/apps/embedding/migrations/0003_alter_embedding_unique_together.py b/apps/embedding/migrations/0003_alter_embedding_unique_together.py
new file mode 100644
index 00000000000..9cb45061bfa
--- /dev/null
+++ b/apps/embedding/migrations/0003_alter_embedding_unique_together.py
@@ -0,0 +1,17 @@
+# Generated by Django 4.2.14 on 2024-07-23 18:14
+
+from django.db import migrations
+
+
+class Migration(migrations.Migration):
+
+ dependencies = [
+ ('embedding', '0002_embedding_search_vector'),
+ ]
+
+ operations = [
+ migrations.AlterUniqueTogether(
+ name='embedding',
+ unique_together=set(),
+ ),
+ ]
diff --git a/apps/embedding/models/embedding.py b/apps/embedding/models/embedding.py
index 24c78f41f05..5f954e36b6e 100644
--- a/apps/embedding/models/embedding.py
+++ b/apps/embedding/models/embedding.py
@@ -50,4 +50,3 @@ class Embedding(models.Model):
class Meta:
db_table = "embedding"
- unique_together = ['source_id', 'source_type']
diff --git a/apps/embedding/task/__init__.py b/apps/embedding/task/__init__.py
new file mode 100644
index 00000000000..e5e7dd3b408
--- /dev/null
+++ b/apps/embedding/task/__init__.py
@@ -0,0 +1 @@
+from .embedding import *
diff --git a/apps/embedding/task/embedding.py b/apps/embedding/task/embedding.py
new file mode 100644
index 00000000000..3b26bd7a1db
--- /dev/null
+++ b/apps/embedding/task/embedding.py
@@ -0,0 +1,261 @@
+# coding=utf-8
+"""
+ @project: MaxKB
+ @Author:虎
+ @file: embedding.py
+ @date:2024/8/19 14:13
+ @desc:
+"""
+import logging
+import traceback
+from typing import List
+
+from celery_once import QueueOnce
+from django.db.models import QuerySet
+
+from common.config.embedding_config import ModelManage
+from common.event import ListenerManagement, UpdateProblemArgs, UpdateEmbeddingDatasetIdArgs, \
+ UpdateEmbeddingDocumentIdArgs
+from dataset.models import Document, TaskType, State
+from ops import celery_app
+from setting.models import Model
+from setting.models_provider import get_model
+from django.utils.translation import gettext_lazy as _
+
+max_kb_error = logging.getLogger("max_kb_error")
+max_kb = logging.getLogger("max_kb")
+
+
+def get_embedding_model(model_id, exception_handler=lambda e: max_kb_error.error(
+ _('Failed to obtain vector model: {error} {traceback}').format(
+ error=str(e),
+ traceback=traceback.format_exc()
+ ))):
+ try:
+ model = QuerySet(Model).filter(id=model_id).first()
+ embedding_model = ModelManage.get_model(model_id,
+ lambda _id: get_model(model))
+ except Exception as e:
+ exception_handler(e)
+ raise e
+ return embedding_model
+
+
+@celery_app.task(base=QueueOnce, once={'keys': ['paragraph_id']}, name='celery:embedding_by_paragraph')
+def embedding_by_paragraph(paragraph_id, model_id):
+ embedding_model = get_embedding_model(model_id)
+ ListenerManagement.embedding_by_paragraph(paragraph_id, embedding_model)
+
+
+@celery_app.task(base=QueueOnce, once={'keys': ['paragraph_id_list']}, name='celery:embedding_by_paragraph_data_list')
+def embedding_by_paragraph_data_list(data_list, paragraph_id_list, model_id):
+ embedding_model = get_embedding_model(model_id)
+ ListenerManagement.embedding_by_paragraph_data_list(data_list, paragraph_id_list, embedding_model)
+
+
+@celery_app.task(base=QueueOnce, once={'keys': ['paragraph_id_list']}, name='celery:embedding_by_paragraph_list')
+def embedding_by_paragraph_list(paragraph_id_list, model_id):
+ embedding_model = get_embedding_model(model_id)
+ ListenerManagement.embedding_by_paragraph_list(paragraph_id_list, embedding_model)
+
+
+@celery_app.task(base=QueueOnce, once={'keys': ['document_id']}, name='celery:embedding_by_document')
+def embedding_by_document(document_id, model_id, state_list=None):
+ """
+ 向量化文档
+ @param state_list:
+ @param document_id: 文档id
+ @param model_id 向量模型
+ :return: None
+ """
+
+ if state_list is None:
+ state_list = [State.PENDING.value, State.STARTED.value, State.SUCCESS.value, State.FAILURE.value,
+ State.REVOKE.value,
+ State.REVOKED.value, State.IGNORED.value]
+
+ def exception_handler(e):
+ ListenerManagement.update_status(QuerySet(Document).filter(id=document_id), TaskType.EMBEDDING,
+ State.FAILURE)
+ max_kb_error.error(
+ _('Failed to obtain vector model: {error} {traceback}').format(
+ error=str(e),
+ traceback=traceback.format_exc()
+ ))
+
+ embedding_model = get_embedding_model(model_id, exception_handler)
+ ListenerManagement.embedding_by_document(document_id, embedding_model, state_list)
+
+
+@celery_app.task(name='celery:embedding_by_document_list')
+def embedding_by_document_list(document_id_list, model_id):
+ """
+ 向量化文档
+ @param document_id_list: 文档id列表
+ @param model_id 向量模型
+ :return: None
+ """
+ for document_id in document_id_list:
+ embedding_by_document.delay(document_id, model_id)
+
+
+@celery_app.task(base=QueueOnce, once={'keys': ['dataset_id']}, name='celery:embedding_by_dataset')
+def embedding_by_dataset(dataset_id, model_id):
+ """
+ 向量化知识库
+ @param dataset_id: 知识库id
+ @param model_id 向量模型
+ :return: None
+ """
+ max_kb.info(_('Start--->Vectorized dataset: {dataset_id}').format(dataset_id=dataset_id))
+ try:
+ ListenerManagement.delete_embedding_by_dataset(dataset_id)
+ document_list = QuerySet(Document).filter(dataset_id=dataset_id)
+ max_kb.info(_('Dataset documentation: {document_names}').format(
+ document_names=", ".join([d.name for d in document_list])))
+ for document in document_list:
+ try:
+ embedding_by_document.delay(document.id, model_id)
+ except Exception as e:
+ pass
+ except Exception as e:
+ max_kb_error.error(
+ _('Vectorized dataset: {dataset_id} error {error} {traceback}'.format(dataset_id=dataset_id,
+ error=str(e),
+ traceback=traceback.format_exc())))
+ finally:
+ max_kb.info(_('End--->Vectorized dataset: {dataset_id}').format(dataset_id=dataset_id))
+
+
+def embedding_by_problem(args, model_id):
+ """
+ 向量话问题
+ @param args: 问题对象
+ @param model_id: 模型id
+ @return:
+ """
+ embedding_model = get_embedding_model(model_id)
+ ListenerManagement.embedding_by_problem(args, embedding_model)
+
+
+def embedding_by_data_list(args: List, model_id):
+ embedding_model = get_embedding_model(model_id)
+ ListenerManagement.embedding_by_data_list(args, embedding_model)
+
+
+def delete_embedding_by_document(document_id):
+ """
+ 删除指定文档id的向量
+ @param document_id: 文档id
+ @return: None
+ """
+
+ ListenerManagement.delete_embedding_by_document(document_id)
+
+
+def delete_embedding_by_document_list(document_id_list: List[str]):
+ """
+ 删除指定文档列表的向量数据
+ @param document_id_list: 文档id列表
+ @return: None
+ """
+ ListenerManagement.delete_embedding_by_document_list(document_id_list)
+
+
+def delete_embedding_by_dataset(dataset_id):
+ """
+ 删除指定数据集向量数据
+ @param dataset_id: 数据集id
+ @return: None
+ """
+ ListenerManagement.delete_embedding_by_dataset(dataset_id)
+
+
+def delete_embedding_by_paragraph(paragraph_id):
+ """
+ 删除指定段落的向量数据
+ @param paragraph_id: 段落id
+ @return: None
+ """
+ ListenerManagement.delete_embedding_by_paragraph(paragraph_id)
+
+
+def delete_embedding_by_source(source_id):
+ """
+ 删除指定资源id的向量数据
+ @param source_id: 资源id
+ @return: None
+ """
+ ListenerManagement.delete_embedding_by_source(source_id)
+
+
+def disable_embedding_by_paragraph(paragraph_id):
+ """
+ 禁用某个段落id的向量
+ @param paragraph_id: 段落id
+ @return: None
+ """
+ ListenerManagement.disable_embedding_by_paragraph(paragraph_id)
+
+
+def enable_embedding_by_paragraph(paragraph_id):
+ """
+ 开启某个段落id的向量数据
+ @param paragraph_id: 段落id
+ @return: None
+ """
+ ListenerManagement.enable_embedding_by_paragraph(paragraph_id)
+
+
+def delete_embedding_by_source_ids(source_ids: List[str]):
+ """
+ 删除向量根据source_id_list
+ @param source_ids:
+ @return:
+ """
+ ListenerManagement.delete_embedding_by_source_ids(source_ids)
+
+
+def update_problem_embedding(problem_id: str, problem_content: str, model_id):
+ """
+ 更新问题
+ @param problem_id:
+ @param problem_content:
+ @param model_id:
+ @return:
+ """
+ model = get_embedding_model(model_id)
+ ListenerManagement.update_problem(UpdateProblemArgs(problem_id, problem_content, model))
+
+
+def update_embedding_dataset_id(paragraph_id_list, target_dataset_id):
+ """
+ 修改向量数据到指定知识库
+ @param paragraph_id_list: 指定段落的向量数据
+ @param target_dataset_id: 知识库id
+ @return:
+ """
+
+ ListenerManagement.update_embedding_dataset_id(
+ UpdateEmbeddingDatasetIdArgs(paragraph_id_list, target_dataset_id))
+
+
+def delete_embedding_by_paragraph_ids(paragraph_ids: List[str]):
+ """
+ 删除指定段落列表的向量数据
+ @param paragraph_ids: 段落列表
+ @return: None
+ """
+ ListenerManagement.delete_embedding_by_paragraph_ids(paragraph_ids)
+
+
+def update_embedding_document_id(paragraph_id_list, target_document_id, target_dataset_id,
+ target_embedding_model_id=None):
+ target_embedding_model = get_embedding_model(
+ target_embedding_model_id) if target_embedding_model_id is not None else None
+ ListenerManagement.update_embedding_document_id(
+ UpdateEmbeddingDocumentIdArgs(paragraph_id_list, target_document_id, target_dataset_id, target_embedding_model))
+
+
+def delete_embedding_by_dataset_id_list(dataset_id_list):
+ ListenerManagement.delete_embedding_by_dataset_id_list(dataset_id_list)
diff --git a/apps/embedding/vector/base_vector.py b/apps/embedding/vector/base_vector.py
index 2bfd0e977d5..0aadef6c183 100644
--- a/apps/embedding/vector/base_vector.py
+++ b/apps/embedding/vector/base_vector.py
@@ -8,17 +8,31 @@
"""
import threading
from abc import ABC, abstractmethod
+from functools import reduce
from typing import List, Dict
-from langchain_community.embeddings import HuggingFaceEmbeddings
+from langchain_core.embeddings import Embeddings
-from common.config.embedding_config import EmbeddingModel
+from common.chunk import text_to_chunk
from common.util.common import sub_array
from embedding.models import SourceType, SearchMode
lock = threading.Lock()
+def chunk_data(data: Dict):
+ if str(data.get('source_type')) == SourceType.PARAGRAPH.value:
+ text = data.get('text')
+ chunk_list = text_to_chunk(text)
+ return [{**data, 'text': chunk} for chunk in chunk_list]
+ return [data]
+
+
+def chunk_data_list(data_list: List[Dict]):
+ result = [chunk_data(data) for data in data_list]
+ return reduce(lambda x, y: [*x, *y], result, [])
+
+
class BaseVectorStore(ABC):
vector_exists = False
@@ -51,7 +65,7 @@ def save_pre_handler(self):
def save(self, text, source_type: SourceType, dataset_id: str, document_id: str, paragraph_id: str, source_id: str,
is_active: bool,
- embedding=None):
+ embedding: Embeddings):
"""
插入向量数据
:param source_id: 资源id
@@ -64,52 +78,50 @@ def save(self, text, source_type: SourceType, dataset_id: str, document_id: str,
:param paragraph_id 段落id
:return: bool
"""
-
- if embedding is None:
- embedding = EmbeddingModel.get_embedding_model()
self.save_pre_handler()
- self._save(text, source_type, dataset_id, document_id, paragraph_id, source_id, is_active, embedding)
-
- def batch_save(self, data_list: List[Dict], embedding=None):
- # 获取锁
- lock.acquire()
- try:
- """
- 批量插入
- :param data_list: 数据列表
- :param embedding: 向量化处理器
- :return: bool
- """
- if embedding is None:
- embedding = EmbeddingModel.get_embedding_model()
- self.save_pre_handler()
- result = sub_array(data_list)
- for child_array in result:
- self._batch_save(child_array, embedding)
- finally:
- # 释放锁
- lock.release()
- return True
+ data = {'document_id': document_id, 'paragraph_id': paragraph_id, 'dataset_id': dataset_id,
+ 'is_active': is_active, 'source_id': source_id, 'source_type': source_type, 'text': text}
+ chunk_list = chunk_data(data)
+ result = sub_array(chunk_list)
+ for child_array in result:
+ self._batch_save(child_array, embedding, lambda: False)
+
+ def batch_save(self, data_list: List[Dict], embedding: Embeddings, is_the_task_interrupted):
+ """
+ 批量插入
+ @param data_list: 数据列表
+ @param embedding: 向量化处理器
+ @param is_the_task_interrupted: 判断是否中断任务
+ :return: bool
+ """
+ self.save_pre_handler()
+ chunk_list = chunk_data_list(data_list)
+ result = sub_array(chunk_list)
+ for child_array in result:
+ if not is_the_task_interrupted():
+ self._batch_save(child_array, embedding, is_the_task_interrupted)
+ else:
+ break
@abstractmethod
def _save(self, text, source_type: SourceType, dataset_id: str, document_id: str, paragraph_id: str, source_id: str,
is_active: bool,
- embedding: HuggingFaceEmbeddings):
+ embedding: Embeddings):
pass
@abstractmethod
- def _batch_save(self, text_list: List[Dict], embedding: HuggingFaceEmbeddings):
+ def _batch_save(self, text_list: List[Dict], embedding: Embeddings, is_the_task_interrupted):
pass
def search(self, query_text, dataset_id_list: list[str], exclude_document_id_list: list[str],
exclude_paragraph_list: list[str],
is_active: bool,
- embedding: HuggingFaceEmbeddings):
+ embedding: Embeddings):
if dataset_id_list is None or len(dataset_id_list) == 0:
return []
embedding_query = embedding.embed_query(query_text)
result = self.query(embedding_query, dataset_id_list, exclude_document_id_list, exclude_paragraph_list,
- is_active, 1, 0.65)
+ is_active, 1, 3, 0.65)
return result[0]
@abstractmethod
@@ -123,7 +135,7 @@ def query(self, query_text: str, query_embedding: List[float], dataset_id_list:
def hit_test(self, query_text, dataset_id: list[str], exclude_document_id_list: list[str], top_number: int,
similarity: float,
search_mode: SearchMode,
- embedding: HuggingFaceEmbeddings):
+ embedding: Embeddings):
pass
@abstractmethod
@@ -142,14 +154,6 @@ def update_by_source_id(self, source_id: str, instance: Dict):
def update_by_source_ids(self, source_ids: List[str], instance: Dict):
pass
- @abstractmethod
- def embed_documents(self, text_list: List[str]):
- pass
-
- @abstractmethod
- def embed_query(self, text: str):
- pass
-
@abstractmethod
def delete_by_dataset_id(self, dataset_id: str):
pass
@@ -159,7 +163,7 @@ def delete_by_document_id(self, document_id: str):
pass
@abstractmethod
- def delete_bu_document_id_list(self, document_id_list: List[str]):
+ def delete_by_document_id_list(self, document_id_list: List[str]):
pass
@abstractmethod
diff --git a/apps/embedding/vector/pg_vector.py b/apps/embedding/vector/pg_vector.py
index 5c0d045363b..7929685a37c 100644
--- a/apps/embedding/vector/pg_vector.py
+++ b/apps/embedding/vector/pg_vector.py
@@ -12,10 +12,11 @@
from abc import ABC, abstractmethod
from typing import Dict, List
-from django.db.models import QuerySet
-from langchain_community.embeddings import HuggingFaceEmbeddings
+import jieba
+from django.contrib.postgres.search import SearchVector
+from django.db.models import QuerySet, Value
+from langchain_core.embeddings import Embeddings
-from common.config.embedding_config import EmbeddingModel
from common.db.search import generate_sql_by_query_dict
from common.db.sql_execute import select_list
from common.util.file_util import get_file_content
@@ -28,19 +29,13 @@
class PGVector(BaseVectorStore):
def delete_by_source_ids(self, source_ids: List[str], source_type: str):
+ if len(source_ids) == 0:
+ return
QuerySet(Embedding).filter(source_id__in=source_ids, source_type=source_type).delete()
def update_by_source_ids(self, source_ids: List[str], instance: Dict):
QuerySet(Embedding).filter(source_id__in=source_ids).update(**instance)
- def embed_documents(self, text_list: List[str]):
- embedding = EmbeddingModel.get_embedding_model()
- return embedding.embed_documents(text_list)
-
- def embed_query(self, text: str):
- embedding = EmbeddingModel.get_embedding_model()
- return embedding.embed_query(text)
-
def vector_is_create(self) -> bool:
# 项目启动默认是创建好的 不需要再创建
return True
@@ -50,7 +45,7 @@ def vector_create(self):
def _save(self, text, source_type: SourceType, dataset_id: str, document_id: str, paragraph_id: str, source_id: str,
is_active: bool,
- embedding: HuggingFaceEmbeddings):
+ embedding: Embeddings):
text_embedding = embedding.embed_query(text)
embedding = Embedding(id=uuid.uuid1(),
dataset_id=dataset_id,
@@ -64,7 +59,7 @@ def _save(self, text, source_type: SourceType, dataset_id: str, document_id: str
embedding.save()
return True
- def _batch_save(self, text_list: List[Dict], embedding: HuggingFaceEmbeddings):
+ def _batch_save(self, text_list: List[Dict], embedding: Embeddings, is_the_task_interrupted):
texts = [row.get('text') for row in text_list]
embeddings = embedding.embed_documents(texts)
embedding_list = [Embedding(id=uuid.uuid1(),
@@ -75,15 +70,17 @@ def _batch_save(self, text_list: List[Dict], embedding: HuggingFaceEmbeddings):
source_id=text_list[index].get('source_id'),
source_type=text_list[index].get('source_type'),
embedding=embeddings[index],
- search_vector=to_ts_vector(text_list[index]['text'])) for index in
- range(0, len(text_list))]
- QuerySet(Embedding).bulk_create(embedding_list) if len(embedding_list) > 0 else None
+ search_vector=SearchVector(Value(to_ts_vector(text_list[index]['text'])))) for
+ index in
+ range(0, len(texts))]
+ if not is_the_task_interrupted():
+ QuerySet(Embedding).bulk_create(embedding_list) if len(embedding_list) > 0 else None
return True
def hit_test(self, query_text, dataset_id_list: list[str], exclude_document_id_list: list[str], top_number: int,
similarity: float,
search_mode: SearchMode,
- embedding: HuggingFaceEmbeddings):
+ embedding: Embeddings):
if dataset_id_list is None or len(dataset_id_list) == 0:
return []
exclude_dict = {}
@@ -105,9 +102,9 @@ def query(self, query_text: str, query_embedding: List[float], dataset_id_list:
return []
query_set = QuerySet(Embedding).filter(dataset_id__in=dataset_id_list, is_active=is_active)
if exclude_document_id_list is not None and len(exclude_document_id_list) > 0:
- exclude_dict.__setitem__('document_id__in', exclude_document_id_list)
+ query_set = query_set.exclude(document_id__in=exclude_document_id_list)
if exclude_paragraph_list is not None and len(exclude_paragraph_list) > 0:
- exclude_dict.__setitem__('paragraph_id__in', exclude_paragraph_list)
+ query_set = query_set.exclude(paragraph_id__in=exclude_paragraph_list)
query_set = query_set.exclude(**exclude_dict)
for search_handle in search_handle_list:
if search_handle.support(search_mode):
@@ -132,7 +129,9 @@ def delete_by_document_id(self, document_id: str):
QuerySet(Embedding).filter(document_id=document_id).delete()
return True
- def delete_bu_document_id_list(self, document_id_list: List[str]):
+ def delete_by_document_id_list(self, document_id_list: List[str]):
+ if len(document_id_list) == 0:
+ return True
return QuerySet(Embedding).filter(document_id__in=document_id_list).delete()
def delete_by_source_id(self, source_id: str, source_type: str):
diff --git a/apps/function_lib/__init__.py b/apps/function_lib/__init__.py
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/apps/function_lib/admin.py b/apps/function_lib/admin.py
new file mode 100644
index 00000000000..8c38f3f3dad
--- /dev/null
+++ b/apps/function_lib/admin.py
@@ -0,0 +1,3 @@
+from django.contrib import admin
+
+# Register your models here.
diff --git a/apps/function_lib/apps.py b/apps/function_lib/apps.py
new file mode 100644
index 00000000000..11957d6cf2c
--- /dev/null
+++ b/apps/function_lib/apps.py
@@ -0,0 +1,6 @@
+from django.apps import AppConfig
+
+
+class FunctionLibConfig(AppConfig):
+ default_auto_field = 'django.db.models.BigAutoField'
+ name = 'function_lib'
diff --git a/apps/function_lib/migrations/0001_initial.py b/apps/function_lib/migrations/0001_initial.py
new file mode 100644
index 00000000000..bb2fd60e997
--- /dev/null
+++ b/apps/function_lib/migrations/0001_initial.py
@@ -0,0 +1,34 @@
+# Generated by Django 4.2.15 on 2024-08-13 10:04
+
+import django.contrib.postgres.fields
+from django.db import migrations, models
+import django.db.models.deletion
+import uuid
+
+
+class Migration(migrations.Migration):
+
+ initial = True
+
+ dependencies = [
+ ('users', '0004_alter_user_email'),
+ ]
+
+ operations = [
+ migrations.CreateModel(
+ name='FunctionLib',
+ fields=[
+ ('create_time', models.DateTimeField(auto_now_add=True, verbose_name='创建时间')),
+ ('update_time', models.DateTimeField(auto_now=True, verbose_name='修改时间')),
+ ('id', models.UUIDField(default=uuid.uuid1, editable=False, primary_key=True, serialize=False, verbose_name='主键id')),
+ ('name', models.CharField(max_length=64, verbose_name='函数名称')),
+ ('desc', models.CharField(max_length=128, verbose_name='描述')),
+ ('code', models.CharField(max_length=102400, verbose_name='python代码')),
+ ('input_field_list', django.contrib.postgres.fields.ArrayField(base_field=models.JSONField(default=dict, verbose_name='输入字段'), default=list, size=None, verbose_name='输入字段列表')),
+ ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='users.user', verbose_name='用户id')),
+ ],
+ options={
+ 'db_table': 'function_lib',
+ },
+ ),
+ ]
diff --git a/apps/function_lib/migrations/0002_functionlib_is_active_functionlib_permission_type.py b/apps/function_lib/migrations/0002_functionlib_is_active_functionlib_permission_type.py
new file mode 100644
index 00000000000..c665ef22a43
--- /dev/null
+++ b/apps/function_lib/migrations/0002_functionlib_is_active_functionlib_permission_type.py
@@ -0,0 +1,23 @@
+# Generated by Django 4.2.15 on 2024-09-14 11:23
+
+from django.db import migrations, models
+
+
+class Migration(migrations.Migration):
+
+ dependencies = [
+ ('function_lib', '0001_initial'),
+ ]
+
+ operations = [
+ migrations.AddField(
+ model_name='functionlib',
+ name='is_active',
+ field=models.BooleanField(default=True),
+ ),
+ migrations.AddField(
+ model_name='functionlib',
+ name='permission_type',
+ field=models.CharField(choices=[('PUBLIC', '公开'), ('PRIVATE', '私有')], default='PRIVATE', max_length=20, verbose_name='权限类型'),
+ ),
+ ]
diff --git a/apps/function_lib/migrations/0003_functionlib_function_type_functionlib_icon_and_more.py b/apps/function_lib/migrations/0003_functionlib_function_type_functionlib_icon_and_more.py
new file mode 100644
index 00000000000..8df64e28197
--- /dev/null
+++ b/apps/function_lib/migrations/0003_functionlib_function_type_functionlib_icon_and_more.py
@@ -0,0 +1,194 @@
+# Generated by Django 4.2.15 on 2025-03-13 07:21
+
+from django.db import migrations, models
+
+function_template = '''
+INSERT INTO function_lib (create_time, update_time, id, name, "desc", code, input_field_list, user_id, is_active, permission_type, function_type, icon, init_field_list, init_params, template_id) VALUES ('2025-03-10 06:20:35.945414 +00:00', '2025-03-10 09:19:23.608026 +00:00', 'c75cb48e-fd77-11ef-84d2-5618c4394482', '博查搜索', '从博查搜索任何信息和网页URL', e'def bocha_search(query, apikey):
+ import requests
+ import json
+ url = "https://api.bochaai.com/v1/web-search"
+ payload = json.dumps({
+ "query": query,
+ "Boolean": "true",
+ "count": 8
+ })
+
+ headers = {
+ "Authorization": "Bearer " + apikey, #鉴权参数,示例:Bearer xxxxxx,API KEY请先前往博查AI开放平台(https://open.bochaai.com)> API KEY 管理中获取。
+ "Content-Type": "application/json"
+ }
+
+ response = requests.request("POST", url, headers=headers, data=payload)
+ if response.status_code == 200:
+ return response.json()
+ else:
+ raise Exception(f"API请求失败: {response.status_code}, 错误信息: {response.text}")
+ return (response.text)', '{"{\\"name\\": \\"query\\", \\"type\\": \\"string\\", \\"source\\": \\"reference\\", \\"is_required\\": true}"}', 'f0dd8f71-e4ee-11ee-8c84-a8a1595801ab', TRUE, 'PUBLIC', 'INTERNAL', '/ui/fx/bochaai/icon.png', '[{"attrs": {"type": "password", "maxlength": 200, "minlength": 1, "show-password": true, "show-word-limit": true}, "field": "apikey", "label": "API Key", "required": true, "input_type": "PasswordInput", "props_info": {"rules": [{"message": "API Key 为必填属性", "required": true}, {"max": 200, "min": 1, "message": "API Key 长度在 1 到 200 个字符", "trigger": "blur"}]}, "default_value": "x", "show_default_value": false}]', '', NULL);
+INSERT INTO function_lib (create_time, update_time, id, name, "desc", code, input_field_list, user_id, is_active, permission_type, function_type, icon, init_field_list, init_params, template_id) VALUES ('2025-02-26 03:36:48.187286 +00:00', '2025-03-11 07:23:46.123972 +00:00', 'e89ad2ae-f3f2-11ef-ad09-0242ac110002', 'Google Search', 'Google Web Search', e'def google_search(query, apikey, cx):
+ import requests
+ import json
+ url = "https://customsearch.googleapis.com/customsearch/v1"
+ params = {
+ "q": query,
+ "key": apikey,
+ "cx": cx,
+ "num": 10, # 每次最多返回10条
+ }
+
+ response = requests.get(url, params=params)
+ if response.status_code == 200:
+ return response.json()
+ else:
+ raise Exception(f"API请求失败: {response.status_code}, 错误信息: {response.text}")
+ return (response.text)', '{"{\\"name\\": \\"query\\", \\"type\\": \\"string\\", \\"source\\": \\"reference\\", \\"is_required\\": true}"}', 'f0dd8f71-e4ee-11ee-8c84-a8a1595801ab', TRUE, 'PUBLIC', 'INTERNAL', '/ui/fx/google_search/icon.png', '[{"attrs": {"type": "password", "maxlength": 200, "minlength": 1, "show-password": true, "show-word-limit": true}, "field": "apikey", "label": "API Key", "required": true, "input_type": "PasswordInput", "props_info": {"rules": [{"message": "API Key 为必填属性", "required": true}, {"max": 200, "min": 1, "message": "API Key 长度在 1 到 200 个字符", "trigger": "blur"}]}, "default_value": "x", "show_default_value": false}, {"attrs": {"maxlength": 200, "minlength": 1, "show-word-limit": true}, "field": "cx", "label": "cx", "required": true, "input_type": "TextInput", "props_info": {"rules": [{"message": "cx 为必填属性", "required": true}, {"max": 200, "min": 1, "message": "cx长度在 1 到 200 个字符", "trigger": "blur"}]}, "default_value": "x", "show_default_value": false}]', '', NULL);
+INSERT INTO function_lib (create_time, update_time, id, name, "desc", code, input_field_list, user_id, is_active, permission_type, function_type, icon, init_field_list, init_params, template_id) VALUES ('2025-02-25 07:44:40.141515 +00:00', '2025-03-11 06:33:53.248495 +00:00', '5e912f00-f34c-11ef-8a9c-5618c4394482', 'LangSearch', e'A Web Search tool supporting natural language search
+', e'
+def langsearch(query, apikey):
+ import json
+ import requests
+
+ url = "https://api.langsearch.com/v1/web-search"
+ payload = json.dumps({
+ "query": query,
+ "summary": True,
+ "freshness": "noLimit",
+ "livecrawl": True,
+ "count": 20
+ })
+ headers = {
+ "Authorization": apikey,
+ "Content-Type": "application/json"
+ }
+ # key从官网申请 https://langsearch.com/
+ response = requests.request("POST", url, headers=headers, data=payload)
+ if response.status_code == 200:
+ return response.json()
+ else:
+ raise Exception(f"API请求失败: {response.status_code}, 错误信息: {response.text}")
+ return (response.text)', '{"{\\"name\\": \\"query\\", \\"type\\": \\"string\\", \\"source\\": \\"reference\\", \\"is_required\\": true}"}', 'f0dd8f71-e4ee-11ee-8c84-a8a1595801ab', TRUE, 'PUBLIC', 'INTERNAL', '/ui/fx/langsearch/icon.png', '[{"attrs": {"type": "password", "maxlength": 200, "minlength": 1, "show-password": true, "show-word-limit": true}, "field": "apikey", "label": "API Key", "required": true, "input_type": "PasswordInput", "props_info": {"rules": [{"message": "API Key 为必填属性", "required": true}, {"max": 200, "min": 1, "message": "API Key 长度在 1 到 200 个字符", "trigger": "blur"}]}, "default_value": "x", "show_default_value": false}]', '', NULL);
+INSERT INTO function_lib (create_time, update_time, id, name, "desc", code, input_field_list, user_id, is_active, permission_type, function_type, icon, init_field_list, init_params, template_id) VALUES ('2025-03-17 08:16:32.626245 +00:00', '2025-03-17 08:16:32.626308 +00:00', '22c21b76-0308-11f0-9694-5618c4394482', 'MySQL 查询', '一个连接MySQL数据库执行SQL查询的工具', e'
+def query_mysql(host,port, user, password, database, sql):
+ import pymysql
+ import json
+ from pymysql.cursors import DictCursor
+
+ try:
+ # 创建连接
+ db = pymysql.connect(
+ host=host,
+ port=int(port),
+ user=user,
+ password=password,
+ database=database,
+ cursorclass=DictCursor # 使用字典游标
+ )
+
+ # 使用 cursor() 方法创建一个游标对象 cursor
+ cursor = db.cursor()
+
+ # 使用 execute() 方法执行 SQL 查询
+ cursor.execute(sql)
+
+ # 使用 fetchall() 方法获取所有数据
+ data = cursor.fetchall()
+
+ # 处理 bytes 类型的数据
+ for row in data:
+ for key, value in row.items():
+ if isinstance(value, bytes):
+ row[key] = value.decode("utf-8") # 转换为字符串
+
+ # 将数据序列化为 JSON
+ json_data = json.dumps(data, ensure_ascii=False)
+ return json_data
+
+ # 关闭数据库连接
+ db.close()
+
+ except Exception as e:
+ print(f"Error while connecting to MySQL: {e}")
+ raise e', '{"{\\"name\\": \\"sql\\", \\"type\\": \\"string\\", \\"source\\": \\"reference\\", \\"is_required\\": true}"}', 'f0dd8f71-e4ee-11ee-8c84-a8a1595801ab', true, 'PUBLIC', 'INTERNAL', '/ui/fx/mysql/icon.png', '[{"attrs": {"maxlength": 200, "minlength": 1, "show-word-limit": true}, "field": "host", "label": "host", "required": true, "input_type": "TextInput", "props_info": {"rules": [{"message": "host 为必填属性", "required": true}, {"max": 200, "min": 1, "message": "host长度在 1 到 200 个字符", "trigger": "blur"}]}, "default_value": "x", "show_default_value": false}, {"attrs": {"maxlength": 20, "minlength": 1, "show-word-limit": true}, "field": "port", "label": "port", "required": true, "input_type": "TextInput", "props_info": {"rules": [{"message": "port 为必填属性", "required": true}, {"max": 20, "min": 1, "message": "port长度在 1 到 20 个字符", "trigger": "blur"}]}, "default_value": "3306", "show_default_value": false}, {"attrs": {"maxlength": 200, "minlength": 1, "show-word-limit": true}, "field": "user", "label": "user", "required": true, "input_type": "TextInput", "props_info": {"rules": [{"message": "user 为必填属性", "required": true}, {"max": 200, "min": 1, "message": "user长度在 1 到 200 个字符", "trigger": "blur"}]}, "default_value": "root", "show_default_value": false}, {"attrs": {"type": "password", "maxlength": 200, "minlength": 1, "show-password": true, "show-word-limit": true}, "field": "password", "label": "password", "required": true, "input_type": "PasswordInput", "props_info": {"rules": [{"message": "password 为必填属性", "required": true}, {"max": 200, "min": 1, "message": "password长度在 1 到 200 个字符", "trigger": "blur"}]}, "default_value": "x", "show_default_value": false}, {"attrs": {"maxlength": 200, "minlength": 1, "show-word-limit": true}, "field": "database", "label": "database", "required": true, "input_type": "TextInput", "props_info": {"rules": [{"message": "database 为必填属性", "required": true}, {"max": 200, "min": 1, "message": "database长度在 1 到 200 个字符", "trigger": 
"blur"}]}, "default_value": "x", "show_default_value": false}]', null, null);
+INSERT INTO function_lib (create_time, update_time, id, name, "desc", code, input_field_list, user_id, is_active, permission_type, function_type, icon, init_field_list, init_params, template_id) VALUES ('2025-03-17 07:37:54.620836 +00:00', '2025-03-17 07:37:54.620887 +00:00', 'bd1e8b88-0302-11f0-87bb-5618c4394482', 'PostgreSQL 查询', '一个连接PostgreSQL数据库执行SQL查询的工具', e'
+def queryPgSQL(database, user, password, host, port, query):
+ import psycopg2
+ import json
+ from datetime import datetime
+
+ # 自定义 JSON 序列化函数
+ def default_serializer(obj):
+ if isinstance(obj, datetime):
+ return obj.isoformat() # 将 datetime 转换为 ISO 格式字符串
+ raise TypeError(f"Type {type(obj)} not serializable")
+
+ # 数据库连接信息
+ conn_params = {
+ "dbname": database,
+ "user": user,
+ "password": password,
+ "host": host,
+ "port": port
+ }
+ try:
+ # 建立连接
+ conn = psycopg2.connect(**conn_params)
+ print("连接成功!")
+ # 创建游标对象
+ cursor = conn.cursor()
+ # 执行查询语句
+ cursor.execute(query)
+ # 获取查询结果
+ rows = cursor.fetchall()
+ # 处理 bytes 类型的数据
+ columns = [desc[0] for desc in cursor.description]
+ result = [dict(zip(columns, row)) for row in rows]
+ # 转换为 JSON 格式
+ json_result = json.dumps(result, default=default_serializer, ensure_ascii=False)
+ return json_result
+ except Exception as e:
+ print(f"发生错误:{e}")
+ raise e
+ finally:
+ # 关闭游标和连接
+ if cursor:
+ cursor.close()
+ if conn:
+ conn.close()', '{"{\\"name\\": \\"query\\", \\"type\\": \\"string\\", \\"source\\": \\"reference\\", \\"is_required\\": true}"}', 'f0dd8f71-e4ee-11ee-8c84-a8a1595801ab', true, 'PUBLIC', 'INTERNAL', '/ui/fx/postgresql/icon.png', '[{"attrs":{"maxlength":200,"minlength":1,"show-word-limit":true},"field":"host","label":"host","required":true,"input_type":"TextInput","props_info":{"rules":[{"message":"host 为必填属性","required":true},{"max":200,"min":1,"message":"host长度在 1 到 200 个字符","trigger":"blur"}]},"default_value":"x","show_default_value":false},{"attrs":{"maxlength":20,"minlength":1,"show-word-limit":true},"field":"port","label":"port","required":true,"input_type":"TextInput","props_info":{"rules":[{"message":"port 为必填属性","required":true},{"max":20,"min":1,"message":"port长度在 1 到 20 个字符","trigger":"blur"}]},"default_value":"5432","show_default_value":false},{"attrs":{"maxlength":200,"minlength":1,"show-word-limit":true},"field":"user","label":"user","required":true,"input_type":"TextInput","props_info":{"rules":[{"message":"user 为必填属性","required":true},{"max":200,"min":1,"message":"user长度在 1 到 200 个字符","trigger":"blur"}]},"default_value":"root","show_default_value":false},{"attrs":{"type":"password","maxlength":200,"minlength":1,"show-password":true,"show-word-limit":true},"field":"password","label":"password","required":true,"input_type":"PasswordInput","props_info":{"rules":[{"message":"password 为必填属性","required":true},{"max":200,"min":1,"message":"password长度在 1 到 200 个字符","trigger":"blur"}]},"default_value":"x","show_default_value":false},{"attrs":{"maxlength":200,"minlength":1,"show-word-limit":true},"field":"database","label":"database","required":true,"input_type":"TextInput","props_info":{"rules":[{"message":"database 为必填属性","required":true},{"max":200,"min":1,"message":"database长度在 1 到 200 个字符","trigger":"blur"}]},"default_value":"x","show_default_value":false}]', null, null);
+
+'''
+
+
+class Migration(migrations.Migration):
+ dependencies = [
+ ('function_lib', '0002_functionlib_is_active_functionlib_permission_type'),
+ ]
+
+ operations = [
+ migrations.AddField(
+ model_name='functionlib',
+ name='function_type',
+ field=models.CharField(choices=[('INTERNAL', '内置'), ('PUBLIC', '公开')],
+ default='PUBLIC', max_length=20, verbose_name='函数类型'),
+ ),
+ migrations.AddField(
+ model_name='functionlib',
+ name='icon',
+ field=models.CharField(default='/ui/favicon.ico', max_length=256,
+ verbose_name='函数库icon'),
+ ),
+ migrations.AddField(
+ model_name='functionlib',
+ name='init_field_list',
+ field=models.JSONField(default=list, verbose_name='启动字段列表'),
+ ),
+ migrations.AddField(
+ model_name='functionlib',
+ name='init_params',
+ field=models.CharField(max_length=102400, null=True, verbose_name='初始化参数'),
+ ),
+ migrations.AddField(
+ model_name='functionlib',
+ name='template_id',
+ field=models.UUIDField(default=None, null=True, verbose_name='模版id'),
+ ),
+ migrations.RunSQL(function_template)
+ ]
diff --git a/apps/function_lib/migrations/__init__.py b/apps/function_lib/migrations/__init__.py
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/apps/function_lib/models/__init__.py b/apps/function_lib/models/__init__.py
new file mode 100644
index 00000000000..a68550e90ef
--- /dev/null
+++ b/apps/function_lib/models/__init__.py
@@ -0,0 +1,8 @@
+# coding=utf-8
+"""
+ @project: MaxKB
+ @Author:虎
+ @file: __init__.py
+ @date:2024/8/2 14:55
+ @desc:
+"""
diff --git a/apps/function_lib/models/function.py b/apps/function_lib/models/function.py
new file mode 100644
index 00000000000..037f5099527
--- /dev/null
+++ b/apps/function_lib/models/function.py
@@ -0,0 +1,47 @@
+# coding=utf-8
+"""
+ @project: MaxKB
+ @Author:虎
+ @file: function.py
+ @date:2024/8/2 14:59
+ @desc:
+"""
+import uuid
+
+from django.contrib.postgres.fields import ArrayField
+from django.db import models
+
+from common.mixins.app_model_mixin import AppModelMixin
+from users.models import User
+
+
+class PermissionType(models.TextChoices):
+ PUBLIC = "PUBLIC", '公开'
+ PRIVATE = "PRIVATE", "私有"
+
+class FunctionType(models.TextChoices):
+ INTERNAL = "INTERNAL", '内置'
+ PUBLIC = "PUBLIC", "公开"
+
+
+class FunctionLib(AppModelMixin):
+ id = models.UUIDField(primary_key=True, max_length=128, default=uuid.uuid1, editable=False, verbose_name="主键id")
+ user = models.ForeignKey(User, on_delete=models.CASCADE, verbose_name="用户id")
+ name = models.CharField(max_length=64, verbose_name="函数名称")
+ desc = models.CharField(max_length=128, verbose_name="描述")
+ code = models.CharField(max_length=102400, verbose_name="python代码")
+ input_field_list = ArrayField(verbose_name="输入字段列表",
+ base_field=models.JSONField(verbose_name="输入字段", default=dict)
+ , default=list)
+ init_field_list = models.JSONField(verbose_name="启动字段列表", default=list)
+ icon = models.CharField(max_length=256, verbose_name="函数库icon", default="/ui/favicon.ico")
+ is_active = models.BooleanField(default=True)
+ permission_type = models.CharField(max_length=20, verbose_name='权限类型', choices=PermissionType.choices,
+ default=PermissionType.PRIVATE)
+ function_type = models.CharField(max_length=20, verbose_name='函数类型', choices=FunctionType.choices,
+ default=FunctionType.PUBLIC)
+ template_id = models.UUIDField(max_length=128, verbose_name="模版id", null=True, default=None)
+ init_params = models.CharField(max_length=102400, verbose_name="初始化参数", null=True)
+
+ class Meta:
+ db_table = "function_lib"
diff --git a/apps/function_lib/serializers/__init__.py b/apps/function_lib/serializers/__init__.py
new file mode 100644
index 00000000000..a68550e90ef
--- /dev/null
+++ b/apps/function_lib/serializers/__init__.py
@@ -0,0 +1,8 @@
+# coding=utf-8
+"""
+ @project: MaxKB
+ @Author:虎
+ @file: __init__.py
+ @date:2024/8/2 14:55
+ @desc:
+"""
diff --git a/apps/function_lib/serializers/function_lib_serializer.py b/apps/function_lib/serializers/function_lib_serializer.py
new file mode 100644
index 00000000000..440eb22c786
--- /dev/null
+++ b/apps/function_lib/serializers/function_lib_serializer.py
@@ -0,0 +1,426 @@
+# coding=utf-8
+"""
+ @project: MaxKB
+ @Author:虎
+ @file: function_lib_serializer.py
+ @date:2024/8/2 17:35
+ @desc:
+"""
+import io
+import json
+import pickle
+import re
+import uuid
+
+from django.core import validators
+from django.db import transaction
+from django.db.models import QuerySet, Q, OuterRef, Exists
+from django.http import HttpResponse
+from django.utils.translation import gettext_lazy as _
+from rest_framework import serializers, status
+
+from common.db.search import page_search
+from common.exception.app_exception import AppApiException
+from common.field.common import UploadedFileField, UploadedImageField
+from common.response import result
+from common.util.common import restricted_loads
+from common.util.field_message import ErrMessage
+from common.util.function_code import FunctionExecutor
+from common.util.rsa_util import rsa_long_decrypt, rsa_long_encrypt
+from dataset.models import File
+from function_lib.models.function import FunctionLib, PermissionType, FunctionType
+from smartdoc.const import CONFIG
+
+function_executor = FunctionExecutor(CONFIG.get('SANDBOX'))
+
+class FlibInstance:
+ def __init__(self, function_lib: dict, version: str):
+ self.function_lib = function_lib
+ self.version = version
+
+def encryption(message: str):
+ """
+ 加密敏感字段数据 加密方式是 如果密码是 1234567890 那么给前端则是 123******890
+ :param message:
+ :return:
+ """
+ if type(message) != str:
+ return message
+ if message == "":
+ return ""
+ max_pre_len = 8
+ max_post_len = 4
+ message_len = len(message)
+ pre_len = int(message_len / 5 * 2)
+ post_len = int(message_len / 5 * 1)
+ pre_str = "".join([message[index] for index in
+ range(0,
+ max_pre_len if pre_len > max_pre_len else 1 if pre_len <= 0 else int(
+ pre_len))])
+ end_str = "".join(
+ [message[index] for index in
+ range(message_len - (int(post_len) if pre_len < max_post_len else max_post_len),
+ message_len)])
+ content = "***************"
+ return pre_str + content + end_str
+
+
+class FunctionLibModelSerializer(serializers.ModelSerializer):
+ class Meta:
+ model = FunctionLib
+ fields = ['id', 'name', 'icon', 'desc', 'code', 'input_field_list','init_field_list', 'init_params', 'permission_type', 'is_active', 'user_id', 'template_id',
+ 'create_time', 'update_time']
+
+
+class FunctionLibInputField(serializers.Serializer):
+ name = serializers.CharField(required=True, error_messages=ErrMessage.char(_('variable name')))
+ is_required = serializers.BooleanField(required=True, error_messages=ErrMessage.boolean(_('required')))
+ type = serializers.CharField(required=True, error_messages=ErrMessage.char(_('type')), validators=[
+ validators.RegexValidator(regex=re.compile("^string|int|dict|array|float$"),
+ message=_('fields only support string|int|dict|array|float'), code=500)
+ ])
+ source = serializers.CharField(required=True, error_messages=ErrMessage.char(_('source')), validators=[
+ validators.RegexValidator(regex=re.compile("^custom|reference$"),
+ message=_('The field only supports custom|reference'), code=500)
+ ])
+
+
+class DebugField(serializers.Serializer):
+ name = serializers.CharField(required=True, error_messages=ErrMessage.char(_('variable name')))
+ value = serializers.CharField(required=False, allow_blank=True, allow_null=True,
+ error_messages=ErrMessage.char(_('variable value')))
+
+
+class DebugInstance(serializers.Serializer):
+ debug_field_list = DebugField(required=True, many=True)
+ input_field_list = FunctionLibInputField(required=True, many=True)
+ init_field_list = serializers.ListField(required=False, default=list)
+ init_params = serializers.JSONField(required=False, default=dict)
+ code = serializers.CharField(required=True, error_messages=ErrMessage.char(_('function content')))
+
+
+class EditFunctionLib(serializers.Serializer):
+ name = serializers.CharField(required=False, allow_null=True, allow_blank=True,
+ error_messages=ErrMessage.char(_('function name')))
+
+ desc = serializers.CharField(required=False, allow_null=True, allow_blank=True,
+ error_messages=ErrMessage.char(_('function description')))
+
+ code = serializers.CharField(required=False, allow_null=True, allow_blank=True,
+ error_messages=ErrMessage.char(_('function content')))
+
+ input_field_list = FunctionLibInputField(required=False, many=True)
+
+ init_field_list = serializers.ListField(required=False, default=list)
+
+ is_active = serializers.BooleanField(required=False, error_messages=ErrMessage.char(_('Is active')))
+
+
+class CreateFunctionLib(serializers.Serializer):
+ name = serializers.CharField(required=True, error_messages=ErrMessage.char(_('function name')))
+
+ desc = serializers.CharField(required=False, allow_null=True, allow_blank=True,
+ error_messages=ErrMessage.char(_('function description')))
+
+ code = serializers.CharField(required=True, error_messages=ErrMessage.char(_('function content')))
+
+ input_field_list = FunctionLibInputField(required=True, many=True)
+
+ init_field_list = serializers.ListField(required=False, default=list)
+
+ permission_type = serializers.CharField(required=True, error_messages=ErrMessage.char(_('permission')), validators=[
+ validators.RegexValidator(regex=re.compile("^PUBLIC|PRIVATE$"),
+ message="权限只支持PUBLIC|PRIVATE", code=500)
+ ])
+ is_active = serializers.BooleanField(required=False, error_messages=ErrMessage.char(_('Is active')))
+
+
+class FunctionLibSerializer(serializers.Serializer):
+ class Query(serializers.Serializer):
+ name = serializers.CharField(required=False, allow_null=True, allow_blank=True,
+ error_messages=ErrMessage.char(_('function name')))
+
+ desc = serializers.CharField(required=False, allow_null=True, allow_blank=True,
+ error_messages=ErrMessage.char(_('function description')))
+ is_active = serializers.BooleanField(required=False, error_messages=ErrMessage.char(_('Is active')))
+
+ user_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid(_('user id')))
+ select_user_id = serializers.CharField(required=False, allow_null=True, allow_blank=True)
+ function_type = serializers.CharField(required=False, allow_null=True, allow_blank=True)
+
+
+ def get_query_set(self):
+ query_set = QuerySet(FunctionLib).filter(
+ (Q(user_id=self.data.get('user_id')) | Q(permission_type='PUBLIC')))
+ if self.data.get('name') is not None:
+ query_set = query_set.filter(name__icontains=self.data.get('name'))
+ if self.data.get('desc') is not None:
+ query_set = query_set.filter(desc__contains=self.data.get('desc'))
+ if self.data.get('is_active') is not None:
+ query_set = query_set.filter(is_active=self.data.get('is_active'))
+ if self.data.get('select_user_id') is not None:
+ query_set = query_set.filter(user_id=self.data.get('select_user_id'))
+ if self.data.get('function_type') is not None:
+ query_set = query_set.filter(function_type=self.data.get('function_type'))
+ query_set = query_set.order_by("-create_time")
+
+ return query_set
+
+ def list(self, with_valid=True):
+ if with_valid:
+ self.is_valid(raise_exception=True)
+ rs = []
+ for item in self.get_query_set():
+ data = {**FunctionLibModelSerializer(item).data, 'init_params': None}
+ rs.append(data)
+ return rs
+
+ def page(self, current_page: int, page_size: int, with_valid=True):
+ if with_valid:
+ self.is_valid(raise_exception=True)
+
+ def post_records_handler(row):
+ return {
+ **FunctionLibModelSerializer(row).data,
+ 'init_params': None
+ }
+
+ return page_search(current_page, page_size, self.get_query_set(),
+ post_records_handler=post_records_handler)
+
+ class Create(serializers.Serializer):
+ user_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid(_('user id')))
+
+ def insert(self, instance, with_valid=True):
+ if with_valid:
+ self.is_valid(raise_exception=True)
+ CreateFunctionLib(data=instance).is_valid(raise_exception=True)
+ function_lib = FunctionLib(id=uuid.uuid1(), name=instance.get('name'), desc=instance.get('desc'),
+ code=instance.get('code'),
+ user_id=self.data.get('user_id'),
+ input_field_list=instance.get('input_field_list'),
+ init_field_list=instance.get('init_field_list'),
+ permission_type=instance.get('permission_type'),
+ is_active=False)
+ function_lib.save()
+ return FunctionLibModelSerializer(function_lib).data
+
+ class Debug(serializers.Serializer):
+ user_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid(_('user id')))
+
+ def debug(self, debug_instance, with_valid=True):
+ if with_valid:
+ self.is_valid(raise_exception=True)
+ DebugInstance(data=debug_instance).is_valid(raise_exception=True)
+ input_field_list = debug_instance.get('input_field_list')
+ code = debug_instance.get('code')
+ debug_field_list = debug_instance.get('debug_field_list')
+ init_params = debug_instance.get('init_params')
+ params = {field.get('name'): self.convert_value(field.get('name'), field.get('value'), field.get('type'),
+ field.get('is_required'))
+ for field in
+ [{'value': self.get_field_value(debug_field_list, field.get('name'), field.get('is_required')),
+ **field} for field in
+ input_field_list]}
+ # 合并初始化参数
+ if init_params is not None:
+ all_params = init_params | params
+ else:
+ all_params = params
+ return function_executor.exec_code(code, all_params)
+
+ @staticmethod
+ def get_field_value(debug_field_list, name, is_required):
+ result = [field for field in debug_field_list if field.get('name') == name]
+ if len(result) > 0:
+ return result[-1].get('value')
+ if is_required:
+ raise AppApiException(500, f"{name}" + _('field has no value set'))
+ return None
+
+ @staticmethod
+ def convert_value(name: str, value: str, _type: str, is_required: bool):
+ if not is_required and value is None:
+ return None
+ try:
+ if _type == 'int':
+ return int(value)
+ if _type == 'float':
+ return float(value)
+ if _type == 'dict':
+ v = json.loads(value)
+ if isinstance(v, dict):
+ return v
+ raise Exception(_('type error'))
+ if _type == 'array':
+ v = json.loads(value)
+ if isinstance(v, list):
+ return v
+ raise Exception(_('type error'))
+ return value
+ except Exception as e:
+ raise AppApiException(500, _('Field: {name} Type: {_type} Value: {value} Type conversion error').format(
+ name=name, type=_type, value=value
+ ))
+
+ class Operate(serializers.Serializer):
+ id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid(_('function id')))
+ user_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid(_('user id')))
+
+ def is_valid(self, *, raise_exception=False):
+ super().is_valid(raise_exception=True)
+ if not QuerySet(FunctionLib).filter(id=self.data.get('id')).exists():
+ raise AppApiException(500, _('Function does not exist'))
+
+ def delete(self, with_valid=True):
+ if with_valid:
+ self.is_valid(raise_exception=True)
+ fun = QuerySet(FunctionLib).filter(id=self.data.get('id')).first()
+ if fun.template_id is None and fun.icon != '/ui/favicon.ico':
+ QuerySet(File).filter(id=fun.icon.split('/')[-1]).delete()
+ QuerySet(FunctionLib).filter(id=self.data.get('id')).delete()
+ return True
+
+ def edit(self, instance, with_valid=True):
+ if with_valid:
+ self.is_valid(raise_exception=True)
+ EditFunctionLib(data=instance).is_valid(raise_exception=True)
+ edit_field_list = ['name', 'desc', 'code', 'icon', 'input_field_list', 'init_field_list', 'init_params', 'permission_type', 'is_active']
+ edit_dict = {field: instance.get(field) for field in edit_field_list if (
+ field in instance and instance.get(field) is not None)}
+
+ function_lib = QuerySet(FunctionLib).filter(id=self.data.get('id')).first()
+ if 'init_params' in edit_dict:
+ if edit_dict['init_field_list'] is not None:
+ rm_key = []
+ for key in edit_dict['init_params']:
+ if key not in [field['field'] for field in edit_dict['init_field_list']]:
+ rm_key.append(key)
+ for key in rm_key:
+ edit_dict['init_params'].pop(key)
+ if function_lib.init_params:
+ old_init_params = json.loads(rsa_long_decrypt(function_lib.init_params))
+ for key in edit_dict['init_params']:
+ if key in old_init_params and edit_dict['init_params'][key] == encryption(old_init_params[key]):
+ edit_dict['init_params'][key] = old_init_params[key]
+ edit_dict['init_params'] = rsa_long_encrypt(json.dumps(edit_dict['init_params']))
+ QuerySet(FunctionLib).filter(id=self.data.get('id')).update(**edit_dict)
+ return self.one(False)
+
+ def one(self, with_valid=True):
+ if with_valid:
+ super().is_valid(raise_exception=True)
+ if not QuerySet(FunctionLib).filter(id=self.data.get('id')).filter(
+ Q(user_id=self.data.get('user_id')) | Q(permission_type='PUBLIC')).exists():
+ raise AppApiException(500, _('Function does not exist'))
+ function_lib = QuerySet(FunctionLib).filter(id=self.data.get('id')).first()
+ if function_lib.init_params:
+ function_lib.init_params = json.loads(rsa_long_decrypt(function_lib.init_params))
+ if function_lib.init_field_list:
+ password_fields = [i["field"] for i in function_lib.init_field_list if i.get("input_type") == "PasswordInput"]
+ if function_lib.init_params:
+ for k in function_lib.init_params:
+ if k in password_fields and function_lib.init_params[k]:
+ function_lib.init_params[k] = encryption(function_lib.init_params[k])
+ return {**FunctionLibModelSerializer(function_lib).data, 'init_params': function_lib.init_params}
+
+ def export(self, with_valid=True):
+ try:
+ if with_valid:
+ self.is_valid()
+ id = self.data.get('id')
+ function_lib = QuerySet(FunctionLib).filter(id=id).first()
+ application_dict = FunctionLibModelSerializer(function_lib).data
+ mk_instance = FlibInstance(application_dict, 'v1')
+ application_pickle = pickle.dumps(mk_instance)
+ response = HttpResponse(content_type='text/plain', content=application_pickle)
+ response['Content-Disposition'] = f'attachment; filename="{function_lib.name}.fx"'
+ return response
+ except Exception as e:
+ return result.error(str(e), response_status=status.HTTP_500_INTERNAL_SERVER_ERROR)
+
+ class Import(serializers.Serializer):
+ file = UploadedFileField(required=True, error_messages=ErrMessage.image(_("file")))
+ user_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid(_("User ID")))
+
+ @transaction.atomic
+ def import_(self, with_valid=True):
+ if with_valid:
+ self.is_valid()
+ user_id = self.data.get('user_id')
+ flib_instance_bytes = self.data.get('file').read()
+ try:
+ flib_instance = restricted_loads(flib_instance_bytes)
+ except Exception as e:
+ raise AppApiException(1001, _("Unsupported file format"))
+ function_lib = flib_instance.function_lib
+ function_lib_model = FunctionLib(id=uuid.uuid1(), name=function_lib.get('name'),
+ desc=function_lib.get('desc'),
+ code=function_lib.get('code'),
+ user_id=user_id,
+ input_field_list=function_lib.get('input_field_list'),
+ init_field_list=function_lib.get('init_field_list', []),
+ permission_type='PRIVATE',
+ is_active=False)
+ function_lib_model.save()
+ return True
+
+ class IconOperate(serializers.Serializer):
+ id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid(_("function ID")))
+ user_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid(_("User ID")))
+ image = UploadedImageField(required=True, error_messages=ErrMessage.image(_("picture")))
+
+ def edit(self, with_valid=True):
+ if with_valid:
+ self.is_valid(raise_exception=True)
+ functionLib = QuerySet(FunctionLib).filter(id=self.data.get('id')).first()
+ if functionLib is None:
+ raise AppApiException(500, _('Function does not exist'))
+ # 删除旧的图片
+ if functionLib.icon != '/ui/favicon.ico':
+ QuerySet(File).filter(id=functionLib.icon.split('/')[-1]).delete()
+ if self.data.get('image') is None:
+ functionLib.icon = '/ui/favicon.ico'
+ else:
+ meta = {
+ 'debug': False
+ }
+ file_id = uuid.uuid1()
+ file = File(id=file_id, file_name=self.data.get('image').name, meta=meta)
+ file.save(self.data.get('image').read())
+
+ functionLib.icon = f'/api/file/{file_id}'
+ functionLib.save()
+
+ return functionLib.icon
+
+ class InternalFunction(serializers.Serializer):
+ id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid(_("function ID")))
+ user_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid(_("User ID")))
+ name = serializers.CharField(required=True, error_messages=ErrMessage.char(_("function name")))
+
+ def add(self, with_valid=True):
+ if with_valid:
+ self.is_valid(raise_exception=True)
+
+ internal_function_lib = QuerySet(FunctionLib).filter(id=self.data.get('id')).first()
+ if internal_function_lib is None:
+ raise AppApiException(500, _('Function does not exist'))
+
+ function_lib = FunctionLib(
+ id=uuid.uuid1(),
+ name=self.data.get('name'),
+ desc=internal_function_lib.desc,
+ code=internal_function_lib.code,
+ user_id=self.data.get('user_id'),
+ input_field_list=internal_function_lib.input_field_list,
+ init_field_list=internal_function_lib.init_field_list,
+ permission_type=PermissionType.PRIVATE,
+ template_id=internal_function_lib.id,
+ function_type=FunctionType.PUBLIC,
+ icon=internal_function_lib.icon,
+ is_active=False
+ )
+ function_lib.save()
+
+ return FunctionLibModelSerializer(function_lib).data
diff --git a/apps/function_lib/serializers/py_lint_serializer.py b/apps/function_lib/serializers/py_lint_serializer.py
new file mode 100644
index 00000000000..6fa6d4c44a3
--- /dev/null
+++ b/apps/function_lib/serializers/py_lint_serializer.py
@@ -0,0 +1,59 @@
+# coding=utf-8
+"""
+ @project: MaxKB
+ @Author:虎
+ @file: py_lint_serializer.py
+ @date:2024/9/30 15:38
+ @desc:
+"""
+import os
+import uuid
+
+from pylint.lint import Run
+from pylint.reporters import JSON2Reporter
+from rest_framework import serializers
+
+from common.util.field_message import ErrMessage
+from smartdoc.const import PROJECT_DIR
+from django.utils.translation import gettext_lazy as _
+
+
+class PyLintInstance(serializers.Serializer):
+ code = serializers.CharField(required=True, allow_null=True, allow_blank=True,
+ error_messages=ErrMessage.char(_('function content')))
+
+
+def to_dict(message, file_name):
+ return {'line': message.line,
+ 'column': message.column,
+ 'endLine': message.end_line,
+ 'endColumn': message.end_column,
+ 'message': (message.msg or "").replace(file_name, 'code'),
+ 'type': message.category}
+
+
+def get_file_name():
+ file_name = f"{uuid.uuid1()}"
+ py_lint_dir = os.path.join(PROJECT_DIR, 'data', 'py_lint')
+ if not os.path.exists(py_lint_dir):
+ os.makedirs(py_lint_dir)
+ return os.path.join(py_lint_dir, file_name)
+
+
+class PyLintSerializer(serializers.Serializer):
+
+ def pylint(self, instance, is_valid=True):
+ if is_valid:
+ self.is_valid(raise_exception=True)
+ PyLintInstance(data=instance).is_valid(raise_exception=True)
+ code = instance.get('code')
+ file_name = get_file_name()
+ with open(file_name, 'w') as file:
+ file.write(code)
+ reporter = JSON2Reporter()
+ Run([file_name,
+ "--disable=line-too-long",
+ '--module-rgx=[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}'],
+ reporter=reporter, exit=False)
+ os.remove(file_name)
+ return [to_dict(m, os.path.basename(file_name)) for m in reporter.messages]
diff --git a/apps/function_lib/swagger_api/__init__.py b/apps/function_lib/swagger_api/__init__.py
new file mode 100644
index 00000000000..a68550e90ef
--- /dev/null
+++ b/apps/function_lib/swagger_api/__init__.py
@@ -0,0 +1,8 @@
+# coding=utf-8
+"""
+ @project: MaxKB
+ @Author:虎
+ @file: __init__.py.py
+ @date:2024/8/2 14:55
+ @desc:
+"""
diff --git a/apps/function_lib/swagger_api/function_lib_api.py b/apps/function_lib/swagger_api/function_lib_api.py
new file mode 100644
index 00000000000..f0d409efaf8
--- /dev/null
+++ b/apps/function_lib/swagger_api/function_lib_api.py
@@ -0,0 +1,264 @@
+# coding=utf-8
+"""
+ @project: MaxKB
+ @Author:虎
+ @file: function_lib_api.py
+ @date:2024/8/2 17:11
+ @desc:
+"""
+from drf_yasg import openapi
+
+from common.mixins.api_mixin import ApiMixin
+from django.utils.translation import gettext_lazy as _
+
+
+class FunctionLibApi(ApiMixin):
+ @staticmethod
+ def get_response_body_api():
+ return openapi.Schema(
+ type=openapi.TYPE_OBJECT,
+ required=['id', 'name', 'desc', 'code', 'input_field_list', 'create_time',
+ 'update_time'],
+ properties={
+ 'id': openapi.Schema(type=openapi.TYPE_STRING, title="", description=_('ID')),
+ 'name': openapi.Schema(type=openapi.TYPE_STRING, title=_('function name'),
+ description=_('function name')),
+ 'desc': openapi.Schema(type=openapi.TYPE_STRING, title=_('function description'),
+ description=_('function description')),
+ 'code': openapi.Schema(type=openapi.TYPE_STRING, title=_('function content'),
+ description=_('function content')),
+ 'input_field_list': openapi.Schema(type=openapi.TYPE_STRING, title=_('input field'),
+ description=_('input field')),
+ 'create_time': openapi.Schema(type=openapi.TYPE_STRING, title=_('create time'),
+ description=_('create time')),
+ 'update_time': openapi.Schema(type=openapi.TYPE_STRING, title=_('update time'),
+ description=_('update time')),
+ }
+ )
+
+ class Query(ApiMixin):
+ @staticmethod
+ def get_request_params_api():
+ return [openapi.Parameter(name='name',
+ in_=openapi.IN_QUERY,
+ type=openapi.TYPE_STRING,
+ required=False,
+ description=_('function name')),
+ openapi.Parameter(name='desc',
+ in_=openapi.IN_QUERY,
+ type=openapi.TYPE_STRING,
+ required=False,
+ description=_('function description')),
+ ]
+
+ class Debug(ApiMixin):
+ @staticmethod
+ def get_request_body_api():
+ return openapi.Schema(
+ type=openapi.TYPE_OBJECT,
+ required=[],
+ properties={
+ 'debug_field_list': openapi.Schema(type=openapi.TYPE_ARRAY,
+ description=_('Input variable list'),
+ items=openapi.Schema(type=openapi.TYPE_OBJECT,
+ required=[],
+ properties={
+ 'name': openapi.Schema(
+ type=openapi.TYPE_STRING,
+ title=_('variable name'),
+ description=_('variable name')),
+ 'value': openapi.Schema(
+ type=openapi.TYPE_STRING,
+ title=_('variable value'),
+ description=_('variable value')),
+ })),
+ 'code': openapi.Schema(type=openapi.TYPE_STRING, title=_('function content'),
+ description=_('function content')),
+ 'input_field_list': openapi.Schema(type=openapi.TYPE_ARRAY,
+ description=_('Input variable list'),
+ items=openapi.Schema(type=openapi.TYPE_OBJECT,
+ required=['name', 'is_required', 'source'],
+ properties={
+ 'name': openapi.Schema(
+ type=openapi.TYPE_STRING,
+ title=_('variable name'),
+ description=_('variable name')),
+ 'is_required': openapi.Schema(
+ type=openapi.TYPE_BOOLEAN,
+ title=_('required'),
+ description=_('required')),
+ 'type': openapi.Schema(
+ type=openapi.TYPE_STRING,
+ title=_('type'),
+ description=_(
+ 'Field type string|int|dict|array|float')
+ ),
+ 'source': openapi.Schema(
+ type=openapi.TYPE_STRING,
+ title=_('source'),
+ description=_(
+ 'The source only supports custom|reference')),
+
+ }))
+ }
+ )
+
+ class Edit(ApiMixin):
+ @staticmethod
+ def get_request_body_api():
+ return openapi.Schema(
+ type=openapi.TYPE_OBJECT,
+ required=[],
+ properties={
+ 'name': openapi.Schema(type=openapi.TYPE_STRING, title=_('function name'),
+ description=_('function name')),
+ 'desc': openapi.Schema(type=openapi.TYPE_STRING, title=_('function description'),
+ description=_('function description')),
+ 'code': openapi.Schema(type=openapi.TYPE_STRING, title=_('function content'),
+ description=_('function content')),
+ 'permission_type': openapi.Schema(type=openapi.TYPE_STRING, title=_('permission'),
+ description=_('permission')),
+ 'is_active': openapi.Schema(type=openapi.TYPE_BOOLEAN, title=_('Is active'),
+ description=_('Is active')),
+ 'input_field_list': openapi.Schema(type=openapi.TYPE_ARRAY,
+ description=_('Input variable list'),
+ items=openapi.Schema(type=openapi.TYPE_OBJECT,
+ required=[],
+ properties={
+ 'name': openapi.Schema(
+ type=openapi.TYPE_STRING,
+ title=_('variable name'),
+ description=_('variable name')),
+ 'is_required': openapi.Schema(
+ type=openapi.TYPE_BOOLEAN,
+ title=_('required'),
+ description=_('required')),
+ 'type': openapi.Schema(
+ type=openapi.TYPE_STRING,
+ title=_('type'),
+ description=_(
+ 'Field type string|int|dict|array|float')
+ ),
+ 'source': openapi.Schema(
+ type=openapi.TYPE_STRING,
+ title=_('source'),
+ description=_(
+ 'The source only supports custom|reference')),
+
+ }))
+ }
+ )
+
+ class Create(ApiMixin):
+ @staticmethod
+ def get_request_body_api():
+ return openapi.Schema(
+ type=openapi.TYPE_OBJECT,
+ required=['name', 'code', 'input_field_list', 'permission_type'],
+ properties={
+ 'name': openapi.Schema(type=openapi.TYPE_STRING, title=_('function name'),
+ description=_('function name')),
+ 'desc': openapi.Schema(type=openapi.TYPE_STRING, title=_('function description'),
+ description=_('function description')),
+ 'code': openapi.Schema(type=openapi.TYPE_STRING, title=_('function content'),
+ description=_('function content')),
+ 'permission_type': openapi.Schema(type=openapi.TYPE_STRING, title=_('permission'),
+ description=_('permission')),
+ 'is_active': openapi.Schema(type=openapi.TYPE_BOOLEAN, title=_('Is active'),
+ description=_('Is active')),
+ 'input_field_list': openapi.Schema(type=openapi.TYPE_ARRAY,
+ description=_('Input variable list'),
+ items=openapi.Schema(type=openapi.TYPE_OBJECT,
+ required=['name', 'is_required', 'source'],
+ properties={
+ 'name': openapi.Schema(
+ type=openapi.TYPE_STRING,
+ title=_('variable name'),
+ description=_('variable name')),
+ 'is_required': openapi.Schema(
+ type=openapi.TYPE_BOOLEAN,
+ title=_('required'),
+ description=_('required')),
+ 'type': openapi.Schema(
+ type=openapi.TYPE_STRING,
+ title=_('type'),
+ description=_(
+ 'Field type string|int|dict|array|float')
+ ),
+ 'source': openapi.Schema(
+ type=openapi.TYPE_STRING,
+ title=_('source'),
+ description=_(
+ 'The source only supports custom|reference')),
+
+ }))
+ }
+ )
+
+ @staticmethod
+ def get_response_body_api():
+ return openapi.Schema(
+ type=openapi.TYPE_OBJECT,
+ required=['id', 'name', 'code', 'input_field_list', 'permission_type'],
+ properties={
+ 'id': openapi.Schema(type=openapi.TYPE_STRING, title="", description=_('ID')),
+
+ 'name': openapi.Schema(type=openapi.TYPE_STRING, title=_('function name'),
+ description=_('function name')),
+ 'desc': openapi.Schema(type=openapi.TYPE_STRING, title=_('function description'),
+ description=_('function description')),
+ 'code': openapi.Schema(type=openapi.TYPE_STRING, title=_('function content'),
+ description=_('function content')),
+ 'permission_type': openapi.Schema(type=openapi.TYPE_STRING, title=_('permission'),
+ description=_('permission')),
+ 'is_active': openapi.Schema(type=openapi.TYPE_BOOLEAN, title=_('Is active'),
+ description=_('Is active')),
+ 'input_field_list': openapi.Schema(type=openapi.TYPE_ARRAY,
+ description=_('Input variable list'),
+ items=openapi.Schema(type=openapi.TYPE_OBJECT,
+ required=['name', 'is_required', 'source'],
+ properties={
+ 'name': openapi.Schema(
+ type=openapi.TYPE_STRING,
+ title=_('variable name'),
+ description=_('variable name')),
+ 'is_required': openapi.Schema(
+ type=openapi.TYPE_BOOLEAN,
+ title=_('required'),
+ description=_('required')),
+ 'type': openapi.Schema(
+ type=openapi.TYPE_STRING,
+ title=_('type'),
+ description=_(
+ 'Field type string|int|dict|array|float')
+ ),
+ 'source': openapi.Schema(
+ type=openapi.TYPE_STRING,
+ title=_('source'),
+ description=_(
+ 'The source only supports custom|reference')),
+
+ }))
+ }
+ )
+
+ class Export(ApiMixin):
+ @staticmethod
+ def get_request_params_api():
+ return [openapi.Parameter(name='id',
+ in_=openapi.IN_PATH,
+ type=openapi.TYPE_STRING,
+ required=True,
+ description=_('ID')),
+
+ ]
+
+ class Import(ApiMixin):
+ @staticmethod
+ def get_request_params_api():
+ return [openapi.Parameter(name='file',
+ in_=openapi.IN_FORM,
+ type=openapi.TYPE_FILE,
+ required=True,
+ description=_('Upload image files'))
+ ]
diff --git a/apps/function_lib/swagger_api/py_lint_api.py b/apps/function_lib/swagger_api/py_lint_api.py
new file mode 100644
index 00000000000..1577dfe60a1
--- /dev/null
+++ b/apps/function_lib/swagger_api/py_lint_api.py
@@ -0,0 +1,25 @@
+# coding=utf-8
+"""
+ @project: MaxKB
+ @Author:虎
+ @file: py_lint_api.py
+ @date:2024/9/30 15:48
+ @desc:
+"""
+from drf_yasg import openapi
+
+from common.mixins.api_mixin import ApiMixin
+from django.utils.translation import gettext_lazy as _
+
+
+class PyLintApi(ApiMixin):
+ @staticmethod
+ def get_request_body_api():
+ return openapi.Schema(
+ type=openapi.TYPE_OBJECT,
+ required=['code'],
+ properties={
+ 'code': openapi.Schema(type=openapi.TYPE_STRING, title=_('function content'),
+ description=_('function content'))
+ }
+ )
diff --git a/apps/function_lib/task/__init__.py b/apps/function_lib/task/__init__.py
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/apps/function_lib/tests.py b/apps/function_lib/tests.py
new file mode 100644
index 00000000000..7ce503c2dd9
--- /dev/null
+++ b/apps/function_lib/tests.py
@@ -0,0 +1,3 @@
+from django.test import TestCase
+
+# Create your tests here.
diff --git a/apps/function_lib/urls.py b/apps/function_lib/urls.py
new file mode 100644
index 00000000000..036d6892601
--- /dev/null
+++ b/apps/function_lib/urls.py
@@ -0,0 +1,17 @@
+from django.urls import path
+
+from . import views
+
+app_name = "function_lib"
+urlpatterns = [
+ path('function_lib', views.FunctionLibView.as_view()),
+ path('function_lib/debug', views.FunctionLibView.Debug.as_view()),
+ path('function_lib//export', views.FunctionLibView.Export.as_view()),
+ path('function_lib/import', views.FunctionLibView.Import.as_view()),
+ path('function_lib//edit_icon', views.FunctionLibView.EditIcon.as_view()),
+ path('function_lib//add_internal_fun', views.FunctionLibView.AddInternalFun.as_view()),
+ path('function_lib/pylint', views.PyLintView.as_view()),
+ path('function_lib/', views.FunctionLibView.Operate.as_view()),
+ path("function_lib//", views.FunctionLibView.Page.as_view(),
+ name="function_lib_page")
+]
diff --git a/apps/function_lib/views/__init__.py b/apps/function_lib/views/__init__.py
new file mode 100644
index 00000000000..ad3240be184
--- /dev/null
+++ b/apps/function_lib/views/__init__.py
@@ -0,0 +1,10 @@
+# coding=utf-8
+"""
+ @project: MaxKB
+ @Author:虎
+ @file: __init__.py
+ @date:2024/8/2 14:53
+ @desc:
+"""
+from .function_lib_views import *
+from .py_lint import *
diff --git a/apps/function_lib/views/common.py b/apps/function_lib/views/common.py
new file mode 100644
index 00000000000..5509964f57f
--- /dev/null
+++ b/apps/function_lib/views/common.py
@@ -0,0 +1,20 @@
+# coding=utf-8
+"""
+ @project: MaxKB
+ @Author:虎
+ @file: common.py
+ @date:2025/3/25 17:27
+ @desc:
+"""
+from django.db.models import QuerySet
+
+from function_lib.models.function import FunctionLib
+
+
+def get_function_lib_operation_object(function_lib_id):
+ function_lib_model = QuerySet(model=FunctionLib).filter(id=function_lib_id).first()
+ if function_lib_model is not None:
+ return {
+ "name": function_lib_model.name
+ }
+ return {}
diff --git a/apps/function_lib/views/function_lib_views.py b/apps/function_lib/views/function_lib_views.py
new file mode 100644
index 00000000000..e865566e3be
--- /dev/null
+++ b/apps/function_lib/views/function_lib_views.py
@@ -0,0 +1,184 @@
+# coding=utf-8
+"""
+ @project: MaxKB
+ @Author:虎
+ @file: function_lib_views.py
+ @date:2024/8/2 17:08
+ @desc:
+"""
+from django.utils.translation import gettext_lazy as _
+from drf_yasg.utils import swagger_auto_schema
+from rest_framework.decorators import action
+from rest_framework.parsers import MultiPartParser
+from rest_framework.request import Request
+from rest_framework.views import APIView
+
+from common.auth import TokenAuth, has_permissions
+from common.constants.permission_constants import RoleConstants
+from common.log.log import log
+from common.response import result
+from function_lib.serializers.function_lib_serializer import FunctionLibSerializer
+from function_lib.swagger_api.function_lib_api import FunctionLibApi
+from function_lib.views.common import get_function_lib_operation_object
+
+
+class FunctionLibView(APIView):
+ authentication_classes = [TokenAuth]
+
+ @action(methods=["GET"], detail=False)
+ @swagger_auto_schema(operation_summary=_('Get function list'),
+ operation_id=_('Get function list'),
+ tags=[_('Function')],
+ manual_parameters=FunctionLibApi.Query.get_request_params_api())
+ @has_permissions(RoleConstants.ADMIN, RoleConstants.USER)
+ @log(menu='Function', operate="Get function list")
+ def get(self, request: Request):
+ return result.success(
+ FunctionLibSerializer.Query(
+ data={'name': request.query_params.get('name'),
+ 'desc': request.query_params.get('desc'),
+ 'function_type': request.query_params.get('function_type'),
+ 'user_id': request.user.id}).list())
+
+ @action(methods=['POST'], detail=False)
+ @swagger_auto_schema(operation_summary=_('Create function'),
+ operation_id=_('Create function'),
+ request_body=FunctionLibApi.Create.get_request_body_api(),
+ responses=result.get_api_response(FunctionLibApi.Create.get_response_body_api()),
+ tags=[_('Function')])
+ @has_permissions(RoleConstants.ADMIN, RoleConstants.USER)
+ @log(menu='Function', operate="Create function",
+ get_operation_object=lambda r, k: r.data.get('name'))
+ def post(self, request: Request):
+ return result.success(FunctionLibSerializer.Create(data={'user_id': request.user.id}).insert(request.data))
+
+ class Debug(APIView):
+ authentication_classes = [TokenAuth]
+
+ @action(methods=['POST'], detail=False)
+ @swagger_auto_schema(operation_summary=_('Debug function'),
+ operation_id=_('Debug function'),
+ request_body=FunctionLibApi.Debug.get_request_body_api(),
+ responses=result.get_default_response(),
+ tags=[_('Function')])
+ @has_permissions(RoleConstants.ADMIN, RoleConstants.USER)
+ def post(self, request: Request):
+ return result.success(
+ FunctionLibSerializer.Debug(data={'user_id': request.user.id}).debug(
+ request.data))
+
+ class Operate(APIView):
+ authentication_classes = [TokenAuth]
+
+ @action(methods=['PUT'], detail=False)
+ @swagger_auto_schema(operation_summary=_('Update function'),
+ operation_id=_('Update function'),
+ request_body=FunctionLibApi.Edit.get_request_body_api(),
+ responses=result.get_api_response(FunctionLibApi.Edit.get_request_body_api()),
+ tags=[_('Function')])
+ @has_permissions(RoleConstants.ADMIN, RoleConstants.USER)
+ @log(menu='Function', operate="Update function",
+ get_operation_object=lambda r, k: get_function_lib_operation_object(k.get('function_lib_id')))
+ def put(self, request: Request, function_lib_id: str):
+ return result.success(
+ FunctionLibSerializer.Operate(data={'user_id': request.user.id, 'id': function_lib_id}).edit(
+ request.data))
+
+ @action(methods=['DELETE'], detail=False)
+ @swagger_auto_schema(operation_summary=_('Delete function'),
+ operation_id=_('Delete function'),
+ responses=result.get_default_response(),
+ tags=[_('Function')])
+ @has_permissions(RoleConstants.ADMIN, RoleConstants.USER)
+ @log(menu='Function', operate="Delete function",
+ get_operation_object=lambda r, k: get_function_lib_operation_object(k.get('function_lib_id')))
+ def delete(self, request: Request, function_lib_id: str):
+ return result.success(
+ FunctionLibSerializer.Operate(data={'user_id': request.user.id, 'id': function_lib_id}).delete())
+
+ @action(methods=['GET'], detail=False)
+ @swagger_auto_schema(operation_summary=_('Get function details'),
+ operation_id=_('Get function details'),
+ tags=[_('Function')])
+ @has_permissions(RoleConstants.ADMIN, RoleConstants.USER)
+ def get(self, request: Request, function_lib_id: str):
+ return result.success(
+ FunctionLibSerializer.Operate(data={'user_id': request.user.id, 'id': function_lib_id}).one())
+
+ class Page(APIView):
+ authentication_classes = [TokenAuth]
+
+ @action(methods=['GET'], detail=False)
+ @swagger_auto_schema(operation_summary=_('Get function list by pagination'),
+ operation_id=_('Get function list by pagination'),
+ manual_parameters=result.get_page_request_params(
+ FunctionLibApi.Query.get_request_params_api()),
+ responses=result.get_page_api_response(FunctionLibApi.get_response_body_api()),
+ tags=[_('Function')])
+ @has_permissions(RoleConstants.ADMIN, RoleConstants.USER)
+ def get(self, request: Request, current_page: int, page_size: int):
+ return result.success(
+ FunctionLibSerializer.Query(
+ data={'name': request.query_params.get('name'),
+ 'desc': request.query_params.get('desc'),
+ 'function_type': request.query_params.get('function_type'),
+ 'user_id': request.user.id,
+ 'select_user_id': request.query_params.get('select_user_id')}).page(
+ current_page, page_size))
+
+ class Import(APIView):
+ authentication_classes = [TokenAuth]
+ parser_classes = [MultiPartParser]
+
+ @action(methods="POST", detail=False)
+ @swagger_auto_schema(operation_summary=_("Import function"), operation_id=_("Import function"),
+ manual_parameters=FunctionLibApi.Import.get_request_params_api(),
+ tags=[_("function")]
+ )
+ @has_permissions(RoleConstants.ADMIN, RoleConstants.USER)
+ @log(menu='Function', operate="Import function")
+ def post(self, request: Request):
+ return result.success(FunctionLibSerializer.Import(
+ data={'user_id': request.user.id, 'file': request.FILES.get('file')}).import_())
+
+ class Export(APIView):
+ authentication_classes = [TokenAuth]
+
+ @action(methods="GET", detail=False)
+ @swagger_auto_schema(operation_summary=_("Export function"), operation_id=_("Export function"),
+ manual_parameters=FunctionLibApi.Export.get_request_params_api(),
+ tags=[_("function")]
+ )
+ @has_permissions(RoleConstants.ADMIN, RoleConstants.USER)
+ @log(menu='Function', operate="Export function",
+ get_operation_object=lambda r, k: get_function_lib_operation_object(k.get('id')))
+ def get(self, request: Request, id: str):
+ return FunctionLibSerializer.Operate(
+ data={'id': id, 'user_id': request.user.id}).export()
+
+ class EditIcon(APIView):
+ authentication_classes = [TokenAuth]
+ parser_classes = [MultiPartParser]
+
+ @action(methods=['PUT'], detail=False)
+ @has_permissions(RoleConstants.ADMIN, RoleConstants.USER)
+ @log(menu='Function', operate="Edit icon",
+ get_operation_object=lambda r, k: get_function_lib_operation_object(k.get('id')))
+ def put(self, request: Request, id: str):
+ return result.success(
+ FunctionLibSerializer.IconOperate(
+ data={'id': id, 'user_id': request.user.id,
+ 'image': request.FILES.get('file')}).edit(request.data))
+
+ class AddInternalFun(APIView):
+ authentication_classes = [TokenAuth]
+
+ @action(methods=['POST'], detail=False)
+ @has_permissions(RoleConstants.ADMIN, RoleConstants.USER)
+ @log(menu='Function', operate="Add internal function",
+ get_operation_object=lambda r, k: get_function_lib_operation_object(k.get('id')))
+ def post(self, request: Request, id: str):
+ return result.success(
+ FunctionLibSerializer.InternalFunction(
+ data={'id': id, 'user_id': request.user.id, 'name': request.data.get('name')})
+ .add())
diff --git a/apps/function_lib/views/py_lint.py b/apps/function_lib/views/py_lint.py
new file mode 100644
index 00000000000..a0bee2a4c02
--- /dev/null
+++ b/apps/function_lib/views/py_lint.py
@@ -0,0 +1,33 @@
+# coding=utf-8
+"""
+ @project: MaxKB
+ @Author:虎
+ @file: py_lint.py
+ @date:2024/9/30 15:35
+ @desc:
+"""
+from drf_yasg.utils import swagger_auto_schema
+from rest_framework.decorators import action
+from rest_framework.request import Request
+from rest_framework.views import APIView
+
+from common.auth import TokenAuth, has_permissions
+from common.constants.permission_constants import RoleConstants
+from common.response import result
+from function_lib.serializers.py_lint_serializer import PyLintSerializer
+from function_lib.swagger_api.py_lint_api import PyLintApi
+from django.utils.translation import gettext_lazy as _
+
+
+class PyLintView(APIView):
+ authentication_classes = [TokenAuth]
+
+ @action(methods=['POST'], detail=False)
+ @swagger_auto_schema(operation_summary=_('Check code'),
+ operation_id=_('Check code'),
+ request_body=PyLintApi.get_request_body_api(),
+ responses=result.get_api_response(PyLintApi.get_request_body_api()),
+ tags=[_('Function')])
+ @has_permissions(RoleConstants.ADMIN, RoleConstants.USER)
+ def post(self, request: Request):
+ return result.success(PyLintSerializer(data={'user_id': request.user.id}).pylint(request.data))
diff --git a/apps/locales/en_US/LC_MESSAGES/django.po b/apps/locales/en_US/LC_MESSAGES/django.po
new file mode 100644
index 00000000000..e068ff410a0
--- /dev/null
+++ b/apps/locales/en_US/LC_MESSAGES/django.po
@@ -0,0 +1,7502 @@
+# SOME DESCRIPTIVE TITLE.
+# Copyright (C) YEAR THE PACKAGE'S COPYRIGHT HOLDER
+# This file is distributed under the same license as the PACKAGE package.
+# FIRST AUTHOR , YEAR.
+#
+msgid ""
+msgstr ""
+"Project-Id-Version: PACKAGE VERSION\n"
+"Report-Msgid-Bugs-To: \n"
+"POT-Creation-Date: 2025-03-20 14:22+0800\n"
+"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
+"Last-Translator: FULL NAME \n"
+"Language-Team: LANGUAGE \n"
+"Language: \n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+
+#: apps/xpack/auth/user_key.py:26
+#: apps/xpack/serializers/license_serializers.py:96
+#: apps/xpack/serializers/license_tools.py:109
+msgid "The license is invalid"
+msgstr ""
+
+#: apps/xpack/auth/user_key.py:32 apps/xpack/auth/user_key.py:34
+msgid "secret_key is invalid"
+msgstr "Secret key is invalid"
+
+#: apps/xpack/middleware/swagger_middleware.py:19
+msgid "The license has not been uploaded or the license has expired"
+msgstr ""
+
+#: apps/xpack/serializers/application_setting_serializer.py:20
+msgid "theme color"
+msgstr "Theme color"
+
+#: apps/xpack/serializers/application_setting_serializer.py:22
+msgid "header font color"
+msgstr "Header font color"
+
+#: apps/xpack/serializers/application_setting_serializer.py:26
+msgid "float location type"
+msgstr "Float location type"
+
+#: apps/xpack/serializers/application_setting_serializer.py:27
+msgid "float location value"
+msgstr "Float location value"
+
+#: apps/xpack/serializers/application_setting_serializer.py:31
+msgid "float location x"
+msgstr "Float location x"
+
+#: apps/xpack/serializers/application_setting_serializer.py:32
+msgid "float location y"
+msgstr "Float location y"
+
+#: apps/xpack/serializers/application_setting_serializer.py:36
+#: apps/xpack/swagger_api/application_setting_api.py:23
+msgid "show source"
+msgstr "Show Source"
+
+#: apps/xpack/serializers/application_setting_serializer.py:37
+#: community/apps/application/serializers/application_serializers.py:354
+#: community/apps/application/swagger_api/application_api.py:169
+#: community/apps/application/swagger_api/application_api.py:170
+#: community/apps/users/serializers/user_serializers.py:273
+#: community/apps/users/views/user.py:85 community/apps/users/views/user.py:86
+msgid "language"
+msgstr "Language"
+
+#: apps/xpack/serializers/application_setting_serializer.py:38
+#: apps/xpack/swagger_api/application_setting_api.py:30
+msgid "show history"
+msgstr "Show History"
+
+#: apps/xpack/serializers/application_setting_serializer.py:39
+#: apps/xpack/swagger_api/application_setting_api.py:37
+msgid "draggable"
+msgstr "Draggable"
+
+#: apps/xpack/serializers/application_setting_serializer.py:40
+#: apps/xpack/swagger_api/application_setting_api.py:44
+msgid "show guide"
+msgstr "Show Guide"
+
+#: apps/xpack/serializers/application_setting_serializer.py:41
+#: apps/xpack/swagger_api/application_setting_api.py:51
+msgid "avatar"
+msgstr "Avatar"
+
+#: apps/xpack/serializers/application_setting_serializer.py:42
+msgid "avatar url"
+msgstr "Avatar URL"
+
+#: apps/xpack/serializers/application_setting_serializer.py:43
+#: apps/xpack/swagger_api/application_setting_api.py:86
+msgid "user avatar"
+msgstr "User avatar"
+
+#: apps/xpack/serializers/application_setting_serializer.py:44
+msgid "user avatar url"
+msgstr "User avatar URL"
+
+#: apps/xpack/serializers/application_setting_serializer.py:45
+#: apps/xpack/swagger_api/application_setting_api.py:58
+msgid "float icon"
+msgstr "Float icon"
+
+#: apps/xpack/serializers/application_setting_serializer.py:46
+msgid "float icon url"
+msgstr "Float icon URL"
+
+#: apps/xpack/serializers/application_setting_serializer.py:47
+#: apps/xpack/swagger_api/application_setting_api.py:65
+msgid "disclaimer"
+msgstr "Disclaimer"
+
+#: apps/xpack/serializers/application_setting_serializer.py:48
+#: apps/xpack/swagger_api/application_setting_api.py:72
+msgid "disclaimer value"
+msgstr "Disclaimer value"
+
+#: apps/xpack/serializers/application_setting_serializer.py:70
+#: apps/xpack/serializers/dataset_lark_serializer.py:373
+#: community/apps/dataset/serializers/dataset_serializers.py:548
+msgid "application id"
+msgstr "Application ID"
+
+#: apps/xpack/serializers/application_setting_serializer.py:96
+#: apps/xpack/serializers/platform_serializer.py:83
+#: apps/xpack/serializers/platform_serializer.py:105
+#: apps/xpack/serializers/platform_serializer.py:174
+#: apps/xpack/serializers/platform_serializer.py:185
+#: community/apps/application/serializers/application_serializers.py:1237
+#: community/apps/application/serializers/chat_message_serializers.py:424
+#: community/apps/application/serializers/chat_serializers.py:294
+#: community/apps/application/serializers/chat_serializers.py:396
+msgid "Application does not exist"
+msgstr ""
+
+#: apps/xpack/serializers/application_setting_serializer.py:116
+msgid "Float location field type error"
+msgstr ""
+
+#: apps/xpack/serializers/application_setting_serializer.py:122
+msgid "Custom theme field type error"
+msgstr ""
+
+#: apps/xpack/serializers/auth_config_serializer.py:19
+msgid "LDAP server cannot be empty"
+msgstr ""
+
+#: apps/xpack/serializers/auth_config_serializer.py:20
+msgid "Base DN cannot be empty"
+msgstr ""
+
+#: apps/xpack/serializers/auth_config_serializer.py:21
+msgid "Password cannot be empty"
+msgstr ""
+
+#: apps/xpack/serializers/auth_config_serializer.py:22
+msgid "OU cannot be empty"
+msgstr ""
+
+#: apps/xpack/serializers/auth_config_serializer.py:23
+msgid "LDAP filter cannot be empty"
+msgstr ""
+
+#: apps/xpack/serializers/auth_config_serializer.py:24
+msgid "LDAP mapping cannot be empty"
+msgstr ""
+
+#: apps/xpack/serializers/auth_config_serializer.py:29
+msgid "Authorization address cannot be empty"
+msgstr ""
+
+#: apps/xpack/serializers/auth_config_serializer.py:31
+msgid "Token address cannot be empty"
+msgstr ""
+
+#: apps/xpack/serializers/auth_config_serializer.py:33
+msgid "User information address cannot be empty"
+msgstr ""
+
+#: apps/xpack/serializers/auth_config_serializer.py:34
+msgid "Scope cannot be empty"
+msgstr ""
+
+#: apps/xpack/serializers/auth_config_serializer.py:35
+msgid "Client ID cannot be empty"
+msgstr ""
+
+#: apps/xpack/serializers/auth_config_serializer.py:36
+msgid "Client secret cannot be empty"
+msgstr ""
+
+#: apps/xpack/serializers/auth_config_serializer.py:38
+msgid "Redirect address cannot be empty"
+msgstr ""
+
+#: apps/xpack/serializers/auth_config_serializer.py:39
+msgid "Field mapping cannot be empty"
+msgstr ""
+
+#: apps/xpack/serializers/auth_config_serializer.py:166
+#: apps/xpack/serializers/qr_login/qr_login.py:33
+#: community/apps/users/serializers/user_serializers.py:89
+msgid "The user has been disabled, please contact the administrator!"
+msgstr ""
+
+#: apps/xpack/serializers/cas.py:32
+msgid "HttpClient query failed: "
+msgstr ""
+
+#: apps/xpack/serializers/cas.py:56
+msgid "CAS authentication failed"
+msgstr ""
+
+#: apps/xpack/serializers/channel/chat_manage.py:76
+#: apps/xpack/serializers/channel/chat_manage.py:134
+msgid ""
+"Sorry, no relevant content was found. Please re-describe your problem or "
+"provide more information. "
+msgstr ""
+
+#: apps/xpack/serializers/channel/chat_manage.py:82
+msgid "Think: "
+msgstr ""
+
+#: apps/xpack/serializers/channel/chat_manage.py:85
+#: apps/xpack/serializers/channel/chat_manage.py:87
+msgid "AI reply: "
+msgstr ""
+
+#: apps/xpack/serializers/channel/chat_manage.py:298
+msgid "Thinking, please wait a moment!"
+msgstr "Thinking, please wait a moment......"
+
+#: apps/xpack/serializers/channel/ding_talk.py:19
+#: apps/xpack/serializers/channel/wechat.py:89
+#: apps/xpack/serializers/channel/wechat.py:130
+#: apps/xpack/serializers/channel/wecom.py:76
+#: apps/xpack/serializers/channel/wecom.py:259
+msgid "The corresponding platform configuration was not found"
+msgstr ""
+
+#: apps/xpack/serializers/channel/ding_talk.py:27
+#: apps/xpack/serializers/channel/feishu.py:112
+msgid "Currently only text messages are supported"
+msgstr ""
+
+#: apps/xpack/serializers/channel/ding_talk.py:91
+#: apps/xpack/serializers/channel/wechat.py:161
+#: apps/xpack/serializers/channel/wecom.py:189
+msgid "Image download failed, check network"
+msgstr ""
+
+#: apps/xpack/serializers/channel/ding_talk.py:92
+#: apps/xpack/serializers/channel/wechat.py:159
+#: apps/xpack/serializers/channel/wecom.py:185
+msgid "Please analyze the content of the image."
+msgstr ""
+
+#: apps/xpack/serializers/channel/ding_talk.py:95
+#, python-brace-format
+msgid "DingTalk application: {user}"
+msgstr ""
+
+#: apps/xpack/serializers/channel/ding_talk.py:106
+#: apps/xpack/serializers/channel/ding_talk.py:151
+msgid "Content generated by AI"
+msgstr ""
+
+#: apps/xpack/serializers/channel/feishu.py:87
+#: apps/xpack/serializers/channel/feishu.py:107
+msgid "Lark application: "
+msgstr ""
+
+#: apps/xpack/serializers/channel/slack.py:116
+msgid "The corresponding platform configuration for Slack was not found"
+msgstr ""
+
+#: apps/xpack/serializers/channel/slack.py:206
+msgid "Thinking..."
+msgstr ""
+
+#: apps/xpack/serializers/channel/slack.py:321
+msgid "Invalid json format."
+msgstr ""
+
+#: apps/xpack/serializers/channel/slack.py:327
+msgid "Invalid Slack request"
+msgstr ""
+
+#: apps/xpack/serializers/channel/slack.py:335
+#| msgid "application name"
+msgid "Slack application: {user}"
+msgstr ""
+
+#: apps/xpack/serializers/channel/slack.py:471
+msgid "Stop"
+msgstr ""
+
+#: apps/xpack/serializers/channel/wechat.py:141
+#, python-brace-format
+msgid "WeChat Official Account: {account}"
+msgstr ""
+
+#: apps/xpack/serializers/channel/wechat.py:148
+#: apps/xpack/serializers/channel/wecom.py:171
+#: apps/xpack/serializers/channel/wecom.py:175
+msgid ""
+"The app does not enable the speech-to-text function or the speech-to-text "
+"function fails."
+msgstr ""
+
+#: apps/xpack/serializers/channel/wechat.py:187
+msgid "Message types not supported yet"
+msgstr ""
+
+#: apps/xpack/serializers/channel/wechat.py:194
+msgid "Welcome to subscribe"
+msgstr ""
+
+#: apps/xpack/serializers/channel/wecom.py:84
+msgid "Enterprise WeChat user: "
+msgstr ""
+
+#: apps/xpack/serializers/channel/wecom.py:95
+msgid "Enterprise WeChat customer service: "
+msgstr ""
+
+#: apps/xpack/serializers/channel/wecom.py:132
+#: apps/xpack/serializers/channel/wecom.py:148
+msgid "This type of message is not supported yet"
+msgstr ""
+
+#: apps/xpack/serializers/channel/wecom.py:254
+msgid "Signature missing"
+msgstr ""
+
+#: apps/xpack/serializers/channel/wecom.py:266
+#: apps/xpack/serializers/channel/wecom.py:273
+#, python-brace-format
+msgid "An error occurred while processing the GET request {e}"
+msgstr ""
+
+#: apps/xpack/serializers/dataset_lark_serializer.py:58
+#: community/apps/dataset/serializers/dataset_serializers.py:82
+#: community/apps/dataset/serializers/dataset_serializers.py:214
+#: community/apps/dataset/serializers/dataset_serializers.py:295
+#: community/apps/dataset/serializers/dataset_serializers.py:296
+#: community/apps/dataset/serializers/dataset_serializers.py:357
+#: community/apps/dataset/serializers/dataset_serializers.py:358
+#: community/apps/dataset/serializers/dataset_serializers.py:502
+#: community/apps/dataset/serializers/dataset_serializers.py:503
+#: community/apps/dataset/serializers/dataset_serializers.py:568
+#: community/apps/dataset/serializers/dataset_serializers.py:607
+#: community/apps/dataset/serializers/dataset_serializers.py:701
+#: community/apps/dataset/serializers/dataset_serializers.py:933
+#: community/apps/dataset/serializers/dataset_serializers.py:934
+#: community/apps/dataset/serializers/document_serializers.py:816
+#: community/apps/function_lib/serializers/function_lib_serializer.py:141
+#: community/apps/function_lib/serializers/function_lib_serializer.py:186
+#: community/apps/function_lib/serializers/function_lib_serializer.py:203
+#: community/apps/function_lib/serializers/function_lib_serializer.py:262
+#: community/apps/setting/serializers/provider_serializers.py:76
+#: community/apps/setting/serializers/provider_serializers.py:127
+#: community/apps/setting/serializers/provider_serializers.py:174
+#: community/apps/setting/serializers/provider_serializers.py:256
+#: community/apps/setting/serializers/provider_serializers.py:277
+#: community/apps/setting/serializers/provider_serializers.py:301
+#: community/apps/setting/serializers/team_serializers.py:42
+#: community/apps/users/serializers/user_serializers.py:272
+msgid "user id"
+msgstr "User ID"
+
+#: apps/xpack/serializers/dataset_lark_serializer.py:61
+#: apps/xpack/serializers/dataset_lark_serializer.py:112
+#: apps/xpack/serializers/dataset_lark_serializer.py:113
+#: apps/xpack/serializers/dataset_lark_serializer.py:367
+#: community/apps/dataset/serializers/dataset_serializers.py:137
+#: community/apps/dataset/serializers/dataset_serializers.py:201
+#: community/apps/dataset/serializers/dataset_serializers.py:221
+#: community/apps/dataset/serializers/dataset_serializers.py:244
+#: community/apps/dataset/serializers/dataset_serializers.py:273
+#: community/apps/dataset/serializers/dataset_serializers.py:274
+#: community/apps/dataset/serializers/dataset_serializers.py:291
+#: community/apps/dataset/serializers/dataset_serializers.py:292
+#: community/apps/dataset/serializers/dataset_serializers.py:319
+#: community/apps/dataset/serializers/dataset_serializers.py:353
+#: community/apps/dataset/serializers/dataset_serializers.py:354
+#: community/apps/dataset/serializers/dataset_serializers.py:382
+#: community/apps/dataset/serializers/dataset_serializers.py:383
+#: community/apps/dataset/serializers/dataset_serializers.py:498
+#: community/apps/dataset/serializers/dataset_serializers.py:499
+#: community/apps/dataset/serializers/dataset_serializers.py:527
+#: community/apps/dataset/serializers/dataset_serializers.py:528
+#: community/apps/dataset/serializers/dataset_serializers.py:542
+#: community/apps/dataset/serializers/dataset_serializers.py:907
+#: community/apps/dataset/serializers/dataset_serializers.py:908
+#: community/apps/dataset/serializers/dataset_serializers.py:929
+#: community/apps/dataset/serializers/dataset_serializers.py:930
+msgid "dataset name"
+msgstr "Knowledge Base Name"
+
+#: apps/xpack/serializers/dataset_lark_serializer.py:63
+#: apps/xpack/serializers/dataset_lark_serializer.py:114
+#: apps/xpack/serializers/dataset_lark_serializer.py:115
+#: apps/xpack/serializers/dataset_lark_serializer.py:369
+#: community/apps/dataset/serializers/dataset_serializers.py:142
+#: community/apps/dataset/serializers/dataset_serializers.py:206
+#: community/apps/dataset/serializers/dataset_serializers.py:226
+#: community/apps/dataset/serializers/dataset_serializers.py:249
+#: community/apps/dataset/serializers/dataset_serializers.py:278
+#: community/apps/dataset/serializers/dataset_serializers.py:279
+#: community/apps/dataset/serializers/dataset_serializers.py:293
+#: community/apps/dataset/serializers/dataset_serializers.py:294
+#: community/apps/dataset/serializers/dataset_serializers.py:324
+#: community/apps/dataset/serializers/dataset_serializers.py:355
+#: community/apps/dataset/serializers/dataset_serializers.py:356
+#: community/apps/dataset/serializers/dataset_serializers.py:384
+#: community/apps/dataset/serializers/dataset_serializers.py:385
+#: community/apps/dataset/serializers/dataset_serializers.py:500
+#: community/apps/dataset/serializers/dataset_serializers.py:501
+#: community/apps/dataset/serializers/dataset_serializers.py:529
+#: community/apps/dataset/serializers/dataset_serializers.py:530
+#: community/apps/dataset/serializers/dataset_serializers.py:544
+#: community/apps/dataset/serializers/dataset_serializers.py:909
+#: community/apps/dataset/serializers/dataset_serializers.py:910
+#: community/apps/dataset/serializers/dataset_serializers.py:931
+#: community/apps/dataset/serializers/dataset_serializers.py:932
+msgid "dataset description"
+msgstr "Knowledge Base Description"
+
+#: apps/xpack/serializers/dataset_lark_serializer.py:65
+#: apps/xpack/serializers/dataset_lark_serializer.py:118
+#: apps/xpack/serializers/dataset_lark_serializer.py:377
+msgid "app id"
+msgstr ""
+
+#: apps/xpack/serializers/dataset_lark_serializer.py:66
+#: apps/xpack/serializers/dataset_lark_serializer.py:119
+#: apps/xpack/serializers/dataset_lark_serializer.py:120
+#: apps/xpack/serializers/dataset_lark_serializer.py:378
+msgid "app secret"
+msgstr ""
+
+#: apps/xpack/serializers/dataset_lark_serializer.py:67
+#: apps/xpack/serializers/dataset_lark_serializer.py:121
+#: apps/xpack/serializers/dataset_lark_serializer.py:122
+#: apps/xpack/serializers/dataset_lark_serializer.py:132
+#: apps/xpack/serializers/dataset_lark_serializer.py:165
+#: apps/xpack/serializers/dataset_lark_serializer.py:379
+msgid "folder token"
+msgstr ""
+
+#: apps/xpack/serializers/dataset_lark_serializer.py:69
+#: apps/xpack/serializers/dataset_lark_serializer.py:116
+#: apps/xpack/serializers/dataset_lark_serializer.py:117
+#: community/apps/dataset/serializers/dataset_serializers.py:231
+#: community/apps/dataset/serializers/dataset_serializers.py:254
+#: community/apps/dataset/serializers/dataset_serializers.py:330
+#: community/apps/dataset/serializers/dataset_serializers.py:386
+#: community/apps/dataset/serializers/dataset_serializers.py:387
+#: community/apps/dataset/serializers/dataset_serializers.py:531
+#: community/apps/dataset/serializers/dataset_serializers.py:532
+msgid "embedding mode"
+msgstr "Embedding mode"
+
+#: apps/xpack/serializers/dataset_lark_serializer.py:79
+#: apps/xpack/serializers/dataset_lark_serializer.py:389
+msgid "Network error or folder token error!"
+msgstr ""
+
+#: apps/xpack/serializers/dataset_lark_serializer.py:87
+#: apps/xpack/serializers/dataset_lark_serializer.py:444
+#: community/apps/dataset/serializers/dataset_serializers.py:424
+#: community/apps/dataset/serializers/dataset_serializers.py:476
+#: community/apps/dataset/serializers/dataset_serializers.py:865
+msgid "Knowledge base name duplicate!"
+msgstr ""
+
+#: apps/xpack/serializers/dataset_lark_serializer.py:130
+#: apps/xpack/serializers/dataset_lark_serializer.py:164
+#: apps/xpack/serializers/dataset_lark_serializer.py:201
+#: apps/xpack/serializers/dataset_lark_serializer.py:221
+#: apps/xpack/serializers/dataset_lark_serializer.py:346
+#: apps/xpack/serializers/dataset_lark_serializer.py:363
+#: community/apps/common/swagger_api/common_api.py:68
+#: community/apps/common/swagger_api/common_api.py:69
+#: community/apps/dataset/serializers/dataset_serializers.py:84
+#: community/apps/dataset/serializers/dataset_serializers.py:93
+#: community/apps/dataset/serializers/dataset_serializers.py:605
+#: community/apps/dataset/serializers/dataset_serializers.py:688
+#: community/apps/dataset/serializers/dataset_serializers.py:699
+#: community/apps/dataset/serializers/dataset_serializers.py:955
+#: community/apps/dataset/serializers/document_serializers.py:169
+#: community/apps/dataset/serializers/document_serializers.py:286
+#: community/apps/dataset/serializers/document_serializers.py:407
+#: community/apps/dataset/serializers/document_serializers.py:573
+#: community/apps/dataset/serializers/document_serializers.py:1055
+#: community/apps/dataset/serializers/document_serializers.py:1216
+#: community/apps/dataset/serializers/paragraph_serializers.py:96
+#: community/apps/dataset/serializers/paragraph_serializers.py:162
+#: community/apps/dataset/serializers/paragraph_serializers.py:195
+#: community/apps/dataset/serializers/paragraph_serializers.py:196
+#: community/apps/dataset/serializers/paragraph_serializers.py:208
+#: community/apps/dataset/serializers/paragraph_serializers.py:266
+#: community/apps/dataset/serializers/paragraph_serializers.py:285
+#: community/apps/dataset/serializers/paragraph_serializers.py:302
+#: community/apps/dataset/serializers/paragraph_serializers.py:459
+#: community/apps/dataset/serializers/paragraph_serializers.py:567
+#: community/apps/dataset/serializers/paragraph_serializers.py:638
+#: community/apps/dataset/serializers/paragraph_serializers.py:647
+#: community/apps/dataset/serializers/paragraph_serializers.py:715
+#: community/apps/dataset/serializers/paragraph_serializers.py:716
+#: community/apps/dataset/serializers/paragraph_serializers.py:732
+#: community/apps/dataset/serializers/problem_serializers.py:87
+#: community/apps/dataset/serializers/problem_serializers.py:112
+#: community/apps/dataset/serializers/problem_serializers.py:135
+#: community/apps/dataset/serializers/problem_serializers.py:192
+#: community/apps/dataset/swagger_api/problem_api.py:28
+#: community/apps/dataset/swagger_api/problem_api.py:29
+#: community/apps/dataset/swagger_api/problem_api.py:77
+#: community/apps/dataset/swagger_api/problem_api.py:96
+#: community/apps/dataset/swagger_api/problem_api.py:149
+#: community/apps/dataset/swagger_api/problem_api.py:177
+msgid "dataset id"
+msgstr "Dataset ID"
+
+#: apps/xpack/serializers/dataset_lark_serializer.py:145
+#: apps/xpack/serializers/dataset_lark_serializer.py:146
+#: apps/xpack/serializers/dataset_lark_serializer.py:212
+#: community/apps/dataset/serializers/document_serializers.py:812
+#: community/apps/dataset/serializers/document_serializers.py:813
+#: community/apps/setting/swagger_api/provide_api.py:22
+#: community/apps/setting/swagger_api/provide_api.py:48
+#: community/apps/setting/swagger_api/provide_api.py:49
+#: community/apps/setting/swagger_api/provide_api.py:76
+#: community/apps/setting/swagger_api/provide_api.py:77
+#: community/apps/setting/swagger_api/provide_api.py:143
+#: community/apps/setting/swagger_api/provide_api.py:144
+msgid "name"
+msgstr "Name"
+
+#: apps/xpack/serializers/dataset_lark_serializer.py:147
+#: apps/xpack/serializers/dataset_lark_serializer.py:148
+#: apps/xpack/serializers/dataset_lark_serializer.py:211
+#: community/apps/application/serializers/application_serializers.py:257
+msgid "token"
+msgstr "Token"
+
+#: apps/xpack/serializers/dataset_lark_serializer.py:149
+#: apps/xpack/serializers/dataset_lark_serializer.py:150
+#: apps/xpack/serializers/dataset_lark_serializer.py:210
+#: community/apps/application/flow/step_node/function_node/i_function_node.py:26
+#: community/apps/dataset/serializers/document_serializers.py:229
+#: community/apps/function_lib/serializers/function_lib_serializer.py:72
+#: community/apps/function_lib/swagger_api/function_lib_api.py:92
+#: community/apps/function_lib/swagger_api/function_lib_api.py:138
+#: community/apps/function_lib/swagger_api/function_lib_api.py:184
+#: community/apps/setting/serializers/team_serializers.py:59
+#: community/apps/setting/serializers/team_serializers.py:74
+#: community/apps/setting/serializers/team_serializers.py:85
+#: community/apps/setting/serializers/valid_serializers.py:37
+msgid "type"
+msgstr "Type"
+
+#: apps/xpack/serializers/dataset_lark_serializer.py:151
+#: apps/xpack/serializers/dataset_lark_serializer.py:152
+#| msgid "id does not exist"
+msgid "is exist"
+msgstr ""
+
+#: apps/xpack/serializers/dataset_lark_serializer.py:173
+#: apps/xpack/serializers/dataset_lark_serializer.py:230
+#: apps/xpack/task/sync.py:120
+#| msgid "Knowledge base id"
+msgid "Knowledge base not found!"
+msgstr ""
+
+#: apps/xpack/serializers/dataset_lark_serializer.py:185
+#: apps/xpack/serializers/dataset_lark_serializer.py:252
+msgid "Failed to get lark document list!"
+msgstr ""
+
+#: apps/xpack/serializers/dataset_lark_serializer.py:262
+#: community/apps/common/swagger_api/common_api.py:70
+#: community/apps/common/swagger_api/common_api.py:71
+#: community/apps/dataset/serializers/document_serializers.py:293
+#: community/apps/dataset/serializers/document_serializers.py:386
+#: community/apps/dataset/serializers/document_serializers.py:490
+#: community/apps/dataset/serializers/document_serializers.py:572
+#: community/apps/dataset/serializers/document_serializers.py:581
+#: community/apps/dataset/serializers/document_serializers.py:586
+#: community/apps/dataset/serializers/document_serializers.py:854
+#: community/apps/dataset/serializers/document_serializers.py:982
+#: community/apps/dataset/serializers/document_serializers.py:1191
+#: community/apps/dataset/serializers/paragraph_serializers.py:98
+#: community/apps/dataset/serializers/paragraph_serializers.py:167
+#: community/apps/dataset/serializers/paragraph_serializers.py:212
+#: community/apps/dataset/serializers/paragraph_serializers.py:271
+#: community/apps/dataset/serializers/paragraph_serializers.py:286
+#: community/apps/dataset/serializers/paragraph_serializers.py:303
+#: community/apps/dataset/serializers/paragraph_serializers.py:426
+#: community/apps/dataset/serializers/paragraph_serializers.py:431
+#: community/apps/dataset/serializers/paragraph_serializers.py:462
+#: community/apps/dataset/serializers/paragraph_serializers.py:570
+#: community/apps/dataset/serializers/paragraph_serializers.py:642
+#: community/apps/dataset/serializers/paragraph_serializers.py:650
+#: community/apps/dataset/serializers/paragraph_serializers.py:682
+#: community/apps/dataset/serializers/paragraph_serializers.py:717
+#: community/apps/dataset/serializers/paragraph_serializers.py:718
+#: community/apps/dataset/serializers/paragraph_serializers.py:733
+#: community/apps/dataset/serializers/problem_serializers.py:58
+#: community/apps/dataset/swagger_api/problem_api.py:64
+msgid "document id"
+msgstr "Document ID"
+
+#: apps/xpack/serializers/dataset_lark_serializer.py:269
+#: apps/xpack/serializers/dataset_lark_serializer.py:289
+#: community/apps/dataset/serializers/document_serializers.py:497
+#: community/apps/dataset/serializers/document_serializers.py:593
+#: community/apps/dataset/serializers/document_serializers.py:1197
+msgid "document id not exist"
+msgstr "Document ID does not exist"
+
+#: apps/xpack/serializers/dataset_lark_serializer.py:271
+msgid "Synchronization is only supported for lark documents"
+msgstr ""
+
+#: apps/xpack/serializers/dataset_lark_serializer.py:374
+#: community/apps/dataset/serializers/dataset_serializers.py:549
+#: community/apps/dataset/serializers/dataset_serializers.py:914
+#: community/apps/dataset/serializers/dataset_serializers.py:915
+msgid "application id list"
+msgstr "Application ID list"
+
+#: apps/xpack/serializers/dataset_lark_serializer.py:416
+#: community/apps/dataset/serializers/dataset_serializers.py:175
+#: community/apps/dataset/serializers/dataset_serializers.py:837
+#: community/apps/function_lib/serializers/function_lib_serializer.py:125
+#: community/apps/function_lib/swagger_api/function_lib_api.py:119
+#: community/apps/function_lib/swagger_api/function_lib_api.py:120
+#: community/apps/function_lib/swagger_api/function_lib_api.py:165
+#: community/apps/function_lib/swagger_api/function_lib_api.py:166
+#: community/apps/setting/swagger_api/provide_api.py:81
+msgid "permission"
+msgstr "Permission"
+
+#: apps/xpack/serializers/dataset_lark_serializer.py:463
+#: community/apps/dataset/serializers/dataset_serializers.py:884
+#, python-brace-format
+msgid "Unknown application id {dataset_id}, cannot be associated"
+msgstr ""
+
+#: apps/xpack/serializers/license_serializers.py:52
+msgid "license file"
+msgstr "License file"
+
+#: apps/xpack/serializers/license_tools.py:134
+msgid "License usage limit exceeded."
+msgstr ""
+
+#: apps/xpack/serializers/license_tools.py:158
+msgid "The network is busy, try again later."
+msgstr ""
+
+#: apps/xpack/serializers/oauth2.py:79 apps/xpack/serializers/oauth2.py:82
+msgid "Failed to obtain user information"
+msgstr ""
+
+#: apps/xpack/serializers/operate_log.py:36
+#: community/apps/application/serializers/application_statistics_serializers.py:27
+#: community/apps/application/serializers/chat_serializers.py:116
+#: community/apps/application/swagger_api/application_statistics_api.py:26
+msgid "Start time"
+msgstr "Start Time"
+
+#: apps/xpack/serializers/operate_log.py:37
+#: community/apps/application/serializers/application_statistics_serializers.py:28
+#: community/apps/application/serializers/chat_serializers.py:117
+#: community/apps/application/swagger_api/application_statistics_api.py:31
+#: community/apps/application/swagger_api/chat_api.py:270
+msgid "End time"
+msgstr "End Time"
+
+#: apps/xpack/serializers/operate_log.py:38
+#: apps/xpack/swagger_api/operate_log.py:17
+#: apps/xpack/swagger_api/operate_log.py:18
+#: apps/xpack/swagger_api/operate_log.py:45
+#: apps/xpack/swagger_api/operate_log.py:46
+msgid "menu"
+msgstr ""
+
+#: apps/xpack/serializers/operate_log.py:39
+#: apps/xpack/swagger_api/operate_log.py:20
+#: apps/xpack/swagger_api/operate_log.py:21
+#: apps/xpack/swagger_api/operate_log.py:48
+#: apps/xpack/swagger_api/operate_log.py:49
+msgid "operate"
+msgstr ""
+
+#: apps/xpack/serializers/operate_log.py:40
+#: apps/xpack/swagger_api/operate_log.py:51
+#: apps/xpack/swagger_api/operate_log.py:52
+#| msgid "user id"
+msgid "user"
+msgstr "User"
+
+#: apps/xpack/serializers/operate_log.py:41
+#: apps/xpack/swagger_api/operate_log.py:54
+#: apps/xpack/swagger_api/operate_log.py:55
+#: community/apps/dataset/serializers/document_serializers.py:417
+msgid "status"
+msgstr "Status"
+
+#: apps/xpack/serializers/operate_log.py:42
+#: apps/xpack/swagger_api/operate_log.py:57
+#: apps/xpack/swagger_api/operate_log.py:58
+msgid "ip_address"
+msgstr ""
+
+#: apps/xpack/serializers/platform_serializer.py:14
+msgid "app_id is required"
+msgstr "App ID is required"
+
+#: apps/xpack/serializers/platform_serializer.py:15
+msgid "app_secret is required"
+msgstr "App Secret is required"
+
+#: apps/xpack/serializers/platform_serializer.py:16
+msgid "token is required"
+msgstr "Token is required"
+
+#: apps/xpack/serializers/platform_serializer.py:17
+msgid "callback_url is required"
+msgstr "Callback URL is required"
+
+#: apps/xpack/serializers/platform_serializer.py:23
+#: apps/xpack/serializers/platform_serializer.py:32
+msgid "App ID is required"
+msgstr ""
+
+#: apps/xpack/serializers/platform_serializer.py:24
+#: apps/xpack/serializers/platform_source_serializer.py:24
+msgid "Agent ID is required"
+msgstr ""
+
+#: apps/xpack/serializers/platform_serializer.py:25
+msgid "Secret is required"
+msgstr ""
+
+#: apps/xpack/serializers/platform_serializer.py:26
+msgid "Token is required"
+msgstr ""
+
+#: apps/xpack/serializers/platform_serializer.py:28
+#: apps/xpack/serializers/platform_serializer.py:36
+#: apps/xpack/serializers/platform_serializer.py:42
+#: apps/xpack/serializers/platform_serializer.py:48
+#: apps/xpack/serializers/platform_source_serializer.py:19
+msgid "Callback URL is required"
+msgstr ""
+
+#: apps/xpack/serializers/platform_serializer.py:33
+#: apps/xpack/serializers/platform_source_serializer.py:18
+msgid "App Secret is required"
+msgstr ""
+
+#: apps/xpack/serializers/platform_serializer.py:35
+msgid "Verification Token is required"
+msgstr ""
+
+#: apps/xpack/serializers/platform_serializer.py:40
+msgid "Client ID is required"
+msgstr ""
+
+#: apps/xpack/serializers/platform_serializer.py:41
+msgid "Client Secret is required"
+msgstr ""
+
+#: apps/xpack/serializers/platform_serializer.py:46
+#| msgid "app_secret is required"
+msgid "Signing Secret is required"
+msgstr ""
+
+#: apps/xpack/serializers/platform_serializer.py:47
+#| msgid "token is required"
+msgid "Bot User Token is required"
+msgstr ""
+
+#: apps/xpack/serializers/platform_serializer.py:68
+msgid "Check if the fields are correct"
+msgstr ""
+
+#: apps/xpack/serializers/platform_serializer.py:114
+#: apps/xpack/views/platform.py:85 apps/xpack/views/platform.py:101
+#: community/apps/application/flow/step_node/application_node/i_application_node.py:13
+#: community/apps/application/serializers/application_serializers.py:335
+#: community/apps/application/serializers/application_serializers.py:581
+#: community/apps/application/serializers/application_serializers.py:696
+#: community/apps/application/serializers/application_serializers.py:791
+#: community/apps/application/serializers/application_serializers.py:1230
+#: community/apps/application/serializers/application_serializers.py:1272
+#: community/apps/application/serializers/application_statistics_serializers.py:26
+#: community/apps/application/serializers/application_version_serializers.py:35
+#: community/apps/application/serializers/application_version_serializers.py:59
+#: community/apps/application/serializers/chat_message_serializers.py:207
+#: community/apps/application/serializers/chat_message_serializers.py:270
+#: community/apps/application/serializers/chat_serializers.py:77
+#: community/apps/application/serializers/chat_serializers.py:102
+#: community/apps/application/serializers/chat_serializers.py:119
+#: community/apps/application/serializers/chat_serializers.py:287
+#: community/apps/application/serializers/chat_serializers.py:363
+#: community/apps/application/serializers/chat_serializers.py:440
+#: community/apps/application/swagger_api/application_api.py:87
+#: community/apps/application/swagger_api/application_api.py:101
+#: community/apps/application/swagger_api/application_api.py:112
+#: community/apps/application/swagger_api/application_api.py:143
+#: community/apps/application/swagger_api/application_api.py:392
+#: community/apps/application/swagger_api/application_api.py:413
+#: community/apps/application/swagger_api/application_api.py:424
+#: community/apps/application/swagger_api/application_statistics_api.py:21
+#: community/apps/application/swagger_api/application_version_api.py:42
+#: community/apps/application/swagger_api/application_version_api.py:56
+#: community/apps/application/swagger_api/chat_api.py:23
+#: community/apps/application/swagger_api/chat_api.py:33
+#: community/apps/application/swagger_api/chat_api.py:167
+#: community/apps/application/swagger_api/chat_api.py:168
+#: community/apps/application/swagger_api/chat_api.py:199
+#: community/apps/application/swagger_api/chat_api.py:222
+#: community/apps/application/swagger_api/chat_api.py:249
+#: community/apps/application/swagger_api/chat_api.py:281
+#: community/apps/application/swagger_api/chat_api.py:350
+#: community/apps/application/swagger_api/chat_api.py:410
+#: community/apps/application/swagger_api/chat_api.py:427
+#: community/apps/application/swagger_api/chat_api.py:460
+#: community/apps/application/views/chat_views.py:477
+msgid "Application ID"
+msgstr ""
+
+#: apps/xpack/serializers/platform_serializer.py:116
+msgid "Platform type, for example: wechat"
+msgstr ""
+
+#: apps/xpack/serializers/platform_serializer.py:125
+#: apps/xpack/serializers/platform_serializer.py:126
+msgid "Platform type"
+msgstr ""
+
+#: apps/xpack/serializers/platform_serializer.py:128
+msgid "Status"
+msgstr ""
+
+#: apps/xpack/serializers/platform_serializer.py:138
+#: apps/xpack/serializers/platform_serializer.py:139
+msgid "Configuration information"
+msgstr ""
+
+#: apps/xpack/serializers/platform_serializer.py:191
+#, python-brace-format
+msgid "The platform configuration corresponding to {type} was not found"
+msgstr ""
+
+#: apps/xpack/serializers/platform_source_serializer.py:23
+#: apps/xpack/serializers/platform_source_serializer.py:32
+msgid "Corp ID is required"
+msgstr ""
+
+#: apps/xpack/serializers/platform_source_serializer.py:28
+#: apps/xpack/serializers/platform_source_serializer.py:33
+msgid "App Key is required"
+msgstr ""
+
+#: apps/xpack/serializers/platform_source_serializer.py:78
+msgid "Configuration information is wrong and failed to save"
+msgstr ""
+
+#: apps/xpack/serializers/platform_source_serializer.py:104
+msgid "Connection failed"
+msgstr ""
+
+#: apps/xpack/serializers/platform_source_serializer.py:123
+msgid "Platform does not exist"
+msgstr ""
+
+#: apps/xpack/serializers/platform_source_serializer.py:134
+msgid "Unsupported platform type"
+msgstr ""
+
+#: apps/xpack/serializers/qr_login/qr_login.py:28
+msgid "Team"
+msgstr "Team Member"
+
+#: apps/xpack/serializers/system_params_serializers.py:63
+msgid "theme"
+msgstr ""
+
+#: apps/xpack/serializers/system_params_serializers.py:70
+msgid "website icon"
+msgstr ""
+
+#: apps/xpack/serializers/system_params_serializers.py:77
+msgid "login logo"
+msgstr ""
+
+#: apps/xpack/serializers/system_params_serializers.py:84
+msgid "Login background image"
+msgstr ""
+
+#: apps/xpack/serializers/system_params_serializers.py:91
+msgid "website title"
+msgstr "Website title"
+
+#: apps/xpack/serializers/system_params_serializers.py:98
+msgid "website slogan"
+msgstr "Website slogan"
+
+#: apps/xpack/serializers/system_params_serializers.py:105
+msgid "Show user manual"
+msgstr ""
+
+#: apps/xpack/serializers/system_params_serializers.py:112
+msgid "User manual address"
+msgstr ""
+
+#: apps/xpack/serializers/system_params_serializers.py:119
+msgid "Show forum"
+msgstr ""
+
+#: apps/xpack/serializers/system_params_serializers.py:126
+msgid "Forum address"
+msgstr ""
+
+#: apps/xpack/serializers/system_params_serializers.py:133
+msgid "Show project"
+msgstr ""
+
+#: apps/xpack/serializers/system_params_serializers.py:140
+msgid "Project address"
+msgstr ""
+
+#: apps/xpack/serializers/tools.py:58
+#, python-brace-format
+msgid ""
+"Thinking about 【{question}】...If you want me to continue answering, please "
+"reply {trigger_message}"
+msgstr ""
+
+#: apps/xpack/serializers/tools.py:158
+msgid ""
+"\n"
+" ------------\n"
+"[To be continued, reply \"Continue to answer the question]"
+msgstr ""
+
+#: apps/xpack/serializers/tools.py:238
+#, python-brace-format
+msgid ""
+"To be continued, reply \"{trigger_message}\" to continue answering the "
+"question"
+msgstr ""
+
+#: apps/xpack/swagger_api/application_setting_api.py:79
+msgid "Custom theme {theme_color: , header_font_color: }"
+msgstr ""
+
+#: apps/xpack/swagger_api/application_setting_api.py:93
+msgid "Float location {top: 0, left: 0}"
+msgstr ""
+
+#: apps/xpack/swagger_api/application_setting_api.py:101
+#: apps/xpack/swagger_api/application_setting_api.py:102
+#: apps/xpack/swagger_api/auth_api.py:10 apps/xpack/swagger_api/auth_api.py:11
+#: apps/xpack/swagger_api/auth_api.py:81 apps/xpack/swagger_api/auth_api.py:82
+msgid "Authentication configuration"
+msgstr ""
+
+#: apps/xpack/swagger_api/application_setting_api.py:106
+#: apps/xpack/swagger_api/application_setting_api.py:107
+#: apps/xpack/swagger_api/auth_api.py:15 apps/xpack/swagger_api/auth_api.py:16
+#: apps/xpack/swagger_api/auth_api.py:30 apps/xpack/swagger_api/auth_api.py:87
+#: apps/xpack/swagger_api/auth_api.py:88 apps/xpack/views/auth.py:27
+#: apps/xpack/views/auth.py:28
+msgid "Authentication type"
+msgstr ""
+
+#: apps/xpack/swagger_api/application_setting_api.py:109
+#: apps/xpack/swagger_api/application_setting_api.py:110
+#: apps/xpack/swagger_api/auth_api.py:18 apps/xpack/swagger_api/auth_api.py:19
+#: apps/xpack/swagger_api/auth_api.py:93 apps/xpack/swagger_api/auth_api.py:94
+msgid "Configuration"
+msgstr ""
+
+#: apps/xpack/swagger_api/application_setting_api.py:112
+#: apps/xpack/swagger_api/application_setting_api.py:113
+#: apps/xpack/swagger_api/auth_api.py:21 apps/xpack/swagger_api/auth_api.py:22
+#: community/apps/common/swagger_api/common_api.py:72
+#: community/apps/common/swagger_api/common_api.py:73
+#: community/apps/dataset/serializers/document_serializers.py:819
+#: community/apps/dataset/serializers/document_serializers.py:820
+#: community/apps/dataset/serializers/document_serializers.py:838
+#: community/apps/dataset/serializers/document_serializers.py:839
+#: community/apps/dataset/serializers/paragraph_serializers.py:57
+#: community/apps/dataset/serializers/paragraph_serializers.py:71
+#: community/apps/dataset/serializers/paragraph_serializers.py:719
+#: community/apps/dataset/serializers/paragraph_serializers.py:720
+#: community/apps/dataset/swagger_api/problem_api.py:130
+#: community/apps/function_lib/serializers/function_lib_serializer.py:110
+#: community/apps/function_lib/serializers/function_lib_serializer.py:129
+#: community/apps/function_lib/serializers/function_lib_serializer.py:139
+#: community/apps/function_lib/swagger_api/function_lib_api.py:121
+#: community/apps/function_lib/swagger_api/function_lib_api.py:122
+#: community/apps/function_lib/swagger_api/function_lib_api.py:167
+#: community/apps/function_lib/swagger_api/function_lib_api.py:168
+#: community/apps/setting/serializers/team_serializers.py:46
+#: community/apps/users/serializers/user_serializers.py:473
+#: community/apps/users/serializers/user_serializers.py:496
+#: community/apps/users/serializers/user_serializers.py:584
+#: community/apps/users/serializers/user_serializers.py:585
+#: community/apps/users/serializers/user_serializers.py:721
+#: community/apps/users/serializers/user_serializers.py:737
+#: community/apps/users/serializers/user_serializers.py:738
+msgid "Is active"
+msgstr ""
+
+#: apps/xpack/swagger_api/auth_api.py:37
+#| msgid "parameter configuration"
+msgid "Wecom configuration"
+msgstr ""
+
+#: apps/xpack/swagger_api/auth_api.py:38
+#| msgid "parameter configuration"
+msgid "Wecom configuration details"
+msgstr ""
+
+#: apps/xpack/swagger_api/auth_api.py:40 apps/xpack/swagger_api/auth_api.py:53
+msgid "Corp ID"
+msgstr ""
+
+#: apps/xpack/swagger_api/auth_api.py:41
+msgid "Agent ID"
+msgstr ""
+
+#: apps/xpack/swagger_api/auth_api.py:42 apps/xpack/swagger_api/auth_api.py:55
+#: apps/xpack/swagger_api/auth_api.py:67
+msgid "App Secret"
+msgstr ""
+
+#: apps/xpack/swagger_api/auth_api.py:43 apps/xpack/swagger_api/auth_api.py:56
+#: apps/xpack/swagger_api/auth_api.py:68
+msgid "Callback URL"
+msgstr ""
+
+#: apps/xpack/swagger_api/auth_api.py:50
+#| msgid "parameter configuration"
+msgid "Dingtalk configuration"
+msgstr ""
+
+#: apps/xpack/swagger_api/auth_api.py:51
+msgid "Dingtalk configuration details"
+msgstr ""
+
+#: apps/xpack/swagger_api/auth_api.py:54 apps/xpack/swagger_api/auth_api.py:66
+msgid "App Key"
+msgstr ""
+
+#: apps/xpack/swagger_api/auth_api.py:63
+#| msgid "parameter configuration"
+msgid "Feishu configuration"
+msgstr ""
+
+#: apps/xpack/swagger_api/auth_api.py:64
+msgid "Feishu configuration details"
+msgstr ""
+
+#: apps/xpack/swagger_api/license_api.py:22
+msgid "license status"
+msgstr "License status"
+
+#: apps/xpack/swagger_api/license_api.py:24
+msgid ""
+"License status, possible values are: valid, invalid, expired, which "
+"respectively represent: valid, invalid, expired"
+msgstr ""
+
+#: apps/xpack/swagger_api/license_api.py:26
+msgid "license details"
+msgstr "License details"
+
+#: apps/xpack/swagger_api/license_api.py:30
+msgid "customer name"
+msgstr "Customer name"
+
+#: apps/xpack/swagger_api/license_api.py:31
+msgid "customer name. For example: *** company."
+msgstr "Customer name. For example: *** company."
+
+#: apps/xpack/swagger_api/license_api.py:33
+msgid "independent software vendor"
+msgstr "Independent software vendor"
+
+#: apps/xpack/swagger_api/license_api.py:35
+msgid ""
+"Independent Software Vendor. For example: *** Company, suitable for the "
+"embedded version of the product."
+msgstr ""
+
+#: apps/xpack/swagger_api/license_api.py:37
+msgid "Authorization deadline."
+msgstr ""
+
+#: apps/xpack/swagger_api/license_api.py:39
+msgid ""
+"Authorization deadline. For example: 2020-12-31, this license will expire on "
+"2021-01-01."
+msgstr ""
+
+#: apps/xpack/swagger_api/license_api.py:41
+msgid "product name."
+msgstr "Product name"
+
+#: apps/xpack/swagger_api/license_api.py:43
+msgid "Product name. For example: JumpServer, CMP, etc."
+msgstr ""
+
+#: apps/xpack/swagger_api/license_api.py:45
+msgid "product version."
+msgstr "Product version"
+
+#: apps/xpack/swagger_api/license_api.py:47
+msgid "Product version. For example: JumpServer 2.0, CMP 1.0, etc."
+msgstr ""
+
+#: apps/xpack/swagger_api/license_api.py:49
+msgid "license version."
+msgstr "License version"
+
+#: apps/xpack/swagger_api/license_api.py:51
+msgid "License version. For example: 1.0, 2.0, etc."
+msgstr ""
+
+#: apps/xpack/swagger_api/license_api.py:53
+msgid "authorization quantity."
+msgstr "Authorization quantity"
+
+#: apps/xpack/swagger_api/license_api.py:55
+msgid ""
+"Authorization quantity. For example: 100, this license can be used by 100 "
+"users."
+msgstr ""
+
+#: apps/xpack/swagger_api/license_api.py:57
+msgid "Serial number, the unique identifier of the License."
+msgstr "Serial number, the unique identifier of the license."
+
+#: apps/xpack/swagger_api/license_api.py:59
+msgid ""
+"Serial number, the unique identifier of the license. The customer support "
+"portal will save the serial number after generating the license. If the "
+"serial number is not recorded in the customer support portal, the license "
+"will be regarded as an unknown source."
+msgstr ""
+
+#: apps/xpack/swagger_api/license_api.py:61
+msgid "remarks"
+msgstr "Remarks"
+
+#: apps/xpack/swagger_api/license_api.py:63
+msgid ""
+"Remarks, record additional information, length limit is 50. For example, a "
+"customer purchases two identical JumpServer subscriptions and uses them in "
+"different computer rooms respectively. You can use this field to note the A "
+"computer room and B computer room to help distinguish the licenses."
+msgstr ""
+
+#: apps/xpack/swagger_api/operate_log.py:12
+#: apps/xpack/swagger_api/operate_log.py:13
+#: apps/xpack/swagger_api/operate_log.py:38
+#: apps/xpack/swagger_api/operate_log.py:39 apps/xpack/views/operate_log.py:24
+#: apps/xpack/views/operate_log.py:36
+msgid "Operate log"
+msgstr ""
+
+#: apps/xpack/swagger_api/operate_log.py:23
+#: apps/xpack/swagger_api/operate_log.py:24
+msgid "menu_label"
+msgstr "Menu label"
+
+#: apps/xpack/swagger_api/operate_log.py:26
+#: apps/xpack/swagger_api/operate_log.py:27
+msgid "operate_label"
+msgstr "Operate label"
+
+#: apps/xpack/swagger_api/operate_log.py:42
+#: apps/xpack/swagger_api/operate_log.py:43
+#: community/apps/dataset/serializers/dataset_serializers.py:104
+msgid "id"
+msgstr "ID"
+
+#: apps/xpack/swagger_api/operate_log.py:60
+#: apps/xpack/swagger_api/operate_log.py:61
+#| msgid "license details"
+msgid "details"
+msgstr "Details"
+
+#: apps/xpack/views/application_setting_views.py:22
+#: apps/xpack/views/application_setting_views.py:23
+#| msgid "Modification time"
+msgid "Modify Application Settings"
+msgstr "Modify Application Display Settings"
+
+#: apps/xpack/views/application_setting_views.py:24
+#: apps/xpack/views/application_setting_views.py:40
+msgid "Pro/Application/Public Access"
+msgstr ""
+
+#: apps/xpack/views/application_setting_views.py:37
+#: apps/xpack/views/application_setting_views.py:38
+#| msgid "Application version id"
+msgid "Get Application Settings"
+msgstr ""
+
+#: apps/xpack/views/auth.py:29
+msgid "Authentication"
+msgstr ""
+
+#: apps/xpack/views/auth.py:40 apps/xpack/views/auth.py:41
+msgid "Add or modify authentication configuration"
+msgstr ""
+
+#: apps/xpack/views/auth.py:44 apps/xpack/views/auth.py:58
+#: apps/xpack/views/auth.py:72
+msgid "System settings/login authentication"
+msgstr ""
+
+#: apps/xpack/views/auth.py:55 apps/xpack/views/auth.py:56
+msgid "Get authentication configuration"
+msgstr ""
+
+#: apps/xpack/views/auth.py:69 apps/xpack/views/auth.py:70
+msgid "test connection"
+msgstr "Test connection"
+
+#: apps/xpack/views/auth.py:96 apps/xpack/views/auth.py:97
+#: community/apps/users/views/user.py:173
+#: community/apps/users/views/user.py:174
+msgid "Log in"
+msgstr ""
+
+#: apps/xpack/views/auth.py:101 apps/xpack/views/auth.py:114
+#: apps/xpack/views/auth.py:130 apps/xpack/views/auth.py:146
+#: apps/xpack/views/auth.py:207 apps/xpack/views/auth.py:224
+#: apps/xpack/views/auth.py:242 apps/xpack/views/auth.py:260
+#: apps/xpack/views/auth.py:278 apps/xpack/views/auth.py:296
+msgid "Three-party login"
+msgstr ""
+
+#: apps/xpack/views/auth.py:111 apps/xpack/views/auth.py:112
+msgid "CAS login"
+msgstr ""
+
+#: apps/xpack/views/auth.py:127 apps/xpack/views/auth.py:128
+msgid "OIDC login"
+msgstr ""
+
+#: apps/xpack/views/auth.py:143 apps/xpack/views/auth.py:144
+msgid "OAuth2 login"
+msgstr ""
+
+#: apps/xpack/views/auth.py:160 apps/xpack/views/auth.py:161
+#: apps/xpack/views/auth.py:162 apps/xpack/views/auth.py:170
+#: apps/xpack/views/auth.py:194 apps/xpack/views/auth.py:195
+#: apps/xpack/views/auth.py:196
+msgid "Get platform information"
+msgstr ""
+
+#: apps/xpack/views/auth.py:167 apps/xpack/views/auth.py:168
+msgid "Modify platform information"
+msgstr ""
+
+#: apps/xpack/views/auth.py:175 apps/xpack/views/auth.py:176
+#: apps/xpack/views/auth.py:178
+msgid "Test platform connection"
+msgstr ""
+
+#: apps/xpack/views/auth.py:185 apps/xpack/views/auth.py:186
+msgid "Scan code login type"
+msgstr ""
+
+#: apps/xpack/views/auth.py:187
+msgid "Scan code to log in"
+msgstr ""
+
+#: apps/xpack/views/auth.py:204 apps/xpack/views/auth.py:205
+msgid "DingTalk callback"
+msgstr ""
+
+#: apps/xpack/views/auth.py:221 apps/xpack/views/auth.py:222
+msgid "DingTalk OAuth2 callback"
+msgstr ""
+
+#: apps/xpack/views/auth.py:239 apps/xpack/views/auth.py:240
+msgid "Lark callback"
+msgstr ""
+
+#: apps/xpack/views/auth.py:257 apps/xpack/views/auth.py:258
+msgid "Lark OAuth2 callback"
+msgstr ""
+
+#: apps/xpack/views/auth.py:275 apps/xpack/views/auth.py:276
+msgid "Wecom callback"
+msgstr ""
+
+#: apps/xpack/views/auth.py:293 apps/xpack/views/auth.py:294
+msgid "Wecom OAuth2 callback"
+msgstr ""
+
+#: apps/xpack/views/dataset_lark_views.py:22
+#: apps/xpack/views/dataset_lark_views.py:23
+msgid "Create a lark knowledge base"
+msgstr ""
+
+#: apps/xpack/views/dataset_lark_views.py:26
+#: apps/xpack/views/dataset_lark_views.py:40
+#: community/apps/dataset/views/dataset.py:39
+#: community/apps/dataset/views/dataset.py:62
+#: community/apps/dataset/views/dataset.py:82
+#: community/apps/dataset/views/dataset.py:98
+#: community/apps/dataset/views/dataset.py:109
+#: community/apps/dataset/views/dataset.py:123
+#: community/apps/dataset/views/dataset.py:137
+#: community/apps/dataset/views/dataset.py:157
+#: community/apps/dataset/views/dataset.py:172
+#: community/apps/dataset/views/dataset.py:187
+#: community/apps/dataset/views/dataset.py:202
+#: community/apps/dataset/views/dataset.py:217
+#: community/apps/dataset/views/dataset.py:231
+#: community/apps/dataset/views/dataset.py:250
+msgid "Knowledge Base"
+msgstr ""
+
+#: apps/xpack/views/dataset_lark_views.py:36
+#: apps/xpack/views/dataset_lark_views.py:37
+msgid "Update the lark knowledge base"
+msgstr ""
+
+#: apps/xpack/views/dataset_lark_views.py:53
+#: apps/xpack/views/dataset_lark_views.py:54
+msgid "Get the list of documents in the lark knowledge base"
+msgstr ""
+
+#: apps/xpack/views/dataset_lark_views.py:57
+#: apps/xpack/views/dataset_lark_views.py:74
+#: apps/xpack/views/dataset_lark_views.py:90
+#: apps/xpack/views/dataset_lark_views.py:110
+#: community/apps/dataset/views/document.py:34
+#: community/apps/dataset/views/document.py:47
+#: community/apps/dataset/views/document.py:62
+#: community/apps/dataset/views/document.py:81
+#: community/apps/dataset/views/document.py:102
+#: community/apps/dataset/views/document.py:123
+#: community/apps/dataset/views/document.py:137
+#: community/apps/dataset/views/document.py:158
+#: community/apps/dataset/views/document.py:178
+#: community/apps/dataset/views/document.py:193
+#: community/apps/dataset/views/document.py:208
+#: community/apps/dataset/views/document.py:224
+#: community/apps/dataset/views/document.py:244
+#: community/apps/dataset/views/document.py:265
+#: community/apps/dataset/views/document.py:284
+#: community/apps/dataset/views/document.py:306
+#: community/apps/dataset/views/document.py:324
+#: community/apps/dataset/views/document.py:349
+#: community/apps/dataset/views/document.py:364
+#: community/apps/dataset/views/document.py:380
+#: community/apps/dataset/views/document.py:396
+#: community/apps/dataset/views/document.py:413
+#: community/apps/dataset/views/document.py:429
+#: community/apps/dataset/views/document.py:442
+#: community/apps/dataset/views/document.py:467
+msgid "Knowledge Base/Documentation"
+msgstr ""
+
+#: apps/xpack/views/dataset_lark_views.py:70
+#: apps/xpack/views/dataset_lark_views.py:71
+msgid "Import documents to the lark knowledge base"
+msgstr ""
+
+#: apps/xpack/views/dataset_lark_views.py:86
+#: apps/xpack/views/dataset_lark_views.py:87
+msgid "Synchronize lark document"
+msgstr ""
+
+#: apps/xpack/views/dataset_lark_views.py:104
+#: apps/xpack/views/dataset_lark_views.py:105
+msgid "Batch sync lark documents"
+msgstr ""
+
+#: apps/xpack/views/display.py:17 apps/xpack/views/display.py:18
+msgid "View appearance settings"
+msgstr ""
+
+#: apps/xpack/views/display.py:19 apps/xpack/views/display.py:33
+msgid "System Settings/Appearance Settings"
+msgstr ""
+
+#: apps/xpack/views/display.py:30 apps/xpack/views/display.py:31
+msgid "Update appearance settings"
+msgstr ""
+
+#: apps/xpack/views/license.py:29 apps/xpack/views/license.py:30
+msgid "Get license information"
+msgstr ""
+
+#: apps/xpack/views/license.py:38 apps/xpack/views/license.py:39
+msgid "Update license information"
+msgstr ""
+
+#: apps/xpack/views/license.py:44
+msgid "upload file"
+msgstr "Upload file"
+
+#: apps/xpack/views/operate_log.py:21 apps/xpack/views/operate_log.py:22
+msgid "Get menu operate log"
+msgstr ""
+
+#: apps/xpack/views/operate_log.py:33 apps/xpack/views/operate_log.py:34
+msgid "Get operate log"
+msgstr ""
+
+#: apps/xpack/views/platform.py:56 apps/xpack/views/platform.py:57
+msgid "Get platform configuration"
+msgstr ""
+
+#: apps/xpack/views/platform.py:59 apps/xpack/views/platform.py:67
+msgid "Application/application access"
+msgstr ""
+
+#: apps/xpack/views/platform.py:63 apps/xpack/views/platform.py:64
+msgid "Update platform configuration"
+msgstr ""
+
+#: apps/xpack/views/platform.py:80 apps/xpack/views/platform.py:81
+msgid "Get platform status"
+msgstr ""
+
+#: apps/xpack/views/platform.py:86
+msgid "Application/Get platform status"
+msgstr ""
+
+#: apps/xpack/views/platform.py:96 apps/xpack/views/platform.py:97
+msgid "Update platform status"
+msgstr ""
+
+#: apps/xpack/views/platform.py:103
+msgid "Application/Update platform status"
+msgstr ""
+
+#: apps/xpack/views/system_api_key_views.py:28
+#: apps/xpack/views/system_api_key_views.py:29
+msgid "Get personal system API_KEY list"
+msgstr ""
+
+#: apps/xpack/views/system_api_key_views.py:30
+#: apps/xpack/views/system_api_key_views.py:39
+#: apps/xpack/views/system_api_key_views.py:53
+#: apps/xpack/views/system_api_key_views.py:62
+msgid "Personal system/API_KEY"
+msgstr ""
+
+#: apps/xpack/views/system_api_key_views.py:37
+#: apps/xpack/views/system_api_key_views.py:38
+msgid "Update personal system API_KEY"
+msgstr ""
+
+#: apps/xpack/views/system_api_key_views.py:51
+#: apps/xpack/views/system_api_key_views.py:52
+msgid "Delete personal system API_KEY"
+msgstr ""
+
+#: apps/xpack/views/system_api_key_views.py:60
+#: apps/xpack/views/system_api_key_views.py:61
+msgid "Add personal system API_KEY"
+msgstr ""
+
+#: community/apps/application/chat_pipeline/step/chat_step/i_chat_step.py:27
+msgid "Model type error"
+msgstr ""
+
+#: community/apps/application/chat_pipeline/step/chat_step/i_chat_step.py:37
+#: community/apps/common/field/common.py:21
+#: community/apps/common/field/common.py:34
+msgid "Message type error"
+msgstr ""
+
+#: community/apps/application/chat_pipeline/step/chat_step/i_chat_step.py:56
+msgid "Conversation list"
+msgstr ""
+
+#: community/apps/application/chat_pipeline/step/chat_step/i_chat_step.py:57
+#: community/apps/application/chat_pipeline/step/reset_problem_step/i_reset_problem_step.py:30
+#: community/apps/application/flow/step_node/ai_chat_step_node/i_chat_node.py:19
+#: community/apps/application/flow/step_node/image_generate_step_node/i_image_generate_node.py:13
+#: community/apps/application/flow/step_node/image_understand_step_node/i_image_understand_node.py:13
+#: community/apps/application/flow/step_node/question_node/i_question_node.py:19
+#: community/apps/application/flow/step_node/speech_to_text_step_node/i_speech_to_text_node.py:13
+#: community/apps/application/flow/step_node/text_to_speech_step_node/i_text_to_speech_node.py:13
+#: community/apps/application/serializers/application_serializers.py:72
+#: community/apps/application/serializers/chat_serializers.py:365
+#: community/apps/application/swagger_api/application_api.py:53
+#: community/apps/application/swagger_api/application_api.py:185
+#: community/apps/application/swagger_api/application_api.py:186
+#: community/apps/application/swagger_api/application_api.py:334
+#: community/apps/application/swagger_api/application_api.py:335
+msgid "Model id"
+msgstr "Model ID"
+
+#: community/apps/application/chat_pipeline/step/chat_step/i_chat_step.py:59
+#: community/apps/application/chat_pipeline/step/generate_human_message_step/i_generate_human_message_step.py:30
+msgid "Paragraph List"
+msgstr ""
+
+#: community/apps/application/chat_pipeline/step/chat_step/i_chat_step.py:61
+#: community/apps/application/serializers/chat_message_serializers.py:201
+#: community/apps/application/serializers/chat_message_serializers.py:253
+#: community/apps/application/serializers/chat_serializers.py:76
+#: community/apps/application/serializers/chat_serializers.py:240
+#: community/apps/application/serializers/chat_serializers.py:439
+#: community/apps/application/serializers/chat_serializers.py:531
+#: community/apps/application/serializers/chat_serializers.py:587
+#: community/apps/application/serializers/chat_serializers.py:613
+#: community/apps/application/serializers/chat_serializers.py:672
+#: community/apps/application/serializers/chat_serializers.py:712
+#: community/apps/application/swagger_api/chat_api.py:38
+#: community/apps/application/swagger_api/chat_api.py:76
+#: community/apps/application/swagger_api/chat_api.py:171
+#: community/apps/application/swagger_api/chat_api.py:172
+#: community/apps/application/swagger_api/chat_api.py:286
+#: community/apps/application/swagger_api/chat_api.py:355
+#: community/apps/application/swagger_api/chat_api.py:432
+#: community/apps/application/swagger_api/chat_api.py:465
+#: community/apps/application/views/chat_views.py:482
+msgid "Conversation ID"
+msgstr ""
+
+#: community/apps/application/chat_pipeline/step/chat_step/i_chat_step.py:63
+#: community/apps/application/flow/step_node/application_node/i_application_node.py:15
+#: community/apps/application/serializers/chat_message_serializers.py:254
+#: community/apps/application/serializers/chat_serializers.py:240
+msgid "User Questions"
+msgstr ""
+
+#: community/apps/application/chat_pipeline/step/chat_step/i_chat_step.py:66
+msgid "Post-processor"
+msgstr ""
+
+#: community/apps/application/chat_pipeline/step/chat_step/i_chat_step.py:69
+msgid "Completion Question"
+msgstr ""
+
+#: community/apps/application/chat_pipeline/step/chat_step/i_chat_step.py:71
+#: community/apps/application/serializers/chat_message_serializers.py:203
+msgid "Streaming Output"
+msgstr ""
+
+#: community/apps/application/chat_pipeline/step/chat_step/i_chat_step.py:72
+#: community/apps/application/serializers/chat_message_serializers.py:208
+#: community/apps/application/serializers/chat_message_serializers.py:271
+#: community/apps/application/serializers/chat_serializers.py:103
+msgid "Client id"
+msgstr "Client ID"
+
+#: community/apps/application/chat_pipeline/step/chat_step/i_chat_step.py:73
+#: community/apps/application/serializers/chat_message_serializers.py:209
+#: community/apps/application/serializers/chat_message_serializers.py:272
+msgid "Client Type"
+msgstr ""
+
+#: community/apps/application/chat_pipeline/step/chat_step/i_chat_step.py:76
+#: community/apps/application/chat_pipeline/step/generate_human_message_step/i_generate_human_message_step.py:46
+#: community/apps/application/swagger_api/application_api.py:262
+msgid "No reference segment settings"
+msgstr ""
+
+#: community/apps/application/chat_pipeline/step/chat_step/i_chat_step.py:78
+#: community/apps/application/chat_pipeline/step/reset_problem_step/i_reset_problem_step.py:31
+#: community/apps/application/chat_pipeline/step/search_dataset_step/i_search_dataset_step.py:48
+#: community/apps/application/serializers/application_serializers.py:70
+#: community/apps/application/serializers/application_serializers.py:511
+#: community/apps/application/serializers/application_serializers.py:582
+#: community/apps/application/serializers/application_serializers.py:627
+#: community/apps/application/serializers/application_serializers.py:697
+#: community/apps/application/serializers/application_serializers.py:718
+#: community/apps/application/serializers/application_serializers.py:792
+#: community/apps/application/serializers/application_serializers.py:1228
+#: community/apps/application/serializers/chat_serializers.py:118
+#: community/apps/application/serializers/chat_serializers.py:285
+#: community/apps/application/serializers/chat_serializers.py:338
+#: community/apps/application/serializers/chat_serializers.py:360
+#: community/apps/function_lib/serializers/function_lib_serializer.py:332
+#: community/apps/function_lib/serializers/function_lib_serializer.py:358
+#: community/apps/function_lib/serializers/function_lib_serializer.py:387
+msgid "User ID"
+msgstr ""
+
+#: community/apps/application/chat_pipeline/step/chat_step/i_chat_step.py:81
+#| msgid "Model id"
+msgid "Model settings"
+msgstr ""
+
+#: community/apps/application/chat_pipeline/step/chat_step/i_chat_step.py:84
+#: community/apps/application/flow/step_node/ai_chat_step_node/i_chat_node.py:31
+#: community/apps/application/flow/step_node/image_generate_step_node/i_image_generate_node.py:29
+#: community/apps/application/flow/step_node/image_understand_step_node/i_image_understand_node.py:27
+#: community/apps/application/flow/step_node/question_node/i_question_node.py:27
+#: community/apps/application/flow/step_node/text_to_speech_step_node/i_text_to_speech_node.py:19
+#: community/apps/application/serializers/chat_serializers.py:382
+msgid "Model parameter settings"
+msgstr ""
+
+#: community/apps/application/chat_pipeline/step/chat_step/i_chat_step.py:91
+msgid "message type error"
+msgstr "Message type error"
+
+#: community/apps/application/chat_pipeline/step/chat_step/impl/base_chat_step.py:226
+#: community/apps/application/chat_pipeline/step/chat_step/impl/base_chat_step.py:271
+msgid ""
+"Sorry, the AI model is not configured. Please go to the application to set "
+"up the AI model first."
+msgstr ""
+
+#: community/apps/application/chat_pipeline/step/generate_human_message_step/i_generate_human_message_step.py:27
+#: community/apps/application/chat_pipeline/step/reset_problem_step/i_reset_problem_step.py:25
+#: community/apps/application/chat_pipeline/step/search_dataset_step/i_search_dataset_step.py:25
+#: community/apps/application/serializers/chat_serializers.py:579
+msgid "question"
+msgstr "Question"
+
+#: community/apps/application/chat_pipeline/step/generate_human_message_step/i_generate_human_message_step.py:33
+#: community/apps/application/chat_pipeline/step/reset_problem_step/i_reset_problem_step.py:28
+msgid "History Questions"
+msgstr ""
+
+#: community/apps/application/chat_pipeline/step/generate_human_message_step/i_generate_human_message_step.py:35
+#: community/apps/application/flow/step_node/ai_chat_step_node/i_chat_node.py:25
+#: community/apps/application/flow/step_node/image_generate_step_node/i_image_generate_node.py:21
+#: community/apps/application/flow/step_node/image_understand_step_node/i_image_understand_node.py:18
+#: community/apps/application/flow/step_node/question_node/i_question_node.py:24
+#: community/apps/application/swagger_api/application_api.py:55
+#: community/apps/application/swagger_api/application_api.py:56
+#: community/apps/application/swagger_api/application_api.py:188
+#: community/apps/application/swagger_api/application_api.py:189
+#: community/apps/application/swagger_api/application_api.py:337
+#: community/apps/application/swagger_api/application_api.py:338
+msgid "Number of multi-round conversations"
+msgstr ""
+
+#: community/apps/application/chat_pipeline/step/generate_human_message_step/i_generate_human_message_step.py:38
+msgid "Maximum length of the knowledge base paragraph"
+msgstr ""
+
+#: community/apps/application/chat_pipeline/step/generate_human_message_step/i_generate_human_message_step.py:40
+#: community/apps/application/flow/step_node/ai_chat_step_node/i_chat_node.py:22
+#: community/apps/application/flow/step_node/image_understand_step_node/i_image_understand_node.py:16
+#: community/apps/application/flow/step_node/question_node/i_question_node.py:22
+#: community/apps/application/serializers/application_serializers.py:108
+#: community/apps/application/serializers/application_serializers.py:138
+#: community/apps/application/swagger_api/application_api.py:286
+#: community/apps/application/swagger_api/application_api.py:287
+msgid "Prompt word"
+msgstr ""
+
+#: community/apps/application/chat_pipeline/step/generate_human_message_step/i_generate_human_message_step.py:42
+#: community/apps/application/swagger_api/application_api.py:300
+#: community/apps/application/swagger_api/application_api.py:301
+msgid "System prompt words (role)"
+msgstr ""
+
+#: community/apps/application/chat_pipeline/step/generate_human_message_step/i_generate_human_message_step.py:44
+msgid "Completion problem"
+msgstr ""
+
+#: community/apps/application/chat_pipeline/step/reset_problem_step/i_reset_problem_step.py:34
+#: community/apps/application/serializers/application_serializers.py:237
+msgid "Question completion prompt"
+msgstr ""
+
+#: community/apps/application/chat_pipeline/step/reset_problem_step/impl/base_reset_problem_step.py:20
+#: community/apps/application/serializers/chat_message_serializers.py:99
+#: community/apps/application/swagger_api/application_api.py:210
+#: community/apps/application/swagger_api/application_api.py:355
+#, python-brace-format
+msgid ""
+"() contains the user's question. Answer the guessed user's question based on "
+"the context ({question}) Requirement: Output a complete question and put it "
+"in the tag"
+msgstr ""
+
+#: community/apps/application/chat_pipeline/step/search_dataset_step/i_search_dataset_step.py:28
+msgid "System completes question text"
+msgstr ""
+
+#: community/apps/application/chat_pipeline/step/search_dataset_step/i_search_dataset_step.py:31
+#: community/apps/application/flow/step_node/search_dataset_node/i_search_dataset_node.py:39
+msgid "Dataset id list"
+msgstr ""
+
+#: community/apps/application/chat_pipeline/step/search_dataset_step/i_search_dataset_step.py:34
+msgid "List of document ids to exclude"
+msgstr ""
+
+#: community/apps/application/chat_pipeline/step/search_dataset_step/i_search_dataset_step.py:37
+msgid "List of exclusion vector ids"
+msgstr ""
+
+#: community/apps/application/chat_pipeline/step/search_dataset_step/i_search_dataset_step.py:40
+#: community/apps/application/flow/step_node/reranker_node/i_reranker_node.py:21
+#: community/apps/application/flow/step_node/reranker_node/i_reranker_node.py:24
+#: community/apps/application/flow/step_node/search_dataset_node/i_search_dataset_node.py:24
+#: community/apps/application/serializers/application_serializers.py:121
+#: community/apps/application/serializers/chat_serializers.py:243
+#: community/apps/application/swagger_api/application_api.py:249
+#: community/apps/application/swagger_api/application_api.py:250
+msgid "Reference segment number"
+msgstr ""
+
+#: community/apps/application/chat_pipeline/step/search_dataset_step/i_search_dataset_step.py:43
+#: community/apps/application/swagger_api/application_api.py:252
+#: community/apps/application/swagger_api/application_api.py:253
+msgid "Similarity"
+msgstr ""
+
+#: community/apps/application/chat_pipeline/step/search_dataset_step/i_search_dataset_step.py:46
+#: community/apps/application/flow/step_node/search_dataset_node/i_search_dataset_node.py:30
+#: community/apps/application/serializers/application_serializers.py:129
+#: community/apps/application/serializers/application_serializers.py:590
+#: community/apps/dataset/serializers/dataset_serializers.py:576
+msgid "The type only supports embedding|keywords|blend"
+msgstr ""
+
+#: community/apps/application/chat_pipeline/step/search_dataset_step/i_search_dataset_step.py:47
+#: community/apps/application/flow/step_node/search_dataset_node/i_search_dataset_node.py:31
+#: community/apps/application/serializers/application_serializers.py:130
+#: community/apps/application/serializers/application_serializers.py:591
+#: community/apps/application/swagger_api/application_api.py:259
+msgid "Retrieval Mode"
+msgstr ""
+
+#: community/apps/application/chat_pipeline/step/search_dataset_step/impl/base_search_dataset_step.py:31
+#: community/apps/application/serializers/application_serializers.py:84
+#: community/apps/application/serializers/application_serializers.py:1026
+#: community/apps/application/serializers/application_serializers.py:1036
+#: community/apps/application/serializers/application_serializers.py:1046
+#: community/apps/dataset/serializers/dataset_serializers.py:801
+#: community/apps/dataset/serializers/document_serializers.py:746
+#: community/apps/setting/models_provider/tools.py:23
+msgid "Model does not exist"
+msgstr ""
+
+#: community/apps/application/chat_pipeline/step/search_dataset_step/impl/base_search_dataset_step.py:33
+#, python-brace-format
+msgid "No permission to use this model {model_name}"
+msgstr ""
+
+#: community/apps/application/chat_pipeline/step/search_dataset_step/impl/base_search_dataset_step.py:41
+msgid ""
+"The vector model of the associated knowledge base is inconsistent and the "
+"segmentation cannot be recalled."
+msgstr ""
+
+#: community/apps/application/chat_pipeline/step/search_dataset_step/impl/base_search_dataset_step.py:43
+msgid "The knowledge base setting is wrong, please reset the knowledge base"
+msgstr ""
+
+#: community/apps/application/flow/step_node/ai_chat_step_node/i_chat_node.py:21
+#: community/apps/application/flow/step_node/image_understand_step_node/i_image_understand_node.py:15
+#: community/apps/application/flow/step_node/question_node/i_question_node.py:21
+msgid "Role Setting"
+msgstr ""
+
+#: community/apps/application/flow/step_node/ai_chat_step_node/i_chat_node.py:28
+#: community/apps/application/flow/step_node/direct_reply_node/i_reply_node.py:24
+#: community/apps/application/flow/step_node/function_lib_node/i_function_lib_node.py:29
+#: community/apps/application/flow/step_node/function_node/i_function_node.py:47
+#: community/apps/application/flow/step_node/image_generate_step_node/i_image_generate_node.py:26
+#: community/apps/application/flow/step_node/image_understand_step_node/i_image_understand_node.py:22
+#: community/apps/application/flow/step_node/question_node/i_question_node.py:26
+#: community/apps/application/flow/step_node/speech_to_text_step_node/i_speech_to_text_node.py:15
+#: community/apps/application/flow/step_node/text_to_speech_step_node/i_text_to_speech_node.py:15
+msgid "Whether to return content"
+msgstr ""
+
+#: community/apps/application/flow/step_node/ai_chat_step_node/i_chat_node.py:35
+msgid "Context Type"
+msgstr ""
+
+#: community/apps/application/flow/step_node/application_node/i_application_node.py:16
+msgid "API Input Fields"
+msgstr ""
+
+#: community/apps/application/flow/step_node/application_node/i_application_node.py:18
+msgid "User Input Fields"
+msgstr ""
+
+#: community/apps/application/flow/step_node/application_node/i_application_node.py:19
+#: community/apps/application/flow/step_node/image_understand_step_node/i_image_understand_node.py:24
+#: community/apps/application/serializers/application_serializers.py:698
+#: community/apps/application/serializers/chat_message_serializers.py:274
+#: community/apps/function_lib/serializers/function_lib_serializer.py:359
+msgid "picture"
+msgstr "Picture"
+
+#: community/apps/application/flow/step_node/application_node/i_application_node.py:20
+#: community/apps/application/flow/step_node/document_extract_node/i_document_extract_node.py:13
+#: community/apps/application/serializers/chat_message_serializers.py:275
+msgid "document"
+msgstr "Document"
+
+#: community/apps/application/flow/step_node/application_node/i_application_node.py:21
+#: community/apps/application/serializers/chat_message_serializers.py:276
+msgid "Audio"
+msgstr ""
+
+#: community/apps/application/flow/step_node/application_node/i_application_node.py:23
+#: community/apps/application/serializers/chat_message_serializers.py:278
+msgid "Child Nodes"
+msgstr ""
+
+#: community/apps/application/flow/step_node/application_node/i_application_node.py:24
+#: community/apps/application/flow/step_node/form_node/i_form_node.py:21
+msgid "Form Data"
+msgstr ""
+
+#: community/apps/application/flow/step_node/application_node/i_application_node.py:58
+msgid ""
+"Parameter value error: The uploaded document lacks file_id, and the document "
+"upload fails"
+msgstr ""
+
+#: community/apps/application/flow/step_node/application_node/i_application_node.py:67
+msgid ""
+"Parameter value error: The uploaded image lacks file_id, and the image "
+"upload fails"
+msgstr ""
+
+#: community/apps/application/flow/step_node/application_node/i_application_node.py:77
+msgid ""
+"Parameter value error: The uploaded audio lacks file_id, and the audio "
+"upload fails."
+msgstr ""
+
+#: community/apps/application/flow/step_node/condition_node/i_condition_node.py:19
+#: community/apps/application/serializers/chat_serializers.py:124
+msgid "Comparator"
+msgstr ""
+
+#: community/apps/application/flow/step_node/condition_node/i_condition_node.py:20
+#: community/apps/application/swagger_api/application_api.py:271
+msgid "value"
+msgstr "Value"
+
+#: community/apps/application/flow/step_node/condition_node/i_condition_node.py:21
+msgid "Fields"
+msgstr ""
+
+#: community/apps/application/flow/step_node/condition_node/i_condition_node.py:25
+msgid "Branch id"
+msgstr "Branch ID"
+
+#: community/apps/application/flow/step_node/condition_node/i_condition_node.py:26
+msgid "Branch Type"
+msgstr ""
+
+#: community/apps/application/flow/step_node/condition_node/i_condition_node.py:27
+msgid "Condition or|and"
+msgstr ""
+
+#: community/apps/application/flow/step_node/direct_reply_node/i_reply_node.py:20
+msgid "Response Type"
+msgstr ""
+
+#: community/apps/application/flow/step_node/direct_reply_node/i_reply_node.py:21
+#: community/apps/application/flow/step_node/variable_assign_node/i_variable_assign_node.py:14
+msgid "Reference Field"
+msgstr ""
+
+#: community/apps/application/flow/step_node/direct_reply_node/i_reply_node.py:23
+msgid "Direct answer content"
+msgstr ""
+
+#: community/apps/application/flow/step_node/direct_reply_node/i_reply_node.py:30
+msgid "Reference field cannot be empty"
+msgstr ""
+
+#: community/apps/application/flow/step_node/direct_reply_node/i_reply_node.py:32
+msgid "Reference field error"
+msgstr ""
+
+#: community/apps/application/flow/step_node/direct_reply_node/i_reply_node.py:35
+msgid "Content cannot be empty"
+msgstr ""
+
+#: community/apps/application/flow/step_node/form_node/i_form_node.py:19
+msgid "Form Configuration"
+msgstr ""
+
+#: community/apps/application/flow/step_node/form_node/i_form_node.py:20
+msgid "Form output content"
+msgstr ""
+
+#: community/apps/application/flow/step_node/function_lib_node/i_function_lib_node.py:22
+#: community/apps/application/flow/step_node/function_node/i_function_node.py:24
+msgid "Variable Name"
+msgstr ""
+
+#: community/apps/application/flow/step_node/function_lib_node/i_function_lib_node.py:23
+#: community/apps/application/flow/step_node/function_node/i_function_node.py:34
+msgid "Variable Value"
+msgstr ""
+
+#: community/apps/application/flow/step_node/function_lib_node/i_function_lib_node.py:27
+msgid "Library ID"
+msgstr ""
+
+#: community/apps/application/flow/step_node/function_lib_node/i_function_lib_node.py:35
+msgid "The function has been deleted"
+msgstr ""
+
+#: community/apps/application/flow/step_node/function_node/i_function_node.py:25
+msgid "Is this field required"
+msgstr ""
+
+#: community/apps/application/flow/step_node/function_node/i_function_node.py:28
+msgid "The field only supports string|int|dict|array|float"
+msgstr ""
+
+#: community/apps/application/flow/step_node/function_node/i_function_node.py:30
+#: community/apps/function_lib/serializers/function_lib_serializer.py:76
+#: community/apps/function_lib/swagger_api/function_lib_api.py:98
+#: community/apps/function_lib/swagger_api/function_lib_api.py:144
+#: community/apps/function_lib/swagger_api/function_lib_api.py:190
+msgid "source"
+msgstr "Source"
+
+#: community/apps/application/flow/step_node/function_node/i_function_node.py:32
+#: community/apps/function_lib/serializers/function_lib_serializer.py:78
+msgid "The field only supports custom|reference"
+msgstr ""
+
+#: community/apps/application/flow/step_node/function_node/i_function_node.py:40
+#, python-brace-format
+msgid "{field}, this field is required."
+msgstr ""
+
+#: community/apps/application/flow/step_node/function_node/i_function_node.py:46
+#: community/apps/function_lib/views/function_lib_views.py:131
+#: community/apps/function_lib/views/function_lib_views.py:145
+msgid "function"
+msgstr "Function"
+
+#: community/apps/application/flow/step_node/image_generate_step_node/i_image_generate_node.py:15
+msgid "Prompt word (positive)"
+msgstr ""
+
+#: community/apps/application/flow/step_node/image_generate_step_node/i_image_generate_node.py:17
+msgid "Prompt word (negative)"
+msgstr ""
+
+#: community/apps/application/flow/step_node/image_generate_step_node/i_image_generate_node.py:24
+#: community/apps/application/flow/step_node/image_understand_step_node/i_image_understand_node.py:20
+msgid "Conversation storage type"
+msgstr ""
+
+#: community/apps/application/flow/step_node/reranker_node/i_reranker_node.py:26
+#: community/apps/application/flow/step_node/search_dataset_node/i_search_dataset_node.py:33
+msgid "Maximum number of words in a quoted segment"
+msgstr ""
+
+#: community/apps/application/flow/step_node/search_dataset_node/i_search_dataset_node.py:27
+#: community/apps/common/swagger_api/common_api.py:36
+#: community/apps/dataset/serializers/dataset_serializers.py:573
+msgid "similarity"
+msgstr "Similarity"
+
+#: community/apps/application/flow/step_node/speech_to_text_step_node/i_speech_to_text_node.py:17
+msgid "The audio file cannot be empty"
+msgstr ""
+
+#: community/apps/application/flow/step_node/speech_to_text_step_node/i_speech_to_text_node.py:31
+msgid ""
+"Parameter value error: The uploaded audio lacks file_id, and the audio "
+"upload fails"
+msgstr ""
+
+#: community/apps/application/flow/step_node/text_to_speech_step_node/i_text_to_speech_node.py:17
+msgid "Text content"
+msgstr ""
+
+#: community/apps/application/flow/workflow_manage.py:107
+#, python-brace-format
+msgid "The branch {branch} of the {node} node needs to be connected"
+msgstr ""
+
+#: community/apps/application/flow/workflow_manage.py:113
+#, python-brace-format
+msgid "{node} Nodes cannot be considered as end nodes"
+msgstr ""
+
+#: community/apps/application/flow/workflow_manage.py:123
+msgid "The next node that does not exist"
+msgstr ""
+
+#: community/apps/application/flow/workflow_manage.py:137
+msgid "The starting node is required"
+msgstr ""
+
+#: community/apps/application/flow/workflow_manage.py:139
+msgid "There can only be one starting node"
+msgstr ""
+
+#: community/apps/application/flow/workflow_manage.py:147
+#, python-brace-format
+msgid "The node {node} model does not exist"
+msgstr ""
+
+#: community/apps/application/flow/workflow_manage.py:157
+#, python-brace-format
+msgid "Node {node} is unavailable"
+msgstr ""
+
+#: community/apps/application/flow/workflow_manage.py:163
+#, python-brace-format
+msgid "The library ID of node {node} cannot be empty"
+msgstr ""
+
+#: community/apps/application/flow/workflow_manage.py:166
+#, python-brace-format
+msgid "The function library for node {node} is not available"
+msgstr ""
+
+#: community/apps/application/flow/workflow_manage.py:172
+msgid "Basic information node is required"
+msgstr ""
+
+#: community/apps/application/flow/workflow_manage.py:174
+msgid "There can only be one basic information node"
+msgstr ""
+
+#: community/apps/application/serializers/application_serializers.py:75
+#: community/apps/application/serializers/chat_serializers.py:618
+#: community/apps/application/serializers/chat_serializers.py:677
+#: community/apps/application/serializers/chat_serializers.py:709
+#: community/apps/application/swagger_api/chat_api.py:365
+#: community/apps/application/swagger_api/chat_api.py:393
+#: community/apps/application/swagger_api/chat_api.py:394
+#: community/apps/application/swagger_api/chat_api.py:415
+#: community/apps/application/swagger_api/chat_api.py:494
+#: community/apps/application/swagger_api/chat_api.py:495
+msgid "Knowledge base id"
+msgstr "Knowledge base ID"
+
+#: community/apps/application/serializers/application_serializers.py:76
+msgid "Knowledge Base List"
+msgstr ""
+
+#: community/apps/application/serializers/application_serializers.py:90
+msgid "The knowledge base id does not exist"
+msgstr ""
+
+#: community/apps/application/serializers/application_serializers.py:107
+msgid "No reference status"
+msgstr ""
+
+#: community/apps/application/serializers/application_serializers.py:123
+msgid "Acquaintance"
+msgstr ""
+
+#: community/apps/application/serializers/application_serializers.py:126
+#: community/apps/application/swagger_api/application_api.py:256
+#: community/apps/application/swagger_api/application_api.py:257
+msgid "Maximum number of quoted characters"
+msgstr ""
+
+#: community/apps/application/serializers/application_serializers.py:133
+msgid "Segment settings not referenced"
+msgstr ""
+
+#: community/apps/application/serializers/application_serializers.py:140
+msgid "Role prompts"
+msgstr ""
+
+#: community/apps/application/serializers/application_serializers.py:142
+#: community/apps/application/swagger_api/application_api.py:303
+#: community/apps/application/swagger_api/application_api.py:305
+msgid "No citation segmentation prompt"
+msgstr ""
+
+#: community/apps/application/serializers/application_serializers.py:144
+msgid "Thinking process switch"
+msgstr ""
+
+#: community/apps/application/serializers/application_serializers.py:148
+msgid "The thinking process begins to mark"
+msgstr ""
+
+#: community/apps/application/serializers/application_serializers.py:151
+msgid "End of thinking process marker"
+msgstr ""
+
+#: community/apps/application/serializers/application_serializers.py:156
+#: community/apps/application/serializers/application_serializers.py:482
+#: community/apps/application/serializers/application_serializers.py:623
+#: community/apps/application/swagger_api/application_api.py:49
+#: community/apps/application/swagger_api/application_api.py:50
+#: community/apps/application/swagger_api/application_api.py:181
+#: community/apps/application/swagger_api/application_api.py:182
+#: community/apps/application/swagger_api/application_api.py:330
+#: community/apps/application/swagger_api/application_api.py:331
+#: community/apps/application/swagger_api/application_api.py:377
+msgid "Application Name"
+msgstr ""
+
+#: community/apps/application/serializers/application_serializers.py:159
+#: community/apps/application/serializers/application_serializers.py:484
+#: community/apps/application/serializers/application_serializers.py:625
+#: community/apps/application/swagger_api/application_api.py:51
+#: community/apps/application/swagger_api/application_api.py:52
+#: community/apps/application/swagger_api/application_api.py:183
+#: community/apps/application/swagger_api/application_api.py:184
+#: community/apps/application/swagger_api/application_api.py:332
+#: community/apps/application/swagger_api/application_api.py:333
+#: community/apps/application/swagger_api/application_api.py:382
+msgid "Application Description"
+msgstr ""
+
+#: community/apps/application/serializers/application_serializers.py:160
+msgid "Workflow Objects"
+msgstr ""
+
+#: community/apps/application/serializers/application_serializers.py:162
+#: community/apps/application/serializers/application_serializers.py:225
+#: community/apps/application/serializers/application_serializers.py:492
+#: community/apps/application/swagger_api/application_api.py:57
+#: community/apps/application/swagger_api/application_api.py:58
+#: community/apps/application/swagger_api/application_api.py:190
+#: community/apps/application/swagger_api/application_api.py:191
+#: community/apps/application/swagger_api/application_api.py:339
+#: community/apps/application/swagger_api/application_api.py:340
+msgid "Opening remarks"
+msgstr "Opening Remarks"
+
+#: community/apps/application/serializers/application_serializers.py:214
+#: community/apps/dataset/serializers/dataset_serializers.py:105
+#: community/apps/dataset/serializers/dataset_serializers.py:106
+msgid "application name"
+msgstr "Application Name"
+
+#: community/apps/application/serializers/application_serializers.py:217
+msgid "application describe"
+msgstr "Application Description"
+
+#: community/apps/application/serializers/application_serializers.py:219
+#: community/apps/application/serializers/application_serializers.py:486
+msgid "Model"
+msgstr ""
+
+#: community/apps/application/serializers/application_serializers.py:223
+#: community/apps/application/serializers/application_serializers.py:490
+msgid "Historical chat records"
+msgstr ""
+
+#: community/apps/application/serializers/application_serializers.py:228
+#: community/apps/application/serializers/application_serializers.py:494
+msgid "Related Knowledge Base"
+msgstr ""
+
+#: community/apps/application/serializers/application_serializers.py:235
+#: community/apps/application/serializers/application_serializers.py:504
+#: community/apps/application/serializers/chat_serializers.py:379
+msgid "Question completion"
+msgstr ""
+
+#: community/apps/application/serializers/application_serializers.py:239
+#: community/apps/application/swagger_api/application_api.py:203
+#: community/apps/application/swagger_api/application_api.py:349
+msgid "Application Type"
+msgstr ""
+
+#: community/apps/application/serializers/application_serializers.py:243
+msgid "Application type only supports SIMPLE|WORK_FLOW"
+msgstr ""
+
+#: community/apps/application/serializers/application_serializers.py:247
+#: community/apps/application/serializers/application_serializers.py:508
+msgid "Model parameters"
+msgstr "Model Parameters"
+
+#: community/apps/application/serializers/application_serializers.py:255
+msgid "Host"
+msgstr ""
+
+#: community/apps/application/serializers/application_serializers.py:256
+msgid "protocol"
+msgstr "Protocol"
+
+#: community/apps/application/serializers/application_serializers.py:339
+#: community/apps/application/swagger_api/application_api.py:153
+#: community/apps/application/swagger_api/application_api.py:154
+msgid "Reset Token"
+msgstr ""
+
+#: community/apps/application/serializers/application_serializers.py:340
+msgid "Is it enabled"
+msgstr ""
+
+#: community/apps/application/serializers/application_serializers.py:343
+#: community/apps/application/swagger_api/application_api.py:158
+#: community/apps/application/swagger_api/application_api.py:159
+msgid "Number of visits"
+msgstr ""
+
+#: community/apps/application/serializers/application_serializers.py:345
+#: community/apps/application/swagger_api/application_api.py:160
+#: community/apps/application/swagger_api/application_api.py:161
+msgid "Whether to enable whitelist"
+msgstr ""
+
+#: community/apps/application/serializers/application_serializers.py:348
+#: community/apps/application/serializers/application_serializers.py:349
+#: community/apps/application/swagger_api/application_api.py:163
+#: community/apps/application/swagger_api/application_api.py:164
+msgid "Whitelist"
+msgstr ""
+
+#: community/apps/application/serializers/application_serializers.py:352
+#: community/apps/application/swagger_api/application_api.py:166
+#: community/apps/application/swagger_api/application_api.py:167
+msgid "Whether to display knowledge sources"
+msgstr ""
+
+#: community/apps/application/serializers/application_serializers.py:423
+msgid "access_token"
+msgstr "Access Token"
+
+#: community/apps/application/serializers/application_serializers.py:425
+msgid "Certification Information"
+msgstr ""
+
+#: community/apps/application/serializers/application_serializers.py:462
+msgid "Invalid access_token"
+msgstr ""
+
+#: community/apps/application/serializers/application_serializers.py:473
+msgid "Wrong password"
+msgstr ""
+
+#: community/apps/application/serializers/application_serializers.py:498
+msgid "Dataset settings"
+msgstr ""
+
+#: community/apps/application/serializers/application_serializers.py:501
+msgid "Model setup"
+msgstr ""
+
+#: community/apps/application/serializers/application_serializers.py:505
+msgid "Icon"
+msgstr ""
+
+#: community/apps/application/serializers/application_serializers.py:515
+#: community/apps/application/serializers/application_serializers.py:722
+#: community/apps/setting/serializers/valid_serializers.py:29
+msgid ""
+"The community version supports up to 5 applications. If you need more "
+"applications, please contact us (https://fit2cloud.com/)."
+msgstr ""
+
+#: community/apps/application/serializers/application_serializers.py:583
+msgid "Query text"
+msgstr ""
+
+#: community/apps/application/serializers/application_serializers.py:585
+msgid "topN"
+msgstr "TopN"
+
+#: community/apps/application/serializers/application_serializers.py:587
+msgid "Relevance"
+msgstr ""
+
+#: community/apps/application/serializers/application_serializers.py:596
+#: community/apps/application/serializers/application_serializers.py:705
+#: community/apps/application/serializers/application_serializers.py:797
+msgid "Application id does not exist"
+msgstr ""
+
+#: community/apps/application/serializers/application_serializers.py:628
+msgid "Select User ID"
+msgstr ""
+
+#: community/apps/application/serializers/application_serializers.py:717
+#: community/apps/dataset/serializers/document_serializers.py:164
+#: community/apps/dataset/serializers/document_serializers.py:213
+#: community/apps/dataset/serializers/document_serializers.py:220
+#: community/apps/dataset/serializers/file_serializers.py:59
+#: community/apps/dataset/views/file.py:35
+#: community/apps/dataset/views/file.py:44
+#: community/apps/function_lib/serializers/function_lib_serializer.py:331
+msgid "file"
+msgstr "File"
+
+#: community/apps/application/serializers/application_serializers.py:732
+#: community/apps/common/handle/impl/qa/zip_parse_qa_handle.py:62
+#: community/apps/common/handle/impl/zip_split_handle.py:56
+#: community/apps/dataset/serializers/document_serializers.py:874
+#: community/apps/dataset/serializers/document_serializers.py:882
+#: community/apps/function_lib/serializers/function_lib_serializer.py:343
+msgid "Unsupported file format"
+msgstr ""
+
+#: community/apps/application/serializers/application_serializers.py:872
+msgid "work_flow is a required field"
+msgstr "work_flow is a required field"
+
+#: community/apps/application/serializers/application_serializers.py:934
+#: community/apps/application/serializers/application_serializers.py:1076
+#, python-brace-format
+msgid "Unknown knowledge base id {dataset_id}, unable to associate"
+msgstr ""
+
+#: community/apps/application/serializers/application_serializers.py:954
+msgid "Illegal User"
+msgstr ""
+
+#: community/apps/application/serializers/application_serializers.py:1028
+#: community/apps/application/serializers/application_serializers.py:1038
+#: community/apps/application/serializers/application_serializers.py:1048
+#, python-brace-format
+msgid "No permission to use this model:{model_name}"
+msgstr ""
+
+#: community/apps/application/serializers/application_serializers.py:1259
+#: community/apps/application/swagger_api/chat_api.py:498
+#: community/apps/application/swagger_api/chat_api.py:499
+msgid "Availability"
+msgstr ""
+
+#: community/apps/application/serializers/application_serializers.py:1263
+#: community/apps/application/swagger_api/application_api.py:129
+#: community/apps/application/swagger_api/application_api.py:130
+msgid "Is cross-domain allowed"
+msgstr ""
+
+#: community/apps/application/serializers/application_serializers.py:1268
+msgid "Cross-domain address"
+msgstr ""
+
+#: community/apps/application/serializers/application_serializers.py:1269
+#: community/apps/application/swagger_api/application_api.py:131
+msgid "Cross-domain list"
+msgstr ""
+
+#: community/apps/application/serializers/application_serializers.py:1274
+msgid "ApiKeyid"
+msgstr ""
+
+#: community/apps/application/serializers/application_serializers.py:1295
+msgid "APIKey does not exist"
+msgstr ""
+
+#: community/apps/application/serializers/application_version_serializers.py:30
+#: community/apps/application/swagger_api/application_version_api.py:24
+#: community/apps/application/swagger_api/application_version_api.py:25
+#: community/apps/application/swagger_api/application_version_api.py:47
+#: community/apps/application/swagger_api/application_version_api.py:70
+#: community/apps/application/swagger_api/application_version_api.py:71
+msgid "Version Name"
+msgstr ""
+
+#: community/apps/application/serializers/application_version_serializers.py:37
+#: community/apps/application/serializers/chat_serializers.py:115
+#: community/apps/application/serializers/chat_serializers.py:240
+msgid "summary"
+msgstr "Summary"
+
+#: community/apps/application/serializers/application_version_serializers.py:61
+msgid "Workflow version id"
+msgstr ""
+
+#: community/apps/application/serializers/application_version_serializers.py:71
+#: community/apps/application/serializers/application_version_serializers.py:86
+msgid "Workflow version does not exist"
+msgstr ""
+
+#: community/apps/application/serializers/chat_message_serializers.py:195
+#: community/apps/dataset/serializers/paragraph_serializers.py:47
+#: community/apps/dataset/serializers/paragraph_serializers.py:180
+#: community/apps/dataset/serializers/paragraph_serializers.py:692
+#: community/apps/dataset/serializers/paragraph_serializers.py:705
+#: community/apps/dataset/serializers/paragraph_serializers.py:706
+#: community/apps/dataset/serializers/problem_serializers.py:41
+#: community/apps/dataset/serializers/problem_serializers.py:52
+#: community/apps/dataset/serializers/problem_serializers.py:113
+#: community/apps/dataset/swagger_api/problem_api.py:24
+#: community/apps/dataset/swagger_api/problem_api.py:25
+#: community/apps/dataset/swagger_api/problem_api.py:109
+#: community/apps/dataset/swagger_api/problem_api.py:110
+#: community/apps/dataset/swagger_api/problem_api.py:126
+#: community/apps/dataset/swagger_api/problem_api.py:127
+#: community/apps/dataset/swagger_api/problem_api.py:154
+#: community/apps/dataset/swagger_api/problem_api.py:169
+msgid "content"
+msgstr "Content"
+
+#: community/apps/application/serializers/chat_message_serializers.py:196
+#: community/apps/setting/serializers/team_serializers.py:45
+#: community/apps/users/serializers/user_serializers.py:472
+#: community/apps/users/serializers/user_serializers.py:495
+#: community/apps/users/serializers/user_serializers.py:586
+msgid "Role"
+msgstr ""
+
+#: community/apps/application/serializers/chat_message_serializers.py:202
+msgid "Regenerate"
+msgstr ""
+
+#: community/apps/application/serializers/chat_message_serializers.py:256
+msgid "Is the answer in streaming mode"
+msgstr ""
+
+#: community/apps/application/serializers/chat_message_serializers.py:257
+msgid "Do you want to reply again"
+msgstr ""
+
+#: community/apps/application/serializers/chat_message_serializers.py:259
+#: community/apps/application/serializers/chat_serializers.py:442
+#: community/apps/application/serializers/chat_serializers.py:534
+#: community/apps/application/serializers/chat_serializers.py:590
+#: community/apps/application/serializers/chat_serializers.py:616
+#: community/apps/application/serializers/chat_serializers.py:675
+#: community/apps/application/swagger_api/chat_api.py:148
+#: community/apps/application/swagger_api/chat_api.py:149
+#: community/apps/application/swagger_api/chat_api.py:360
+#: community/apps/application/swagger_api/chat_api.py:437
+#: community/apps/application/swagger_api/chat_api.py:470
+msgid "Conversation record id"
+msgstr ""
+
+#: community/apps/application/serializers/chat_message_serializers.py:262
+msgid "Node id"
+msgstr "Node ID"
+
+#: community/apps/application/serializers/chat_message_serializers.py:265
+#: community/apps/application/swagger_api/chat_api.py:142
+#: community/apps/application/swagger_api/chat_api.py:143
+msgid "Runtime node id"
+msgstr ""
+
+#: community/apps/application/serializers/chat_message_serializers.py:268
+msgid "Node parameters"
+msgstr "Node Parameters"
+
+#: community/apps/application/serializers/chat_message_serializers.py:273
+msgid "Global variables"
+msgstr "Global Variables"
+
+#: community/apps/application/serializers/chat_message_serializers.py:286
+#: community/apps/application/serializers/chat_message_serializers.py:421
+#: community/apps/application/serializers/chat_serializers.py:469
+msgid "Conversation does not exist"
+msgstr ""
+
+#: community/apps/application/serializers/chat_message_serializers.py:303
+msgid "The number of visits exceeds today's visits"
+msgstr ""
+
+#: community/apps/application/serializers/chat_message_serializers.py:314
+msgid "The current model is not available"
+msgstr ""
+
+#: community/apps/application/serializers/chat_message_serializers.py:316
+msgid "The model is downloading, please try again later"
+msgstr ""
+
+#: community/apps/application/serializers/chat_message_serializers.py:361
+#: community/apps/application/serializers/chat_serializers.py:599
+#: community/apps/application/serializers/chat_serializers.py:645
+#: community/apps/application/serializers/chat_serializers.py:694
+msgid "Conversation record does not exist"
+msgstr ""
+
+#: community/apps/application/serializers/chat_message_serializers.py:454
+#: community/apps/application/serializers/chat_serializers.py:314
+msgid "The application has not been published. Please use it after publishing."
+msgstr ""
+
+#: community/apps/application/serializers/chat_serializers.py:55
+msgid "node"
+msgstr "Node"
+
+#: community/apps/application/serializers/chat_serializers.py:56
+msgid "Connection"
+msgstr ""
+
+#: community/apps/application/serializers/chat_serializers.py:71
+#: community/apps/application/swagger_api/chat_api.py:48
+#: community/apps/application/swagger_api/chat_api.py:49
+#: community/apps/application/swagger_api/chat_api.py:169
+#: community/apps/application/swagger_api/chat_api.py:170
+#: community/apps/application/swagger_api/chat_api.py:256
+msgid "abstract"
+msgstr "Abstract"
+
+#: community/apps/application/serializers/chat_serializers.py:121
+#: community/apps/application/swagger_api/chat_api.py:258
+msgid "Minimum number of likes"
+msgstr ""
+
+#: community/apps/application/serializers/chat_serializers.py:123
+#: community/apps/application/swagger_api/chat_api.py:260
+msgid "Minimum number of clicks"
+msgstr ""
+
+#: community/apps/application/serializers/chat_serializers.py:126
+msgid "Only supports and|or"
+msgstr ""
+
+#: community/apps/application/serializers/chat_serializers.py:241
+msgid "Problem after optimization"
+msgstr ""
+
+#: community/apps/application/serializers/chat_serializers.py:242
+msgid "answer"
+msgstr "Answer"
+
+#: community/apps/application/serializers/chat_serializers.py:242
+msgid "User feedback"
+msgstr ""
+
+#: community/apps/application/serializers/chat_serializers.py:244
+msgid "Section title + content"
+msgstr ""
+
+#: community/apps/application/serializers/chat_serializers.py:245
+#: community/apps/application/views/chat_views.py:385
+#: community/apps/application/views/chat_views.py:386
+msgid "Annotation"
+msgstr ""
+
+#: community/apps/application/serializers/chat_serializers.py:245
+msgid "Consuming tokens"
+msgstr ""
+
+#: community/apps/application/serializers/chat_serializers.py:245
+msgid "Time consumed (s)"
+msgstr ""
+
+#: community/apps/application/serializers/chat_serializers.py:246
+msgid "Question Time"
+msgstr ""
+
+#: community/apps/application/serializers/chat_serializers.py:337
+msgid "Workflow"
+msgstr ""
+
+#: community/apps/application/serializers/chat_serializers.py:369
+msgid "Multi-round conversation"
+msgstr ""
+
+#: community/apps/application/serializers/chat_serializers.py:372
+msgid "Related Datasets"
+msgstr ""
+
+#: community/apps/application/serializers/chat_serializers.py:449
+msgid "Application authentication information does not exist"
+msgstr ""
+
+#: community/apps/application/serializers/chat_serializers.py:451
+msgid "Displaying knowledge sources is not enabled"
+msgstr ""
+
+#: community/apps/application/serializers/chat_serializers.py:537
+msgid "Bidding Status"
+msgstr ""
+
+#: community/apps/application/serializers/chat_serializers.py:546
+msgid ""
+"Voting on the current session minutes, please do not send repeated requests"
+msgstr ""
+
+#: community/apps/application/serializers/chat_serializers.py:551
+msgid "Non-existent conversation chat_record_id"
+msgstr ""
+
+#: community/apps/application/serializers/chat_serializers.py:568
+msgid "Already voted, please cancel first and then vote again"
+msgstr ""
+
+#: community/apps/application/serializers/chat_serializers.py:575
+#: community/apps/application/swagger_api/chat_api.py:379
+#: community/apps/application/swagger_api/chat_api.py:380
+#: community/apps/dataset/swagger_api/problem_api.py:128
+#: community/apps/dataset/swagger_api/problem_api.py:129
+msgid "Section title"
+msgstr "Section Title"
+
+#: community/apps/application/serializers/chat_serializers.py:576
+#: community/apps/application/swagger_api/chat_api.py:381
+#: community/apps/application/swagger_api/chat_api.py:382
+#: community/apps/application/swagger_api/chat_api.py:483
+#: community/apps/application/swagger_api/chat_api.py:484
+#: community/apps/common/swagger_api/common_api.py:57
+#: community/apps/common/swagger_api/common_api.py:58
+msgid "Paragraph content"
+msgstr "Paragraph Content"
+
+#: community/apps/application/serializers/chat_serializers.py:620
+#: community/apps/application/serializers/chat_serializers.py:679
+#: community/apps/application/serializers/chat_serializers.py:710
+#: community/apps/application/swagger_api/chat_api.py:370
+#: community/apps/application/swagger_api/chat_api.py:395
+#: community/apps/application/swagger_api/chat_api.py:396
+#: community/apps/application/swagger_api/chat_api.py:496
+#: community/apps/application/swagger_api/chat_api.py:497
+msgid "Document id"
+msgstr "Document ID"
+
+#: community/apps/application/serializers/chat_serializers.py:626
+#: community/apps/application/serializers/chat_serializers.py:717
+#: community/apps/dataset/serializers/paragraph_serializers.py:576
+msgid "The document id is incorrect"
+msgstr ""
+
+#: community/apps/application/serializers/chat_serializers.py:681
+#: community/apps/application/swagger_api/chat_api.py:310
+#: community/apps/application/swagger_api/chat_api.py:311
+msgid "Paragraph id"
+msgstr "Paragraph ID"
+
+#: community/apps/application/serializers/chat_serializers.py:697
+#, python-brace-format
+msgid ""
+"The paragraph id is wrong. The current conversation record does not exist. "
+"[{paragraph_id}] paragraph id"
+msgstr ""
+
+#: community/apps/application/serializers/chat_serializers.py:736
+msgid "Conversation records that do not exist"
+msgstr ""
+
+#: community/apps/application/swagger_api/application_api.py:24
+#: community/apps/application/views/chat_views.py:470
+#: community/apps/application/views/chat_views.py:471
+msgid "Upload files"
+msgstr ""
+
+#: community/apps/application/swagger_api/application_api.py:35
+#: community/apps/application/swagger_api/application_api.py:36
+msgid "Application authentication token"
+msgstr ""
+
+#: community/apps/application/swagger_api/application_api.py:48
+#: community/apps/application/swagger_api/application_version_api.py:22
+#: community/apps/application/swagger_api/application_version_api.py:23
+msgid "Primary key id"
+msgstr ""
+
+#: community/apps/application/swagger_api/application_api.py:60
+msgid "Example List"
+msgstr ""
+
+#: community/apps/application/swagger_api/application_api.py:61
+#: community/apps/application/swagger_api/application_api.py:62
+msgid "Affiliation user"
+msgstr ""
+
+#: community/apps/application/swagger_api/application_api.py:64
+msgid "Is publish"
+msgstr ""
+
+#: community/apps/application/swagger_api/application_api.py:66
+#: community/apps/application/swagger_api/application_api.py:67
+#: community/apps/application/swagger_api/application_version_api.py:28
+#: community/apps/application/swagger_api/application_version_api.py:29
+#: community/apps/application/swagger_api/chat_api.py:185
+#: community/apps/application/swagger_api/chat_api.py:186
+#: community/apps/application/swagger_api/chat_api.py:335
+#: community/apps/application/swagger_api/chat_api.py:336
+#: community/apps/application/swagger_api/chat_api.py:503
+#: community/apps/application/swagger_api/chat_api.py:504
+msgid "Creation time"
+msgstr "Create Time"
+
+#: community/apps/application/swagger_api/application_api.py:69
+#: community/apps/application/swagger_api/application_api.py:70
+#: community/apps/application/swagger_api/application_version_api.py:30
+#: community/apps/application/swagger_api/application_version_api.py:31
+#: community/apps/application/swagger_api/chat_api.py:332
+#: community/apps/application/swagger_api/chat_api.py:333
+#: community/apps/application/swagger_api/chat_api.py:500
+#: community/apps/application/swagger_api/chat_api.py:501
+msgid "Modification time"
+msgstr "Update Time"
+
+#: community/apps/application/swagger_api/application_api.py:74
+#: community/apps/application/swagger_api/application_api.py:194
+#: community/apps/application/swagger_api/application_api.py:195
+#: community/apps/application/swagger_api/application_api.py:343
+#: community/apps/application/swagger_api/application_api.py:344
+#: community/apps/application/swagger_api/chat_api.py:229
+#: community/apps/application/swagger_api/chat_api.py:230
+msgid "List of associated knowledge base IDs"
+msgstr ""
+
+#: community/apps/application/swagger_api/application_api.py:76
+msgid "List of associated knowledge base IDs (returned when querying details)"
+msgstr ""
+
+#: community/apps/application/swagger_api/application_api.py:91
+msgid "Model Type"
+msgstr ""
+
+#: community/apps/application/swagger_api/application_api.py:117
+msgid "Application api_key id"
+msgstr ""
+
+#: community/apps/application/swagger_api/application_api.py:126
+#: community/apps/application/swagger_api/application_api.py:127
+#: community/apps/application/swagger_api/application_api.py:156
+#: community/apps/application/swagger_api/application_api.py:157
+msgid "Is activation"
+msgstr ""
+
+#: community/apps/application/swagger_api/application_api.py:198
+#: community/apps/application/swagger_api/application_api.py:347
+#: community/apps/application/swagger_api/application_api.py:348
+msgid "Problem Optimization"
+msgstr ""
+
+#: community/apps/application/swagger_api/application_api.py:199
+msgid "Whether to enable problem optimization"
+msgstr ""
+
+#: community/apps/application/swagger_api/application_api.py:204
+#: community/apps/application/swagger_api/application_api.py:350
+msgid "Application Type SIMPLE | WORK_FLOW"
+msgstr ""
+
+#: community/apps/application/swagger_api/application_api.py:207
+#: community/apps/application/swagger_api/application_api.py:208
+#: community/apps/application/swagger_api/application_api.py:352
+#: community/apps/application/swagger_api/application_api.py:353
+msgid "Question optimization tips"
+msgstr ""
+
+#: community/apps/application/swagger_api/application_api.py:211
+#: community/apps/application/swagger_api/application_api.py:212
+#: community/apps/application/swagger_api/application_api.py:356
+#: community/apps/application/swagger_api/application_api.py:357
+msgid "Text-to-speech model ID"
+msgstr ""
+
+#: community/apps/application/swagger_api/application_api.py:213
+#: community/apps/application/swagger_api/application_api.py:214
+#: community/apps/application/swagger_api/application_api.py:358
+#: community/apps/application/swagger_api/application_api.py:359
+msgid "Speech-to-text model id"
+msgstr ""
+
+#: community/apps/application/swagger_api/application_api.py:215
+#: community/apps/application/swagger_api/application_api.py:216
+#: community/apps/application/swagger_api/application_api.py:360
+#: community/apps/application/swagger_api/application_api.py:361
+msgid "Is speech-to-text enabled"
+msgstr ""
+
+#: community/apps/application/swagger_api/application_api.py:217
+#: community/apps/application/swagger_api/application_api.py:218
+#: community/apps/application/swagger_api/application_api.py:362
+#: community/apps/application/swagger_api/application_api.py:363
+msgid "Is text-to-speech enabled"
+msgstr ""
+
+#: community/apps/application/swagger_api/application_api.py:219
+#: community/apps/application/swagger_api/application_api.py:220
+#: community/apps/application/swagger_api/application_api.py:364
+#: community/apps/application/swagger_api/application_api.py:365
+msgid "Text-to-speech type"
+msgstr ""
+
+#: community/apps/application/swagger_api/application_api.py:233
+msgid "Node List"
+msgstr ""
+
+#: community/apps/application/swagger_api/application_api.py:236
+msgid "Connection List"
+msgstr ""
+
+#: community/apps/application/swagger_api/application_api.py:266
+msgid "state"
+msgstr "State"
+
+#: community/apps/application/swagger_api/application_api.py:268
+msgid "ai_questioning|designated_answer"
+msgstr ""
+
+#: community/apps/application/swagger_api/application_api.py:273
+msgid ""
+"ai_questioning: is the title, designated_answer: is the designated answer "
+"content"
+msgstr ""
+
+#: community/apps/application/swagger_api/application_api.py:403
+#: community/apps/function_lib/swagger_api/function_lib_api.py:216
+msgid "Upload image files"
+msgstr ""
+
+#: community/apps/application/swagger_api/application_api.py:434
+#: community/apps/application/swagger_api/application_api.py:435
+msgid "Text"
+msgstr ""
+
+#: community/apps/application/swagger_api/application_statistics_api.py:41
+#: community/apps/application/swagger_api/application_statistics_api.py:42
+#: community/apps/application/swagger_api/chat_api.py:490
+#: community/apps/application/swagger_api/chat_api.py:491
+msgid "Number of Likes"
+msgstr ""
+
+#: community/apps/application/swagger_api/application_statistics_api.py:44
+#: community/apps/application/swagger_api/chat_api.py:492
+#: community/apps/application/swagger_api/chat_api.py:493
+msgid "Number of thumbs-downs"
+msgstr ""
+
+#: community/apps/application/swagger_api/application_statistics_api.py:45
+#: community/apps/application/swagger_api/application_statistics_api.py:46
+msgid "Number of tokens used"
+msgstr ""
+
+#: community/apps/application/swagger_api/application_statistics_api.py:47
+#: community/apps/application/swagger_api/application_statistics_api.py:48
+msgid "Number of conversations"
+msgstr ""
+
+#: community/apps/application/swagger_api/application_statistics_api.py:49
+#: community/apps/application/swagger_api/application_statistics_api.py:50
+msgid "Number of customers"
+msgstr ""
+
+#: community/apps/application/swagger_api/application_statistics_api.py:51
+#: community/apps/application/swagger_api/application_statistics_api.py:52
+msgid "Number of new customers"
+msgstr ""
+
+#: community/apps/application/swagger_api/application_statistics_api.py:54
+#: community/apps/application/swagger_api/application_statistics_api.py:69
+#: community/apps/application/swagger_api/application_statistics_api.py:70
+msgid "time"
+msgstr "Time"
+
+#: community/apps/application/swagger_api/application_statistics_api.py:55
+msgid "Time, this field is only available when querying trends"
+msgstr ""
+
+#: community/apps/application/swagger_api/application_statistics_api.py:66
+#: community/apps/application/swagger_api/application_statistics_api.py:83
+msgid "New quantity"
+msgstr "New Quantity"
+
+#: community/apps/application/swagger_api/application_statistics_api.py:81
+#: community/apps/application/swagger_api/application_statistics_api.py:82
+msgid "Today's new quantity"
+msgstr ""
+
+#: community/apps/application/swagger_api/application_version_api.py:26
+#: community/apps/application/swagger_api/application_version_api.py:27
+msgid "Workflow data"
+msgstr "Workflow Data"
+
+#: community/apps/application/swagger_api/application_version_api.py:61
+msgid "Application version id"
+msgstr "Application Version ID"
+
+#: community/apps/application/swagger_api/chat_api.py:61
+#: community/apps/application/swagger_api/chat_api.py:62
+#: community/apps/application/swagger_api/chat_api.py:92
+#: community/apps/dataset/serializers/problem_serializers.py:91
+msgid "problem"
+msgstr "Problem"
+
+#: community/apps/application/swagger_api/chat_api.py:68
+msgid "Question content"
+msgstr "Question Content"
+
+#: community/apps/application/swagger_api/chat_api.py:72
+msgid "role"
+msgstr "Role"
+
+#: community/apps/application/swagger_api/chat_api.py:77
+#: community/apps/application/swagger_api/chat_api.py:93
+msgid "regenerate"
+msgstr "Regenerate"
+
+#: community/apps/application/swagger_api/chat_api.py:79
+msgid "Stream Output"
+msgstr ""
+
+#: community/apps/application/swagger_api/chat_api.py:94
+msgid "Is it streaming output"
+msgstr ""
+
+#: community/apps/application/swagger_api/chat_api.py:96
+#: community/apps/application/swagger_api/chat_api.py:97
+#| msgid "Workflow data"
+msgid "Form data"
+msgstr ""
+
+#: community/apps/application/swagger_api/chat_api.py:101
+#: community/apps/application/swagger_api/chat_api.py:102
+#| msgid "state list"
+msgid "Image list"
+msgstr ""
+
+#: community/apps/application/swagger_api/chat_api.py:107
+msgid "Image name"
+msgstr ""
+
+#: community/apps/application/swagger_api/chat_api.py:109
+msgid "Image URL"
+msgstr ""
+
+#: community/apps/application/swagger_api/chat_api.py:115
+#: community/apps/application/swagger_api/chat_api.py:116
+#: community/apps/dataset/views/document.py:133
+#: community/apps/dataset/views/document.py:134
+msgid "Document list"
+msgstr "Document List"
+
+#: community/apps/application/swagger_api/chat_api.py:122
+msgid "Document name"
+msgstr ""
+
+#: community/apps/application/swagger_api/chat_api.py:124
+msgid "Document URL"
+msgstr ""
+
+#: community/apps/application/swagger_api/chat_api.py:129
+#: community/apps/application/swagger_api/chat_api.py:130
+#| msgid "id list"
+msgid "Audio list"
+msgstr ""
+
+#: community/apps/application/swagger_api/chat_api.py:135
+msgid "Audio name"
+msgstr ""
+
+#: community/apps/application/swagger_api/chat_api.py:137
+msgid "Audio URL"
+msgstr ""
+
+#: community/apps/application/swagger_api/chat_api.py:145
+#: community/apps/application/swagger_api/chat_api.py:146
+msgid "Node data"
+msgstr ""
+
+#: community/apps/application/swagger_api/chat_api.py:151
+#: community/apps/application/swagger_api/chat_api.py:152
+msgid "Child node"
+msgstr ""
+
+#: community/apps/application/swagger_api/chat_api.py:173
+#: community/apps/application/swagger_api/chat_api.py:174
+msgid "Number of dialogue questions"
+msgstr ""
+
+#: community/apps/application/swagger_api/chat_api.py:176
+#: community/apps/application/swagger_api/chat_api.py:177
+msgid "Number of tags"
+msgstr ""
+
+#: community/apps/application/swagger_api/chat_api.py:178
+#: community/apps/application/swagger_api/chat_api.py:179
+#: community/apps/common/swagger_api/common_api.py:64
+#: community/apps/common/swagger_api/common_api.py:65
+#: community/apps/dataset/serializers/paragraph_serializers.py:711
+#: community/apps/dataset/serializers/paragraph_serializers.py:712
+msgid "Number of likes"
+msgstr ""
+
+#: community/apps/application/swagger_api/chat_api.py:180
+#: community/apps/application/swagger_api/chat_api.py:181
+msgid "Number of clicks"
+msgstr ""
+
+#: community/apps/application/swagger_api/chat_api.py:182
+#: community/apps/application/swagger_api/chat_api.py:183
+msgid "Change time"
+msgstr "Update time"
+
+#: community/apps/application/swagger_api/chat_api.py:224
+msgid "Application ID, pass when modifying, do not pass when creating"
+msgstr ""
+
+#: community/apps/application/swagger_api/chat_api.py:225
+#: community/apps/application/swagger_api/chat_api.py:226
+msgid "Model ID"
+msgstr ""
+
+#: community/apps/application/swagger_api/chat_api.py:232
+#: community/apps/application/swagger_api/chat_api.py:234
+msgid "Do you want to initiate multiple sessions"
+msgstr ""
+
+#: community/apps/application/swagger_api/chat_api.py:237
+msgid "Problem optimization"
+msgstr ""
+
+#: community/apps/application/swagger_api/chat_api.py:238
+msgid "Do you want to enable problem optimization"
+msgstr ""
+
+#: community/apps/application/swagger_api/chat_api.py:254
+msgid "Historical days"
+msgstr ""
+
+#: community/apps/application/swagger_api/chat_api.py:262
+msgid "or|and comparator"
+msgstr ""
+
+#: community/apps/application/swagger_api/chat_api.py:266
+#| msgid "Start time"
+msgid "start time"
+msgstr "Start Time"
+
+#: community/apps/application/swagger_api/chat_api.py:291
+msgid "Is it ascending order"
+msgstr ""
+
+#: community/apps/application/swagger_api/chat_api.py:304
+msgid "Session log id"
+msgstr ""
+
+#: community/apps/application/swagger_api/chat_api.py:305
+msgid "Conversation log id"
+msgstr ""
+
+#: community/apps/application/swagger_api/chat_api.py:306
+#: community/apps/application/swagger_api/chat_api.py:307
+#: community/apps/application/swagger_api/chat_api.py:446
+msgid "Voting Status"
+msgstr ""
+
+#: community/apps/application/swagger_api/chat_api.py:308
+#: community/apps/application/swagger_api/chat_api.py:309
+msgid "Dataset id"
+msgstr "Knowledge Base ID"
+
+#: community/apps/application/swagger_api/chat_api.py:312
+#: community/apps/application/swagger_api/chat_api.py:313
+msgid "Resource ID"
+msgstr ""
+
+#: community/apps/application/swagger_api/chat_api.py:314
+#: community/apps/application/swagger_api/chat_api.py:315
+msgid "Resource Type"
+msgstr ""
+
+#: community/apps/application/swagger_api/chat_api.py:317
+#: community/apps/application/swagger_api/chat_api.py:318
+msgid "Number of tokens consumed by the question"
+msgstr ""
+
+#: community/apps/application/swagger_api/chat_api.py:320
+#: community/apps/application/swagger_api/chat_api.py:321
+msgid "The number of tokens consumed by the answer"
+msgstr ""
+
+#: community/apps/application/swagger_api/chat_api.py:324
+#: community/apps/application/swagger_api/chat_api.py:325
+msgid "Improved annotation list"
+msgstr ""
+
+#: community/apps/application/swagger_api/chat_api.py:328
+msgid "Corresponding session Corresponding subscript"
+msgstr ""
+
+#: community/apps/application/swagger_api/chat_api.py:329
+msgid "Corresponding session id corresponding subscript"
+msgstr ""
+
+#: community/apps/application/swagger_api/chat_api.py:397
+#: community/apps/application/swagger_api/chat_api.py:398
+msgid "Conversation id list"
+msgstr ""
+
+#: community/apps/application/swagger_api/chat_api.py:447
+msgid "-1: Cancel vote | 0: Agree | 1: Oppose"
+msgstr ""
+
+#: community/apps/application/swagger_api/chat_api.py:485
+#: community/apps/application/swagger_api/chat_api.py:486
+#: community/apps/common/swagger_api/common_api.py:59
+#: community/apps/common/swagger_api/common_api.py:60
+#: community/apps/dataset/serializers/paragraph_serializers.py:687
+#: community/apps/dataset/serializers/paragraph_serializers.py:707
+#: community/apps/dataset/serializers/paragraph_serializers.py:708
+msgid "title"
+msgstr "Title"
+
+#: community/apps/application/swagger_api/chat_api.py:486
+#: community/apps/common/swagger_api/common_api.py:60
+msgid "Description of xxx"
+msgstr ""
+
+#: community/apps/application/swagger_api/chat_api.py:487
+#: community/apps/application/swagger_api/chat_api.py:488
+#: community/apps/common/swagger_api/common_api.py:61
+#: community/apps/common/swagger_api/common_api.py:62
+msgid "Number of hits"
+msgstr ""
+
+#: community/apps/application/views/application_version_views.py:28
+#: community/apps/application/views/application_version_views.py:29
+#: community/apps/application/views/application_views.py:489
+#: community/apps/application/views/application_views.py:490
+msgid "Get the application list"
+msgstr ""
+
+#: community/apps/application/views/application_version_views.py:32
+#: community/apps/application/views/application_version_views.py:50
+#: community/apps/application/views/application_version_views.py:68
+#: community/apps/application/views/application_version_views.py:83
+msgid "Application/Version"
+msgstr ""
+
+#: community/apps/application/views/application_version_views.py:45
+#: community/apps/application/views/application_version_views.py:46
+msgid "Get the list of application versions by page"
+msgstr ""
+
+#: community/apps/application/views/application_version_views.py:64
+#: community/apps/application/views/application_version_views.py:65
+msgid "Get application version details"
+msgstr ""
+
+#: community/apps/application/views/application_version_views.py:78
+#: community/apps/application/views/application_version_views.py:79
+msgid "Modify application version information"
+msgstr ""
+
+#: community/apps/application/views/application_views.py:42
+#: community/apps/application/views/application_views.py:43
+msgid "User Statistics"
+msgstr ""
+
+#: community/apps/application/views/application_views.py:44
+#: community/apps/application/views/application_views.py:70
+#: community/apps/application/views/application_views.py:95
+#: community/apps/application/views/application_views.py:121
+msgid "Application/Statistics"
+msgstr ""
+
+#: community/apps/application/views/application_views.py:68
+#: community/apps/application/views/application_views.py:69
+msgid "User demographic trends"
+msgstr ""
+
+#: community/apps/application/views/application_views.py:93
+#: community/apps/application/views/application_views.py:94
+msgid "Conversation statistics"
+msgstr ""
+
+#: community/apps/application/views/application_views.py:119
+#: community/apps/application/views/application_views.py:120
+msgid "Dialogue-related statistical trends"
+msgstr ""
+
+#: community/apps/application/views/application_views.py:150
+#: community/apps/application/views/application_views.py:151
+msgid "Modify application icon"
+msgstr ""
+
+#: community/apps/application/views/application_views.py:152
+#: community/apps/application/views/application_views.py:175
+#: community/apps/application/views/application_views.py:189
+#: community/apps/application/views/application_views.py:202
+#: community/apps/application/views/application_views.py:216
+#: community/apps/application/views/application_views.py:236
+#: community/apps/application/views/application_views.py:255
+#: community/apps/application/views/application_views.py:274
+#: community/apps/application/views/application_views.py:313
+#: community/apps/application/views/application_views.py:482
+#: community/apps/application/views/application_views.py:493
+#: community/apps/application/views/application_views.py:508
+#: community/apps/application/views/application_views.py:535
+#: community/apps/application/views/application_views.py:555
+#: community/apps/application/views/application_views.py:575
+#: community/apps/application/views/application_views.py:593
+#: community/apps/application/views/application_views.py:614
+#: community/apps/application/views/application_views.py:635
+#: community/apps/application/views/application_views.py:670
+msgid "Application"
+msgstr ""
+
+#: community/apps/application/views/application_views.py:173
+msgid "Import Application"
+msgstr ""
+
+#: community/apps/application/views/application_views.py:187
+msgid "Export Application"
+msgstr ""
+
+#: community/apps/application/views/application_views.py:200
+#: community/apps/application/views/application_views.py:201
+msgid "Get embedded js"
+msgstr ""
+
+#: community/apps/application/views/application_views.py:214
+#: community/apps/application/views/application_views.py:215
+msgid "Get a list of models"
+msgstr ""
+
+#: community/apps/application/views/application_views.py:234
+#: community/apps/application/views/application_views.py:235
+#: community/apps/setting/views/model.py:100
+#: community/apps/setting/views/model.py:101
+msgid "Get model parameter form"
+msgstr ""
+
+#: community/apps/application/views/application_views.py:253
+#: community/apps/application/views/application_views.py:254
+msgid "Get a list of function libraries"
+msgstr ""
+
+#: community/apps/application/views/application_views.py:272
+#: community/apps/application/views/application_views.py:273
+msgid "Get library details"
+msgstr ""
+
+#: community/apps/application/views/application_views.py:292
+#: community/apps/application/views/application_views.py:293
+msgid "Get the list of apps created by the current user"
+msgstr ""
+
+#: community/apps/application/views/application_views.py:294
+#: community/apps/application/views/application_views.py:333
+#: community/apps/application/views/chat_views.py:74
+#: community/apps/application/views/chat_views.py:93
+#: community/apps/application/views/chat_views.py:105
+#: community/apps/application/views/chat_views.py:118
+#: community/apps/application/views/chat_views.py:347
+msgid "Application/Chat"
+msgstr ""
+
+#: community/apps/application/views/application_views.py:311
+#: community/apps/application/views/application_views.py:312
+msgid "Get application data"
+msgstr ""
+
+#: community/apps/application/views/application_views.py:331
+#: community/apps/application/views/application_views.py:332
+msgid "Get application related information"
+msgstr ""
+
+#: community/apps/application/views/application_views.py:346
+#: community/apps/application/views/application_views.py:347
+msgid "Add ApiKey"
+msgstr ""
+
+#: community/apps/application/views/application_views.py:348
+#: community/apps/application/views/application_views.py:364
+#: community/apps/application/views/application_views.py:383
+#: community/apps/application/views/application_views.py:402
+msgid "Application/API_KEY"
+msgstr ""
+
+#: community/apps/application/views/application_views.py:362
+#: community/apps/application/views/application_views.py:363
+msgid "Get the application API_KEY list"
+msgstr ""
+
+#: community/apps/application/views/application_views.py:381
+#: community/apps/application/views/application_views.py:382
+msgid "Modify application API_KEY"
+msgstr ""
+
+#: community/apps/application/views/application_views.py:400
+#: community/apps/application/views/application_views.py:401
+msgid "Delete Application API_KEY"
+msgstr ""
+
+#: community/apps/application/views/application_views.py:421
+#: community/apps/application/views/application_views.py:422
+msgid "Modify Application AccessToken"
+msgstr ""
+
+#: community/apps/application/views/application_views.py:423
+#: community/apps/application/views/application_views.py:441
+msgid "Application/Public Access"
+msgstr ""
+
+#: community/apps/application/views/application_views.py:438
+#: community/apps/application/views/application_views.py:439
+msgid "Get the application AccessToken information"
+msgstr ""
+
+#: community/apps/application/views/application_views.py:462
+#: community/apps/application/views/application_views.py:463
+msgid "Application Certification"
+msgstr ""
+
+#: community/apps/application/views/application_views.py:465
+msgid "Application/Certification"
+msgstr ""
+
+#: community/apps/application/views/application_views.py:479
+#: community/apps/application/views/application_views.py:480
+msgid "Create an application"
+msgstr ""
+
+#: community/apps/application/views/application_views.py:505
+msgid "Hit Test List"
+msgstr ""
+
+#: community/apps/application/views/application_views.py:530
+#: community/apps/application/views/application_views.py:531
+msgid "Publishing an application"
+msgstr ""
+
+#: community/apps/application/views/application_views.py:551
+#: community/apps/application/views/application_views.py:552
+msgid "Deleting application"
+msgstr ""
+
+#: community/apps/application/views/application_views.py:570
+#: community/apps/application/views/application_views.py:571
+msgid "Modify the application"
+msgstr ""
+
+#: community/apps/application/views/application_views.py:589
+#: community/apps/application/views/application_views.py:590
+msgid "Get application details"
+msgstr ""
+
+#: community/apps/application/views/application_views.py:609
+#: community/apps/application/views/application_views.py:610
+msgid "Get the knowledge base available to the current application"
+msgstr ""
+
+#: community/apps/application/views/application_views.py:630
+#: community/apps/application/views/application_views.py:631
+msgid "Get the application list by page"
+msgstr ""
+
+#: community/apps/application/views/application_views.py:665
+#: community/apps/application/views/application_views.py:666
+msgid "text to speech"
+msgstr "Text to speech"
+
+#: community/apps/application/views/chat_views.py:36
+#: community/apps/application/views/chat_views.py:37
+msgid "OpenAI Interface Dialogue"
+msgstr ""
+
+#: community/apps/application/views/chat_views.py:39
+msgid "OpenAI Dialogue"
+msgstr ""
+
+#: community/apps/application/views/chat_views.py:52
+#: community/apps/application/views/chat_views.py:53
+msgid "Export conversation"
+msgstr "Export Conversation"
+
+#: community/apps/application/views/chat_views.py:55
+#: community/apps/application/views/chat_views.py:156
+#: community/apps/application/views/chat_views.py:174
+#: community/apps/application/views/chat_views.py:197
+#: community/apps/application/views/chat_views.py:217
+#: community/apps/application/views/chat_views.py:235
+#: community/apps/application/views/chat_views.py:257
+#: community/apps/application/views/chat_views.py:282
+#: community/apps/application/views/chat_views.py:302
+#: community/apps/application/views/chat_views.py:324
+#: community/apps/application/views/chat_views.py:489
+msgid "Application/Conversation Log"
+msgstr ""
+
+#: community/apps/application/views/chat_views.py:71
+#: community/apps/application/views/chat_views.py:72
+msgid "Get the session id according to the application id"
+msgstr ""
+
+#: community/apps/application/views/chat_views.py:90
+#: community/apps/application/views/chat_views.py:91
+msgid "Get the workflow temporary session id"
+msgstr ""
+
+#: community/apps/application/views/chat_views.py:102
+#: community/apps/application/views/chat_views.py:103
+msgid "Get a temporary session id"
+msgstr ""
+
+#: community/apps/application/views/chat_views.py:115
+#: community/apps/application/views/chat_views.py:116
+msgid "dialogue"
+msgstr "Dialogue"
+
+#: community/apps/application/views/chat_views.py:152
+#: community/apps/application/views/chat_views.py:153
+msgid "Get the conversation list"
+msgstr ""
+
+#: community/apps/application/views/chat_views.py:172
+#: community/apps/application/views/chat_views.py:173
+msgid "Delete a conversation"
+msgstr ""
+
+#: community/apps/application/views/chat_views.py:192
+#: community/apps/application/views/chat_views.py:193
+msgid "Get client conversation list by paging"
+msgstr ""
+
+#: community/apps/application/views/chat_views.py:215
+#: community/apps/application/views/chat_views.py:216
+msgid "Client deletes conversation"
+msgstr ""
+
+#: community/apps/application/views/chat_views.py:232
+#: community/apps/application/views/chat_views.py:233
+msgid "Client modifies dialogue summary"
+msgstr ""
+
+#: community/apps/application/views/chat_views.py:253
+#: community/apps/application/views/chat_views.py:254
+msgid "Get the conversation list by page"
+msgstr ""
+
+#: community/apps/application/views/chat_views.py:278
+#: community/apps/application/views/chat_views.py:279
+msgid "Get conversation record details"
+msgstr ""
+
+#: community/apps/application/views/chat_views.py:298
+#: community/apps/application/views/chat_views.py:299
+msgid "Get a list of conversation records"
+msgstr ""
+
+#: community/apps/application/views/chat_views.py:319
+#: community/apps/application/views/chat_views.py:320
+msgid "Get the conversation history list by page"
+msgstr ""
+
+#: community/apps/application/views/chat_views.py:342
+#: community/apps/application/views/chat_views.py:343
+msgid "Like, Dislike"
+msgstr ""
+
+#: community/apps/application/views/chat_views.py:365
+#: community/apps/application/views/chat_views.py:366
+msgid "Get the list of marked paragraphs"
+msgstr ""
+
+#: community/apps/application/views/chat_views.py:369
+#: community/apps/application/views/chat_views.py:390
+#: community/apps/application/views/chat_views.py:442
+msgid "Application/Conversation Log/Annotation"
+msgstr ""
+
+#: community/apps/application/views/chat_views.py:412
+#: community/apps/application/views/chat_views.py:413
+msgid "Add to Knowledge Base"
+msgstr ""
+
+#: community/apps/application/views/chat_views.py:416
+msgid "Application/Conversation Log/Add to Knowledge Base"
+msgstr ""
+
+#: community/apps/application/views/chat_views.py:438
+#: community/apps/application/views/chat_views.py:439
+msgid "Delete a Annotation"
+msgstr ""
+
+#: community/apps/application/views/chat_views.py:487
+#: community/apps/dataset/views/file.py:28
+#: community/apps/dataset/views/file.py:29
+#: community/apps/dataset/views/file.py:34
+msgid "Upload file"
+msgstr ""
+
+#: community/apps/common/auth/authenticate.py:62
+#: community/apps/common/auth/authenticate.py:83
+msgid "Not logged in, please log in first"
+msgstr ""
+
+#: community/apps/common/auth/authenticate.py:68
+#: community/apps/common/auth/authenticate.py:74
+#: community/apps/common/auth/authenticate.py:89
+#: community/apps/common/auth/authenticate.py:95
+msgid "Authentication information is incorrect! illegal user"
+msgstr ""
+
+#: community/apps/common/auth/authentication.py:94
+msgid "No permission to access"
+msgstr ""
+
+#: community/apps/common/auth/handle/impl/application_key.py:23
+#: community/apps/common/auth/handle/impl/application_key.py:25
+msgid "Secret key is invalid"
+msgstr ""
+
+#: community/apps/common/auth/handle/impl/public_access_token.py:48
+#: community/apps/common/auth/handle/impl/public_access_token.py:50
+#: community/apps/common/auth/handle/impl/public_access_token.py:52
+#: community/apps/common/auth/handle/impl/public_access_token.py:54
+msgid "Authentication information is incorrect"
+msgstr ""
+
+#: community/apps/common/auth/handle/impl/user_token.py:34
+msgid "Login expired"
+msgstr ""
+
+#: community/apps/common/constants/exception_code_constants.py:31
+msgid "The username or password is incorrect"
+msgstr ""
+
+#: community/apps/common/constants/exception_code_constants.py:32
+msgid "Please log in first and bring the user Token"
+msgstr ""
+
+#: community/apps/common/constants/exception_code_constants.py:33
+#: community/apps/users/serializers/user_serializers.py:429
+msgid "Email sending failed"
+msgstr ""
+
+#: community/apps/common/constants/exception_code_constants.py:34
+msgid "Email format error"
+msgstr ""
+
+#: community/apps/common/constants/exception_code_constants.py:35
+msgid "The email has been registered, please log in directly"
+msgstr ""
+
+#: community/apps/common/constants/exception_code_constants.py:36
+msgid "The email is not registered, please register first"
+msgstr ""
+
+#: community/apps/common/constants/exception_code_constants.py:38
+msgid "The verification code is incorrect or the verification code has expired"
+msgstr ""
+
+#: community/apps/common/constants/exception_code_constants.py:39
+msgid "The username has been registered, please log in directly"
+msgstr ""
+
+#: community/apps/common/constants/exception_code_constants.py:41
+msgid ""
+"The username cannot be empty and must be between 6 and 20 characters long."
+msgstr ""
+
+#: community/apps/common/constants/exception_code_constants.py:43
+msgid "Password and confirmation password are inconsistent"
+msgstr ""
+
+#: community/apps/common/constants/permission_constants.py:61
+msgid "ADMIN"
+msgstr ""
+
+#: community/apps/common/constants/permission_constants.py:61
+msgid "Admin, prefabs are not currently used"
+msgstr ""
+
+#: community/apps/common/constants/permission_constants.py:62
+msgid "USER"
+msgstr ""
+
+#: community/apps/common/constants/permission_constants.py:62
+msgid "All user permissions"
+msgstr ""
+
+#: community/apps/common/constants/permission_constants.py:63
+msgid "chat"
+msgstr "Chat"
+
+#: community/apps/common/constants/permission_constants.py:63
+msgid "Only has application dialog interface permissions"
+msgstr ""
+
+#: community/apps/common/constants/permission_constants.py:64
+msgid "Apply private key"
+msgstr ""
+
+#: community/apps/common/event/__init__.py:30
+msgid "The download process was interrupted, please try again"
+msgstr ""
+
+#: community/apps/common/event/listener_manage.py:91
+#, python-brace-format
+msgid "Query vector data: {paragraph_id_list} error {error} {traceback}"
+msgstr ""
+
+#: community/apps/common/event/listener_manage.py:96
+#, python-brace-format
+msgid "Start--->Embedding paragraph: {paragraph_id_list}"
+msgstr ""
+
+#: community/apps/common/event/listener_manage.py:108
+#, python-brace-format
+msgid "Vectorized paragraph: {paragraph_id_list} error {error} {traceback}"
+msgstr ""
+
+#: community/apps/common/event/listener_manage.py:114
+#, python-brace-format
+msgid "End--->Embedding paragraph: {paragraph_id_list}"
+msgstr ""
+
+#: community/apps/common/event/listener_manage.py:123
+#, python-brace-format
+msgid "Start--->Embedding paragraph: {paragraph_id}"
+msgstr ""
+
+#: community/apps/common/event/listener_manage.py:148
+#, python-brace-format
+msgid "Vectorized paragraph: {paragraph_id} error {error} {traceback}"
+msgstr ""
+
+#: community/apps/common/event/listener_manage.py:153
+#, python-brace-format
+msgid "End--->Embedding paragraph: {paragraph_id}"
+msgstr ""
+
+#: community/apps/common/event/listener_manage.py:269
+#, python-brace-format
+msgid "Start--->Embedding document: {document_id}"
+msgstr ""
+
+#: community/apps/common/event/listener_manage.py:291
+#, python-brace-format
+msgid "Vectorized document: {document_id} error {error} {traceback}"
+msgstr ""
+
+#: community/apps/common/event/listener_manage.py:296
+#, python-brace-format
+msgid "End--->Embedding document: {document_id}"
+msgstr ""
+
+#: community/apps/common/event/listener_manage.py:307
+#, python-brace-format
+msgid "Start--->Embedding dataset: {dataset_id}"
+msgstr ""
+
+#: community/apps/common/event/listener_manage.py:311
+#, python-brace-format
+msgid "Start--->Embedding document: {document_list}"
+msgstr ""
+
+#: community/apps/common/event/listener_manage.py:315
+#: community/apps/embedding/task/embedding.py:123
+#, python-brace-format
+msgid "Vectorized dataset: {dataset_id} error {error} {traceback}"
+msgstr ""
+
+#: community/apps/common/event/listener_manage.py:318
+#, python-brace-format
+msgid "End--->Embedding dataset: {dataset_id}"
+msgstr ""
+
+#: community/apps/common/field/common.py:45
+msgid "not a function"
+msgstr ""
+
+#: community/apps/common/forms/base_field.py:64
+#, python-brace-format
+msgid "The field {field_label} is required"
+msgstr ""
+
+#: community/apps/common/forms/slider_field.py:56
+#, python-brace-format
+msgid "The {field_label} cannot be less than {min}"
+msgstr ""
+
+#: community/apps/common/forms/slider_field.py:62
+#, python-brace-format
+msgid "The {field_label} cannot be greater than {max}"
+msgstr ""
+
+#: community/apps/common/handle/handle_exception.py:30
+msgid "Unknown exception"
+msgstr ""
+
+#: community/apps/common/handle/impl/pdf_split_handle.py:278
+#, python-brace-format
+msgid "This document has no preface and is treated as ordinary text: {e}"
+msgstr ""
+
+#: community/apps/common/init/init_doc.py:26
+#: community/apps/common/init/init_doc.py:45
+msgid "Intelligent customer service platform"
+msgstr ""
+
+#: community/apps/common/job/clean_chat_job.py:25
+msgid "start clean chat log"
+msgstr "Start cleaning chat logs"
+
+#: community/apps/common/job/clean_chat_job.py:71
+msgid "end clean chat log"
+msgstr "End of chat log cleaning"
+
+#: community/apps/common/job/clean_debug_file_job.py:21
+msgid "start clean debug file"
+msgstr "Start cleaning debug files"
+
+#: community/apps/common/job/clean_debug_file_job.py:25
+msgid "end clean debug file"
+msgstr "End of debug file cleaning"
+
+#: community/apps/common/job/client_access_num_job.py:25
+msgid "start reset access_num"
+msgstr "Start resetting access_num"
+
+#: community/apps/common/job/client_access_num_job.py:27
+msgid "end reset access_num"
+msgstr "End of access_num reset"
+
+#: community/apps/common/log/log.py:37
+msgid "unknown"
+msgstr ""
+
+#: community/apps/common/response/result.py:24
+msgid "Success"
+msgstr ""
+
+#: community/apps/common/response/result.py:36
+#: community/apps/common/response/result.py:80
+#: community/apps/common/response/result.py:82
+msgid "current page"
+msgstr "Current page"
+
+#: community/apps/common/response/result.py:42
+#: community/apps/common/response/result.py:85
+#: community/apps/common/response/result.py:87
+msgid "page size"
+msgstr "Page size"
+
+#: community/apps/common/response/result.py:53
+#: community/apps/common/response/result.py:101
+#: community/apps/common/response/result.py:130
+msgid "response parameters"
+msgstr "Response Parameters"
+
+#: community/apps/common/response/result.py:59
+#: community/apps/common/response/result.py:107
+#: community/apps/common/response/result.py:136
+msgid "response code"
+msgstr "Response Code"
+
+#: community/apps/common/response/result.py:61
+#: community/apps/common/response/result.py:109
+#: community/apps/common/response/result.py:138
+msgid "success:200 fail:other"
+msgstr ""
+
+#: community/apps/common/response/result.py:64
+#: community/apps/common/response/result.py:112
+#: community/apps/common/response/result.py:141
+msgid "prompt"
+msgstr "Prompt"
+
+#: community/apps/common/response/result.py:65
+#: community/apps/common/response/result.py:113
+#: community/apps/common/response/result.py:142
+msgid "success"
+msgstr "Success"
+
+#: community/apps/common/response/result.py:66
+#: community/apps/common/response/result.py:114
+#: community/apps/common/response/result.py:143
+msgid "error prompt"
+msgstr "Error Prompt"
+
+#: community/apps/common/response/result.py:72
+#: community/apps/common/response/result.py:74
+msgid "total number of data"
+msgstr "Total number of data"
+
+#: community/apps/common/swagger_api/common_api.py:24
+#: community/apps/dataset/serializers/dataset_serializers.py:569
+msgid "query text"
+msgstr "Query Text"
+
+#: community/apps/common/swagger_api/common_api.py:42
+msgid "Retrieval pattern embedding|keywords|blend"
+msgstr ""
+
+#: community/apps/common/swagger_api/common_api.py:66
+#: community/apps/common/swagger_api/common_api.py:67
+msgid "Number of clicks and dislikes"
+msgstr ""
+
+#: community/apps/common/swagger_api/common_api.py:74
+#: community/apps/common/swagger_api/common_api.py:75
+msgid "relevance score"
+msgstr "Relevance score"
+
+#: community/apps/common/swagger_api/common_api.py:76
+#: community/apps/common/swagger_api/common_api.py:77
+msgid "Comprehensive score, used for ranking"
+msgstr ""
+
+#: community/apps/common/swagger_api/common_api.py:78
+#: community/apps/common/swagger_api/common_api.py:79
+#: community/apps/users/serializers/user_serializers.py:591
+#: community/apps/users/serializers/user_serializers.py:592
+msgid "Update time"
+msgstr "Update Time"
+
+#: community/apps/common/swagger_api/common_api.py:81
+#: community/apps/common/swagger_api/common_api.py:82
+#: community/apps/users/serializers/user_serializers.py:589
+#: community/apps/users/serializers/user_serializers.py:590
+msgid "Create time"
+msgstr "Create Time"
+
+#: community/apps/common/util/common.py:239
+msgid "Text-to-speech node, the text content must be of string type"
+msgstr ""
+
+#: community/apps/common/util/common.py:241
+msgid "Text-to-speech node, the text content cannot be empty"
+msgstr ""
+
+#: community/apps/dataset/serializers/common_serializers.py:87
+msgid "source url"
+msgstr "Source URL"
+
+#: community/apps/dataset/serializers/common_serializers.py:89
+#: community/apps/dataset/serializers/dataset_serializers.py:333
+#: community/apps/dataset/serializers/dataset_serializers.py:390
+#: community/apps/dataset/serializers/dataset_serializers.py:391
+#: community/apps/dataset/serializers/document_serializers.py:155
+#: community/apps/dataset/serializers/document_serializers.py:181
+msgid "selector"
+msgstr "Selector"
+
+#: community/apps/dataset/serializers/common_serializers.py:96
+#: community/apps/dataset/serializers/dataset_serializers.py:341
+#, python-brace-format
+msgid "URL error, cannot parse [{source_url}]"
+msgstr ""
+
+#: community/apps/dataset/serializers/common_serializers.py:105
+#: community/apps/dataset/serializers/common_serializers.py:124
+#: community/apps/dataset/serializers/common_serializers.py:125
+#: community/apps/dataset/serializers/document_serializers.py:85
+#: community/apps/dataset/swagger_api/document_api.py:23
+#: community/apps/dataset/swagger_api/document_api.py:24
+#: community/apps/dataset/swagger_api/document_api.py:49
+#: community/apps/dataset/swagger_api/document_api.py:50
+msgid "id list"
+msgstr "ID list"
+
+#: community/apps/dataset/serializers/common_serializers.py:115
+#, python-brace-format
+msgid "The following id does not exist: {error_id_list}"
+msgstr ""
+
+#: community/apps/dataset/serializers/common_serializers.py:183
+#: community/apps/dataset/serializers/common_serializers.py:207
+msgid "The knowledge base is inconsistent with the vector model"
+msgstr ""
+
+#: community/apps/dataset/serializers/common_serializers.py:185
+#: community/apps/dataset/serializers/common_serializers.py:209
+msgid "Knowledge base setting error, please reset the knowledge base"
+msgstr ""
+
+#: community/apps/dataset/serializers/dataset_serializers.py:109
+#: community/apps/dataset/serializers/dataset_serializers.py:110
+#: community/apps/setting/serializers/model_apply_serializers.py:51
+msgid "model id"
+msgstr "Model ID"
+
+#: community/apps/dataset/serializers/dataset_serializers.py:112
+#: community/apps/dataset/serializers/dataset_serializers.py:114
+msgid "Whether to start multiple rounds of dialogue"
+msgstr ""
+
+#: community/apps/dataset/serializers/dataset_serializers.py:115
+#: community/apps/dataset/serializers/dataset_serializers.py:116
+msgid "opening remarks"
+msgstr "Opening Remarks"
+
+#: community/apps/dataset/serializers/dataset_serializers.py:118
+msgid "example"
+msgstr "Example"
+
+#: community/apps/dataset/serializers/dataset_serializers.py:119
+msgid "User id"
+msgstr "User ID"
+
+#: community/apps/dataset/serializers/dataset_serializers.py:121
+#: community/apps/dataset/serializers/dataset_serializers.py:122
+msgid "Whether to publish"
+msgstr ""
+
+#: community/apps/dataset/serializers/dataset_serializers.py:124
+#: community/apps/dataset/serializers/dataset_serializers.py:125
+#: community/apps/dataset/serializers/dataset_serializers.py:304
+#: community/apps/dataset/serializers/dataset_serializers.py:305
+#: community/apps/dataset/serializers/dataset_serializers.py:366
+#: community/apps/dataset/serializers/dataset_serializers.py:367
+#: community/apps/dataset/serializers/dataset_serializers.py:511
+#: community/apps/dataset/serializers/dataset_serializers.py:512
+#: community/apps/dataset/serializers/dataset_serializers.py:942
+#: community/apps/dataset/serializers/dataset_serializers.py:943
+#: community/apps/dataset/serializers/document_serializers.py:824
+#: community/apps/dataset/serializers/document_serializers.py:825
+#: community/apps/dataset/serializers/paragraph_serializers.py:200
+#: community/apps/dataset/serializers/paragraph_serializers.py:201
+#: community/apps/dataset/serializers/paragraph_serializers.py:724
+#: community/apps/dataset/serializers/paragraph_serializers.py:725
+#: community/apps/dataset/swagger_api/problem_api.py:33
+#: community/apps/dataset/swagger_api/problem_api.py:34
+#: community/apps/dataset/swagger_api/problem_api.py:135
+#: community/apps/dataset/swagger_api/problem_api.py:136
+#: community/apps/function_lib/swagger_api/function_lib_api.py:32
+#: community/apps/function_lib/swagger_api/function_lib_api.py:33
+msgid "create time"
+msgstr "Create Time"
+
+#: community/apps/dataset/serializers/dataset_serializers.py:127
+#: community/apps/dataset/serializers/dataset_serializers.py:128
+#: community/apps/dataset/serializers/dataset_serializers.py:301
+#: community/apps/dataset/serializers/dataset_serializers.py:302
+#: community/apps/dataset/serializers/dataset_serializers.py:363
+#: community/apps/dataset/serializers/dataset_serializers.py:364
+#: community/apps/dataset/serializers/dataset_serializers.py:508
+#: community/apps/dataset/serializers/dataset_serializers.py:509
+#: community/apps/dataset/serializers/dataset_serializers.py:939
+#: community/apps/dataset/serializers/dataset_serializers.py:940
+#: community/apps/dataset/serializers/document_serializers.py:821
+#: community/apps/dataset/serializers/document_serializers.py:822
+#: community/apps/dataset/serializers/paragraph_serializers.py:197
+#: community/apps/dataset/serializers/paragraph_serializers.py:198
+#: community/apps/dataset/serializers/paragraph_serializers.py:721
+#: community/apps/dataset/serializers/paragraph_serializers.py:722
+#: community/apps/dataset/swagger_api/problem_api.py:30
+#: community/apps/dataset/swagger_api/problem_api.py:31
+#: community/apps/dataset/swagger_api/problem_api.py:132
+#: community/apps/dataset/swagger_api/problem_api.py:133
+#: community/apps/function_lib/swagger_api/function_lib_api.py:34
+#: community/apps/function_lib/swagger_api/function_lib_api.py:35
+msgid "update time"
+msgstr "Update Time"
+
+#: community/apps/dataset/serializers/dataset_serializers.py:257
+#: community/apps/dataset/serializers/dataset_serializers.py:260
+#: community/apps/dataset/serializers/document_serializers.py:211
+#: community/apps/dataset/serializers/document_serializers.py:218
+#: community/apps/dataset/serializers/document_serializers.py:987
+#: community/apps/dataset/serializers/document_serializers.py:1016
+msgid "file list"
+msgstr "File list"
+
+#: community/apps/dataset/serializers/dataset_serializers.py:269
+msgid "upload files "
+msgstr "Upload files"
+
+#: community/apps/dataset/serializers/dataset_serializers.py:297
+#: community/apps/dataset/serializers/dataset_serializers.py:298
+#: community/apps/dataset/serializers/dataset_serializers.py:359
+#: community/apps/dataset/serializers/dataset_serializers.py:360
+#: community/apps/dataset/serializers/dataset_serializers.py:504
+#: community/apps/dataset/serializers/dataset_serializers.py:505
+#: community/apps/dataset/serializers/dataset_serializers.py:935
+#: community/apps/dataset/serializers/dataset_serializers.py:936
+#: community/apps/dataset/serializers/document_serializers.py:814
+#: community/apps/dataset/serializers/document_serializers.py:815
+msgid "char length"
+msgstr "Character length"
+
+#: community/apps/dataset/serializers/dataset_serializers.py:299
+#: community/apps/dataset/serializers/dataset_serializers.py:300
+#: community/apps/dataset/serializers/dataset_serializers.py:361
+#: community/apps/dataset/serializers/dataset_serializers.py:362
+#: community/apps/dataset/serializers/dataset_serializers.py:506
+#: community/apps/dataset/serializers/dataset_serializers.py:507
+#: community/apps/dataset/serializers/dataset_serializers.py:937
+#: community/apps/dataset/serializers/dataset_serializers.py:938
+msgid "document count"
+msgstr "Document count"
+
+#: community/apps/dataset/serializers/dataset_serializers.py:308
+#: community/apps/dataset/serializers/dataset_serializers.py:309
+#: community/apps/dataset/serializers/dataset_serializers.py:370
+#: community/apps/dataset/serializers/dataset_serializers.py:371
+#: community/apps/dataset/serializers/dataset_serializers.py:515
+#: community/apps/dataset/serializers/dataset_serializers.py:516
+#: community/apps/dataset/serializers/document_serializers.py:290
+#: community/apps/dataset/serializers/document_serializers.py:485
+msgid "document list"
+msgstr "Document list"
+
+#: community/apps/dataset/serializers/dataset_serializers.py:327
+#: community/apps/dataset/serializers/dataset_serializers.py:388
+#: community/apps/dataset/serializers/dataset_serializers.py:389
+msgid "web source url"
+msgstr "Web source URL"
+
+#: community/apps/dataset/serializers/dataset_serializers.py:414
+#: community/apps/setting/serializers/valid_serializers.py:26
+msgid ""
+"The community version supports up to 50 knowledge bases. If you need more "
+"knowledge bases, please contact us (https://fit2cloud.com/)."
+msgstr ""
+
+#: community/apps/dataset/serializers/dataset_serializers.py:533
+#: community/apps/dataset/serializers/dataset_serializers.py:534
+msgid "documents"
+msgstr "Documents"
+
+#: community/apps/dataset/serializers/dataset_serializers.py:577
+msgid "search mode"
+msgstr "Search mode"
+
+#: community/apps/dataset/serializers/dataset_serializers.py:582
+#: community/apps/dataset/serializers/dataset_serializers.py:618
+#: community/apps/dataset/serializers/dataset_serializers.py:706
+msgid "id does not exist"
+msgstr "ID does not exist"
+
+#: community/apps/dataset/serializers/dataset_serializers.py:609
+msgid "sync type"
+msgstr "Synchronization type"
+
+#: community/apps/dataset/serializers/dataset_serializers.py:611
+msgid "The synchronization type only supports:replace|complete"
+msgstr ""
+
+#: community/apps/dataset/serializers/dataset_serializers.py:620
+#: community/apps/dataset/serializers/document_serializers.py:499
+msgid "Synchronization is only supported for web site types"
+msgstr ""
+
+#: community/apps/dataset/serializers/dataset_serializers.py:694
+msgid ""
+"Synchronization type->replace: replacement synchronization, complete: "
+"complete synchronization"
+msgstr ""
+
+#: community/apps/dataset/serializers/dataset_serializers.py:803
+#: community/apps/dataset/serializers/document_serializers.py:748
+#: community/apps/setting/models_provider/tools.py:25
+msgid "No permission to use this model"
+msgstr ""
+
+#: community/apps/dataset/serializers/dataset_serializers.py:815
+msgid "Failed to send the vectorization task, please try again later!"
+msgstr ""
+
+#: community/apps/dataset/serializers/dataset_serializers.py:911
+#: community/apps/dataset/serializers/document_serializers.py:846
+msgid "meta"
+msgstr "Metadata"
+
+#: community/apps/dataset/serializers/dataset_serializers.py:913
+msgid "Knowledge base metadata->web:{source_url:xxx,selector:'xxx'},base:{}"
+msgstr ""
+
+#: community/apps/dataset/serializers/document_serializers.py:87
+#: community/apps/dataset/serializers/document_serializers.py:100
+#: community/apps/dataset/serializers/document_serializers.py:416
+#: community/apps/dataset/swagger_api/document_api.py:37
+#: community/apps/dataset/swagger_api/document_api.py:51
+msgid "task type"
+msgstr "Task type"
+
+#: community/apps/dataset/serializers/document_serializers.py:95
+#: community/apps/dataset/serializers/document_serializers.py:108
+msgid "task type not support"
+msgstr "Task type not supported"
+
+#: community/apps/dataset/serializers/document_serializers.py:115
+#: community/apps/dataset/serializers/document_serializers.py:188
+#: community/apps/dataset/serializers/document_serializers.py:200
+#: community/apps/dataset/serializers/document_serializers.py:201
+#: community/apps/dataset/serializers/document_serializers.py:412
+#: community/apps/dataset/serializers/document_serializers.py:476
+#: community/apps/dataset/serializers/document_serializers.py:836
+#: community/apps/dataset/serializers/document_serializers.py:837
+msgid "document name"
+msgstr "Document Name"
+
+#: community/apps/dataset/serializers/document_serializers.py:118
+msgid "The type only supports optimization|directly_return"
+msgstr ""
+
+#: community/apps/dataset/serializers/document_serializers.py:120
+#: community/apps/dataset/serializers/document_serializers.py:414
+#: community/apps/dataset/serializers/document_serializers.py:480
+#: community/apps/dataset/serializers/document_serializers.py:840
+#: community/apps/dataset/swagger_api/document_api.py:25
+msgid "hit handling method"
+msgstr "Hit Handling Method"
+
+#: community/apps/dataset/serializers/document_serializers.py:126
+#: community/apps/dataset/serializers/document_serializers.py:844
+#: community/apps/dataset/swagger_api/document_api.py:27
+msgid "directly return similarity"
+msgstr "Directly return similarity"
+
+#: community/apps/dataset/serializers/document_serializers.py:129
+#: community/apps/dataset/serializers/document_serializers.py:415
+msgid "document is active"
+msgstr "Document is active"
+
+#: community/apps/dataset/serializers/document_serializers.py:150
+#: community/apps/dataset/serializers/document_serializers.py:152
+msgid "document url list"
+msgstr "Document URL list"
+
+#: community/apps/dataset/serializers/document_serializers.py:178
+#: community/apps/dataset/serializers/document_serializers.py:179
+msgid "source url list"
+msgstr "Source URL list"
+
+#: community/apps/dataset/serializers/document_serializers.py:202
+#: community/apps/dataset/serializers/document_serializers.py:203
+msgid "paragraphs"
+msgstr "Paragraphs"
+
+#: community/apps/dataset/serializers/document_serializers.py:227
+msgid "The template type only supports excel|csv"
+msgstr ""
+
+#: community/apps/dataset/serializers/document_serializers.py:237
+msgid "Export template type csv|excel"
+msgstr ""
+
+#: community/apps/dataset/serializers/document_serializers.py:289
+#: community/apps/dataset/serializers/paragraph_serializers.py:304
+#: community/apps/dataset/serializers/paragraph_serializers.py:436
+msgid "target dataset id"
+msgstr "Target knowledge base ID"
+
+#: community/apps/dataset/serializers/document_serializers.py:391
+#: community/apps/dataset/serializers/paragraph_serializers.py:305
+#: community/apps/dataset/serializers/paragraph_serializers.py:441
+msgid "target document id"
+msgstr "Target document ID"
+
+#: community/apps/dataset/serializers/document_serializers.py:399
+#: community/apps/dataset/serializers/document_serializers.py:400
+msgid "document id list"
+msgstr "Document ID list"
+
+#: community/apps/dataset/serializers/document_serializers.py:418
+msgid "order by"
+msgstr ""
+
+#: community/apps/dataset/serializers/document_serializers.py:653
+msgid "Section title (optional)"
+msgstr ""
+
+#: community/apps/dataset/serializers/document_serializers.py:654
+msgid ""
+"Section content (required, question answer, no more than 4096 characters)"
+msgstr ""
+
+#: community/apps/dataset/serializers/document_serializers.py:655
+msgid "Question (optional, one per line in the cell)"
+msgstr ""
+
+#: community/apps/dataset/serializers/document_serializers.py:765
+msgid "The task is being executed, please do not send it repeatedly."
+msgstr ""
+
+#: community/apps/dataset/serializers/document_serializers.py:842
+msgid "ai optimization: optimization, direct return: directly_return"
+msgstr "AI optimization: optimization, direct return: directly_return"
+
+#: community/apps/dataset/serializers/document_serializers.py:848
+msgid "Document metadata->web:{source_url:xxx,selector:'xxx'},base:{}"
+msgstr ""
+
+#: community/apps/dataset/serializers/document_serializers.py:859
+msgid "dataset id not exist"
+msgstr "Dataset ID does not exist"
+
+#: community/apps/dataset/serializers/document_serializers.py:990
+#: community/apps/dataset/serializers/document_serializers.py:1020
+msgid "limit"
+msgstr "Limit"
+
+#: community/apps/dataset/serializers/document_serializers.py:994
+#: community/apps/dataset/serializers/document_serializers.py:996
+msgid "patterns"
+msgstr "Patterns"
+
+#: community/apps/dataset/serializers/document_serializers.py:999
+msgid "Auto Clean"
+msgstr ""
+
+#: community/apps/dataset/serializers/document_serializers.py:1006
+msgid "The maximum size of the uploaded file cannot exceed 100MB"
+msgstr ""
+
+#: community/apps/dataset/serializers/document_serializers.py:1025
+msgid "Segmented regular list"
+msgstr ""
+
+#: community/apps/dataset/serializers/document_serializers.py:1029
+#: community/apps/dataset/serializers/document_serializers.py:1030
+msgid "Whether to clear special characters"
+msgstr ""
+
+#: community/apps/dataset/serializers/document_serializers.py:1049
+msgid "space"
+msgstr "Space"
+
+#: community/apps/dataset/serializers/document_serializers.py:1050
+msgid "semicolon"
+msgstr "Semicolon"
+
+#: community/apps/dataset/serializers/document_serializers.py:1050
+msgid "comma"
+msgstr "Comma"
+
+#: community/apps/dataset/serializers/document_serializers.py:1051
+msgid "period"
+msgstr "Period"
+
+#: community/apps/dataset/serializers/document_serializers.py:1051
+msgid "enter"
+msgstr "Enter"
+
+#: community/apps/dataset/serializers/document_serializers.py:1052
+msgid "blank line"
+msgstr "Blank line"
+
+#: community/apps/dataset/serializers/document_serializers.py:1165
+msgid "Hit handling method is required"
+msgstr ""
+
+#: community/apps/dataset/serializers/document_serializers.py:1167
+msgid "The hit processing method must be directly_return|optimization"
+msgstr ""
+
+#: community/apps/dataset/serializers/document_serializers.py:1213
+#: community/apps/dataset/serializers/paragraph_serializers.py:753
+msgid "The task is being executed, please do not send it again."
+msgstr ""
+
+#: community/apps/dataset/serializers/file_serializers.py:82
+msgid "File not found"
+msgstr ""
+
+#: community/apps/dataset/serializers/image_serializers.py:23
+msgid "image"
+msgstr "Image"
+
+#: community/apps/dataset/serializers/image_serializers.py:42
+msgid "Image not found"
+msgstr ""
+
+#: community/apps/dataset/serializers/paragraph_serializers.py:52
+#: community/apps/dataset/serializers/paragraph_serializers.py:68
+#: community/apps/dataset/serializers/paragraph_serializers.py:69
+#: community/apps/dataset/serializers/paragraph_serializers.py:82
+#: community/apps/dataset/serializers/paragraph_serializers.py:85
+#: community/apps/dataset/serializers/paragraph_serializers.py:91
+#: community/apps/dataset/serializers/paragraph_serializers.py:93
+#: community/apps/dataset/serializers/paragraph_serializers.py:653
+msgid "section title"
+msgstr "Section Title"
+
+#: community/apps/dataset/serializers/paragraph_serializers.py:65
+#: community/apps/dataset/serializers/paragraph_serializers.py:66
+msgid "section content"
+msgstr "Section Content"
+
+#: community/apps/dataset/serializers/paragraph_serializers.py:73
+#: community/apps/dataset/serializers/paragraph_serializers.py:74
+#: community/apps/dataset/serializers/problem_serializers.py:88
+msgid "problem list"
+msgstr "Problem List"
+
+#: community/apps/dataset/serializers/paragraph_serializers.py:100
+#: community/apps/dataset/serializers/paragraph_serializers.py:172
+#: community/apps/dataset/serializers/paragraph_serializers.py:214
+#: community/apps/dataset/serializers/paragraph_serializers.py:276
+#: community/apps/dataset/serializers/paragraph_serializers.py:308
+#: community/apps/dataset/serializers/paragraph_serializers.py:456
+#: community/apps/dataset/serializers/paragraph_serializers.py:563
+#: community/apps/dataset/serializers/problem_serializers.py:57
+#: community/apps/dataset/swagger_api/problem_api.py:61
+msgid "paragraph id"
+msgstr "Paragraph ID"
+
+#: community/apps/dataset/serializers/paragraph_serializers.py:105
+#: community/apps/dataset/serializers/paragraph_serializers.py:467
+msgid "Paragraph id does not exist"
+msgstr ""
+
+#: community/apps/dataset/serializers/paragraph_serializers.py:134
+msgid "Already associated, please do not associate again"
+msgstr ""
+
+#: community/apps/dataset/serializers/paragraph_serializers.py:191
+#: community/apps/dataset/serializers/paragraph_serializers.py:192
+msgid "question content"
+msgstr "Question Content"
+
+#: community/apps/dataset/serializers/paragraph_serializers.py:193
+#: community/apps/dataset/serializers/paragraph_serializers.py:709
+#: community/apps/dataset/swagger_api/problem_api.py:26
+msgid "hit num"
+msgstr "Hit Count"
+
+#: community/apps/dataset/serializers/paragraph_serializers.py:210
+#: community/apps/dataset/serializers/paragraph_serializers.py:281
+#: community/apps/dataset/serializers/problem_serializers.py:39
+#: community/apps/dataset/serializers/problem_serializers.py:64
+#: community/apps/dataset/serializers/problem_serializers.py:194
+#: community/apps/dataset/swagger_api/problem_api.py:101
+msgid "problem id"
+msgstr "Problem ID"
+
+#: community/apps/dataset/serializers/paragraph_serializers.py:222
+msgid "Paragraph does not exist"
+msgstr ""
+
+#: community/apps/dataset/serializers/paragraph_serializers.py:224
+msgid "Problem does not exist"
+msgstr ""
+
+#: community/apps/dataset/serializers/paragraph_serializers.py:306
+#: community/apps/dataset/serializers/paragraph_serializers.py:449
+#: community/apps/dataset/serializers/paragraph_serializers.py:450
+msgid "paragraph id list"
+msgstr "Paragraph ID list"
+
+#: community/apps/dataset/serializers/paragraph_serializers.py:317
+msgid "The document to be migrated is consistent with the target document"
+msgstr ""
+
+#: community/apps/dataset/serializers/paragraph_serializers.py:319
+#, python-brace-format
+msgid "The document id does not exist [{document_id}]"
+msgstr ""
+
+#: community/apps/dataset/serializers/paragraph_serializers.py:323
+#, python-brace-format
+msgid "The target document id does not exist [{document_id}]"
+msgstr ""
+
+#: community/apps/dataset/serializers/paragraph_serializers.py:503
+msgid "Problem id does not exist"
+msgstr ""
+
+#: community/apps/dataset/serializers/paragraph_serializers.py:713
+#: community/apps/dataset/serializers/paragraph_serializers.py:714
+msgid "Number of dislikes"
+msgstr ""
+
+#: community/apps/dataset/serializers/problem_serializers.py:50
+msgid "Issue ID is passed when modifying, not when creating."
+msgstr ""
+
+#: community/apps/dataset/serializers/problem_serializers.py:62
+#: community/apps/dataset/swagger_api/problem_api.py:51
+#: community/apps/dataset/swagger_api/problem_api.py:52
+#: community/apps/dataset/swagger_api/problem_api.py:83
+#: community/apps/dataset/swagger_api/problem_api.py:84
+msgid "problem id list"
+msgstr "Problem ID list"
+
+#: community/apps/dataset/swagger_api/document_api.py:38
+#: community/apps/dataset/swagger_api/document_api.py:52
+msgid "1|2|3 1:Vectorization|2:Generate issues|3:Synchronize documents"
+msgstr ""
+
+#: community/apps/dataset/swagger_api/document_api.py:64
+#: community/apps/dataset/swagger_api/document_api.py:65
+msgid "state list"
+msgstr "State list"
+
+#: community/apps/dataset/swagger_api/image_api.py:22
+msgid "image file"
+msgstr "Image file"
+
+#: community/apps/dataset/swagger_api/problem_api.py:54
+#: community/apps/dataset/swagger_api/problem_api.py:55
+msgid "Associated paragraph information list"
+msgstr ""
+
+#: community/apps/dataset/swagger_api/problem_api.py:131
+msgid "Hit num"
+msgstr "Hit Count"
+
+#: community/apps/dataset/task/generate.py:95
+#, python-brace-format
+msgid ""
+"Generate issue based on document: {document_id} error {error}{traceback}"
+msgstr ""
+
+#: community/apps/dataset/task/generate.py:99
+#, python-brace-format
+msgid "End--->Generate problem: {document_id}"
+msgstr ""
+
+#: community/apps/dataset/task/sync.py:29
+#: community/apps/dataset/task/sync.py:43
+#, python-brace-format
+msgid "Start--->Start synchronization web knowledge base:{dataset_id}"
+msgstr ""
+
+#: community/apps/dataset/task/sync.py:34
+#: community/apps/dataset/task/sync.py:47
+#, python-brace-format
+msgid "End--->End synchronization web knowledge base:{dataset_id}"
+msgstr ""
+
+#: community/apps/dataset/task/sync.py:36
+#: community/apps/dataset/task/sync.py:49
+#, python-brace-format
+msgid "Synchronize web knowledge base:{dataset_id} error{error}{traceback}"
+msgstr ""
+
+#: community/apps/dataset/task/tools.py:114
+#, python-brace-format
+msgid "Association problem failed {error}"
+msgstr ""
+
+#: community/apps/dataset/views/dataset.py:35
+#: community/apps/dataset/views/dataset.py:36
+msgid "Synchronize the knowledge base of the website"
+msgstr ""
+
+#: community/apps/dataset/views/dataset.py:57
+#: community/apps/dataset/views/dataset.py:58
+msgid "Create QA knowledge base"
+msgstr ""
+
+#: community/apps/dataset/views/dataset.py:77
+#: community/apps/dataset/views/dataset.py:78
+msgid "Create a web site knowledge base"
+msgstr ""
+
+#: community/apps/dataset/views/dataset.py:93
+#: community/apps/dataset/views/dataset.py:94
+msgid "Get a list of applications available in the knowledge base"
+msgstr ""
+
+#: community/apps/dataset/views/dataset.py:105
+#: community/apps/dataset/views/dataset.py:106
+msgid "Get a list of knowledge bases"
+msgstr ""
+
+#: community/apps/dataset/views/dataset.py:119
+#: community/apps/dataset/views/dataset.py:120
+msgid "Create a knowledge base"
+msgstr ""
+
+#: community/apps/dataset/views/dataset.py:134
+msgid "Hit test list"
+msgstr ""
+
+#: community/apps/dataset/views/dataset.py:154
+msgid "Re-vectorize"
+msgstr ""
+
+#: community/apps/dataset/views/dataset.py:170
+msgid "Export knowledge base"
+msgstr ""
+
+#: community/apps/dataset/views/dataset.py:184
+#: community/apps/dataset/views/dataset.py:185
+msgid "Export knowledge base containing images"
+msgstr "Export ZIP Knowledge Base"
+
+#: community/apps/dataset/views/dataset.py:199
+msgid "Delete knowledge base"
+msgstr ""
+
+#: community/apps/dataset/views/dataset.py:213
+#: community/apps/dataset/views/dataset.py:214
+msgid "Query knowledge base details based on knowledge base id"
+msgstr ""
+
+#: community/apps/dataset/views/dataset.py:226
+#: community/apps/dataset/views/dataset.py:227
+msgid "Modify knowledge base information"
+msgstr ""
+
+#: community/apps/dataset/views/dataset.py:245
+#: community/apps/dataset/views/dataset.py:246
+#: community/apps/dataset/views/document.py:463
+#: community/apps/dataset/views/document.py:464
+msgid "Get the knowledge base paginated list"
+msgstr ""
+
+#: community/apps/dataset/views/document.py:31
+#: community/apps/dataset/views/document.py:32
+msgid "Get QA template"
+msgstr ""
+
+#: community/apps/dataset/views/document.py:44
+#: community/apps/dataset/views/document.py:45
+msgid "Get form template"
+msgstr ""
+
+#: community/apps/dataset/views/document.py:57
+#: community/apps/dataset/views/document.py:58
+msgid "Create Web site documents"
+msgstr ""
+
+#: community/apps/dataset/views/document.py:77
+#: community/apps/dataset/views/document.py:78
+msgid "Import QA and create documentation"
+msgstr ""
+
+#: community/apps/dataset/views/document.py:98
+#: community/apps/dataset/views/document.py:99
+msgid "Import tables and create documents"
+msgstr ""
+
+#: community/apps/dataset/views/document.py:118
+#: community/apps/dataset/views/document.py:119
+msgid "Create document"
+msgstr ""
+
+#: community/apps/dataset/views/document.py:152
+#: community/apps/dataset/views/document.py:153
+msgid "Modify document hit processing methods in batches"
+msgstr ""
+
+#: community/apps/dataset/views/document.py:171
+#: community/apps/dataset/views/document.py:172
+msgid "Create documents in batches"
+msgstr ""
+
+#: community/apps/dataset/views/document.py:187
+#: community/apps/dataset/views/document.py:188
+msgid "Batch sync documents"
+msgstr ""
+
+#: community/apps/dataset/views/document.py:202
+#: community/apps/dataset/views/document.py:203
+msgid "Delete documents in batches"
+msgstr ""
+
+#: community/apps/dataset/views/document.py:220
+#: community/apps/dataset/views/document.py:221
+msgid "Synchronize web site types"
+msgstr ""
+
+#: community/apps/dataset/views/document.py:239
+#: community/apps/dataset/views/document.py:240
+msgid "Cancel task"
+msgstr ""
+
+#: community/apps/dataset/views/document.py:260
+#: community/apps/dataset/views/document.py:261
+msgid "Cancel tasks in batches"
+msgstr ""
+
+#: community/apps/dataset/views/document.py:279
+#: community/apps/dataset/views/document.py:280
+msgid "Refresh document vector library"
+msgstr ""
+
+#: community/apps/dataset/views/document.py:300
+#: community/apps/dataset/views/document.py:301
+msgid "Batch refresh document vector library"
+msgstr ""
+
+#: community/apps/dataset/views/document.py:319
+#: community/apps/dataset/views/document.py:320
+msgid "Migrate documents in batches"
+msgstr ""
+
+#: community/apps/dataset/views/document.py:346
+#: community/apps/dataset/views/document.py:347
+msgid "Export document"
+msgstr "Export Document"
+
+#: community/apps/dataset/views/document.py:361
+#: community/apps/dataset/views/document.py:362
+msgid "Export Zip document"
+msgstr ""
+
+#: community/apps/dataset/views/document.py:376
+#: community/apps/dataset/views/document.py:377
+msgid "Get document details"
+msgstr ""
+
+#: community/apps/dataset/views/document.py:391
+#: community/apps/dataset/views/document.py:392
+msgid "Modify document"
+msgstr "Update Document"
+
+#: community/apps/dataset/views/document.py:409
+#: community/apps/dataset/views/document.py:410
+msgid "Delete document"
+msgstr "Delete Document"
+
+#: community/apps/dataset/views/document.py:427
+#: community/apps/dataset/views/document.py:428
+msgid "Get a list of segment IDs"
+msgstr ""
+
+#: community/apps/dataset/views/document.py:439
+#: community/apps/dataset/views/document.py:440
+msgid "Segmented document"
+msgstr ""
+
+#: community/apps/dataset/views/file.py:42
+#: community/apps/dataset/views/file.py:43
+msgid "Get file"
+msgstr ""
+
+#: community/apps/dataset/views/image.py:28
+#: community/apps/dataset/views/image.py:29
+#: community/apps/dataset/views/image.py:34
+msgid "Upload image"
+msgstr ""
+
+#: community/apps/dataset/views/image.py:35
+#: community/apps/dataset/views/image.py:44
+msgid "Image"
+msgstr ""
+
+#: community/apps/dataset/views/image.py:42
+#: community/apps/dataset/views/image.py:43
+msgid "Get Image"
+msgstr ""
+
+#: community/apps/dataset/views/paragraph.py:28
+#: community/apps/dataset/views/paragraph.py:29
+msgid "Paragraph list"
+msgstr ""
+
+#: community/apps/dataset/views/paragraph.py:32
+#: community/apps/dataset/views/paragraph.py:51
+#: community/apps/dataset/views/paragraph.py:69
+#: community/apps/dataset/views/paragraph.py:85
+#: community/apps/dataset/views/paragraph.py:103
+#: community/apps/dataset/views/paragraph.py:121
+#: community/apps/dataset/views/paragraph.py:140
+#: community/apps/dataset/views/paragraph.py:156
+#: community/apps/dataset/views/paragraph.py:172
+#: community/apps/dataset/views/paragraph.py:193
+#: community/apps/dataset/views/paragraph.py:211
+#: community/apps/dataset/views/paragraph.py:238
+msgid "Knowledge Base/Documentation/Paragraph"
+msgstr ""
+
+#: community/apps/dataset/views/paragraph.py:46
+#: community/apps/dataset/views/paragraph.py:47
+msgid "Create Paragraph"
+msgstr ""
+
+#: community/apps/dataset/views/paragraph.py:64
+#: community/apps/dataset/views/paragraph.py:65
+msgid "Add associated questions"
+msgstr ""
+
+#: community/apps/dataset/views/paragraph.py:80
+#: community/apps/dataset/views/paragraph.py:81
+msgid "Get a list of paragraph questions"
+msgstr ""
+
+#: community/apps/dataset/views/paragraph.py:99
+#: community/apps/dataset/views/paragraph.py:100
+msgid "Disassociation issue"
+msgstr ""
+
+#: community/apps/dataset/views/paragraph.py:117
+#: community/apps/dataset/views/paragraph.py:118
+msgid "Related questions"
+msgstr ""
+
+#: community/apps/dataset/views/paragraph.py:135
+#: community/apps/dataset/views/paragraph.py:136
+msgid "Modify paragraph data"
+msgstr "Update Paragraph Data"
+
+#: community/apps/dataset/views/paragraph.py:152
+#: community/apps/dataset/views/paragraph.py:153
+msgid "Get paragraph details"
+msgstr ""
+
+#: community/apps/dataset/views/paragraph.py:168
+#: community/apps/dataset/views/paragraph.py:169
+msgid "Delete paragraph"
+msgstr ""
+
+#: community/apps/dataset/views/paragraph.py:187
+#: community/apps/dataset/views/paragraph.py:188
+msgid "Delete paragraphs in batches"
+msgstr ""
+
+#: community/apps/dataset/views/paragraph.py:206
+#: community/apps/dataset/views/paragraph.py:207
+msgid "Migrate paragraphs in batches"
+msgstr ""
+
+#: community/apps/dataset/views/paragraph.py:233
+#: community/apps/dataset/views/paragraph.py:234
+msgid "Get paragraph list by pagination"
+msgstr ""
+
+#: community/apps/dataset/views/problem.py:28
+#: community/apps/dataset/views/problem.py:29
+msgid "Question list"
+msgstr "Question List"
+
+#: community/apps/dataset/views/problem.py:32
+#: community/apps/dataset/views/problem.py:50
+#: community/apps/dataset/views/problem.py:68
+#: community/apps/dataset/views/problem.py:88
+#: community/apps/dataset/views/problem.py:103
+#: community/apps/dataset/views/problem.py:120
+#: community/apps/dataset/views/problem.py:136
+#: community/apps/dataset/views/problem.py:155
+msgid "Knowledge Base/Documentation/Paragraph/Question"
+msgstr ""
+
+#: community/apps/dataset/views/problem.py:45
+#: community/apps/dataset/views/problem.py:46
+msgid "Create question"
+msgstr "Create Question"
+
+#: community/apps/dataset/views/problem.py:64
+#: community/apps/dataset/views/problem.py:65
+msgid "Get a list of associated paragraphs"
+msgstr ""
+
+#: community/apps/dataset/views/problem.py:82
+#: community/apps/dataset/views/problem.py:83
+msgid "Batch deletion issues"
+msgstr ""
+
+#: community/apps/dataset/views/problem.py:98
+#: community/apps/dataset/views/problem.py:99
+msgid "Batch associated paragraphs"
+msgstr ""
+
+#: community/apps/dataset/views/problem.py:116
+#: community/apps/dataset/views/problem.py:117
+msgid "Delete question"
+msgstr ""
+
+#: community/apps/dataset/views/problem.py:131
+#: community/apps/dataset/views/problem.py:132
+msgid "Modify question"
+msgstr "Update Question"
+
+#: community/apps/dataset/views/problem.py:150
+#: community/apps/dataset/views/problem.py:151
+msgid "Get the list of questions by page"
+msgstr ""
+
+#: community/apps/embedding/task/embedding.py:30
+#: community/apps/embedding/task/embedding.py:81
+#, python-brace-format
+msgid "Failed to obtain vector model: {error} {traceback}"
+msgstr ""
+
+#: community/apps/embedding/task/embedding.py:110
+#, python-brace-format
+msgid "Start--->Vectorized dataset: {dataset_id}"
+msgstr ""
+
+#: community/apps/embedding/task/embedding.py:114
+#, python-brace-format
+msgid "Dataset documentation: {document_names}"
+msgstr ""
+
+#: community/apps/embedding/task/embedding.py:127
+#, python-brace-format
+msgid "End--->Vectorized dataset: {dataset_id}"
+msgstr ""
+
+#: community/apps/function_lib/serializers/function_lib_serializer.py:70
+#: community/apps/function_lib/serializers/function_lib_serializer.py:83
+#: community/apps/function_lib/swagger_api/function_lib_api.py:68
+#: community/apps/function_lib/swagger_api/function_lib_api.py:69
+#: community/apps/function_lib/swagger_api/function_lib_api.py:84
+#: community/apps/function_lib/swagger_api/function_lib_api.py:85
+#: community/apps/function_lib/swagger_api/function_lib_api.py:130
+#: community/apps/function_lib/swagger_api/function_lib_api.py:131
+#: community/apps/function_lib/swagger_api/function_lib_api.py:176
+#: community/apps/function_lib/swagger_api/function_lib_api.py:177
+msgid "variable name"
+msgstr "Variable Name"
+
+#: community/apps/function_lib/serializers/function_lib_serializer.py:71
+#: community/apps/function_lib/swagger_api/function_lib_api.py:88
+#: community/apps/function_lib/swagger_api/function_lib_api.py:89
+#: community/apps/function_lib/swagger_api/function_lib_api.py:134
+#: community/apps/function_lib/swagger_api/function_lib_api.py:135
+#: community/apps/function_lib/swagger_api/function_lib_api.py:180
+#: community/apps/function_lib/swagger_api/function_lib_api.py:181
+msgid "required"
+msgstr "Required"
+
+#: community/apps/function_lib/serializers/function_lib_serializer.py:74
+msgid "fields only support string|int|dict|array|float"
+msgstr "Fields only support string|int|dict|array|float"
+
+#: community/apps/function_lib/serializers/function_lib_serializer.py:85
+#: community/apps/function_lib/swagger_api/function_lib_api.py:72
+#: community/apps/function_lib/swagger_api/function_lib_api.py:73
+msgid "variable value"
+msgstr "Variable Value"
+
+#: community/apps/function_lib/serializers/function_lib_serializer.py:93
+#: community/apps/function_lib/serializers/function_lib_serializer.py:104
+#: community/apps/function_lib/serializers/function_lib_serializer.py:119
+#: community/apps/function_lib/serializers/py_lint_serializer.py:23
+#: community/apps/function_lib/swagger_api/function_lib_api.py:28
+#: community/apps/function_lib/swagger_api/function_lib_api.py:29
+#: community/apps/function_lib/swagger_api/function_lib_api.py:75
+#: community/apps/function_lib/swagger_api/function_lib_api.py:76
+#: community/apps/function_lib/swagger_api/function_lib_api.py:117
+#: community/apps/function_lib/swagger_api/function_lib_api.py:118
+#: community/apps/function_lib/swagger_api/function_lib_api.py:163
+#: community/apps/function_lib/swagger_api/function_lib_api.py:164
+#: community/apps/function_lib/swagger_api/py_lint_api.py:22
+#: community/apps/function_lib/swagger_api/py_lint_api.py:23
+msgid "function content"
+msgstr "Function Content"
+
+#: community/apps/function_lib/serializers/function_lib_serializer.py:98
+#: community/apps/function_lib/serializers/function_lib_serializer.py:114
+#: community/apps/function_lib/serializers/function_lib_serializer.py:135
+#: community/apps/function_lib/serializers/function_lib_serializer.py:388
+#: community/apps/function_lib/swagger_api/function_lib_api.py:24
+#: community/apps/function_lib/swagger_api/function_lib_api.py:25
+#: community/apps/function_lib/swagger_api/function_lib_api.py:46
+#: community/apps/function_lib/swagger_api/function_lib_api.py:113
+#: community/apps/function_lib/swagger_api/function_lib_api.py:114
+#: community/apps/function_lib/swagger_api/function_lib_api.py:159
+#: community/apps/function_lib/swagger_api/function_lib_api.py:160
+msgid "function name"
+msgstr "Function Name"
+
+#: community/apps/function_lib/serializers/function_lib_serializer.py:101
+#: community/apps/function_lib/serializers/function_lib_serializer.py:117
+#: community/apps/function_lib/serializers/function_lib_serializer.py:138
+#: community/apps/function_lib/swagger_api/function_lib_api.py:26
+#: community/apps/function_lib/swagger_api/function_lib_api.py:27
+#: community/apps/function_lib/swagger_api/function_lib_api.py:51
+#: community/apps/function_lib/swagger_api/function_lib_api.py:115
+#: community/apps/function_lib/swagger_api/function_lib_api.py:116
+#: community/apps/function_lib/swagger_api/function_lib_api.py:161
+#: community/apps/function_lib/swagger_api/function_lib_api.py:162
+msgid "function description"
+msgstr "Function Description"
+
+#: community/apps/function_lib/serializers/function_lib_serializer.py:232
+msgid "field has no value set"
+msgstr "Field has no value set"
+
+#: community/apps/function_lib/serializers/function_lib_serializer.py:248
+#: community/apps/function_lib/serializers/function_lib_serializer.py:253
+msgid "type error"
+msgstr "Type error"
+
+#: community/apps/function_lib/serializers/function_lib_serializer.py:256
+#, python-brace-format
+msgid "Field: {name} Type: {_type} Value: {value} Type conversion error"
+msgstr ""
+
+#: community/apps/function_lib/serializers/function_lib_serializer.py:261
+msgid "function id"
+msgstr "Function ID"
+
+#: community/apps/function_lib/serializers/function_lib_serializer.py:267
+#: community/apps/function_lib/serializers/function_lib_serializer.py:303
+#: community/apps/function_lib/serializers/function_lib_serializer.py:366
+#: community/apps/function_lib/serializers/function_lib_serializer.py:396
+msgid "Function does not exist"
+msgstr ""
+
+#: community/apps/function_lib/serializers/function_lib_serializer.py:357
+#: community/apps/function_lib/serializers/function_lib_serializer.py:386
+#| msgid "function"
+msgid "function ID"
+msgstr "Function ID"
+
+#: community/apps/function_lib/swagger_api/function_lib_api.py:23
+#: community/apps/function_lib/swagger_api/function_lib_api.py:205
+msgid "ID"
+msgstr ""
+
+#: community/apps/function_lib/swagger_api/function_lib_api.py:30
+#: community/apps/function_lib/swagger_api/function_lib_api.py:31
+msgid "input field"
+msgstr "Input field"
+
+#: community/apps/function_lib/swagger_api/function_lib_api.py:62
+#: community/apps/function_lib/swagger_api/function_lib_api.py:78
+#: community/apps/function_lib/swagger_api/function_lib_api.py:124
+#: community/apps/function_lib/swagger_api/function_lib_api.py:170
+msgid "Input variable list"
+msgstr ""
+
+#: community/apps/function_lib/swagger_api/function_lib_api.py:94
+#: community/apps/function_lib/swagger_api/function_lib_api.py:140
+#: community/apps/function_lib/swagger_api/function_lib_api.py:186
+msgid "Field type string|int|dict|array|float"
+msgstr ""
+
+#: community/apps/function_lib/swagger_api/function_lib_api.py:100
+#: community/apps/function_lib/swagger_api/function_lib_api.py:146
+#: community/apps/function_lib/swagger_api/function_lib_api.py:192
+msgid "The source only supports custom|reference"
+msgstr ""
+
+#: community/apps/function_lib/views/function_lib_views.py:28
+#: community/apps/function_lib/views/function_lib_views.py:29
+msgid "Get function list"
+msgstr ""
+
+#: community/apps/function_lib/views/function_lib_views.py:30
+#: community/apps/function_lib/views/function_lib_views.py:46
+#: community/apps/function_lib/views/function_lib_views.py:59
+#: community/apps/function_lib/views/function_lib_views.py:74
+#: community/apps/function_lib/views/function_lib_views.py:85
+#: community/apps/function_lib/views/function_lib_views.py:95
+#: community/apps/function_lib/views/function_lib_views.py:111
+#: community/apps/function_lib/views/py_lint.py:29
+msgid "Function"
+msgstr "Function Library"
+
+#: community/apps/function_lib/views/function_lib_views.py:43
+#: community/apps/function_lib/views/function_lib_views.py:44
+msgid "Create function"
+msgstr ""
+
+#: community/apps/function_lib/views/function_lib_views.py:56
+#: community/apps/function_lib/views/function_lib_views.py:57
+msgid "Debug function"
+msgstr ""
+
+#: community/apps/function_lib/views/function_lib_views.py:71
+#: community/apps/function_lib/views/function_lib_views.py:72
+msgid "Update function"
+msgstr ""
+
+#: community/apps/function_lib/views/function_lib_views.py:83
+#: community/apps/function_lib/views/function_lib_views.py:84
+msgid "Delete function"
+msgstr ""
+
+#: community/apps/function_lib/views/function_lib_views.py:93
+#: community/apps/function_lib/views/function_lib_views.py:94
+msgid "Get function details"
+msgstr ""
+
+#: community/apps/function_lib/views/function_lib_views.py:106
+#: community/apps/function_lib/views/function_lib_views.py:107
+msgid "Get function list by pagination"
+msgstr ""
+
+#: community/apps/function_lib/views/function_lib_views.py:129
+#| msgid "function"
+msgid "Import function"
+msgstr "Import Function"
+
+#: community/apps/function_lib/views/function_lib_views.py:143
+#| msgid "Export conversation"
+msgid "Export function"
+msgstr "Export Function"
+
+#: community/apps/function_lib/views/py_lint.py:26
+#: community/apps/function_lib/views/py_lint.py:27
+msgid "Check code"
+msgstr ""
+
+#: community/apps/setting/models_provider/base_model_provider.py:66
+msgid "Model type cannot be empty"
+msgstr ""
+
+#: community/apps/setting/models_provider/base_model_provider.py:91
+msgid "The current platform does not support downloading models"
+msgstr ""
+
+#: community/apps/setting/models_provider/base_model_provider.py:146
+msgid "LLM"
+msgstr ""
+
+#: community/apps/setting/models_provider/base_model_provider.py:147
+msgid "Embedding Model"
+msgstr ""
+
+#: community/apps/setting/models_provider/base_model_provider.py:148
+msgid "Speech2Text"
+msgstr ""
+
+#: community/apps/setting/models_provider/base_model_provider.py:149
+msgid "TTS"
+msgstr ""
+
+#: community/apps/setting/models_provider/base_model_provider.py:150
+msgid "Vision Model"
+msgstr ""
+
+#: community/apps/setting/models_provider/base_model_provider.py:151
+msgid "Image Generation"
+msgstr ""
+
+#: community/apps/setting/models_provider/base_model_provider.py:152
+msgid "Rerank"
+msgstr ""
+
+#: community/apps/setting/models_provider/base_model_provider.py:226
+msgid "The model does not support"
+msgstr ""
+
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/aliyun_bai_lian_model_provider.py:42
+msgid ""
+"With the GTE-Rerank text sorting series model developed by Alibaba Tongyi "
+"Lab, developers can integrate high-quality text retrieval and sorting "
+"through the LlamaIndex framework."
+msgstr ""
+
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/aliyun_bai_lian_model_provider.py:45
+msgid ""
+"Chinese (including various dialects such as Cantonese), English, Japanese, "
+"and Korean support free switching between multiple languages."
+msgstr ""
+
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/aliyun_bai_lian_model_provider.py:48
+msgid ""
+"CosyVoice is based on a new generation of large generative speech models, "
+"which can predict emotions, intonation, rhythm, etc. based on context, and "
+"has better anthropomorphic effects."
+msgstr ""
+
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/aliyun_bai_lian_model_provider.py:51
+msgid ""
+"Universal text vector is Tongyi Lab's multi-language text unified vector "
+"model based on the LLM base. It provides high-level vector services for "
+"multiple mainstream languages around the world and helps developers quickly "
+"convert text data into high-quality vector data."
+msgstr ""
+
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/aliyun_bai_lian_model_provider.py:69
+#: community/apps/setting/models_provider/impl/qwen_model_provider/qwen_model_provider.py:40
+msgid ""
+"Tongyi Wanxiang - a large image model for text generation, supports "
+"bilingual input in Chinese and English, and supports the input of reference "
+"pictures for reference content or reference style migration. Key styles "
+"include but are not limited to watercolor, oil painting, Chinese painting, "
+"sketch, flat illustration, two-dimensional, and 3D. Cartoon."
+msgstr ""
+
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/aliyun_bai_lian_model_provider.py:95
+msgid "Alibaba Cloud Bailian"
+msgstr ""
+
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/embedding.py:28
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/image.py:47
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/llm.py:40
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/reranker.py:28
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/stt.py:21
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:68
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:55
+#: community/apps/setting/models_provider/impl/anthropic_model_provider/credential/image.py:42
+#: community/apps/setting/models_provider/impl/anthropic_model_provider/credential/llm.py:47
+#: community/apps/setting/models_provider/impl/aws_bedrock_model_provider/credential/embedding.py:21
+#: community/apps/setting/models_provider/impl/aws_bedrock_model_provider/credential/llm.py:40
+#: community/apps/setting/models_provider/impl/azure_model_provider/credential/embedding.py:27
+#: community/apps/setting/models_provider/impl/azure_model_provider/credential/image.py:45
+#: community/apps/setting/models_provider/impl/azure_model_provider/credential/llm.py:47
+#: community/apps/setting/models_provider/impl/azure_model_provider/credential/stt.py:23
+#: community/apps/setting/models_provider/impl/azure_model_provider/credential/tti.py:58
+#: community/apps/setting/models_provider/impl/azure_model_provider/credential/tts.py:41
+#: community/apps/setting/models_provider/impl/deepseek_model_provider/credential/llm.py:47
+#: community/apps/setting/models_provider/impl/gemini_model_provider/credential/embedding.py:26
+#: community/apps/setting/models_provider/impl/gemini_model_provider/credential/image.py:41
+#: community/apps/setting/models_provider/impl/gemini_model_provider/credential/llm.py:47
+#: community/apps/setting/models_provider/impl/gemini_model_provider/credential/stt.py:21
+#: community/apps/setting/models_provider/impl/kimi_model_provider/credential/llm.py:47
+#: community/apps/setting/models_provider/impl/local_model_provider/credential/embedding.py:27
+#: community/apps/setting/models_provider/impl/local_model_provider/credential/reranker.py:28
+#: community/apps/setting/models_provider/impl/ollama_model_provider/credential/embedding.py:26
+#: community/apps/setting/models_provider/impl/ollama_model_provider/credential/image.py:39
+#: community/apps/setting/models_provider/impl/ollama_model_provider/credential/llm.py:44
+#: community/apps/setting/models_provider/impl/ollama_model_provider/credential/reranker.py:27
+#: community/apps/setting/models_provider/impl/ollama_model_provider/credential/reranker.py:31
+#: community/apps/setting/models_provider/impl/openai_model_provider/credential/embedding.py:26
+#: community/apps/setting/models_provider/impl/openai_model_provider/credential/image.py:44
+#: community/apps/setting/models_provider/impl/openai_model_provider/credential/llm.py:47
+#: community/apps/setting/models_provider/impl/openai_model_provider/credential/stt.py:22
+#: community/apps/setting/models_provider/impl/openai_model_provider/credential/tti.py:61
+#: community/apps/setting/models_provider/impl/openai_model_provider/credential/tts.py:40
+#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/image.py:47
+#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/llm.py:47
+#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/tti.py:68
+#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/embedding.py:26
+#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/image.py:44
+#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/llm.py:47
+#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/reranker.py:28
+#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/stt.py:22
+#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/tti.py:61
+#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/tts.py:40
+#: community/apps/setting/models_provider/impl/tencent_cloud_model_provider/credential/llm.py:47
+#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/embedding.py:19
+#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/image.py:47
+#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/llm.py:31
+#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:78
+#: community/apps/setting/models_provider/impl/vllm_model_provider/credential/embedding.py:26
+#: community/apps/setting/models_provider/impl/vllm_model_provider/credential/image.py:42
+#: community/apps/setting/models_provider/impl/vllm_model_provider/credential/llm.py:39
+#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/embedding.py:26
+#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/image.py:42
+#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/llm.py:47
+#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/stt.py:25
+#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/tti.py:41
+#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/tts.py:53
+#: community/apps/setting/models_provider/impl/wenxin_model_provider/credential/embedding.py:27
+#: community/apps/setting/models_provider/impl/wenxin_model_provider/credential/llm.py:46
+#: community/apps/setting/models_provider/impl/xf_model_provider/credential/embedding.py:27
+#: community/apps/setting/models_provider/impl/xf_model_provider/credential/image.py:29
+#: community/apps/setting/models_provider/impl/xf_model_provider/credential/llm.py:66
+#: community/apps/setting/models_provider/impl/xf_model_provider/credential/stt.py:24
+#: community/apps/setting/models_provider/impl/xf_model_provider/credential/tts.py:47
+#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/embedding.py:19
+#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/image.py:41
+#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/llm.py:39
+#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/reranker.py:25
+#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/stt.py:21
+#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/tti.py:59
+#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/tts.py:39
+#: community/apps/setting/models_provider/impl/zhipu_model_provider/credential/image.py:41
+#: community/apps/setting/models_provider/impl/zhipu_model_provider/credential/llm.py:47
+#: community/apps/setting/models_provider/impl/zhipu_model_provider/credential/tti.py:40
+#, python-brace-format
+msgid "{model_type} Model type is not supported"
+msgstr ""
+
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/embedding.py:32
+#, python-brace-format
+msgid "{key} is required"
+msgstr ""
+
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/embedding.py:37
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/image.py:56
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/llm.py:50
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/reranker.py:37
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/model/tti.py:43
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/model/tts.py:37
+#: community/apps/setting/models_provider/impl/anthropic_model_provider/credential/image.py:52
+#: community/apps/setting/models_provider/impl/anthropic_model_provider/credential/llm.py:57
+#: community/apps/setting/models_provider/impl/aws_bedrock_model_provider/credential/embedding.py:34
+#: community/apps/setting/models_provider/impl/aws_bedrock_model_provider/credential/llm.py:53
+#: community/apps/setting/models_provider/impl/azure_model_provider/credential/embedding.py:37
+#: community/apps/setting/models_provider/impl/azure_model_provider/credential/image.py:55
+#: community/apps/setting/models_provider/impl/azure_model_provider/credential/llm.py:57
+#: community/apps/setting/models_provider/impl/deepseek_model_provider/credential/llm.py:57
+#: community/apps/setting/models_provider/impl/gemini_model_provider/credential/embedding.py:36
+#: community/apps/setting/models_provider/impl/gemini_model_provider/credential/image.py:51
+#: community/apps/setting/models_provider/impl/gemini_model_provider/credential/llm.py:57
+#: community/apps/setting/models_provider/impl/gemini_model_provider/model/stt.py:43
+#: community/apps/setting/models_provider/impl/kimi_model_provider/credential/llm.py:57
+#: community/apps/setting/models_provider/impl/local_model_provider/credential/embedding.py:36
+#: community/apps/setting/models_provider/impl/local_model_provider/credential/reranker.py:37
+#: community/apps/setting/models_provider/impl/ollama_model_provider/credential/embedding.py:37
+#: community/apps/setting/models_provider/impl/ollama_model_provider/credential/reranker.py:44
+#: community/apps/setting/models_provider/impl/openai_model_provider/credential/embedding.py:36
+#: community/apps/setting/models_provider/impl/openai_model_provider/credential/image.py:54
+#: community/apps/setting/models_provider/impl/openai_model_provider/credential/llm.py:58
+#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/image.py:56
+#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/llm.py:56
+#: community/apps/setting/models_provider/impl/qwen_model_provider/model/tti.py:43
+#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/embedding.py:36
+#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/image.py:54
+#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/llm.py:58
+#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/reranker.py:37
+#: community/apps/setting/models_provider/impl/tencent_cloud_model_provider/credential/llm.py:58
+#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/embedding.py:23
+#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/image.py:56
+#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/llm.py:51
+#: community/apps/setting/models_provider/impl/tencent_model_provider/model/tti.py:54
+#: community/apps/setting/models_provider/impl/vllm_model_provider/credential/embedding.py:36
+#: community/apps/setting/models_provider/impl/vllm_model_provider/credential/llm.py:50
+#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/embedding.py:36
+#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/image.py:52
+#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/llm.py:57
+#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/model/tts.py:77
+#: community/apps/setting/models_provider/impl/wenxin_model_provider/credential/embedding.py:31
+#: community/apps/setting/models_provider/impl/wenxin_model_provider/credential/llm.py:60
+#: community/apps/setting/models_provider/impl/xf_model_provider/credential/embedding.py:31
+#: community/apps/setting/models_provider/impl/xf_model_provider/credential/llm.py:76
+#: community/apps/setting/models_provider/impl/xf_model_provider/model/tts.py:101
+#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/embedding.py:31
+#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/image.py:51
+#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/llm.py:50
+#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/reranker.py:34
+#: community/apps/setting/models_provider/impl/xinference_model_provider/model/tts.py:44
+#: community/apps/setting/models_provider/impl/zhipu_model_provider/credential/image.py:51
+#: community/apps/setting/models_provider/impl/zhipu_model_provider/credential/llm.py:56
+#: community/apps/setting/models_provider/impl/zhipu_model_provider/model/tti.py:49
+msgid "Hello"
+msgstr ""
+
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/embedding.py:44
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/image.py:66
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/llm.py:58
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/reranker.py:44
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/stt.py:38
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:86
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:73
+#: community/apps/setting/models_provider/impl/anthropic_model_provider/credential/image.py:62
+#: community/apps/setting/models_provider/impl/anthropic_model_provider/credential/llm.py:65
+#: community/apps/setting/models_provider/impl/aws_bedrock_model_provider/credential/embedding.py:42
+#: community/apps/setting/models_provider/impl/aws_bedrock_model_provider/credential/llm.py:61
+#: community/apps/setting/models_provider/impl/azure_model_provider/credential/image.py:65
+#: community/apps/setting/models_provider/impl/azure_model_provider/credential/stt.py:40
+#: community/apps/setting/models_provider/impl/azure_model_provider/credential/tti.py:77
+#: community/apps/setting/models_provider/impl/azure_model_provider/credential/tts.py:58
+#: community/apps/setting/models_provider/impl/deepseek_model_provider/credential/llm.py:65
+#: community/apps/setting/models_provider/impl/gemini_model_provider/credential/embedding.py:43
+#: community/apps/setting/models_provider/impl/gemini_model_provider/credential/image.py:61
+#: community/apps/setting/models_provider/impl/gemini_model_provider/credential/llm.py:66
+#: community/apps/setting/models_provider/impl/gemini_model_provider/credential/stt.py:38
+#: community/apps/setting/models_provider/impl/kimi_model_provider/credential/llm.py:64
+#: community/apps/setting/models_provider/impl/local_model_provider/credential/embedding.py:44
+#: community/apps/setting/models_provider/impl/local_model_provider/credential/reranker.py:45
+#: community/apps/setting/models_provider/impl/ollama_model_provider/credential/reranker.py:51
+#: community/apps/setting/models_provider/impl/openai_model_provider/credential/embedding.py:43
+#: community/apps/setting/models_provider/impl/openai_model_provider/credential/image.py:64
+#: community/apps/setting/models_provider/impl/openai_model_provider/credential/llm.py:66
+#: community/apps/setting/models_provider/impl/openai_model_provider/credential/stt.py:39
+#: community/apps/setting/models_provider/impl/openai_model_provider/credential/tti.py:80
+#: community/apps/setting/models_provider/impl/openai_model_provider/credential/tts.py:58
+#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/image.py:66
+#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/llm.py:64
+#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/tti.py:86
+#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/embedding.py:43
+#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/image.py:64
+#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/llm.py:66
+#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/reranker.py:44
+#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/stt.py:39
+#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/tti.py:80
+#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/tts.py:58
+#: community/apps/setting/models_provider/impl/tencent_cloud_model_provider/credential/llm.py:66
+#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/embedding.py:30
+#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/image.py:66
+#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/llm.py:57
+#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:104
+#: community/apps/setting/models_provider/impl/vllm_model_provider/credential/embedding.py:43
+#: community/apps/setting/models_provider/impl/vllm_model_provider/credential/image.py:62
+#: community/apps/setting/models_provider/impl/vllm_model_provider/credential/llm.py:55
+#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/embedding.py:43
+#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/image.py:62
+#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/llm.py:66
+#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/stt.py:42
+#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/tti.py:58
+#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/tts.py:70
+#: community/apps/setting/models_provider/impl/wenxin_model_provider/credential/embedding.py:38
+#: community/apps/setting/models_provider/impl/xf_model_provider/credential/embedding.py:38
+#: community/apps/setting/models_provider/impl/xf_model_provider/credential/image.py:50
+#: community/apps/setting/models_provider/impl/xf_model_provider/credential/llm.py:84
+#: community/apps/setting/models_provider/impl/xf_model_provider/credential/stt.py:41
+#: community/apps/setting/models_provider/impl/xf_model_provider/credential/tts.py:65
+#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/image.py:60
+#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/reranker.py:40
+#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/stt.py:37
+#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/tti.py:77
+#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/tts.py:56
+#: community/apps/setting/models_provider/impl/zhipu_model_provider/credential/image.py:61
+#: community/apps/setting/models_provider/impl/zhipu_model_provider/credential/llm.py:64
+#: community/apps/setting/models_provider/impl/zhipu_model_provider/credential/tti.py:59
+#, python-brace-format
+msgid ""
+"Verification failed, please check whether the parameters are correct: {error}"
+msgstr ""
+
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/image.py:22
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/llm.py:15
+#: community/apps/setting/models_provider/impl/anthropic_model_provider/credential/image.py:15
+#: community/apps/setting/models_provider/impl/anthropic_model_provider/credential/llm.py:22
+#: community/apps/setting/models_provider/impl/aws_bedrock_model_provider/credential/llm.py:14
+#: community/apps/setting/models_provider/impl/azure_model_provider/credential/image.py:17
+#: community/apps/setting/models_provider/impl/azure_model_provider/credential/llm.py:22
+#: community/apps/setting/models_provider/impl/deepseek_model_provider/credential/llm.py:22
+#: community/apps/setting/models_provider/impl/gemini_model_provider/credential/image.py:15
+#: community/apps/setting/models_provider/impl/gemini_model_provider/credential/llm.py:22
+#: community/apps/setting/models_provider/impl/kimi_model_provider/credential/llm.py:22
+#: community/apps/setting/models_provider/impl/ollama_model_provider/credential/image.py:12
+#: community/apps/setting/models_provider/impl/ollama_model_provider/credential/llm.py:20
+#: community/apps/setting/models_provider/impl/openai_model_provider/credential/image.py:17
+#: community/apps/setting/models_provider/impl/openai_model_provider/credential/llm.py:22
+#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/image.py:22
+#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/llm.py:22
+#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/image.py:17
+#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/llm.py:22
+#: community/apps/setting/models_provider/impl/tencent_cloud_model_provider/credential/llm.py:22
+#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/image.py:22
+#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/llm.py:14
+#: community/apps/setting/models_provider/impl/vllm_model_provider/credential/image.py:15
+#: community/apps/setting/models_provider/impl/vllm_model_provider/credential/llm.py:15
+#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/image.py:15
+#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/llm.py:22
+#: community/apps/setting/models_provider/impl/wenxin_model_provider/credential/llm.py:22
+#: community/apps/setting/models_provider/impl/xf_model_provider/credential/llm.py:22
+#: community/apps/setting/models_provider/impl/xf_model_provider/credential/llm.py:41
+#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/image.py:14
+#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/llm.py:15
+#: community/apps/setting/models_provider/impl/zhipu_model_provider/credential/image.py:15
+#: community/apps/setting/models_provider/impl/zhipu_model_provider/credential/llm.py:22
+msgid "Temperature"
+msgstr ""
+
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/image.py:23
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/llm.py:16
+#: community/apps/setting/models_provider/impl/anthropic_model_provider/credential/image.py:16
+#: community/apps/setting/models_provider/impl/anthropic_model_provider/credential/llm.py:23
+#: community/apps/setting/models_provider/impl/aws_bedrock_model_provider/credential/llm.py:15
+#: community/apps/setting/models_provider/impl/azure_model_provider/credential/image.py:18
+#: community/apps/setting/models_provider/impl/azure_model_provider/credential/llm.py:23
+#: community/apps/setting/models_provider/impl/deepseek_model_provider/credential/llm.py:23
+#: community/apps/setting/models_provider/impl/gemini_model_provider/credential/image.py:16
+#: community/apps/setting/models_provider/impl/gemini_model_provider/credential/llm.py:23
+#: community/apps/setting/models_provider/impl/kimi_model_provider/credential/llm.py:23
+#: community/apps/setting/models_provider/impl/ollama_model_provider/credential/image.py:13
+#: community/apps/setting/models_provider/impl/ollama_model_provider/credential/llm.py:21
+#: community/apps/setting/models_provider/impl/openai_model_provider/credential/image.py:18
+#: community/apps/setting/models_provider/impl/openai_model_provider/credential/llm.py:23
+#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/image.py:23
+#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/llm.py:23
+#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/image.py:18
+#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/llm.py:23
+#: community/apps/setting/models_provider/impl/tencent_cloud_model_provider/credential/llm.py:23
+#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/image.py:23
+#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/llm.py:15
+#: community/apps/setting/models_provider/impl/vllm_model_provider/credential/image.py:16
+#: community/apps/setting/models_provider/impl/vllm_model_provider/credential/llm.py:16
+#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/image.py:16
+#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/llm.py:23
+#: community/apps/setting/models_provider/impl/wenxin_model_provider/credential/llm.py:23
+#: community/apps/setting/models_provider/impl/xf_model_provider/credential/llm.py:23
+#: community/apps/setting/models_provider/impl/xf_model_provider/credential/llm.py:42
+#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/image.py:15
+#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/llm.py:16
+#: community/apps/setting/models_provider/impl/zhipu_model_provider/credential/image.py:16
+#: community/apps/setting/models_provider/impl/zhipu_model_provider/credential/llm.py:23
+msgid ""
+"Higher values make the output more random, while lower values make it more "
+"focused and deterministic"
+msgstr ""
+
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/image.py:31
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/llm.py:24
+#: community/apps/setting/models_provider/impl/anthropic_model_provider/credential/image.py:24
+#: community/apps/setting/models_provider/impl/anthropic_model_provider/credential/llm.py:31
+#: community/apps/setting/models_provider/impl/aws_bedrock_model_provider/credential/llm.py:23
+#: community/apps/setting/models_provider/impl/azure_model_provider/credential/image.py:26
+#: community/apps/setting/models_provider/impl/azure_model_provider/credential/llm.py:31
+#: community/apps/setting/models_provider/impl/deepseek_model_provider/credential/llm.py:31
+#: community/apps/setting/models_provider/impl/gemini_model_provider/credential/image.py:24
+#: community/apps/setting/models_provider/impl/gemini_model_provider/credential/llm.py:31
+#: community/apps/setting/models_provider/impl/kimi_model_provider/credential/llm.py:31
+#: community/apps/setting/models_provider/impl/ollama_model_provider/credential/image.py:21
+#: community/apps/setting/models_provider/impl/ollama_model_provider/credential/llm.py:29
+#: community/apps/setting/models_provider/impl/openai_model_provider/credential/image.py:26
+#: community/apps/setting/models_provider/impl/openai_model_provider/credential/llm.py:31
+#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/image.py:31
+#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/llm.py:31
+#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/image.py:26
+#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/llm.py:31
+#: community/apps/setting/models_provider/impl/tencent_cloud_model_provider/credential/llm.py:31
+#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/image.py:31
+#: community/apps/setting/models_provider/impl/vllm_model_provider/credential/image.py:24
+#: community/apps/setting/models_provider/impl/vllm_model_provider/credential/llm.py:24
+#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/image.py:24
+#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/llm.py:31
+#: community/apps/setting/models_provider/impl/wenxin_model_provider/credential/llm.py:31
+#: community/apps/setting/models_provider/impl/xf_model_provider/credential/llm.py:31
+#: community/apps/setting/models_provider/impl/xf_model_provider/credential/llm.py:50
+#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/image.py:23
+#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/llm.py:24
+#: community/apps/setting/models_provider/impl/zhipu_model_provider/credential/image.py:24
+#: community/apps/setting/models_provider/impl/zhipu_model_provider/credential/llm.py:31
+msgid "Output the maximum Tokens"
+msgstr ""
+
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/image.py:32
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/llm.py:25
+#: community/apps/setting/models_provider/impl/anthropic_model_provider/credential/image.py:25
+#: community/apps/setting/models_provider/impl/anthropic_model_provider/credential/llm.py:32
+#: community/apps/setting/models_provider/impl/aws_bedrock_model_provider/credential/llm.py:24
+#: community/apps/setting/models_provider/impl/azure_model_provider/credential/image.py:27
+#: community/apps/setting/models_provider/impl/azure_model_provider/credential/llm.py:32
+#: community/apps/setting/models_provider/impl/deepseek_model_provider/credential/llm.py:32
+#: community/apps/setting/models_provider/impl/gemini_model_provider/credential/image.py:25
+#: community/apps/setting/models_provider/impl/gemini_model_provider/credential/llm.py:32
+#: community/apps/setting/models_provider/impl/kimi_model_provider/credential/llm.py:32
+#: community/apps/setting/models_provider/impl/ollama_model_provider/credential/image.py:22
+#: community/apps/setting/models_provider/impl/ollama_model_provider/credential/llm.py:30
+#: community/apps/setting/models_provider/impl/openai_model_provider/credential/image.py:27
+#: community/apps/setting/models_provider/impl/openai_model_provider/credential/llm.py:32
+#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/image.py:32
+#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/llm.py:32
+#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/image.py:27
+#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/llm.py:32
+#: community/apps/setting/models_provider/impl/tencent_cloud_model_provider/credential/llm.py:32
+#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/image.py:32
+#: community/apps/setting/models_provider/impl/vllm_model_provider/credential/image.py:25
+#: community/apps/setting/models_provider/impl/vllm_model_provider/credential/llm.py:25
+#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/image.py:25
+#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/llm.py:32
+#: community/apps/setting/models_provider/impl/wenxin_model_provider/credential/llm.py:32
+#: community/apps/setting/models_provider/impl/xf_model_provider/credential/llm.py:32
+#: community/apps/setting/models_provider/impl/xf_model_provider/credential/llm.py:51
+#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/image.py:24
+#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/llm.py:25
+#: community/apps/setting/models_provider/impl/zhipu_model_provider/credential/image.py:25
+#: community/apps/setting/models_provider/impl/zhipu_model_provider/credential/llm.py:32
+msgid "Specify the maximum number of tokens that the model can generate"
+msgstr ""
+
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/image.py:51
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/llm.py:45
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/reranker.py:32
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/stt.py:26
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:72
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:60
+#: community/apps/setting/models_provider/impl/anthropic_model_provider/credential/image.py:47
+#: community/apps/setting/models_provider/impl/anthropic_model_provider/credential/llm.py:52
+#: community/apps/setting/models_provider/impl/azure_model_provider/credential/embedding.py:32
+#: community/apps/setting/models_provider/impl/azure_model_provider/credential/image.py:50
+#: community/apps/setting/models_provider/impl/azure_model_provider/credential/llm.py:52
+#: community/apps/setting/models_provider/impl/azure_model_provider/credential/stt.py:28
+#: community/apps/setting/models_provider/impl/azure_model_provider/credential/tti.py:63
+#: community/apps/setting/models_provider/impl/azure_model_provider/credential/tts.py:46
+#: community/apps/setting/models_provider/impl/deepseek_model_provider/credential/llm.py:52
+#: community/apps/setting/models_provider/impl/gemini_model_provider/credential/embedding.py:31
+#: community/apps/setting/models_provider/impl/gemini_model_provider/credential/image.py:46
+#: community/apps/setting/models_provider/impl/gemini_model_provider/credential/llm.py:52
+#: community/apps/setting/models_provider/impl/gemini_model_provider/credential/stt.py:26
+#: community/apps/setting/models_provider/impl/kimi_model_provider/credential/llm.py:52
+#: community/apps/setting/models_provider/impl/local_model_provider/credential/embedding.py:31
+#: community/apps/setting/models_provider/impl/local_model_provider/credential/reranker.py:32
+#: community/apps/setting/models_provider/impl/ollama_model_provider/credential/embedding.py:46
+#: community/apps/setting/models_provider/impl/ollama_model_provider/credential/llm.py:62
+#: community/apps/setting/models_provider/impl/ollama_model_provider/credential/reranker.py:63
+#: community/apps/setting/models_provider/impl/openai_model_provider/credential/embedding.py:31
+#: community/apps/setting/models_provider/impl/openai_model_provider/credential/image.py:49
+#: community/apps/setting/models_provider/impl/openai_model_provider/credential/llm.py:52
+#: community/apps/setting/models_provider/impl/openai_model_provider/credential/stt.py:27
+#: community/apps/setting/models_provider/impl/openai_model_provider/credential/tti.py:66
+#: community/apps/setting/models_provider/impl/openai_model_provider/credential/tts.py:45
+#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/image.py:51
+#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/llm.py:51
+#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/tti.py:72
+#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/embedding.py:31
+#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/image.py:49
+#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/llm.py:52
+#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/reranker.py:32
+#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/stt.py:27
+#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/tti.py:66
+#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/tts.py:45
+#: community/apps/setting/models_provider/impl/tencent_cloud_model_provider/credential/llm.py:52
+#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/image.py:51
+#: community/apps/setting/models_provider/impl/vllm_model_provider/credential/embedding.py:31
+#: community/apps/setting/models_provider/impl/vllm_model_provider/credential/image.py:47
+#: community/apps/setting/models_provider/impl/vllm_model_provider/credential/llm.py:65
+#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/embedding.py:31
+#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/image.py:47
+#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/llm.py:52
+#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/stt.py:30
+#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/tti.py:46
+#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/tts.py:58
+#: community/apps/setting/models_provider/impl/wenxin_model_provider/credential/llm.py:55
+#: community/apps/setting/models_provider/impl/wenxin_model_provider/credential/llm.py:72
+#: community/apps/setting/models_provider/impl/xf_model_provider/credential/image.py:34
+#: community/apps/setting/models_provider/impl/xf_model_provider/credential/llm.py:71
+#: community/apps/setting/models_provider/impl/xf_model_provider/credential/stt.py:29
+#: community/apps/setting/models_provider/impl/xf_model_provider/credential/tts.py:52
+#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/embedding.py:40
+#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/image.py:46
+#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/llm.py:59
+#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/reranker.py:29
+#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/stt.py:26
+#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/tti.py:64
+#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/tts.py:44
+#: community/apps/setting/models_provider/impl/zhipu_model_provider/credential/image.py:46
+#: community/apps/setting/models_provider/impl/zhipu_model_provider/credential/llm.py:51
+#: community/apps/setting/models_provider/impl/zhipu_model_provider/credential/tti.py:45
+#, python-brace-format
+msgid "{key} is required"
+msgstr ""
+
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:22
+#: community/apps/setting/models_provider/impl/azure_model_provider/credential/tti.py:15
+#: community/apps/setting/models_provider/impl/openai_model_provider/credential/tti.py:15
+#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/tti.py:22
+#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/tti.py:15
+#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/tti.py:15
+#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/tti.py:14
+#: community/apps/setting/models_provider/impl/zhipu_model_provider/credential/tti.py:15
+msgid "Image size"
+msgstr ""
+
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:22
+#: community/apps/setting/models_provider/impl/azure_model_provider/credential/tti.py:15
+#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/tti.py:22
+msgid "Specify the size of the generated image, such as: 1024x1024"
+msgstr ""
+
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:34
+#: community/apps/setting/models_provider/impl/azure_model_provider/credential/tti.py:40
+#: community/apps/setting/models_provider/impl/openai_model_provider/credential/tti.py:43
+#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/tti.py:34
+#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/tti.py:43
+#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/tti.py:41
+msgid "Number of pictures"
+msgstr ""
+
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:34
+#: community/apps/setting/models_provider/impl/azure_model_provider/credential/tti.py:40
+#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/tti.py:34
+msgid "Specify the number of generated images"
+msgstr ""
+
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:41
+#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/tti.py:41
+msgid "Style"
+msgstr ""
+
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:41
+#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/tti.py:41
+msgid "Specify the style of generated images"
+msgstr ""
+
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:45
+#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/tti.py:45
+msgid "Default value, the image style is randomly output by the model"
+msgstr ""
+
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:46
+#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/tti.py:46
+msgid "photography"
+msgstr "Photography"
+
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:47
+#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/tti.py:47
+msgid "Portraits"
+msgstr "Portraits"
+
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:48
+#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/tti.py:48
+msgid "3D cartoon"
+msgstr ""
+
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:49
+#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/tti.py:49
+msgid "animation"
+msgstr "Animation"
+
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:50
+#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/tti.py:50
+msgid "painting"
+msgstr "Painting"
+
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:51
+#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/tti.py:51
+msgid "watercolor"
+msgstr "Watercolor"
+
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:52
+#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/tti.py:52
+msgid "sketch"
+msgstr "Sketch"
+
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:53
+#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/tti.py:53
+msgid "Chinese painting"
+msgstr ""
+
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:54
+#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/tti.py:54
+msgid "flat illustration"
+msgstr "Flat illustration"
+
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:15
+#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/tts.py:15
+#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/tts.py:15
+msgid "timbre"
+msgstr "Timbre"
+
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:15
+#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/tts.py:15
+msgid "Chinese sounds can support mixed scenes of Chinese and English"
+msgstr ""
+
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:20
+msgid "Long Xiaochun"
+msgstr ""
+
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:21
+msgid "Long Xiaoxia"
+msgstr ""
+
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:22
+msgid "Long Xiaochen"
+msgstr ""
+
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:23
+msgid "Long Xiaobai"
+msgstr ""
+
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:24
+msgid "Long laotie"
+msgstr ""
+
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:25
+msgid "Long Shu"
+msgstr ""
+
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:26
+msgid "Long Shuo"
+msgstr ""
+
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:27
+msgid "Long Jing"
+msgstr ""
+
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:28
+msgid "Long Miao"
+msgstr ""
+
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:29
+msgid "Long Yue"
+msgstr ""
+
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:30
+msgid "Long Yuan"
+msgstr ""
+
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:31
+msgid "Long Fei"
+msgstr ""
+
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:32
+msgid "Long Jielidou"
+msgstr ""
+
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:33
+msgid "Long Tong"
+msgstr ""
+
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:34
+msgid "Long Xiang"
+msgstr ""
+
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:39
+#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/tts.py:33
+#: community/apps/setting/models_provider/impl/xf_model_provider/credential/tts.py:28
+msgid "speaking speed"
+msgstr "Speaking speed"
+
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:39
+msgid "[0.5,2], the default is 1, usually one decimal place is enough"
+msgstr ""
+
+#: community/apps/setting/models_provider/impl/anthropic_model_provider/credential/image.py:34
+#: community/apps/setting/models_provider/impl/anthropic_model_provider/credential/llm.py:74
+msgid "API URL"
+msgstr ""
+
+#: community/apps/setting/models_provider/impl/anthropic_model_provider/credential/image.py:35
+#: community/apps/setting/models_provider/impl/anthropic_model_provider/credential/llm.py:75
+msgid "API Key"
+msgstr ""
+
+#: community/apps/setting/models_provider/impl/aws_bedrock_model_provider/aws_bedrock_model_provider.py:36
+msgid ""
+"An update to Claude 2 that doubles the context window and improves "
+"reliability, hallucination rates, and evidence-based accuracy in long "
+"documents and RAG contexts."
+msgstr ""
+
+#: community/apps/setting/models_provider/impl/aws_bedrock_model_provider/aws_bedrock_model_provider.py:43
+msgid ""
+"Anthropic is a powerful model that can handle a variety of tasks, from "
+"complex dialogue and creative content generation to detailed command "
+"obedience."
+msgstr ""
+
+#: community/apps/setting/models_provider/impl/aws_bedrock_model_provider/aws_bedrock_model_provider.py:50
+msgid ""
+"The Claude 3 Haiku is Anthropic's fastest and most compact model, with near-"
+"instant responsiveness. The model can answer simple queries and requests "
+"quickly. Customers will be able to build seamless AI experiences that mimic "
+"human interactions. Claude 3 Haiku can process images and return text "
+"output, and provides 200K context windows."
+msgstr ""
+
+#: community/apps/setting/models_provider/impl/aws_bedrock_model_provider/aws_bedrock_model_provider.py:57
+msgid ""
+"The Claude 3 Sonnet model from Anthropic strikes the ideal balance between "
+"intelligence and speed, especially when it comes to handling enterprise "
+"workloads. This model offers maximum utility while being priced lower than "
+"competing products, and it's been engineered to be a solid choice for "
+"deploying AI at scale."
+msgstr ""
+
+#: community/apps/setting/models_provider/impl/aws_bedrock_model_provider/aws_bedrock_model_provider.py:64
+msgid ""
+"The Claude 3.5 Sonnet raises the industry standard for intelligence, "
+"outperforming competing models and the Claude 3 Opus in extensive "
+"evaluations, with the speed and cost-effectiveness of our mid-range models."
+msgstr ""
+
+#: community/apps/setting/models_provider/impl/aws_bedrock_model_provider/aws_bedrock_model_provider.py:71
+msgid ""
+"A faster, more affordable but still very powerful model that can handle a "
+"range of tasks including casual conversation, text analysis, summarization "
+"and document question answering."
+msgstr ""
+
+#: community/apps/setting/models_provider/impl/aws_bedrock_model_provider/aws_bedrock_model_provider.py:78
+msgid ""
+"Titan Text Premier is the most powerful and advanced model in the Titan Text "
+"series, designed to deliver exceptional performance for a variety of "
+"enterprise applications. With its cutting-edge features, it delivers greater "
+"accuracy and outstanding results, making it an excellent choice for "
+"organizations looking for a top-notch text processing solution."
+msgstr ""
+
+#: community/apps/setting/models_provider/impl/aws_bedrock_model_provider/aws_bedrock_model_provider.py:85
+msgid ""
+"Amazon Titan Text Lite is a lightweight, efficient model ideal for fine-"
+"tuning English-language tasks, including summarization and copywriting, "
+"where customers require smaller, more cost-effective, and highly "
+"customizable models."
+msgstr ""
+
+#: community/apps/setting/models_provider/impl/aws_bedrock_model_provider/aws_bedrock_model_provider.py:91
+msgid ""
+"Amazon Titan Text Express has context lengths of up to 8,000 tokens, making "
+"it ideal for a variety of high-level general language tasks, such as open-"
+"ended text generation and conversational chat, as well as support in "
+"retrieval-augmented generation (RAG). At launch, the model is optimized for "
+"English, but other languages are supported."
+msgstr ""
+
+#: community/apps/setting/models_provider/impl/aws_bedrock_model_provider/aws_bedrock_model_provider.py:97
+msgid ""
+"7B dense converter for rapid deployment and easy customization. Small in "
+"size yet powerful in a variety of use cases. Supports English and code, as "
+"well as 32k context windows."
+msgstr ""
+
+#: community/apps/setting/models_provider/impl/aws_bedrock_model_provider/aws_bedrock_model_provider.py:103
+msgid ""
+"Advanced Mistral AI large-scale language model capable of handling any "
+"language task, including complex multilingual reasoning, text understanding, "
+"transformation, and code generation."
+msgstr ""
+
+#: community/apps/setting/models_provider/impl/aws_bedrock_model_provider/aws_bedrock_model_provider.py:109
+msgid ""
+"Ideal for content creation, conversational AI, language understanding, R&D, "
+"and enterprise applications"
+msgstr ""
+
+#: community/apps/setting/models_provider/impl/aws_bedrock_model_provider/aws_bedrock_model_provider.py:115
+msgid ""
+"Ideal for limited computing power and resources, edge devices, and faster "
+"training times."
+msgstr ""
+
+#: community/apps/setting/models_provider/impl/aws_bedrock_model_provider/aws_bedrock_model_provider.py:123
+msgid ""
+"Titan Embed Text is the largest embedding model in the Amazon Titan Embed "
+"series and can handle various text embedding tasks, such as text "
+"classification, text similarity calculation, etc."
+msgstr ""
+
+#: community/apps/setting/models_provider/impl/aws_bedrock_model_provider/credential/embedding.py:28
+#: community/apps/setting/models_provider/impl/aws_bedrock_model_provider/credential/llm.py:47
+#, python-brace-format
+msgid "The following fields are required: {keys}"
+msgstr ""
+
+#: community/apps/setting/models_provider/impl/azure_model_provider/credential/embedding.py:44
+#: community/apps/setting/models_provider/impl/azure_model_provider/credential/llm.py:64
+msgid "Verification failed, please check whether the parameters are correct"
+msgstr ""
+
+#: community/apps/setting/models_provider/impl/azure_model_provider/credential/tti.py:28
+#: community/apps/setting/models_provider/impl/openai_model_provider/credential/tti.py:29
+#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/tti.py:29
+#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/tti.py:28
+msgid "Picture quality"
+msgstr ""
+
+#: community/apps/setting/models_provider/impl/azure_model_provider/credential/tts.py:17
+#: community/apps/setting/models_provider/impl/openai_model_provider/credential/tts.py:17
+#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/tts.py:17
+msgid ""
+"Try out the different sounds (Alloy, Echo, Fable, Onyx, Nova, and Sparkle) "
+"to find one that suits your desired tone and audience. The current voiceover "
+"is optimized for English."
+msgstr ""
+
+#: community/apps/setting/models_provider/impl/deepseek_model_provider/deepseek_model_provider.py:24
+msgid "Good at common conversational tasks, supports 32K contexts"
+msgstr ""
+
+#: community/apps/setting/models_provider/impl/deepseek_model_provider/deepseek_model_provider.py:29
+msgid "Good at handling programming tasks, supports 16K contexts"
+msgstr ""
+
+#: community/apps/setting/models_provider/impl/gemini_model_provider/gemini_model_provider.py:32
+msgid "Latest Gemini 1.0 Pro model, updated with Google update"
+msgstr ""
+
+#: community/apps/setting/models_provider/impl/gemini_model_provider/gemini_model_provider.py:36
+msgid "Latest Gemini 1.0 Pro Vision model, updated with Google update"
+msgstr ""
+
+#: community/apps/setting/models_provider/impl/gemini_model_provider/gemini_model_provider.py:43
+#: community/apps/setting/models_provider/impl/gemini_model_provider/gemini_model_provider.py:47
+#: community/apps/setting/models_provider/impl/gemini_model_provider/gemini_model_provider.py:54
+#: community/apps/setting/models_provider/impl/gemini_model_provider/gemini_model_provider.py:58
+msgid "Latest Gemini 1.5 Flash model, updated with Google updates"
+msgstr ""
+
+#: community/apps/setting/models_provider/impl/gemini_model_provider/model/stt.py:53
+msgid "convert audio to text"
+msgstr "Convert audio to text"
+
+#: community/apps/setting/models_provider/impl/local_model_provider/credential/embedding.py:53
+#: community/apps/setting/models_provider/impl/local_model_provider/credential/reranker.py:54
+msgid "Model catalog"
+msgstr ""
+
+#: community/apps/setting/models_provider/impl/local_model_provider/local_model_provider.py:39
+msgid "local model"
+msgstr "Local model"
+
+#: community/apps/setting/models_provider/impl/ollama_model_provider/credential/embedding.py:30
+#: community/apps/setting/models_provider/impl/ollama_model_provider/credential/image.py:43
+#: community/apps/setting/models_provider/impl/ollama_model_provider/credential/llm.py:48
+#: community/apps/setting/models_provider/impl/ollama_model_provider/credential/reranker.py:35
+#: community/apps/setting/models_provider/impl/vllm_model_provider/credential/llm.py:43
+#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/embedding.py:24
+#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/llm.py:44
+msgid "API domain name is invalid"
+msgstr ""
+
+#: community/apps/setting/models_provider/impl/ollama_model_provider/credential/embedding.py:35
+#: community/apps/setting/models_provider/impl/ollama_model_provider/credential/image.py:48
+#: community/apps/setting/models_provider/impl/ollama_model_provider/credential/llm.py:53
+#: community/apps/setting/models_provider/impl/ollama_model_provider/credential/reranker.py:40
+#: community/apps/setting/models_provider/impl/vllm_model_provider/credential/llm.py:47
+#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/embedding.py:30
+#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/llm.py:48
+msgid "The model does not exist, please download the model first"
+msgstr ""
+
+#: community/apps/setting/models_provider/impl/ollama_model_provider/ollama_model_provider.py:56
+msgid ""
+"Llama 2 is a set of pretrained and fine-tuned generative text models ranging "
+"in size from 7 billion to 70 billion. This is a repository of 7B pretrained "
+"models. Links to other models can be found in the index at the bottom."
+msgstr ""
+
+#: community/apps/setting/models_provider/impl/ollama_model_provider/ollama_model_provider.py:60
+msgid ""
+"Llama 2 is a set of pretrained and fine-tuned generative text models ranging "
+"in size from 7 billion to 70 billion. This is a repository of 13B pretrained "
+"models. Links to other models can be found in the index at the bottom."
+msgstr ""
+
+#: community/apps/setting/models_provider/impl/ollama_model_provider/ollama_model_provider.py:64
+msgid ""
+"Llama 2 is a set of pretrained and fine-tuned generative text models ranging "
+"in size from 7 billion to 70 billion. This is a repository of 70B pretrained "
+"models. Links to other models can be found in the index at the bottom."
+msgstr ""
+
+#: community/apps/setting/models_provider/impl/ollama_model_provider/ollama_model_provider.py:68
+msgid ""
+"Since the Chinese alignment of Llama2 itself is weak, we use the Chinese "
+"instruction set to fine-tune meta-llama/Llama-2-13b-chat-hf with LoRA so "
+"that it has strong Chinese conversation capabilities."
+msgstr ""
+
+#: community/apps/setting/models_provider/impl/ollama_model_provider/ollama_model_provider.py:72
+msgid ""
+"Meta Llama 3: The most capable public product LLM to date. 8 billion "
+"parameters."
+msgstr ""
+
+#: community/apps/setting/models_provider/impl/ollama_model_provider/ollama_model_provider.py:76
+msgid ""
+"Meta Llama 3: The most capable public product LLM to date. 70 billion "
+"parameters."
+msgstr ""
+
+#: community/apps/setting/models_provider/impl/ollama_model_provider/ollama_model_provider.py:80
+msgid ""
+"Compared with previous versions, qwen 1.5 0.5b has significantly enhanced "
+"the model's alignment with human preferences and its multi-language "
+"processing capabilities. Models of all sizes support a context length of "
+"32768 tokens. 500 million parameters."
+msgstr ""
+
+#: community/apps/setting/models_provider/impl/ollama_model_provider/ollama_model_provider.py:84
+msgid ""
+"Compared with previous versions, qwen 1.5 1.8b has significantly enhanced "
+"the model's alignment with human preferences and its multi-language "
+"processing capabilities. Models of all sizes support a context length of "
+"32768 tokens. 1.8 billion parameters."
+msgstr ""
+
+#: community/apps/setting/models_provider/impl/ollama_model_provider/ollama_model_provider.py:88
+msgid ""
+"Compared with previous versions, qwen 1.5 4b has significantly enhanced the "
+"model's alignment with human preferences and its multi-language processing "
+"capabilities. Models of all sizes support a context length of 32768 tokens. "
+"4 billion parameters."
+msgstr ""
+
+#: community/apps/setting/models_provider/impl/ollama_model_provider/ollama_model_provider.py:93
+msgid ""
+"Compared with previous versions, qwen 1.5 7b has significantly enhanced the "
+"model's alignment with human preferences and its multi-language processing "
+"capabilities. Models of all sizes support a context length of 32768 tokens. "
+"7 billion parameters."
+msgstr ""
+
+#: community/apps/setting/models_provider/impl/ollama_model_provider/ollama_model_provider.py:97
+msgid ""
+"Compared with previous versions, qwen 1.5 14b has significantly enhanced the "
+"model's alignment with human preferences and its multi-language processing "
+"capabilities. Models of all sizes support a context length of 32768 tokens. "
+"14 billion parameters."
+msgstr ""
+
+#: community/apps/setting/models_provider/impl/ollama_model_provider/ollama_model_provider.py:101
+msgid ""
+"Compared with previous versions, qwen 1.5 32b has significantly enhanced the "
+"model's alignment with human preferences and its multi-language processing "
+"capabilities. Models of all sizes support a context length of 32768 tokens. "
+"32 billion parameters."
+msgstr ""
+
+#: community/apps/setting/models_provider/impl/ollama_model_provider/ollama_model_provider.py:105
+msgid ""
+"Compared with previous versions, qwen 1.5 72b has significantly enhanced the "
+"model's alignment with human preferences and its multi-language processing "
+"capabilities. Models of all sizes support a context length of 32768 tokens. "
+"72 billion parameters."
+msgstr ""
+
+#: community/apps/setting/models_provider/impl/ollama_model_provider/ollama_model_provider.py:109
+msgid ""
+"Compared with previous versions, qwen 1.5 110b has significantly enhanced "
+"the model's alignment with human preferences and its multi-language "
+"processing capabilities. Models of all sizes support a context length of "
+"32768 tokens. 110 billion parameters."
+msgstr ""
+
+#: community/apps/setting/models_provider/impl/ollama_model_provider/ollama_model_provider.py:153
+#: community/apps/setting/models_provider/impl/ollama_model_provider/ollama_model_provider.py:193
+msgid ""
+"Phi-3 Mini is Microsoft's 3.8B parameter, lightweight, state-of-the-art open "
+"model."
+msgstr ""
+
+#: community/apps/setting/models_provider/impl/ollama_model_provider/ollama_model_provider.py:162
+#: community/apps/setting/models_provider/impl/ollama_model_provider/ollama_model_provider.py:197
+msgid ""
+"A high-performance open embedding model with a large token context window."
+msgstr ""
+
+#: community/apps/setting/models_provider/impl/openai_model_provider/credential/tti.py:16
+#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/tti.py:16
+msgid ""
+"The image generation endpoint allows you to create raw images based on text "
+"prompts. When using the DALL·E 3, the image size can be 1024x1024, 1024x1792 "
+"or 1792x1024 pixels."
+msgstr ""
+
+#: community/apps/setting/models_provider/impl/openai_model_provider/credential/tti.py:29
+#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/tti.py:29
+msgid ""
+" \n"
+"By default, images are produced in standard quality, but with DALL·E 3 you "
+"can set quality: \"hd\" to enhance detail. Square, standard quality images "
+"are generated fastest.\n"
+" "
+msgstr ""
+
+#: community/apps/setting/models_provider/impl/openai_model_provider/credential/tti.py:44
+#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/tti.py:44
+msgid ""
+"You can use DALL·E 3 to request 1 image at a time (requesting more images by "
+"issuing parallel requests), or use DALL·E 2 with the n parameter to request "
+"up to 10 images at a time."
+msgstr ""
+
+#: community/apps/setting/models_provider/impl/openai_model_provider/openai_model_provider.py:35
+#: community/apps/setting/models_provider/impl/openai_model_provider/openai_model_provider.py:119
+#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/siliconCloud_model_provider.py:111
+msgid "The latest gpt-3.5-turbo, updated with OpenAI adjustments"
+msgstr ""
+
+#: community/apps/setting/models_provider/impl/openai_model_provider/openai_model_provider.py:38
+msgid "Latest gpt-4, updated with OpenAI adjustments"
+msgstr ""
+
+#: community/apps/setting/models_provider/impl/openai_model_provider/openai_model_provider.py:40
+#: community/apps/setting/models_provider/impl/openai_model_provider/openai_model_provider.py:99
+msgid ""
+"The latest GPT-4o, cheaper and faster than gpt-4-turbo, updated with OpenAI "
+"adjustments"
+msgstr ""
+
+#: community/apps/setting/models_provider/impl/openai_model_provider/openai_model_provider.py:43
+#: community/apps/setting/models_provider/impl/openai_model_provider/openai_model_provider.py:102
+msgid ""
+"The latest gpt-4o-mini, cheaper and faster than gpt-4o, updated with OpenAI "
+"adjustments"
+msgstr ""
+
+#: community/apps/setting/models_provider/impl/openai_model_provider/openai_model_provider.py:46
+msgid "The latest gpt-4-turbo, updated with OpenAI adjustments"
+msgstr ""
+
+#: community/apps/setting/models_provider/impl/openai_model_provider/openai_model_provider.py:49
+msgid "The latest gpt-4-turbo-preview, updated with OpenAI adjustments"
+msgstr ""
+
+#: community/apps/setting/models_provider/impl/openai_model_provider/openai_model_provider.py:53
+msgid ""
+"gpt-3.5-turbo snapshot on January 25, 2024, supporting context length 16,385 "
+"tokens"
+msgstr ""
+
+#: community/apps/setting/models_provider/impl/openai_model_provider/openai_model_provider.py:57
+msgid ""
+"gpt-3.5-turbo snapshot on November 6, 2023, supporting context length 16,385 "
+"tokens"
+msgstr ""
+
+#: community/apps/setting/models_provider/impl/openai_model_provider/openai_model_provider.py:61
+msgid ""
+"[Legacy] gpt-3.5-turbo snapshot on June 13, 2023, will be deprecated on June "
+"13, 2024"
+msgstr ""
+
+#: community/apps/setting/models_provider/impl/openai_model_provider/openai_model_provider.py:65
+msgid ""
+"gpt-4o snapshot on May 13, 2024, supporting context length 128,000 tokens"
+msgstr ""
+
+#: community/apps/setting/models_provider/impl/openai_model_provider/openai_model_provider.py:69
+msgid ""
+"gpt-4-turbo snapshot on April 9, 2024, supporting context length 128,000 "
+"tokens"
+msgstr ""
+
+#: community/apps/setting/models_provider/impl/openai_model_provider/openai_model_provider.py:72
+msgid ""
+"gpt-4-turbo snapshot on January 25, 2024, supporting context length 128,000 "
+"tokens"
+msgstr ""
+
+#: community/apps/setting/models_provider/impl/openai_model_provider/openai_model_provider.py:75
+msgid ""
+"gpt-4-turbo snapshot on November 6, 2023, supporting context length 128,000 "
+"tokens"
+msgstr ""
+
+#: community/apps/setting/models_provider/impl/qwen_model_provider/qwen_model_provider.py:63
+msgid "Tongyi Qianwen"
+msgstr ""
+
+#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/model/reranker.py:46
+msgid "Please provide server URL"
+msgstr ""
+
+#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/model/reranker.py:49
+msgid "Please provide the model"
+msgstr ""
+
+#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/model/reranker.py:52
+msgid "Please provide the API Key"
+msgstr ""
+
+#: community/apps/setting/models_provider/impl/tencent_cloud_model_provider/tencent_cloud_model_provider.py:58
+msgid "Tencent Cloud"
+msgstr ""
+
+#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/llm.py:41
+#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:88
+#, python-brace-format
+msgid "{keys} is required"
+msgstr ""
+
+#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:14
+msgid "painting style"
+msgstr "Painting style"
+
+#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:14
+msgid "If not passed, the default value is 201 (Japanese anime style)"
+msgstr ""
+
+#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:18
+msgid "Not limited to style"
+msgstr ""
+
+#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:19
+msgid "ink painting"
+msgstr "Ink painting"
+
+#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:20
+msgid "concept art"
+msgstr "Concept art"
+
+#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:21
+msgid "Oil painting 1"
+msgstr ""
+
+#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:22
+msgid "Oil Painting 2 (Van Gogh)"
+msgstr ""
+
+#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:23
+msgid "watercolor painting"
+msgstr "Watercolor painting"
+
+#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:24
+msgid "pixel art"
+msgstr "Pixel art"
+
+#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:25
+msgid "impasto style"
+msgstr "Impasto style"
+
+#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:26
+msgid "illustration"
+msgstr "Illustration"
+
+#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:27
+msgid "paper cut style"
+msgstr "Paper cut style"
+
+#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:28
+msgid "Impressionism 1 (Monet)"
+msgstr ""
+
+#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:29
+msgid "Impressionism 2"
+msgstr ""
+
+#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:31
+msgid "classical portraiture"
+msgstr "Classical portraiture"
+
+#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:32
+msgid "black and white sketch"
+msgstr "Black and white sketch"
+
+#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:33
+msgid "cyberpunk"
+msgstr "Cyberpunk"
+
+#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:34
+msgid "science fiction style"
+msgstr "Science fiction style"
+
+#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:35
+msgid "dark style"
+msgstr "Dark style"
+
+#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:37
+msgid "vaporwave"
+msgstr "Vaporwave"
+
+#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:38
+msgid "Japanese animation"
+msgstr ""
+
+#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:39
+msgid "monster style"
+msgstr "Monster style"
+
+#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:40
+msgid "Beautiful ancient style"
+msgstr ""
+
+#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:41
+msgid "retro anime"
+msgstr "Retro anime"
+
+#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:42
+msgid "Game cartoon hand drawing"
+msgstr ""
+
+#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:43
+msgid "Universal realistic style"
+msgstr ""
+
+#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:50
+msgid "Generate image resolution"
+msgstr ""
+
+#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:50
+msgid "If not transmitted, the default value is 768:768."
+msgstr ""
+
+#: community/apps/setting/models_provider/impl/tencent_model_provider/tencent_model_provider.py:38
+msgid ""
+"The most effective version of the current hybrid model, the trillion-level "
+"parameter scale MOE-32K long article model. Reaching the absolute leading "
+"level on various benchmarks, with complex instructions and reasoning, "
+"complex mathematical capabilities, support for function call, and "
+"application focus optimization in fields such as multi-language translation, "
+"finance, law, and medical care"
+msgstr ""
+
+#: community/apps/setting/models_provider/impl/tencent_model_provider/tencent_model_provider.py:45
+msgid ""
+"A better routing strategy is adopted to simultaneously alleviate the "
+"problems of load balancing and expert convergence. For long articles, the "
+"needle-in-a-haystack index reaches 99.9%"
+msgstr ""
+
+#: community/apps/setting/models_provider/impl/tencent_model_provider/tencent_model_provider.py:51
+msgid ""
+"Upgraded to MOE structure, the context window is 256k, leading many open "
+"source models in multiple evaluation sets such as NLP, code, mathematics, "
+"industry, etc."
+msgstr ""
+
+#: community/apps/setting/models_provider/impl/tencent_model_provider/tencent_model_provider.py:57
+msgid ""
+"Hunyuan's latest version of the role-playing model, a role-playing model "
+"launched by Hunyuan's official fine-tuning training, is based on the Hunyuan "
+"model combined with the role-playing scene data set for additional training, "
+"and has better basic effects in role-playing scenes."
+msgstr ""
+
+#: community/apps/setting/models_provider/impl/tencent_model_provider/tencent_model_provider.py:63
+msgid ""
+"Hunyuan's latest MOE architecture FunctionCall model has been trained with "
+"high-quality FunctionCall data and has a context window of 32K, leading in "
+"multiple dimensions of evaluation indicators."
+msgstr ""
+
+#: community/apps/setting/models_provider/impl/tencent_model_provider/tencent_model_provider.py:69
+msgid ""
+"Hunyuan's latest code generation model, after training the base model with "
+"200B high-quality code data, and iterating on high-quality SFT data for half "
+"a year, the context long window length has been increased to 8K, and it "
+"ranks among the top in the automatic evaluation indicators of code "
+"generation in the five major languages; the five major languages In the "
+"manual high-quality evaluation of 10 comprehensive code tasks that consider "
+"all aspects, the performance is in the first echelon."
+msgstr ""
+
+#: community/apps/setting/models_provider/impl/tencent_model_provider/tencent_model_provider.py:77
+msgid ""
+"Tencent's Hunyuan Embedding interface can convert text into high-quality "
+"vector data. The vector dimension is 1024 dimensions."
+msgstr ""
+
+#: community/apps/setting/models_provider/impl/tencent_model_provider/tencent_model_provider.py:87
+msgid "Mixed element visual model"
+msgstr "Hunyuan vision model"
+
+#: community/apps/setting/models_provider/impl/tencent_model_provider/tencent_model_provider.py:94
+msgid "Hunyuan graph model"
+msgstr "Hunyuan image generation model"
+
+#: community/apps/setting/models_provider/impl/tencent_model_provider/tencent_model_provider.py:125
+msgid "Tencent Hunyuan"
+msgstr ""
+
+#: community/apps/setting/models_provider/impl/vllm_model_provider/vllm_model_provider.py:24
+#: community/apps/setting/models_provider/impl/vllm_model_provider/vllm_model_provider.py:42
+msgid "Facebook’s 125M parameter model"
+msgstr ""
+
+#: community/apps/setting/models_provider/impl/vllm_model_provider/vllm_model_provider.py:25
+msgid "BAAI’s 7B parameter model"
+msgstr ""
+
+#: community/apps/setting/models_provider/impl/vllm_model_provider/vllm_model_provider.py:26
+msgid "BAAI’s 13B parameter mode"
+msgstr "BAAI’s 13B parameter model"
+
+#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/tti.py:16
+msgid ""
+"If the gap between width, height and 512 is too large, the picture rendering "
+"effect will be poor and the probability of excessive delay will increase "
+"significantly. Recommended ratio and corresponding width and height before "
+"super score: width*height"
+msgstr ""
+
+#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/tts.py:23
+#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/tts.py:29
+msgid "Universal female voice"
+msgstr ""
+
+#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/tts.py:25
+msgid "Supernatural timbre-ZiZi 2.0"
+msgstr ""
+
+#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/tts.py:26
+msgid "Supernatural timbre-ZiZi"
+msgstr ""
+
+#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/tts.py:27
+msgid "Supernatural sound-Ranran 2.0"
+msgstr ""
+
+#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/tts.py:28
+msgid "Supernatural sound-Ranran"
+msgstr ""
+
+#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/tts.py:30
+msgid "Universal male voice"
+msgstr ""
+
+#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/tts.py:33
+msgid "[0.2,3], the default is 1, usually one decimal place is enough"
+msgstr ""
+
+#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/volcanic_engine_model_provider.py:39
+#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/volcanic_engine_model_provider.py:44
+#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/volcanic_engine_model_provider.py:88
+msgid ""
+"The user goes to the model inference page of Volcano Ark to create an "
+"inference access point. Here, you need to enter ep-xxxxxxxxxx-yyyy to call "
+"it."
+msgstr ""
+
+#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/volcanic_engine_model_provider.py:59
+msgid "Universal 2.0-Vincent Diagram"
+msgstr "General 2.0 - Text-to-Image"
+
+#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/volcanic_engine_model_provider.py:64
+msgid "Universal 2.0Pro-Vincent Chart"
+msgstr "General 2.0 Pro - Text-to-Image"
+
+#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/volcanic_engine_model_provider.py:69
+msgid "Universal 1.4-Vincent Chart"
+msgstr "General 1.4 - Text-to-Image"
+
+#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/volcanic_engine_model_provider.py:74
+msgid "Animation 1.3.0-Vincent Picture"
+msgstr "Anime 1.3.0 - Text-to-Image"
+
+#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/volcanic_engine_model_provider.py:79
+msgid "Animation 1.3.1-Vincent Picture"
+msgstr "Anime 1.3.1 - Text-to-Image"
+
+#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/volcanic_engine_model_provider.py:113
+msgid "volcano engine"
+msgstr "Volcengine"
+
+#: community/apps/setting/models_provider/impl/wenxin_model_provider/credential/llm.py:51
+#, python-brace-format
+msgid "{model_name} The model does not support"
+msgstr ""
+
+#: community/apps/setting/models_provider/impl/wenxin_model_provider/wenxin_model_provider.py:24
+#: community/apps/setting/models_provider/impl/wenxin_model_provider/wenxin_model_provider.py:53
+msgid ""
+"ERNIE-Bot-4 is a large language model independently developed by Baidu. It "
+"covers massive Chinese data and has stronger capabilities in dialogue Q&A, "
+"content creation and generation."
+msgstr ""
+
+#: community/apps/setting/models_provider/impl/wenxin_model_provider/wenxin_model_provider.py:27
+msgid ""
+"ERNIE-Bot is a large language model independently developed by Baidu. It "
+"covers massive Chinese data and has stronger capabilities in dialogue Q&A, "
+"content creation and generation."
+msgstr ""
+
+#: community/apps/setting/models_provider/impl/wenxin_model_provider/wenxin_model_provider.py:30
+msgid ""
+"ERNIE-Bot-turbo is a large language model independently developed by Baidu. "
+"It covers massive Chinese data, has stronger capabilities in dialogue Q&A, "
+"content creation and generation, and has a faster response speed."
+msgstr ""
+
+#: community/apps/setting/models_provider/impl/wenxin_model_provider/wenxin_model_provider.py:33
+msgid ""
+"BLOOMZ-7B is a well-known large language model in the industry. It was "
+"developed and open sourced by BigScience and can output text in 46 languages "
+"and 13 programming languages."
+msgstr ""
+
+#: community/apps/setting/models_provider/impl/wenxin_model_provider/wenxin_model_provider.py:39
+msgid ""
+"Llama-2-13b-chat was developed by Meta AI and is open source. It performs "
+"well in scenarios such as coding, reasoning and knowledge application. "
+"Llama-2-13b-chat is a native open source version with balanced performance "
+"and effect, suitable for conversation scenarios."
+msgstr ""
+
+#: community/apps/setting/models_provider/impl/wenxin_model_provider/wenxin_model_provider.py:42
+msgid ""
+"Llama-2-70b-chat was developed by Meta AI and is open source. It performs "
+"well in scenarios such as coding, reasoning, and knowledge application. "
+"Llama-2-70b-chat is a native open source version with high-precision effects."
+msgstr ""
+
+#: community/apps/setting/models_provider/impl/wenxin_model_provider/wenxin_model_provider.py:45
+msgid ""
+"The Chinese enhanced version developed by the Qianfan team based on "
+"Llama-2-7b has performed well on Chinese knowledge bases such as CMMLU and C-"
+"EVAL."
+msgstr ""
+
+#: community/apps/setting/models_provider/impl/wenxin_model_provider/wenxin_model_provider.py:49
+msgid ""
+"Embedding-V1 is a text representation model based on Baidu Wenxin large "
+"model technology. It can convert text into a vector form represented by "
+"numerical values and can be used in text retrieval, information "
+"recommendation, knowledge mining and other scenarios. Embedding-V1 provides "
+"the Embeddings interface, which can generate corresponding vector "
+"representations based on input content. You can call this interface to input "
+"text into the model and obtain the corresponding vector representation for "
+"subsequent text processing and analysis."
+msgstr ""
+
+#: community/apps/setting/models_provider/impl/wenxin_model_provider/wenxin_model_provider.py:66
+msgid "Thousand sails large model"
+msgstr "Qianfan large model"
+
+#: community/apps/setting/models_provider/impl/xf_model_provider/credential/image.py:42
+msgid "Please outline this picture"
+msgstr ""
+
+#: community/apps/setting/models_provider/impl/xf_model_provider/credential/tts.py:15
+msgid "Speaker"
+msgstr ""
+
+#: community/apps/setting/models_provider/impl/xf_model_provider/credential/tts.py:16
+msgid ""
+"Speaker, optional value: Please go to the console to add a trial or purchase "
+"speaker. After adding, the speaker parameter value will be displayed."
+msgstr ""
+
+#: community/apps/setting/models_provider/impl/xf_model_provider/credential/tts.py:21
+msgid "iFlytek Xiaoyan"
+msgstr ""
+
+#: community/apps/setting/models_provider/impl/xf_model_provider/credential/tts.py:22
+msgid "iFlytek Xujiu"
+msgstr ""
+
+#: community/apps/setting/models_provider/impl/xf_model_provider/credential/tts.py:23
+msgid "iFlytek Xiaoping"
+msgstr ""
+
+#: community/apps/setting/models_provider/impl/xf_model_provider/credential/tts.py:24
+msgid "iFlytek Xiaojing"
+msgstr ""
+
+#: community/apps/setting/models_provider/impl/xf_model_provider/credential/tts.py:25
+msgid "iFlytek Xuxiaobao"
+msgstr ""
+
+#: community/apps/setting/models_provider/impl/xf_model_provider/credential/tts.py:28
+msgid "Speech speed, optional value: [0-100], default is 50"
+msgstr ""
+
+#: community/apps/setting/models_provider/impl/xf_model_provider/xf_model_provider.py:39
+#: community/apps/setting/models_provider/impl/xf_model_provider/xf_model_provider.py:50
+msgid "Chinese and English recognition"
+msgstr ""
+
+#: community/apps/setting/models_provider/impl/xf_model_provider/xf_model_provider.py:66
+msgid "iFlytek Spark"
+msgstr ""
+
+#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/tti.py:15
+msgid ""
+"The image generation endpoint allows you to create raw images based on text "
+"prompts. The dimensions of the image can be 1024x1024, 1024x1792, or "
+"1792x1024 pixels."
+msgstr ""
+
+#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/tti.py:29
+msgid ""
+"By default, images are generated in standard quality, you can set quality: "
+"\"hd\" to enhance detail. Square, standard quality images are generated "
+"fastest."
+msgstr ""
+
+#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/tti.py:42
+msgid ""
+"You can request 1 image at a time (requesting more images by making parallel "
+"requests), or up to 10 images at a time using the n parameter."
+msgstr ""
+
+#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/tts.py:20
+msgid "Chinese female"
+msgstr ""
+
+#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/tts.py:21
+msgid "Chinese male"
+msgstr ""
+
+#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/tts.py:22
+msgid "Japanese male"
+msgstr ""
+
+#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/tts.py:23
+msgid "Cantonese female"
+msgstr ""
+
+#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/tts.py:24
+msgid "English female"
+msgstr ""
+
+#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/tts.py:25
+msgid "English male"
+msgstr ""
+
+#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/tts.py:26
+msgid "Korean female"
+msgstr ""
+
+#: community/apps/setting/models_provider/impl/xinference_model_provider/xinference_model_provider.py:37
+msgid ""
+"Code Llama is a language model specifically designed for code generation."
+msgstr ""
+
+#: community/apps/setting/models_provider/impl/xinference_model_provider/xinference_model_provider.py:44
+msgid ""
+" \n"
+"Code Llama Instruct is a fine-tuned version of Code Llama's instructions, "
+"designed to perform specific tasks.\n"
+" "
+msgstr ""
+
+#: community/apps/setting/models_provider/impl/xinference_model_provider/xinference_model_provider.py:53
+msgid ""
+"Code Llama Python is a language model specifically designed for Python code "
+"generation."
+msgstr ""
+
+#: community/apps/setting/models_provider/impl/xinference_model_provider/xinference_model_provider.py:60
+msgid ""
+"CodeQwen 1.5 is a language model for code generation with high performance."
+msgstr ""
+
+#: community/apps/setting/models_provider/impl/xinference_model_provider/xinference_model_provider.py:67
+msgid "CodeQwen 1.5 Chat is a chat model version of CodeQwen 1.5."
+msgstr ""
+
+#: community/apps/setting/models_provider/impl/xinference_model_provider/xinference_model_provider.py:74
+msgid "Deepseek is a large-scale language model with 13 billion parameters."
+msgstr ""
+
+#: community/apps/setting/models_provider/impl/zhipu_model_provider/credential/tti.py:16
+msgid ""
+"Image size, only cogview-3-plus supports this parameter. Optional range: "
+"[1024x1024,768x1344,864x1152,1344x768,1152x864,1440x720,720x1440], the "
+"default is 1024x1024."
+msgstr ""
+
+#: community/apps/setting/models_provider/impl/zhipu_model_provider/zhipu_model_provider.py:34
+msgid ""
+"Have strong multi-modal understanding capabilities. Able to understand up to "
+"five images simultaneously and supports video content understanding"
+msgstr ""
+
+#: community/apps/setting/models_provider/impl/zhipu_model_provider/zhipu_model_provider.py:37
+msgid ""
+"Focus on single picture understanding. Suitable for scenarios requiring "
+"efficient image analysis"
+msgstr ""
+
+#: community/apps/setting/models_provider/impl/zhipu_model_provider/zhipu_model_provider.py:40
+msgid ""
+"Focus on single picture understanding. Suitable for scenarios requiring "
+"efficient image analysis (free)"
+msgstr ""
+
+#: community/apps/setting/models_provider/impl/zhipu_model_provider/zhipu_model_provider.py:46
+msgid ""
+"Quickly and accurately generate images based on user text descriptions. "
+"Resolution supports 1024x1024"
+msgstr ""
+
+#: community/apps/setting/models_provider/impl/zhipu_model_provider/zhipu_model_provider.py:49
+msgid ""
+"Generate high-quality images based on user text descriptions, supporting "
+"multiple image sizes"
+msgstr ""
+
+#: community/apps/setting/models_provider/impl/zhipu_model_provider/zhipu_model_provider.py:52
+msgid ""
+"Generate high-quality images based on user text descriptions, supporting "
+"multiple image sizes (free)"
+msgstr ""
+
+#: community/apps/setting/models_provider/impl/zhipu_model_provider/zhipu_model_provider.py:75
+msgid "zhipu AI"
+msgstr "Zhipu AI"
+
+#: community/apps/setting/serializers/model_apply_serializers.py:32
+#: community/apps/setting/serializers/model_apply_serializers.py:37
+msgid "vector text"
+msgstr "Vector text"
+
+#: community/apps/setting/serializers/model_apply_serializers.py:33
+msgid "vector text list"
+msgstr "Vector text list"
+
+#: community/apps/setting/serializers/model_apply_serializers.py:41
+msgid "text"
+msgstr "Text"
+
+#: community/apps/setting/serializers/model_apply_serializers.py:42
+msgid "metadata"
+msgstr "Metadata"
+
+#: community/apps/setting/serializers/model_apply_serializers.py:47
+msgid "query"
+msgstr "Query"
+
+#: community/apps/setting/serializers/provider_serializers.py:79
+#: community/apps/setting/serializers/provider_serializers.py:83
+#: community/apps/setting/serializers/provider_serializers.py:130
+#: community/apps/setting/serializers/provider_serializers.py:176
+#: community/apps/setting/serializers/provider_serializers.py:190
+#: community/apps/setting/swagger_api/provide_api.py:30
+#: community/apps/setting/swagger_api/provide_api.py:54
+#: community/apps/setting/swagger_api/provide_api.py:55
+#: community/apps/setting/swagger_api/provide_api.py:87
+#: community/apps/setting/swagger_api/provide_api.py:88
+#: community/apps/setting/swagger_api/provide_api.py:170
+msgid "model name"
+msgstr "Model Name"
+
+#: community/apps/setting/serializers/provider_serializers.py:81
+#: community/apps/setting/serializers/provider_serializers.py:132
+#: community/apps/setting/serializers/provider_serializers.py:142
+#: community/apps/setting/serializers/provider_serializers.py:180
+#: community/apps/setting/swagger_api/provide_api.py:26
+#: community/apps/setting/swagger_api/provide_api.py:51
+#: community/apps/setting/swagger_api/provide_api.py:52
+#: community/apps/setting/swagger_api/provide_api.py:84
+#: community/apps/setting/swagger_api/provide_api.py:85
+#: community/apps/setting/swagger_api/provide_api.py:134
+#: community/apps/setting/swagger_api/provide_api.py:165
+msgid "model type"
+msgstr "Model Type"
+
+#: community/apps/setting/serializers/provider_serializers.py:85
+#: community/apps/setting/serializers/provider_serializers.py:178
+#: community/apps/setting/serializers/provider_serializers.py:402
+#: community/apps/setting/swagger_api/provide_api.py:35
+#: community/apps/setting/swagger_api/provide_api.py:57
+#: community/apps/setting/swagger_api/provide_api.py:58
+#: community/apps/setting/swagger_api/provide_api.py:79
+#: community/apps/setting/swagger_api/provide_api.py:80
+#: community/apps/setting/swagger_api/provide_api.py:105
+#: community/apps/setting/swagger_api/provide_api.py:129
+#: community/apps/setting/swagger_api/provide_api.py:160
+#: community/apps/setting/swagger_api/provide_api.py:179
+msgid "provider"
+msgstr "Provider"
+
+#: community/apps/setting/serializers/provider_serializers.py:87
+#: community/apps/setting/serializers/provider_serializers.py:134
+#: community/apps/setting/serializers/provider_serializers.py:182
+msgid "permission type"
+msgstr "Permission Type"
+
+#: community/apps/setting/serializers/provider_serializers.py:89
+msgid "create user"
+msgstr "Create User"
+
+#: community/apps/setting/serializers/provider_serializers.py:138
+#: community/apps/setting/serializers/provider_serializers.py:186
+msgid "permissions only supportPUBLIC|PRIVATE"
+msgstr "Permissions only support PUBLIC|PRIVATE"
+
+#: community/apps/setting/serializers/provider_serializers.py:145
+#: community/apps/setting/serializers/provider_serializers.py:196
+msgid "certification information"
+msgstr "Certification information"
+
+#: community/apps/setting/serializers/provider_serializers.py:193
+msgid "parameter configuration"
+msgstr "Parameter configuration"
+
+#: community/apps/setting/serializers/provider_serializers.py:202
+#, python-brace-format
+msgid "Model name【{model_name}】already exists"
+msgstr "Model name [{model_name}] already exists"
+
+#: community/apps/setting/serializers/system_setting.py:29
+#: community/apps/setting/swagger_api/system_setting.py:25
+#: community/apps/setting/swagger_api/system_setting.py:26
+#: community/apps/setting/swagger_api/system_setting.py:57
+#: community/apps/setting/swagger_api/system_setting.py:58
+msgid "SMTP host"
+msgstr ""
+
+#: community/apps/setting/serializers/system_setting.py:30
+#: community/apps/setting/swagger_api/system_setting.py:28
+#: community/apps/setting/swagger_api/system_setting.py:29
+#: community/apps/setting/swagger_api/system_setting.py:60
+#: community/apps/setting/swagger_api/system_setting.py:61
+msgid "SMTP port"
+msgstr ""
+
+#: community/apps/setting/serializers/system_setting.py:31
+#: community/apps/setting/serializers/system_setting.py:35
+#: community/apps/setting/swagger_api/system_setting.py:31
+#: community/apps/setting/swagger_api/system_setting.py:32
+#: community/apps/setting/swagger_api/system_setting.py:43
+#: community/apps/setting/swagger_api/system_setting.py:44
+#: community/apps/setting/swagger_api/system_setting.py:63
+#: community/apps/setting/swagger_api/system_setting.py:64
+#: community/apps/setting/swagger_api/system_setting.py:75
+#: community/apps/setting/swagger_api/system_setting.py:76
+msgid "Sender's email"
+msgstr ""
+
+#: community/apps/setting/serializers/system_setting.py:32
+#: community/apps/setting/swagger_api/system_setting.py:34
+#: community/apps/setting/swagger_api/system_setting.py:35
+#: community/apps/setting/swagger_api/system_setting.py:66
+#: community/apps/setting/swagger_api/system_setting.py:67
+#: community/apps/users/serializers/user_serializers.py:72
+#: community/apps/users/serializers/user_serializers.py:112
+#: community/apps/users/serializers/user_serializers.py:143
+#: community/apps/users/serializers/user_serializers.py:211
+#: community/apps/users/serializers/user_serializers.py:293
+#: community/apps/users/serializers/user_serializers.py:346
+#: community/apps/users/serializers/user_serializers.py:671
+#: community/apps/users/serializers/user_serializers.py:703
+#: community/apps/users/serializers/user_serializers.py:704
+#: community/apps/users/serializers/user_serializers.py:743
+#: community/apps/users/serializers/user_serializers.py:763
+#: community/apps/users/serializers/user_serializers.py:764
+#: community/apps/users/views/user.py:109
+#: community/apps/users/views/user.py:110
+#: community/apps/users/views/user.py:111
+#: community/apps/users/views/user.py:112
+msgid "Password"
+msgstr ""
+
+#: community/apps/setting/serializers/system_setting.py:33
+#: community/apps/setting/swagger_api/system_setting.py:37
+#: community/apps/setting/swagger_api/system_setting.py:38
+#: community/apps/setting/swagger_api/system_setting.py:69
+#: community/apps/setting/swagger_api/system_setting.py:70
+msgid "Whether to enable TLS"
+msgstr ""
+
+#: community/apps/setting/serializers/system_setting.py:34
+#: community/apps/setting/swagger_api/system_setting.py:40
+#: community/apps/setting/swagger_api/system_setting.py:41
+#: community/apps/setting/swagger_api/system_setting.py:72
+#: community/apps/setting/swagger_api/system_setting.py:73
+msgid "Whether to enable SSL"
+msgstr ""
+
+#: community/apps/setting/serializers/system_setting.py:49
+msgid "Email verification failed"
+msgstr ""
+
+#: community/apps/setting/serializers/team_serializers.py:43
+#: community/apps/users/serializers/user_serializers.py:70
+#: community/apps/users/serializers/user_serializers.py:111
+#: community/apps/users/serializers/user_serializers.py:136
+#: community/apps/users/serializers/user_serializers.py:209
+#: community/apps/users/serializers/user_serializers.py:470
+#: community/apps/users/serializers/user_serializers.py:493
+#: community/apps/users/serializers/user_serializers.py:518
+#: community/apps/users/serializers/user_serializers.py:519
+#: community/apps/users/serializers/user_serializers.py:581
+#: community/apps/users/serializers/user_serializers.py:627
+#: community/apps/users/serializers/user_serializers.py:628
+#: community/apps/users/serializers/user_serializers.py:663
+#: community/apps/users/serializers/user_serializers.py:700
+#: community/apps/users/serializers/user_serializers.py:701
+msgid "Username"
+msgstr ""
+
+#: community/apps/setting/serializers/team_serializers.py:44
+#: community/apps/users/serializers/user_serializers.py:131
+#: community/apps/users/serializers/user_serializers.py:210
+#: community/apps/users/serializers/user_serializers.py:226
+#: community/apps/users/serializers/user_serializers.py:256
+#: community/apps/users/serializers/user_serializers.py:287
+#: community/apps/users/serializers/user_serializers.py:343
+#: community/apps/users/serializers/user_serializers.py:356
+#: community/apps/users/serializers/user_serializers.py:438
+#: community/apps/users/serializers/user_serializers.py:471
+#: community/apps/users/serializers/user_serializers.py:494
+#: community/apps/users/serializers/user_serializers.py:520
+#: community/apps/users/serializers/user_serializers.py:582
+#: community/apps/users/serializers/user_serializers.py:629
+#: community/apps/users/serializers/user_serializers.py:658
+#: community/apps/users/serializers/user_serializers.py:702
+#: community/apps/users/serializers/user_serializers.py:713
+#: community/apps/users/serializers/user_serializers.py:734
+msgid "Email"
+msgstr ""
+
+#: community/apps/setting/serializers/team_serializers.py:47
+#: community/apps/setting/serializers/team_serializers.py:148
+#: community/apps/setting/serializers/team_serializers.py:256
+msgid "team id"
+msgstr "Team ID"
+
+#: community/apps/setting/serializers/team_serializers.py:48
+#: community/apps/setting/serializers/team_serializers.py:254
+#: community/apps/setting/serializers/team_serializers.py:324
+msgid "member id"
+msgstr "Member ID"
+
+#: community/apps/setting/serializers/team_serializers.py:54
+msgid "use"
+msgstr "Use"
+
+#: community/apps/setting/serializers/team_serializers.py:55
+msgid "manage"
+msgstr "Manage"
+
+#: community/apps/setting/serializers/team_serializers.py:60
+msgid "Operation permissions USE, MANAGE permissions"
+msgstr ""
+
+#: community/apps/setting/serializers/team_serializers.py:63
+msgid "use permission"
+msgstr "Use permission"
+
+#: community/apps/setting/serializers/team_serializers.py:64
+msgid "use permission True|False"
+msgstr "Use permission True|False"
+
+#: community/apps/setting/serializers/team_serializers.py:66
+msgid "manage permission"
+msgstr "Manage permission"
+
+#: community/apps/setting/serializers/team_serializers.py:67
+msgid "manage permission True|False"
+msgstr "Manage permission True|False"
+
+#: community/apps/setting/serializers/team_serializers.py:73
+msgid "target id"
+msgstr "Target ID"
+
+#: community/apps/setting/serializers/team_serializers.py:82
+#: community/apps/setting/serializers/team_serializers.py:83
+msgid "dataset id/application id"
+msgstr "Dataset ID/Application ID"
+
+#: community/apps/setting/serializers/team_serializers.py:105
+msgid "Non-existent application|knowledge base id["
+msgstr ""
+
+#: community/apps/setting/serializers/team_serializers.py:139
+#: community/apps/setting/serializers/team_serializers.py:140
+msgid "Permission data"
+msgstr ""
+
+#: community/apps/setting/serializers/team_serializers.py:157
+#: community/apps/setting/serializers/team_serializers.py:158
+msgid "user id list"
+msgstr "User ID list"
+
+#: community/apps/setting/serializers/team_serializers.py:168
+#: community/apps/setting/serializers/team_serializers.py:169
+msgid "Username or email"
+msgstr ""
+
+#: community/apps/setting/serializers/team_serializers.py:217
+msgid "Username or email is required"
+msgstr ""
+
+#: community/apps/setting/serializers/team_serializers.py:221
+#: community/apps/users/serializers/user_serializers.py:800
+msgid "User does not exist"
+msgstr ""
+
+#: community/apps/setting/serializers/team_serializers.py:224
+msgid "The current members already exist in the team, do not add them again."
+msgstr ""
+
+#: community/apps/setting/serializers/team_serializers.py:248
+msgid "member list"
+msgstr "Member List"
+
+#: community/apps/setting/serializers/team_serializers.py:263
+msgid "The member does not exist, please add a member first"
+msgstr ""
+
+#: community/apps/setting/serializers/team_serializers.py:297
+msgid "Administrator rights do not allow modification"
+msgstr ""
+
+#: community/apps/setting/serializers/team_serializers.py:311
+msgid "Unable to remove team admin"
+msgstr ""
+
+#: community/apps/setting/serializers/valid_serializers.py:32
+#: community/apps/users/serializers/user_serializers.py:190
+#: community/apps/users/serializers/user_serializers.py:777
+msgid ""
+"The community version supports up to 2 users. If you need more users, please "
+"contact us (https://fit2cloud.com/)."
+msgstr ""
+
+#: community/apps/setting/serializers/valid_serializers.py:41
+#: community/apps/setting/swagger_api/valid_api.py:27
+msgid "check quantity"
+msgstr "Check quantity"
+
+#: community/apps/setting/swagger_api/provide_api.py:43
+#: community/apps/setting/swagger_api/provide_api.py:44
+#: community/apps/setting/swagger_api/provide_api.py:71
+#: community/apps/setting/swagger_api/provide_api.py:72
+#: community/apps/setting/swagger_api/provide_api.py:190
+#: community/apps/setting/swagger_api/provide_api.py:191
+msgid "parameters required to call the function"
+msgstr "Parameters required to call the function"
+
+#: community/apps/setting/swagger_api/provide_api.py:60
+#: community/apps/setting/swagger_api/provide_api.py:61
+#: community/apps/setting/swagger_api/provide_api.py:90
+#: community/apps/setting/swagger_api/provide_api.py:91
+msgid "model certificate information"
+msgstr "Model certificate information"
+
+#: community/apps/setting/swagger_api/provide_api.py:114
+#: community/apps/setting/swagger_api/provide_api.py:115
+msgid "model type description"
+msgstr "Model type description"
+
+#: community/apps/setting/swagger_api/provide_api.py:115
+msgid "large language model"
+msgstr "LLM"
+
+#: community/apps/setting/swagger_api/provide_api.py:116
+#: community/apps/setting/swagger_api/provide_api.py:117
+#: community/apps/setting/swagger_api/provide_api.py:147
+#: community/apps/setting/swagger_api/provide_api.py:148
+msgid "model type value"
+msgstr "Model type value"
+
+#: community/apps/setting/swagger_api/provide_api.py:145
+#: community/apps/setting/swagger_api/provide_api.py:146
+msgid "model description"
+msgstr "Model description"
+
+#: community/apps/setting/swagger_api/provide_api.py:184
+msgid "function that needs to be executed"
+msgstr "Function that needs to be executed"
+
+#: community/apps/setting/swagger_api/system_setting.py:19
+#: community/apps/setting/swagger_api/system_setting.py:20
+#: community/apps/setting/swagger_api/system_setting.py:51
+#: community/apps/setting/swagger_api/system_setting.py:52
+msgid "Email related parameters"
+msgstr ""
+
+#: community/apps/setting/swagger_api/valid_api.py:22
+msgid "Verification type: application|dataset|user"
+msgstr ""
+
+#: community/apps/setting/views/Team.py:27
+#: community/apps/setting/views/Team.py:28
+msgid "Get a list of team members"
+msgstr ""
+
+#: community/apps/setting/views/Team.py:30
+#: community/apps/setting/views/Team.py:40
+#: community/apps/setting/views/Team.py:54
+#: community/apps/setting/views/Team.py:68
+#: community/apps/setting/views/Team.py:80
+#: community/apps/setting/views/Team.py:92
+#: community/apps/users/serializers/user_serializers.py:198
+#: community/apps/users/serializers/user_serializers.py:791
+msgid "team"
+msgstr "Team"
+
+#: community/apps/setting/views/Team.py:37
+#: community/apps/setting/views/Team.py:38
+msgid "Add member"
+msgstr ""
+
+#: community/apps/setting/views/Team.py:51
+#: community/apps/setting/views/Team.py:52
+msgid "Add members in batches"
+msgstr ""
+
+#: community/apps/setting/views/Team.py:65
+#: community/apps/setting/views/Team.py:66
+msgid "Get team member permissions"
+msgstr ""
+
+#: community/apps/setting/views/Team.py:76
+#: community/apps/setting/views/Team.py:77
+msgid "Update team member permissions"
+msgstr ""
+
+#: community/apps/setting/views/Team.py:89
+#: community/apps/setting/views/Team.py:90
+msgid "Remove member"
+msgstr ""
+
+#: community/apps/setting/views/model.py:30
+#: community/apps/setting/views/model.py:31
+msgid "Create model"
+msgstr ""
+
+#: community/apps/setting/views/model.py:33
+#: community/apps/setting/views/model.py:45
+#: community/apps/setting/views/model.py:57
+#: community/apps/setting/views/model.py:74
+#: community/apps/setting/views/model.py:88
+#: community/apps/setting/views/model.py:103
+#: community/apps/setting/views/model.py:114
+#: community/apps/setting/views/model.py:129
+#: community/apps/setting/views/model.py:141
+#: community/apps/setting/views/model.py:151
+#: community/apps/setting/views/model.py:170
+#: community/apps/setting/views/model.py:180
+#: community/apps/setting/views/model.py:204
+#: community/apps/setting/views/model.py:219
+#: community/apps/setting/views/model.py:239
+#: community/apps/setting/views/model.py:257
+#: community/apps/setting/views/model_apply.py:26
+#: community/apps/setting/views/model_apply.py:36
+#: community/apps/setting/views/model_apply.py:46
+msgid "model"
+msgstr "Model Settings"
+
+#: community/apps/setting/views/model.py:42
+#: community/apps/setting/views/model.py:43
+msgid "Download model, trial only with Ollama platform"
+msgstr ""
+
+#: community/apps/setting/views/model.py:54
+#: community/apps/setting/views/model.py:55
+msgid "Get model list"
+msgstr ""
+
+#: community/apps/setting/views/model.py:71
+#: community/apps/setting/views/model.py:73
+msgid ""
+"Query model meta information, this interface does not carry authentication "
+"information"
+msgstr ""
+
+#: community/apps/setting/views/model.py:86
+#: community/apps/setting/views/model.py:87
+msgid "Pause model download"
+msgstr ""
+
+#: community/apps/setting/views/model.py:111
+#: community/apps/setting/views/model.py:112
+msgid "Save model parameter form"
+msgstr ""
+
+#: community/apps/setting/views/model.py:126
+#: community/apps/setting/views/model.py:127
+msgid "Update model"
+msgstr ""
+
+#: community/apps/setting/views/model.py:138
+#: community/apps/setting/views/model.py:139
+msgid "Delete model"
+msgstr ""
+
+#: community/apps/setting/views/model.py:149
+#: community/apps/setting/views/model.py:150
+msgid "Query model details"
+msgstr ""
+
+#: community/apps/setting/views/model.py:166
+#: community/apps/setting/views/model.py:167
+msgid "Call the supplier function to obtain form data"
+msgstr ""
+
+#: community/apps/setting/views/model.py:178
+#: community/apps/setting/views/model.py:179
+msgid "Get a list of model suppliers"
+msgstr ""
+
+#: community/apps/setting/views/model.py:200
+#: community/apps/setting/views/model.py:201
+msgid "Get a list of model types"
+msgstr ""
+
+#: community/apps/setting/views/model.py:215
+#: community/apps/setting/views/model.py:216
+#: community/apps/setting/views/model.py:236
+#: community/apps/setting/views/model.py:254
+#: community/apps/setting/views/model.py:255
+msgid "Get the model creation form"
+msgstr ""
+
+#: community/apps/setting/views/model.py:235
+msgid "Get model default parameters"
+msgstr ""
+
+#: community/apps/setting/views/model_apply.py:23
+#: community/apps/setting/views/model_apply.py:24
+#: community/apps/setting/views/model_apply.py:33
+#: community/apps/setting/views/model_apply.py:34
+msgid "Vectorization documentation"
+msgstr ""
+
+#: community/apps/setting/views/model_apply.py:43
+#: community/apps/setting/views/model_apply.py:44
+msgid "Reorder documents"
+msgstr ""
+
+#: community/apps/setting/views/system_setting.py:29
+#: community/apps/setting/views/system_setting.py:30
+msgid "Create or update email settings"
+msgstr ""
+
+#: community/apps/setting/views/system_setting.py:31
+#: community/apps/setting/views/system_setting.py:45
+#: community/apps/setting/views/system_setting.py:57
+msgid "Email settings"
+msgstr ""
+
+#: community/apps/setting/views/system_setting.py:41
+#: community/apps/setting/views/system_setting.py:42
+msgid "Test email settings"
+msgstr ""
+
+#: community/apps/setting/views/system_setting.py:54
+#: community/apps/setting/views/system_setting.py:55
+msgid "Get email settings"
+msgstr ""
+
+#: community/apps/setting/views/valid.py:26
+#: community/apps/setting/views/valid.py:27
+msgid "Get verification results"
+msgstr ""
+
+#: community/apps/users/serializers/user_serializers.py:62
+#: community/apps/users/serializers/user_serializers.py:63
+msgid "System version number"
+msgstr ""
+
+#: community/apps/users/serializers/user_serializers.py:141
+#: community/apps/users/serializers/user_serializers.py:669
+msgid "Username must be 6-20 characters long"
+msgstr ""
+
+#: community/apps/users/serializers/user_serializers.py:148
+#: community/apps/users/serializers/user_serializers.py:156
+#: community/apps/users/serializers/user_serializers.py:676
+#: community/apps/users/serializers/user_serializers.py:748
+msgid ""
+"The password must be 6-20 characters long and must be a combination of "
+"letters, numbers, and special characters."
+msgstr ""
+
+#: community/apps/users/serializers/user_serializers.py:151
+#: community/apps/users/serializers/user_serializers.py:212
+#: community/apps/users/serializers/user_serializers.py:213
+#: community/apps/users/serializers/user_serializers.py:300
+#: community/apps/users/serializers/user_serializers.py:347
+#: community/apps/users/serializers/user_serializers.py:348
+#: community/apps/users/serializers/user_serializers.py:749
+#: community/apps/users/serializers/user_serializers.py:765
+#: community/apps/users/serializers/user_serializers.py:766
+msgid "Confirm Password"
+msgstr ""
+
+#: community/apps/users/serializers/user_serializers.py:158
+#: community/apps/users/serializers/user_serializers.py:214
+#: community/apps/users/serializers/user_serializers.py:215
+#: community/apps/users/serializers/user_serializers.py:229
+#: community/apps/users/serializers/user_serializers.py:257
+#: community/apps/users/serializers/user_serializers.py:258
+#: community/apps/users/serializers/user_serializers.py:291
+#: community/apps/users/serializers/user_serializers.py:344
+#: community/apps/users/serializers/user_serializers.py:345
+#: community/apps/users/views/user.py:107
+#: community/apps/users/views/user.py:108
+msgid "Verification code"
+msgstr ""
+
+#: community/apps/users/serializers/user_serializers.py:232
+#: community/apps/users/serializers/user_serializers.py:259
+#: community/apps/users/serializers/user_serializers.py:360
+#: community/apps/users/serializers/user_serializers.py:439
+msgid "Type"
+msgstr ""
+
+#: community/apps/users/serializers/user_serializers.py:236
+#: community/apps/users/serializers/user_serializers.py:362
+msgid "The type only supports register|reset_password"
+msgstr ""
+
+#: community/apps/users/serializers/user_serializers.py:266
+msgid "Is it successful"
+msgstr ""
+
+#: community/apps/users/serializers/user_serializers.py:268
+msgid "Error message"
+msgstr ""
+
+#: community/apps/users/serializers/user_serializers.py:280
+msgid "language only support:"
+msgstr ""
+
+#: community/apps/users/serializers/user_serializers.py:298
+#: community/apps/users/serializers/user_serializers.py:305
+#: community/apps/users/serializers/user_serializers.py:754
+msgid ""
+"The confirmation password must be 6-20 characters long and must be a "
+"combination of letters, numbers, and special characters."
+msgstr ""
+
+#: community/apps/users/serializers/user_serializers.py:380
+#, python-brace-format
+msgid "Do not send emails again within {seconds} seconds"
+msgstr ""
+
+#: community/apps/users/serializers/user_serializers.py:410
+msgid ""
+"The email service has not been set up. Please contact the administrator to "
+"set up the email service in [Email Settings]."
+msgstr ""
+
+#: community/apps/users/serializers/user_serializers.py:421
+#, python-brace-format
+msgid "【Intelligent knowledge base question and answer system-{action}】"
+msgstr ""
+
+#: community/apps/users/serializers/user_serializers.py:422
+#: community/apps/users/views/user.py:194
+#: community/apps/users/views/user.py:195
+msgid "User registration"
+msgstr ""
+
+#: community/apps/users/serializers/user_serializers.py:422
+#: community/apps/users/views/user.py:212
+#: community/apps/users/views/user.py:213
+#: community/apps/users/views/user.py:301
+#: community/apps/users/views/user.py:302
+msgid "Change password"
+msgstr ""
+
+#: community/apps/users/serializers/user_serializers.py:474
+#: community/apps/users/serializers/user_serializers.py:475
+msgid "Permissions"
+msgstr ""
+
+#: community/apps/users/serializers/user_serializers.py:509
+#: community/apps/users/serializers/user_serializers.py:610
+#: community/apps/users/serializers/user_serializers.py:618
+msgid "Email or username"
+msgstr ""
+
+#: community/apps/users/serializers/user_serializers.py:560
+msgid "All"
+msgstr ""
+
+#: community/apps/users/serializers/user_serializers.py:561
+msgid "Me"
+msgstr "Mine"
+
+#: community/apps/users/serializers/user_serializers.py:583
+#: community/apps/users/serializers/user_serializers.py:680
+#: community/apps/users/serializers/user_serializers.py:705
+#: community/apps/users/serializers/user_serializers.py:719
+#: community/apps/users/serializers/user_serializers.py:736
+msgid "Phone"
+msgstr ""
+
+#: community/apps/users/serializers/user_serializers.py:587
+msgid "Source"
+msgstr ""
+
+#: community/apps/users/serializers/user_serializers.py:588
+#: community/apps/users/serializers/user_serializers.py:678
+#: community/apps/users/serializers/user_serializers.py:706
+#: community/apps/users/serializers/user_serializers.py:717
+#: community/apps/users/serializers/user_serializers.py:735
+msgid "Name"
+msgstr ""
+
+#: community/apps/users/serializers/user_serializers.py:727
+msgid "Email is already in use"
+msgstr ""
+
+#: community/apps/users/serializers/user_serializers.py:808
+msgid "Unable to delete administrator"
+msgstr ""
+
+#: community/apps/users/serializers/user_serializers.py:845
+msgid "Cannot modify administrator status"
+msgstr ""
+
+#: community/apps/users/views/user.py:37 community/apps/users/views/user.py:38
+msgid "Get MaxKB related information"
+msgstr ""
+
+#: community/apps/users/views/user.py:40
+msgid "System parameters"
+msgstr ""
+
+#: community/apps/users/views/user.py:50 community/apps/users/views/user.py:51
+msgid "Get current user information"
+msgstr ""
+
+#: community/apps/users/views/user.py:63 community/apps/users/views/user.py:64
+msgid "Get user list"
+msgstr ""
+
+#: community/apps/users/views/user.py:67 community/apps/users/views/user.py:90
+#: community/apps/users/views/user.py:116
+#: community/apps/users/views/user.py:136
+#: community/apps/users/views/user.py:152
+#: community/apps/users/views/user.py:178
+#: community/apps/users/views/user.py:199
+#: community/apps/users/views/user.py:217
+#: community/apps/users/views/user.py:234
+#: community/apps/users/views/user.py:249
+#: community/apps/users/views/user.py:373
+msgid "User"
+msgstr ""
+
+#: community/apps/users/views/user.py:79 community/apps/users/views/user.py:80
+msgid "Switch Language"
+msgstr ""
+
+#: community/apps/users/views/user.py:101
+#: community/apps/users/views/user.py:102
+msgid "Modify current user password"
+msgstr ""
+
+#: community/apps/users/views/user.py:125
+msgid "Failed to change password"
+msgstr ""
+
+#: community/apps/users/views/user.py:133
+#: community/apps/users/views/user.py:134
+msgid "Send email to current user"
+msgstr ""
+
+#: community/apps/users/views/user.py:149
+#: community/apps/users/views/user.py:150
+msgid "Sign out"
+msgstr ""
+
+#: community/apps/users/views/user.py:205
+msgid "Registration successful"
+msgstr ""
+
+#: community/apps/users/views/user.py:229
+#: community/apps/users/views/user.py:230
+msgid "Check whether the verification code is correct"
+msgstr ""
+
+#: community/apps/users/views/user.py:244
+#: community/apps/users/views/user.py:245
+msgid "Send email"
+msgstr ""
+
+#: community/apps/users/views/user.py:262
+#: community/apps/users/views/user.py:263
+msgid "Add user"
+msgstr ""
+
+#: community/apps/users/views/user.py:266
+#: community/apps/users/views/user.py:282
+#: community/apps/users/views/user.py:306
+#: community/apps/users/views/user.py:324
+#: community/apps/users/views/user.py:338
+#: community/apps/users/views/user.py:354
+msgid "User management"
+msgstr ""
+
+#: community/apps/users/views/user.py:280
+#: community/apps/users/views/user.py:281
+msgid "Get user paginated list"
+msgstr ""
+
+#: community/apps/users/views/user.py:320
+#: community/apps/users/views/user.py:321
+msgid "Delete user"
+msgstr ""
+
+#: community/apps/users/views/user.py:334
+#: community/apps/users/views/user.py:335
+msgid "Get user information"
+msgstr ""
+
+#: community/apps/users/views/user.py:349
+#: community/apps/users/views/user.py:350
+msgid "Update user information"
+msgstr ""
+
+#: community/apps/users/views/user.py:369
+#: community/apps/users/views/user.py:370
+msgid "Get user list by type"
+msgstr ""
+
+msgid "Fail"
+msgstr "Fail"
+
+msgid "Menu"
+msgstr "Operate menu"
+
+msgid "Operate"
+msgstr "Operate"
+
+msgid "Operate user"
+msgstr "Operate user"
+
+msgid "Ip Address"
+msgstr "IP Address"
+
+msgid "API Details"
+msgstr "API Details"
+
+msgid "Operate Time"
+msgstr "Operate time"
+
+msgid "System Settings/API Key"
+msgstr "System API Key"
+
+msgid "Appearance Settings"
+msgstr "Appearance Settings"
+
+msgid "Conversation Log"
+msgstr ""
+
+msgid "login authentication"
+msgstr "Login Authentication"
+
+msgid "Paragraph"
+msgstr ""
+
+msgid "Batch generate related"
+msgstr "Paragraph generate related problems"
+
+msgid "Application access"
+msgstr ""
+
+msgid "Add internal function"
+msgstr ""
+
+msgid "Batch generate related documents"
+msgstr "Batch generate related problems"
+
+msgid "No permission to use this function {name}"
+msgstr ""
+
+msgid "Function {name} is unavailable"
+msgstr ""
+
+msgid "Field: {name} Type: {_type} Value: {value} Type error"
+msgstr ""
+
+msgid "Field: {name} Type: {_type} Value: {value} Unsupported types"
+msgstr ""
+
+msgid "Field: {name} No value set"
+msgstr ""
+
+msgid "Generate related"
+msgstr ""
+
+msgid "Obtain graphical captcha"
+msgstr ""
+
+msgid "Captcha code error or expiration"
+msgstr ""
+
+msgid "captcha"
+msgstr ""
\ No newline at end of file
diff --git a/apps/locales/zh_CN/LC_MESSAGES/django.po b/apps/locales/zh_CN/LC_MESSAGES/django.po
new file mode 100644
index 00000000000..346fd5e47ad
--- /dev/null
+++ b/apps/locales/zh_CN/LC_MESSAGES/django.po
@@ -0,0 +1,7665 @@
+# SOME DESCRIPTIVE TITLE.
+# Copyright (C) YEAR THE PACKAGE'S COPYRIGHT HOLDER
+# This file is distributed under the same license as the PACKAGE package.
+# FIRST AUTHOR , YEAR.
+#
+msgid ""
+msgstr ""
+"Project-Id-Version: PACKAGE VERSION\n"
+"Report-Msgid-Bugs-To: \n"
+"POT-Creation-Date: 2025-03-20 14:18+0800\n"
+"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
+"Last-Translator: FULL NAME \n"
+"Language-Team: LANGUAGE \n"
+"Language: \n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+
+#: apps/xpack/auth/user_key.py:26
+#: apps/xpack/serializers/license_serializers.py:96
+#: apps/xpack/serializers/license_tools.py:109
+msgid "The license is invalid"
+msgstr "License 无效"
+
+#: apps/xpack/auth/user_key.py:32 apps/xpack/auth/user_key.py:34
+msgid "secret_key is invalid"
+msgstr "secret key无效"
+
+#: apps/xpack/middleware/swagger_middleware.py:19
+msgid "The license has not been uploaded or the license has expired"
+msgstr "License 未上传或 License 已过期"
+
+#: apps/xpack/serializers/application_setting_serializer.py:20
+msgid "theme color"
+msgstr "主题颜色"
+
+#: apps/xpack/serializers/application_setting_serializer.py:22
+msgid "header font color"
+msgstr "头部字体颜色"
+
+#: apps/xpack/serializers/application_setting_serializer.py:26
+msgid "float location type"
+msgstr "浮窗位置类型"
+
+#: apps/xpack/serializers/application_setting_serializer.py:27
+msgid "float location value"
+msgstr "浮窗位置值"
+
+#: apps/xpack/serializers/application_setting_serializer.py:31
+msgid "float location x"
+msgstr "浮窗位置 x"
+
+#: apps/xpack/serializers/application_setting_serializer.py:32
+msgid "float location y"
+msgstr "浮窗位置 y"
+
+#: apps/xpack/serializers/application_setting_serializer.py:36
+#: apps/xpack/swagger_api/application_setting_api.py:23
+msgid "show source"
+msgstr "是否显示来源"
+
+#: apps/xpack/serializers/application_setting_serializer.py:37
+#: community/apps/application/serializers/application_serializers.py:354
+#: community/apps/application/swagger_api/application_api.py:169
+#: community/apps/application/swagger_api/application_api.py:170
+#: community/apps/users/serializers/user_serializers.py:273
+#: community/apps/users/views/user.py:85 community/apps/users/views/user.py:86
+msgid "language"
+msgstr "语言"
+
+#: apps/xpack/serializers/application_setting_serializer.py:38
+#: apps/xpack/swagger_api/application_setting_api.py:30
+msgid "show history"
+msgstr "是否显示历史记录"
+
+#: apps/xpack/serializers/application_setting_serializer.py:39
+#: apps/xpack/swagger_api/application_setting_api.py:37
+msgid "draggable"
+msgstr "是否可拖动"
+
+#: apps/xpack/serializers/application_setting_serializer.py:40
+#: apps/xpack/swagger_api/application_setting_api.py:44
+msgid "show guide"
+msgstr "是否显示引导图"
+
+#: apps/xpack/serializers/application_setting_serializer.py:41
+#: apps/xpack/swagger_api/application_setting_api.py:51
+msgid "avatar"
+msgstr "头像"
+
+#: apps/xpack/serializers/application_setting_serializer.py:42
+msgid "avatar url"
+msgstr "头像地址"
+
+#: apps/xpack/serializers/application_setting_serializer.py:43
+#: apps/xpack/swagger_api/application_setting_api.py:86
+msgid "user avatar"
+msgstr "用户头像"
+
+#: apps/xpack/serializers/application_setting_serializer.py:44
+msgid "user avatar url"
+msgstr "用户头像地址"
+
+#: apps/xpack/serializers/application_setting_serializer.py:45
+#: apps/xpack/swagger_api/application_setting_api.py:58
+msgid "float icon"
+msgstr "浮窗图标"
+
+#: apps/xpack/serializers/application_setting_serializer.py:46
+msgid "float icon url"
+msgstr "浮窗图标地址"
+
+#: apps/xpack/serializers/application_setting_serializer.py:47
+#: apps/xpack/swagger_api/application_setting_api.py:65
+msgid "disclaimer"
+msgstr "免责声明"
+
+#: apps/xpack/serializers/application_setting_serializer.py:48
+#: apps/xpack/swagger_api/application_setting_api.py:72
+msgid "disclaimer value"
+msgstr "免责声明的值"
+
+#: apps/xpack/serializers/application_setting_serializer.py:70
+#: apps/xpack/serializers/dataset_lark_serializer.py:373
+#: community/apps/dataset/serializers/dataset_serializers.py:548
+msgid "application id"
+msgstr "应用 id"
+
+#: apps/xpack/serializers/application_setting_serializer.py:96
+#: apps/xpack/serializers/platform_serializer.py:83
+#: apps/xpack/serializers/platform_serializer.py:105
+#: apps/xpack/serializers/platform_serializer.py:174
+#: apps/xpack/serializers/platform_serializer.py:185
+#: community/apps/application/serializers/application_serializers.py:1237
+#: community/apps/application/serializers/chat_message_serializers.py:424
+#: community/apps/application/serializers/chat_serializers.py:294
+#: community/apps/application/serializers/chat_serializers.py:396
+msgid "Application does not exist"
+msgstr "应用不存在"
+
+#: apps/xpack/serializers/application_setting_serializer.py:116
+msgid "Float location field type error"
+msgstr "浮窗位置字段类型错误"
+
+#: apps/xpack/serializers/application_setting_serializer.py:122
+msgid "Custom theme field type error"
+msgstr "自定义主题字段类型错误"
+
+#: apps/xpack/serializers/auth_config_serializer.py:19
+msgid "LDAP server cannot be empty"
+msgstr "LDAP 服务器不能为空"
+
+#: apps/xpack/serializers/auth_config_serializer.py:20
+msgid "Base DN cannot be empty"
+msgstr "Base DN 不能为空"
+
+#: apps/xpack/serializers/auth_config_serializer.py:21
+msgid "Password cannot be empty"
+msgstr "密码不能为空"
+
+#: apps/xpack/serializers/auth_config_serializer.py:22
+msgid "OU cannot be empty"
+msgstr "OU 不能为空"
+
+#: apps/xpack/serializers/auth_config_serializer.py:23
+msgid "LDAP filter cannot be empty"
+msgstr "LDAP 过滤器不能为空"
+
+#: apps/xpack/serializers/auth_config_serializer.py:24
+msgid "LDAP mapping cannot be empty"
+msgstr "LDAP 映射不能为空"
+
+#: apps/xpack/serializers/auth_config_serializer.py:29
+msgid "Authorization address cannot be empty"
+msgstr "授权地址不能为空"
+
+#: apps/xpack/serializers/auth_config_serializer.py:31
+msgid "Token address cannot be empty"
+msgstr "令牌地址不能为空"
+
+#: apps/xpack/serializers/auth_config_serializer.py:33
+msgid "User information address cannot be empty"
+msgstr "用户信息地址不能为空"
+
+#: apps/xpack/serializers/auth_config_serializer.py:34
+msgid "Scope cannot be empty"
+msgstr "Scope 不能为空"
+
+#: apps/xpack/serializers/auth_config_serializer.py:35
+msgid "Client ID cannot be empty"
+msgstr "Client ID 不能为空"
+
+#: apps/xpack/serializers/auth_config_serializer.py:36
+msgid "Client secret cannot be empty"
+msgstr "Client secret 不能为空"
+
+#: apps/xpack/serializers/auth_config_serializer.py:38
+msgid "Redirect address cannot be empty"
+msgstr "重定向地址不能为空"
+
+#: apps/xpack/serializers/auth_config_serializer.py:39
+msgid "Field mapping cannot be empty"
+msgstr "字段映射不能为空"
+
+#: apps/xpack/serializers/auth_config_serializer.py:166
+#: apps/xpack/serializers/qr_login/qr_login.py:33
+#: community/apps/users/serializers/user_serializers.py:89
+msgid "The user has been disabled, please contact the administrator!"
+msgstr "用户已被禁用,请联系管理员!"
+
+#: apps/xpack/serializers/cas.py:32
+msgid "HttpClient query failed: "
+msgstr "HttpClient 查询失败:"
+
+#: apps/xpack/serializers/cas.py:56
+msgid "CAS authentication failed"
+msgstr "CAS 认证失败"
+
+#: apps/xpack/serializers/channel/chat_manage.py:76
+#: apps/xpack/serializers/channel/chat_manage.py:134
+msgid ""
+"Sorry, no relevant content was found. Please re-describe your problem or "
+"provide more information. "
+msgstr "抱歉,没有找到相关内容。请重新描述您的问题或提供更多信息。"
+
+#: apps/xpack/serializers/channel/chat_manage.py:82
+msgid "Think: "
+msgstr "思考过程: "
+
+#: apps/xpack/serializers/channel/chat_manage.py:85
+#: apps/xpack/serializers/channel/chat_manage.py:87
+msgid "AI reply: "
+msgstr "AI 回复: "
+
+#: apps/xpack/serializers/channel/chat_manage.py:298
+msgid "Thinking, please wait a moment!"
+msgstr "正在思考中,请稍后......"
+
+#: apps/xpack/serializers/channel/ding_talk.py:19
+#: apps/xpack/serializers/channel/wechat.py:89
+#: apps/xpack/serializers/channel/wechat.py:130
+#: apps/xpack/serializers/channel/wecom.py:76
+#: apps/xpack/serializers/channel/wecom.py:259
+msgid "The corresponding platform configuration was not found"
+msgstr "对应的平台配置未找到"
+
+#: apps/xpack/serializers/channel/ding_talk.py:27
+#: apps/xpack/serializers/channel/feishu.py:112
+msgid "Currently only text messages are supported"
+msgstr "目前只支持文本消息"
+
+#: apps/xpack/serializers/channel/ding_talk.py:91
+#: apps/xpack/serializers/channel/wechat.py:161
+#: apps/xpack/serializers/channel/wecom.py:189
+msgid "Image download failed, check network"
+msgstr "图片下载失败,请检查网络"
+
+#: apps/xpack/serializers/channel/ding_talk.py:92
+#: apps/xpack/serializers/channel/wechat.py:159
+#: apps/xpack/serializers/channel/wecom.py:185
+msgid "Please analyze the content of the image."
+msgstr "请分析图片内容。"
+
+#: apps/xpack/serializers/channel/ding_talk.py:95
+#, python-brace-format
+msgid "DingTalk application: {user}"
+msgstr "钉钉应用:{user}"
+
+#: apps/xpack/serializers/channel/ding_talk.py:106
+#: apps/xpack/serializers/channel/ding_talk.py:151
+msgid "Content generated by AI"
+msgstr "内容由 AI 生成"
+
+#: apps/xpack/serializers/channel/feishu.py:87
+#: apps/xpack/serializers/channel/feishu.py:107
+msgid "Lark application: "
+msgstr "飞书应用:"
+
+#: apps/xpack/serializers/channel/slack.py:116
+#| msgid "The corresponding platform configuration was not found"
+msgid "The corresponding platform configuration for Slack was not found"
+msgstr "对应的平台配置未找到"
+
+#: apps/xpack/serializers/channel/slack.py:206
+msgid "Thinking..."
+msgstr "思考中..."
+
+#: apps/xpack/serializers/channel/slack.py:321
+msgid "Invalid json format."
+msgstr "json 格式无效。"
+
+#: apps/xpack/serializers/channel/slack.py:327
+#| msgid "Invalid access_token"
+msgid "Invalid Slack request"
+msgstr "无效的 Slack 请求"
+
+#: apps/xpack/serializers/channel/slack.py:335
+#| msgid "DingTalk application: {user}"
+msgid "Slack application: {user}"
+msgstr "Slack 应用:{user}"
+
+#: apps/xpack/serializers/channel/slack.py:471
+msgid "Stop"
+msgstr "停止"
+
+#: apps/xpack/serializers/channel/wechat.py:141
+#, python-brace-format
+msgid "WeChat Official Account: {account}"
+msgstr "微信公众号:{account}"
+
+#: apps/xpack/serializers/channel/wechat.py:148
+#: apps/xpack/serializers/channel/wecom.py:171
+#: apps/xpack/serializers/channel/wecom.py:175
+msgid ""
+"The app does not enable the speech-to-text function or the speech-to-text "
+"function fails."
+msgstr "应用未开启语音转文字功能或语音转文字功能失败。"
+
+#: apps/xpack/serializers/channel/wechat.py:187
+msgid "Message types not supported yet"
+msgstr "暂时不支持该类型的消息"
+
+#: apps/xpack/serializers/channel/wechat.py:194
+msgid "Welcome to subscribe"
+msgstr "欢迎订阅"
+
+#: apps/xpack/serializers/channel/wecom.py:84
+msgid "Enterprise WeChat user: "
+msgstr "企业微信用户:"
+
+#: apps/xpack/serializers/channel/wecom.py:95
+msgid "Enterprise WeChat customer service: "
+msgstr "企业微信客服:"
+
+#: apps/xpack/serializers/channel/wecom.py:132
+#: apps/xpack/serializers/channel/wecom.py:148
+msgid "This type of message is not supported yet"
+msgstr "暂时不支持该类型的消息"
+
+#: apps/xpack/serializers/channel/wecom.py:254
+msgid "Signature missing"
+msgstr "签名缺失"
+
+#: apps/xpack/serializers/channel/wecom.py:266
+#: apps/xpack/serializers/channel/wecom.py:273
+#, python-brace-format
+msgid "An error occurred while processing the GET request {e}"
+msgstr "GET 请求处理时发生错误 {e}"
+
+#: apps/xpack/serializers/dataset_lark_serializer.py:58
+#: community/apps/dataset/serializers/dataset_serializers.py:82
+#: community/apps/dataset/serializers/dataset_serializers.py:214
+#: community/apps/dataset/serializers/dataset_serializers.py:295
+#: community/apps/dataset/serializers/dataset_serializers.py:296
+#: community/apps/dataset/serializers/dataset_serializers.py:357
+#: community/apps/dataset/serializers/dataset_serializers.py:358
+#: community/apps/dataset/serializers/dataset_serializers.py:502
+#: community/apps/dataset/serializers/dataset_serializers.py:503
+#: community/apps/dataset/serializers/dataset_serializers.py:568
+#: community/apps/dataset/serializers/dataset_serializers.py:607
+#: community/apps/dataset/serializers/dataset_serializers.py:701
+#: community/apps/dataset/serializers/dataset_serializers.py:933
+#: community/apps/dataset/serializers/dataset_serializers.py:934
+#: community/apps/dataset/serializers/document_serializers.py:816
+#: community/apps/function_lib/serializers/function_lib_serializer.py:141
+#: community/apps/function_lib/serializers/function_lib_serializer.py:186
+#: community/apps/function_lib/serializers/function_lib_serializer.py:203
+#: community/apps/function_lib/serializers/function_lib_serializer.py:262
+#: community/apps/setting/serializers/provider_serializers.py:76
+#: community/apps/setting/serializers/provider_serializers.py:127
+#: community/apps/setting/serializers/provider_serializers.py:174
+#: community/apps/setting/serializers/provider_serializers.py:256
+#: community/apps/setting/serializers/provider_serializers.py:277
+#: community/apps/setting/serializers/provider_serializers.py:301
+#: community/apps/setting/serializers/team_serializers.py:42
+#: community/apps/users/serializers/user_serializers.py:272
+msgid "user id"
+msgstr "用户 id"
+
+#: apps/xpack/serializers/dataset_lark_serializer.py:61
+#: apps/xpack/serializers/dataset_lark_serializer.py:112
+#: apps/xpack/serializers/dataset_lark_serializer.py:113
+#: apps/xpack/serializers/dataset_lark_serializer.py:367
+#: community/apps/dataset/serializers/dataset_serializers.py:137
+#: community/apps/dataset/serializers/dataset_serializers.py:201
+#: community/apps/dataset/serializers/dataset_serializers.py:221
+#: community/apps/dataset/serializers/dataset_serializers.py:244
+#: community/apps/dataset/serializers/dataset_serializers.py:273
+#: community/apps/dataset/serializers/dataset_serializers.py:274
+#: community/apps/dataset/serializers/dataset_serializers.py:291
+#: community/apps/dataset/serializers/dataset_serializers.py:292
+#: community/apps/dataset/serializers/dataset_serializers.py:319
+#: community/apps/dataset/serializers/dataset_serializers.py:353
+#: community/apps/dataset/serializers/dataset_serializers.py:354
+#: community/apps/dataset/serializers/dataset_serializers.py:382
+#: community/apps/dataset/serializers/dataset_serializers.py:383
+#: community/apps/dataset/serializers/dataset_serializers.py:498
+#: community/apps/dataset/serializers/dataset_serializers.py:499
+#: community/apps/dataset/serializers/dataset_serializers.py:527
+#: community/apps/dataset/serializers/dataset_serializers.py:528
+#: community/apps/dataset/serializers/dataset_serializers.py:542
+#: community/apps/dataset/serializers/dataset_serializers.py:907
+#: community/apps/dataset/serializers/dataset_serializers.py:908
+#: community/apps/dataset/serializers/dataset_serializers.py:929
+#: community/apps/dataset/serializers/dataset_serializers.py:930
+msgid "dataset name"
+msgstr "知识库名称"
+
+#: apps/xpack/serializers/dataset_lark_serializer.py:63
+#: apps/xpack/serializers/dataset_lark_serializer.py:114
+#: apps/xpack/serializers/dataset_lark_serializer.py:115
+#: apps/xpack/serializers/dataset_lark_serializer.py:369
+#: community/apps/dataset/serializers/dataset_serializers.py:142
+#: community/apps/dataset/serializers/dataset_serializers.py:206
+#: community/apps/dataset/serializers/dataset_serializers.py:226
+#: community/apps/dataset/serializers/dataset_serializers.py:249
+#: community/apps/dataset/serializers/dataset_serializers.py:278
+#: community/apps/dataset/serializers/dataset_serializers.py:279
+#: community/apps/dataset/serializers/dataset_serializers.py:293
+#: community/apps/dataset/serializers/dataset_serializers.py:294
+#: community/apps/dataset/serializers/dataset_serializers.py:324
+#: community/apps/dataset/serializers/dataset_serializers.py:355
+#: community/apps/dataset/serializers/dataset_serializers.py:356
+#: community/apps/dataset/serializers/dataset_serializers.py:384
+#: community/apps/dataset/serializers/dataset_serializers.py:385
+#: community/apps/dataset/serializers/dataset_serializers.py:500
+#: community/apps/dataset/serializers/dataset_serializers.py:501
+#: community/apps/dataset/serializers/dataset_serializers.py:529
+#: community/apps/dataset/serializers/dataset_serializers.py:530
+#: community/apps/dataset/serializers/dataset_serializers.py:544
+#: community/apps/dataset/serializers/dataset_serializers.py:909
+#: community/apps/dataset/serializers/dataset_serializers.py:910
+#: community/apps/dataset/serializers/dataset_serializers.py:931
+#: community/apps/dataset/serializers/dataset_serializers.py:932
+msgid "dataset description"
+msgstr "知识库描述"
+
+#: apps/xpack/serializers/dataset_lark_serializer.py:65
+#: apps/xpack/serializers/dataset_lark_serializer.py:118
+#: apps/xpack/serializers/dataset_lark_serializer.py:377
+msgid "app id"
+msgstr ""
+
+#: apps/xpack/serializers/dataset_lark_serializer.py:66
+#: apps/xpack/serializers/dataset_lark_serializer.py:119
+#: apps/xpack/serializers/dataset_lark_serializer.py:120
+#: apps/xpack/serializers/dataset_lark_serializer.py:378
+msgid "app secret"
+msgstr ""
+
+#: apps/xpack/serializers/dataset_lark_serializer.py:67
+#: apps/xpack/serializers/dataset_lark_serializer.py:121
+#: apps/xpack/serializers/dataset_lark_serializer.py:122
+#: apps/xpack/serializers/dataset_lark_serializer.py:132
+#: apps/xpack/serializers/dataset_lark_serializer.py:165
+#: apps/xpack/serializers/dataset_lark_serializer.py:379
+msgid "folder token"
+msgstr ""
+
+#: apps/xpack/serializers/dataset_lark_serializer.py:69
+#: apps/xpack/serializers/dataset_lark_serializer.py:116
+#: apps/xpack/serializers/dataset_lark_serializer.py:117
+#: community/apps/dataset/serializers/dataset_serializers.py:231
+#: community/apps/dataset/serializers/dataset_serializers.py:254
+#: community/apps/dataset/serializers/dataset_serializers.py:330
+#: community/apps/dataset/serializers/dataset_serializers.py:386
+#: community/apps/dataset/serializers/dataset_serializers.py:387
+#: community/apps/dataset/serializers/dataset_serializers.py:531
+#: community/apps/dataset/serializers/dataset_serializers.py:532
+msgid "embedding mode"
+msgstr "向量模型"
+
+#: apps/xpack/serializers/dataset_lark_serializer.py:79
+#: apps/xpack/serializers/dataset_lark_serializer.py:389
+msgid "Network error or folder token error!"
+msgstr "网络错误或文件夹 token 错误!"
+
+#: apps/xpack/serializers/dataset_lark_serializer.py:87
+#: apps/xpack/serializers/dataset_lark_serializer.py:444
+#: community/apps/dataset/serializers/dataset_serializers.py:424
+#: community/apps/dataset/serializers/dataset_serializers.py:476
+#: community/apps/dataset/serializers/dataset_serializers.py:865
+msgid "Knowledge base name duplicate!"
+msgstr "知识库名称重复!"
+
+#: apps/xpack/serializers/dataset_lark_serializer.py:130
+#: apps/xpack/serializers/dataset_lark_serializer.py:164
+#: apps/xpack/serializers/dataset_lark_serializer.py:201
+#: apps/xpack/serializers/dataset_lark_serializer.py:221
+#: apps/xpack/serializers/dataset_lark_serializer.py:346
+#: apps/xpack/serializers/dataset_lark_serializer.py:363
+#: community/apps/common/swagger_api/common_api.py:68
+#: community/apps/common/swagger_api/common_api.py:69
+#: community/apps/dataset/serializers/dataset_serializers.py:84
+#: community/apps/dataset/serializers/dataset_serializers.py:93
+#: community/apps/dataset/serializers/dataset_serializers.py:605
+#: community/apps/dataset/serializers/dataset_serializers.py:688
+#: community/apps/dataset/serializers/dataset_serializers.py:699
+#: community/apps/dataset/serializers/dataset_serializers.py:955
+#: community/apps/dataset/serializers/document_serializers.py:169
+#: community/apps/dataset/serializers/document_serializers.py:286
+#: community/apps/dataset/serializers/document_serializers.py:407
+#: community/apps/dataset/serializers/document_serializers.py:573
+#: community/apps/dataset/serializers/document_serializers.py:1055
+#: community/apps/dataset/serializers/document_serializers.py:1216
+#: community/apps/dataset/serializers/paragraph_serializers.py:96
+#: community/apps/dataset/serializers/paragraph_serializers.py:162
+#: community/apps/dataset/serializers/paragraph_serializers.py:195
+#: community/apps/dataset/serializers/paragraph_serializers.py:196
+#: community/apps/dataset/serializers/paragraph_serializers.py:208
+#: community/apps/dataset/serializers/paragraph_serializers.py:266
+#: community/apps/dataset/serializers/paragraph_serializers.py:285
+#: community/apps/dataset/serializers/paragraph_serializers.py:302
+#: community/apps/dataset/serializers/paragraph_serializers.py:459
+#: community/apps/dataset/serializers/paragraph_serializers.py:567
+#: community/apps/dataset/serializers/paragraph_serializers.py:638
+#: community/apps/dataset/serializers/paragraph_serializers.py:647
+#: community/apps/dataset/serializers/paragraph_serializers.py:715
+#: community/apps/dataset/serializers/paragraph_serializers.py:716
+#: community/apps/dataset/serializers/paragraph_serializers.py:732
+#: community/apps/dataset/serializers/problem_serializers.py:87
+#: community/apps/dataset/serializers/problem_serializers.py:112
+#: community/apps/dataset/serializers/problem_serializers.py:135
+#: community/apps/dataset/serializers/problem_serializers.py:192
+#: community/apps/dataset/swagger_api/problem_api.py:28
+#: community/apps/dataset/swagger_api/problem_api.py:29
+#: community/apps/dataset/swagger_api/problem_api.py:77
+#: community/apps/dataset/swagger_api/problem_api.py:96
+#: community/apps/dataset/swagger_api/problem_api.py:149
+#: community/apps/dataset/swagger_api/problem_api.py:177
+msgid "dataset id"
+msgstr "知识库 id"
+
+#: apps/xpack/serializers/dataset_lark_serializer.py:145
+#: apps/xpack/serializers/dataset_lark_serializer.py:146
+#: apps/xpack/serializers/dataset_lark_serializer.py:212
+#: community/apps/dataset/serializers/document_serializers.py:812
+#: community/apps/dataset/serializers/document_serializers.py:813
+#: community/apps/setting/swagger_api/provide_api.py:22
+#: community/apps/setting/swagger_api/provide_api.py:48
+#: community/apps/setting/swagger_api/provide_api.py:49
+#: community/apps/setting/swagger_api/provide_api.py:76
+#: community/apps/setting/swagger_api/provide_api.py:77
+#: community/apps/setting/swagger_api/provide_api.py:143
+#: community/apps/setting/swagger_api/provide_api.py:144
+msgid "name"
+msgstr "名称"
+
+#: apps/xpack/serializers/dataset_lark_serializer.py:147
+#: apps/xpack/serializers/dataset_lark_serializer.py:148
+#: apps/xpack/serializers/dataset_lark_serializer.py:211
+#: community/apps/application/serializers/application_serializers.py:257
+msgid "token"
+msgstr "token"
+
+#: apps/xpack/serializers/dataset_lark_serializer.py:149
+#: apps/xpack/serializers/dataset_lark_serializer.py:150
+#: apps/xpack/serializers/dataset_lark_serializer.py:210
+#: community/apps/application/flow/step_node/function_node/i_function_node.py:26
+#: community/apps/dataset/serializers/document_serializers.py:229
+#: community/apps/function_lib/serializers/function_lib_serializer.py:72
+#: community/apps/function_lib/swagger_api/function_lib_api.py:92
+#: community/apps/function_lib/swagger_api/function_lib_api.py:138
+#: community/apps/function_lib/swagger_api/function_lib_api.py:184
+#: community/apps/setting/serializers/team_serializers.py:59
+#: community/apps/setting/serializers/team_serializers.py:74
+#: community/apps/setting/serializers/team_serializers.py:85
+#: community/apps/setting/serializers/valid_serializers.py:37
+msgid "type"
+msgstr "类型"
+
+#: apps/xpack/serializers/dataset_lark_serializer.py:151
+#: apps/xpack/serializers/dataset_lark_serializer.py:152
+#| msgid "id does not exist"
+msgid "is exist"
+msgstr "是否存在"
+
+#: apps/xpack/serializers/dataset_lark_serializer.py:173
+#: apps/xpack/serializers/dataset_lark_serializer.py:230
+#: apps/xpack/task/sync.py:120
+#| msgid "Knowledge base id"
+msgid "Knowledge base not found!"
+msgstr "知识库未找到!"
+
+#: apps/xpack/serializers/dataset_lark_serializer.py:185
+#: apps/xpack/serializers/dataset_lark_serializer.py:252
+msgid "Failed to get lark document list!"
+msgstr "获取飞书文档列表失败!"
+
+#: apps/xpack/serializers/dataset_lark_serializer.py:262
+#: community/apps/common/swagger_api/common_api.py:70
+#: community/apps/common/swagger_api/common_api.py:71
+#: community/apps/dataset/serializers/document_serializers.py:293
+#: community/apps/dataset/serializers/document_serializers.py:386
+#: community/apps/dataset/serializers/document_serializers.py:490
+#: community/apps/dataset/serializers/document_serializers.py:572
+#: community/apps/dataset/serializers/document_serializers.py:581
+#: community/apps/dataset/serializers/document_serializers.py:586
+#: community/apps/dataset/serializers/document_serializers.py:854
+#: community/apps/dataset/serializers/document_serializers.py:982
+#: community/apps/dataset/serializers/document_serializers.py:1191
+#: community/apps/dataset/serializers/paragraph_serializers.py:98
+#: community/apps/dataset/serializers/paragraph_serializers.py:167
+#: community/apps/dataset/serializers/paragraph_serializers.py:212
+#: community/apps/dataset/serializers/paragraph_serializers.py:271
+#: community/apps/dataset/serializers/paragraph_serializers.py:286
+#: community/apps/dataset/serializers/paragraph_serializers.py:303
+#: community/apps/dataset/serializers/paragraph_serializers.py:426
+#: community/apps/dataset/serializers/paragraph_serializers.py:431
+#: community/apps/dataset/serializers/paragraph_serializers.py:462
+#: community/apps/dataset/serializers/paragraph_serializers.py:570
+#: community/apps/dataset/serializers/paragraph_serializers.py:642
+#: community/apps/dataset/serializers/paragraph_serializers.py:650
+#: community/apps/dataset/serializers/paragraph_serializers.py:682
+#: community/apps/dataset/serializers/paragraph_serializers.py:717
+#: community/apps/dataset/serializers/paragraph_serializers.py:718
+#: community/apps/dataset/serializers/paragraph_serializers.py:733
+#: community/apps/dataset/serializers/problem_serializers.py:58
+#: community/apps/dataset/swagger_api/problem_api.py:64
+msgid "document id"
+msgstr "文档 id"
+
+#: apps/xpack/serializers/dataset_lark_serializer.py:269
+#: apps/xpack/serializers/dataset_lark_serializer.py:289
+#: community/apps/dataset/serializers/document_serializers.py:497
+#: community/apps/dataset/serializers/document_serializers.py:593
+#: community/apps/dataset/serializers/document_serializers.py:1197
+msgid "document id not exist"
+msgstr "文档 id 不存在"
+
+#: apps/xpack/serializers/dataset_lark_serializer.py:271
+#| msgid "Synchronization is only supported for web site types"
+msgid "Synchronization is only supported for lark documents"
+msgstr "仅支持同步飞书文档"
+
+#: apps/xpack/serializers/dataset_lark_serializer.py:374
+#: community/apps/dataset/serializers/dataset_serializers.py:549
+#: community/apps/dataset/serializers/dataset_serializers.py:914
+#: community/apps/dataset/serializers/dataset_serializers.py:915
+msgid "application id list"
+msgstr "应用 id 列表"
+
+#: apps/xpack/serializers/dataset_lark_serializer.py:416
+#: community/apps/dataset/serializers/dataset_serializers.py:175
+#: community/apps/dataset/serializers/dataset_serializers.py:837
+#: community/apps/function_lib/serializers/function_lib_serializer.py:125
+#: community/apps/function_lib/swagger_api/function_lib_api.py:119
+#: community/apps/function_lib/swagger_api/function_lib_api.py:120
+#: community/apps/function_lib/swagger_api/function_lib_api.py:165
+#: community/apps/function_lib/swagger_api/function_lib_api.py:166
+#: community/apps/setting/swagger_api/provide_api.py:81
+msgid "permission"
+msgstr "权限"
+
+#: apps/xpack/serializers/dataset_lark_serializer.py:463
+#: community/apps/dataset/serializers/dataset_serializers.py:884
+#, python-brace-format
+msgid "Unknown application id {dataset_id}, cannot be associated"
+msgstr "未知的应用id {dataset_id},无法关联"
+
+#: apps/xpack/serializers/license_serializers.py:52
+msgid "license file"
+msgstr "License 文件"
+
+#: apps/xpack/serializers/license_tools.py:134
+msgid "License usage limit exceeded."
+msgstr "超出许可证使用限制。"
+
+#: apps/xpack/serializers/license_tools.py:158
+msgid "The network is busy, try again later."
+msgstr "网络繁忙,请稍后再试。"
+
+#: apps/xpack/serializers/oauth2.py:79 apps/xpack/serializers/oauth2.py:82
+msgid "Failed to obtain user information"
+msgstr "获取用户信息失败"
+
+#: apps/xpack/serializers/operate_log.py:36
+#: community/apps/application/serializers/application_statistics_serializers.py:27
+#: community/apps/application/serializers/chat_serializers.py:116
+#: community/apps/application/swagger_api/application_statistics_api.py:26
+msgid "Start time"
+msgstr "开始时间"
+
+#: apps/xpack/serializers/operate_log.py:37
+#: community/apps/application/serializers/application_statistics_serializers.py:28
+#: community/apps/application/serializers/chat_serializers.py:117
+#: community/apps/application/swagger_api/application_statistics_api.py:31
+#: community/apps/application/swagger_api/chat_api.py:270
+msgid "End time"
+msgstr "结束时间"
+
+#: apps/xpack/serializers/operate_log.py:38
+#: apps/xpack/swagger_api/operate_log.py:17
+#: apps/xpack/swagger_api/operate_log.py:18
+#: apps/xpack/swagger_api/operate_log.py:45
+#: apps/xpack/swagger_api/operate_log.py:46
+msgid "menu"
+msgstr "菜单"
+
+#: apps/xpack/serializers/operate_log.py:39
+#: apps/xpack/swagger_api/operate_log.py:20
+#: apps/xpack/swagger_api/operate_log.py:21
+#: apps/xpack/swagger_api/operate_log.py:48
+#: apps/xpack/swagger_api/operate_log.py:49
+#| msgid "Temperature"
+msgid "operate"
+msgstr "操作"
+
+#: apps/xpack/serializers/operate_log.py:40
+#: apps/xpack/swagger_api/operate_log.py:51
+#: apps/xpack/swagger_api/operate_log.py:52
+#| msgid "user id"
+msgid "user"
+msgstr "用户"
+
+#: apps/xpack/serializers/operate_log.py:41
+#: apps/xpack/swagger_api/operate_log.py:54
+#: apps/xpack/swagger_api/operate_log.py:55
+#: community/apps/dataset/serializers/document_serializers.py:417
+msgid "status"
+msgstr "状态"
+
+#: apps/xpack/serializers/operate_log.py:42
+#: apps/xpack/swagger_api/operate_log.py:57
+#: apps/xpack/swagger_api/operate_log.py:58
+#| msgid "Forum address"
+msgid "ip_address"
+msgstr "IP 地址"
+
+#: apps/xpack/serializers/platform_serializer.py:14
+msgid "app_id is required"
+msgstr "app_id 是必填项"
+
+#: apps/xpack/serializers/platform_serializer.py:15
+msgid "app_secret is required"
+msgstr "app_secret 是必填项"
+
+#: apps/xpack/serializers/platform_serializer.py:16
+msgid "token is required"
+msgstr "token 是必填项"
+
+#: apps/xpack/serializers/platform_serializer.py:17
+msgid "callback_url is required"
+msgstr "回调地址是必填项"
+
+#: apps/xpack/serializers/platform_serializer.py:23
+#: apps/xpack/serializers/platform_serializer.py:32
+msgid "App ID is required"
+msgstr "App ID 是必填项"
+
+#: apps/xpack/serializers/platform_serializer.py:24
+#: apps/xpack/serializers/platform_source_serializer.py:24
+msgid "Agent ID is required"
+msgstr "Agent ID 是必填项"
+
+#: apps/xpack/serializers/platform_serializer.py:25
+msgid "Secret is required"
+msgstr "Secret 是必填项"
+
+#: apps/xpack/serializers/platform_serializer.py:26
+msgid "Token is required"
+msgstr "Token 是必填项"
+
+#: apps/xpack/serializers/platform_serializer.py:28
+#: apps/xpack/serializers/platform_serializer.py:36
+#: apps/xpack/serializers/platform_serializer.py:42
+#: apps/xpack/serializers/platform_serializer.py:48
+#: apps/xpack/serializers/platform_source_serializer.py:19
+msgid "Callback URL is required"
+msgstr "Callback URL 是必填项"
+
+#: apps/xpack/serializers/platform_serializer.py:33
+#: apps/xpack/serializers/platform_source_serializer.py:18
+msgid "App Secret is required"
+msgstr "App Secret 是必填项"
+
+#: apps/xpack/serializers/platform_serializer.py:35
+msgid "Verification Token is required"
+msgstr "Verification Token 是必填项"
+
+#: apps/xpack/serializers/platform_serializer.py:40
+msgid "Client ID is required"
+msgstr "Client ID 是必填项"
+
+#: apps/xpack/serializers/platform_serializer.py:41
+msgid "Client Secret is required"
+msgstr "Client Secret 是必填项"
+
+#: apps/xpack/serializers/platform_serializer.py:46
+#| msgid "Client Secret is required"
+msgid "Signing Secret is required"
+msgstr "Signing Secret 是必填项"
+
+#: apps/xpack/serializers/platform_serializer.py:47
+#| msgid "Token is required"
+msgid "Bot User Token is required"
+msgstr "Bot User Token 是必填项"
+
+#: apps/xpack/serializers/platform_serializer.py:68
+msgid "Check if the fields are correct"
+msgstr "检查字段是否正确"
+
+#: apps/xpack/serializers/platform_serializer.py:114
+#: apps/xpack/views/platform.py:85 apps/xpack/views/platform.py:101
+#: community/apps/application/flow/step_node/application_node/i_application_node.py:13
+#: community/apps/application/serializers/application_serializers.py:335
+#: community/apps/application/serializers/application_serializers.py:581
+#: community/apps/application/serializers/application_serializers.py:696
+#: community/apps/application/serializers/application_serializers.py:791
+#: community/apps/application/serializers/application_serializers.py:1230
+#: community/apps/application/serializers/application_serializers.py:1272
+#: community/apps/application/serializers/application_statistics_serializers.py:26
+#: community/apps/application/serializers/application_version_serializers.py:35
+#: community/apps/application/serializers/application_version_serializers.py:59
+#: community/apps/application/serializers/chat_message_serializers.py:207
+#: community/apps/application/serializers/chat_message_serializers.py:270
+#: community/apps/application/serializers/chat_serializers.py:77
+#: community/apps/application/serializers/chat_serializers.py:102
+#: community/apps/application/serializers/chat_serializers.py:119
+#: community/apps/application/serializers/chat_serializers.py:287
+#: community/apps/application/serializers/chat_serializers.py:363
+#: community/apps/application/serializers/chat_serializers.py:440
+#: community/apps/application/swagger_api/application_api.py:87
+#: community/apps/application/swagger_api/application_api.py:101
+#: community/apps/application/swagger_api/application_api.py:112
+#: community/apps/application/swagger_api/application_api.py:143
+#: community/apps/application/swagger_api/application_api.py:392
+#: community/apps/application/swagger_api/application_api.py:413
+#: community/apps/application/swagger_api/application_api.py:424
+#: community/apps/application/swagger_api/application_statistics_api.py:21
+#: community/apps/application/swagger_api/application_version_api.py:42
+#: community/apps/application/swagger_api/application_version_api.py:56
+#: community/apps/application/swagger_api/chat_api.py:23
+#: community/apps/application/swagger_api/chat_api.py:33
+#: community/apps/application/swagger_api/chat_api.py:167
+#: community/apps/application/swagger_api/chat_api.py:168
+#: community/apps/application/swagger_api/chat_api.py:199
+#: community/apps/application/swagger_api/chat_api.py:222
+#: community/apps/application/swagger_api/chat_api.py:249
+#: community/apps/application/swagger_api/chat_api.py:281
+#: community/apps/application/swagger_api/chat_api.py:350
+#: community/apps/application/swagger_api/chat_api.py:410
+#: community/apps/application/swagger_api/chat_api.py:427
+#: community/apps/application/swagger_api/chat_api.py:460
+#: community/apps/application/views/chat_views.py:477
+msgid "Application ID"
+msgstr "应用 ID"
+
+#: apps/xpack/serializers/platform_serializer.py:116
+msgid "Platform type, for example: wechat"
+msgstr "平台类型,例如:wechat"
+
+#: apps/xpack/serializers/platform_serializer.py:125
+#: apps/xpack/serializers/platform_serializer.py:126
+msgid "Platform type"
+msgstr "平台类型"
+
+#: apps/xpack/serializers/platform_serializer.py:128
+msgid "Status"
+msgstr "状态"
+
+#: apps/xpack/serializers/platform_serializer.py:138
+#: apps/xpack/serializers/platform_serializer.py:139
+msgid "Configuration information"
+msgstr "配置信息"
+
+#: apps/xpack/serializers/platform_serializer.py:191
+#, python-brace-format
+msgid "The platform configuration corresponding to {type} was not found"
+msgstr "未找到 {type} 对应的平台配置"
+
+#: apps/xpack/serializers/platform_source_serializer.py:23
+#: apps/xpack/serializers/platform_source_serializer.py:32
+msgid "Corp ID is required"
+msgstr "Corp ID 是必填项"
+
+#: apps/xpack/serializers/platform_source_serializer.py:28
+#: apps/xpack/serializers/platform_source_serializer.py:33
+msgid "App Key is required"
+msgstr "App Key 是必填项"
+
+#: apps/xpack/serializers/platform_source_serializer.py:78
+msgid "Configuration information is wrong and failed to save"
+msgstr "配置信息错误,保存失败"
+
+#: apps/xpack/serializers/platform_source_serializer.py:104
+msgid "Connection failed"
+msgstr "连接失败"
+
+#: apps/xpack/serializers/platform_source_serializer.py:123
+msgid "Platform does not exist"
+msgstr "平台不存在"
+
+#: apps/xpack/serializers/platform_source_serializer.py:134
+#| msgid "Unsupported file format"
+msgid "Unsupported platform type"
+msgstr "三方平台类型不支持"
+
+#: apps/xpack/serializers/qr_login/qr_login.py:28
+msgid "Team"
+msgstr "团队成员"
+
+#: apps/xpack/serializers/system_params_serializers.py:63
+msgid "theme"
+msgstr "主题"
+
+#: apps/xpack/serializers/system_params_serializers.py:70
+msgid "website icon"
+msgstr "网站图标"
+
+#: apps/xpack/serializers/system_params_serializers.py:77
+msgid "login logo"
+msgstr "登录logo"
+
+#: apps/xpack/serializers/system_params_serializers.py:84
+msgid "Login background image"
+msgstr "登录背景图"
+
+#: apps/xpack/serializers/system_params_serializers.py:91
+msgid "website title"
+msgstr "网站标题"
+
+#: apps/xpack/serializers/system_params_serializers.py:98
+msgid "website slogan"
+msgstr "网站标语"
+
+#: apps/xpack/serializers/system_params_serializers.py:105
+msgid "Show user manual"
+msgstr "是否显示用户手册"
+
+#: apps/xpack/serializers/system_params_serializers.py:112
+msgid "User manual address"
+msgstr "用户手册地址"
+
+#: apps/xpack/serializers/system_params_serializers.py:119
+msgid "Show forum"
+msgstr "是否显示论坛"
+
+#: apps/xpack/serializers/system_params_serializers.py:126
+msgid "Forum address"
+msgstr "论坛地址"
+
+#: apps/xpack/serializers/system_params_serializers.py:133
+msgid "Show project"
+msgstr "是否显示项目"
+
+#: apps/xpack/serializers/system_params_serializers.py:140
+msgid "Project address"
+msgstr "项目地址"
+
+#: apps/xpack/serializers/tools.py:58
+#, python-brace-format
+msgid ""
+"Thinking about 【{question}】...If you want me to continue answering, please "
+"reply {trigger_message}"
+msgstr ""
+"思考中【{question}】...如果您希望我继续回答,请回复“{trigger_message}”。"
+
+#: apps/xpack/serializers/tools.py:158
+msgid ""
+"\n"
+" ------------\n"
+"[To be continued, reply \"Continue to answer the question]"
+msgstr ""
+"\n"
+" ------------\n"
+"【未完待续,回复“问题继续回答】"
+
+#: apps/xpack/serializers/tools.py:238
+#, python-brace-format
+msgid ""
+"To be continued, reply \"{trigger_message}\" to continue answering the "
+"question"
+msgstr "【未完待续,回复“{trigger_message}”继续回答问题】"
+
+#: apps/xpack/swagger_api/application_setting_api.py:79
+msgid "Custom theme {theme_color: , header_font_color: }"
+msgstr "自定义主题 {theme_color:, header_font_color: }"
+
+#: apps/xpack/swagger_api/application_setting_api.py:93
+msgid "Float location {top: 0, left: 0}"
+msgstr "浮窗位置 {top: 0, left: 0}"
+
+#: apps/xpack/swagger_api/application_setting_api.py:101
+#: apps/xpack/swagger_api/application_setting_api.py:102
+#: apps/xpack/swagger_api/auth_api.py:10 apps/xpack/swagger_api/auth_api.py:11
+#: apps/xpack/swagger_api/auth_api.py:81 apps/xpack/swagger_api/auth_api.py:82
+msgid "Authentication configuration"
+msgstr "认证配置"
+
+#: apps/xpack/swagger_api/application_setting_api.py:106
+#: apps/xpack/swagger_api/application_setting_api.py:107
+#: apps/xpack/swagger_api/auth_api.py:15 apps/xpack/swagger_api/auth_api.py:16
+#: apps/xpack/swagger_api/auth_api.py:30 apps/xpack/swagger_api/auth_api.py:87
+#: apps/xpack/swagger_api/auth_api.py:88 apps/xpack/views/auth.py:27
+#: apps/xpack/views/auth.py:28
+msgid "Authentication type"
+msgstr "认证类型"
+
+#: apps/xpack/swagger_api/application_setting_api.py:109
+#: apps/xpack/swagger_api/application_setting_api.py:110
+#: apps/xpack/swagger_api/auth_api.py:18 apps/xpack/swagger_api/auth_api.py:19
+#: apps/xpack/swagger_api/auth_api.py:93 apps/xpack/swagger_api/auth_api.py:94
+msgid "Configuration"
+msgstr "配置"
+
+#: apps/xpack/swagger_api/application_setting_api.py:112
+#: apps/xpack/swagger_api/application_setting_api.py:113
+#: apps/xpack/swagger_api/auth_api.py:21 apps/xpack/swagger_api/auth_api.py:22
+#: community/apps/common/swagger_api/common_api.py:72
+#: community/apps/common/swagger_api/common_api.py:73
+#: community/apps/dataset/serializers/document_serializers.py:819
+#: community/apps/dataset/serializers/document_serializers.py:820
+#: community/apps/dataset/serializers/document_serializers.py:838
+#: community/apps/dataset/serializers/document_serializers.py:839
+#: community/apps/dataset/serializers/paragraph_serializers.py:57
+#: community/apps/dataset/serializers/paragraph_serializers.py:71
+#: community/apps/dataset/serializers/paragraph_serializers.py:719
+#: community/apps/dataset/serializers/paragraph_serializers.py:720
+#: community/apps/dataset/swagger_api/problem_api.py:130
+#: community/apps/function_lib/serializers/function_lib_serializer.py:110
+#: community/apps/function_lib/serializers/function_lib_serializer.py:129
+#: community/apps/function_lib/serializers/function_lib_serializer.py:139
+#: community/apps/function_lib/swagger_api/function_lib_api.py:121
+#: community/apps/function_lib/swagger_api/function_lib_api.py:122
+#: community/apps/function_lib/swagger_api/function_lib_api.py:167
+#: community/apps/function_lib/swagger_api/function_lib_api.py:168
+#: community/apps/setting/serializers/team_serializers.py:46
+#: community/apps/users/serializers/user_serializers.py:473
+#: community/apps/users/serializers/user_serializers.py:496
+#: community/apps/users/serializers/user_serializers.py:584
+#: community/apps/users/serializers/user_serializers.py:585
+#: community/apps/users/serializers/user_serializers.py:721
+#: community/apps/users/serializers/user_serializers.py:737
+#: community/apps/users/serializers/user_serializers.py:738
+msgid "Is active"
+msgstr "是否可用"
+
+#: apps/xpack/swagger_api/auth_api.py:37
+#| msgid "Form Configuration"
+msgid "Wecom configuration"
+msgstr "企业微信配置"
+
+#: apps/xpack/swagger_api/auth_api.py:38
+#| msgid "Get function details"
+msgid "Wecom configuration details"
+msgstr "企业微信配置详情"
+
+#: apps/xpack/swagger_api/auth_api.py:40 apps/xpack/swagger_api/auth_api.py:53
+msgid "Corp ID"
+msgstr ""
+
+#: apps/xpack/swagger_api/auth_api.py:41
+msgid "Agent ID"
+msgstr ""
+
+#: apps/xpack/swagger_api/auth_api.py:42 apps/xpack/swagger_api/auth_api.py:55
+#: apps/xpack/swagger_api/auth_api.py:67
+#| msgid "App Secret is required"
+msgid "App Secret"
+msgstr "App Secret"
+
+#: apps/xpack/swagger_api/auth_api.py:43 apps/xpack/swagger_api/auth_api.py:56
+#: apps/xpack/swagger_api/auth_api.py:68
+#| msgid "Callback URL is required"
+msgid "Callback URL"
+msgstr "Callback URL"
+
+#: apps/xpack/swagger_api/auth_api.py:50
+#| msgid "Configuration"
+msgid "Dingtalk configuration"
+msgstr "钉钉配置"
+
+#: apps/xpack/swagger_api/auth_api.py:51
+#| msgid "Get application details"
+msgid "Dingtalk configuration details"
+msgstr "钉钉配置详情"
+
+#: apps/xpack/swagger_api/auth_api.py:54 apps/xpack/swagger_api/auth_api.py:66
+msgid "App Key"
+msgstr ""
+
+#: apps/xpack/swagger_api/auth_api.py:63
+#| msgid "Form Configuration"
+msgid "Feishu configuration"
+msgstr "飞书配置"
+
+#: apps/xpack/swagger_api/auth_api.py:64
+#| msgid "Get function details"
+msgid "Feishu configuration details"
+msgstr "飞书配置详情"
+
+#: apps/xpack/swagger_api/license_api.py:22
+msgid "license status"
+msgstr "License 状态"
+
+#: apps/xpack/swagger_api/license_api.py:24
+msgid ""
+"License status, possible values are: valid, invalid, expired, which "
+"respectively represent: valid, invalid, expired"
+msgstr ""
+"license状态,可能值为:valid、invalid、expired,分别代表:有效、无效、已过期"
+
+#: apps/xpack/swagger_api/license_api.py:26
+msgid "license details"
+msgstr "License 详情"
+
+#: apps/xpack/swagger_api/license_api.py:30
+msgid "customer name"
+msgstr "客户名称"
+
+#: apps/xpack/swagger_api/license_api.py:31
+msgid "customer name. For example: *** company."
+msgstr "客户名称。例如:***公司。"
+
+#: apps/xpack/swagger_api/license_api.py:33
+msgid "independent software vendor"
+msgstr "独立软件供应商"
+
+#: apps/xpack/swagger_api/license_api.py:35
+msgid ""
+"Independent Software Vendor. For example: *** Company, suitable for the "
+"embedded version of the product."
+msgstr "独立软件供应商。例如:***公司,适用于产品的嵌入式版本。"
+
+#: apps/xpack/swagger_api/license_api.py:37
+msgid "Authorization deadline."
+msgstr "授权截止时间"
+
+#: apps/xpack/swagger_api/license_api.py:39
+msgid ""
+"Authorization deadline. For example: 2020-12-31, this license will expire on "
+"2021-01-01."
+msgstr "授权截止时间。例如:2020-12-31,此license将在2021-01-01到期。"
+
+#: apps/xpack/swagger_api/license_api.py:41
+msgid "product name."
+msgstr "产品名称"
+
+#: apps/xpack/swagger_api/license_api.py:43
+msgid "Product name. For example: JumpServer, CMP, etc."
+msgstr "产品名称。例如:CMP、KO、JS、MS。"
+
+#: apps/xpack/swagger_api/license_api.py:45
+msgid "product version."
+msgstr "产品版本"
+
+#: apps/xpack/swagger_api/license_api.py:47
+msgid "Product version. For example: JumpServer 2.0, CMP 1.0, etc."
+msgstr "产品版本。例如:Standard、Enterprise,代表标准版、企业版。"
+
+#: apps/xpack/swagger_api/license_api.py:49
+msgid "license version."
+msgstr "License 版本"
+
+#: apps/xpack/swagger_api/license_api.py:51
+msgid "License version. For example: 1.0, 2.0, etc."
+msgstr "License版本。例如:1.0、2.0、3.0等。"
+
+#: apps/xpack/swagger_api/license_api.py:53
+msgid "authorization quantity."
+msgstr "认证数量"
+
+#: apps/xpack/swagger_api/license_api.py:55
+msgid ""
+"Authorization quantity. For example: 100, this license can be used by 100 "
+"users."
+msgstr "授权数量。例如:cmp授权的cpu数量,或JS授权的资产数量。"
+
+#: apps/xpack/swagger_api/license_api.py:57
+msgid "Serial number, the unique identifier of the License."
+msgstr "序列号,License唯一标识。"
+
+#: apps/xpack/swagger_api/license_api.py:59
+msgid ""
+"Serial number, the unique identifier of the license. The customer support "
+"portal will save the serial number after generating the license. If the "
+"serial number is not recorded in the customer support portal, the license "
+"will be regarded as an unknown source."
+msgstr ""
+"序列号,License唯一标识。客户支持门户生成License后会保存序列号,如果序列号在"
+"客户支持门户中没有记录,则此License将被视为未知来源。"
+
+#: apps/xpack/swagger_api/license_api.py:61
+msgid "remarks"
+msgstr "备注"
+
+#: apps/xpack/swagger_api/license_api.py:63
+msgid ""
+"Remarks, record additional information, length limit is 50. For example, a "
+"customer purchases two identical JumpServer subscriptions and uses them in "
+"different computer rooms respectively. You can use this field to note the A "
+"computer room and B computer room to help distinguish the licenses."
+msgstr ""
+"备注,记录额外的信息,长度限制50。例如某个客户买了两个同样的JumpServer订阅分"
+"别在不同机房使用,可以用这个字段备注A机房B机房,帮助区别License。"
+
+#: apps/xpack/swagger_api/operate_log.py:12
+#: apps/xpack/swagger_api/operate_log.py:13
+#: apps/xpack/swagger_api/operate_log.py:38
+#: apps/xpack/swagger_api/operate_log.py:39 apps/xpack/views/operate_log.py:24
+#: apps/xpack/views/operate_log.py:36
+msgid "Operate log"
+msgstr "操作日志"
+
+#: apps/xpack/swagger_api/operate_log.py:23
+#: apps/xpack/swagger_api/operate_log.py:24
+msgid "menu_label"
+msgstr "操作菜单"
+
+#: apps/xpack/swagger_api/operate_log.py:26
+#: apps/xpack/swagger_api/operate_log.py:27
+msgid "operate_label"
+msgstr "操作"
+
+#: apps/xpack/swagger_api/operate_log.py:42
+#: apps/xpack/swagger_api/operate_log.py:43
+#: community/apps/dataset/serializers/dataset_serializers.py:104
+msgid "id"
+msgstr ""
+
+#: apps/xpack/swagger_api/operate_log.py:60
+#: apps/xpack/swagger_api/operate_log.py:61
+#| msgid "license details"
+msgid "details"
+msgstr "详情"
+
+#: apps/xpack/views/application_setting_views.py:22
+#: apps/xpack/views/application_setting_views.py:23
+#| msgid "Pro/Modify Application Settings"
+msgid "Modify Application Settings"
+msgstr "修改应用设置"
+
+#: apps/xpack/views/application_setting_views.py:24
+#: apps/xpack/views/application_setting_views.py:40
+msgid "Pro/Application/Public Access"
+msgstr "专业版/应用/公共访问"
+
+#: apps/xpack/views/application_setting_views.py:37
+#: apps/xpack/views/application_setting_views.py:38
+#| msgid "Pro/Get Application Settings"
+msgid "Get Application Settings"
+msgstr "获取应用设置"
+
+#: apps/xpack/views/auth.py:29
+msgid "Authentication"
+msgstr "认证"
+
+#: apps/xpack/views/auth.py:40 apps/xpack/views/auth.py:41
+msgid "Add or modify authentication configuration"
+msgstr "添加或修改认证信息"
+
+#: apps/xpack/views/auth.py:44 apps/xpack/views/auth.py:58
+#: apps/xpack/views/auth.py:72
+msgid "System settings/login authentication"
+msgstr "系统设置/登录认证"
+
+#: apps/xpack/views/auth.py:55 apps/xpack/views/auth.py:56
+msgid "Get authentication configuration"
+msgstr "获取认证配置"
+
+#: apps/xpack/views/auth.py:69 apps/xpack/views/auth.py:70
+msgid "test connection"
+msgstr "测试连接"
+
+#: apps/xpack/views/auth.py:96 apps/xpack/views/auth.py:97
+#: community/apps/users/views/user.py:173
+#: community/apps/users/views/user.py:174
+msgid "Log in"
+msgstr "登录"
+
+#: apps/xpack/views/auth.py:101 apps/xpack/views/auth.py:114
+#: apps/xpack/views/auth.py:130 apps/xpack/views/auth.py:146
+#: apps/xpack/views/auth.py:207 apps/xpack/views/auth.py:224
+#: apps/xpack/views/auth.py:242 apps/xpack/views/auth.py:260
+#: apps/xpack/views/auth.py:278 apps/xpack/views/auth.py:296
+msgid "Three-party login"
+msgstr "三方登录"
+
+#: apps/xpack/views/auth.py:111 apps/xpack/views/auth.py:112
+msgid "CAS login"
+msgstr "CAS 登录"
+
+#: apps/xpack/views/auth.py:127 apps/xpack/views/auth.py:128
+msgid "OIDC login"
+msgstr "OIDC 登录"
+
+#: apps/xpack/views/auth.py:143 apps/xpack/views/auth.py:144
+msgid "OAuth2 login"
+msgstr "OAuth2 登录"
+
+#: apps/xpack/views/auth.py:160 apps/xpack/views/auth.py:161
+#: apps/xpack/views/auth.py:162 apps/xpack/views/auth.py:170
+#: apps/xpack/views/auth.py:194 apps/xpack/views/auth.py:195
+#: apps/xpack/views/auth.py:196
+msgid "Get platform information"
+msgstr "获取平台信息"
+
+#: apps/xpack/views/auth.py:167 apps/xpack/views/auth.py:168
+msgid "Modify platform information"
+msgstr "修改平台信息"
+
+#: apps/xpack/views/auth.py:175 apps/xpack/views/auth.py:176
+#: apps/xpack/views/auth.py:178
+msgid "Test platform connection"
+msgstr "测试平台连接"
+
+#: apps/xpack/views/auth.py:185 apps/xpack/views/auth.py:186
+msgid "Scan code login type"
+msgstr "扫码登录类型"
+
+#: apps/xpack/views/auth.py:187
+msgid "Scan code to log in"
+msgstr "扫码登录"
+
+#: apps/xpack/views/auth.py:204 apps/xpack/views/auth.py:205
+msgid "DingTalk callback"
+msgstr "钉钉回调"
+
+#: apps/xpack/views/auth.py:221 apps/xpack/views/auth.py:222
+#| msgid "DingTalk callback"
+msgid "DingTalk OAuth2 callback"
+msgstr "钉钉 OAuth2 回调"
+
+#: apps/xpack/views/auth.py:239 apps/xpack/views/auth.py:240
+msgid "Lark callback"
+msgstr "飞书回调"
+
+#: apps/xpack/views/auth.py:257 apps/xpack/views/auth.py:258
+#| msgid "Lark callback"
+msgid "Lark OAuth2 callback"
+msgstr "飞书 OAuth2 回调"
+
+#: apps/xpack/views/auth.py:275 apps/xpack/views/auth.py:276
+msgid "Wecom callback"
+msgstr "企业微信回调"
+
+#: apps/xpack/views/auth.py:293 apps/xpack/views/auth.py:294
+#| msgid "Wecom callback"
+msgid "Wecom OAuth2 callback"
+msgstr "企业微信 OAuth2 回调"
+
+#: apps/xpack/views/dataset_lark_views.py:22
+#: apps/xpack/views/dataset_lark_views.py:23
+#| msgid "Create a knowledge base"
+msgid "Create a lark knowledge base"
+msgstr "创建飞书知识库"
+
+#: apps/xpack/views/dataset_lark_views.py:26
+#: apps/xpack/views/dataset_lark_views.py:40
+#: community/apps/dataset/views/dataset.py:39
+#: community/apps/dataset/views/dataset.py:62
+#: community/apps/dataset/views/dataset.py:82
+#: community/apps/dataset/views/dataset.py:98
+#: community/apps/dataset/views/dataset.py:109
+#: community/apps/dataset/views/dataset.py:123
+#: community/apps/dataset/views/dataset.py:137
+#: community/apps/dataset/views/dataset.py:157
+#: community/apps/dataset/views/dataset.py:172
+#: community/apps/dataset/views/dataset.py:187
+#: community/apps/dataset/views/dataset.py:202
+#: community/apps/dataset/views/dataset.py:217
+#: community/apps/dataset/views/dataset.py:231
+#: community/apps/dataset/views/dataset.py:250
+msgid "Knowledge Base"
+msgstr "知识库"
+
+#: apps/xpack/views/dataset_lark_views.py:36
+#: apps/xpack/views/dataset_lark_views.py:37
+#| msgid "Create a knowledge base"
+msgid "Update the lark knowledge base"
+msgstr "更新飞书知识库"
+
+#: apps/xpack/views/dataset_lark_views.py:53
+#: apps/xpack/views/dataset_lark_views.py:54
+#| msgid "Get a list of applications available in the knowledge base"
+msgid "Get the list of documents in the lark knowledge base"
+msgstr "获取飞书知识库文档列表"
+
+#: apps/xpack/views/dataset_lark_views.py:57
+#: apps/xpack/views/dataset_lark_views.py:74
+#: apps/xpack/views/dataset_lark_views.py:90
+#: apps/xpack/views/dataset_lark_views.py:110
+#: community/apps/dataset/views/document.py:34
+#: community/apps/dataset/views/document.py:47
+#: community/apps/dataset/views/document.py:62
+#: community/apps/dataset/views/document.py:81
+#: community/apps/dataset/views/document.py:102
+#: community/apps/dataset/views/document.py:123
+#: community/apps/dataset/views/document.py:137
+#: community/apps/dataset/views/document.py:158
+#: community/apps/dataset/views/document.py:178
+#: community/apps/dataset/views/document.py:193
+#: community/apps/dataset/views/document.py:208
+#: community/apps/dataset/views/document.py:224
+#: community/apps/dataset/views/document.py:244
+#: community/apps/dataset/views/document.py:265
+#: community/apps/dataset/views/document.py:284
+#: community/apps/dataset/views/document.py:306
+#: community/apps/dataset/views/document.py:324
+#: community/apps/dataset/views/document.py:349
+#: community/apps/dataset/views/document.py:364
+#: community/apps/dataset/views/document.py:380
+#: community/apps/dataset/views/document.py:396
+#: community/apps/dataset/views/document.py:413
+#: community/apps/dataset/views/document.py:429
+#: community/apps/dataset/views/document.py:442
+#: community/apps/dataset/views/document.py:467
+msgid "Knowledge Base/Documentation"
+msgstr "知识库/文档"
+
+#: apps/xpack/views/dataset_lark_views.py:70
+#: apps/xpack/views/dataset_lark_views.py:71
+#| msgid "Create a knowledge base"
+msgid "Import documents to the lark knowledge base"
+msgstr "导入文档到飞书知识库"
+
+#: apps/xpack/views/dataset_lark_views.py:86
+#: apps/xpack/views/dataset_lark_views.py:87
+#| msgid "Create document"
+msgid "Synchronize lark document"
+msgstr "同步飞书文档"
+
+#: apps/xpack/views/dataset_lark_views.py:104
+#: apps/xpack/views/dataset_lark_views.py:105
+#| msgid "Batch sync documents"
+msgid "Batch sync lark documents"
+msgstr "批量同步飞书文档"
+
+#: apps/xpack/views/display.py:17 apps/xpack/views/display.py:18
+msgid "View appearance settings"
+msgstr "查看外观设置"
+
+#: apps/xpack/views/display.py:19 apps/xpack/views/display.py:33
+msgid "System Settings/Appearance Settings"
+msgstr "系统设置/外观设置"
+
+#: apps/xpack/views/display.py:30 apps/xpack/views/display.py:31
+msgid "Update appearance settings"
+msgstr "更新外观设置"
+
+#: apps/xpack/views/license.py:29 apps/xpack/views/license.py:30
+msgid "Get license information"
+msgstr "获取 License 信息"
+
+#: apps/xpack/views/license.py:38 apps/xpack/views/license.py:39
+msgid "Update license information"
+msgstr "更新 License 信息"
+
+#: apps/xpack/views/license.py:44
+msgid "upload file"
+msgstr "上传文件"
+
+#: apps/xpack/views/operate_log.py:21 apps/xpack/views/operate_log.py:22
+#| msgid "Get model parameter form"
+msgid "Get menu operate log"
+msgstr "获取菜单操作日志"
+
+#: apps/xpack/views/operate_log.py:33 apps/xpack/views/operate_log.py:34
+#| msgid "Get model parameter form"
+msgid "Get operate log"
+msgstr "获取操作日志"
+
+#: apps/xpack/views/platform.py:56 apps/xpack/views/platform.py:57
+msgid "Get platform configuration"
+msgstr "获取平台配置"
+
+#: apps/xpack/views/platform.py:59 apps/xpack/views/platform.py:67
+msgid "Application/application access"
+msgstr "应用/应用访问"
+
+#: apps/xpack/views/platform.py:63 apps/xpack/views/platform.py:64
+msgid "Update platform configuration"
+msgstr "更新平台配置"
+
+#: apps/xpack/views/platform.py:80 apps/xpack/views/platform.py:81
+msgid "Get platform status"
+msgstr "获取平台状态"
+
+#: apps/xpack/views/platform.py:86
+msgid "Application/Get platform status"
+msgstr "应用/获取平台状态"
+
+#: apps/xpack/views/platform.py:96 apps/xpack/views/platform.py:97
+msgid "Update platform status"
+msgstr "更新平台状态"
+
+#: apps/xpack/views/platform.py:103
+msgid "Application/Update platform status"
+msgstr "应用/更新平台状态"
+
+#: apps/xpack/views/system_api_key_views.py:28
+#: apps/xpack/views/system_api_key_views.py:29
+msgid "Get personal system API_KEY list"
+msgstr "获取个人系统 API_KEY 列表"
+
+#: apps/xpack/views/system_api_key_views.py:30
+#: apps/xpack/views/system_api_key_views.py:39
+#: apps/xpack/views/system_api_key_views.py:53
+#: apps/xpack/views/system_api_key_views.py:62
+msgid "Personal system/API_KEY"
+msgstr "个人系统/API_KEY"
+
+#: apps/xpack/views/system_api_key_views.py:37
+#: apps/xpack/views/system_api_key_views.py:38
+msgid "Update personal system API_KEY"
+msgstr "更新个人系统 API_KEY"
+
+#: apps/xpack/views/system_api_key_views.py:51
+#: apps/xpack/views/system_api_key_views.py:52
+msgid "Delete personal system API_KEY"
+msgstr "删除个人系统 API_KEY"
+
+#: apps/xpack/views/system_api_key_views.py:60
+#: apps/xpack/views/system_api_key_views.py:61
+msgid "Add personal system API_KEY"
+msgstr "添加个人系统 API_KEY"
+
+#: community/apps/application/chat_pipeline/step/chat_step/i_chat_step.py:27
+msgid "Model type error"
+msgstr "模型类型错误"
+
+#: community/apps/application/chat_pipeline/step/chat_step/i_chat_step.py:37
+#: community/apps/common/field/common.py:21
+#: community/apps/common/field/common.py:34
+msgid "Message type error"
+msgstr "消息类型错误"
+
+#: community/apps/application/chat_pipeline/step/chat_step/i_chat_step.py:56
+msgid "Conversation list"
+msgstr "对话列表"
+
+#: community/apps/application/chat_pipeline/step/chat_step/i_chat_step.py:57
+#: community/apps/application/chat_pipeline/step/reset_problem_step/i_reset_problem_step.py:30
+#: community/apps/application/flow/step_node/ai_chat_step_node/i_chat_node.py:19
+#: community/apps/application/flow/step_node/image_generate_step_node/i_image_generate_node.py:13
+#: community/apps/application/flow/step_node/image_understand_step_node/i_image_understand_node.py:13
+#: community/apps/application/flow/step_node/question_node/i_question_node.py:19
+#: community/apps/application/flow/step_node/speech_to_text_step_node/i_speech_to_text_node.py:13
+#: community/apps/application/flow/step_node/text_to_speech_step_node/i_text_to_speech_node.py:13
+#: community/apps/application/serializers/application_serializers.py:72
+#: community/apps/application/serializers/chat_serializers.py:365
+#: community/apps/application/swagger_api/application_api.py:53
+#: community/apps/application/swagger_api/application_api.py:185
+#: community/apps/application/swagger_api/application_api.py:186
+#: community/apps/application/swagger_api/application_api.py:334
+#: community/apps/application/swagger_api/application_api.py:335
+msgid "Model id"
+msgstr "模型 id"
+
+#: community/apps/application/chat_pipeline/step/chat_step/i_chat_step.py:59
+#: community/apps/application/chat_pipeline/step/generate_human_message_step/i_generate_human_message_step.py:30
+msgid "Paragraph List"
+msgstr "段落列表"
+
+#: community/apps/application/chat_pipeline/step/chat_step/i_chat_step.py:61
+#: community/apps/application/serializers/chat_message_serializers.py:201
+#: community/apps/application/serializers/chat_message_serializers.py:253
+#: community/apps/application/serializers/chat_serializers.py:76
+#: community/apps/application/serializers/chat_serializers.py:240
+#: community/apps/application/serializers/chat_serializers.py:439
+#: community/apps/application/serializers/chat_serializers.py:531
+#: community/apps/application/serializers/chat_serializers.py:587
+#: community/apps/application/serializers/chat_serializers.py:613
+#: community/apps/application/serializers/chat_serializers.py:672
+#: community/apps/application/serializers/chat_serializers.py:712
+#: community/apps/application/swagger_api/chat_api.py:38
+#: community/apps/application/swagger_api/chat_api.py:76
+#: community/apps/application/swagger_api/chat_api.py:171
+#: community/apps/application/swagger_api/chat_api.py:172
+#: community/apps/application/swagger_api/chat_api.py:286
+#: community/apps/application/swagger_api/chat_api.py:355
+#: community/apps/application/swagger_api/chat_api.py:432
+#: community/apps/application/swagger_api/chat_api.py:465
+#: community/apps/application/views/chat_views.py:482
+msgid "Conversation ID"
+msgstr "对话 ID"
+
+#: community/apps/application/chat_pipeline/step/chat_step/i_chat_step.py:63
+#: community/apps/application/flow/step_node/application_node/i_application_node.py:15
+#: community/apps/application/serializers/chat_message_serializers.py:254
+#: community/apps/application/serializers/chat_serializers.py:240
+msgid "User Questions"
+msgstr "用户问题"
+
+#: community/apps/application/chat_pipeline/step/chat_step/i_chat_step.py:66
+msgid "Post-processor"
+msgstr "后置处理器"
+
+#: community/apps/application/chat_pipeline/step/chat_step/i_chat_step.py:69
+msgid "Completion Question"
+msgstr "补全问题"
+
+#: community/apps/application/chat_pipeline/step/chat_step/i_chat_step.py:71
+#: community/apps/application/serializers/chat_message_serializers.py:203
+msgid "Streaming Output"
+msgstr "流式输出"
+
+#: community/apps/application/chat_pipeline/step/chat_step/i_chat_step.py:72
+#: community/apps/application/serializers/chat_message_serializers.py:208
+#: community/apps/application/serializers/chat_message_serializers.py:271
+#: community/apps/application/serializers/chat_serializers.py:103
+msgid "Client id"
+msgstr "客户端 id"
+
+#: community/apps/application/chat_pipeline/step/chat_step/i_chat_step.py:73
+#: community/apps/application/serializers/chat_message_serializers.py:209
+#: community/apps/application/serializers/chat_message_serializers.py:272
+msgid "Client Type"
+msgstr "客户端类型"
+
+#: community/apps/application/chat_pipeline/step/chat_step/i_chat_step.py:76
+#: community/apps/application/chat_pipeline/step/generate_human_message_step/i_generate_human_message_step.py:46
+#: community/apps/application/swagger_api/application_api.py:262
+msgid "No reference segment settings"
+msgstr "未查询到引用分段"
+
+#: community/apps/application/chat_pipeline/step/chat_step/i_chat_step.py:78
+#: community/apps/application/chat_pipeline/step/reset_problem_step/i_reset_problem_step.py:31
+#: community/apps/application/chat_pipeline/step/search_dataset_step/i_search_dataset_step.py:48
+#: community/apps/application/serializers/application_serializers.py:70
+#: community/apps/application/serializers/application_serializers.py:511
+#: community/apps/application/serializers/application_serializers.py:582
+#: community/apps/application/serializers/application_serializers.py:627
+#: community/apps/application/serializers/application_serializers.py:697
+#: community/apps/application/serializers/application_serializers.py:718
+#: community/apps/application/serializers/application_serializers.py:792
+#: community/apps/application/serializers/application_serializers.py:1228
+#: community/apps/application/serializers/chat_serializers.py:118
+#: community/apps/application/serializers/chat_serializers.py:285
+#: community/apps/application/serializers/chat_serializers.py:338
+#: community/apps/application/serializers/chat_serializers.py:360
+#: community/apps/function_lib/serializers/function_lib_serializer.py:332
+#: community/apps/function_lib/serializers/function_lib_serializer.py:358
+#: community/apps/function_lib/serializers/function_lib_serializer.py:387
+msgid "User ID"
+msgstr "用户 ID"
+
+#: community/apps/application/chat_pipeline/step/chat_step/i_chat_step.py:81
+#| msgid "Model parameter settings"
+msgid "Model settings"
+msgstr "模型设置"
+
+#: community/apps/application/chat_pipeline/step/chat_step/i_chat_step.py:84
+#: community/apps/application/flow/step_node/ai_chat_step_node/i_chat_node.py:31
+#: community/apps/application/flow/step_node/image_generate_step_node/i_image_generate_node.py:29
+#: community/apps/application/flow/step_node/image_understand_step_node/i_image_understand_node.py:27
+#: community/apps/application/flow/step_node/question_node/i_question_node.py:27
+#: community/apps/application/flow/step_node/text_to_speech_step_node/i_text_to_speech_node.py:19
+#: community/apps/application/serializers/chat_serializers.py:382
+msgid "Model parameter settings"
+msgstr "模型参数设置"
+
+#: community/apps/application/chat_pipeline/step/chat_step/i_chat_step.py:91
+msgid "message type error"
+msgstr "消息类型错误"
+
+#: community/apps/application/chat_pipeline/step/chat_step/impl/base_chat_step.py:226
+#: community/apps/application/chat_pipeline/step/chat_step/impl/base_chat_step.py:271
+msgid ""
+"Sorry, the AI model is not configured. Please go to the application to set "
+"up the AI model first."
+msgstr "抱歉,没有配置 AI 模型,请先去应用中设置 AI 模型。"
+
+#: community/apps/application/chat_pipeline/step/generate_human_message_step/i_generate_human_message_step.py:27
+#: community/apps/application/chat_pipeline/step/reset_problem_step/i_reset_problem_step.py:25
+#: community/apps/application/chat_pipeline/step/search_dataset_step/i_search_dataset_step.py:25
+#: community/apps/application/serializers/chat_serializers.py:579
+msgid "question"
+msgstr "问题"
+
+#: community/apps/application/chat_pipeline/step/generate_human_message_step/i_generate_human_message_step.py:33
+#: community/apps/application/chat_pipeline/step/reset_problem_step/i_reset_problem_step.py:28
+msgid "History Questions"
+msgstr "历史问题"
+
+#: community/apps/application/chat_pipeline/step/generate_human_message_step/i_generate_human_message_step.py:35
+#: community/apps/application/flow/step_node/ai_chat_step_node/i_chat_node.py:25
+#: community/apps/application/flow/step_node/image_generate_step_node/i_image_generate_node.py:21
+#: community/apps/application/flow/step_node/image_understand_step_node/i_image_understand_node.py:18
+#: community/apps/application/flow/step_node/question_node/i_question_node.py:24
+#: community/apps/application/swagger_api/application_api.py:55
+#: community/apps/application/swagger_api/application_api.py:56
+#: community/apps/application/swagger_api/application_api.py:188
+#: community/apps/application/swagger_api/application_api.py:189
+#: community/apps/application/swagger_api/application_api.py:337
+#: community/apps/application/swagger_api/application_api.py:338
+msgid "Number of multi-round conversations"
+msgstr "多轮对话数量"
+
+#: community/apps/application/chat_pipeline/step/generate_human_message_step/i_generate_human_message_step.py:38
+msgid "Maximum length of the knowledge base paragraph"
+msgstr "最大携带知识库段落长度"
+
+#: community/apps/application/chat_pipeline/step/generate_human_message_step/i_generate_human_message_step.py:40
+#: community/apps/application/flow/step_node/ai_chat_step_node/i_chat_node.py:22
+#: community/apps/application/flow/step_node/image_understand_step_node/i_image_understand_node.py:16
+#: community/apps/application/flow/step_node/question_node/i_question_node.py:22
+#: community/apps/application/serializers/application_serializers.py:108
+#: community/apps/application/serializers/application_serializers.py:138
+#: community/apps/application/swagger_api/application_api.py:286
+#: community/apps/application/swagger_api/application_api.py:287
+msgid "Prompt word"
+msgstr "提示词"
+
+#: community/apps/application/chat_pipeline/step/generate_human_message_step/i_generate_human_message_step.py:42
+#: community/apps/application/swagger_api/application_api.py:300
+#: community/apps/application/swagger_api/application_api.py:301
+msgid "System prompt words (role)"
+msgstr "系统提示词(角色)"
+
+#: community/apps/application/chat_pipeline/step/generate_human_message_step/i_generate_human_message_step.py:44
+msgid "Completion problem"
+msgstr "补齐问题"
+
+#: community/apps/application/chat_pipeline/step/reset_problem_step/i_reset_problem_step.py:34
+#: community/apps/application/serializers/application_serializers.py:237
+msgid "Question completion prompt"
+msgstr "问题补全提示词"
+
+#: community/apps/application/chat_pipeline/step/reset_problem_step/impl/base_reset_problem_step.py:20
+#: community/apps/application/serializers/chat_message_serializers.py:99
+#: community/apps/application/swagger_api/application_api.py:210
+#: community/apps/application/swagger_api/application_api.py:355
+#, python-brace-format
+msgid ""
+"() contains the user's question. Answer the guessed user's question based on "
+"the context ({question}) Requirement: Output a complete question and put it "
+"in the tag"
+msgstr ""
+"()里面是用户问题,根据上下文回答揣测用户问题({question}) 要求: 输出一个补全问"
+"题,并且放在 标签中"
+
+#: community/apps/application/chat_pipeline/step/search_dataset_step/i_search_dataset_step.py:28
+msgid "System completes question text"
+msgstr "系统补全问题文本"
+
+#: community/apps/application/chat_pipeline/step/search_dataset_step/i_search_dataset_step.py:31
+#: community/apps/application/flow/step_node/search_dataset_node/i_search_dataset_node.py:39
+msgid "Dataset id list"
+msgstr "知识库 ID 列表"
+
+#: community/apps/application/chat_pipeline/step/search_dataset_step/i_search_dataset_step.py:34
+msgid "List of document ids to exclude"
+msgstr "要排除的文档 ID 列表"
+
+#: community/apps/application/chat_pipeline/step/search_dataset_step/i_search_dataset_step.py:37
+msgid "List of exclusion vector ids"
+msgstr "排除向量 ID 列表"
+
+#: community/apps/application/chat_pipeline/step/search_dataset_step/i_search_dataset_step.py:40
+#: community/apps/application/flow/step_node/reranker_node/i_reranker_node.py:21
+#: community/apps/application/flow/step_node/reranker_node/i_reranker_node.py:24
+#: community/apps/application/flow/step_node/search_dataset_node/i_search_dataset_node.py:24
+#: community/apps/application/serializers/application_serializers.py:121
+#: community/apps/application/serializers/chat_serializers.py:243
+#: community/apps/application/swagger_api/application_api.py:249
+#: community/apps/application/swagger_api/application_api.py:250
+msgid "Reference segment number"
+msgstr "引用分段数"
+
+#: community/apps/application/chat_pipeline/step/search_dataset_step/i_search_dataset_step.py:43
+#: community/apps/application/swagger_api/application_api.py:252
+#: community/apps/application/swagger_api/application_api.py:253
+msgid "Similarity"
+msgstr "相似度"
+
+#: community/apps/application/chat_pipeline/step/search_dataset_step/i_search_dataset_step.py:46
+#: community/apps/application/flow/step_node/search_dataset_node/i_search_dataset_node.py:30
+#: community/apps/application/serializers/application_serializers.py:129
+#: community/apps/application/serializers/application_serializers.py:590
+#: community/apps/dataset/serializers/dataset_serializers.py:576
+#| msgid "Retrieval pattern embedding|keywords|blend"
+msgid "The type only supports embedding|keywords|blend"
+msgstr "类型只支持 embedding|keywords|blend"
+
+#: community/apps/application/chat_pipeline/step/search_dataset_step/i_search_dataset_step.py:47
+#: community/apps/application/flow/step_node/search_dataset_node/i_search_dataset_node.py:31
+#: community/apps/application/serializers/application_serializers.py:130
+#: community/apps/application/serializers/application_serializers.py:591
+#: community/apps/application/swagger_api/application_api.py:259
+msgid "Retrieval Mode"
+msgstr "检索方式"
+
+#: community/apps/application/chat_pipeline/step/search_dataset_step/impl/base_search_dataset_step.py:31
+#: community/apps/application/serializers/application_serializers.py:84
+#: community/apps/application/serializers/application_serializers.py:1026
+#: community/apps/application/serializers/application_serializers.py:1036
+#: community/apps/application/serializers/application_serializers.py:1046
+#: community/apps/dataset/serializers/dataset_serializers.py:801
+#: community/apps/dataset/serializers/document_serializers.py:746
+#: community/apps/setting/models_provider/tools.py:23
+msgid "Model does not exist"
+msgstr "模型不存在"
+
+#: community/apps/application/chat_pipeline/step/search_dataset_step/impl/base_search_dataset_step.py:33
+#, python-brace-format
+msgid "No permission to use this model {model_name}"
+msgstr "无权使用此模型 {model_name}"
+
+#: community/apps/application/chat_pipeline/step/search_dataset_step/impl/base_search_dataset_step.py:41
+msgid ""
+"The vector model of the associated knowledge base is inconsistent and the "
+"segmentation cannot be recalled."
+msgstr "关联知识库的向量模型不一致,无法召回分段。"
+
+#: community/apps/application/chat_pipeline/step/search_dataset_step/impl/base_search_dataset_step.py:43
+msgid "The knowledge base setting is wrong, please reset the knowledge base"
+msgstr "知识库设置错误,请重新设置知识库!"
+
+#: community/apps/application/flow/step_node/ai_chat_step_node/i_chat_node.py:21
+#: community/apps/application/flow/step_node/image_understand_step_node/i_image_understand_node.py:15
+#: community/apps/application/flow/step_node/question_node/i_question_node.py:21
+msgid "Role Setting"
+msgstr "角色设置"
+
+#: community/apps/application/flow/step_node/ai_chat_step_node/i_chat_node.py:28
+#: community/apps/application/flow/step_node/direct_reply_node/i_reply_node.py:24
+#: community/apps/application/flow/step_node/function_lib_node/i_function_lib_node.py:29
+#: community/apps/application/flow/step_node/function_node/i_function_node.py:47
+#: community/apps/application/flow/step_node/image_generate_step_node/i_image_generate_node.py:26
+#: community/apps/application/flow/step_node/image_understand_step_node/i_image_understand_node.py:22
+#: community/apps/application/flow/step_node/question_node/i_question_node.py:26
+#: community/apps/application/flow/step_node/speech_to_text_step_node/i_speech_to_text_node.py:15
+#: community/apps/application/flow/step_node/text_to_speech_step_node/i_text_to_speech_node.py:15
+msgid "Whether to return content"
+msgstr "是否返回内容"
+
+#: community/apps/application/flow/step_node/ai_chat_step_node/i_chat_node.py:35
+msgid "Context Type"
+msgstr "上下文类型"
+
+#: community/apps/application/flow/step_node/application_node/i_application_node.py:16
+msgid "API Input Fields"
+msgstr "API 输入字段"
+
+#: community/apps/application/flow/step_node/application_node/i_application_node.py:18
+msgid "User Input Fields"
+msgstr "用户输入字段"
+
+#: community/apps/application/flow/step_node/application_node/i_application_node.py:19
+#: community/apps/application/flow/step_node/image_understand_step_node/i_image_understand_node.py:24
+#: community/apps/application/serializers/application_serializers.py:698
+#: community/apps/application/serializers/chat_message_serializers.py:274
+#: community/apps/function_lib/serializers/function_lib_serializer.py:359
+msgid "picture"
+msgstr "图片"
+
+#: community/apps/application/flow/step_node/application_node/i_application_node.py:20
+#: community/apps/application/flow/step_node/document_extract_node/i_document_extract_node.py:13
+#: community/apps/application/serializers/chat_message_serializers.py:275
+msgid "document"
+msgstr "文档"
+
+#: community/apps/application/flow/step_node/application_node/i_application_node.py:21
+#: community/apps/application/serializers/chat_message_serializers.py:276
+msgid "Audio"
+msgstr "音频"
+
+#: community/apps/application/flow/step_node/application_node/i_application_node.py:23
+#: community/apps/application/serializers/chat_message_serializers.py:278
+msgid "Child Nodes"
+msgstr "子节点"
+
+#: community/apps/application/flow/step_node/application_node/i_application_node.py:24
+#: community/apps/application/flow/step_node/form_node/i_form_node.py:21
+msgid "Form Data"
+msgstr "表单数据"
+
+#: community/apps/application/flow/step_node/application_node/i_application_node.py:58
+msgid ""
+"Parameter value error: The uploaded document lacks file_id, and the document "
+"upload fails"
+msgstr "参数值错误: 上传的文档中缺少 file_id,文档上传失败"
+
+#: community/apps/application/flow/step_node/application_node/i_application_node.py:67
+msgid ""
+"Parameter value error: The uploaded image lacks file_id, and the image "
+"upload fails"
+msgstr "参数值错误: 上传的图片中缺少 file_id,图片上传失败"
+
+#: community/apps/application/flow/step_node/application_node/i_application_node.py:77
+msgid ""
+"Parameter value error: The uploaded audio lacks file_id, and the audio "
+"upload fails."
+msgstr "参数值错误: 上传的音频中缺少 file_id,音频上传失败"
+
+#: community/apps/application/flow/step_node/condition_node/i_condition_node.py:19
+#: community/apps/application/serializers/chat_serializers.py:124
+msgid "Comparator"
+msgstr "比较器"
+
+#: community/apps/application/flow/step_node/condition_node/i_condition_node.py:20
+#: community/apps/application/swagger_api/application_api.py:271
+msgid "value"
+msgstr "值"
+
+#: community/apps/application/flow/step_node/condition_node/i_condition_node.py:21
+msgid "Fields"
+msgstr "字段"
+
+#: community/apps/application/flow/step_node/condition_node/i_condition_node.py:25
+msgid "Branch id"
+msgstr "分支 id"
+
+#: community/apps/application/flow/step_node/condition_node/i_condition_node.py:26
+msgid "Branch Type"
+msgstr "分支类型"
+
+#: community/apps/application/flow/step_node/condition_node/i_condition_node.py:27
+msgid "Condition or|and"
+msgstr "条件 or|and"
+
+#: community/apps/application/flow/step_node/direct_reply_node/i_reply_node.py:20
+msgid "Response Type"
+msgstr "响应类型"
+
+#: community/apps/application/flow/step_node/direct_reply_node/i_reply_node.py:21
+#: community/apps/application/flow/step_node/variable_assign_node/i_variable_assign_node.py:14
+msgid "Reference Field"
+msgstr "引用字段"
+
+#: community/apps/application/flow/step_node/direct_reply_node/i_reply_node.py:23
+msgid "Direct answer content"
+msgstr "直接回答内容"
+
+#: community/apps/application/flow/step_node/direct_reply_node/i_reply_node.py:30
+msgid "Reference field cannot be empty"
+msgstr "引用字段不能为空"
+
+#: community/apps/application/flow/step_node/direct_reply_node/i_reply_node.py:32
+msgid "Reference field error"
+msgstr "引用字段错误"
+
+#: community/apps/application/flow/step_node/direct_reply_node/i_reply_node.py:35
+msgid "Content cannot be empty"
+msgstr "内容不能为空"
+
+#: community/apps/application/flow/step_node/form_node/i_form_node.py:19
+msgid "Form Configuration"
+msgstr "表单配置"
+
+#: community/apps/application/flow/step_node/form_node/i_form_node.py:20
+msgid "Form output content"
+msgstr "表单输出内容"
+
+#: community/apps/application/flow/step_node/function_lib_node/i_function_lib_node.py:22
+#: community/apps/application/flow/step_node/function_node/i_function_node.py:24
+msgid "Variable Name"
+msgstr "变量名"
+
+#: community/apps/application/flow/step_node/function_lib_node/i_function_lib_node.py:23
+#: community/apps/application/flow/step_node/function_node/i_function_node.py:34
+msgid "Variable Value"
+msgstr "变量值"
+
+#: community/apps/application/flow/step_node/function_lib_node/i_function_lib_node.py:27
+msgid "Library ID"
+msgstr "函数库id"
+
+#: community/apps/application/flow/step_node/function_lib_node/i_function_lib_node.py:35
+msgid "The function has been deleted"
+msgstr "函数已被删除"
+
+#: community/apps/application/flow/step_node/function_node/i_function_node.py:25
+msgid "Is this field required"
+msgstr "字段是否必填"
+
+#: community/apps/application/flow/step_node/function_node/i_function_node.py:28
+msgid "The field only supports string|int|dict|array|float"
+msgstr "字段只支持 string|int|dict|array|float"
+
+#: community/apps/application/flow/step_node/function_node/i_function_node.py:30
+#: community/apps/function_lib/serializers/function_lib_serializer.py:76
+#: community/apps/function_lib/swagger_api/function_lib_api.py:98
+#: community/apps/function_lib/swagger_api/function_lib_api.py:144
+#: community/apps/function_lib/swagger_api/function_lib_api.py:190
+msgid "source"
+msgstr "来源"
+
+#: community/apps/application/flow/step_node/function_node/i_function_node.py:32
+#: community/apps/function_lib/serializers/function_lib_serializer.py:78
+msgid "The field only supports custom|reference"
+msgstr "字段只支持 custom|reference"
+
+#: community/apps/application/flow/step_node/function_node/i_function_node.py:40
+#, python-brace-format
+msgid "{field}, this field is required."
+msgstr "{field}, 此字段为必填项。"
+
+#: community/apps/application/flow/step_node/function_node/i_function_node.py:46
+#: community/apps/function_lib/views/function_lib_views.py:131
+#: community/apps/function_lib/views/function_lib_views.py:145
+msgid "function"
+msgstr "函数"
+
+#: community/apps/application/flow/step_node/image_generate_step_node/i_image_generate_node.py:15
+msgid "Prompt word (positive)"
+msgstr "提示词(正向)"
+
+#: community/apps/application/flow/step_node/image_generate_step_node/i_image_generate_node.py:17
+msgid "Prompt word (negative)"
+msgstr "提示词(负向)"
+
+#: community/apps/application/flow/step_node/image_generate_step_node/i_image_generate_node.py:24
+#: community/apps/application/flow/step_node/image_understand_step_node/i_image_understand_node.py:20
+msgid "Conversation storage type"
+msgstr "对话存储类型"
+
+#: community/apps/application/flow/step_node/reranker_node/i_reranker_node.py:26
+#: community/apps/application/flow/step_node/search_dataset_node/i_search_dataset_node.py:33
+msgid "Maximum number of words in a quoted segment"
+msgstr "最大引用分段字数"
+
+#: community/apps/application/flow/step_node/search_dataset_node/i_search_dataset_node.py:27
+#: community/apps/common/swagger_api/common_api.py:36
+#: community/apps/dataset/serializers/dataset_serializers.py:573
+msgid "similarity"
+msgstr "相似度"
+
+#: community/apps/application/flow/step_node/speech_to_text_step_node/i_speech_to_text_node.py:17
+msgid "The audio file cannot be empty"
+msgstr "音频文件不能为空"
+
+#: community/apps/application/flow/step_node/speech_to_text_step_node/i_speech_to_text_node.py:31
+msgid ""
+"Parameter value error: The uploaded audio lacks file_id, and the audio "
+"upload fails"
+msgstr "参数值错误: 上传的音频缺少 file_id,音频上传失败"
+
+#: community/apps/application/flow/step_node/text_to_speech_step_node/i_text_to_speech_node.py:17
+msgid "Text content"
+msgstr "文本内容"
+
+#: community/apps/application/flow/workflow_manage.py:107
+#, python-brace-format
+msgid "The branch {branch} of the {node} node needs to be connected"
+msgstr "{node} 节点的{branch}分支需要连接"
+
+#: community/apps/application/flow/workflow_manage.py:113
+#, python-brace-format
+msgid "{node} Nodes cannot be considered as end nodes"
+msgstr "{node} 节点不能当做结束节点"
+
+#: community/apps/application/flow/workflow_manage.py:123
+msgid "The next node that does not exist"
+msgstr "不存在的下一个节点"
+
+#: community/apps/application/flow/workflow_manage.py:137
+msgid "The starting node is required"
+msgstr "开始节点必填"
+
+#: community/apps/application/flow/workflow_manage.py:139
+msgid "There can only be one starting node"
+msgstr "开始节点只能有一个"
+
+#: community/apps/application/flow/workflow_manage.py:147
+#, python-brace-format
+msgid "The node {node} model does not exist"
+msgstr "节点{node} 模型不存在"
+
+#: community/apps/application/flow/workflow_manage.py:157
+#, python-brace-format
+msgid "Node {node} is unavailable"
+msgstr "节点{node} 不可用"
+
+#: community/apps/application/flow/workflow_manage.py:163
+#, python-brace-format
+msgid "The library ID of node {node} cannot be empty"
+msgstr "节点{node} 函数库id不能为空"
+
+#: community/apps/application/flow/workflow_manage.py:166
+#, python-brace-format
+msgid "The function library for node {node} is not available"
+msgstr "节点{node} 函数库不可用"
+
+#: community/apps/application/flow/workflow_manage.py:172
+msgid "Basic information node is required"
+msgstr "基本信息节点必填"
+
+#: community/apps/application/flow/workflow_manage.py:174
+msgid "There can only be one basic information node"
+msgstr "基本信息节点只能有一个"
+
+#: community/apps/application/serializers/application_serializers.py:75
+#: community/apps/application/serializers/chat_serializers.py:618
+#: community/apps/application/serializers/chat_serializers.py:677
+#: community/apps/application/serializers/chat_serializers.py:709
+#: community/apps/application/swagger_api/chat_api.py:365
+#: community/apps/application/swagger_api/chat_api.py:393
+#: community/apps/application/swagger_api/chat_api.py:394
+#: community/apps/application/swagger_api/chat_api.py:415
+#: community/apps/application/swagger_api/chat_api.py:494
+#: community/apps/application/swagger_api/chat_api.py:495
+msgid "Knowledge base id"
+msgstr "知识库 id"
+
+#: community/apps/application/serializers/application_serializers.py:76
+msgid "Knowledge Base List"
+msgstr "知识库列表"
+
+#: community/apps/application/serializers/application_serializers.py:90
+msgid "The knowledge base id does not exist"
+msgstr "知识库 id 不存在"
+
+#: community/apps/application/serializers/application_serializers.py:107
+msgid "No reference status"
+msgstr "无引用状态"
+
+#: community/apps/application/serializers/application_serializers.py:123
+msgid "Acquaintance"
+msgstr "相似度"
+
+#: community/apps/application/serializers/application_serializers.py:126
+#: community/apps/application/swagger_api/application_api.py:256
+#: community/apps/application/swagger_api/application_api.py:257
+msgid "Maximum number of quoted characters"
+msgstr "最多引用字符数"
+
+#: community/apps/application/serializers/application_serializers.py:133
+msgid "Segment settings not referenced"
+msgstr "未引用分段设置"
+
+#: community/apps/application/serializers/application_serializers.py:140
+msgid "Role prompts"
+msgstr "角色提示词"
+
+#: community/apps/application/serializers/application_serializers.py:142
+#: community/apps/application/swagger_api/application_api.py:303
+#: community/apps/application/swagger_api/application_api.py:305
+msgid "No citation segmentation prompt"
+msgstr "无引用分段提示词"
+
+#: community/apps/application/serializers/application_serializers.py:144
+msgid "Thinking process switch"
+msgstr "思考过程开关"
+
+#: community/apps/application/serializers/application_serializers.py:148
+msgid "The thinking process begins to mark"
+msgstr "思考过程开始标记"
+
+#: community/apps/application/serializers/application_serializers.py:151
+msgid "End of thinking process marker"
+msgstr "思考过程结束标记"
+
+#: community/apps/application/serializers/application_serializers.py:156
+#: community/apps/application/serializers/application_serializers.py:482
+#: community/apps/application/serializers/application_serializers.py:623
+#: community/apps/application/swagger_api/application_api.py:49
+#: community/apps/application/swagger_api/application_api.py:50
+#: community/apps/application/swagger_api/application_api.py:181
+#: community/apps/application/swagger_api/application_api.py:182
+#: community/apps/application/swagger_api/application_api.py:330
+#: community/apps/application/swagger_api/application_api.py:331
+#: community/apps/application/swagger_api/application_api.py:377
+msgid "Application Name"
+msgstr "应用名称"
+
+#: community/apps/application/serializers/application_serializers.py:159
+#: community/apps/application/serializers/application_serializers.py:484
+#: community/apps/application/serializers/application_serializers.py:625
+#: community/apps/application/swagger_api/application_api.py:51
+#: community/apps/application/swagger_api/application_api.py:52
+#: community/apps/application/swagger_api/application_api.py:183
+#: community/apps/application/swagger_api/application_api.py:184
+#: community/apps/application/swagger_api/application_api.py:332
+#: community/apps/application/swagger_api/application_api.py:333
+#: community/apps/application/swagger_api/application_api.py:382
+msgid "Application Description"
+msgstr "应用描述"
+
+#: community/apps/application/serializers/application_serializers.py:160
+msgid "Workflow Objects"
+msgstr "工作流对象"
+
+#: community/apps/application/serializers/application_serializers.py:162
+#: community/apps/application/serializers/application_serializers.py:225
+#: community/apps/application/serializers/application_serializers.py:492
+#: community/apps/application/swagger_api/application_api.py:57
+#: community/apps/application/swagger_api/application_api.py:58
+#: community/apps/application/swagger_api/application_api.py:190
+#: community/apps/application/swagger_api/application_api.py:191
+#: community/apps/application/swagger_api/application_api.py:339
+#: community/apps/application/swagger_api/application_api.py:340
+msgid "Opening remarks"
+msgstr "开场白"
+
+#: community/apps/application/serializers/application_serializers.py:214
+#: community/apps/dataset/serializers/dataset_serializers.py:105
+#: community/apps/dataset/serializers/dataset_serializers.py:106
+msgid "application name"
+msgstr "应用名称"
+
+#: community/apps/application/serializers/application_serializers.py:217
+msgid "application describe"
+msgstr "应用描述"
+
+#: community/apps/application/serializers/application_serializers.py:219
+#: community/apps/application/serializers/application_serializers.py:486
+msgid "Model"
+msgstr "模型"
+
+#: community/apps/application/serializers/application_serializers.py:223
+#: community/apps/application/serializers/application_serializers.py:490
+msgid "Historical chat records"
+msgstr "历史聊天记录"
+
+#: community/apps/application/serializers/application_serializers.py:228
+#: community/apps/application/serializers/application_serializers.py:494
+msgid "Related Knowledge Base"
+msgstr "关联知识库"
+
+#: community/apps/application/serializers/application_serializers.py:235
+#: community/apps/application/serializers/application_serializers.py:504
+#: community/apps/application/serializers/chat_serializers.py:379
+msgid "Question completion"
+msgstr "问题补全"
+
+#: community/apps/application/serializers/application_serializers.py:239
+#: community/apps/application/swagger_api/application_api.py:203
+#: community/apps/application/swagger_api/application_api.py:349
+msgid "Application Type"
+msgstr "应用类型"
+
+#: community/apps/application/serializers/application_serializers.py:243
+msgid "Application type only supports SIMPLE|WORK_FLOW"
+msgstr "应用类型只支持 SIMPLE|WORK_FLOW"
+
+#: community/apps/application/serializers/application_serializers.py:247
+#: community/apps/application/serializers/application_serializers.py:508
+msgid "Model parameters"
+msgstr "模型参数"
+
+#: community/apps/application/serializers/application_serializers.py:255
+msgid "Host"
+msgstr "主机"
+
+#: community/apps/application/serializers/application_serializers.py:256
+msgid "protocol"
+msgstr "协议"
+
+#: community/apps/application/serializers/application_serializers.py:339
+#: community/apps/application/swagger_api/application_api.py:153
+#: community/apps/application/swagger_api/application_api.py:154
+msgid "Reset Token"
+msgstr "重置 Token"
+
+#: community/apps/application/serializers/application_serializers.py:340
+msgid "Is it enabled"
+msgstr "是否开启"
+
+#: community/apps/application/serializers/application_serializers.py:343
+#: community/apps/application/swagger_api/application_api.py:158
+#: community/apps/application/swagger_api/application_api.py:159
+msgid "Number of visits"
+msgstr "访问次数"
+
+#: community/apps/application/serializers/application_serializers.py:345
+#: community/apps/application/swagger_api/application_api.py:160
+#: community/apps/application/swagger_api/application_api.py:161
+msgid "Whether to enable whitelist"
+msgstr "是否开启白名单"
+
+#: community/apps/application/serializers/application_serializers.py:348
+#: community/apps/application/serializers/application_serializers.py:349
+#: community/apps/application/swagger_api/application_api.py:163
+#: community/apps/application/swagger_api/application_api.py:164
+msgid "Whitelist"
+msgstr "白名单"
+
+#: community/apps/application/serializers/application_serializers.py:352
+#: community/apps/application/swagger_api/application_api.py:166
+#: community/apps/application/swagger_api/application_api.py:167
+msgid "Whether to display knowledge sources"
+msgstr "是否显示知识来源"
+
+#: community/apps/application/serializers/application_serializers.py:423
+msgid "access_token"
+msgstr "access_token"
+
+#: community/apps/application/serializers/application_serializers.py:425
+msgid "Certification Information"
+msgstr "认证信息"
+
+#: community/apps/application/serializers/application_serializers.py:462
+msgid "Invalid access_token"
+msgstr "无效的access_token"
+
+#: community/apps/application/serializers/application_serializers.py:473
+msgid "Wrong password"
+msgstr "密码错误"
+
+#: community/apps/application/serializers/application_serializers.py:498
+msgid "Dataset settings"
+msgstr "知识库设置"
+
+#: community/apps/application/serializers/application_serializers.py:501
+msgid "Model setup"
+msgstr "模型设置"
+
+#: community/apps/application/serializers/application_serializers.py:505
+msgid "Icon"
+msgstr "icon 图标"
+
+#: community/apps/application/serializers/application_serializers.py:515
+#: community/apps/application/serializers/application_serializers.py:722
+#: community/apps/setting/serializers/valid_serializers.py:29
+msgid ""
+"The community version supports up to 5 applications. If you need more "
+"applications, please contact us (https://fit2cloud.com/)."
+msgstr ""
+"社区版最多支持 5 个应用,如需拥有更多应用,请联系我们(https://"
+"fit2cloud.com/)"
+
+#: community/apps/application/serializers/application_serializers.py:583
+msgid "Query text"
+msgstr "查询文本"
+
+#: community/apps/application/serializers/application_serializers.py:585
+msgid "topN"
+msgstr "topN"
+
+#: community/apps/application/serializers/application_serializers.py:587
+msgid "Relevance"
+msgstr "相似度"
+
+#: community/apps/application/serializers/application_serializers.py:596
+#: community/apps/application/serializers/application_serializers.py:705
+#: community/apps/application/serializers/application_serializers.py:797
+msgid "Application id does not exist"
+msgstr "应用 ID 不存在"
+
+#: community/apps/application/serializers/application_serializers.py:628
+msgid "Select User ID"
+msgstr "选择用户 ID"
+
+#: community/apps/application/serializers/application_serializers.py:717
+#: community/apps/dataset/serializers/document_serializers.py:164
+#: community/apps/dataset/serializers/document_serializers.py:213
+#: community/apps/dataset/serializers/document_serializers.py:220
+#: community/apps/dataset/serializers/file_serializers.py:59
+#: community/apps/dataset/views/file.py:35
+#: community/apps/dataset/views/file.py:44
+#: community/apps/function_lib/serializers/function_lib_serializer.py:331
+msgid "file"
+msgstr "文件"
+
+#: community/apps/application/serializers/application_serializers.py:732
+#: community/apps/common/handle/impl/qa/zip_parse_qa_handle.py:62
+#: community/apps/common/handle/impl/zip_split_handle.py:56
+#: community/apps/dataset/serializers/document_serializers.py:874
+#: community/apps/dataset/serializers/document_serializers.py:882
+#: community/apps/function_lib/serializers/function_lib_serializer.py:343
+msgid "Unsupported file format"
+msgstr "文件格式不支持"
+
+#: community/apps/application/serializers/application_serializers.py:872
+msgid "work_flow is a required field"
+msgstr "work_flow是必填字段"
+
+#: community/apps/application/serializers/application_serializers.py:934
+#: community/apps/application/serializers/application_serializers.py:1076
+#, python-brace-format
+msgid "Unknown knowledge base id {dataset_id}, unable to associate"
+msgstr "未知的知识库 id {dataset_id},无法关联"
+
+#: community/apps/application/serializers/application_serializers.py:954
+msgid "Illegal User"
+msgstr "非法用户"
+
+#: community/apps/application/serializers/application_serializers.py:1028
+#: community/apps/application/serializers/application_serializers.py:1038
+#: community/apps/application/serializers/application_serializers.py:1048
+#, python-brace-format
+msgid "No permission to use this model:{model_name}"
+msgstr "用户没有使用该模型:{model_name}的权限"
+
+#: community/apps/application/serializers/application_serializers.py:1259
+#: community/apps/application/swagger_api/chat_api.py:498
+#: community/apps/application/swagger_api/chat_api.py:499
+msgid "Availability"
+msgstr "是否可用"
+
+#: community/apps/application/serializers/application_serializers.py:1263
+#: community/apps/application/swagger_api/application_api.py:129
+#: community/apps/application/swagger_api/application_api.py:130
+msgid "Is cross-domain allowed"
+msgstr "是否允许跨域"
+
+#: community/apps/application/serializers/application_serializers.py:1268
+msgid "Cross-domain address"
+msgstr "跨域地址"
+
+#: community/apps/application/serializers/application_serializers.py:1269
+#: community/apps/application/swagger_api/application_api.py:131
+msgid "Cross-domain list"
+msgstr "跨域列表"
+
+#: community/apps/application/serializers/application_serializers.py:1274
+msgid "ApiKeyid"
+msgstr "ApiKey id"
+
+#: community/apps/application/serializers/application_serializers.py:1295
+msgid "APIKey does not exist"
+msgstr "APIKey 不存在"
+
+#: community/apps/application/serializers/application_version_serializers.py:30
+#: community/apps/application/swagger_api/application_version_api.py:24
+#: community/apps/application/swagger_api/application_version_api.py:25
+#: community/apps/application/swagger_api/application_version_api.py:47
+#: community/apps/application/swagger_api/application_version_api.py:70
+#: community/apps/application/swagger_api/application_version_api.py:71
+msgid "Version Name"
+msgstr "版本名称"
+
+#: community/apps/application/serializers/application_version_serializers.py:37
+#: community/apps/application/serializers/chat_serializers.py:115
+#: community/apps/application/serializers/chat_serializers.py:240
+msgid "summary"
+msgstr "摘要"
+
+#: community/apps/application/serializers/application_version_serializers.py:61
+msgid "Workflow version id"
+msgstr "工作流版本 id"
+
+#: community/apps/application/serializers/application_version_serializers.py:71
+#: community/apps/application/serializers/application_version_serializers.py:86
+msgid "Workflow version does not exist"
+msgstr "工作流版本不存在"
+
+#: community/apps/application/serializers/chat_message_serializers.py:195
+#: community/apps/dataset/serializers/paragraph_serializers.py:47
+#: community/apps/dataset/serializers/paragraph_serializers.py:180
+#: community/apps/dataset/serializers/paragraph_serializers.py:692
+#: community/apps/dataset/serializers/paragraph_serializers.py:705
+#: community/apps/dataset/serializers/paragraph_serializers.py:706
+#: community/apps/dataset/serializers/problem_serializers.py:41
+#: community/apps/dataset/serializers/problem_serializers.py:52
+#: community/apps/dataset/serializers/problem_serializers.py:113
+#: community/apps/dataset/swagger_api/problem_api.py:24
+#: community/apps/dataset/swagger_api/problem_api.py:25
+#: community/apps/dataset/swagger_api/problem_api.py:109
+#: community/apps/dataset/swagger_api/problem_api.py:110
+#: community/apps/dataset/swagger_api/problem_api.py:126
+#: community/apps/dataset/swagger_api/problem_api.py:127
+#: community/apps/dataset/swagger_api/problem_api.py:154
+#: community/apps/dataset/swagger_api/problem_api.py:169
+msgid "content"
+msgstr "内容"
+
+#: community/apps/application/serializers/chat_message_serializers.py:196
+#: community/apps/setting/serializers/team_serializers.py:45
+#: community/apps/users/serializers/user_serializers.py:472
+#: community/apps/users/serializers/user_serializers.py:495
+#: community/apps/users/serializers/user_serializers.py:586
+msgid "Role"
+msgstr "角色"
+
+#: community/apps/application/serializers/chat_message_serializers.py:202
+msgid "Regenerate"
+msgstr "重新生成"
+
+#: community/apps/application/serializers/chat_message_serializers.py:256
+msgid "Is the answer in streaming mode"
+msgstr "是否流式回答"
+
+#: community/apps/application/serializers/chat_message_serializers.py:257
+msgid "Do you want to reply again"
+msgstr "是否重新回答"
+
+#: community/apps/application/serializers/chat_message_serializers.py:259
+#: community/apps/application/serializers/chat_serializers.py:442
+#: community/apps/application/serializers/chat_serializers.py:534
+#: community/apps/application/serializers/chat_serializers.py:590
+#: community/apps/application/serializers/chat_serializers.py:616
+#: community/apps/application/serializers/chat_serializers.py:675
+#: community/apps/application/swagger_api/chat_api.py:148
+#: community/apps/application/swagger_api/chat_api.py:149
+#: community/apps/application/swagger_api/chat_api.py:360
+#: community/apps/application/swagger_api/chat_api.py:437
+#: community/apps/application/swagger_api/chat_api.py:470
+msgid "Conversation record id"
+msgstr "对话记录 ID"
+
+#: community/apps/application/serializers/chat_message_serializers.py:262
+msgid "Node id"
+msgstr "节点 ID"
+
+#: community/apps/application/serializers/chat_message_serializers.py:265
+#: community/apps/application/swagger_api/chat_api.py:142
+#: community/apps/application/swagger_api/chat_api.py:143
+msgid "Runtime node id"
+msgstr "运行时节点 ID"
+
+#: community/apps/application/serializers/chat_message_serializers.py:268
+msgid "Node parameters"
+msgstr "节点参数"
+
+#: community/apps/application/serializers/chat_message_serializers.py:273
+msgid "Global variables"
+msgstr "全局变量"
+
+#: community/apps/application/serializers/chat_message_serializers.py:286
+#: community/apps/application/serializers/chat_message_serializers.py:421
+#: community/apps/application/serializers/chat_serializers.py:469
+msgid "Conversation does not exist"
+msgstr "对话不存在"
+
+#: community/apps/application/serializers/chat_message_serializers.py:303
+msgid "The number of visits exceeds today's visits"
+msgstr "访问次数超过今日访问量"
+
+#: community/apps/application/serializers/chat_message_serializers.py:314
+msgid "The current model is not available"
+msgstr "当前模型不可用"
+
+#: community/apps/application/serializers/chat_message_serializers.py:316
+msgid "The model is downloading, please try again later"
+msgstr "模型正在下载中,请稍后再试"
+
+#: community/apps/application/serializers/chat_message_serializers.py:361
+#: community/apps/application/serializers/chat_serializers.py:599
+#: community/apps/application/serializers/chat_serializers.py:645
+#: community/apps/application/serializers/chat_serializers.py:694
+msgid "Conversation record does not exist"
+msgstr "对话记录不存在"
+
+#: community/apps/application/serializers/chat_message_serializers.py:454
+#: community/apps/application/serializers/chat_serializers.py:314
+msgid "The application has not been published. Please use it after publishing."
+msgstr "应用未发布,请发布后使用"
+
+#: community/apps/application/serializers/chat_serializers.py:55
+msgid "node"
+msgstr "节点"
+
+#: community/apps/application/serializers/chat_serializers.py:56
+msgid "Connection"
+msgstr "连线"
+
+#: community/apps/application/serializers/chat_serializers.py:71
+#: community/apps/application/swagger_api/chat_api.py:48
+#: community/apps/application/swagger_api/chat_api.py:49
+#: community/apps/application/swagger_api/chat_api.py:169
+#: community/apps/application/swagger_api/chat_api.py:170
+#: community/apps/application/swagger_api/chat_api.py:256
+msgid "abstract"
+msgstr "摘要"
+
+#: community/apps/application/serializers/chat_serializers.py:121
+#: community/apps/application/swagger_api/chat_api.py:258
+msgid "Minimum number of likes"
+msgstr "最小点赞数"
+
+#: community/apps/application/serializers/chat_serializers.py:123
+#: community/apps/application/swagger_api/chat_api.py:260
+msgid "Minimum number of clicks"
+msgstr "最小点踩数"
+
+#: community/apps/application/serializers/chat_serializers.py:126
+msgid "Only supports and|or"
+msgstr "只支持 and|or"
+
+#: community/apps/application/serializers/chat_serializers.py:241
+msgid "Problem after optimization"
+msgstr "优化后的问题"
+
+#: community/apps/application/serializers/chat_serializers.py:242
+msgid "answer"
+msgstr "回答"
+
+#: community/apps/application/serializers/chat_serializers.py:242
+msgid "User feedback"
+msgstr "用户反馈"
+
+#: community/apps/application/serializers/chat_serializers.py:244
+msgid "Section title + content"
+msgstr "分段标题+内容"
+
+#: community/apps/application/serializers/chat_serializers.py:245
+#: community/apps/application/views/chat_views.py:385
+#: community/apps/application/views/chat_views.py:386
+msgid "Annotation"
+msgstr "标注"
+
+#: community/apps/application/serializers/chat_serializers.py:245
+msgid "Consuming tokens"
+msgstr "消耗tokens"
+
+#: community/apps/application/serializers/chat_serializers.py:245
+msgid "Time consumed (s)"
+msgstr "耗时(s)"
+
+#: community/apps/application/serializers/chat_serializers.py:246
+msgid "Question Time"
+msgstr "提问时间"
+
+#: community/apps/application/serializers/chat_serializers.py:337
+msgid "Workflow"
+msgstr "工作流"
+
+#: community/apps/application/serializers/chat_serializers.py:369
+msgid "Multi-round conversation"
+msgstr "多轮对话"
+
+#: community/apps/application/serializers/chat_serializers.py:372
+msgid "Related Datasets"
+msgstr "关联知识库"
+
+#: community/apps/application/serializers/chat_serializers.py:449
+msgid "Application authentication information does not exist"
+msgstr "不存在的应用认证信息"
+
+#: community/apps/application/serializers/chat_serializers.py:451
+msgid "Displaying knowledge sources is not enabled"
+msgstr "未开启显示知识来源"
+
+#: community/apps/application/serializers/chat_serializers.py:537
+msgid "Bidding Status"
+msgstr "投票状态"
+
+#: community/apps/application/serializers/chat_serializers.py:546
+msgid ""
+"Voting on the current session minutes, please do not send repeated requests"
+msgstr "正在对当前会话纪要进行投票中,请勿重复发送请求"
+
+#: community/apps/application/serializers/chat_serializers.py:551
+msgid "Non-existent conversation chat_record_id"
+msgstr "不存在的对话 chat_record_id"
+
+#: community/apps/application/serializers/chat_serializers.py:568
+msgid "Already voted, please cancel first and then vote again"
+msgstr "已经投票过,请先取消后再进行投票"
+
+#: community/apps/application/serializers/chat_serializers.py:575
+#: community/apps/application/swagger_api/chat_api.py:379
+#: community/apps/application/swagger_api/chat_api.py:380
+#: community/apps/dataset/swagger_api/problem_api.py:128
+#: community/apps/dataset/swagger_api/problem_api.py:129
+msgid "Section title"
+msgstr "段落标题"
+
+#: community/apps/application/serializers/chat_serializers.py:576
+#: community/apps/application/swagger_api/chat_api.py:381
+#: community/apps/application/swagger_api/chat_api.py:382
+#: community/apps/application/swagger_api/chat_api.py:483
+#: community/apps/application/swagger_api/chat_api.py:484
+#: community/apps/common/swagger_api/common_api.py:57
+#: community/apps/common/swagger_api/common_api.py:58
+msgid "Paragraph content"
+msgstr "段落内容"
+
+#: community/apps/application/serializers/chat_serializers.py:620
+#: community/apps/application/serializers/chat_serializers.py:679
+#: community/apps/application/serializers/chat_serializers.py:710
+#: community/apps/application/swagger_api/chat_api.py:370
+#: community/apps/application/swagger_api/chat_api.py:395
+#: community/apps/application/swagger_api/chat_api.py:396
+#: community/apps/application/swagger_api/chat_api.py:496
+#: community/apps/application/swagger_api/chat_api.py:497
+msgid "Document id"
+msgstr "文档 ID"
+
+#: community/apps/application/serializers/chat_serializers.py:626
+#: community/apps/application/serializers/chat_serializers.py:717
+#: community/apps/dataset/serializers/paragraph_serializers.py:576
+msgid "The document id is incorrect"
+msgstr "文档 id 不正确"
+
+#: community/apps/application/serializers/chat_serializers.py:681
+#: community/apps/application/swagger_api/chat_api.py:310
+#: community/apps/application/swagger_api/chat_api.py:311
+msgid "Paragraph id"
+msgstr "段落 ID"
+
+#: community/apps/application/serializers/chat_serializers.py:697
+#, python-brace-format
+msgid ""
+"The paragraph id is wrong. The current conversation record does not exist. "
+"[{paragraph_id}] paragraph id"
+msgstr "段落id错误。当前对话记录不存在。[{paragraph_id}] 段落id"
+
+#: community/apps/application/serializers/chat_serializers.py:736
+msgid "Conversation records that do not exist"
+msgstr "存在不存在的对话记录"
+
+#: community/apps/application/swagger_api/application_api.py:24
+#: community/apps/application/views/chat_views.py:470
+#: community/apps/application/views/chat_views.py:471
+msgid "Upload files"
+msgstr "上传文件"
+
+#: community/apps/application/swagger_api/application_api.py:35
+#: community/apps/application/swagger_api/application_api.py:36
+msgid "Application authentication token"
+msgstr "应用认证 token"
+
+#: community/apps/application/swagger_api/application_api.py:48
+#: community/apps/application/swagger_api/application_version_api.py:22
+#: community/apps/application/swagger_api/application_version_api.py:23
+msgid "Primary key id"
+msgstr "主键 id"
+
+#: community/apps/application/swagger_api/application_api.py:60
+msgid "Example List"
+msgstr "示例列表"
+
+#: community/apps/application/swagger_api/application_api.py:61
+#: community/apps/application/swagger_api/application_api.py:62
+msgid "Affiliation user"
+msgstr "所属用户"
+
+#: community/apps/application/swagger_api/application_api.py:64
+msgid "Is publish"
+msgstr "是否发布"
+
+#: community/apps/application/swagger_api/application_api.py:66
+#: community/apps/application/swagger_api/application_api.py:67
+#: community/apps/application/swagger_api/application_version_api.py:28
+#: community/apps/application/swagger_api/application_version_api.py:29
+#: community/apps/application/swagger_api/chat_api.py:185
+#: community/apps/application/swagger_api/chat_api.py:186
+#: community/apps/application/swagger_api/chat_api.py:335
+#: community/apps/application/swagger_api/chat_api.py:336
+#: community/apps/application/swagger_api/chat_api.py:503
+#: community/apps/application/swagger_api/chat_api.py:504
+msgid "Creation time"
+msgstr "创建时间"
+
+#: community/apps/application/swagger_api/application_api.py:69
+#: community/apps/application/swagger_api/application_api.py:70
+#: community/apps/application/swagger_api/application_version_api.py:30
+#: community/apps/application/swagger_api/application_version_api.py:31
+#: community/apps/application/swagger_api/chat_api.py:332
+#: community/apps/application/swagger_api/chat_api.py:333
+#: community/apps/application/swagger_api/chat_api.py:500
+#: community/apps/application/swagger_api/chat_api.py:501
+msgid "Modification time"
+msgstr "修改时间"
+
+#: community/apps/application/swagger_api/application_api.py:74
+#: community/apps/application/swagger_api/application_api.py:194
+#: community/apps/application/swagger_api/application_api.py:195
+#: community/apps/application/swagger_api/application_api.py:343
+#: community/apps/application/swagger_api/application_api.py:344
+#: community/apps/application/swagger_api/chat_api.py:229
+#: community/apps/application/swagger_api/chat_api.py:230
+msgid "List of associated knowledge base IDs"
+msgstr "关联知识库 ID 列表"
+
+#: community/apps/application/swagger_api/application_api.py:76
+msgid "List of associated knowledge base IDs (returned when querying details)"
+msgstr "关联知识库ID列表(查询详情时返回)"
+
+#: community/apps/application/swagger_api/application_api.py:91
+msgid "Model Type"
+msgstr "模型类型"
+
+#: community/apps/application/swagger_api/application_api.py:117
+msgid "Application api_key id"
+msgstr "应用 api_key id"
+
+#: community/apps/application/swagger_api/application_api.py:126
+#: community/apps/application/swagger_api/application_api.py:127
+#: community/apps/application/swagger_api/application_api.py:156
+#: community/apps/application/swagger_api/application_api.py:157
+msgid "Is activation"
+msgstr "是否可用"
+
+#: community/apps/application/swagger_api/application_api.py:198
+#: community/apps/application/swagger_api/application_api.py:347
+#: community/apps/application/swagger_api/application_api.py:348
+msgid "Problem Optimization"
+msgstr "问题优化"
+
+#: community/apps/application/swagger_api/application_api.py:199
+msgid "Whether to enable problem optimization"
+msgstr "是否开启问题优化"
+
+#: community/apps/application/swagger_api/application_api.py:204
+#: community/apps/application/swagger_api/application_api.py:350
+msgid "Application Type SIMPLE | WORK_FLOW"
+msgstr "应用类型 SIMPLE | WORK_FLOW"
+
+#: community/apps/application/swagger_api/application_api.py:207
+#: community/apps/application/swagger_api/application_api.py:208
+#: community/apps/application/swagger_api/application_api.py:352
+#: community/apps/application/swagger_api/application_api.py:353
+msgid "Question optimization tips"
+msgstr "问题优化提示词"
+
+#: community/apps/application/swagger_api/application_api.py:211
+#: community/apps/application/swagger_api/application_api.py:212
+#: community/apps/application/swagger_api/application_api.py:356
+#: community/apps/application/swagger_api/application_api.py:357
+msgid "Text-to-speech model ID"
+msgstr "文本转语音模型 ID"
+
+#: community/apps/application/swagger_api/application_api.py:213
+#: community/apps/application/swagger_api/application_api.py:214
+#: community/apps/application/swagger_api/application_api.py:358
+#: community/apps/application/swagger_api/application_api.py:359
+msgid "Speech-to-text model id"
+msgstr "语音转文本模型 ID"
+
+#: community/apps/application/swagger_api/application_api.py:215
+#: community/apps/application/swagger_api/application_api.py:216
+#: community/apps/application/swagger_api/application_api.py:360
+#: community/apps/application/swagger_api/application_api.py:361
+msgid "Is speech-to-text enabled"
+msgstr "是否开启语音转文本"
+
+#: community/apps/application/swagger_api/application_api.py:217
+#: community/apps/application/swagger_api/application_api.py:218
+#: community/apps/application/swagger_api/application_api.py:362
+#: community/apps/application/swagger_api/application_api.py:363
+msgid "Is text-to-speech enabled"
+msgstr "是否开启文本转语音"
+
+#: community/apps/application/swagger_api/application_api.py:219
+#: community/apps/application/swagger_api/application_api.py:220
+#: community/apps/application/swagger_api/application_api.py:364
+#: community/apps/application/swagger_api/application_api.py:365
+msgid "Text-to-speech type"
+msgstr "文本转语音类型"
+
+#: community/apps/application/swagger_api/application_api.py:233
+msgid "Node List"
+msgstr "节点列表"
+
+#: community/apps/application/swagger_api/application_api.py:236
+msgid "Connection List"
+msgstr "连线列表"
+
+#: community/apps/application/swagger_api/application_api.py:266
+msgid "state"
+msgstr "状态"
+
+#: community/apps/application/swagger_api/application_api.py:268
+msgid "ai_questioning|designated_answer"
+msgstr "ai作答|指定答案"
+
+#: community/apps/application/swagger_api/application_api.py:273
+msgid ""
+"ai_questioning: is the title, designated_answer: is the designated answer "
+"content"
+msgstr "ai作答:就是题词,指定回答:就是指定回答内容"
+
+#: community/apps/application/swagger_api/application_api.py:403
+#: community/apps/function_lib/swagger_api/function_lib_api.py:216
+msgid "Upload image files"
+msgstr "上传图片文件"
+
+#: community/apps/application/swagger_api/application_api.py:434
+#: community/apps/application/swagger_api/application_api.py:435
+msgid "Text"
+msgstr "文本"
+
+#: community/apps/application/swagger_api/application_statistics_api.py:41
+#: community/apps/application/swagger_api/application_statistics_api.py:42
+#: community/apps/application/swagger_api/chat_api.py:490
+#: community/apps/application/swagger_api/chat_api.py:491
+msgid "Number of Likes"
+msgstr "点赞数"
+
+#: community/apps/application/swagger_api/application_statistics_api.py:44
+#: community/apps/application/swagger_api/chat_api.py:492
+#: community/apps/application/swagger_api/chat_api.py:493
+msgid "Number of thumbs-downs"
+msgstr "点踩数"
+
+#: community/apps/application/swagger_api/application_statistics_api.py:45
+#: community/apps/application/swagger_api/application_statistics_api.py:46
+msgid "Number of tokens used"
+msgstr "token使用数量"
+
+#: community/apps/application/swagger_api/application_statistics_api.py:47
+#: community/apps/application/swagger_api/application_statistics_api.py:48
+msgid "Number of conversations"
+msgstr "对话次数"
+
+#: community/apps/application/swagger_api/application_statistics_api.py:49
+#: community/apps/application/swagger_api/application_statistics_api.py:50
+msgid "Number of customers"
+msgstr "客户数量"
+
+#: community/apps/application/swagger_api/application_statistics_api.py:51
+#: community/apps/application/swagger_api/application_statistics_api.py:52
+msgid "Number of new customers"
+msgstr "客户新增数量"
+
+#: community/apps/application/swagger_api/application_statistics_api.py:54
+#: community/apps/application/swagger_api/application_statistics_api.py:69
+#: community/apps/application/swagger_api/application_statistics_api.py:70
+msgid "time"
+msgstr "日期"
+
+#: community/apps/application/swagger_api/application_statistics_api.py:55
+msgid "Time, this field is only available when querying trends"
+msgstr "日期,只有查询趋势的时候才有该字段"
+
+#: community/apps/application/swagger_api/application_statistics_api.py:66
+#: community/apps/application/swagger_api/application_statistics_api.py:83
+msgid "New quantity"
+msgstr "新增数量"
+
+#: community/apps/application/swagger_api/application_statistics_api.py:81
+#: community/apps/application/swagger_api/application_statistics_api.py:82
+msgid "Today's new quantity"
+msgstr "今日新增数量"
+
+#: community/apps/application/swagger_api/application_version_api.py:26
+#: community/apps/application/swagger_api/application_version_api.py:27
+msgid "Workflow data"
+msgstr "工作流数据"
+
+#: community/apps/application/swagger_api/application_version_api.py:61
+msgid "Application version id"
+msgstr "应用版本 id"
+
+#: community/apps/application/swagger_api/chat_api.py:61
+#: community/apps/application/swagger_api/chat_api.py:62
+#: community/apps/application/swagger_api/chat_api.py:92
+#: community/apps/dataset/serializers/problem_serializers.py:91
+msgid "problem"
+msgstr "问题"
+
+#: community/apps/application/swagger_api/chat_api.py:68
+msgid "Question content"
+msgstr "问题内容"
+
+#: community/apps/application/swagger_api/chat_api.py:72
+msgid "role"
+msgstr "角色"
+
+#: community/apps/application/swagger_api/chat_api.py:77
+#: community/apps/application/swagger_api/chat_api.py:93
+msgid "regenerate"
+msgstr "重新生成"
+
+#: community/apps/application/swagger_api/chat_api.py:79
+msgid "Stream Output"
+msgstr "流式输出"
+
+#: community/apps/application/swagger_api/chat_api.py:94
+msgid "Is it streaming output"
+msgstr "是否流式输出"
+
+#: community/apps/application/swagger_api/chat_api.py:96
+#: community/apps/application/swagger_api/chat_api.py:97
+#| msgid "Form Data"
+msgid "Form data"
+msgstr "表单数据"
+
+#: community/apps/application/swagger_api/chat_api.py:101
+#: community/apps/application/swagger_api/chat_api.py:102
+#| msgid "state list"
+msgid "Image list"
+msgstr "图片列表"
+
+#: community/apps/application/swagger_api/chat_api.py:107
+msgid "Image name"
+msgstr "图片名称"
+
+#: community/apps/application/swagger_api/chat_api.py:109
+msgid "Image URL"
+msgstr "图片地址"
+
+#: community/apps/application/swagger_api/chat_api.py:115
+#: community/apps/application/swagger_api/chat_api.py:116
+#: community/apps/dataset/views/document.py:133
+#: community/apps/dataset/views/document.py:134
+msgid "Document list"
+msgstr "文档列表"
+
+#: community/apps/application/swagger_api/chat_api.py:122
+msgid "Document name"
+msgstr "文档名称"
+
+#: community/apps/application/swagger_api/chat_api.py:124
+msgid "Document URL"
+msgstr "文档地址"
+
+#: community/apps/application/swagger_api/chat_api.py:129
+#: community/apps/application/swagger_api/chat_api.py:130
+#| msgid "id list"
+msgid "Audio list"
+msgstr "音频列表"
+
+#: community/apps/application/swagger_api/chat_api.py:135
+msgid "Audio name"
+msgstr "音频名称"
+
+#: community/apps/application/swagger_api/chat_api.py:137
+msgid "Audio URL"
+msgstr "音频地址"
+
+#: community/apps/application/swagger_api/chat_api.py:145
+#: community/apps/application/swagger_api/chat_api.py:146
+msgid "Node data"
+msgstr "节点数据"
+
+#: community/apps/application/swagger_api/chat_api.py:151
+#: community/apps/application/swagger_api/chat_api.py:152
+msgid "Child node"
+msgstr "子节点数据"
+
+#: community/apps/application/swagger_api/chat_api.py:173
+#: community/apps/application/swagger_api/chat_api.py:174
+msgid "Number of dialogue questions"
+msgstr "对话提问数量"
+
+#: community/apps/application/swagger_api/chat_api.py:176
+#: community/apps/application/swagger_api/chat_api.py:177
+msgid "Number of tags"
+msgstr "标记数量"
+
+#: community/apps/application/swagger_api/chat_api.py:178
+#: community/apps/application/swagger_api/chat_api.py:179
+#: community/apps/common/swagger_api/common_api.py:64
+#: community/apps/common/swagger_api/common_api.py:65
+#: community/apps/dataset/serializers/paragraph_serializers.py:711
+#: community/apps/dataset/serializers/paragraph_serializers.py:712
+msgid "Number of likes"
+msgstr "点赞数量"
+
+#: community/apps/application/swagger_api/chat_api.py:180
+#: community/apps/application/swagger_api/chat_api.py:181
+msgid "Number of clicks"
+msgstr "点踩数量"
+
+#: community/apps/application/swagger_api/chat_api.py:182
+#: community/apps/application/swagger_api/chat_api.py:183
+msgid "Change time"
+msgstr "修改时间"
+
+#: community/apps/application/swagger_api/chat_api.py:224
+msgid "Application ID, pass when modifying, do not pass when creating"
+msgstr "应用id,修改的时候传,创建的时候不传"
+
+#: community/apps/application/swagger_api/chat_api.py:225
+#: community/apps/application/swagger_api/chat_api.py:226
+msgid "Model ID"
+msgstr "模型 ID"
+
+#: community/apps/application/swagger_api/chat_api.py:232
+#: community/apps/application/swagger_api/chat_api.py:234
+msgid "Do you want to initiate multiple sessions"
+msgstr "是否开启多轮会话"
+
+#: community/apps/application/swagger_api/chat_api.py:237
+msgid "Problem optimization"
+msgstr "问题优化"
+
+#: community/apps/application/swagger_api/chat_api.py:238
+msgid "Do you want to enable problem optimization"
+msgstr "是否开启问题优化"
+
+#: community/apps/application/swagger_api/chat_api.py:254
+msgid "Historical days"
+msgstr "历史天数"
+
+#: community/apps/application/swagger_api/chat_api.py:262
+msgid "or|and comparator"
+msgstr "or|and 比较器"
+
+#: community/apps/application/swagger_api/chat_api.py:266
+#| msgid "Start time"
+msgid "start time"
+msgstr "开始时间"
+
+#: community/apps/application/swagger_api/chat_api.py:291
+msgid "Is it ascending order"
+msgstr "是否升序"
+
+#: community/apps/application/swagger_api/chat_api.py:304
+msgid "Session log id"
+msgstr "会话日志 id"
+
+#: community/apps/application/swagger_api/chat_api.py:305
+msgid "Conversation log id"
+msgstr "对话日志 ID"
+
+#: community/apps/application/swagger_api/chat_api.py:306
+#: community/apps/application/swagger_api/chat_api.py:307
+#: community/apps/application/swagger_api/chat_api.py:446
+msgid "Voting Status"
+msgstr "投票状态"
+
+#: community/apps/application/swagger_api/chat_api.py:308
+#: community/apps/application/swagger_api/chat_api.py:309
+msgid "Dataset id"
+msgstr "数据集 id"
+
+#: community/apps/application/swagger_api/chat_api.py:312
+#: community/apps/application/swagger_api/chat_api.py:313
+msgid "Resource ID"
+msgstr "资源 ID"
+
+#: community/apps/application/swagger_api/chat_api.py:314
+#: community/apps/application/swagger_api/chat_api.py:315
+msgid "Resource Type"
+msgstr "资源类型"
+
+#: community/apps/application/swagger_api/chat_api.py:317
+#: community/apps/application/swagger_api/chat_api.py:318
+msgid "Number of tokens consumed by the question"
+msgstr "问题消耗 token 数量"
+
+#: community/apps/application/swagger_api/chat_api.py:320
+#: community/apps/application/swagger_api/chat_api.py:321
+msgid "The number of tokens consumed by the answer"
+msgstr "答案消耗 token 数量"
+
+#: community/apps/application/swagger_api/chat_api.py:324
+#: community/apps/application/swagger_api/chat_api.py:325
+msgid "Improved annotation list"
+msgstr "改进标注列表"
+
+#: community/apps/application/swagger_api/chat_api.py:328
+msgid "Corresponding session Corresponding subscript"
+msgstr "对应会话对应下标"
+
+#: community/apps/application/swagger_api/chat_api.py:329
+msgid "Corresponding session id corresponding subscript"
+msgstr "对应会话id对应下标"
+
+#: community/apps/application/swagger_api/chat_api.py:397
+#: community/apps/application/swagger_api/chat_api.py:398
+msgid "Conversation id list"
+msgstr "会话 id 列表"
+
+#: community/apps/application/swagger_api/chat_api.py:447
+msgid "-1: Cancel vote | 0: Agree | 1: Oppose"
+msgstr "-1:取消投票|0:赞同|1:反对"
+
+#: community/apps/application/swagger_api/chat_api.py:485
+#: community/apps/application/swagger_api/chat_api.py:486
+#: community/apps/common/swagger_api/common_api.py:59
+#: community/apps/common/swagger_api/common_api.py:60
+#: community/apps/dataset/serializers/paragraph_serializers.py:687
+#: community/apps/dataset/serializers/paragraph_serializers.py:707
+#: community/apps/dataset/serializers/paragraph_serializers.py:708
+msgid "title"
+msgstr "标题"
+
+#: community/apps/application/swagger_api/chat_api.py:486
+#: community/apps/common/swagger_api/common_api.py:60
+msgid "Description of xxx"
+msgstr "xxx 描述"
+
+#: community/apps/application/swagger_api/chat_api.py:487
+#: community/apps/application/swagger_api/chat_api.py:488
+#: community/apps/common/swagger_api/common_api.py:61
+#: community/apps/common/swagger_api/common_api.py:62
+msgid "Number of hits"
+msgstr "命中数量"
+
+#: community/apps/application/views/application_version_views.py:28
+#: community/apps/application/views/application_version_views.py:29
+#: community/apps/application/views/application_views.py:489
+#: community/apps/application/views/application_views.py:490
+msgid "Get the application list"
+msgstr "获取应用列表"
+
+#: community/apps/application/views/application_version_views.py:32
+#: community/apps/application/views/application_version_views.py:50
+#: community/apps/application/views/application_version_views.py:68
+#: community/apps/application/views/application_version_views.py:83
+msgid "Application/Version"
+msgstr "应用/版本"
+
+#: community/apps/application/views/application_version_views.py:45
+#: community/apps/application/views/application_version_views.py:46
+msgid "Get the list of application versions by page"
+msgstr "获取应用版本列表分页"
+
+#: community/apps/application/views/application_version_views.py:64
+#: community/apps/application/views/application_version_views.py:65
+msgid "Get application version details"
+msgstr "获取应用版本详情"
+
+#: community/apps/application/views/application_version_views.py:78
+#: community/apps/application/views/application_version_views.py:79
+msgid "Modify application version information"
+msgstr "修改应用版本信息"
+
+#: community/apps/application/views/application_views.py:42
+#: community/apps/application/views/application_views.py:43
+msgid "User Statistics"
+msgstr "用户统计"
+
+#: community/apps/application/views/application_views.py:44
+#: community/apps/application/views/application_views.py:70
+#: community/apps/application/views/application_views.py:95
+#: community/apps/application/views/application_views.py:121
+msgid "Application/Statistics"
+msgstr "应用/统计"
+
+#: community/apps/application/views/application_views.py:68
+#: community/apps/application/views/application_views.py:69
+msgid "User demographic trends"
+msgstr "用户统计趋势"
+
+#: community/apps/application/views/application_views.py:93
+#: community/apps/application/views/application_views.py:94
+msgid "Conversation statistics"
+msgstr "对话相关统计"
+
+#: community/apps/application/views/application_views.py:119
+#: community/apps/application/views/application_views.py:120
+msgid "Dialogue-related statistical trends"
+msgstr "对话相关统计趋势"
+
+#: community/apps/application/views/application_views.py:150
+#: community/apps/application/views/application_views.py:151
+msgid "Modify application icon"
+msgstr "修改应用图标"
+
+#: community/apps/application/views/application_views.py:152
+#: community/apps/application/views/application_views.py:175
+#: community/apps/application/views/application_views.py:189
+#: community/apps/application/views/application_views.py:202
+#: community/apps/application/views/application_views.py:216
+#: community/apps/application/views/application_views.py:236
+#: community/apps/application/views/application_views.py:255
+#: community/apps/application/views/application_views.py:274
+#: community/apps/application/views/application_views.py:313
+#: community/apps/application/views/application_views.py:482
+#: community/apps/application/views/application_views.py:493
+#: community/apps/application/views/application_views.py:508
+#: community/apps/application/views/application_views.py:535
+#: community/apps/application/views/application_views.py:555
+#: community/apps/application/views/application_views.py:575
+#: community/apps/application/views/application_views.py:593
+#: community/apps/application/views/application_views.py:614
+#: community/apps/application/views/application_views.py:635
+#: community/apps/application/views/application_views.py:670
+msgid "Application"
+msgstr "应用"
+
+#: community/apps/application/views/application_views.py:173
+msgid "Import Application"
+msgstr "导入应用"
+
+#: community/apps/application/views/application_views.py:187
+msgid "Export Application"
+msgstr "导出应用"
+
+#: community/apps/application/views/application_views.py:200
+#: community/apps/application/views/application_views.py:201
+msgid "Get embedded js"
+msgstr "获取嵌入 js"
+
+#: community/apps/application/views/application_views.py:214
+#: community/apps/application/views/application_views.py:215
+msgid "Get a list of models"
+msgstr "获取模型列表"
+
+#: community/apps/application/views/application_views.py:234
+#: community/apps/application/views/application_views.py:235
+#: community/apps/setting/views/model.py:100
+#: community/apps/setting/views/model.py:101
+msgid "Get model parameter form"
+msgstr "获取模型参数表单"
+
+#: community/apps/application/views/application_views.py:253
+#: community/apps/application/views/application_views.py:254
+msgid "Get a list of function libraries"
+msgstr "获取函数库列表"
+
+#: community/apps/application/views/application_views.py:272
+#: community/apps/application/views/application_views.py:273
+msgid "Get library details"
+msgstr "获取函数库详情"
+
+#: community/apps/application/views/application_views.py:292
+#: community/apps/application/views/application_views.py:293
+msgid "Get the list of apps created by the current user"
+msgstr "获取当前用户创建的应用列表"
+
+#: community/apps/application/views/application_views.py:294
+#: community/apps/application/views/application_views.py:333
+#: community/apps/application/views/chat_views.py:74
+#: community/apps/application/views/chat_views.py:93
+#: community/apps/application/views/chat_views.py:105
+#: community/apps/application/views/chat_views.py:118
+#: community/apps/application/views/chat_views.py:347
+msgid "Application/Chat"
+msgstr "应用/对话"
+
+#: community/apps/application/views/application_views.py:311
+#: community/apps/application/views/application_views.py:312
+msgid "Get application data"
+msgstr "获取应用数据"
+
+#: community/apps/application/views/application_views.py:331
+#: community/apps/application/views/application_views.py:332
+msgid "Get application related information"
+msgstr "获取应用相关信息"
+
+#: community/apps/application/views/application_views.py:346
+#: community/apps/application/views/application_views.py:347
+msgid "Add ApiKey"
+msgstr "添加 ApiKey"
+
+#: community/apps/application/views/application_views.py:348
+#: community/apps/application/views/application_views.py:364
+#: community/apps/application/views/application_views.py:383
+#: community/apps/application/views/application_views.py:402
+msgid "Application/API_KEY"
+msgstr "应用/API_KEY"
+
+#: community/apps/application/views/application_views.py:362
+#: community/apps/application/views/application_views.py:363
+msgid "Get the application API_KEY list"
+msgstr "获取应用 API_KEY 列表"
+
+#: community/apps/application/views/application_views.py:381
+#: community/apps/application/views/application_views.py:382
+msgid "Modify application API_KEY"
+msgstr "修改应用 API_KEY"
+
+#: community/apps/application/views/application_views.py:400
+#: community/apps/application/views/application_views.py:401
+msgid "Delete Application API_KEY"
+msgstr "删除应用 API_KEY"
+
+#: community/apps/application/views/application_views.py:421
+#: community/apps/application/views/application_views.py:422
+msgid "Modify Application AccessToken"
+msgstr "修改应用访问限制"
+
+#: community/apps/application/views/application_views.py:423
+#: community/apps/application/views/application_views.py:441
+msgid "Application/Public Access"
+msgstr "应用/公共访问"
+
+#: community/apps/application/views/application_views.py:438
+#: community/apps/application/views/application_views.py:439
+msgid "Get the application AccessToken information"
+msgstr "获取应用 AccessToken 信息"
+
+#: community/apps/application/views/application_views.py:462
+#: community/apps/application/views/application_views.py:463
+msgid "Application Certification"
+msgstr "应用认证"
+
+#: community/apps/application/views/application_views.py:465
+msgid "Application/Certification"
+msgstr "应用/认证"
+
+#: community/apps/application/views/application_views.py:479
+#: community/apps/application/views/application_views.py:480
+msgid "Create an application"
+msgstr "创建应用"
+
+#: community/apps/application/views/application_views.py:505
+msgid "Hit Test List"
+msgstr "命中测试列表"
+
+#: community/apps/application/views/application_views.py:530
+#: community/apps/application/views/application_views.py:531
+msgid "Publishing an application"
+msgstr "发布应用"
+
+#: community/apps/application/views/application_views.py:551
+#: community/apps/application/views/application_views.py:552
+msgid "Deleting application"
+msgstr "删除应用"
+
+#: community/apps/application/views/application_views.py:570
+#: community/apps/application/views/application_views.py:571
+msgid "Modify the application"
+msgstr "修改应用"
+
+#: community/apps/application/views/application_views.py:589
+#: community/apps/application/views/application_views.py:590
+msgid "Get application details"
+msgstr "获取应用详情"
+
+#: community/apps/application/views/application_views.py:609
+#: community/apps/application/views/application_views.py:610
+msgid "Get the knowledge base available to the current application"
+msgstr "获取当前应用可用的知识库"
+
+#: community/apps/application/views/application_views.py:630
+#: community/apps/application/views/application_views.py:631
+msgid "Get the application list by page"
+msgstr "获取应用列表分页"
+
+#: community/apps/application/views/application_views.py:665
+#: community/apps/application/views/application_views.py:666
+msgid "text to speech"
+msgstr "文本转语音"
+
+#: community/apps/application/views/chat_views.py:36
+#: community/apps/application/views/chat_views.py:37
+msgid "OpenAI Interface Dialogue"
+msgstr "openai接口对话"
+
+#: community/apps/application/views/chat_views.py:39
+msgid "OpenAI Dialogue"
+msgstr "openai对话"
+
+#: community/apps/application/views/chat_views.py:52
+#: community/apps/application/views/chat_views.py:53
+msgid "Export conversation"
+msgstr "导出对话"
+
+#: community/apps/application/views/chat_views.py:55
+#: community/apps/application/views/chat_views.py:156
+#: community/apps/application/views/chat_views.py:174
+#: community/apps/application/views/chat_views.py:197
+#: community/apps/application/views/chat_views.py:217
+#: community/apps/application/views/chat_views.py:235
+#: community/apps/application/views/chat_views.py:257
+#: community/apps/application/views/chat_views.py:282
+#: community/apps/application/views/chat_views.py:302
+#: community/apps/application/views/chat_views.py:324
+#: community/apps/application/views/chat_views.py:489
+msgid "Application/Conversation Log"
+msgstr "应用/对话日志"
+
+#: community/apps/application/views/chat_views.py:71
+#: community/apps/application/views/chat_views.py:72
+msgid "Get the session id according to the application id"
+msgstr "获取应用id对应的会话id"
+
+#: community/apps/application/views/chat_views.py:90
+#: community/apps/application/views/chat_views.py:91
+msgid "Get the workflow temporary session id"
+msgstr "获取工作流临时会话id"
+
+#: community/apps/application/views/chat_views.py:102
+#: community/apps/application/views/chat_views.py:103
+msgid "Get a temporary session id"
+msgstr "获取临时会话id"
+
+#: community/apps/application/views/chat_views.py:115
+#: community/apps/application/views/chat_views.py:116
+msgid "dialogue"
+msgstr "对话"
+
+#: community/apps/application/views/chat_views.py:152
+#: community/apps/application/views/chat_views.py:153
+msgid "Get the conversation list"
+msgstr "获取对话列表"
+
+#: community/apps/application/views/chat_views.py:172
+#: community/apps/application/views/chat_views.py:173
+msgid "Delete a conversation"
+msgstr "删除对话"
+
+#: community/apps/application/views/chat_views.py:192
+#: community/apps/application/views/chat_views.py:193
+msgid "Get client conversation list by paging"
+msgstr "获取客户对话列表分页"
+
+#: community/apps/application/views/chat_views.py:215
+#: community/apps/application/views/chat_views.py:216
+msgid "Client deletes conversation"
+msgstr "客户端删除对话"
+
+#: community/apps/application/views/chat_views.py:232
+#: community/apps/application/views/chat_views.py:233
+msgid "Client modifies dialogue summary"
+msgstr "客户端修改对话摘要"
+
+#: community/apps/application/views/chat_views.py:253
+#: community/apps/application/views/chat_views.py:254
+msgid "Get the conversation list by page"
+msgstr "获取对话列表分页"
+
+#: community/apps/application/views/chat_views.py:278
+#: community/apps/application/views/chat_views.py:279
+msgid "Get conversation record details"
+msgstr "获取对话记录详情"
+
+#: community/apps/application/views/chat_views.py:298
+#: community/apps/application/views/chat_views.py:299
+msgid "Get a list of conversation records"
+msgstr "获取对话记录列表"
+
+#: community/apps/application/views/chat_views.py:319
+#: community/apps/application/views/chat_views.py:320
+msgid "Get the conversation history list by page"
+msgstr "获取对话历史列表分页"
+
+#: community/apps/application/views/chat_views.py:342
+#: community/apps/application/views/chat_views.py:343
+msgid "Like, Dislike"
+msgstr "点赞,点踩"
+
+#: community/apps/application/views/chat_views.py:365
+#: community/apps/application/views/chat_views.py:366
+msgid "Get the list of marked paragraphs"
+msgstr "获取标记段落列表"
+
+#: community/apps/application/views/chat_views.py:369
+#: community/apps/application/views/chat_views.py:390
+#: community/apps/application/views/chat_views.py:442
+msgid "Application/Conversation Log/Annotation"
+msgstr "应用/对话日志/标注"
+
+#: community/apps/application/views/chat_views.py:412
+#: community/apps/application/views/chat_views.py:413
+msgid "Add to Knowledge Base"
+msgstr "添加到知识库"
+
+#: community/apps/application/views/chat_views.py:416
+msgid "Application/Conversation Log/Add to Knowledge Base"
+msgstr "应用/对话日志/添加到知识库"
+
+#: community/apps/application/views/chat_views.py:438
+#: community/apps/application/views/chat_views.py:439
+msgid "Delete a Annotation"
+msgstr "删除标注"
+
+#: community/apps/application/views/chat_views.py:487
+#: community/apps/dataset/views/file.py:28
+#: community/apps/dataset/views/file.py:29
+#: community/apps/dataset/views/file.py:34
+msgid "Upload file"
+msgstr "上传文件"
+
+#: community/apps/common/auth/authenticate.py:62
+#: community/apps/common/auth/authenticate.py:83
+msgid "Not logged in, please log in first"
+msgstr "未登录,请先登录"
+
+#: community/apps/common/auth/authenticate.py:68
+#: community/apps/common/auth/authenticate.py:74
+#: community/apps/common/auth/authenticate.py:89
+#: community/apps/common/auth/authenticate.py:95
+msgid "Authentication information is incorrect! illegal user"
+msgstr "非法用户!认证信息不正确"
+
+#: community/apps/common/auth/authentication.py:94
+msgid "No permission to access"
+msgstr "没有权限访问"
+
+#: community/apps/common/auth/handle/impl/application_key.py:23
+#: community/apps/common/auth/handle/impl/application_key.py:25
+msgid "Secret key is invalid"
+msgstr "secret key无效"
+
+#: community/apps/common/auth/handle/impl/public_access_token.py:48
+#: community/apps/common/auth/handle/impl/public_access_token.py:50
+#: community/apps/common/auth/handle/impl/public_access_token.py:52
+#: community/apps/common/auth/handle/impl/public_access_token.py:54
+msgid "Authentication information is incorrect"
+msgstr "认证信息不正确"
+
+#: community/apps/common/auth/handle/impl/user_token.py:34
+msgid "Login expired"
+msgstr "登录过期"
+
+#: community/apps/common/constants/exception_code_constants.py:31
+msgid "The username or password is incorrect"
+msgstr "用户名或密码错误"
+
+#: community/apps/common/constants/exception_code_constants.py:32
+msgid "Please log in first and bring the user Token"
+msgstr "请先登录并携带用户Token"
+
+#: community/apps/common/constants/exception_code_constants.py:33
+#: community/apps/users/serializers/user_serializers.py:429
+msgid "Email sending failed"
+msgstr "邮箱发送失败"
+
+#: community/apps/common/constants/exception_code_constants.py:34
+msgid "Email format error"
+msgstr "邮箱格式错误"
+
+#: community/apps/common/constants/exception_code_constants.py:35
+msgid "The email has been registered, please log in directly"
+msgstr "邮箱已注册,请直接登录"
+
+#: community/apps/common/constants/exception_code_constants.py:36
+msgid "The email is not registered, please register first"
+msgstr "邮箱未注册,请先注册"
+
+#: community/apps/common/constants/exception_code_constants.py:38
+msgid "The verification code is incorrect or the verification code has expired"
+msgstr "验证码错误或验证码已过期"
+
+#: community/apps/common/constants/exception_code_constants.py:39
+msgid "The username has been registered, please log in directly"
+msgstr "用户名已注册,请直接登录"
+
+#: community/apps/common/constants/exception_code_constants.py:41
+msgid ""
+"The username cannot be empty and must be between 6 and 20 characters long."
+msgstr "用户名不能为空,且长度必须在6-20个字符之间。"
+
+#: community/apps/common/constants/exception_code_constants.py:43
+msgid "Password and confirmation password are inconsistent"
+msgstr "密码和确认密码不一致"
+
+#: community/apps/common/constants/permission_constants.py:61
+msgid "ADMIN"
+msgstr "管理员"
+
+#: community/apps/common/constants/permission_constants.py:61
+msgid "Admin, prefabs are not currently used"
+msgstr "管理员,预制目前不会使用"
+
+#: community/apps/common/constants/permission_constants.py:62
+msgid "USER"
+msgstr "用户"
+
+#: community/apps/common/constants/permission_constants.py:62
+msgid "All user permissions"
+msgstr "所有用户权限"
+
+#: community/apps/common/constants/permission_constants.py:63
+msgid "chat"
+msgstr "对话"
+
+#: community/apps/common/constants/permission_constants.py:63
+msgid "Only has application dialog interface permissions"
+msgstr "只拥有应用对话接口权限"
+
+#: community/apps/common/constants/permission_constants.py:64
+msgid "Apply private key"
+msgstr "应用私钥"
+
+#: community/apps/common/event/__init__.py:30
+msgid "The download process was interrupted, please try again"
+msgstr "下载过程中断,请重试"
+
+#: community/apps/common/event/listener_manage.py:91
+#, python-brace-format
+msgid "Query vector data: {paragraph_id_list} error {error} {traceback}"
+msgstr "向量数据查询: {paragraph_id_list} 错误 {error} {traceback}"
+
+#: community/apps/common/event/listener_manage.py:96
+#, python-brace-format
+msgid "Start--->Embedding paragraph: {paragraph_id_list}"
+msgstr "开始--->嵌入段落: {paragraph_id_list}"
+
+#: community/apps/common/event/listener_manage.py:108
+#, python-brace-format
+msgid "Vectorized paragraph: {paragraph_id_list} error {error} {traceback}"
+msgstr "向量化段落: {paragraph_id_list} 错误 {error} {traceback}"
+
+#: community/apps/common/event/listener_manage.py:114
+#, python-brace-format
+msgid "End--->Embedding paragraph: {paragraph_id_list}"
+msgstr "结束--->嵌入段落: {paragraph_id_list}"
+
+#: community/apps/common/event/listener_manage.py:123
+#, python-brace-format
+msgid "Start--->Embedding paragraph: {paragraph_id}"
+msgstr "开始--->嵌入段落: {paragraph_id}"
+
+#: community/apps/common/event/listener_manage.py:148
+#, python-brace-format
+msgid "Vectorized paragraph: {paragraph_id} error {error} {traceback}"
+msgstr "向量化段落: {paragraph_id} 错误 {error} {traceback}"
+
+#: community/apps/common/event/listener_manage.py:153
+#, python-brace-format
+msgid "End--->Embedding paragraph: {paragraph_id}"
+msgstr "结束--->嵌入段落: {paragraph_id}"
+
+#: community/apps/common/event/listener_manage.py:269
+#, python-brace-format
+msgid "Start--->Embedding document: {document_id}"
+msgstr "开始--->嵌入文档: {document_id}"
+
+#: community/apps/common/event/listener_manage.py:291
+#, python-brace-format
+msgid "Vectorized document: {document_id} error {error} {traceback}"
+msgstr "向量化文档: {document_id} 错误 {error} {traceback}"
+
+#: community/apps/common/event/listener_manage.py:296
+#, python-brace-format
+msgid "End--->Embedding document: {document_id}"
+msgstr "结束--->嵌入文档: {document_id}"
+
+#: community/apps/common/event/listener_manage.py:307
+#, python-brace-format
+msgid "Start--->Embedding dataset: {dataset_id}"
+msgstr "开始--->嵌入知识库: {dataset_id}"
+
+#: community/apps/common/event/listener_manage.py:311
+#, python-brace-format
+msgid "Start--->Embedding document: {document_list}"
+msgstr "开始--->嵌入文档: {document_list}"
+
+#: community/apps/common/event/listener_manage.py:315
+#: community/apps/embedding/task/embedding.py:123
+#, python-brace-format
+msgid "Vectorized dataset: {dataset_id} error {error} {traceback}"
+msgstr "向量化知识库: {dataset_id} 错误 {error} {traceback}"
+
+#: community/apps/common/event/listener_manage.py:318
+#, python-brace-format
+msgid "End--->Embedding dataset: {dataset_id}"
+msgstr "结束--->嵌入知识库: {dataset_id}"
+
+#: community/apps/common/field/common.py:45
+msgid "not a function"
+msgstr "不是函数"
+
+#: community/apps/common/forms/base_field.py:64
+#, python-brace-format
+msgid "The field {field_label} is required"
+msgstr "字段 {field_label} 是必填的"
+
+#: community/apps/common/forms/slider_field.py:56
+#, python-brace-format
+msgid "The {field_label} cannot be less than {min}"
+msgstr "字段 {field_label} 不能小于 {min}"
+
+#: community/apps/common/forms/slider_field.py:62
+#, python-brace-format
+msgid "The {field_label} cannot be greater than {max}"
+msgstr "{field_label} 不能大于 {max}"
+
+#: community/apps/common/handle/handle_exception.py:30
+msgid "Unknown exception"
+msgstr "未知异常"
+
+#: community/apps/common/handle/impl/pdf_split_handle.py:278
+#, python-brace-format
+msgid "This document has no preface and is treated as ordinary text: {e}"
+msgstr "文档没有前言,视为普通文本: {e}"
+
+#: community/apps/common/init/init_doc.py:26
+#: community/apps/common/init/init_doc.py:45
+msgid "Intelligent customer service platform"
+msgstr "智能客服平台"
+
+#: community/apps/common/job/clean_chat_job.py:25
+msgid "start clean chat log"
+msgstr "开始清理对话日志"
+
+#: community/apps/common/job/clean_chat_job.py:71
+msgid "end clean chat log"
+msgstr "结束清理对话日志"
+
+#: community/apps/common/job/clean_debug_file_job.py:21
+msgid "start clean debug file"
+msgstr "开始清理调试文件"
+
+#: community/apps/common/job/clean_debug_file_job.py:25
+msgid "end clean debug file"
+msgstr "结束清理调试文件"
+
+#: community/apps/common/job/client_access_num_job.py:25
+msgid "start reset access_num"
+msgstr "开始重置访问次数"
+
+#: community/apps/common/job/client_access_num_job.py:27
+msgid "end reset access_num"
+msgstr "结束重置访问次数"
+
+#: community/apps/common/log/log.py:37
+msgid "unknown"
+msgstr "未知的"
+
+#: community/apps/common/response/result.py:24
+msgid "Success"
+msgstr "成功"
+
+#: community/apps/common/response/result.py:36
+#: community/apps/common/response/result.py:80
+#: community/apps/common/response/result.py:82
+msgid "current page"
+msgstr "当前页"
+
+#: community/apps/common/response/result.py:42
+#: community/apps/common/response/result.py:85
+#: community/apps/common/response/result.py:87
+msgid "page size"
+msgstr "每页数量"
+
+#: community/apps/common/response/result.py:53
+#: community/apps/common/response/result.py:101
+#: community/apps/common/response/result.py:130
+msgid "response parameters"
+msgstr "响应参数"
+
+#: community/apps/common/response/result.py:59
+#: community/apps/common/response/result.py:107
+#: community/apps/common/response/result.py:136
+msgid "response code"
+msgstr "响应码"
+
+#: community/apps/common/response/result.py:61
+#: community/apps/common/response/result.py:109
+#: community/apps/common/response/result.py:138
+msgid "success:200 fail:other"
+msgstr "成功:200 失败:其他"
+
+#: community/apps/common/response/result.py:64
+#: community/apps/common/response/result.py:112
+#: community/apps/common/response/result.py:141
+msgid "prompt"
+msgstr "提示"
+
+#: community/apps/common/response/result.py:65
+#: community/apps/common/response/result.py:113
+#: community/apps/common/response/result.py:142
+msgid "success"
+msgstr "成功"
+
+#: community/apps/common/response/result.py:66
+#: community/apps/common/response/result.py:114
+#: community/apps/common/response/result.py:143
+msgid "error prompt"
+msgstr "错误提示"
+
+#: community/apps/common/response/result.py:72
+#: community/apps/common/response/result.py:74
+msgid "total number of data"
+msgstr "总条数"
+
+#: community/apps/common/swagger_api/common_api.py:24
+#: community/apps/dataset/serializers/dataset_serializers.py:569
+msgid "query text"
+msgstr "查询文本"
+
+#: community/apps/common/swagger_api/common_api.py:42
+msgid "Retrieval pattern embedding|keywords|blend"
+msgstr "检索模式 embedding|keywords|blend"
+
+#: community/apps/common/swagger_api/common_api.py:66
+#: community/apps/common/swagger_api/common_api.py:67
+msgid "Number of clicks and dislikes"
+msgstr "点踩数"
+
+#: community/apps/common/swagger_api/common_api.py:74
+#: community/apps/common/swagger_api/common_api.py:75
+msgid "relevance score"
+msgstr "相关性得分"
+
+#: community/apps/common/swagger_api/common_api.py:76
+#: community/apps/common/swagger_api/common_api.py:77
+msgid "Comprehensive score, used for ranking"
+msgstr "综合得分,用于排序"
+
+#: community/apps/common/swagger_api/common_api.py:78
+#: community/apps/common/swagger_api/common_api.py:79
+#: community/apps/users/serializers/user_serializers.py:591
+#: community/apps/users/serializers/user_serializers.py:592
+msgid "Update time"
+msgstr "更新时间"
+
+#: community/apps/common/swagger_api/common_api.py:81
+#: community/apps/common/swagger_api/common_api.py:82
+#: community/apps/users/serializers/user_serializers.py:589
+#: community/apps/users/serializers/user_serializers.py:590
+msgid "Create time"
+msgstr "创建时间"
+
+#: community/apps/common/util/common.py:239
+msgid "Text-to-speech node, the text content must be of string type"
+msgstr "文本转语音节点,文本内容必须是字符串类型"
+
+#: community/apps/common/util/common.py:241
+msgid "Text-to-speech node, the text content cannot be empty"
+msgstr "文本转语音节点,文本内容不能为空"
+
+#: community/apps/dataset/serializers/common_serializers.py:87
+msgid "source url"
+msgstr "文档地址"
+
+#: community/apps/dataset/serializers/common_serializers.py:89
+#: community/apps/dataset/serializers/dataset_serializers.py:333
+#: community/apps/dataset/serializers/dataset_serializers.py:390
+#: community/apps/dataset/serializers/dataset_serializers.py:391
+#: community/apps/dataset/serializers/document_serializers.py:155
+#: community/apps/dataset/serializers/document_serializers.py:181
+msgid "selector"
+msgstr "选择器"
+
+#: community/apps/dataset/serializers/common_serializers.py:96
+#: community/apps/dataset/serializers/dataset_serializers.py:341
+#, python-brace-format
+msgid "URL error, cannot parse [{source_url}]"
+msgstr "URL错误,无法解析 [{source_url}]"
+
+#: community/apps/dataset/serializers/common_serializers.py:105
+#: community/apps/dataset/serializers/common_serializers.py:124
+#: community/apps/dataset/serializers/common_serializers.py:125
+#: community/apps/dataset/serializers/document_serializers.py:85
+#: community/apps/dataset/swagger_api/document_api.py:23
+#: community/apps/dataset/swagger_api/document_api.py:24
+#: community/apps/dataset/swagger_api/document_api.py:49
+#: community/apps/dataset/swagger_api/document_api.py:50
+msgid "id list"
+msgstr "id 列表"
+
+#: community/apps/dataset/serializers/common_serializers.py:115
+#, python-brace-format
+msgid "The following id does not exist: {error_id_list}"
+msgstr "id不存在: {error_id_list}"
+
+#: community/apps/dataset/serializers/common_serializers.py:183
+#: community/apps/dataset/serializers/common_serializers.py:207
+msgid "The knowledge base is inconsistent with the vector model"
+msgstr "知识库与向量模型不一致"
+
+#: community/apps/dataset/serializers/common_serializers.py:185
+#: community/apps/dataset/serializers/common_serializers.py:209
+msgid "Knowledge base setting error, please reset the knowledge base"
+msgstr "知识库设置错误,请重新设置知识库"
+
+#: community/apps/dataset/serializers/dataset_serializers.py:109
+#: community/apps/dataset/serializers/dataset_serializers.py:110
+#: community/apps/setting/serializers/model_apply_serializers.py:51
+msgid "model id"
+msgstr "模型 id"
+
+#: community/apps/dataset/serializers/dataset_serializers.py:112
+#: community/apps/dataset/serializers/dataset_serializers.py:114
+msgid "Whether to start multiple rounds of dialogue"
+msgstr "是否开启多轮对话"
+
+#: community/apps/dataset/serializers/dataset_serializers.py:115
+#: community/apps/dataset/serializers/dataset_serializers.py:116
+msgid "opening remarks"
+msgstr "开场白"
+
+#: community/apps/dataset/serializers/dataset_serializers.py:118
+msgid "example"
+msgstr "示例"
+
+#: community/apps/dataset/serializers/dataset_serializers.py:119
+msgid "User id"
+msgstr "用户 id"
+
+#: community/apps/dataset/serializers/dataset_serializers.py:121
+#: community/apps/dataset/serializers/dataset_serializers.py:122
+msgid "Whether to publish"
+msgstr "是否发布"
+
+#: community/apps/dataset/serializers/dataset_serializers.py:124
+#: community/apps/dataset/serializers/dataset_serializers.py:125
+#: community/apps/dataset/serializers/dataset_serializers.py:304
+#: community/apps/dataset/serializers/dataset_serializers.py:305
+#: community/apps/dataset/serializers/dataset_serializers.py:366
+#: community/apps/dataset/serializers/dataset_serializers.py:367
+#: community/apps/dataset/serializers/dataset_serializers.py:511
+#: community/apps/dataset/serializers/dataset_serializers.py:512
+#: community/apps/dataset/serializers/dataset_serializers.py:942
+#: community/apps/dataset/serializers/dataset_serializers.py:943
+#: community/apps/dataset/serializers/document_serializers.py:824
+#: community/apps/dataset/serializers/document_serializers.py:825
+#: community/apps/dataset/serializers/paragraph_serializers.py:200
+#: community/apps/dataset/serializers/paragraph_serializers.py:201
+#: community/apps/dataset/serializers/paragraph_serializers.py:724
+#: community/apps/dataset/serializers/paragraph_serializers.py:725
+#: community/apps/dataset/swagger_api/problem_api.py:33
+#: community/apps/dataset/swagger_api/problem_api.py:34
+#: community/apps/dataset/swagger_api/problem_api.py:135
+#: community/apps/dataset/swagger_api/problem_api.py:136
+#: community/apps/function_lib/swagger_api/function_lib_api.py:32
+#: community/apps/function_lib/swagger_api/function_lib_api.py:33
+msgid "create time"
+msgstr "创建时间"
+
+#: community/apps/dataset/serializers/dataset_serializers.py:127
+#: community/apps/dataset/serializers/dataset_serializers.py:128
+#: community/apps/dataset/serializers/dataset_serializers.py:301
+#: community/apps/dataset/serializers/dataset_serializers.py:302
+#: community/apps/dataset/serializers/dataset_serializers.py:363
+#: community/apps/dataset/serializers/dataset_serializers.py:364
+#: community/apps/dataset/serializers/dataset_serializers.py:508
+#: community/apps/dataset/serializers/dataset_serializers.py:509
+#: community/apps/dataset/serializers/dataset_serializers.py:939
+#: community/apps/dataset/serializers/dataset_serializers.py:940
+#: community/apps/dataset/serializers/document_serializers.py:821
+#: community/apps/dataset/serializers/document_serializers.py:822
+#: community/apps/dataset/serializers/paragraph_serializers.py:197
+#: community/apps/dataset/serializers/paragraph_serializers.py:198
+#: community/apps/dataset/serializers/paragraph_serializers.py:721
+#: community/apps/dataset/serializers/paragraph_serializers.py:722
+#: community/apps/dataset/swagger_api/problem_api.py:30
+#: community/apps/dataset/swagger_api/problem_api.py:31
+#: community/apps/dataset/swagger_api/problem_api.py:132
+#: community/apps/dataset/swagger_api/problem_api.py:133
+#: community/apps/function_lib/swagger_api/function_lib_api.py:34
+#: community/apps/function_lib/swagger_api/function_lib_api.py:35
+msgid "update time"
+msgstr "更新时间"
+
+#: community/apps/dataset/serializers/dataset_serializers.py:257
+#: community/apps/dataset/serializers/dataset_serializers.py:260
+#: community/apps/dataset/serializers/document_serializers.py:211
+#: community/apps/dataset/serializers/document_serializers.py:218
+#: community/apps/dataset/serializers/document_serializers.py:987
+#: community/apps/dataset/serializers/document_serializers.py:1016
+msgid "file list"
+msgstr "文件列表"
+
+#: community/apps/dataset/serializers/dataset_serializers.py:269
+msgid "upload files "
+msgstr "上传文件"
+
+#: community/apps/dataset/serializers/dataset_serializers.py:297
+#: community/apps/dataset/serializers/dataset_serializers.py:298
+#: community/apps/dataset/serializers/dataset_serializers.py:359
+#: community/apps/dataset/serializers/dataset_serializers.py:360
+#: community/apps/dataset/serializers/dataset_serializers.py:504
+#: community/apps/dataset/serializers/dataset_serializers.py:505
+#: community/apps/dataset/serializers/dataset_serializers.py:935
+#: community/apps/dataset/serializers/dataset_serializers.py:936
+#: community/apps/dataset/serializers/document_serializers.py:814
+#: community/apps/dataset/serializers/document_serializers.py:815
+msgid "char length"
+msgstr "字符长度"
+
+#: community/apps/dataset/serializers/dataset_serializers.py:299
+#: community/apps/dataset/serializers/dataset_serializers.py:300
+#: community/apps/dataset/serializers/dataset_serializers.py:361
+#: community/apps/dataset/serializers/dataset_serializers.py:362
+#: community/apps/dataset/serializers/dataset_serializers.py:506
+#: community/apps/dataset/serializers/dataset_serializers.py:507
+#: community/apps/dataset/serializers/dataset_serializers.py:937
+#: community/apps/dataset/serializers/dataset_serializers.py:938
+msgid "document count"
+msgstr "文档数量"
+
+#: community/apps/dataset/serializers/dataset_serializers.py:308
+#: community/apps/dataset/serializers/dataset_serializers.py:309
+#: community/apps/dataset/serializers/dataset_serializers.py:370
+#: community/apps/dataset/serializers/dataset_serializers.py:371
+#: community/apps/dataset/serializers/dataset_serializers.py:515
+#: community/apps/dataset/serializers/dataset_serializers.py:516
+#: community/apps/dataset/serializers/document_serializers.py:290
+#: community/apps/dataset/serializers/document_serializers.py:485
+msgid "document list"
+msgstr "文档列表"
+
+#: community/apps/dataset/serializers/dataset_serializers.py:327
+#: community/apps/dataset/serializers/dataset_serializers.py:388
+#: community/apps/dataset/serializers/dataset_serializers.py:389
+msgid "web source url"
+msgstr "web站点url"
+
+#: community/apps/dataset/serializers/dataset_serializers.py:414
+#: community/apps/setting/serializers/valid_serializers.py:26
+msgid ""
+"The community version supports up to 50 knowledge bases. If you need more "
+"knowledge bases, please contact us (https://fit2cloud.com/)."
+msgstr ""
+"社区版最多支持 50 个知识库,如需拥有更多知识库,请联系我们(https://"
+"fit2cloud.com/)。"
+
+#: community/apps/dataset/serializers/dataset_serializers.py:533
+#: community/apps/dataset/serializers/dataset_serializers.py:534
+msgid "documents"
+msgstr "文档"
+
+#: community/apps/dataset/serializers/dataset_serializers.py:577
+msgid "search mode"
+msgstr "搜索模式"
+
+#: community/apps/dataset/serializers/dataset_serializers.py:582
+#: community/apps/dataset/serializers/dataset_serializers.py:618
+#: community/apps/dataset/serializers/dataset_serializers.py:706
+msgid "id does not exist"
+msgstr "ID 不存在"
+
+#: community/apps/dataset/serializers/dataset_serializers.py:609
+msgid "sync type"
+msgstr "同步类型"
+
+#: community/apps/dataset/serializers/dataset_serializers.py:611
+msgid "The synchronization type only supports:replace|complete"
+msgstr "同步类型只支持:replace|complete"
+
+#: community/apps/dataset/serializers/dataset_serializers.py:620
+#: community/apps/dataset/serializers/document_serializers.py:499
+msgid "Synchronization is only supported for web site types"
+msgstr "只有web站点类型才支持同步"
+
+#: community/apps/dataset/serializers/dataset_serializers.py:694
+msgid ""
+"Synchronization type->replace: replacement synchronization, complete: "
+"complete synchronization"
+msgstr "同步类型->replace:替换同步,complete:完整同步"
+
+#: community/apps/dataset/serializers/dataset_serializers.py:803
+#: community/apps/dataset/serializers/document_serializers.py:748
+#: community/apps/setting/models_provider/tools.py:25
+msgid "No permission to use this model"
+msgstr "无权限使用该模型"
+
+#: community/apps/dataset/serializers/dataset_serializers.py:815
+msgid "Failed to send the vectorization task, please try again later!"
+msgstr "向量化任务发送失败,请稍后再试!"
+
+#: community/apps/dataset/serializers/dataset_serializers.py:911
+#: community/apps/dataset/serializers/document_serializers.py:846
+msgid "meta"
+msgstr "知识库元数据"
+
+#: community/apps/dataset/serializers/dataset_serializers.py:913
+msgid "Knowledge base metadata->web:{source_url:xxx,selector:'xxx'},base:{}"
+msgstr "知识库元数据->web:{source_url:xxx,selector:'xxx'},base:{}"
+
+#: community/apps/dataset/serializers/document_serializers.py:87
+#: community/apps/dataset/serializers/document_serializers.py:100
+#: community/apps/dataset/serializers/document_serializers.py:416
+#: community/apps/dataset/swagger_api/document_api.py:37
+#: community/apps/dataset/swagger_api/document_api.py:51
+msgid "task type"
+msgstr "任务类型"
+
+#: community/apps/dataset/serializers/document_serializers.py:95
+#: community/apps/dataset/serializers/document_serializers.py:108
+msgid "task type not support"
+msgstr "任务类型不支持"
+
+#: community/apps/dataset/serializers/document_serializers.py:115
+#: community/apps/dataset/serializers/document_serializers.py:188
+#: community/apps/dataset/serializers/document_serializers.py:200
+#: community/apps/dataset/serializers/document_serializers.py:201
+#: community/apps/dataset/serializers/document_serializers.py:412
+#: community/apps/dataset/serializers/document_serializers.py:476
+#: community/apps/dataset/serializers/document_serializers.py:836
+#: community/apps/dataset/serializers/document_serializers.py:837
+msgid "document name"
+msgstr "文档名称"
+
+#: community/apps/dataset/serializers/document_serializers.py:118
+msgid "The type only supports optimization|directly_return"
+msgstr "类型只支持 optimization|directly_return"
+
+#: community/apps/dataset/serializers/document_serializers.py:120
+#: community/apps/dataset/serializers/document_serializers.py:414
+#: community/apps/dataset/serializers/document_serializers.py:480
+#: community/apps/dataset/serializers/document_serializers.py:840
+#: community/apps/dataset/swagger_api/document_api.py:25
+msgid "hit handling method"
+msgstr "命中处理方法"
+
+#: community/apps/dataset/serializers/document_serializers.py:126
+#: community/apps/dataset/serializers/document_serializers.py:844
+#: community/apps/dataset/swagger_api/document_api.py:27
+msgid "directly return similarity"
+msgstr "直接返回相似度"
+
+#: community/apps/dataset/serializers/document_serializers.py:129
+#: community/apps/dataset/serializers/document_serializers.py:415
+msgid "document is active"
+msgstr "文档是否可用"
+
+#: community/apps/dataset/serializers/document_serializers.py:150
+#: community/apps/dataset/serializers/document_serializers.py:152
+msgid "document url list"
+msgstr "文档 url 列表"
+
+#: community/apps/dataset/serializers/document_serializers.py:178
+#: community/apps/dataset/serializers/document_serializers.py:179
+msgid "source url list"
+msgstr "文档地址列表"
+
+#: community/apps/dataset/serializers/document_serializers.py:202
+#: community/apps/dataset/serializers/document_serializers.py:203
+msgid "paragraphs"
+msgstr "段落"
+
+#: community/apps/dataset/serializers/document_serializers.py:227
+msgid "The template type only supports excel|csv"
+msgstr "模版类型只支持 excel|csv"
+
+#: community/apps/dataset/serializers/document_serializers.py:237
+msgid "Export template type csv|excel"
+msgstr "导出模版类型 csv|excel"
+
+#: community/apps/dataset/serializers/document_serializers.py:289
+#: community/apps/dataset/serializers/paragraph_serializers.py:304
+#: community/apps/dataset/serializers/paragraph_serializers.py:436
+msgid "target dataset id"
+msgstr "目标知识库 id"
+
+#: community/apps/dataset/serializers/document_serializers.py:391
+#: community/apps/dataset/serializers/paragraph_serializers.py:305
+#: community/apps/dataset/serializers/paragraph_serializers.py:441
+msgid "target document id"
+msgstr "目标文档 id"
+
+#: community/apps/dataset/serializers/document_serializers.py:399
+#: community/apps/dataset/serializers/document_serializers.py:400
+msgid "document id list"
+msgstr "文档 id 列表"
+
+#: community/apps/dataset/serializers/document_serializers.py:418
+msgid "order by"
+msgstr "排序"
+
+#: community/apps/dataset/serializers/document_serializers.py:653
+msgid "Section title (optional)"
+msgstr "分段标题(选填)"
+
+#: community/apps/dataset/serializers/document_serializers.py:654
+msgid ""
+"Section content (required, question answer, no more than 4096 characters)"
+msgstr "分段内容(必填,问题答案,最长不超过4096个字符)"
+
+#: community/apps/dataset/serializers/document_serializers.py:655
+msgid "Question (optional, one per line in the cell)"
+msgstr "问题(选填,单元格内一行一个)"
+
+#: community/apps/dataset/serializers/document_serializers.py:765
+msgid "The task is being executed, please do not send it repeatedly."
+msgstr "任务正在执行中,请勿重复发送"
+
+#: community/apps/dataset/serializers/document_serializers.py:842
+msgid "ai optimization: optimization, direct return: directly_return"
+msgstr "ai优化: optimization, 直接返回: directly_return"
+
+#: community/apps/dataset/serializers/document_serializers.py:848
+msgid "Document metadata->web:{source_url:xxx,selector:'xxx'},base:{}"
+msgstr "文档元数据->web:{source_url:xxx,selector:'xxx'},base:{}"
+
+#: community/apps/dataset/serializers/document_serializers.py:859
+msgid "dataset id not exist"
+msgstr "知识库 id 不存在"
+
+#: community/apps/dataset/serializers/document_serializers.py:990
+#: community/apps/dataset/serializers/document_serializers.py:1020
+msgid "limit"
+msgstr "分段长度"
+
+#: community/apps/dataset/serializers/document_serializers.py:994
+#: community/apps/dataset/serializers/document_serializers.py:996
+msgid "patterns"
+msgstr "分段标识列表"
+
+#: community/apps/dataset/serializers/document_serializers.py:999
+msgid "Auto Clean"
+msgstr "自动清洗"
+
+#: community/apps/dataset/serializers/document_serializers.py:1006
+msgid "The maximum size of the uploaded file cannot exceed 100MB"
+msgstr "文件上传最大大小不能超过100MB"
+
+#: community/apps/dataset/serializers/document_serializers.py:1025
+msgid "Segmented regular list"
+msgstr "分段正则列表"
+
+#: community/apps/dataset/serializers/document_serializers.py:1029
+#: community/apps/dataset/serializers/document_serializers.py:1030
+msgid "Whether to clear special characters"
+msgstr "是否清除特殊字符"
+
+#: community/apps/dataset/serializers/document_serializers.py:1049
+msgid "space"
+msgstr "空格"
+
+#: community/apps/dataset/serializers/document_serializers.py:1050
+msgid "semicolon"
+msgstr "分号"
+
+#: community/apps/dataset/serializers/document_serializers.py:1050
+msgid "comma"
+msgstr "逗号"
+
+#: community/apps/dataset/serializers/document_serializers.py:1051
+msgid "period"
+msgstr "句号"
+
+#: community/apps/dataset/serializers/document_serializers.py:1051
+msgid "enter"
+msgstr "回车"
+
+#: community/apps/dataset/serializers/document_serializers.py:1052
+msgid "blank line"
+msgstr "空行"
+
+#: community/apps/dataset/serializers/document_serializers.py:1165
+msgid "Hit handling method is required"
+msgstr "命中处理方式必填"
+
+#: community/apps/dataset/serializers/document_serializers.py:1167
+msgid "The hit processing method must be directly_return|optimization"
+msgstr "命中处理方式必须是 directly_return|optimization"
+
+#: community/apps/dataset/serializers/document_serializers.py:1213
+#: community/apps/dataset/serializers/paragraph_serializers.py:753
+msgid "The task is being executed, please do not send it again."
+msgstr "任务正在执行中,请勿重复发送"
+
+#: community/apps/dataset/serializers/file_serializers.py:82
+msgid "File not found"
+msgstr "文件不存在"
+
+#: community/apps/dataset/serializers/image_serializers.py:23
+msgid "image"
+msgstr "图片"
+
+#: community/apps/dataset/serializers/image_serializers.py:42
+msgid "Image not found"
+msgstr "图片不存在"
+
+#: community/apps/dataset/serializers/paragraph_serializers.py:52
+#: community/apps/dataset/serializers/paragraph_serializers.py:68
+#: community/apps/dataset/serializers/paragraph_serializers.py:69
+#: community/apps/dataset/serializers/paragraph_serializers.py:82
+#: community/apps/dataset/serializers/paragraph_serializers.py:85
+#: community/apps/dataset/serializers/paragraph_serializers.py:91
+#: community/apps/dataset/serializers/paragraph_serializers.py:93
+#: community/apps/dataset/serializers/paragraph_serializers.py:653
+msgid "section title"
+msgstr "段落标题"
+
+#: community/apps/dataset/serializers/paragraph_serializers.py:65
+#: community/apps/dataset/serializers/paragraph_serializers.py:66
+msgid "section content"
+msgstr "段落内容"
+
+#: community/apps/dataset/serializers/paragraph_serializers.py:73
+#: community/apps/dataset/serializers/paragraph_serializers.py:74
+#: community/apps/dataset/serializers/problem_serializers.py:88
+msgid "problem list"
+msgstr "问题列表"
+
+#: community/apps/dataset/serializers/paragraph_serializers.py:100
+#: community/apps/dataset/serializers/paragraph_serializers.py:172
+#: community/apps/dataset/serializers/paragraph_serializers.py:214
+#: community/apps/dataset/serializers/paragraph_serializers.py:276
+#: community/apps/dataset/serializers/paragraph_serializers.py:308
+#: community/apps/dataset/serializers/paragraph_serializers.py:456
+#: community/apps/dataset/serializers/paragraph_serializers.py:563
+#: community/apps/dataset/serializers/problem_serializers.py:57
+#: community/apps/dataset/swagger_api/problem_api.py:61
+msgid "paragraph id"
+msgstr "段落 id"
+
+#: community/apps/dataset/serializers/paragraph_serializers.py:105
+#: community/apps/dataset/serializers/paragraph_serializers.py:467
+msgid "Paragraph id does not exist"
+msgstr "段落 id 不存在"
+
+#: community/apps/dataset/serializers/paragraph_serializers.py:134
+msgid "Already associated, please do not associate again"
+msgstr "已经关联,请勿重复关联"
+
+#: community/apps/dataset/serializers/paragraph_serializers.py:191
+#: community/apps/dataset/serializers/paragraph_serializers.py:192
+msgid "question content"
+msgstr "问题内容"
+
+#: community/apps/dataset/serializers/paragraph_serializers.py:193
+#: community/apps/dataset/serializers/paragraph_serializers.py:709
+#: community/apps/dataset/swagger_api/problem_api.py:26
+msgid "hit num"
+msgstr "命中数量"
+
+#: community/apps/dataset/serializers/paragraph_serializers.py:210
+#: community/apps/dataset/serializers/paragraph_serializers.py:281
+#: community/apps/dataset/serializers/problem_serializers.py:39
+#: community/apps/dataset/serializers/problem_serializers.py:64
+#: community/apps/dataset/serializers/problem_serializers.py:194
+#: community/apps/dataset/swagger_api/problem_api.py:101
+msgid "problem id"
+msgstr "问题 id"
+
+#: community/apps/dataset/serializers/paragraph_serializers.py:222
+msgid "Paragraph does not exist"
+msgstr "段落不存在"
+
+#: community/apps/dataset/serializers/paragraph_serializers.py:224
+msgid "Problem does not exist"
+msgstr "问题不存在"
+
+#: community/apps/dataset/serializers/paragraph_serializers.py:306
+#: community/apps/dataset/serializers/paragraph_serializers.py:449
+#: community/apps/dataset/serializers/paragraph_serializers.py:450
+msgid "paragraph id list"
+msgstr "段落 id 列表"
+
+#: community/apps/dataset/serializers/paragraph_serializers.py:317
+msgid "The document to be migrated is consistent with the target document"
+msgstr "待迁移的文档与目标文档一致"
+
+#: community/apps/dataset/serializers/paragraph_serializers.py:319
+#, python-brace-format
+msgid "The document id does not exist [{document_id}]"
+msgstr "文档 id 不存在 [{document_id}]"
+
+#: community/apps/dataset/serializers/paragraph_serializers.py:323
+#, python-brace-format
+msgid "The target document id does not exist [{document_id}]"
+msgstr "目标文档 id 不存在 [{document_id}]"
+
+#: community/apps/dataset/serializers/paragraph_serializers.py:503
+msgid "Problem id does not exist"
+msgstr "问题 id 不存在"
+
+#: community/apps/dataset/serializers/paragraph_serializers.py:713
+#: community/apps/dataset/serializers/paragraph_serializers.py:714
+msgid "Number of dislikes"
+msgstr "点踩数量"
+
+#: community/apps/dataset/serializers/problem_serializers.py:50
+msgid "Issue ID is passed when modifying, not when creating."
+msgstr "问题 ID 在修改时传递,创建时不传递"
+
+#: community/apps/dataset/serializers/problem_serializers.py:62
+#: community/apps/dataset/swagger_api/problem_api.py:51
+#: community/apps/dataset/swagger_api/problem_api.py:52
+#: community/apps/dataset/swagger_api/problem_api.py:83
+#: community/apps/dataset/swagger_api/problem_api.py:84
+msgid "problem id list"
+msgstr "问题 id 列表"
+
+#: community/apps/dataset/swagger_api/document_api.py:38
+#: community/apps/dataset/swagger_api/document_api.py:52
+msgid "1|2|3 1:Vectorization|2:Generate issues|3:Synchronize documents"
+msgstr "1|2|3 1:向量化|2:生成问题|3:同步文档"
+
+#: community/apps/dataset/swagger_api/document_api.py:64
+#: community/apps/dataset/swagger_api/document_api.py:65
+msgid "state list"
+msgstr "状态列表"
+
+#: community/apps/dataset/swagger_api/image_api.py:22
+msgid "image file"
+msgstr "图片文件"
+
+#: community/apps/dataset/swagger_api/problem_api.py:54
+#: community/apps/dataset/swagger_api/problem_api.py:55
+msgid "Associated paragraph information list"
+msgstr "关联段落信息列表"
+
+#: community/apps/dataset/swagger_api/problem_api.py:131
+msgid "Hit num"
+msgstr "命中数量"
+
+#: community/apps/dataset/task/generate.py:95
+#, python-brace-format
+msgid ""
+"Generate issue based on document: {document_id} error {error}{traceback}"
+msgstr "基于文档生成问题: {document_id} 错误 {error}{traceback}"
+
+#: community/apps/dataset/task/generate.py:99
+#, python-brace-format
+msgid "End--->Generate problem: {document_id}"
+msgstr "结束--->生成问题: {document_id}"
+
+#: community/apps/dataset/task/sync.py:29
+#: community/apps/dataset/task/sync.py:43
+#, python-brace-format
+msgid "Start--->Start synchronization web knowledge base:{dataset_id}"
+msgstr "开始--->开始同步web知识库:{dataset_id}"
+
+#: community/apps/dataset/task/sync.py:34
+#: community/apps/dataset/task/sync.py:47
+#, python-brace-format
+msgid "End--->End synchronization web knowledge base:{dataset_id}"
+msgstr "结束--->结束同步web知识库:{dataset_id}"
+
+#: community/apps/dataset/task/sync.py:36
+#: community/apps/dataset/task/sync.py:49
+#, python-brace-format
+msgid "Synchronize web knowledge base:{dataset_id} error{error}{traceback}"
+msgstr "同步web知识库:{dataset_id} 错误{error}{traceback}"
+
+#: community/apps/dataset/task/tools.py:114
+#, python-brace-format
+msgid "Association problem failed {error}"
+msgstr "关联问题失败 {error}"
+
+#: community/apps/dataset/views/dataset.py:35
+#: community/apps/dataset/views/dataset.py:36
+msgid "Synchronize the knowledge base of the website"
+msgstr "同步Web站点知识库"
+
+#: community/apps/dataset/views/dataset.py:57
+#: community/apps/dataset/views/dataset.py:58
+msgid "Create QA knowledge base"
+msgstr "创建QA知识库"
+
+#: community/apps/dataset/views/dataset.py:77
+#: community/apps/dataset/views/dataset.py:78
+msgid "Create a web site knowledge base"
+msgstr "创建web站点知识库"
+
+#: community/apps/dataset/views/dataset.py:93
+#: community/apps/dataset/views/dataset.py:94
+msgid "Get a list of applications available in the knowledge base"
+msgstr "获取知识库中可用的应用列表"
+
+#: community/apps/dataset/views/dataset.py:105
+#: community/apps/dataset/views/dataset.py:106
+msgid "Get a list of knowledge bases"
+msgstr "获取知识库列表"
+
+#: community/apps/dataset/views/dataset.py:119
+#: community/apps/dataset/views/dataset.py:120
+msgid "Create a knowledge base"
+msgstr "创建知识库"
+
+#: community/apps/dataset/views/dataset.py:134
+msgid "Hit test list"
+msgstr "命中测试列表"
+
+#: community/apps/dataset/views/dataset.py:154
+msgid "Re-vectorize"
+msgstr "重新向量化"
+
+#: community/apps/dataset/views/dataset.py:170
+msgid "Export knowledge base"
+msgstr "导出知识库"
+
+#: community/apps/dataset/views/dataset.py:184
+#: community/apps/dataset/views/dataset.py:185
+msgid "Export knowledge base containing images"
+msgstr "导出ZIP知识库"
+
+#: community/apps/dataset/views/dataset.py:199
+msgid "Delete knowledge base"
+msgstr "删除知识库"
+
+#: community/apps/dataset/views/dataset.py:213
+#: community/apps/dataset/views/dataset.py:214
+msgid "Query knowledge base details based on knowledge base id"
+msgstr "根据知识库id查询知识库详情"
+
+#: community/apps/dataset/views/dataset.py:226
+#: community/apps/dataset/views/dataset.py:227
+msgid "Modify knowledge base information"
+msgstr "修改知识库信息"
+
+#: community/apps/dataset/views/dataset.py:245
+#: community/apps/dataset/views/dataset.py:246
+#: community/apps/dataset/views/document.py:463
+#: community/apps/dataset/views/document.py:464
+msgid "Get the knowledge base paginated list"
+msgstr "获取知识库文档分页列表"
+
+#: community/apps/dataset/views/document.py:31
+#: community/apps/dataset/views/document.py:32
+msgid "Get QA template"
+msgstr "获取问答模版"
+
+#: community/apps/dataset/views/document.py:44
+#: community/apps/dataset/views/document.py:45
+msgid "Get form template"
+msgstr "获取表单模版"
+
+#: community/apps/dataset/views/document.py:57
+#: community/apps/dataset/views/document.py:58
+msgid "Create Web site documents"
+msgstr "创建web站点文档"
+
+#: community/apps/dataset/views/document.py:77
+#: community/apps/dataset/views/document.py:78
+msgid "Import QA and create documentation"
+msgstr "导入问答并创建文档"
+
+#: community/apps/dataset/views/document.py:98
+#: community/apps/dataset/views/document.py:99
+msgid "Import tables and create documents"
+msgstr "导入表格并创建文档"
+
+#: community/apps/dataset/views/document.py:118
+#: community/apps/dataset/views/document.py:119
+msgid "Create document"
+msgstr "创建文档"
+
+#: community/apps/dataset/views/document.py:152
+#: community/apps/dataset/views/document.py:153
+msgid "Modify document hit processing methods in batches"
+msgstr "批量修改文档命中处理方式"
+
+#: community/apps/dataset/views/document.py:171
+#: community/apps/dataset/views/document.py:172
+msgid "Create documents in batches"
+msgstr "批量创建文档"
+
+#: community/apps/dataset/views/document.py:187
+#: community/apps/dataset/views/document.py:188
+msgid "Batch sync documents"
+msgstr "批量同步文档"
+
+#: community/apps/dataset/views/document.py:202
+#: community/apps/dataset/views/document.py:203
+msgid "Delete documents in batches"
+msgstr "批量删除文档"
+
+#: community/apps/dataset/views/document.py:220
+#: community/apps/dataset/views/document.py:221
+msgid "Synchronize web site types"
+msgstr "同步web站点类型"
+
+#: community/apps/dataset/views/document.py:239
+#: community/apps/dataset/views/document.py:240
+msgid "Cancel task"
+msgstr "取消任务"
+
+#: community/apps/dataset/views/document.py:260
+#: community/apps/dataset/views/document.py:261
+msgid "Cancel tasks in batches"
+msgstr "批量取消任务"
+
+#: community/apps/dataset/views/document.py:279
+#: community/apps/dataset/views/document.py:280
+msgid "Refresh document vector library"
+msgstr "文档向量化"
+
+#: community/apps/dataset/views/document.py:300
+#: community/apps/dataset/views/document.py:301
+msgid "Batch refresh document vector library"
+msgstr "批量文档向量化"
+
+#: community/apps/dataset/views/document.py:319
+#: community/apps/dataset/views/document.py:320
+msgid "Migrate documents in batches"
+msgstr "批量迁移文档"
+
+#: community/apps/dataset/views/document.py:346
+#: community/apps/dataset/views/document.py:347
+msgid "Export document"
+msgstr "导出文档"
+
+#: community/apps/dataset/views/document.py:361
+#: community/apps/dataset/views/document.py:362
+msgid "Export Zip document"
+msgstr "导出Zip文档"
+
+#: community/apps/dataset/views/document.py:376
+#: community/apps/dataset/views/document.py:377
+msgid "Get document details"
+msgstr "获取文档详情"
+
+#: community/apps/dataset/views/document.py:391
+#: community/apps/dataset/views/document.py:392
+msgid "Modify document"
+msgstr "修改文档"
+
+#: community/apps/dataset/views/document.py:409
+#: community/apps/dataset/views/document.py:410
+msgid "Delete document"
+msgstr "删除文档"
+
+#: community/apps/dataset/views/document.py:427
+#: community/apps/dataset/views/document.py:428
+msgid "Get a list of segment IDs"
+msgstr "获取分段id列表"
+
+#: community/apps/dataset/views/document.py:439
+#: community/apps/dataset/views/document.py:440
+msgid "Segmented document"
+msgstr "分段文档"
+
+#: community/apps/dataset/views/file.py:42
+#: community/apps/dataset/views/file.py:43
+msgid "Get file"
+msgstr "获取文件"
+
+#: community/apps/dataset/views/image.py:28
+#: community/apps/dataset/views/image.py:29
+#: community/apps/dataset/views/image.py:34
+msgid "Upload image"
+msgstr "上传图片"
+
+#: community/apps/dataset/views/image.py:35
+#: community/apps/dataset/views/image.py:44
+msgid "Image"
+msgstr "图片"
+
+#: community/apps/dataset/views/image.py:42
+#: community/apps/dataset/views/image.py:43
+msgid "Get Image"
+msgstr "获取图片"
+
+#: community/apps/dataset/views/paragraph.py:28
+#: community/apps/dataset/views/paragraph.py:29
+msgid "Paragraph list"
+msgstr "段落列表"
+
+#: community/apps/dataset/views/paragraph.py:32
+#: community/apps/dataset/views/paragraph.py:51
+#: community/apps/dataset/views/paragraph.py:69
+#: community/apps/dataset/views/paragraph.py:85
+#: community/apps/dataset/views/paragraph.py:103
+#: community/apps/dataset/views/paragraph.py:121
+#: community/apps/dataset/views/paragraph.py:140
+#: community/apps/dataset/views/paragraph.py:156
+#: community/apps/dataset/views/paragraph.py:172
+#: community/apps/dataset/views/paragraph.py:193
+#: community/apps/dataset/views/paragraph.py:211
+#: community/apps/dataset/views/paragraph.py:238
+msgid "Knowledge Base/Documentation/Paragraph"
+msgstr "知识库/文档/段落"
+
+#: community/apps/dataset/views/paragraph.py:46
+#: community/apps/dataset/views/paragraph.py:47
+msgid "Create Paragraph"
+msgstr "创建段落"
+
+#: community/apps/dataset/views/paragraph.py:64
+#: community/apps/dataset/views/paragraph.py:65
+msgid "Add associated questions"
+msgstr "添加关联问题"
+
+#: community/apps/dataset/views/paragraph.py:80
+#: community/apps/dataset/views/paragraph.py:81
+msgid "Get a list of paragraph questions"
+msgstr "获取段落问题列表"
+
+#: community/apps/dataset/views/paragraph.py:99
+#: community/apps/dataset/views/paragraph.py:100
+msgid "Disassociation issue"
+msgstr "取消关联问题"
+
+#: community/apps/dataset/views/paragraph.py:117
+#: community/apps/dataset/views/paragraph.py:118
+msgid "Related questions"
+msgstr "关联问题"
+
+#: community/apps/dataset/views/paragraph.py:135
+#: community/apps/dataset/views/paragraph.py:136
+msgid "Modify paragraph data"
+msgstr "修改段落数据"
+
+#: community/apps/dataset/views/paragraph.py:152
+#: community/apps/dataset/views/paragraph.py:153
+msgid "Get paragraph details"
+msgstr "获取段落详情"
+
+#: community/apps/dataset/views/paragraph.py:168
+#: community/apps/dataset/views/paragraph.py:169
+msgid "Delete paragraph"
+msgstr "删除段落"
+
+#: community/apps/dataset/views/paragraph.py:187
+#: community/apps/dataset/views/paragraph.py:188
+msgid "Delete paragraphs in batches"
+msgstr "批量删除段落"
+
+#: community/apps/dataset/views/paragraph.py:206
+#: community/apps/dataset/views/paragraph.py:207
+msgid "Migrate paragraphs in batches"
+msgstr "批量迁移段落"
+
+#: community/apps/dataset/views/paragraph.py:233
+#: community/apps/dataset/views/paragraph.py:234
+msgid "Get paragraph list by pagination"
+msgstr "获取分页段落列表"
+
+#: community/apps/dataset/views/problem.py:28
+#: community/apps/dataset/views/problem.py:29
+msgid "Question list"
+msgstr "问题列表"
+
+#: community/apps/dataset/views/problem.py:32
+#: community/apps/dataset/views/problem.py:50
+#: community/apps/dataset/views/problem.py:68
+#: community/apps/dataset/views/problem.py:88
+#: community/apps/dataset/views/problem.py:103
+#: community/apps/dataset/views/problem.py:120
+#: community/apps/dataset/views/problem.py:136
+#: community/apps/dataset/views/problem.py:155
+msgid "Knowledge Base/Documentation/Paragraph/Question"
+msgstr "知识库/文档/段落/问题"
+
+#: community/apps/dataset/views/problem.py:45
+#: community/apps/dataset/views/problem.py:46
+msgid "Create question"
+msgstr "创建问题"
+
+#: community/apps/dataset/views/problem.py:64
+#: community/apps/dataset/views/problem.py:65
+msgid "Get a list of associated paragraphs"
+msgstr "获取关联段落列表"
+
+#: community/apps/dataset/views/problem.py:82
+#: community/apps/dataset/views/problem.py:83
+msgid "Batch deletion issues"
+msgstr "批量删除问题"
+
+#: community/apps/dataset/views/problem.py:98
+#: community/apps/dataset/views/problem.py:99
+msgid "Batch associated paragraphs"
+msgstr "批量关联段落"
+
+#: community/apps/dataset/views/problem.py:116
+#: community/apps/dataset/views/problem.py:117
+msgid "Delete question"
+msgstr "删除问题"
+
+#: community/apps/dataset/views/problem.py:131
+#: community/apps/dataset/views/problem.py:132
+msgid "Modify question"
+msgstr "修改问题"
+
+#: community/apps/dataset/views/problem.py:150
+#: community/apps/dataset/views/problem.py:151
+msgid "Get the list of questions by page"
+msgstr "获取分页问题列表"
+
+#: community/apps/embedding/task/embedding.py:30
+#: community/apps/embedding/task/embedding.py:81
+#, python-brace-format
+msgid "Failed to obtain vector model: {error} {traceback}"
+msgstr "获取向量模型失败: {error} {traceback}"
+
+#: community/apps/embedding/task/embedding.py:110
+#, python-brace-format
+msgid "Start--->Vectorized dataset: {dataset_id}"
+msgstr "开始--->向量化知识库: {dataset_id}"
+
+#: community/apps/embedding/task/embedding.py:114
+#, python-brace-format
+msgid "Dataset documentation: {document_names}"
+msgstr "知识库文档: {document_names}"
+
+#: community/apps/embedding/task/embedding.py:127
+#, python-brace-format
+msgid "End--->Vectorized dataset: {dataset_id}"
+msgstr "结束--->向量化知识库: {dataset_id}"
+
+#: community/apps/function_lib/serializers/function_lib_serializer.py:70
+#: community/apps/function_lib/serializers/function_lib_serializer.py:83
+#: community/apps/function_lib/swagger_api/function_lib_api.py:68
+#: community/apps/function_lib/swagger_api/function_lib_api.py:69
+#: community/apps/function_lib/swagger_api/function_lib_api.py:84
+#: community/apps/function_lib/swagger_api/function_lib_api.py:85
+#: community/apps/function_lib/swagger_api/function_lib_api.py:130
+#: community/apps/function_lib/swagger_api/function_lib_api.py:131
+#: community/apps/function_lib/swagger_api/function_lib_api.py:176
+#: community/apps/function_lib/swagger_api/function_lib_api.py:177
+msgid "variable name"
+msgstr "变量名"
+
+#: community/apps/function_lib/serializers/function_lib_serializer.py:71
+#: community/apps/function_lib/swagger_api/function_lib_api.py:88
+#: community/apps/function_lib/swagger_api/function_lib_api.py:89
+#: community/apps/function_lib/swagger_api/function_lib_api.py:134
+#: community/apps/function_lib/swagger_api/function_lib_api.py:135
+#: community/apps/function_lib/swagger_api/function_lib_api.py:180
+#: community/apps/function_lib/swagger_api/function_lib_api.py:181
+msgid "required"
+msgstr "必填"
+
+#: community/apps/function_lib/serializers/function_lib_serializer.py:74
+msgid "fields only support string|int|dict|array|float"
+msgstr "字段只支持string|int|dict|array|float"
+
+#: community/apps/function_lib/serializers/function_lib_serializer.py:85
+#: community/apps/function_lib/swagger_api/function_lib_api.py:72
+#: community/apps/function_lib/swagger_api/function_lib_api.py:73
+msgid "variable value"
+msgstr "变量值"
+
+#: community/apps/function_lib/serializers/function_lib_serializer.py:93
+#: community/apps/function_lib/serializers/function_lib_serializer.py:104
+#: community/apps/function_lib/serializers/function_lib_serializer.py:119
+#: community/apps/function_lib/serializers/py_lint_serializer.py:23
+#: community/apps/function_lib/swagger_api/function_lib_api.py:28
+#: community/apps/function_lib/swagger_api/function_lib_api.py:29
+#: community/apps/function_lib/swagger_api/function_lib_api.py:75
+#: community/apps/function_lib/swagger_api/function_lib_api.py:76
+#: community/apps/function_lib/swagger_api/function_lib_api.py:117
+#: community/apps/function_lib/swagger_api/function_lib_api.py:118
+#: community/apps/function_lib/swagger_api/function_lib_api.py:163
+#: community/apps/function_lib/swagger_api/function_lib_api.py:164
+#: community/apps/function_lib/swagger_api/py_lint_api.py:22
+#: community/apps/function_lib/swagger_api/py_lint_api.py:23
+msgid "function content"
+msgstr "函数内容"
+
+#: community/apps/function_lib/serializers/function_lib_serializer.py:98
+#: community/apps/function_lib/serializers/function_lib_serializer.py:114
+#: community/apps/function_lib/serializers/function_lib_serializer.py:135
+#: community/apps/function_lib/serializers/function_lib_serializer.py:388
+#: community/apps/function_lib/swagger_api/function_lib_api.py:24
+#: community/apps/function_lib/swagger_api/function_lib_api.py:25
+#: community/apps/function_lib/swagger_api/function_lib_api.py:46
+#: community/apps/function_lib/swagger_api/function_lib_api.py:113
+#: community/apps/function_lib/swagger_api/function_lib_api.py:114
+#: community/apps/function_lib/swagger_api/function_lib_api.py:159
+#: community/apps/function_lib/swagger_api/function_lib_api.py:160
+msgid "function name"
+msgstr "函数名"
+
+#: community/apps/function_lib/serializers/function_lib_serializer.py:101
+#: community/apps/function_lib/serializers/function_lib_serializer.py:117
+#: community/apps/function_lib/serializers/function_lib_serializer.py:138
+#: community/apps/function_lib/swagger_api/function_lib_api.py:26
+#: community/apps/function_lib/swagger_api/function_lib_api.py:27
+#: community/apps/function_lib/swagger_api/function_lib_api.py:51
+#: community/apps/function_lib/swagger_api/function_lib_api.py:115
+#: community/apps/function_lib/swagger_api/function_lib_api.py:116
+#: community/apps/function_lib/swagger_api/function_lib_api.py:161
+#: community/apps/function_lib/swagger_api/function_lib_api.py:162
+msgid "function description"
+msgstr "函数描述"
+
+#: community/apps/function_lib/serializers/function_lib_serializer.py:232
+msgid "field has no value set"
+msgstr "字段没有设置值"
+
+#: community/apps/function_lib/serializers/function_lib_serializer.py:248
+#: community/apps/function_lib/serializers/function_lib_serializer.py:253
+msgid "type error"
+msgstr "类型错误"
+
+#: community/apps/function_lib/serializers/function_lib_serializer.py:256
+#, python-brace-format
+msgid "Field: {name} Type: {_type} Value: {value} Type conversion error"
+msgstr "字段: {name} 类型: {_type} 值: {value} 类型转换错误"
+
+#: community/apps/function_lib/serializers/function_lib_serializer.py:261
+msgid "function id"
+msgstr "函数 id"
+
+#: community/apps/function_lib/serializers/function_lib_serializer.py:267
+#: community/apps/function_lib/serializers/function_lib_serializer.py:303
+#: community/apps/function_lib/serializers/function_lib_serializer.py:366
+#: community/apps/function_lib/serializers/function_lib_serializer.py:396
+msgid "Function does not exist"
+msgstr "函数不存在"
+
+#: community/apps/function_lib/serializers/function_lib_serializer.py:357
+#: community/apps/function_lib/serializers/function_lib_serializer.py:386
+#| msgid "function"
+msgid "function ID"
+msgstr "函数 ID"
+
+#: community/apps/function_lib/swagger_api/function_lib_api.py:23
+#: community/apps/function_lib/swagger_api/function_lib_api.py:205
+msgid "ID"
+msgstr ""
+
+#: community/apps/function_lib/swagger_api/function_lib_api.py:30
+#: community/apps/function_lib/swagger_api/function_lib_api.py:31
+msgid "input field"
+msgstr "输入字段"
+
+#: community/apps/function_lib/swagger_api/function_lib_api.py:62
+#: community/apps/function_lib/swagger_api/function_lib_api.py:78
+#: community/apps/function_lib/swagger_api/function_lib_api.py:124
+#: community/apps/function_lib/swagger_api/function_lib_api.py:170
+msgid "Input variable list"
+msgstr "输入变量列表"
+
+#: community/apps/function_lib/swagger_api/function_lib_api.py:94
+#: community/apps/function_lib/swagger_api/function_lib_api.py:140
+#: community/apps/function_lib/swagger_api/function_lib_api.py:186
+msgid "Field type string|int|dict|array|float"
+msgstr "字段类型 string|int|dict|array|float"
+
+#: community/apps/function_lib/swagger_api/function_lib_api.py:100
+#: community/apps/function_lib/swagger_api/function_lib_api.py:146
+#: community/apps/function_lib/swagger_api/function_lib_api.py:192
+msgid "The source only supports custom|reference"
+msgstr "来源只支持custom|reference"
+
+#: community/apps/function_lib/views/function_lib_views.py:28
+#: community/apps/function_lib/views/function_lib_views.py:29
+msgid "Get function list"
+msgstr "获取函数列表"
+
+#: community/apps/function_lib/views/function_lib_views.py:30
+#: community/apps/function_lib/views/function_lib_views.py:46
+#: community/apps/function_lib/views/function_lib_views.py:59
+#: community/apps/function_lib/views/function_lib_views.py:74
+#: community/apps/function_lib/views/function_lib_views.py:85
+#: community/apps/function_lib/views/function_lib_views.py:95
+#: community/apps/function_lib/views/function_lib_views.py:111
+#: community/apps/function_lib/views/py_lint.py:29
+msgid "Function"
+msgstr "函数库"
+
+#: community/apps/function_lib/views/function_lib_views.py:43
+#: community/apps/function_lib/views/function_lib_views.py:44
+msgid "Create function"
+msgstr "创建函数"
+
+#: community/apps/function_lib/views/function_lib_views.py:56
+#: community/apps/function_lib/views/function_lib_views.py:57
+msgid "Debug function"
+msgstr "调试函数"
+
+#: community/apps/function_lib/views/function_lib_views.py:71
+#: community/apps/function_lib/views/function_lib_views.py:72
+msgid "Update function"
+msgstr "更新函数"
+
+#: community/apps/function_lib/views/function_lib_views.py:83
+#: community/apps/function_lib/views/function_lib_views.py:84
+msgid "Delete function"
+msgstr "删除函数"
+
+#: community/apps/function_lib/views/function_lib_views.py:93
+#: community/apps/function_lib/views/function_lib_views.py:94
+msgid "Get function details"
+msgstr "获取函数详情"
+
+#: community/apps/function_lib/views/function_lib_views.py:106
+#: community/apps/function_lib/views/function_lib_views.py:107
+msgid "Get function list by pagination"
+msgstr "获取分页函数列表"
+
+#: community/apps/function_lib/views/function_lib_views.py:129
+#| msgid "not a function"
+msgid "Import function"
+msgstr "导入函数"
+
+#: community/apps/function_lib/views/function_lib_views.py:143
+#| msgid "not a function"
+msgid "Export function"
+msgstr "导出函数"
+
+#: community/apps/function_lib/views/py_lint.py:26
+#: community/apps/function_lib/views/py_lint.py:27
+msgid "Check code"
+msgstr "检查代码"
+
+#: community/apps/setting/models_provider/base_model_provider.py:66
+msgid "Model type cannot be empty"
+msgstr "模型类型不能为空"
+
+#: community/apps/setting/models_provider/base_model_provider.py:91
+msgid "The current platform does not support downloading models"
+msgstr "当前平台不支持下载模型"
+
+#: community/apps/setting/models_provider/base_model_provider.py:146
+msgid "LLM"
+msgstr "大语言模型"
+
+#: community/apps/setting/models_provider/base_model_provider.py:147
+msgid "Embedding Model"
+msgstr "向量模型"
+
+#: community/apps/setting/models_provider/base_model_provider.py:148
+msgid "Speech2Text"
+msgstr "语音识别"
+
+#: community/apps/setting/models_provider/base_model_provider.py:149
+msgid "TTS"
+msgstr "语音合成"
+
+#: community/apps/setting/models_provider/base_model_provider.py:150
+msgid "Vision Model"
+msgstr "视觉模型"
+
+#: community/apps/setting/models_provider/base_model_provider.py:151
+msgid "Image Generation"
+msgstr "图片生成"
+
+#: community/apps/setting/models_provider/base_model_provider.py:152
+msgid "Rerank"
+msgstr "重排模型"
+
+#: community/apps/setting/models_provider/base_model_provider.py:226
+msgid "The model does not support"
+msgstr "模型不支持"
+
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/aliyun_bai_lian_model_provider.py:42
+msgid ""
+"With the GTE-Rerank text sorting series model developed by Alibaba Tongyi "
+"Lab, developers can integrate high-quality text retrieval and sorting "
+"through the LlamaIndex framework."
+msgstr ""
+"阿里巴巴通义实验室开发的GTE-Rerank文本排序系列模型,开发者可以通过LlamaIndex"
+"框架进行集成高质量文本检索、排序。"
+
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/aliyun_bai_lian_model_provider.py:45
+msgid ""
+"Chinese (including various dialects such as Cantonese), English, Japanese, "
+"and Korean support free switching between multiple languages."
+msgstr "中文(含粤语等各种方言)、英文、日语、韩语支持多个语种自由切换"
+
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/aliyun_bai_lian_model_provider.py:48
+msgid ""
+"CosyVoice is based on a new generation of large generative speech models, "
+"which can predict emotions, intonation, rhythm, etc. based on context, and "
+"has better anthropomorphic effects."
+msgstr ""
+"CosyVoice基于新一代生成式语音大模型,能根据上下文预测情绪、语调、韵律等,具有"
+"更好的拟人效果"
+
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/aliyun_bai_lian_model_provider.py:51
+msgid ""
+"Universal text vector is Tongyi Lab's multi-language text unified vector "
+"model based on the LLM base. It provides high-level vector services for "
+"multiple mainstream languages around the world and helps developers quickly "
+"convert text data into high-quality vector data."
+msgstr ""
+"通用文本向量,是通义实验室基于LLM底座的多语言文本统一向量模型,面向全球多个主"
+"流语种,提供高水准的向量服务,帮助开发者将文本数据快速转换为高质量的向量数"
+"据。"
+
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/aliyun_bai_lian_model_provider.py:69
+#: community/apps/setting/models_provider/impl/qwen_model_provider/qwen_model_provider.py:40
+msgid ""
+"Tongyi Wanxiang - a large image model for text generation, supports "
+"bilingual input in Chinese and English, and supports the input of reference "
+"pictures for reference content or reference style migration. Key styles "
+"include but are not limited to watercolor, oil painting, Chinese painting, "
+"sketch, flat illustration, two-dimensional, and 3D. Cartoon."
+msgstr ""
+"通义万相-文本生成图像大模型,支持中英文双语输入,支持输入参考图片进行参考内容"
+"或者参考风格迁移,重点风格包括但不限于水彩、油画、中国画、素描、扁平插画、二"
+"次元、3D卡通。"
+
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/aliyun_bai_lian_model_provider.py:95
+msgid "Alibaba Cloud Bailian"
+msgstr "阿里云百炼"
+
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/embedding.py:28
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/image.py:47
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/llm.py:40
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/reranker.py:28
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/stt.py:21
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:68
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:55
+#: community/apps/setting/models_provider/impl/anthropic_model_provider/credential/image.py:42
+#: community/apps/setting/models_provider/impl/anthropic_model_provider/credential/llm.py:47
+#: community/apps/setting/models_provider/impl/aws_bedrock_model_provider/credential/embedding.py:21
+#: community/apps/setting/models_provider/impl/aws_bedrock_model_provider/credential/llm.py:40
+#: community/apps/setting/models_provider/impl/azure_model_provider/credential/embedding.py:27
+#: community/apps/setting/models_provider/impl/azure_model_provider/credential/image.py:45
+#: community/apps/setting/models_provider/impl/azure_model_provider/credential/llm.py:47
+#: community/apps/setting/models_provider/impl/azure_model_provider/credential/stt.py:23
+#: community/apps/setting/models_provider/impl/azure_model_provider/credential/tti.py:58
+#: community/apps/setting/models_provider/impl/azure_model_provider/credential/tts.py:41
+#: community/apps/setting/models_provider/impl/deepseek_model_provider/credential/llm.py:47
+#: community/apps/setting/models_provider/impl/gemini_model_provider/credential/embedding.py:26
+#: community/apps/setting/models_provider/impl/gemini_model_provider/credential/image.py:41
+#: community/apps/setting/models_provider/impl/gemini_model_provider/credential/llm.py:47
+#: community/apps/setting/models_provider/impl/gemini_model_provider/credential/stt.py:21
+#: community/apps/setting/models_provider/impl/kimi_model_provider/credential/llm.py:47
+#: community/apps/setting/models_provider/impl/local_model_provider/credential/embedding.py:27
+#: community/apps/setting/models_provider/impl/local_model_provider/credential/reranker.py:28
+#: community/apps/setting/models_provider/impl/ollama_model_provider/credential/embedding.py:26
+#: community/apps/setting/models_provider/impl/ollama_model_provider/credential/image.py:39
+#: community/apps/setting/models_provider/impl/ollama_model_provider/credential/llm.py:44
+#: community/apps/setting/models_provider/impl/ollama_model_provider/credential/reranker.py:27
+#: community/apps/setting/models_provider/impl/ollama_model_provider/credential/reranker.py:31
+#: community/apps/setting/models_provider/impl/openai_model_provider/credential/embedding.py:26
+#: community/apps/setting/models_provider/impl/openai_model_provider/credential/image.py:44
+#: community/apps/setting/models_provider/impl/openai_model_provider/credential/llm.py:47
+#: community/apps/setting/models_provider/impl/openai_model_provider/credential/stt.py:22
+#: community/apps/setting/models_provider/impl/openai_model_provider/credential/tti.py:61
+#: community/apps/setting/models_provider/impl/openai_model_provider/credential/tts.py:40
+#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/image.py:47
+#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/llm.py:47
+#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/tti.py:68
+#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/embedding.py:26
+#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/image.py:44
+#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/llm.py:47
+#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/reranker.py:28
+#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/stt.py:22
+#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/tti.py:61
+#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/tts.py:40
+#: community/apps/setting/models_provider/impl/tencent_cloud_model_provider/credential/llm.py:47
+#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/embedding.py:19
+#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/image.py:47
+#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/llm.py:31
+#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:78
+#: community/apps/setting/models_provider/impl/vllm_model_provider/credential/embedding.py:26
+#: community/apps/setting/models_provider/impl/vllm_model_provider/credential/image.py:42
+#: community/apps/setting/models_provider/impl/vllm_model_provider/credential/llm.py:39
+#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/embedding.py:26
+#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/image.py:42
+#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/llm.py:47
+#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/stt.py:25
+#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/tti.py:41
+#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/tts.py:53
+#: community/apps/setting/models_provider/impl/wenxin_model_provider/credential/embedding.py:27
+#: community/apps/setting/models_provider/impl/wenxin_model_provider/credential/llm.py:46
+#: community/apps/setting/models_provider/impl/xf_model_provider/credential/embedding.py:27
+#: community/apps/setting/models_provider/impl/xf_model_provider/credential/image.py:29
+#: community/apps/setting/models_provider/impl/xf_model_provider/credential/llm.py:66
+#: community/apps/setting/models_provider/impl/xf_model_provider/credential/stt.py:24
+#: community/apps/setting/models_provider/impl/xf_model_provider/credential/tts.py:47
+#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/embedding.py:19
+#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/image.py:41
+#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/llm.py:39
+#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/reranker.py:25
+#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/stt.py:21
+#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/tti.py:59
+#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/tts.py:39
+#: community/apps/setting/models_provider/impl/zhipu_model_provider/credential/image.py:41
+#: community/apps/setting/models_provider/impl/zhipu_model_provider/credential/llm.py:47
+#: community/apps/setting/models_provider/impl/zhipu_model_provider/credential/tti.py:40
+#, python-brace-format
+msgid "{model_type} Model type is not supported"
+msgstr "模型类型 {model_type} 不支持"
+
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/embedding.py:32
+#, python-brace-format
+msgid "{key} is required"
+msgstr "{key} 是必填项"
+
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/embedding.py:37
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/image.py:56
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/llm.py:50
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/reranker.py:37
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/model/tti.py:43
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/model/tts.py:37
+#: community/apps/setting/models_provider/impl/anthropic_model_provider/credential/image.py:52
+#: community/apps/setting/models_provider/impl/anthropic_model_provider/credential/llm.py:57
+#: community/apps/setting/models_provider/impl/aws_bedrock_model_provider/credential/embedding.py:34
+#: community/apps/setting/models_provider/impl/aws_bedrock_model_provider/credential/llm.py:53
+#: community/apps/setting/models_provider/impl/azure_model_provider/credential/embedding.py:37
+#: community/apps/setting/models_provider/impl/azure_model_provider/credential/image.py:55
+#: community/apps/setting/models_provider/impl/azure_model_provider/credential/llm.py:57
+#: community/apps/setting/models_provider/impl/deepseek_model_provider/credential/llm.py:57
+#: community/apps/setting/models_provider/impl/gemini_model_provider/credential/embedding.py:36
+#: community/apps/setting/models_provider/impl/gemini_model_provider/credential/image.py:51
+#: community/apps/setting/models_provider/impl/gemini_model_provider/credential/llm.py:57
+#: community/apps/setting/models_provider/impl/gemini_model_provider/model/stt.py:43
+#: community/apps/setting/models_provider/impl/kimi_model_provider/credential/llm.py:57
+#: community/apps/setting/models_provider/impl/local_model_provider/credential/embedding.py:36
+#: community/apps/setting/models_provider/impl/local_model_provider/credential/reranker.py:37
+#: community/apps/setting/models_provider/impl/ollama_model_provider/credential/embedding.py:37
+#: community/apps/setting/models_provider/impl/ollama_model_provider/credential/reranker.py:44
+#: community/apps/setting/models_provider/impl/openai_model_provider/credential/embedding.py:36
+#: community/apps/setting/models_provider/impl/openai_model_provider/credential/image.py:54
+#: community/apps/setting/models_provider/impl/openai_model_provider/credential/llm.py:58
+#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/image.py:56
+#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/llm.py:56
+#: community/apps/setting/models_provider/impl/qwen_model_provider/model/tti.py:43
+#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/embedding.py:36
+#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/image.py:54
+#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/llm.py:58
+#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/reranker.py:37
+#: community/apps/setting/models_provider/impl/tencent_cloud_model_provider/credential/llm.py:58
+#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/embedding.py:23
+#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/image.py:56
+#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/llm.py:51
+#: community/apps/setting/models_provider/impl/tencent_model_provider/model/tti.py:54
+#: community/apps/setting/models_provider/impl/vllm_model_provider/credential/embedding.py:36
+#: community/apps/setting/models_provider/impl/vllm_model_provider/credential/llm.py:50
+#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/embedding.py:36
+#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/image.py:52
+#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/llm.py:57
+#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/model/tts.py:77
+#: community/apps/setting/models_provider/impl/wenxin_model_provider/credential/embedding.py:31
+#: community/apps/setting/models_provider/impl/wenxin_model_provider/credential/llm.py:60
+#: community/apps/setting/models_provider/impl/xf_model_provider/credential/embedding.py:31
+#: community/apps/setting/models_provider/impl/xf_model_provider/credential/llm.py:76
+#: community/apps/setting/models_provider/impl/xf_model_provider/model/tts.py:101
+#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/embedding.py:31
+#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/image.py:51
+#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/llm.py:50
+#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/reranker.py:34
+#: community/apps/setting/models_provider/impl/xinference_model_provider/model/tts.py:44
+#: community/apps/setting/models_provider/impl/zhipu_model_provider/credential/image.py:51
+#: community/apps/setting/models_provider/impl/zhipu_model_provider/credential/llm.py:56
+#: community/apps/setting/models_provider/impl/zhipu_model_provider/model/tti.py:49
+msgid "Hello"
+msgstr "你好"
+
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/embedding.py:44
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/image.py:66
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/llm.py:58
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/reranker.py:44
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/stt.py:38
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:86
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:73
+#: community/apps/setting/models_provider/impl/anthropic_model_provider/credential/image.py:62
+#: community/apps/setting/models_provider/impl/anthropic_model_provider/credential/llm.py:65
+#: community/apps/setting/models_provider/impl/aws_bedrock_model_provider/credential/embedding.py:42
+#: community/apps/setting/models_provider/impl/aws_bedrock_model_provider/credential/llm.py:61
+#: community/apps/setting/models_provider/impl/azure_model_provider/credential/image.py:65
+#: community/apps/setting/models_provider/impl/azure_model_provider/credential/stt.py:40
+#: community/apps/setting/models_provider/impl/azure_model_provider/credential/tti.py:77
+#: community/apps/setting/models_provider/impl/azure_model_provider/credential/tts.py:58
+#: community/apps/setting/models_provider/impl/deepseek_model_provider/credential/llm.py:65
+#: community/apps/setting/models_provider/impl/gemini_model_provider/credential/embedding.py:43
+#: community/apps/setting/models_provider/impl/gemini_model_provider/credential/image.py:61
+#: community/apps/setting/models_provider/impl/gemini_model_provider/credential/llm.py:66
+#: community/apps/setting/models_provider/impl/gemini_model_provider/credential/stt.py:38
+#: community/apps/setting/models_provider/impl/kimi_model_provider/credential/llm.py:64
+#: community/apps/setting/models_provider/impl/local_model_provider/credential/embedding.py:44
+#: community/apps/setting/models_provider/impl/local_model_provider/credential/reranker.py:45
+#: community/apps/setting/models_provider/impl/ollama_model_provider/credential/reranker.py:51
+#: community/apps/setting/models_provider/impl/openai_model_provider/credential/embedding.py:43
+#: community/apps/setting/models_provider/impl/openai_model_provider/credential/image.py:64
+#: community/apps/setting/models_provider/impl/openai_model_provider/credential/llm.py:66
+#: community/apps/setting/models_provider/impl/openai_model_provider/credential/stt.py:39
+#: community/apps/setting/models_provider/impl/openai_model_provider/credential/tti.py:80
+#: community/apps/setting/models_provider/impl/openai_model_provider/credential/tts.py:58
+#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/image.py:66
+#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/llm.py:64
+#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/tti.py:86
+#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/embedding.py:43
+#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/image.py:64
+#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/llm.py:66
+#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/reranker.py:44
+#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/stt.py:39
+#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/tti.py:80
+#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/tts.py:58
+#: community/apps/setting/models_provider/impl/tencent_cloud_model_provider/credential/llm.py:66
+#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/embedding.py:30
+#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/image.py:66
+#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/llm.py:57
+#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:104
+#: community/apps/setting/models_provider/impl/vllm_model_provider/credential/embedding.py:43
+#: community/apps/setting/models_provider/impl/vllm_model_provider/credential/image.py:62
+#: community/apps/setting/models_provider/impl/vllm_model_provider/credential/llm.py:55
+#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/embedding.py:43
+#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/image.py:62
+#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/llm.py:66
+#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/stt.py:42
+#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/tti.py:58
+#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/tts.py:70
+#: community/apps/setting/models_provider/impl/wenxin_model_provider/credential/embedding.py:38
+#: community/apps/setting/models_provider/impl/xf_model_provider/credential/embedding.py:38
+#: community/apps/setting/models_provider/impl/xf_model_provider/credential/image.py:50
+#: community/apps/setting/models_provider/impl/xf_model_provider/credential/llm.py:84
+#: community/apps/setting/models_provider/impl/xf_model_provider/credential/stt.py:41
+#: community/apps/setting/models_provider/impl/xf_model_provider/credential/tts.py:65
+#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/image.py:60
+#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/reranker.py:40
+#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/stt.py:37
+#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/tti.py:77
+#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/tts.py:56
+#: community/apps/setting/models_provider/impl/zhipu_model_provider/credential/image.py:61
+#: community/apps/setting/models_provider/impl/zhipu_model_provider/credential/llm.py:64
+#: community/apps/setting/models_provider/impl/zhipu_model_provider/credential/tti.py:59
+#, python-brace-format
+msgid ""
+"Verification failed, please check whether the parameters are correct: {error}"
+msgstr "验证失败,请检查参数是否正确: {error}"
+
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/image.py:22
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/llm.py:15
+#: community/apps/setting/models_provider/impl/anthropic_model_provider/credential/image.py:15
+#: community/apps/setting/models_provider/impl/anthropic_model_provider/credential/llm.py:22
+#: community/apps/setting/models_provider/impl/aws_bedrock_model_provider/credential/llm.py:14
+#: community/apps/setting/models_provider/impl/azure_model_provider/credential/image.py:17
+#: community/apps/setting/models_provider/impl/azure_model_provider/credential/llm.py:22
+#: community/apps/setting/models_provider/impl/deepseek_model_provider/credential/llm.py:22
+#: community/apps/setting/models_provider/impl/gemini_model_provider/credential/image.py:15
+#: community/apps/setting/models_provider/impl/gemini_model_provider/credential/llm.py:22
+#: community/apps/setting/models_provider/impl/kimi_model_provider/credential/llm.py:22
+#: community/apps/setting/models_provider/impl/ollama_model_provider/credential/image.py:12
+#: community/apps/setting/models_provider/impl/ollama_model_provider/credential/llm.py:20
+#: community/apps/setting/models_provider/impl/openai_model_provider/credential/image.py:17
+#: community/apps/setting/models_provider/impl/openai_model_provider/credential/llm.py:22
+#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/image.py:22
+#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/llm.py:22
+#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/image.py:17
+#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/llm.py:22
+#: community/apps/setting/models_provider/impl/tencent_cloud_model_provider/credential/llm.py:22
+#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/image.py:22
+#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/llm.py:14
+#: community/apps/setting/models_provider/impl/vllm_model_provider/credential/image.py:15
+#: community/apps/setting/models_provider/impl/vllm_model_provider/credential/llm.py:15
+#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/image.py:15
+#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/llm.py:22
+#: community/apps/setting/models_provider/impl/wenxin_model_provider/credential/llm.py:22
+#: community/apps/setting/models_provider/impl/xf_model_provider/credential/llm.py:22
+#: community/apps/setting/models_provider/impl/xf_model_provider/credential/llm.py:41
+#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/image.py:14
+#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/llm.py:15
+#: community/apps/setting/models_provider/impl/zhipu_model_provider/credential/image.py:15
+#: community/apps/setting/models_provider/impl/zhipu_model_provider/credential/llm.py:22
+msgid "Temperature"
+msgstr "温度"
+
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/image.py:23
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/llm.py:16
+#: community/apps/setting/models_provider/impl/anthropic_model_provider/credential/image.py:16
+#: community/apps/setting/models_provider/impl/anthropic_model_provider/credential/llm.py:23
+#: community/apps/setting/models_provider/impl/aws_bedrock_model_provider/credential/llm.py:15
+#: community/apps/setting/models_provider/impl/azure_model_provider/credential/image.py:18
+#: community/apps/setting/models_provider/impl/azure_model_provider/credential/llm.py:23
+#: community/apps/setting/models_provider/impl/deepseek_model_provider/credential/llm.py:23
+#: community/apps/setting/models_provider/impl/gemini_model_provider/credential/image.py:16
+#: community/apps/setting/models_provider/impl/gemini_model_provider/credential/llm.py:23
+#: community/apps/setting/models_provider/impl/kimi_model_provider/credential/llm.py:23
+#: community/apps/setting/models_provider/impl/ollama_model_provider/credential/image.py:13
+#: community/apps/setting/models_provider/impl/ollama_model_provider/credential/llm.py:21
+#: community/apps/setting/models_provider/impl/openai_model_provider/credential/image.py:18
+#: community/apps/setting/models_provider/impl/openai_model_provider/credential/llm.py:23
+#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/image.py:23
+#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/llm.py:23
+#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/image.py:18
+#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/llm.py:23
+#: community/apps/setting/models_provider/impl/tencent_cloud_model_provider/credential/llm.py:23
+#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/image.py:23
+#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/llm.py:15
+#: community/apps/setting/models_provider/impl/vllm_model_provider/credential/image.py:16
+#: community/apps/setting/models_provider/impl/vllm_model_provider/credential/llm.py:16
+#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/image.py:16
+#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/llm.py:23
+#: community/apps/setting/models_provider/impl/wenxin_model_provider/credential/llm.py:23
+#: community/apps/setting/models_provider/impl/xf_model_provider/credential/llm.py:23
+#: community/apps/setting/models_provider/impl/xf_model_provider/credential/llm.py:42
+#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/image.py:15
+#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/llm.py:16
+#: community/apps/setting/models_provider/impl/zhipu_model_provider/credential/image.py:16
+#: community/apps/setting/models_provider/impl/zhipu_model_provider/credential/llm.py:23
+msgid ""
+"Higher values make the output more random, while lower values make it more "
+"focused and deterministic"
+msgstr "较高的数值会使输出更加随机,而较低的数值会使其更加集中和确定"
+
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/image.py:31
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/llm.py:24
+#: community/apps/setting/models_provider/impl/anthropic_model_provider/credential/image.py:24
+#: community/apps/setting/models_provider/impl/anthropic_model_provider/credential/llm.py:31
+#: community/apps/setting/models_provider/impl/aws_bedrock_model_provider/credential/llm.py:23
+#: community/apps/setting/models_provider/impl/azure_model_provider/credential/image.py:26
+#: community/apps/setting/models_provider/impl/azure_model_provider/credential/llm.py:31
+#: community/apps/setting/models_provider/impl/deepseek_model_provider/credential/llm.py:31
+#: community/apps/setting/models_provider/impl/gemini_model_provider/credential/image.py:24
+#: community/apps/setting/models_provider/impl/gemini_model_provider/credential/llm.py:31
+#: community/apps/setting/models_provider/impl/kimi_model_provider/credential/llm.py:31
+#: community/apps/setting/models_provider/impl/ollama_model_provider/credential/image.py:21
+#: community/apps/setting/models_provider/impl/ollama_model_provider/credential/llm.py:29
+#: community/apps/setting/models_provider/impl/openai_model_provider/credential/image.py:26
+#: community/apps/setting/models_provider/impl/openai_model_provider/credential/llm.py:31
+#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/image.py:31
+#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/llm.py:31
+#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/image.py:26
+#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/llm.py:31
+#: community/apps/setting/models_provider/impl/tencent_cloud_model_provider/credential/llm.py:31
+#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/image.py:31
+#: community/apps/setting/models_provider/impl/vllm_model_provider/credential/image.py:24
+#: community/apps/setting/models_provider/impl/vllm_model_provider/credential/llm.py:24
+#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/image.py:24
+#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/llm.py:31
+#: community/apps/setting/models_provider/impl/wenxin_model_provider/credential/llm.py:31
+#: community/apps/setting/models_provider/impl/xf_model_provider/credential/llm.py:31
+#: community/apps/setting/models_provider/impl/xf_model_provider/credential/llm.py:50
+#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/image.py:23
+#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/llm.py:24
+#: community/apps/setting/models_provider/impl/zhipu_model_provider/credential/image.py:24
+#: community/apps/setting/models_provider/impl/zhipu_model_provider/credential/llm.py:31
+msgid "Output the maximum Tokens"
+msgstr "输出最大Token数"
+
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/image.py:32
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/llm.py:25
+#: community/apps/setting/models_provider/impl/anthropic_model_provider/credential/image.py:25
+#: community/apps/setting/models_provider/impl/anthropic_model_provider/credential/llm.py:32
+#: community/apps/setting/models_provider/impl/aws_bedrock_model_provider/credential/llm.py:24
+#: community/apps/setting/models_provider/impl/azure_model_provider/credential/image.py:27
+#: community/apps/setting/models_provider/impl/azure_model_provider/credential/llm.py:32
+#: community/apps/setting/models_provider/impl/deepseek_model_provider/credential/llm.py:32
+#: community/apps/setting/models_provider/impl/gemini_model_provider/credential/image.py:25
+#: community/apps/setting/models_provider/impl/gemini_model_provider/credential/llm.py:32
+#: community/apps/setting/models_provider/impl/kimi_model_provider/credential/llm.py:32
+#: community/apps/setting/models_provider/impl/ollama_model_provider/credential/image.py:22
+#: community/apps/setting/models_provider/impl/ollama_model_provider/credential/llm.py:30
+#: community/apps/setting/models_provider/impl/openai_model_provider/credential/image.py:27
+#: community/apps/setting/models_provider/impl/openai_model_provider/credential/llm.py:32
+#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/image.py:32
+#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/llm.py:32
+#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/image.py:27
+#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/llm.py:32
+#: community/apps/setting/models_provider/impl/tencent_cloud_model_provider/credential/llm.py:32
+#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/image.py:32
+#: community/apps/setting/models_provider/impl/vllm_model_provider/credential/image.py:25
+#: community/apps/setting/models_provider/impl/vllm_model_provider/credential/llm.py:25
+#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/image.py:25
+#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/llm.py:32
+#: community/apps/setting/models_provider/impl/wenxin_model_provider/credential/llm.py:32
+#: community/apps/setting/models_provider/impl/xf_model_provider/credential/llm.py:32
+#: community/apps/setting/models_provider/impl/xf_model_provider/credential/llm.py:51
+#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/image.py:24
+#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/llm.py:25
+#: community/apps/setting/models_provider/impl/zhipu_model_provider/credential/image.py:25
+#: community/apps/setting/models_provider/impl/zhipu_model_provider/credential/llm.py:32
+msgid "Specify the maximum number of tokens that the model can generate"
+msgstr "指定模型可以生成的最大 tokens 数"
+
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/image.py:51
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/llm.py:45
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/reranker.py:32
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/stt.py:26
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:72
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:60
+#: community/apps/setting/models_provider/impl/anthropic_model_provider/credential/image.py:47
+#: community/apps/setting/models_provider/impl/anthropic_model_provider/credential/llm.py:52
+#: community/apps/setting/models_provider/impl/azure_model_provider/credential/embedding.py:32
+#: community/apps/setting/models_provider/impl/azure_model_provider/credential/image.py:50
+#: community/apps/setting/models_provider/impl/azure_model_provider/credential/llm.py:52
+#: community/apps/setting/models_provider/impl/azure_model_provider/credential/stt.py:28
+#: community/apps/setting/models_provider/impl/azure_model_provider/credential/tti.py:63
+#: community/apps/setting/models_provider/impl/azure_model_provider/credential/tts.py:46
+#: community/apps/setting/models_provider/impl/deepseek_model_provider/credential/llm.py:52
+#: community/apps/setting/models_provider/impl/gemini_model_provider/credential/embedding.py:31
+#: community/apps/setting/models_provider/impl/gemini_model_provider/credential/image.py:46
+#: community/apps/setting/models_provider/impl/gemini_model_provider/credential/llm.py:52
+#: community/apps/setting/models_provider/impl/gemini_model_provider/credential/stt.py:26
+#: community/apps/setting/models_provider/impl/kimi_model_provider/credential/llm.py:52
+#: community/apps/setting/models_provider/impl/local_model_provider/credential/embedding.py:31
+#: community/apps/setting/models_provider/impl/local_model_provider/credential/reranker.py:32
+#: community/apps/setting/models_provider/impl/ollama_model_provider/credential/embedding.py:46
+#: community/apps/setting/models_provider/impl/ollama_model_provider/credential/llm.py:62
+#: community/apps/setting/models_provider/impl/ollama_model_provider/credential/reranker.py:63
+#: community/apps/setting/models_provider/impl/openai_model_provider/credential/embedding.py:31
+#: community/apps/setting/models_provider/impl/openai_model_provider/credential/image.py:49
+#: community/apps/setting/models_provider/impl/openai_model_provider/credential/llm.py:52
+#: community/apps/setting/models_provider/impl/openai_model_provider/credential/stt.py:27
+#: community/apps/setting/models_provider/impl/openai_model_provider/credential/tti.py:66
+#: community/apps/setting/models_provider/impl/openai_model_provider/credential/tts.py:45
+#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/image.py:51
+#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/llm.py:51
+#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/tti.py:72
+#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/embedding.py:31
+#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/image.py:49
+#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/llm.py:52
+#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/reranker.py:32
+#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/stt.py:27
+#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/tti.py:66
+#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/tts.py:45
+#: community/apps/setting/models_provider/impl/tencent_cloud_model_provider/credential/llm.py:52
+#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/image.py:51
+#: community/apps/setting/models_provider/impl/vllm_model_provider/credential/embedding.py:31
+#: community/apps/setting/models_provider/impl/vllm_model_provider/credential/image.py:47
+#: community/apps/setting/models_provider/impl/vllm_model_provider/credential/llm.py:65
+#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/embedding.py:31
+#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/image.py:47
+#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/llm.py:52
+#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/stt.py:30
+#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/tti.py:46
+#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/tts.py:58
+#: community/apps/setting/models_provider/impl/wenxin_model_provider/credential/llm.py:55
+#: community/apps/setting/models_provider/impl/wenxin_model_provider/credential/llm.py:72
+#: community/apps/setting/models_provider/impl/xf_model_provider/credential/image.py:34
+#: community/apps/setting/models_provider/impl/xf_model_provider/credential/llm.py:71
+#: community/apps/setting/models_provider/impl/xf_model_provider/credential/stt.py:29
+#: community/apps/setting/models_provider/impl/xf_model_provider/credential/tts.py:52
+#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/embedding.py:40
+#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/image.py:46
+#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/llm.py:59
+#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/reranker.py:29
+#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/stt.py:26
+#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/tti.py:64
+#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/tts.py:44
+#: community/apps/setting/models_provider/impl/zhipu_model_provider/credential/image.py:46
+#: community/apps/setting/models_provider/impl/zhipu_model_provider/credential/llm.py:51
+#: community/apps/setting/models_provider/impl/zhipu_model_provider/credential/tti.py:45
+#, python-brace-format
+msgid "{key} is required"
+msgstr "{key} 是必填项"
+
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:22
+#: community/apps/setting/models_provider/impl/azure_model_provider/credential/tti.py:15
+#: community/apps/setting/models_provider/impl/openai_model_provider/credential/tti.py:15
+#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/tti.py:22
+#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/tti.py:15
+#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/tti.py:15
+#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/tti.py:14
+#: community/apps/setting/models_provider/impl/zhipu_model_provider/credential/tti.py:15
+msgid "Image size"
+msgstr "图片尺寸"
+
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:22
+#: community/apps/setting/models_provider/impl/azure_model_provider/credential/tti.py:15
+#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/tti.py:22
+msgid "Specify the size of the generated image, such as: 1024x1024"
+msgstr "指定生成图片的尺寸, 如: 1024x1024"
+
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:34
+#: community/apps/setting/models_provider/impl/azure_model_provider/credential/tti.py:40
+#: community/apps/setting/models_provider/impl/openai_model_provider/credential/tti.py:43
+#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/tti.py:34
+#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/tti.py:43
+#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/tti.py:41
+msgid "Number of pictures"
+msgstr "图片数量"
+
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:34
+#: community/apps/setting/models_provider/impl/azure_model_provider/credential/tti.py:40
+#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/tti.py:34
+msgid "Specify the number of generated images"
+msgstr "指定生成图片的数量"
+
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:41
+#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/tti.py:41
+msgid "Style"
+msgstr "风格"
+
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:41
+#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/tti.py:41
+msgid "Specify the style of generated images"
+msgstr "指定生成图片的风格"
+
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:45
+#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/tti.py:45
+msgid "Default value, the image style is randomly output by the model"
+msgstr "默认值,图片风格由模型随机输出"
+
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:46
+#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/tti.py:46
+msgid "photography"
+msgstr "摄影"
+
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:47
+#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/tti.py:47
+msgid "Portraits"
+msgstr "人像写真"
+
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:48
+#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/tti.py:48
+msgid "3D cartoon"
+msgstr "3D卡通"
+
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:49
+#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/tti.py:49
+msgid "animation"
+msgstr "动画"
+
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:50
+#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/tti.py:50
+msgid "painting"
+msgstr "油画"
+
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:51
+#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/tti.py:51
+msgid "watercolor"
+msgstr "水彩"
+
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:52
+#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/tti.py:52
+msgid "sketch"
+msgstr "素描"
+
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:53
+#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/tti.py:53
+msgid "Chinese painting"
+msgstr "中国画"
+
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:54
+#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/tti.py:54
+msgid "flat illustration"
+msgstr "扁平插画"
+
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:15
+#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/tts.py:15
+#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/tts.py:15
+msgid "timbre"
+msgstr "音色"
+
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:15
+#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/tts.py:15
+msgid "Chinese sounds can support mixed scenes of Chinese and English"
+msgstr "中文音色支持中英文混合场景"
+
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:20
+msgid "Long Xiaochun"
+msgstr "龙小淳"
+
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:21
+msgid "Long Xiaoxia"
+msgstr "龙小夏"
+
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:22
+msgid "Long Xiaochen"
+msgstr "龙小诚"
+
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:23
+msgid "Long Xiaobai"
+msgstr "龙小白"
+
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:24
+msgid "Long laotie"
+msgstr "龙老铁"
+
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:25
+msgid "Long Shu"
+msgstr "龙书"
+
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:26
+msgid "Long Shuo"
+msgstr "龙硕"
+
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:27
+msgid "Long Jing"
+msgstr "龙婧"
+
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:28
+msgid "Long Miao"
+msgstr "龙妙"
+
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:29
+msgid "Long Yue"
+msgstr "龙悦"
+
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:30
+msgid "Long Yuan"
+msgstr "龙媛"
+
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:31
+msgid "Long Fei"
+msgstr "龙飞"
+
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:32
+msgid "Long Jielidou"
+msgstr "龙杰力豆"
+
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:33
+msgid "Long Tong"
+msgstr "龙彤"
+
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:34
+msgid "Long Xiang"
+msgstr "龙祥"
+
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:39
+#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/tts.py:33
+#: community/apps/setting/models_provider/impl/xf_model_provider/credential/tts.py:28
+msgid "speaking speed"
+msgstr "语速"
+
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:39
+msgid "[0.5,2], the default is 1, usually one decimal place is enough"
+msgstr "[0.5,2],默认为1,通常一位小数就足够了"
+
+#: community/apps/setting/models_provider/impl/anthropic_model_provider/credential/image.py:34
+#: community/apps/setting/models_provider/impl/anthropic_model_provider/credential/llm.py:74
+msgid "API URL"
+msgstr ""
+
+#: community/apps/setting/models_provider/impl/anthropic_model_provider/credential/image.py:35
+#: community/apps/setting/models_provider/impl/anthropic_model_provider/credential/llm.py:75
+msgid "API Key"
+msgstr ""
+
+#: community/apps/setting/models_provider/impl/aws_bedrock_model_provider/aws_bedrock_model_provider.py:36
+msgid ""
+"An update to Claude 2 that doubles the context window and improves "
+"reliability, hallucination rates, and evidence-based accuracy in long "
+"documents and RAG contexts."
+msgstr ""
+"Claude 2 的更新,采用双倍的上下文窗口,在长文档和 RAG 上下文中提高可靠性和循"
+"证准确性,并降低幻觉率。"
+
+#: community/apps/setting/models_provider/impl/aws_bedrock_model_provider/aws_bedrock_model_provider.py:43
+msgid ""
+"Anthropic is a powerful model that can handle a variety of tasks, from "
+"complex dialogue and creative content generation to detailed command "
+"obedience."
+msgstr ""
+"Anthropic 功能强大的模型,可处理各种任务,从复杂的对话和创意内容生成到详细的"
+"指令服从。"
+
+#: community/apps/setting/models_provider/impl/aws_bedrock_model_provider/aws_bedrock_model_provider.py:50
+msgid ""
+"The Claude 3 Haiku is Anthropic's fastest and most compact model, with near-"
+"instant responsiveness. The model can answer simple queries and requests "
+"quickly. Customers will be able to build seamless AI experiences that mimic "
+"human interactions. Claude 3 Haiku can process images and return text "
+"output, and provides 200K context windows."
+msgstr ""
+"Claude 3 Haiku 是 Anthropic 最快速、最紧凑的模型,具有近乎即时的响应能力。该"
+"模型可以快速回答简单的查询和请求。客户将能够构建模仿人类交互的无缝人工智能体"
+"验。 Claude 3 Haiku 可以处理图像和返回文本输出,并且提供 200K 上下文窗口。"
+
+#: community/apps/setting/models_provider/impl/aws_bedrock_model_provider/aws_bedrock_model_provider.py:57
+msgid ""
+"The Claude 3 Sonnet model from Anthropic strikes the ideal balance between "
+"intelligence and speed, especially when it comes to handling enterprise "
+"workloads. This model offers maximum utility while being priced lower than "
+"competing products, and it's been engineered to be a solid choice for "
+"deploying AI at scale."
+msgstr ""
+"Anthropic 推出的 Claude 3 Sonnet 模型在智能和速度之间取得理想的平衡,尤其是在"
+"处理企业工作负载方面。该模型提供最大的效用,同时价格低于竞争产品,并且其经过"
+"精心设计,是大规模部署人工智能的可靠选择。"
+
+#: community/apps/setting/models_provider/impl/aws_bedrock_model_provider/aws_bedrock_model_provider.py:64
+msgid ""
+"The Claude 3.5 Sonnet raises the industry standard for intelligence, "
+"outperforming competing models and the Claude 3 Opus in extensive "
+"evaluations, with the speed and cost-effectiveness of our mid-range models."
+msgstr ""
+"Claude 3.5 Sonnet 提高了智能的行业标准,在广泛的评估中超越了竞争对手的模型和"
+"Claude 3 Opus,并具有我们中端模型的速度和成本效益。"
+
+#: community/apps/setting/models_provider/impl/aws_bedrock_model_provider/aws_bedrock_model_provider.py:71
+msgid ""
+"A faster, more affordable but still very powerful model that can handle a "
+"range of tasks including casual conversation, text analysis, summarization "
+"and document question answering."
+msgstr ""
+"一种更快速、更实惠但仍然非常强大的模型,它可以处理一系列任务,包括随意对话、"
+"文本分析、摘要和文档问题回答。"
+
+#: community/apps/setting/models_provider/impl/aws_bedrock_model_provider/aws_bedrock_model_provider.py:78
+msgid ""
+"Titan Text Premier is the most powerful and advanced model in the Titan Text "
+"series, designed to deliver exceptional performance for a variety of "
+"enterprise applications. With its cutting-edge features, it delivers greater "
+"accuracy and outstanding results, making it an excellent choice for "
+"organizations looking for a top-notch text processing solution."
+msgstr ""
+"Titan Text Premier 是 Titan Text 系列中功能最强大、最先进的模型,旨在为各种企业应"
+"用程序提供卓越的性能。凭借其尖端功能,它提供了更高的准确性和出色的结果,使其"
+"成为寻求一流文本处理解决方案的组织的绝佳选择。"
+
+#: community/apps/setting/models_provider/impl/aws_bedrock_model_provider/aws_bedrock_model_provider.py:85
+msgid ""
+"Amazon Titan Text Lite is a lightweight, efficient model ideal for fine-"
+"tuning English-language tasks, including summarization and copywriting, "
+"where customers require smaller, more cost-effective, and highly "
+"customizable models."
+msgstr ""
+"Amazon Titan Text Lite 是一种轻量级的高效模型,非常适合英语任务的微调,包括摘"
+"要和文案写作等,在这种场景下,客户需要更小、更经济高效且高度可定制的模型。"
+
+#: community/apps/setting/models_provider/impl/aws_bedrock_model_provider/aws_bedrock_model_provider.py:91
+msgid ""
+"Amazon Titan Text Express has context lengths of up to 8,000 tokens, making "
+"it ideal for a variety of high-level general language tasks, such as open-"
+"ended text generation and conversational chat, as well as support in "
+"retrieval-augmented generation (RAG). At launch, the model is optimized for "
+"English, but other languages are supported."
+msgstr ""
+"Amazon Titan Text Express 的上下文长度长达 8000 个 tokens,因而非常适合各种高"
+"级常规语言任务,例如开放式文本生成和对话式聊天,以及检索增强生成(RAG)中的支"
+"持。在发布时,该模型针对英语进行了优化,但也支持其他语言。"
+
+#: community/apps/setting/models_provider/impl/aws_bedrock_model_provider/aws_bedrock_model_provider.py:97
+msgid ""
+"7B dense converter for rapid deployment and easy customization. Small in "
+"size yet powerful in a variety of use cases. Supports English and code, as "
+"well as 32k context windows."
+msgstr ""
+"7B 密集型转换器,可快速部署,易于定制。体积虽小,但功能强大,适用于各种用例。"
+"支持英语和代码,以及 32k 的上下文窗口。"
+
+#: community/apps/setting/models_provider/impl/aws_bedrock_model_provider/aws_bedrock_model_provider.py:103
+msgid ""
+"Advanced Mistral AI large-scale language model capable of handling any "
+"language task, including complex multilingual reasoning, text understanding, "
+"transformation, and code generation."
+msgstr ""
+"先进的 Mistral AI 大型语言模型,能够处理任何语言任务,包括复杂的多语言推理、"
+"文本理解、转换和代码生成。"
+
+#: community/apps/setting/models_provider/impl/aws_bedrock_model_provider/aws_bedrock_model_provider.py:109
+msgid ""
+"Ideal for content creation, conversational AI, language understanding, R&D, "
+"and enterprise applications"
+msgstr "非常适合内容创作、会话式人工智能、语言理解、研发和企业应用"
+
+#: community/apps/setting/models_provider/impl/aws_bedrock_model_provider/aws_bedrock_model_provider.py:115
+msgid ""
+"Ideal for limited computing power and resources, edge devices, and faster "
+"training times."
+msgstr "非常适合有限的计算能力和资源、边缘设备和更快的训练时间。"
+
+#: community/apps/setting/models_provider/impl/aws_bedrock_model_provider/aws_bedrock_model_provider.py:123
+msgid ""
+"Titan Embed Text is the largest embedding model in the Amazon Titan Embed "
+"series and can handle various text embedding tasks, such as text "
+"classification, text similarity calculation, etc."
+msgstr ""
+"Titan Embed Text 是 Amazon Titan Embed 系列中最大的嵌入模型,可以处理各种文本"
+"嵌入任务,如文本分类、文本相似度计算等。"
+
+#: community/apps/setting/models_provider/impl/aws_bedrock_model_provider/credential/embedding.py:28
+#: community/apps/setting/models_provider/impl/aws_bedrock_model_provider/credential/llm.py:47
+#, python-brace-format
+msgid "The following fields are required: {keys}"
+msgstr "以下字段是必填项: {keys}"
+
+#: community/apps/setting/models_provider/impl/azure_model_provider/credential/embedding.py:44
+#: community/apps/setting/models_provider/impl/azure_model_provider/credential/llm.py:64
+msgid "Verification failed, please check whether the parameters are correct"
+msgstr "验证失败,请检查参数是否正确"
+
+#: community/apps/setting/models_provider/impl/azure_model_provider/credential/tti.py:28
+#: community/apps/setting/models_provider/impl/openai_model_provider/credential/tti.py:29
+#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/tti.py:29
+#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/tti.py:28
+msgid "Picture quality"
+msgstr "图片质量"
+
+#: community/apps/setting/models_provider/impl/azure_model_provider/credential/tts.py:17
+#: community/apps/setting/models_provider/impl/openai_model_provider/credential/tts.py:17
+#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/tts.py:17
+msgid ""
+"Try out the different sounds (Alloy, Echo, Fable, Onyx, Nova, and Sparkle) "
+"to find one that suits your desired tone and audience. The current voiceover "
+"is optimized for English."
+msgstr ""
+"尝试不同的声音(合金、回声、寓言、缟玛瑙、新星和闪光),找到一种适合您所需的"
+"音调和听众的声音。当前的语音针对英语进行了优化。"
+
+#: community/apps/setting/models_provider/impl/deepseek_model_provider/deepseek_model_provider.py:24
+msgid "Good at common conversational tasks, supports 32K contexts"
+msgstr "擅长通用对话任务,支持 32K 上下文"
+
+#: community/apps/setting/models_provider/impl/deepseek_model_provider/deepseek_model_provider.py:29
+msgid "Good at handling programming tasks, supports 16K contexts"
+msgstr "擅长处理编程任务,支持 16K 上下文"
+
+#: community/apps/setting/models_provider/impl/gemini_model_provider/gemini_model_provider.py:32
+msgid "Latest Gemini 1.0 Pro model, updated with Google update"
+msgstr "最新的 Gemini 1.0 Pro 模型,随 Google 更新而更新"
+
+#: community/apps/setting/models_provider/impl/gemini_model_provider/gemini_model_provider.py:36
+msgid "Latest Gemini 1.0 Pro Vision model, updated with Google update"
+msgstr "最新的Gemini 1.0 Pro Vision模型,随Google更新而更新"
+
+#: community/apps/setting/models_provider/impl/gemini_model_provider/gemini_model_provider.py:43
+#: community/apps/setting/models_provider/impl/gemini_model_provider/gemini_model_provider.py:47
+#: community/apps/setting/models_provider/impl/gemini_model_provider/gemini_model_provider.py:54
+#: community/apps/setting/models_provider/impl/gemini_model_provider/gemini_model_provider.py:58
+msgid "Latest Gemini 1.5 Flash model, updated with Google updates"
+msgstr "最新的Gemini 1.5 Flash模型,随Google更新而更新"
+
+#: community/apps/setting/models_provider/impl/gemini_model_provider/model/stt.py:53
+msgid "convert audio to text"
+msgstr "将音频转换为文本"
+
+#: community/apps/setting/models_provider/impl/local_model_provider/credential/embedding.py:53
+#: community/apps/setting/models_provider/impl/local_model_provider/credential/reranker.py:54
+msgid "Model catalog"
+msgstr "模型目录"
+
+#: community/apps/setting/models_provider/impl/local_model_provider/local_model_provider.py:39
+msgid "local model"
+msgstr "本地模型"
+
+#: community/apps/setting/models_provider/impl/ollama_model_provider/credential/embedding.py:30
+#: community/apps/setting/models_provider/impl/ollama_model_provider/credential/image.py:43
+#: community/apps/setting/models_provider/impl/ollama_model_provider/credential/llm.py:48
+#: community/apps/setting/models_provider/impl/ollama_model_provider/credential/reranker.py:35
+#: community/apps/setting/models_provider/impl/vllm_model_provider/credential/llm.py:43
+#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/embedding.py:24
+#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/llm.py:44
+msgid "API domain name is invalid"
+msgstr "API域名无效"
+
+#: community/apps/setting/models_provider/impl/ollama_model_provider/credential/embedding.py:35
+#: community/apps/setting/models_provider/impl/ollama_model_provider/credential/image.py:48
+#: community/apps/setting/models_provider/impl/ollama_model_provider/credential/llm.py:53
+#: community/apps/setting/models_provider/impl/ollama_model_provider/credential/reranker.py:40
+#: community/apps/setting/models_provider/impl/vllm_model_provider/credential/llm.py:47
+#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/embedding.py:30
+#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/llm.py:48
+msgid "The model does not exist, please download the model first"
+msgstr "模型不存在,请先下载模型"
+
+#: community/apps/setting/models_provider/impl/ollama_model_provider/ollama_model_provider.py:56
+msgid ""
+"Llama 2 is a set of pretrained and fine-tuned generative text models ranging "
+"in size from 7 billion to 70 billion. This is a repository of 7B pretrained "
+"models. Links to other models can be found in the index at the bottom."
+msgstr ""
+"Llama 2 是一组经过预训练和微调的生成文本模型,其规模从 70 亿到 700 亿个不等。"
+"这是 7B 预训练模型的存储库。其他模型的链接可以在底部的索引中找到。"
+
+#: community/apps/setting/models_provider/impl/ollama_model_provider/ollama_model_provider.py:60
+msgid ""
+"Llama 2 is a set of pretrained and fine-tuned generative text models ranging "
+"in size from 7 billion to 70 billion. This is a repository of 13B pretrained "
+"models. Links to other models can be found in the index at the bottom."
+msgstr ""
+"Llama 2 是一组经过预训练和微调的生成文本模型,其规模从 70 亿到 700 亿个不等。"
+"这是 13B 预训练模型的存储库。其他模型的链接可以在底部的索引中找到。"
+
+#: community/apps/setting/models_provider/impl/ollama_model_provider/ollama_model_provider.py:64
+msgid ""
+"Llama 2 is a set of pretrained and fine-tuned generative text models ranging "
+"in size from 7 billion to 70 billion. This is a repository of 70B pretrained "
+"models. Links to other models can be found in the index at the bottom."
+msgstr ""
+"Llama 2 是一组经过预训练和微调的生成文本模型,其规模从 70 亿到 700 亿个不等。"
+"这是 70B 预训练模型的存储库。其他模型的链接可以在底部的索引中找到。"
+
+#: community/apps/setting/models_provider/impl/ollama_model_provider/ollama_model_provider.py:68
+msgid ""
+"Since the Chinese alignment of Llama2 itself is weak, we use the Chinese "
+"instruction set to fine-tune meta-llama/Llama-2-13b-chat-hf with LoRA so "
+"that it has strong Chinese conversation capabilities."
+msgstr ""
+"由于Llama2本身的中文对齐较弱,我们采用中文指令集,对meta-llama/Llama-2-13b-"
+"chat-hf进行LoRA微调,使其具备较强的中文对话能力。"
+
+#: community/apps/setting/models_provider/impl/ollama_model_provider/ollama_model_provider.py:72
+msgid ""
+"Meta Llama 3: The most capable public product LLM to date. 8 billion "
+"parameters."
+msgstr "Meta Llama 3:迄今为止最有能力的公开产品LLM。80亿参数。"
+
+#: community/apps/setting/models_provider/impl/ollama_model_provider/ollama_model_provider.py:76
+msgid ""
+"Meta Llama 3: The most capable public product LLM to date. 70 billion "
+"parameters."
+msgstr "Meta Llama 3:迄今为止最有能力的公开产品LLM。700亿参数。"
+
+#: community/apps/setting/models_provider/impl/ollama_model_provider/ollama_model_provider.py:80
+msgid ""
+"Compared with previous versions, qwen 1.5 0.5b has significantly enhanced "
+"the model's alignment with human preferences and its multi-language "
+"processing capabilities. Models of all sizes support a context length of "
+"32768 tokens. 500 million parameters."
+msgstr ""
+"qwen 1.5 0.5b 相较于以往版本,模型与人类偏好的对齐程度以及多语言处理能力上有"
+"显著增强。所有规模的模型都支持32768个tokens的上下文长度。5亿参数。"
+
+#: community/apps/setting/models_provider/impl/ollama_model_provider/ollama_model_provider.py:84
+msgid ""
+"Compared with previous versions, qwen 1.5 1.8b has significantly enhanced "
+"the model's alignment with human preferences and its multi-language "
+"processing capabilities. Models of all sizes support a context length of "
+"32768 tokens. 1.8 billion parameters."
+msgstr ""
+"qwen 1.5 1.8b 相较于以往版本,模型与人类偏好的对齐程度以及多语言处理能力上有"
+"显著增强。所有规模的模型都支持32768个tokens的上下文长度。18亿参数。"
+
+#: community/apps/setting/models_provider/impl/ollama_model_provider/ollama_model_provider.py:88
+msgid ""
+"Compared with previous versions, qwen 1.5 4b has significantly enhanced the "
+"model's alignment with human preferences and its multi-language processing "
+"capabilities. Models of all sizes support a context length of 32768 tokens. "
+"4 billion parameters."
+msgstr ""
+"qwen 1.5 4b 相较于以往版本,模型与人类偏好的对齐程度以及多语言处理能力上有显"
+"著增强。所有规模的模型都支持32768个tokens的上下文长度。40亿参数。"
+
+#: community/apps/setting/models_provider/impl/ollama_model_provider/ollama_model_provider.py:93
+msgid ""
+"Compared with previous versions, qwen 1.5 7b has significantly enhanced the "
+"model's alignment with human preferences and its multi-language processing "
+"capabilities. Models of all sizes support a context length of 32768 tokens. "
+"7 billion parameters."
+msgstr ""
+"qwen 1.5 7b 相较于以往版本,模型与人类偏好的对齐程度以及多语言处理能力上有显"
+"著增强。所有规模的模型都支持32768个tokens的上下文长度。70亿参数。"
+
+#: community/apps/setting/models_provider/impl/ollama_model_provider/ollama_model_provider.py:97
+msgid ""
+"Compared with previous versions, qwen 1.5 14b has significantly enhanced the "
+"model's alignment with human preferences and its multi-language processing "
+"capabilities. Models of all sizes support a context length of 32768 tokens. "
+"14 billion parameters."
+msgstr ""
+"qwen 1.5 14b 相较于以往版本,模型与人类偏好的对齐程度以及多语言处理能力上有显"
+"著增强。所有规模的模型都支持32768个tokens的上下文长度。140亿参数。"
+
+#: community/apps/setting/models_provider/impl/ollama_model_provider/ollama_model_provider.py:101
+msgid ""
+"Compared with previous versions, qwen 1.5 32b has significantly enhanced the "
+"model's alignment with human preferences and its multi-language processing "
+"capabilities. Models of all sizes support a context length of 32768 tokens. "
+"32 billion parameters."
+msgstr ""
+"qwen 1.5 32b 相较于以往版本,模型与人类偏好的对齐程度以及多语言处理能力上有显"
+"著增强。所有规模的模型都支持32768个tokens的上下文长度。320亿参数。"
+
+#: community/apps/setting/models_provider/impl/ollama_model_provider/ollama_model_provider.py:105
+msgid ""
+"Compared with previous versions, qwen 1.5 72b has significantly enhanced the "
+"model's alignment with human preferences and its multi-language processing "
+"capabilities. Models of all sizes support a context length of 32768 tokens. "
+"72 billion parameters."
+msgstr ""
+"qwen 1.5 72b 相较于以往版本,模型与人类偏好的对齐程度以及多语言处理能力上有显"
+"著增强。所有规模的模型都支持32768个tokens的上下文长度。720亿参数。"
+
+#: community/apps/setting/models_provider/impl/ollama_model_provider/ollama_model_provider.py:109
+msgid ""
+"Compared with previous versions, qwen 1.5 110b has significantly enhanced "
+"the model's alignment with human preferences and its multi-language "
+"processing capabilities. Models of all sizes support a context length of "
+"32768 tokens. 110 billion parameters."
+msgstr ""
+"qwen 1.5 110b 相较于以往版本,模型与人类偏好的对齐程度以及多语言处理能力上有"
+"显著增强。所有规模的模型都支持32768个tokens的上下文长度。1100亿参数。"
+
+#: community/apps/setting/models_provider/impl/ollama_model_provider/ollama_model_provider.py:153
+#: community/apps/setting/models_provider/impl/ollama_model_provider/ollama_model_provider.py:193
+msgid ""
+"Phi-3 Mini is Microsoft's 3.8B parameter, lightweight, state-of-the-art open "
+"model."
+msgstr "Phi-3 Mini是Microsoft的3.8B参数,轻量级,最先进的开放模型。"
+
+#: community/apps/setting/models_provider/impl/ollama_model_provider/ollama_model_provider.py:162
+#: community/apps/setting/models_provider/impl/ollama_model_provider/ollama_model_provider.py:197
+msgid ""
+"A high-performance open embedding model with a large token context window."
+msgstr "一个具有大 tokens上下文窗口的高性能开放嵌入模型。"
+
+#: community/apps/setting/models_provider/impl/openai_model_provider/credential/tti.py:16
+#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/tti.py:16
+msgid ""
+"The image generation endpoint allows you to create raw images based on text "
+"prompts. When using the DALL·E 3, the image size can be 1024x1024, 1024x1792 "
+"or 1792x1024 pixels."
+msgstr ""
+"图像生成端点允许您根据文本提示创建原始图像。使用 DALL·E 3 时,图像的尺寸可以"
+"为 1024x1024、1024x1792 或 1792x1024 像素。"
+
+#: community/apps/setting/models_provider/impl/openai_model_provider/credential/tti.py:29
+#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/tti.py:29
+msgid ""
+" \n"
+"By default, images are produced in standard quality, but with DALL·E 3 you "
+"can set quality: \"hd\" to enhance detail. Square, standard quality images "
+"are generated fastest.\n"
+" "
+msgstr ""
+"默认情况下,图像以标准质量生成,但使用 DALL·E 3 时,您可以设置质量:“hd”以增"
+"强细节。方形、标准质量的图像生成速度最快。"
+
+#: community/apps/setting/models_provider/impl/openai_model_provider/credential/tti.py:44
+#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/tti.py:44
+msgid ""
+"You can use DALL·E 3 to request 1 image at a time (requesting more images by "
+"issuing parallel requests), or use DALL·E 2 with the n parameter to request "
+"up to 10 images at a time."
+msgstr ""
+"您可以使用 DALL·E 3 一次请求 1 个图像(通过发出并行请求来请求更多图像),或者"
+"使用带有 n 参数的 DALL·E 2 一次最多请求 10 个图像。"
+
+#: community/apps/setting/models_provider/impl/openai_model_provider/openai_model_provider.py:35
+#: community/apps/setting/models_provider/impl/openai_model_provider/openai_model_provider.py:119
+#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/siliconCloud_model_provider.py:111
+msgid "The latest gpt-3.5-turbo, updated with OpenAI adjustments"
+msgstr "最新的gpt-3.5-turbo,随OpenAI调整而更新"
+
+#: community/apps/setting/models_provider/impl/openai_model_provider/openai_model_provider.py:38
+msgid "Latest gpt-4, updated with OpenAI adjustments"
+msgstr "最新的gpt-4,随OpenAI调整而更新"
+
+#: community/apps/setting/models_provider/impl/openai_model_provider/openai_model_provider.py:40
+#: community/apps/setting/models_provider/impl/openai_model_provider/openai_model_provider.py:99
+msgid ""
+"The latest GPT-4o, cheaper and faster than gpt-4-turbo, updated with OpenAI "
+"adjustments"
+msgstr "最新的GPT-4o,比gpt-4-turbo更便宜、更快,随OpenAI调整而更新"
+
+#: community/apps/setting/models_provider/impl/openai_model_provider/openai_model_provider.py:43
+#: community/apps/setting/models_provider/impl/openai_model_provider/openai_model_provider.py:102
+msgid ""
+"The latest gpt-4o-mini, cheaper and faster than gpt-4o, updated with OpenAI "
+"adjustments"
+msgstr "最新的gpt-4o-mini,比gpt-4o更便宜、更快,随OpenAI调整而更新"
+
+#: community/apps/setting/models_provider/impl/openai_model_provider/openai_model_provider.py:46
+msgid "The latest gpt-4-turbo, updated with OpenAI adjustments"
+msgstr "最新的gpt-4-turbo,随OpenAI调整而更新"
+
+#: community/apps/setting/models_provider/impl/openai_model_provider/openai_model_provider.py:49
+msgid "The latest gpt-4-turbo-preview, updated with OpenAI adjustments"
+msgstr "最新的gpt-4-turbo-preview,随OpenAI调整而更新"
+
+#: community/apps/setting/models_provider/impl/openai_model_provider/openai_model_provider.py:53
+msgid ""
+"gpt-3.5-turbo snapshot on January 25, 2024, supporting context length 16,385 "
+"tokens"
+msgstr "2024年1月25日的gpt-3.5-turbo快照,支持上下文长度16,385 tokens"
+
+#: community/apps/setting/models_provider/impl/openai_model_provider/openai_model_provider.py:57
+msgid ""
+"gpt-3.5-turbo snapshot on November 6, 2023, supporting context length 16,385 "
+"tokens"
+msgstr "2023年11月6日的gpt-3.5-turbo快照,支持上下文长度16,385 tokens"
+
+#: community/apps/setting/models_provider/impl/openai_model_provider/openai_model_provider.py:61
+msgid ""
+"[Legacy] gpt-3.5-turbo snapshot on June 13, 2023, will be deprecated on June "
+"13, 2024"
+msgstr "[Legacy] 2023年6月13日的gpt-3.5-turbo快照,将于2024年6月13日弃用"
+
+#: community/apps/setting/models_provider/impl/openai_model_provider/openai_model_provider.py:65
+msgid ""
+"gpt-4o snapshot on May 13, 2024, supporting context length 128,000 tokens"
+msgstr "2024年5月13日的gpt-4o快照,支持上下文长度128,000 tokens"
+
+#: community/apps/setting/models_provider/impl/openai_model_provider/openai_model_provider.py:69
+msgid ""
+"gpt-4-turbo snapshot on April 9, 2024, supporting context length 128,000 "
+"tokens"
+msgstr "2024年4月9日的gpt-4-turbo快照,支持上下文长度128,000 tokens"
+
+#: community/apps/setting/models_provider/impl/openai_model_provider/openai_model_provider.py:72
+msgid ""
+"gpt-4-turbo snapshot on January 25, 2024, supporting context length 128,000 "
+"tokens"
+msgstr "2024年1月25日的gpt-4-turbo快照,支持上下文长度128,000 tokens"
+
+#: community/apps/setting/models_provider/impl/openai_model_provider/openai_model_provider.py:75
+msgid ""
+"gpt-4-turbo snapshot on November 6, 2023, supporting context length 128,000 "
+"tokens"
+msgstr "2023年11月6日的gpt-4-turbo快照,支持上下文长度128,000 tokens"
+
+#: community/apps/setting/models_provider/impl/qwen_model_provider/qwen_model_provider.py:63
+msgid "Tongyi Qianwen"
+msgstr "通义千问"
+
+#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/model/reranker.py:46
+msgid "Please provide server URL"
+msgstr "请提供服务器URL"
+
+#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/model/reranker.py:49
+msgid "Please provide the model"
+msgstr "请提供模型"
+
+#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/model/reranker.py:52
+msgid "Please provide the API Key"
+msgstr "请提供API密钥"
+
+#: community/apps/setting/models_provider/impl/tencent_cloud_model_provider/tencent_cloud_model_provider.py:58
+msgid "Tencent Cloud"
+msgstr "腾讯云"
+
+#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/llm.py:41
+#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:88
+#, python-brace-format
+msgid "{keys} is required"
+msgstr "{keys} 是必填项"
+
+#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:14
+msgid "painting style"
+msgstr "绘画风格"
+
+#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:14
+msgid "If not passed, the default value is 201 (Japanese anime style)"
+msgstr "如果未传递,则默认值为201(日本动漫风格)"
+
+#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:18
+msgid "Not limited to style"
+msgstr "不限定风格"
+
+#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:19
+msgid "ink painting"
+msgstr "水墨画"
+
+#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:20
+msgid "concept art"
+msgstr "概念艺术"
+
+#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:21
+msgid "Oil painting 1"
+msgstr "油画1"
+
+#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:22
+msgid "Oil Painting 2 (Van Gogh)"
+msgstr "油画2(梵高)"
+
+#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:23
+msgid "watercolor painting"
+msgstr "水彩画"
+
+#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:24
+msgid "pixel art"
+msgstr "像素画"
+
+#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:25
+msgid "impasto style"
+msgstr "厚涂风格"
+
+#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:26
+msgid "illustration"
+msgstr "插图"
+
+#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:27
+msgid "paper cut style"
+msgstr "剪纸风格"
+
+#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:28
+msgid "Impressionism 1 (Monet)"
+msgstr "印象派1(莫奈)"
+
+#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:29
+msgid "Impressionism 2"
+msgstr "印象派2"
+
+#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:31
+msgid "classical portraiture"
+msgstr "古典肖像画"
+
+#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:32
+msgid "black and white sketch"
+msgstr "黑白素描画"
+
+#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:33
+msgid "cyberpunk"
+msgstr "赛博朋克"
+
+#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:34
+msgid "science fiction style"
+msgstr "科幻风格"
+
+#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:35
+msgid "dark style"
+msgstr "暗黑风格"
+
+#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:37
+msgid "vaporwave"
+msgstr "蒸汽波"
+
+#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:38
+msgid "Japanese animation"
+msgstr "日系动漫"
+
+#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:39
+msgid "monster style"
+msgstr "怪兽风格"
+
+#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:40
+msgid "Beautiful ancient style"
+msgstr "唯美古风"
+
+#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:41
+msgid "retro anime"
+msgstr "复古动漫"
+
+#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:42
+msgid "Game cartoon hand drawing"
+msgstr "游戏卡通手绘"
+
+#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:43
+msgid "Universal realistic style"
+msgstr "通用写实风格"
+
+#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:50
+msgid "Generate image resolution"
+msgstr "生成图像分辨率"
+
+#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:50
+msgid "If not transmitted, the default value is 768:768."
+msgstr "不传默认使用768:768。"
+
+#: community/apps/setting/models_provider/impl/tencent_model_provider/tencent_model_provider.py:38
+msgid ""
+"The most effective version of the current hybrid model, the trillion-level "
+"parameter scale MOE-32K long article model. Reaching the absolute leading "
+"level on various benchmarks, with complex instructions and reasoning, "
+"complex mathematical capabilities, support for function call, and "
+"application focus optimization in fields such as multi-language translation, "
+"finance, law, and medical care"
+msgstr ""
+"当前混元模型中效果最优版本,万亿级参数规模 MOE-32K 长文模型。在各种 "
+"benchmark 上达到绝对领先的水平,复杂指令和推理,具备复杂数学能力,支持 "
+"functioncall,在多语言翻译、金融法律医疗等领域应用重点优化"
+
+#: community/apps/setting/models_provider/impl/tencent_model_provider/tencent_model_provider.py:45
+msgid ""
+"A better routing strategy is adopted to simultaneously alleviate the "
+"problems of load balancing and expert convergence. For long articles, the "
+"needle-in-a-haystack index reaches 99.9%"
+msgstr ""
+"采用更优的路由策略,同时缓解了负载均衡和专家趋同的问题。长文方面,大海捞针指"
+"标达到99.9%"
+
+#: community/apps/setting/models_provider/impl/tencent_model_provider/tencent_model_provider.py:51
+msgid ""
+"Upgraded to MOE structure, the context window is 256k, leading many open "
+"source models in multiple evaluation sets such as NLP, code, mathematics, "
+"industry, etc."
+msgstr ""
+"升级为 MOE 结构,上下文窗口为 256k ,在 NLP,代码,数学,行业等多项评测集上领"
+"先众多开源模型"
+
+#: community/apps/setting/models_provider/impl/tencent_model_provider/tencent_model_provider.py:57
+msgid ""
+"Hunyuan's latest version of the role-playing model, a role-playing model "
+"launched by Hunyuan's official fine-tuning training, is based on the Hunyuan "
+"model combined with the role-playing scene data set for additional training, "
+"and has better basic effects in role-playing scenes."
+msgstr ""
+"混元最新版角色扮演模型,混元官方精调训练推出的角色扮演模型,基于混元模型结合"
+"角色扮演场景数据集进行增训,在角色扮演场景具有更好的基础效果"
+
+#: community/apps/setting/models_provider/impl/tencent_model_provider/tencent_model_provider.py:63
+msgid ""
+"Hunyuan's latest MOE architecture FunctionCall model has been trained with "
+"high-quality FunctionCall data and has a context window of 32K, leading in "
+"multiple dimensions of evaluation indicators."
+msgstr ""
+"混元最新 MOE 架构 FunctionCall 模型,经过高质量的 FunctionCall 数据训练,上下"
+"文窗口达 32K,在多个维度的评测指标上处于领先。"
+
+#: community/apps/setting/models_provider/impl/tencent_model_provider/tencent_model_provider.py:69
+msgid ""
+"Hunyuan's latest code generation model, after training the base model with "
+"200B high-quality code data, and iterating on high-quality SFT data for half "
+"a year, the context long window length has been increased to 8K, and it "
+"ranks among the top in the automatic evaluation indicators of code "
+"generation in the five major languages; the five major languages In the "
+"manual high-quality evaluation of 10 comprehensive code tasks that consider "
+"all aspects, the performance is in the first echelon."
+msgstr ""
+"混元最新代码生成模型,经过 200B 高质量代码数据增训基座模型,迭代半年高质量 "
+"SFT 数据训练,上下文长窗口长度增大到 8K,五大语言代码生成自动评测指标上位居前"
+"列;五大语言10项考量各方面综合代码任务人工高质量评测上,性能处于第一梯队"
+
+#: community/apps/setting/models_provider/impl/tencent_model_provider/tencent_model_provider.py:77
+msgid ""
+"Tencent's Hunyuan Embedding interface can convert text into high-quality "
+"vector data. The vector dimension is 1024 dimensions."
+msgstr ""
+"腾讯混元 Embedding 接口,可以将文本转化为高质量的向量数据。向量维度为1024维。"
+
+#: community/apps/setting/models_provider/impl/tencent_model_provider/tencent_model_provider.py:87
+msgid "Mixed element visual model"
+msgstr "混元视觉模型"
+
+#: community/apps/setting/models_provider/impl/tencent_model_provider/tencent_model_provider.py:94
+msgid "Hunyuan graph model"
+msgstr "混元生图模型"
+
+#: community/apps/setting/models_provider/impl/tencent_model_provider/tencent_model_provider.py:125
+msgid "Tencent Hunyuan"
+msgstr "腾讯混元"
+
+#: community/apps/setting/models_provider/impl/vllm_model_provider/vllm_model_provider.py:24
+#: community/apps/setting/models_provider/impl/vllm_model_provider/vllm_model_provider.py:42
+msgid "Facebook’s 125M parameter model"
+msgstr "Facebook的125M参数模型"
+
+#: community/apps/setting/models_provider/impl/vllm_model_provider/vllm_model_provider.py:25
+msgid "BAAI’s 7B parameter model"
+msgstr "BAAI的7B参数模型"
+
+#: community/apps/setting/models_provider/impl/vllm_model_provider/vllm_model_provider.py:26
+msgid "BAAI’s 13B parameter mode"
+msgstr "BAAI的13B参数模型"
+
+#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/tti.py:16
+msgid ""
+"If the gap between width, height and 512 is too large, the picture rendering "
+"effect will be poor and the probability of excessive delay will increase "
+"significantly. Recommended ratio and corresponding width and height before "
+"super score: width*height"
+msgstr ""
+"宽、高与512差距过大,则出图效果不佳、延迟过长概率显著增加。超分前建议比例及对"
+"应宽高:width*height"
+
+#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/tts.py:23
+#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/tts.py:29
+msgid "Universal female voice"
+msgstr "通用女声"
+
+#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/tts.py:25
+msgid "Supernatural timbre-ZiZi 2.0"
+msgstr "超自然音色-梓梓2.0"
+
+#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/tts.py:26
+msgid "Supernatural timbre-ZiZi"
+msgstr "超自然音色-梓梓"
+
+#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/tts.py:27
+msgid "Supernatural sound-Ranran 2.0"
+msgstr "超自然音色-燃燃2.0"
+
+#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/tts.py:28
+msgid "Supernatural sound-Ranran"
+msgstr "超自然音色-燃燃"
+
+#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/tts.py:30
+msgid "Universal male voice"
+msgstr "通用男声"
+
+#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/tts.py:33
+msgid "[0.2,3], the default is 1, usually one decimal place is enough"
+msgstr "[0.2,3],默认为1,通常保留一位小数即可"
+
+#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/volcanic_engine_model_provider.py:39
+#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/volcanic_engine_model_provider.py:44
+#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/volcanic_engine_model_provider.py:88
+msgid ""
+"The user goes to the model inference page of Volcano Ark to create an "
+"inference access point. Here, you need to enter ep-xxxxxxxxxx-yyyy to call "
+"it."
+msgstr ""
+"用户前往火山方舟的模型推理页面创建推理接入点,这里需要输入ep-xxxxxxxxxx-yyyy"
+"进行调用"
+
+#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/volcanic_engine_model_provider.py:59
+msgid "Universal 2.0-Vincent Diagram"
+msgstr "通用2.0-文生图"
+
+#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/volcanic_engine_model_provider.py:64
+msgid "Universal 2.0Pro-Vincent Chart"
+msgstr "通用2.0Pro-文生图"
+
+#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/volcanic_engine_model_provider.py:69
+msgid "Universal 1.4-Vincent Chart"
+msgstr "通用1.4-文生图"
+
+#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/volcanic_engine_model_provider.py:74
+msgid "Animation 1.3.0-Vincent Picture"
+msgstr "动漫1.3.0-文生图"
+
+#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/volcanic_engine_model_provider.py:79
+msgid "Animation 1.3.1-Vincent Picture"
+msgstr "动漫1.3.1-文生图"
+
+#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/volcanic_engine_model_provider.py:113
+msgid "volcano engine"
+msgstr "火山引擎"
+
+#: community/apps/setting/models_provider/impl/wenxin_model_provider/credential/llm.py:51
+#, python-brace-format
+msgid "{model_name} The model does not support"
+msgstr "{model_name} 模型不支持"
+
+#: community/apps/setting/models_provider/impl/wenxin_model_provider/wenxin_model_provider.py:24
+#: community/apps/setting/models_provider/impl/wenxin_model_provider/wenxin_model_provider.py:53
+msgid ""
+"ERNIE-Bot-4 is a large language model independently developed by Baidu. It "
+"covers massive Chinese data and has stronger capabilities in dialogue Q&A, "
+"content creation and generation."
+msgstr ""
+"ERNIE-Bot-4是百度自行研发的大语言模型,覆盖海量中文数据,具有更强的对话问答、"
+"内容创作生成等能力。"
+
+#: community/apps/setting/models_provider/impl/wenxin_model_provider/wenxin_model_provider.py:27
+msgid ""
+"ERNIE-Bot is a large language model independently developed by Baidu. It "
+"covers massive Chinese data and has stronger capabilities in dialogue Q&A, "
+"content creation and generation."
+msgstr ""
+"ERNIE-Bot是百度自行研发的大语言模型,覆盖海量中文数据,具有更强的对话问答、内"
+"容创作生成等能力。"
+
+#: community/apps/setting/models_provider/impl/wenxin_model_provider/wenxin_model_provider.py:30
+msgid ""
+"ERNIE-Bot-turbo is a large language model independently developed by Baidu. "
+"It covers massive Chinese data, has stronger capabilities in dialogue Q&A, "
+"content creation and generation, and has a faster response speed."
+msgstr ""
+"ERNIE-Bot-turbo是百度自行研发的大语言模型,覆盖海量中文数据,具有更强的对话问"
+"答、内容创作生成等能力,响应速度更快。"
+
+#: community/apps/setting/models_provider/impl/wenxin_model_provider/wenxin_model_provider.py:33
+msgid ""
+"BLOOMZ-7B is a well-known large language model in the industry. It was "
+"developed and open sourced by BigScience and can output text in 46 languages "
+"and 13 programming languages."
+msgstr ""
+"BLOOMZ-7B是业内知名的大语言模型,由BigScience研发并开源,能够以46种语言和13种"
+"编程语言输出文本。"
+
+#: community/apps/setting/models_provider/impl/wenxin_model_provider/wenxin_model_provider.py:39
+msgid ""
+"Llama-2-13b-chat was developed by Meta AI and is open source. It performs "
+"well in scenarios such as coding, reasoning and knowledge application. "
+"Llama-2-13b-chat is a native open source version with balanced performance "
+"and effect, suitable for conversation scenarios."
+msgstr ""
+"Llama-2-13b-chat由Meta AI研发并开源,在编码、推理及知识应用等场景表现优秀,"
+"Llama-2-13b-chat是性能与效果均衡的原生开源版本,适用于对话场景。"
+
+#: community/apps/setting/models_provider/impl/wenxin_model_provider/wenxin_model_provider.py:42
+msgid ""
+"Llama-2-70b-chat was developed by Meta AI and is open source. It performs "
+"well in scenarios such as coding, reasoning, and knowledge application. "
+"Llama-2-70b-chat is a native open source version with high-precision effects."
+msgstr ""
+"Llama-2-70b-chat由Meta AI研发并开源,在编码、推理及知识应用等场景表现优秀,"
+"Llama-2-70b-chat是高精度效果的原生开源版本。"
+
+#: community/apps/setting/models_provider/impl/wenxin_model_provider/wenxin_model_provider.py:45
+msgid ""
+"The Chinese enhanced version developed by the Qianfan team based on "
+"Llama-2-7b has performed well on Chinese knowledge bases such as CMMLU and C-"
+"EVAL."
+msgstr ""
+"千帆团队在Llama-2-7b基础上的中文增强版本,在CMMLU、C-EVAL等中文知识库上表现优"
+"异。"
+
+#: community/apps/setting/models_provider/impl/wenxin_model_provider/wenxin_model_provider.py:49
+msgid ""
+"Embedding-V1 is a text representation model based on Baidu Wenxin large "
+"model technology. It can convert text into a vector form represented by "
+"numerical values and can be used in text retrieval, information "
+"recommendation, knowledge mining and other scenarios. Embedding-V1 provides "
+"the Embeddings interface, which can generate corresponding vector "
+"representations based on input content. You can call this interface to input "
+"text into the model and obtain the corresponding vector representation for "
+"subsequent text processing and analysis."
+msgstr ""
+"Embedding-V1是一个基于百度文心大模型技术的文本表示模型,可以将文本转化为用数"
+"值表示的向量形式,用于文本检索、信息推荐、知识挖掘等场景。 Embedding-V1提供了"
+"Embeddings接口,可以根据输入内容生成对应的向量表示。您可以通过调用该接口,将"
+"文本输入到模型中,获取到对应的向量表示,从而进行后续的文本处理和分析。"
+
+#: community/apps/setting/models_provider/impl/wenxin_model_provider/wenxin_model_provider.py:66
+msgid "Thousand sails large model"
+msgstr "千帆大模型"
+
+#: community/apps/setting/models_provider/impl/xf_model_provider/credential/image.py:42
+msgid "Please outline this picture"
+msgstr "请描述这张图片"
+
+#: community/apps/setting/models_provider/impl/xf_model_provider/credential/tts.py:15
+msgid "Speaker"
+msgstr "发音人"
+
+#: community/apps/setting/models_provider/impl/xf_model_provider/credential/tts.py:16
+msgid ""
+"Speaker, optional value: Please go to the console to add a trial or purchase "
+"speaker. After adding, the speaker parameter value will be displayed."
+msgstr ""
+"发音人,可选值:请到控制台添加试用或购买发音人,添加后即显示发音人参数值"
+
+#: community/apps/setting/models_provider/impl/xf_model_provider/credential/tts.py:21
+msgid "iFlytek Xiaoyan"
+msgstr "讯飞小燕"
+
+#: community/apps/setting/models_provider/impl/xf_model_provider/credential/tts.py:22
+msgid "iFlytek Xujiu"
+msgstr "讯飞许久"
+
+#: community/apps/setting/models_provider/impl/xf_model_provider/credential/tts.py:23
+msgid "iFlytek Xiaoping"
+msgstr "讯飞小萍"
+
+#: community/apps/setting/models_provider/impl/xf_model_provider/credential/tts.py:24
+msgid "iFlytek Xiaojing"
+msgstr "讯飞小婧"
+
+#: community/apps/setting/models_provider/impl/xf_model_provider/credential/tts.py:25
+msgid "iFlytek Xuxiaobao"
+msgstr "讯飞许小宝"
+
+#: community/apps/setting/models_provider/impl/xf_model_provider/credential/tts.py:28
+msgid "Speech speed, optional value: [0-100], default is 50"
+msgstr "语速,可选值:[0-100],默认为50"
+
+#: community/apps/setting/models_provider/impl/xf_model_provider/xf_model_provider.py:39
+#: community/apps/setting/models_provider/impl/xf_model_provider/xf_model_provider.py:50
+msgid "Chinese and English recognition"
+msgstr "中英文识别"
+
+#: community/apps/setting/models_provider/impl/xf_model_provider/xf_model_provider.py:66
+msgid "iFlytek Spark"
+msgstr "讯飞星火"
+
+#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/tti.py:15
+msgid ""
+"The image generation endpoint allows you to create raw images based on text "
+"prompts. The dimensions of the image can be 1024x1024, 1024x1792, or "
+"1792x1024 pixels."
+msgstr ""
+"图像生成端点允许您根据文本提示创建原始图像。图像的尺寸可以为 1024x1024、"
+"1024x1792 或 1792x1024 像素。"
+
+#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/tti.py:29
+msgid ""
+"By default, images are generated in standard quality, you can set quality: "
+"\"hd\" to enhance detail. Square, standard quality images are generated "
+"fastest."
+msgstr ""
+"默认情况下,图像以标准质量生成,您可以设置质量:“hd”以增强细节。方形、标准质"
+"量的图像生成速度最快。"
+
+#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/tti.py:42
+msgid ""
+"You can request 1 image at a time (requesting more images by making parallel "
+"requests), or up to 10 images at a time using the n parameter."
+msgstr ""
+"您可以一次请求 1 个图像(通过发出并行请求来请求更多图像),或者使用 n 参数一"
+"次最多请求 10 个图像。"
+
+#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/tts.py:20
+msgid "Chinese female"
+msgstr "中文女"
+
+#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/tts.py:21
+msgid "Chinese male"
+msgstr "中文男"
+
+#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/tts.py:22
+msgid "Japanese male"
+msgstr "日语男"
+
+#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/tts.py:23
+msgid "Cantonese female"
+msgstr "粤语女"
+
+#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/tts.py:24
+msgid "English female"
+msgstr "英文女"
+
+#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/tts.py:25
+msgid "English male"
+msgstr "英文男"
+
+#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/tts.py:26
+msgid "Korean female"
+msgstr "韩语女"
+
+#: community/apps/setting/models_provider/impl/xinference_model_provider/xinference_model_provider.py:37
+msgid ""
+"Code Llama is a language model specifically designed for code generation."
+msgstr "Code Llama 是一个专门用于代码生成的语言模型。"
+
+#: community/apps/setting/models_provider/impl/xinference_model_provider/xinference_model_provider.py:44
+msgid ""
+" \n"
+"Code Llama Instruct is a fine-tuned version of Code Llama's instructions, "
+"designed to perform specific tasks.\n"
+" "
+msgstr ""
+"Code Llama Instruct 是 Code Llama 的指令微调版本,专为执行特定任务而设计。"
+
+#: community/apps/setting/models_provider/impl/xinference_model_provider/xinference_model_provider.py:53
+msgid ""
+"Code Llama Python is a language model specifically designed for Python code "
+"generation."
+msgstr "Code Llama Python 是一个专门用于 Python 代码生成的语言模型。"
+
+#: community/apps/setting/models_provider/impl/xinference_model_provider/xinference_model_provider.py:60
+msgid ""
+"CodeQwen 1.5 is a language model for code generation with high performance."
+msgstr "CodeQwen 1.5 是一个用于代码生成的语言模型,具有较高的性能。"
+
+#: community/apps/setting/models_provider/impl/xinference_model_provider/xinference_model_provider.py:67
+msgid "CodeQwen 1.5 Chat is a chat model version of CodeQwen 1.5."
+msgstr "CodeQwen 1.5 Chat 是 CodeQwen 1.5 的聊天模型版本。"
+
+#: community/apps/setting/models_provider/impl/xinference_model_provider/xinference_model_provider.py:74
+msgid "Deepseek is a large-scale language model with 13 billion parameters."
+msgstr "Deepseek 是一个拥有 130 亿参数的大规模语言模型。"
+
+#: community/apps/setting/models_provider/impl/zhipu_model_provider/credential/tti.py:16
+msgid ""
+"Image size, only cogview-3-plus supports this parameter. Optional range: "
+"[1024x1024,768x1344,864x1152,1344x768,1152x864,1440x720,720x1440], the "
+"default is 1024x1024."
+msgstr ""
+"图片尺寸,仅 cogview-3-plus 支持该参数。可选范围:"
+"[1024x1024,768x1344,864x1152,1344x768,1152x864,1440x720,720x1440],默认是"
+"1024x1024。"
+
+#: community/apps/setting/models_provider/impl/zhipu_model_provider/zhipu_model_provider.py:34
+msgid ""
+"Have strong multi-modal understanding capabilities. Able to understand up to "
+"five images simultaneously and supports video content understanding"
+msgstr "具有强大的多模态理解能力。能够同时理解多达五张图像,并支持视频内容理解"
+
+#: community/apps/setting/models_provider/impl/zhipu_model_provider/zhipu_model_provider.py:37
+msgid ""
+"Focus on single picture understanding. Suitable for scenarios requiring "
+"efficient image analysis"
+msgstr "专注于单图理解。适用于需要高效图像解析的场景"
+
+#: community/apps/setting/models_provider/impl/zhipu_model_provider/zhipu_model_provider.py:40
+msgid ""
+"Focus on single picture understanding. Suitable for scenarios requiring "
+"efficient image analysis (free)"
+msgstr "专注于单图理解。适用于需要高效图像解析的场景(免费)"
+
+#: community/apps/setting/models_provider/impl/zhipu_model_provider/zhipu_model_provider.py:46
+msgid ""
+"Quickly and accurately generate images based on user text descriptions. "
+"Resolution supports 1024x1024"
+msgstr "根据用户文字描述快速、精准生成图像。分辨率支持1024x1024"
+
+#: community/apps/setting/models_provider/impl/zhipu_model_provider/zhipu_model_provider.py:49
+msgid ""
+"Generate high-quality images based on user text descriptions, supporting "
+"multiple image sizes"
+msgstr "根据用户文字描述生成高质量图像,支持多图片尺寸"
+
+#: community/apps/setting/models_provider/impl/zhipu_model_provider/zhipu_model_provider.py:52
+msgid ""
+"Generate high-quality images based on user text descriptions, supporting "
+"multiple image sizes (free)"
+msgstr "根据用户文字描述生成高质量图像,支持多图片尺寸(免费)"
+
+#: community/apps/setting/models_provider/impl/zhipu_model_provider/zhipu_model_provider.py:75
+msgid "zhipu AI"
+msgstr "智谱 AI"
+
+#: community/apps/setting/serializers/model_apply_serializers.py:32
+#: community/apps/setting/serializers/model_apply_serializers.py:37
+msgid "vector text"
+msgstr "向量文本"
+
+#: community/apps/setting/serializers/model_apply_serializers.py:33
+msgid "vector text list"
+msgstr "向量文本列表"
+
+#: community/apps/setting/serializers/model_apply_serializers.py:41
+msgid "text"
+msgstr "文本"
+
+#: community/apps/setting/serializers/model_apply_serializers.py:42
+msgid "metadata"
+msgstr "元数据"
+
+#: community/apps/setting/serializers/model_apply_serializers.py:47
+msgid "query"
+msgstr "查询"
+
+#: community/apps/setting/serializers/provider_serializers.py:79
+#: community/apps/setting/serializers/provider_serializers.py:83
+#: community/apps/setting/serializers/provider_serializers.py:130
+#: community/apps/setting/serializers/provider_serializers.py:176
+#: community/apps/setting/serializers/provider_serializers.py:190
+#: community/apps/setting/swagger_api/provide_api.py:30
+#: community/apps/setting/swagger_api/provide_api.py:54
+#: community/apps/setting/swagger_api/provide_api.py:55
+#: community/apps/setting/swagger_api/provide_api.py:87
+#: community/apps/setting/swagger_api/provide_api.py:88
+#: community/apps/setting/swagger_api/provide_api.py:170
+msgid "model name"
+msgstr "模型名称"
+
+#: community/apps/setting/serializers/provider_serializers.py:81
+#: community/apps/setting/serializers/provider_serializers.py:132
+#: community/apps/setting/serializers/provider_serializers.py:142
+#: community/apps/setting/serializers/provider_serializers.py:180
+#: community/apps/setting/swagger_api/provide_api.py:26
+#: community/apps/setting/swagger_api/provide_api.py:51
+#: community/apps/setting/swagger_api/provide_api.py:52
+#: community/apps/setting/swagger_api/provide_api.py:84
+#: community/apps/setting/swagger_api/provide_api.py:85
+#: community/apps/setting/swagger_api/provide_api.py:134
+#: community/apps/setting/swagger_api/provide_api.py:165
+msgid "model type"
+msgstr "模型类型"
+
+#: community/apps/setting/serializers/provider_serializers.py:85
+#: community/apps/setting/serializers/provider_serializers.py:178
+#: community/apps/setting/serializers/provider_serializers.py:402
+#: community/apps/setting/swagger_api/provide_api.py:35
+#: community/apps/setting/swagger_api/provide_api.py:57
+#: community/apps/setting/swagger_api/provide_api.py:58
+#: community/apps/setting/swagger_api/provide_api.py:79
+#: community/apps/setting/swagger_api/provide_api.py:80
+#: community/apps/setting/swagger_api/provide_api.py:105
+#: community/apps/setting/swagger_api/provide_api.py:129
+#: community/apps/setting/swagger_api/provide_api.py:160
+#: community/apps/setting/swagger_api/provide_api.py:179
+msgid "provider"
+msgstr "供应商"
+
+#: community/apps/setting/serializers/provider_serializers.py:87
+#: community/apps/setting/serializers/provider_serializers.py:134
+#: community/apps/setting/serializers/provider_serializers.py:182
+msgid "permission type"
+msgstr "权限类型"
+
+#: community/apps/setting/serializers/provider_serializers.py:89
+msgid "create user"
+msgstr "创建者"
+
+#: community/apps/setting/serializers/provider_serializers.py:138
+#: community/apps/setting/serializers/provider_serializers.py:186
+msgid "permissions only supportPUBLIC|PRIVATE"
+msgstr "权限类型只支持PUBLIC|PRIVATE"
+
+#: community/apps/setting/serializers/provider_serializers.py:145
+#: community/apps/setting/serializers/provider_serializers.py:196
+msgid "certification information"
+msgstr "认证信息"
+
+#: community/apps/setting/serializers/provider_serializers.py:193
+msgid "parameter configuration"
+msgstr "参数配置"
+
+#: community/apps/setting/serializers/provider_serializers.py:202
+#, python-brace-format
+msgid "Model name【{model_name}】already exists"
+msgstr "模型名称【{model_name}】已存在"
+
+#: community/apps/setting/serializers/system_setting.py:29
+#: community/apps/setting/swagger_api/system_setting.py:25
+#: community/apps/setting/swagger_api/system_setting.py:26
+#: community/apps/setting/swagger_api/system_setting.py:57
+#: community/apps/setting/swagger_api/system_setting.py:58
+msgid "SMTP host"
+msgstr "SMTP 主机"
+
+#: community/apps/setting/serializers/system_setting.py:30
+#: community/apps/setting/swagger_api/system_setting.py:28
+#: community/apps/setting/swagger_api/system_setting.py:29
+#: community/apps/setting/swagger_api/system_setting.py:60
+#: community/apps/setting/swagger_api/system_setting.py:61
+msgid "SMTP port"
+msgstr "SMTP 端口"
+
+#: community/apps/setting/serializers/system_setting.py:31
+#: community/apps/setting/serializers/system_setting.py:35
+#: community/apps/setting/swagger_api/system_setting.py:31
+#: community/apps/setting/swagger_api/system_setting.py:32
+#: community/apps/setting/swagger_api/system_setting.py:43
+#: community/apps/setting/swagger_api/system_setting.py:44
+#: community/apps/setting/swagger_api/system_setting.py:63
+#: community/apps/setting/swagger_api/system_setting.py:64
+#: community/apps/setting/swagger_api/system_setting.py:75
+#: community/apps/setting/swagger_api/system_setting.py:76
+msgid "Sender's email"
+msgstr "发件人邮箱"
+
+#: community/apps/setting/serializers/system_setting.py:32
+#: community/apps/setting/swagger_api/system_setting.py:34
+#: community/apps/setting/swagger_api/system_setting.py:35
+#: community/apps/setting/swagger_api/system_setting.py:66
+#: community/apps/setting/swagger_api/system_setting.py:67
+#: community/apps/users/serializers/user_serializers.py:72
+#: community/apps/users/serializers/user_serializers.py:112
+#: community/apps/users/serializers/user_serializers.py:143
+#: community/apps/users/serializers/user_serializers.py:211
+#: community/apps/users/serializers/user_serializers.py:293
+#: community/apps/users/serializers/user_serializers.py:346
+#: community/apps/users/serializers/user_serializers.py:671
+#: community/apps/users/serializers/user_serializers.py:703
+#: community/apps/users/serializers/user_serializers.py:704
+#: community/apps/users/serializers/user_serializers.py:743
+#: community/apps/users/serializers/user_serializers.py:763
+#: community/apps/users/serializers/user_serializers.py:764
+#: community/apps/users/views/user.py:109
+#: community/apps/users/views/user.py:110
+#: community/apps/users/views/user.py:111
+#: community/apps/users/views/user.py:112
+msgid "Password"
+msgstr "密码"
+
+#: community/apps/setting/serializers/system_setting.py:33
+#: community/apps/setting/swagger_api/system_setting.py:37
+#: community/apps/setting/swagger_api/system_setting.py:38
+#: community/apps/setting/swagger_api/system_setting.py:69
+#: community/apps/setting/swagger_api/system_setting.py:70
+msgid "Whether to enable TLS"
+msgstr "是否启用 TLS"
+
+#: community/apps/setting/serializers/system_setting.py:34
+#: community/apps/setting/swagger_api/system_setting.py:40
+#: community/apps/setting/swagger_api/system_setting.py:41
+#: community/apps/setting/swagger_api/system_setting.py:72
+#: community/apps/setting/swagger_api/system_setting.py:73
+msgid "Whether to enable SSL"
+msgstr "是否启用 SSL"
+
+#: community/apps/setting/serializers/system_setting.py:49
+msgid "Email verification failed"
+msgstr "邮箱验证失败"
+
+#: community/apps/setting/serializers/team_serializers.py:43
+#: community/apps/users/serializers/user_serializers.py:70
+#: community/apps/users/serializers/user_serializers.py:111
+#: community/apps/users/serializers/user_serializers.py:136
+#: community/apps/users/serializers/user_serializers.py:209
+#: community/apps/users/serializers/user_serializers.py:470
+#: community/apps/users/serializers/user_serializers.py:493
+#: community/apps/users/serializers/user_serializers.py:518
+#: community/apps/users/serializers/user_serializers.py:519
+#: community/apps/users/serializers/user_serializers.py:581
+#: community/apps/users/serializers/user_serializers.py:627
+#: community/apps/users/serializers/user_serializers.py:628
+#: community/apps/users/serializers/user_serializers.py:663
+#: community/apps/users/serializers/user_serializers.py:700
+#: community/apps/users/serializers/user_serializers.py:701
+msgid "Username"
+msgstr "用户名"
+
+#: community/apps/setting/serializers/team_serializers.py:44
+#: community/apps/users/serializers/user_serializers.py:131
+#: community/apps/users/serializers/user_serializers.py:210
+#: community/apps/users/serializers/user_serializers.py:226
+#: community/apps/users/serializers/user_serializers.py:256
+#: community/apps/users/serializers/user_serializers.py:287
+#: community/apps/users/serializers/user_serializers.py:343
+#: community/apps/users/serializers/user_serializers.py:356
+#: community/apps/users/serializers/user_serializers.py:438
+#: community/apps/users/serializers/user_serializers.py:471
+#: community/apps/users/serializers/user_serializers.py:494
+#: community/apps/users/serializers/user_serializers.py:520
+#: community/apps/users/serializers/user_serializers.py:582
+#: community/apps/users/serializers/user_serializers.py:629
+#: community/apps/users/serializers/user_serializers.py:658
+#: community/apps/users/serializers/user_serializers.py:702
+#: community/apps/users/serializers/user_serializers.py:713
+#: community/apps/users/serializers/user_serializers.py:734
+msgid "Email"
+msgstr "邮箱"
+
+#: community/apps/setting/serializers/team_serializers.py:47
+#: community/apps/setting/serializers/team_serializers.py:148
+#: community/apps/setting/serializers/team_serializers.py:256
+msgid "team id"
+msgstr "团队 id"
+
+#: community/apps/setting/serializers/team_serializers.py:48
+#: community/apps/setting/serializers/team_serializers.py:254
+#: community/apps/setting/serializers/team_serializers.py:324
+msgid "member id"
+msgstr "成员 id"
+
+#: community/apps/setting/serializers/team_serializers.py:54
+msgid "use"
+msgstr "使用"
+
+#: community/apps/setting/serializers/team_serializers.py:55
+msgid "manage"
+msgstr "管理"
+
+#: community/apps/setting/serializers/team_serializers.py:60
+msgid "Operation permissions USE, MANAGE permissions"
+msgstr "操作权限 USE, MANAGE 权限"
+
+#: community/apps/setting/serializers/team_serializers.py:63
+msgid "use permission"
+msgstr "使用权限"
+
+#: community/apps/setting/serializers/team_serializers.py:64
+msgid "use permission True|False"
+msgstr "使用权限 True|False"
+
+#: community/apps/setting/serializers/team_serializers.py:66
+msgid "manage permission"
+msgstr "管理权限"
+
+#: community/apps/setting/serializers/team_serializers.py:67
+msgid "manage permission True|False"
+msgstr "管理权限 True|False"
+
+#: community/apps/setting/serializers/team_serializers.py:73
+msgid "target id"
+msgstr "目标 id"
+
+#: community/apps/setting/serializers/team_serializers.py:82
+#: community/apps/setting/serializers/team_serializers.py:83
+msgid "dataset id/application id"
+msgstr "知识库 id/应用 id"
+
+#: community/apps/setting/serializers/team_serializers.py:105
+msgid "Non-existent application|knowledge base id["
+msgstr "应用|知识库 id[ 不存在"
+
+#: community/apps/setting/serializers/team_serializers.py:139
+#: community/apps/setting/serializers/team_serializers.py:140
+msgid "Permission data"
+msgstr "权限数据"
+
+#: community/apps/setting/serializers/team_serializers.py:157
+#: community/apps/setting/serializers/team_serializers.py:158
+msgid "user id list"
+msgstr "用户 id 列表"
+
+#: community/apps/setting/serializers/team_serializers.py:168
+#: community/apps/setting/serializers/team_serializers.py:169
+msgid "Username or email"
+msgstr "用户名或邮箱"
+
+#: community/apps/setting/serializers/team_serializers.py:217
+msgid "Username or email is required"
+msgstr "用户名或邮箱是必填项"
+
+#: community/apps/setting/serializers/team_serializers.py:221
+#: community/apps/users/serializers/user_serializers.py:800
+msgid "User does not exist"
+msgstr "用户不存在"
+
+#: community/apps/setting/serializers/team_serializers.py:224
+msgid "The current members already exist in the team, do not add them again."
+msgstr "当前成员已存在于团队中,无需再次添加。"
+
+#: community/apps/setting/serializers/team_serializers.py:248
+msgid "member list"
+msgstr "成员列表"
+
+#: community/apps/setting/serializers/team_serializers.py:263
+msgid "The member does not exist, please add a member first"
+msgstr "成员不存在,请先添加成员"
+
+#: community/apps/setting/serializers/team_serializers.py:297
+msgid "Administrator rights do not allow modification"
+msgstr "管理员权限不允许修改"
+
+#: community/apps/setting/serializers/team_serializers.py:311
+msgid "Unable to remove team admin"
+msgstr "不支持移除团队管理员"
+
+#: community/apps/setting/serializers/valid_serializers.py:32
+#: community/apps/users/serializers/user_serializers.py:190
+#: community/apps/users/serializers/user_serializers.py:777
+msgid ""
+"The community version supports up to 2 users. If you need more users, please "
+"contact us (https://fit2cloud.com/)."
+msgstr ""
+"社区版最多支持 2 个用户,如需拥有更多用户,请联系我们(https://"
+"fit2cloud.com/)。"
+
+#: community/apps/setting/serializers/valid_serializers.py:41
+#: community/apps/setting/swagger_api/valid_api.py:27
+msgid "check quantity"
+msgstr "检查数量"
+
+#: community/apps/setting/swagger_api/provide_api.py:43
+#: community/apps/setting/swagger_api/provide_api.py:44
+#: community/apps/setting/swagger_api/provide_api.py:71
+#: community/apps/setting/swagger_api/provide_api.py:72
+#: community/apps/setting/swagger_api/provide_api.py:190
+#: community/apps/setting/swagger_api/provide_api.py:191
+msgid "parameters required to call the function"
+msgstr "调用函数所需要的参数"
+
+#: community/apps/setting/swagger_api/provide_api.py:60
+#: community/apps/setting/swagger_api/provide_api.py:61
+#: community/apps/setting/swagger_api/provide_api.py:90
+#: community/apps/setting/swagger_api/provide_api.py:91
+msgid "model certificate information"
+msgstr "模型认证信息"
+
+#: community/apps/setting/swagger_api/provide_api.py:114
+#: community/apps/setting/swagger_api/provide_api.py:115
+msgid "model type description"
+msgstr "模型类型描述"
+
+#: community/apps/setting/swagger_api/provide_api.py:115
+msgid "large language model"
+msgstr "大语言模型"
+
+#: community/apps/setting/swagger_api/provide_api.py:116
+#: community/apps/setting/swagger_api/provide_api.py:117
+#: community/apps/setting/swagger_api/provide_api.py:147
+#: community/apps/setting/swagger_api/provide_api.py:148
+msgid "model type value"
+msgstr "模型类型值"
+
+#: community/apps/setting/swagger_api/provide_api.py:145
+#: community/apps/setting/swagger_api/provide_api.py:146
+msgid "model description"
+msgstr "模型描述"
+
+#: community/apps/setting/swagger_api/provide_api.py:184
+msgid "function that needs to be executed"
+msgstr "需要执行的函数"
+
+#: community/apps/setting/swagger_api/system_setting.py:19
+#: community/apps/setting/swagger_api/system_setting.py:20
+#: community/apps/setting/swagger_api/system_setting.py:51
+#: community/apps/setting/swagger_api/system_setting.py:52
+msgid "Email related parameters"
+msgstr "邮箱相关参数"
+
+#: community/apps/setting/swagger_api/valid_api.py:22
+msgid "Verification type: application|dataset|user"
+msgstr "认证类型:application|dataset|user"
+
+#: community/apps/setting/views/Team.py:27
+#: community/apps/setting/views/Team.py:28
+msgid "Get a list of team members"
+msgstr "获取团队成员列表"
+
+#: community/apps/setting/views/Team.py:30
+#: community/apps/setting/views/Team.py:40
+#: community/apps/setting/views/Team.py:54
+#: community/apps/setting/views/Team.py:68
+#: community/apps/setting/views/Team.py:80
+#: community/apps/setting/views/Team.py:92
+#: community/apps/users/serializers/user_serializers.py:198
+#: community/apps/users/serializers/user_serializers.py:791
+msgid "team"
+msgstr "团队成员"
+
+#: community/apps/setting/views/Team.py:37
+#: community/apps/setting/views/Team.py:38
+msgid "Add member"
+msgstr "添加成员"
+
+#: community/apps/setting/views/Team.py:51
+#: community/apps/setting/views/Team.py:52
+msgid "Add members in batches"
+msgstr "批量添加成员"
+
+#: community/apps/setting/views/Team.py:65
+#: community/apps/setting/views/Team.py:66
+msgid "Get team member permissions"
+msgstr "获取团队成员权限"
+
+#: community/apps/setting/views/Team.py:76
+#: community/apps/setting/views/Team.py:77
+msgid "Update team member permissions"
+msgstr "更新团队成员权限"
+
+#: community/apps/setting/views/Team.py:89
+#: community/apps/setting/views/Team.py:90
+msgid "Remove member"
+msgstr "移除成员"
+
+#: community/apps/setting/views/model.py:30
+#: community/apps/setting/views/model.py:31
+msgid "Create model"
+msgstr "创建模型"
+
+#: community/apps/setting/views/model.py:33
+#: community/apps/setting/views/model.py:45
+#: community/apps/setting/views/model.py:57
+#: community/apps/setting/views/model.py:74
+#: community/apps/setting/views/model.py:88
+#: community/apps/setting/views/model.py:103
+#: community/apps/setting/views/model.py:114
+#: community/apps/setting/views/model.py:129
+#: community/apps/setting/views/model.py:141
+#: community/apps/setting/views/model.py:151
+#: community/apps/setting/views/model.py:170
+#: community/apps/setting/views/model.py:180
+#: community/apps/setting/views/model.py:204
+#: community/apps/setting/views/model.py:219
+#: community/apps/setting/views/model.py:239
+#: community/apps/setting/views/model.py:257
+#: community/apps/setting/views/model_apply.py:26
+#: community/apps/setting/views/model_apply.py:36
+#: community/apps/setting/views/model_apply.py:46
+msgid "model"
+msgstr "模型设置"
+
+#: community/apps/setting/views/model.py:42
+#: community/apps/setting/views/model.py:43
+msgid "Download model, trial only with Ollama platform"
+msgstr "下载模型,仅支持 Ollama 平台试用"
+
+#: community/apps/setting/views/model.py:54
+#: community/apps/setting/views/model.py:55
+msgid "Get model list"
+msgstr "获取模型列表"
+
+#: community/apps/setting/views/model.py:71
+#: community/apps/setting/views/model.py:73
+msgid ""
+"Query model meta information, this interface does not carry authentication "
+"information"
+msgstr "查询模型元信息,该接口不携带认证信息"
+
+#: community/apps/setting/views/model.py:86
+#: community/apps/setting/views/model.py:87
+msgid "Pause model download"
+msgstr "暂停模型下载"
+
+#: community/apps/setting/views/model.py:111
+#: community/apps/setting/views/model.py:112
+msgid "Save model parameter form"
+msgstr "保存模型参数表单"
+
+#: community/apps/setting/views/model.py:126
+#: community/apps/setting/views/model.py:127
+msgid "Update model"
+msgstr "更新模型"
+
+#: community/apps/setting/views/model.py:138
+#: community/apps/setting/views/model.py:139
+msgid "Delete model"
+msgstr "删除模型"
+
+#: community/apps/setting/views/model.py:149
+#: community/apps/setting/views/model.py:150
+msgid "Query model details"
+msgstr "查询模型详情"
+
+#: community/apps/setting/views/model.py:166
+#: community/apps/setting/views/model.py:167
+msgid "Call the supplier function to obtain form data"
+msgstr "调用供应商函数,获取表单数据"
+
+#: community/apps/setting/views/model.py:178
+#: community/apps/setting/views/model.py:179
+msgid "Get a list of model suppliers"
+msgstr "获取模型供应商列表"
+
+#: community/apps/setting/views/model.py:200
+#: community/apps/setting/views/model.py:201
+msgid "Get a list of model types"
+msgstr "获取模型类型列表"
+
+#: community/apps/setting/views/model.py:215
+#: community/apps/setting/views/model.py:216
+#: community/apps/setting/views/model.py:236
+#: community/apps/setting/views/model.py:254
+#: community/apps/setting/views/model.py:255
+msgid "Get the model creation form"
+msgstr "获取模型创建表单"
+
+#: community/apps/setting/views/model.py:235
+msgid "Get model default parameters"
+msgstr "获取模型默认参数"
+
+#: community/apps/setting/views/model_apply.py:23
+#: community/apps/setting/views/model_apply.py:24
+#: community/apps/setting/views/model_apply.py:33
+#: community/apps/setting/views/model_apply.py:34
+msgid "Vectorization documentation"
+msgstr "向量化文档"
+
+#: community/apps/setting/views/model_apply.py:43
+#: community/apps/setting/views/model_apply.py:44
+msgid "Reorder documents"
+msgstr "重排序文档"
+
+#: community/apps/setting/views/system_setting.py:29
+#: community/apps/setting/views/system_setting.py:30
+msgid "Create or update email settings"
+msgstr "创建或更新邮箱设置"
+
+#: community/apps/setting/views/system_setting.py:31
+#: community/apps/setting/views/system_setting.py:45
+#: community/apps/setting/views/system_setting.py:57
+msgid "Email settings"
+msgstr "邮箱设置"
+
+#: community/apps/setting/views/system_setting.py:41
+#: community/apps/setting/views/system_setting.py:42
+msgid "Test email settings"
+msgstr "测试邮箱设置"
+
+#: community/apps/setting/views/system_setting.py:54
+#: community/apps/setting/views/system_setting.py:55
+msgid "Get email settings"
+msgstr "获取邮箱设置"
+
+#: community/apps/setting/views/valid.py:26
+#: community/apps/setting/views/valid.py:27
+msgid "Get verification results"
+msgstr "获取认证结果"
+
+#: community/apps/users/serializers/user_serializers.py:62
+#: community/apps/users/serializers/user_serializers.py:63
+msgid "System version number"
+msgstr "系统版本号"
+
+#: community/apps/users/serializers/user_serializers.py:141
+#: community/apps/users/serializers/user_serializers.py:669
+msgid "Username must be 6-20 characters long"
+msgstr "用户名必须是 6-20 个字符长"
+
+#: community/apps/users/serializers/user_serializers.py:148
+#: community/apps/users/serializers/user_serializers.py:156
+#: community/apps/users/serializers/user_serializers.py:676
+#: community/apps/users/serializers/user_serializers.py:748
+msgid ""
+"The password must be 6-20 characters long and must be a combination of "
+"letters, numbers, and special characters."
+msgstr "密码必须是 6-20 个字符长,且必须是字母、数字和特殊字符的组合"
+
+#: community/apps/users/serializers/user_serializers.py:151
+#: community/apps/users/serializers/user_serializers.py:212
+#: community/apps/users/serializers/user_serializers.py:213
+#: community/apps/users/serializers/user_serializers.py:300
+#: community/apps/users/serializers/user_serializers.py:347
+#: community/apps/users/serializers/user_serializers.py:348
+#: community/apps/users/serializers/user_serializers.py:749
+#: community/apps/users/serializers/user_serializers.py:765
+#: community/apps/users/serializers/user_serializers.py:766
+msgid "Confirm Password"
+msgstr "确认密码"
+
+#: community/apps/users/serializers/user_serializers.py:158
+#: community/apps/users/serializers/user_serializers.py:214
+#: community/apps/users/serializers/user_serializers.py:215
+#: community/apps/users/serializers/user_serializers.py:229
+#: community/apps/users/serializers/user_serializers.py:257
+#: community/apps/users/serializers/user_serializers.py:258
+#: community/apps/users/serializers/user_serializers.py:291
+#: community/apps/users/serializers/user_serializers.py:344
+#: community/apps/users/serializers/user_serializers.py:345
+#: community/apps/users/views/user.py:107
+#: community/apps/users/views/user.py:108
+msgid "Verification code"
+msgstr "验证码"
+
+#: community/apps/users/serializers/user_serializers.py:232
+#: community/apps/users/serializers/user_serializers.py:259
+#: community/apps/users/serializers/user_serializers.py:360
+#: community/apps/users/serializers/user_serializers.py:439
+msgid "Type"
+msgstr "类型"
+
+#: community/apps/users/serializers/user_serializers.py:236
+#: community/apps/users/serializers/user_serializers.py:362
+msgid "The type only supports register|reset_password"
+msgstr "该类型仅支持 register|reset_password"
+
+#: community/apps/users/serializers/user_serializers.py:266
+msgid "Is it successful"
+msgstr "是否成功"
+
+#: community/apps/users/serializers/user_serializers.py:268
+msgid "Error message"
+msgstr "错误信息"
+
+#: community/apps/users/serializers/user_serializers.py:280
+msgid "language only support:"
+msgstr "语言只支持:"
+
+#: community/apps/users/serializers/user_serializers.py:298
+#: community/apps/users/serializers/user_serializers.py:305
+#: community/apps/users/serializers/user_serializers.py:754
+msgid ""
+"The confirmation password must be 6-20 characters long and must be a "
+"combination of letters, numbers, and special characters."
+msgstr "确认密码必须是 6-20 个字符长,且必须是字母、数字和特殊字符的组合。"
+
+#: community/apps/users/serializers/user_serializers.py:380
+#, python-brace-format
+msgid "Do not send emails again within {seconds} seconds"
+msgstr "{seconds} 秒内请勿重复发送邮件"
+
+#: community/apps/users/serializers/user_serializers.py:410
+msgid ""
+"The email service has not been set up. Please contact the administrator to "
+"set up the email service in [Email Settings]."
+msgstr "邮箱服务未设置,请联系管理员在【邮箱设置】中设置邮箱服务"
+
+#: community/apps/users/serializers/user_serializers.py:421
+#, python-brace-format
+msgid "【Intelligent knowledge base question and answer system-{action}】"
+msgstr "【智能知识库问答系统-{action}】"
+
+#: community/apps/users/serializers/user_serializers.py:422
+#: community/apps/users/views/user.py:194
+#: community/apps/users/views/user.py:195
+msgid "User registration"
+msgstr "用户注册"
+
+#: community/apps/users/serializers/user_serializers.py:422
+#: community/apps/users/views/user.py:212
+#: community/apps/users/views/user.py:213
+#: community/apps/users/views/user.py:301
+#: community/apps/users/views/user.py:302
+msgid "Change password"
+msgstr "修改密码"
+
+#: community/apps/users/serializers/user_serializers.py:474
+#: community/apps/users/serializers/user_serializers.py:475
+msgid "Permissions"
+msgstr "权限列表"
+
+#: community/apps/users/serializers/user_serializers.py:509
+#: community/apps/users/serializers/user_serializers.py:610
+#: community/apps/users/serializers/user_serializers.py:618
+msgid "Email or username"
+msgstr "邮箱或用户名"
+
+#: community/apps/users/serializers/user_serializers.py:560
+msgid "All"
+msgstr "全部"
+
+#: community/apps/users/serializers/user_serializers.py:561
+msgid "Me"
+msgstr "我的"
+
+#: community/apps/users/serializers/user_serializers.py:583
+#: community/apps/users/serializers/user_serializers.py:680
+#: community/apps/users/serializers/user_serializers.py:705
+#: community/apps/users/serializers/user_serializers.py:719
+#: community/apps/users/serializers/user_serializers.py:736
+msgid "Phone"
+msgstr "手机号"
+
+#: community/apps/users/serializers/user_serializers.py:587
+msgid "Source"
+msgstr "来源"
+
+#: community/apps/users/serializers/user_serializers.py:588
+#: community/apps/users/serializers/user_serializers.py:678
+#: community/apps/users/serializers/user_serializers.py:706
+#: community/apps/users/serializers/user_serializers.py:717
+#: community/apps/users/serializers/user_serializers.py:735
+msgid "Name"
+msgstr "名字"
+
+#: community/apps/users/serializers/user_serializers.py:727
+msgid "Email is already in use"
+msgstr "邮箱已被使用"
+
+#: community/apps/users/serializers/user_serializers.py:808
+msgid "Unable to delete administrator"
+msgstr "不能删除管理员"
+
+#: community/apps/users/serializers/user_serializers.py:845
+msgid "Cannot modify administrator status"
+msgstr "不能修改管理员状态"
+
+#: community/apps/users/views/user.py:37 community/apps/users/views/user.py:38
+msgid "Get MaxKB related information"
+msgstr "获取 MaxKB 相关信息"
+
+#: community/apps/users/views/user.py:40
+msgid "System parameters"
+msgstr "系统参数"
+
+#: community/apps/users/views/user.py:50 community/apps/users/views/user.py:51
+msgid "Get current user information"
+msgstr "获取当前用户信息"
+
+#: community/apps/users/views/user.py:63 community/apps/users/views/user.py:64
+msgid "Get user list"
+msgstr "获取用户列表"
+
+#: community/apps/users/views/user.py:67 community/apps/users/views/user.py:90
+#: community/apps/users/views/user.py:116
+#: community/apps/users/views/user.py:136
+#: community/apps/users/views/user.py:152
+#: community/apps/users/views/user.py:178
+#: community/apps/users/views/user.py:199
+#: community/apps/users/views/user.py:217
+#: community/apps/users/views/user.py:234
+#: community/apps/users/views/user.py:249
+#: community/apps/users/views/user.py:373
+msgid "User"
+msgstr "用户"
+
+#: community/apps/users/views/user.py:79 community/apps/users/views/user.py:80
+msgid "Switch Language"
+msgstr "切换语言"
+
+#: community/apps/users/views/user.py:101
+#: community/apps/users/views/user.py:102
+msgid "Modify current user password"
+msgstr "修改当前用户密码"
+
+#: community/apps/users/views/user.py:125
+msgid "Failed to change password"
+msgstr "修改密码失败"
+
+#: community/apps/users/views/user.py:133
+#: community/apps/users/views/user.py:134
+msgid "Send email to current user"
+msgstr "给当前用户发送邮件"
+
+#: community/apps/users/views/user.py:149
+#: community/apps/users/views/user.py:150
+msgid "Sign out"
+msgstr "登出"
+
+#: community/apps/users/views/user.py:205
+msgid "Registration successful"
+msgstr "注册成功"
+
+#: community/apps/users/views/user.py:229
+#: community/apps/users/views/user.py:230
+msgid "Check whether the verification code is correct"
+msgstr "检查验证码是否正确"
+
+#: community/apps/users/views/user.py:244
+#: community/apps/users/views/user.py:245
+msgid "Send email"
+msgstr "发送邮件"
+
+#: community/apps/users/views/user.py:262
+#: community/apps/users/views/user.py:263
+msgid "Add user"
+msgstr "添加用户"
+
+#: community/apps/users/views/user.py:266
+#: community/apps/users/views/user.py:282
+#: community/apps/users/views/user.py:306
+#: community/apps/users/views/user.py:324
+#: community/apps/users/views/user.py:338
+#: community/apps/users/views/user.py:354
+msgid "User management"
+msgstr "用户管理"
+
+#: community/apps/users/views/user.py:280
+#: community/apps/users/views/user.py:281
+msgid "Get user paginated list"
+msgstr "获取用户分页列表"
+
+#: community/apps/users/views/user.py:320
+#: community/apps/users/views/user.py:321
+msgid "Delete user"
+msgstr "删除用户"
+
+#: community/apps/users/views/user.py:334
+#: community/apps/users/views/user.py:335
+msgid "Get user information"
+msgstr "获取用户信息"
+
+#: community/apps/users/views/user.py:349
+#: community/apps/users/views/user.py:350
+msgid "Update user information"
+msgstr "更新用户信息"
+
+#: community/apps/users/views/user.py:369
+#: community/apps/users/views/user.py:370
+msgid "Get user list by type"
+msgstr "按类型获取用户列表"
+
+#~ msgid "MaxKB table template.csv"
+#~ msgstr "MaxKB表格模版.csv"
+
+#~ msgid "MaxKB table template.xlsx"
+#~ msgstr "MaxKB表格模版.xlsx"
+
+msgid "Fail"
+msgstr "失败"
+
+msgid "Menu"
+msgstr "操作菜单"
+
+msgid "Operate"
+msgstr "操作"
+
+msgid "Operate user"
+msgstr "操作用户"
+
+msgid "Ip Address"
+msgstr "IP地址"
+
+msgid "API Details"
+msgstr "API详情"
+
+msgid "Operate Time"
+msgstr "操作时间"
+
+msgid "System Settings/API Key"
+msgstr "系统 API Key"
+
+msgid "Appearance Settings"
+msgstr "外观设置"
+
+msgid "Conversation Log"
+msgstr "对话日志"
+
+msgid "login authentication"
+msgstr "登录认证"
+
+msgid "Paragraph"
+msgstr "段落"
+
+msgid "Batch generate related"
+msgstr "分段生成问题"
+
+msgid "Application access"
+msgstr "应用接入"
+
+msgid "Add internal function"
+msgstr "添加内置函数"
+
+msgid "Batch generate related documents"
+msgstr "批量生成问题"
+
+msgid "No permission to use this function {name}"
+msgstr "无权使用此函数 {name}"
+
+msgid "Function {name} is unavailable"
+msgstr "函数 {name} 不可用"
+
+msgid "Field: {name} Type: {_type} Value: {value} Type error"
+msgstr "字段: {name} 类型: {_type} 值: {value} 类型错误"
+
+msgid "Field: {name} Type: {_type} Value: {value} Unsupported types"
+msgstr "字段: {name} 类型: {_type} 值: {value} 不支持的类型"
+
+msgid "Field: {name} No value set"
+msgstr "字段: {name} 未设置值"
+
+msgid "Generate related"
+msgstr "生成问题"
+
+msgid "Obtain graphical captcha"
+msgstr "获取图形验证码"
+
+msgid "Captcha code error or expiration"
+msgstr "验证码错误或过期"
+
+msgid "captcha"
+msgstr "验证码"
\ No newline at end of file
diff --git a/apps/locales/zh_Hant/LC_MESSAGES/django.po b/apps/locales/zh_Hant/LC_MESSAGES/django.po
new file mode 100644
index 00000000000..8bf746a89c8
--- /dev/null
+++ b/apps/locales/zh_Hant/LC_MESSAGES/django.po
@@ -0,0 +1,7675 @@
+# SOME DESCRIPTIVE TITLE.
+# Copyright (C) YEAR THE PACKAGE'S COPYRIGHT HOLDER
+# This file is distributed under the same license as the PACKAGE package.
+# FIRST AUTHOR , YEAR.
+#
+msgid ""
+msgstr ""
+"Project-Id-Version: PACKAGE VERSION\n"
+"Report-Msgid-Bugs-To: \n"
+"POT-Creation-Date: 2025-03-20 14:22+0800\n"
+"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
+"Last-Translator: FULL NAME \n"
+"Language-Team: LANGUAGE \n"
+"Language: \n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"Plural-Forms: nplurals=1; plural=0;\n"
+
+#: apps/xpack/auth/user_key.py:26
+#: apps/xpack/serializers/license_serializers.py:96
+#: apps/xpack/serializers/license_tools.py:109
+msgid "The license is invalid"
+msgstr "License 無效"
+
+#: apps/xpack/auth/user_key.py:32 apps/xpack/auth/user_key.py:34
+msgid "secret_key is invalid"
+msgstr "secret key 無效"
+
+#: apps/xpack/middleware/swagger_middleware.py:19
+msgid "The license has not been uploaded or the license has expired"
+msgstr "License 未上傳或 License 已過期"
+
+#: apps/xpack/serializers/application_setting_serializer.py:20
+msgid "theme color"
+msgstr "主題顏色"
+
+#: apps/xpack/serializers/application_setting_serializer.py:22
+msgid "header font color"
+msgstr "頭部字體顏色"
+
+#: apps/xpack/serializers/application_setting_serializer.py:26
+msgid "float location type"
+msgstr "浮窗位置類型"
+
+#: apps/xpack/serializers/application_setting_serializer.py:27
+msgid "float location value"
+msgstr "浮窗位置值"
+
+#: apps/xpack/serializers/application_setting_serializer.py:31
+msgid "float location x"
+msgstr "浮窗位置 x"
+
+#: apps/xpack/serializers/application_setting_serializer.py:32
+msgid "float location y"
+msgstr "浮窗位置 y"
+
+#: apps/xpack/serializers/application_setting_serializer.py:36
+#: apps/xpack/swagger_api/application_setting_api.py:23
+msgid "show source"
+msgstr "是否顯示來源"
+
+#: apps/xpack/serializers/application_setting_serializer.py:37
+#: community/apps/application/serializers/application_serializers.py:354
+#: community/apps/application/swagger_api/application_api.py:169
+#: community/apps/application/swagger_api/application_api.py:170
+#: community/apps/users/serializers/user_serializers.py:273
+#: community/apps/users/views/user.py:85 community/apps/users/views/user.py:86
+msgid "language"
+msgstr "語言"
+
+#: apps/xpack/serializers/application_setting_serializer.py:38
+#: apps/xpack/swagger_api/application_setting_api.py:30
+msgid "show history"
+msgstr "是否顯示歷史記錄"
+
+#: apps/xpack/serializers/application_setting_serializer.py:39
+#: apps/xpack/swagger_api/application_setting_api.py:37
+msgid "draggable"
+msgstr "是否可拖動"
+
+#: apps/xpack/serializers/application_setting_serializer.py:40
+#: apps/xpack/swagger_api/application_setting_api.py:44
+msgid "show guide"
+msgstr "是否顯示引導圖"
+
+#: apps/xpack/serializers/application_setting_serializer.py:41
+#: apps/xpack/swagger_api/application_setting_api.py:51
+msgid "avatar"
+msgstr "頭像"
+
+#: apps/xpack/serializers/application_setting_serializer.py:42
+msgid "avatar url"
+msgstr "頭像地址"
+
+#: apps/xpack/serializers/application_setting_serializer.py:43
+#: apps/xpack/swagger_api/application_setting_api.py:86
+msgid "user avatar"
+msgstr "用戶頭像"
+
+#: apps/xpack/serializers/application_setting_serializer.py:44
+msgid "user avatar url"
+msgstr "用戶頭像地址"
+
+#: apps/xpack/serializers/application_setting_serializer.py:45
+#: apps/xpack/swagger_api/application_setting_api.py:58
+msgid "float icon"
+msgstr "浮窗圖標"
+
+#: apps/xpack/serializers/application_setting_serializer.py:46
+msgid "float icon url"
+msgstr "浮窗圖標地址"
+
+#: apps/xpack/serializers/application_setting_serializer.py:47
+#: apps/xpack/swagger_api/application_setting_api.py:65
+msgid "disclaimer"
+msgstr "免責聲明"
+
+#: apps/xpack/serializers/application_setting_serializer.py:48
+#: apps/xpack/swagger_api/application_setting_api.py:72
+msgid "disclaimer value"
+msgstr "免責聲明的值"
+
+#: apps/xpack/serializers/application_setting_serializer.py:70
+#: apps/xpack/serializers/dataset_lark_serializer.py:373
+#: community/apps/dataset/serializers/dataset_serializers.py:548
+msgid "application id"
+msgstr "應用 id"
+
+#: apps/xpack/serializers/application_setting_serializer.py:96
+#: apps/xpack/serializers/platform_serializer.py:83
+#: apps/xpack/serializers/platform_serializer.py:105
+#: apps/xpack/serializers/platform_serializer.py:174
+#: apps/xpack/serializers/platform_serializer.py:185
+#: community/apps/application/serializers/application_serializers.py:1237
+#: community/apps/application/serializers/chat_message_serializers.py:424
+#: community/apps/application/serializers/chat_serializers.py:294
+#: community/apps/application/serializers/chat_serializers.py:396
+msgid "Application does not exist"
+msgstr "應用不存在"
+
+#: apps/xpack/serializers/application_setting_serializer.py:116
+msgid "Float location field type error"
+msgstr "浮窗位置字段類型錯誤"
+
+#: apps/xpack/serializers/application_setting_serializer.py:122
+msgid "Custom theme field type error"
+msgstr "自定義主題字段類型錯誤"
+
+#: apps/xpack/serializers/auth_config_serializer.py:19
+msgid "LDAP server cannot be empty"
+msgstr "LDAP 服務器不能爲空"
+
+#: apps/xpack/serializers/auth_config_serializer.py:20
+msgid "Base DN cannot be empty"
+msgstr "Base DN 不能爲空"
+
+#: apps/xpack/serializers/auth_config_serializer.py:21
+msgid "Password cannot be empty"
+msgstr "密碼不能爲空"
+
+#: apps/xpack/serializers/auth_config_serializer.py:22
+msgid "OU cannot be empty"
+msgstr "OU 不能爲空"
+
+#: apps/xpack/serializers/auth_config_serializer.py:23
+msgid "LDAP filter cannot be empty"
+msgstr "LDAP 過濾器不能爲空"
+
+#: apps/xpack/serializers/auth_config_serializer.py:24
+msgid "LDAP mapping cannot be empty"
+msgstr "LDAP 映射不能爲空"
+
+#: apps/xpack/serializers/auth_config_serializer.py:29
+msgid "Authorization address cannot be empty"
+msgstr "授權地址不能爲空"
+
+#: apps/xpack/serializers/auth_config_serializer.py:31
+msgid "Token address cannot be empty"
+msgstr "令牌地址不能爲空"
+
+#: apps/xpack/serializers/auth_config_serializer.py:33
+msgid "User information address cannot be empty"
+msgstr "用戶信息地址不能爲空"
+
+#: apps/xpack/serializers/auth_config_serializer.py:34
+msgid "Scope cannot be empty"
+msgstr "Scope 不能爲空"
+
+#: apps/xpack/serializers/auth_config_serializer.py:35
+msgid "Client ID cannot be empty"
+msgstr "Client ID 不能爲空"
+
+#: apps/xpack/serializers/auth_config_serializer.py:36
+msgid "Client secret cannot be empty"
+msgstr "Client secret 不能爲空"
+
+#: apps/xpack/serializers/auth_config_serializer.py:38
+msgid "Redirect address cannot be empty"
+msgstr "重定向地址不能爲空"
+
+#: apps/xpack/serializers/auth_config_serializer.py:39
+msgid "Field mapping cannot be empty"
+msgstr "字段映射不能爲空"
+
+#: apps/xpack/serializers/auth_config_serializer.py:166
+#: apps/xpack/serializers/qr_login/qr_login.py:33
+#: community/apps/users/serializers/user_serializers.py:89
+msgid "The user has been disabled, please contact the administrator!"
+msgstr "用戶已被禁用,請聯繫管理員!"
+
+#: apps/xpack/serializers/cas.py:32
+msgid "HttpClient query failed: "
+msgstr "HttpClient 查詢失敗:"
+
+#: apps/xpack/serializers/cas.py:56
+msgid "CAS authentication failed"
+msgstr "CAS 認證失敗"
+
+#: apps/xpack/serializers/channel/chat_manage.py:76
+#: apps/xpack/serializers/channel/chat_manage.py:134
+msgid ""
+"Sorry, no relevant content was found. Please re-describe your problem or "
+"provide more information. "
+msgstr "抱歉,沒有找到相關內容。請重新描述您的問題或提供更多信息。"
+
+#: apps/xpack/serializers/channel/chat_manage.py:82
+msgid "Think: "
+msgstr "思考過程: "
+
+#: apps/xpack/serializers/channel/chat_manage.py:85
+#: apps/xpack/serializers/channel/chat_manage.py:87
+msgid "AI reply: "
+msgstr "AI 回覆: "
+
+#: apps/xpack/serializers/channel/chat_manage.py:298
+msgid "Thinking, please wait a moment!"
+msgstr "正在思考中,請稍等......"
+
+#: apps/xpack/serializers/channel/ding_talk.py:19
+#: apps/xpack/serializers/channel/wechat.py:89
+#: apps/xpack/serializers/channel/wechat.py:130
+#: apps/xpack/serializers/channel/wecom.py:76
+#: apps/xpack/serializers/channel/wecom.py:259
+msgid "The corresponding platform configuration was not found"
+msgstr "對應的平臺配置未找到"
+
+#: apps/xpack/serializers/channel/ding_talk.py:27
+#: apps/xpack/serializers/channel/feishu.py:112
+msgid "Currently only text messages are supported"
+msgstr "目前只支持文本消息"
+
+#: apps/xpack/serializers/channel/ding_talk.py:91
+#: apps/xpack/serializers/channel/wechat.py:161
+#: apps/xpack/serializers/channel/wecom.py:189
+msgid "Image download failed, check network"
+msgstr "圖片下載失敗,檢查網絡"
+
+#: apps/xpack/serializers/channel/ding_talk.py:92
+#: apps/xpack/serializers/channel/wechat.py:159
+#: apps/xpack/serializers/channel/wecom.py:185
+msgid "Please analyze the content of the image."
+msgstr "請分析圖片內容。"
+
+#: apps/xpack/serializers/channel/ding_talk.py:95
+#, python-brace-format
+msgid "DingTalk application: {user}"
+msgstr "釘釘應用:{user}"
+
+#: apps/xpack/serializers/channel/ding_talk.py:106
+#: apps/xpack/serializers/channel/ding_talk.py:151
+msgid "Content generated by AI"
+msgstr "內容由 AI 生成"
+
+#: apps/xpack/serializers/channel/feishu.py:87
+#: apps/xpack/serializers/channel/feishu.py:107
+msgid "Lark application: "
+msgstr "飛書應用:"
+
+#: apps/xpack/serializers/channel/slack.py:116
+#| msgid "The corresponding platform configuration was not found"
+msgid "The corresponding platform configuration for Slack was not found"
+msgstr "未找到 Slack 的對應平臺配置"
+
+#: apps/xpack/serializers/channel/slack.py:206
+msgid "Thinking..."
+msgstr "思考中..."
+
+#: apps/xpack/serializers/channel/slack.py:321
+msgid "Invalid json format."
+msgstr "JSON 格式無效。"
+
+#: apps/xpack/serializers/channel/slack.py:327
+#| msgid "Invalid access_token"
+msgid "Invalid Slack request"
+msgstr "Slack 請求無效"
+
+#: apps/xpack/serializers/channel/slack.py:335
+#| msgid "DingTalk application: {user}"
+msgid "Slack application: {user}"
+msgstr "Slack 應用:{user}"
+
+#: apps/xpack/serializers/channel/slack.py:471
+msgid "Stop"
+msgstr "停止"
+
+#: apps/xpack/serializers/channel/wechat.py:141
+#, python-brace-format
+msgid "WeChat Official Account: {account}"
+msgstr "微信公衆號:{account}"
+
+#: apps/xpack/serializers/channel/wechat.py:148
+#: apps/xpack/serializers/channel/wecom.py:171
+#: apps/xpack/serializers/channel/wecom.py:175
+msgid ""
+"The app does not enable the speech-to-text function or the speech-to-text "
+"function fails."
+msgstr "應用未開啓語音轉文字功能或語音轉文字功能失敗。"
+
+#: apps/xpack/serializers/channel/wechat.py:187
+msgid "Message types not supported yet"
+msgstr "暫時不支持該類型的消息"
+
+#: apps/xpack/serializers/channel/wechat.py:194
+msgid "Welcome to subscribe"
+msgstr "歡迎訂閱"
+
+#: apps/xpack/serializers/channel/wecom.py:84
+msgid "Enterprise WeChat user: "
+msgstr "企業微信用戶:"
+
+#: apps/xpack/serializers/channel/wecom.py:95
+msgid "Enterprise WeChat customer service: "
+msgstr "企業微信客服:"
+
+#: apps/xpack/serializers/channel/wecom.py:132
+#: apps/xpack/serializers/channel/wecom.py:148
+msgid "This type of message is not supported yet"
+msgstr "暫時不支持該類型的消息"
+
+#: apps/xpack/serializers/channel/wecom.py:254
+msgid "Signature missing"
+msgstr "簽名缺失"
+
+#: apps/xpack/serializers/channel/wecom.py:266
+#: apps/xpack/serializers/channel/wecom.py:273
+#, python-brace-format
+msgid "An error occurred while processing the GET request {e}"
+msgstr "GET 請求處理時發生錯誤 {e}"
+
+#: apps/xpack/serializers/dataset_lark_serializer.py:58
+#: community/apps/dataset/serializers/dataset_serializers.py:82
+#: community/apps/dataset/serializers/dataset_serializers.py:214
+#: community/apps/dataset/serializers/dataset_serializers.py:295
+#: community/apps/dataset/serializers/dataset_serializers.py:296
+#: community/apps/dataset/serializers/dataset_serializers.py:357
+#: community/apps/dataset/serializers/dataset_serializers.py:358
+#: community/apps/dataset/serializers/dataset_serializers.py:502
+#: community/apps/dataset/serializers/dataset_serializers.py:503
+#: community/apps/dataset/serializers/dataset_serializers.py:568
+#: community/apps/dataset/serializers/dataset_serializers.py:607
+#: community/apps/dataset/serializers/dataset_serializers.py:701
+#: community/apps/dataset/serializers/dataset_serializers.py:933
+#: community/apps/dataset/serializers/dataset_serializers.py:934
+#: community/apps/dataset/serializers/document_serializers.py:816
+#: community/apps/function_lib/serializers/function_lib_serializer.py:141
+#: community/apps/function_lib/serializers/function_lib_serializer.py:186
+#: community/apps/function_lib/serializers/function_lib_serializer.py:203
+#: community/apps/function_lib/serializers/function_lib_serializer.py:262
+#: community/apps/setting/serializers/provider_serializers.py:76
+#: community/apps/setting/serializers/provider_serializers.py:127
+#: community/apps/setting/serializers/provider_serializers.py:174
+#: community/apps/setting/serializers/provider_serializers.py:256
+#: community/apps/setting/serializers/provider_serializers.py:277
+#: community/apps/setting/serializers/provider_serializers.py:301
+#: community/apps/setting/serializers/team_serializers.py:42
+#: community/apps/users/serializers/user_serializers.py:272
+msgid "user id"
+msgstr "用戶 id"
+
+#: apps/xpack/serializers/dataset_lark_serializer.py:61
+#: apps/xpack/serializers/dataset_lark_serializer.py:112
+#: apps/xpack/serializers/dataset_lark_serializer.py:113
+#: apps/xpack/serializers/dataset_lark_serializer.py:367
+#: community/apps/dataset/serializers/dataset_serializers.py:137
+#: community/apps/dataset/serializers/dataset_serializers.py:201
+#: community/apps/dataset/serializers/dataset_serializers.py:221
+#: community/apps/dataset/serializers/dataset_serializers.py:244
+#: community/apps/dataset/serializers/dataset_serializers.py:273
+#: community/apps/dataset/serializers/dataset_serializers.py:274
+#: community/apps/dataset/serializers/dataset_serializers.py:291
+#: community/apps/dataset/serializers/dataset_serializers.py:292
+#: community/apps/dataset/serializers/dataset_serializers.py:319
+#: community/apps/dataset/serializers/dataset_serializers.py:353
+#: community/apps/dataset/serializers/dataset_serializers.py:354
+#: community/apps/dataset/serializers/dataset_serializers.py:382
+#: community/apps/dataset/serializers/dataset_serializers.py:383
+#: community/apps/dataset/serializers/dataset_serializers.py:498
+#: community/apps/dataset/serializers/dataset_serializers.py:499
+#: community/apps/dataset/serializers/dataset_serializers.py:527
+#: community/apps/dataset/serializers/dataset_serializers.py:528
+#: community/apps/dataset/serializers/dataset_serializers.py:542
+#: community/apps/dataset/serializers/dataset_serializers.py:907
+#: community/apps/dataset/serializers/dataset_serializers.py:908
+#: community/apps/dataset/serializers/dataset_serializers.py:929
+#: community/apps/dataset/serializers/dataset_serializers.py:930
+msgid "dataset name"
+msgstr "知識庫名稱"
+
+#: apps/xpack/serializers/dataset_lark_serializer.py:63
+#: apps/xpack/serializers/dataset_lark_serializer.py:114
+#: apps/xpack/serializers/dataset_lark_serializer.py:115
+#: apps/xpack/serializers/dataset_lark_serializer.py:369
+#: community/apps/dataset/serializers/dataset_serializers.py:142
+#: community/apps/dataset/serializers/dataset_serializers.py:206
+#: community/apps/dataset/serializers/dataset_serializers.py:226
+#: community/apps/dataset/serializers/dataset_serializers.py:249
+#: community/apps/dataset/serializers/dataset_serializers.py:278
+#: community/apps/dataset/serializers/dataset_serializers.py:279
+#: community/apps/dataset/serializers/dataset_serializers.py:293
+#: community/apps/dataset/serializers/dataset_serializers.py:294
+#: community/apps/dataset/serializers/dataset_serializers.py:324
+#: community/apps/dataset/serializers/dataset_serializers.py:355
+#: community/apps/dataset/serializers/dataset_serializers.py:356
+#: community/apps/dataset/serializers/dataset_serializers.py:384
+#: community/apps/dataset/serializers/dataset_serializers.py:385
+#: community/apps/dataset/serializers/dataset_serializers.py:500
+#: community/apps/dataset/serializers/dataset_serializers.py:501
+#: community/apps/dataset/serializers/dataset_serializers.py:529
+#: community/apps/dataset/serializers/dataset_serializers.py:530
+#: community/apps/dataset/serializers/dataset_serializers.py:544
+#: community/apps/dataset/serializers/dataset_serializers.py:909
+#: community/apps/dataset/serializers/dataset_serializers.py:910
+#: community/apps/dataset/serializers/dataset_serializers.py:931
+#: community/apps/dataset/serializers/dataset_serializers.py:932
+msgid "dataset description"
+msgstr "知識庫描述"
+
+#: apps/xpack/serializers/dataset_lark_serializer.py:65
+#: apps/xpack/serializers/dataset_lark_serializer.py:118
+#: apps/xpack/serializers/dataset_lark_serializer.py:377
+msgid "app id"
+msgstr ""
+
+#: apps/xpack/serializers/dataset_lark_serializer.py:66
+#: apps/xpack/serializers/dataset_lark_serializer.py:119
+#: apps/xpack/serializers/dataset_lark_serializer.py:120
+#: apps/xpack/serializers/dataset_lark_serializer.py:378
+msgid "app secret"
+msgstr ""
+
+#: apps/xpack/serializers/dataset_lark_serializer.py:67
+#: apps/xpack/serializers/dataset_lark_serializer.py:121
+#: apps/xpack/serializers/dataset_lark_serializer.py:122
+#: apps/xpack/serializers/dataset_lark_serializer.py:132
+#: apps/xpack/serializers/dataset_lark_serializer.py:165
+#: apps/xpack/serializers/dataset_lark_serializer.py:379
+msgid "folder token"
+msgstr ""
+
+#: apps/xpack/serializers/dataset_lark_serializer.py:69
+#: apps/xpack/serializers/dataset_lark_serializer.py:116
+#: apps/xpack/serializers/dataset_lark_serializer.py:117
+#: community/apps/dataset/serializers/dataset_serializers.py:231
+#: community/apps/dataset/serializers/dataset_serializers.py:254
+#: community/apps/dataset/serializers/dataset_serializers.py:330
+#: community/apps/dataset/serializers/dataset_serializers.py:386
+#: community/apps/dataset/serializers/dataset_serializers.py:387
+#: community/apps/dataset/serializers/dataset_serializers.py:531
+#: community/apps/dataset/serializers/dataset_serializers.py:532
+msgid "embedding mode"
+msgstr "向量模型"
+
+#: apps/xpack/serializers/dataset_lark_serializer.py:79
+#: apps/xpack/serializers/dataset_lark_serializer.py:389
+msgid "Network error or folder token error!"
+msgstr "網絡錯誤或資料夾token錯誤!"
+
+#: apps/xpack/serializers/dataset_lark_serializer.py:87
+#: apps/xpack/serializers/dataset_lark_serializer.py:444
+#: community/apps/dataset/serializers/dataset_serializers.py:424
+#: community/apps/dataset/serializers/dataset_serializers.py:476
+#: community/apps/dataset/serializers/dataset_serializers.py:865
+msgid "Knowledge base name duplicate!"
+msgstr "知識庫名稱重複!"
+
+#: apps/xpack/serializers/dataset_lark_serializer.py:130
+#: apps/xpack/serializers/dataset_lark_serializer.py:164
+#: apps/xpack/serializers/dataset_lark_serializer.py:201
+#: apps/xpack/serializers/dataset_lark_serializer.py:221
+#: apps/xpack/serializers/dataset_lark_serializer.py:346
+#: apps/xpack/serializers/dataset_lark_serializer.py:363
+#: community/apps/common/swagger_api/common_api.py:68
+#: community/apps/common/swagger_api/common_api.py:69
+#: community/apps/dataset/serializers/dataset_serializers.py:84
+#: community/apps/dataset/serializers/dataset_serializers.py:93
+#: community/apps/dataset/serializers/dataset_serializers.py:605
+#: community/apps/dataset/serializers/dataset_serializers.py:688
+#: community/apps/dataset/serializers/dataset_serializers.py:699
+#: community/apps/dataset/serializers/dataset_serializers.py:955
+#: community/apps/dataset/serializers/document_serializers.py:169
+#: community/apps/dataset/serializers/document_serializers.py:286
+#: community/apps/dataset/serializers/document_serializers.py:407
+#: community/apps/dataset/serializers/document_serializers.py:573
+#: community/apps/dataset/serializers/document_serializers.py:1055
+#: community/apps/dataset/serializers/document_serializers.py:1216
+#: community/apps/dataset/serializers/paragraph_serializers.py:96
+#: community/apps/dataset/serializers/paragraph_serializers.py:162
+#: community/apps/dataset/serializers/paragraph_serializers.py:195
+#: community/apps/dataset/serializers/paragraph_serializers.py:196
+#: community/apps/dataset/serializers/paragraph_serializers.py:208
+#: community/apps/dataset/serializers/paragraph_serializers.py:266
+#: community/apps/dataset/serializers/paragraph_serializers.py:285
+#: community/apps/dataset/serializers/paragraph_serializers.py:302
+#: community/apps/dataset/serializers/paragraph_serializers.py:459
+#: community/apps/dataset/serializers/paragraph_serializers.py:567
+#: community/apps/dataset/serializers/paragraph_serializers.py:638
+#: community/apps/dataset/serializers/paragraph_serializers.py:647
+#: community/apps/dataset/serializers/paragraph_serializers.py:715
+#: community/apps/dataset/serializers/paragraph_serializers.py:716
+#: community/apps/dataset/serializers/paragraph_serializers.py:732
+#: community/apps/dataset/serializers/problem_serializers.py:87
+#: community/apps/dataset/serializers/problem_serializers.py:112
+#: community/apps/dataset/serializers/problem_serializers.py:135
+#: community/apps/dataset/serializers/problem_serializers.py:192
+#: community/apps/dataset/swagger_api/problem_api.py:28
+#: community/apps/dataset/swagger_api/problem_api.py:29
+#: community/apps/dataset/swagger_api/problem_api.py:77
+#: community/apps/dataset/swagger_api/problem_api.py:96
+#: community/apps/dataset/swagger_api/problem_api.py:149
+#: community/apps/dataset/swagger_api/problem_api.py:177
+msgid "dataset id"
+msgstr "知識庫 id"
+
+#: apps/xpack/serializers/dataset_lark_serializer.py:145
+#: apps/xpack/serializers/dataset_lark_serializer.py:146
+#: apps/xpack/serializers/dataset_lark_serializer.py:212
+#: community/apps/dataset/serializers/document_serializers.py:812
+#: community/apps/dataset/serializers/document_serializers.py:813
+#: community/apps/setting/swagger_api/provide_api.py:22
+#: community/apps/setting/swagger_api/provide_api.py:48
+#: community/apps/setting/swagger_api/provide_api.py:49
+#: community/apps/setting/swagger_api/provide_api.py:76
+#: community/apps/setting/swagger_api/provide_api.py:77
+#: community/apps/setting/swagger_api/provide_api.py:143
+#: community/apps/setting/swagger_api/provide_api.py:144
+msgid "name"
+msgstr "名稱"
+
+#: apps/xpack/serializers/dataset_lark_serializer.py:147
+#: apps/xpack/serializers/dataset_lark_serializer.py:148
+#: apps/xpack/serializers/dataset_lark_serializer.py:211
+#: community/apps/application/serializers/application_serializers.py:257
+msgid "token"
+msgstr "token"
+
+#: apps/xpack/serializers/dataset_lark_serializer.py:149
+#: apps/xpack/serializers/dataset_lark_serializer.py:150
+#: apps/xpack/serializers/dataset_lark_serializer.py:210
+#: community/apps/application/flow/step_node/function_node/i_function_node.py:26
+#: community/apps/dataset/serializers/document_serializers.py:229
+#: community/apps/function_lib/serializers/function_lib_serializer.py:72
+#: community/apps/function_lib/swagger_api/function_lib_api.py:92
+#: community/apps/function_lib/swagger_api/function_lib_api.py:138
+#: community/apps/function_lib/swagger_api/function_lib_api.py:184
+#: community/apps/setting/serializers/team_serializers.py:59
+#: community/apps/setting/serializers/team_serializers.py:74
+#: community/apps/setting/serializers/team_serializers.py:85
+#: community/apps/setting/serializers/valid_serializers.py:37
+msgid "type"
+msgstr "類型"
+
+#: apps/xpack/serializers/dataset_lark_serializer.py:151
+#: apps/xpack/serializers/dataset_lark_serializer.py:152
+#| msgid "id does not exist"
+msgid "is exist"
+msgstr "是否存在"
+
+#: apps/xpack/serializers/dataset_lark_serializer.py:173
+#: apps/xpack/serializers/dataset_lark_serializer.py:230
+#: apps/xpack/task/sync.py:120
+#| msgid "Knowledge base id"
+msgid "Knowledge base not found!"
+msgstr "知識庫不存在!"
+
+#: apps/xpack/serializers/dataset_lark_serializer.py:185
+#: apps/xpack/serializers/dataset_lark_serializer.py:252
+msgid "Failed to get lark document list!"
+msgstr "獲取飛書檔案清單失敗!"
+
+#: apps/xpack/serializers/dataset_lark_serializer.py:262
+#: community/apps/common/swagger_api/common_api.py:70
+#: community/apps/common/swagger_api/common_api.py:71
+#: community/apps/dataset/serializers/document_serializers.py:293
+#: community/apps/dataset/serializers/document_serializers.py:386
+#: community/apps/dataset/serializers/document_serializers.py:490
+#: community/apps/dataset/serializers/document_serializers.py:572
+#: community/apps/dataset/serializers/document_serializers.py:581
+#: community/apps/dataset/serializers/document_serializers.py:586
+#: community/apps/dataset/serializers/document_serializers.py:854
+#: community/apps/dataset/serializers/document_serializers.py:982
+#: community/apps/dataset/serializers/document_serializers.py:1191
+#: community/apps/dataset/serializers/paragraph_serializers.py:98
+#: community/apps/dataset/serializers/paragraph_serializers.py:167
+#: community/apps/dataset/serializers/paragraph_serializers.py:212
+#: community/apps/dataset/serializers/paragraph_serializers.py:271
+#: community/apps/dataset/serializers/paragraph_serializers.py:286
+#: community/apps/dataset/serializers/paragraph_serializers.py:303
+#: community/apps/dataset/serializers/paragraph_serializers.py:426
+#: community/apps/dataset/serializers/paragraph_serializers.py:431
+#: community/apps/dataset/serializers/paragraph_serializers.py:462
+#: community/apps/dataset/serializers/paragraph_serializers.py:570
+#: community/apps/dataset/serializers/paragraph_serializers.py:642
+#: community/apps/dataset/serializers/paragraph_serializers.py:650
+#: community/apps/dataset/serializers/paragraph_serializers.py:682
+#: community/apps/dataset/serializers/paragraph_serializers.py:717
+#: community/apps/dataset/serializers/paragraph_serializers.py:718
+#: community/apps/dataset/serializers/paragraph_serializers.py:733
+#: community/apps/dataset/serializers/problem_serializers.py:58
+#: community/apps/dataset/swagger_api/problem_api.py:64
+msgid "document id"
+msgstr "文檔 id"
+
+#: apps/xpack/serializers/dataset_lark_serializer.py:269
+#: apps/xpack/serializers/dataset_lark_serializer.py:289
+#: community/apps/dataset/serializers/document_serializers.py:497
+#: community/apps/dataset/serializers/document_serializers.py:593
+#: community/apps/dataset/serializers/document_serializers.py:1197
+msgid "document id not exist"
+msgstr "文檔 id 不存在"
+
+#: apps/xpack/serializers/dataset_lark_serializer.py:271
+#| msgid "Synchronization is only supported for web site types"
+msgid "Synchronization is only supported for lark documents"
+msgstr "僅支持飛書文檔的同步"
+
+#: apps/xpack/serializers/dataset_lark_serializer.py:374
+#: community/apps/dataset/serializers/dataset_serializers.py:549
+#: community/apps/dataset/serializers/dataset_serializers.py:914
+#: community/apps/dataset/serializers/dataset_serializers.py:915
+msgid "application id list"
+msgstr "應用 id 列表"
+
+#: apps/xpack/serializers/dataset_lark_serializer.py:416
+#: community/apps/dataset/serializers/dataset_serializers.py:175
+#: community/apps/dataset/serializers/dataset_serializers.py:837
+#: community/apps/function_lib/serializers/function_lib_serializer.py:125
+#: community/apps/function_lib/swagger_api/function_lib_api.py:119
+#: community/apps/function_lib/swagger_api/function_lib_api.py:120
+#: community/apps/function_lib/swagger_api/function_lib_api.py:165
+#: community/apps/function_lib/swagger_api/function_lib_api.py:166
+#: community/apps/setting/swagger_api/provide_api.py:81
+msgid "permission"
+msgstr "權限"
+
+#: apps/xpack/serializers/dataset_lark_serializer.py:463
+#: community/apps/dataset/serializers/dataset_serializers.py:884
+#, python-brace-format
+msgid "Unknown application id {dataset_id}, cannot be associated"
+msgstr "未知的應用id {dataset_id},無法關聯"
+
+#: apps/xpack/serializers/license_serializers.py:52
+msgid "license file"
+msgstr "License 文件"
+
+#: apps/xpack/serializers/license_tools.py:134
+msgid "License usage limit exceeded."
+msgstr "超出許可證使用限制。"
+
+#: apps/xpack/serializers/license_tools.py:158
+msgid "The network is busy, try again later."
+msgstr "網絡繁忙,請稍後再試。"
+
+#: apps/xpack/serializers/oauth2.py:79 apps/xpack/serializers/oauth2.py:82
+msgid "Failed to obtain user information"
+msgstr "獲取用戶信息失敗"
+
+#: apps/xpack/serializers/operate_log.py:36
+#: community/apps/application/serializers/application_statistics_serializers.py:27
+#: community/apps/application/serializers/chat_serializers.py:116
+#: community/apps/application/swagger_api/application_statistics_api.py:26
+msgid "Start time"
+msgstr "開始時間"
+
+#: apps/xpack/serializers/operate_log.py:37
+#: community/apps/application/serializers/application_statistics_serializers.py:28
+#: community/apps/application/serializers/chat_serializers.py:117
+#: community/apps/application/swagger_api/application_statistics_api.py:31
+#: community/apps/application/swagger_api/chat_api.py:270
+msgid "End time"
+msgstr "結束時間"
+
+#: apps/xpack/serializers/operate_log.py:38
+#: apps/xpack/swagger_api/operate_log.py:17
+#: apps/xpack/swagger_api/operate_log.py:18
+#: apps/xpack/swagger_api/operate_log.py:45
+#: apps/xpack/swagger_api/operate_log.py:46
+msgid "menu"
+msgstr "選單"
+
+#: apps/xpack/serializers/operate_log.py:39
+#: apps/xpack/swagger_api/operate_log.py:20
+#: apps/xpack/swagger_api/operate_log.py:21
+#: apps/xpack/swagger_api/operate_log.py:48
+#: apps/xpack/swagger_api/operate_log.py:49
+#| msgid "Temperature"
+msgid "operate"
+msgstr "操作"
+
+#: apps/xpack/serializers/operate_log.py:40
+#: apps/xpack/swagger_api/operate_log.py:51
+#: apps/xpack/swagger_api/operate_log.py:52
+#| msgid "user id"
+msgid "user"
+msgstr "用戶"
+
+#: apps/xpack/serializers/operate_log.py:41
+#: apps/xpack/swagger_api/operate_log.py:54
+#: apps/xpack/swagger_api/operate_log.py:55
+#: community/apps/dataset/serializers/document_serializers.py:417
+msgid "status"
+msgstr "狀態"
+
+#: apps/xpack/serializers/operate_log.py:42
+#: apps/xpack/swagger_api/operate_log.py:57
+#: apps/xpack/swagger_api/operate_log.py:58
+#| msgid "Forum address"
+msgid "ip_address"
+msgstr "IP 地址"
+
+#: apps/xpack/serializers/platform_serializer.py:14
+msgid "app_id is required"
+msgstr "app_id 是必填項"
+
+#: apps/xpack/serializers/platform_serializer.py:15
+msgid "app_secret is required"
+msgstr "app_secret 是必填項"
+
+#: apps/xpack/serializers/platform_serializer.py:16
+msgid "token is required"
+msgstr "token 是必填項"
+
+#: apps/xpack/serializers/platform_serializer.py:17
+msgid "callback_url is required"
+msgstr "回調地址是必填項"
+
+#: apps/xpack/serializers/platform_serializer.py:23
+#: apps/xpack/serializers/platform_serializer.py:32
+msgid "App ID is required"
+msgstr "App ID 是必填項"
+
+#: apps/xpack/serializers/platform_serializer.py:24
+#: apps/xpack/serializers/platform_source_serializer.py:24
+msgid "Agent ID is required"
+msgstr "Agent ID 是必填項"
+
+#: apps/xpack/serializers/platform_serializer.py:25
+msgid "Secret is required"
+msgstr "Secret 是必填項"
+
+#: apps/xpack/serializers/platform_serializer.py:26
+msgid "Token is required"
+msgstr "Token 是必填項"
+
+#: apps/xpack/serializers/platform_serializer.py:28
+#: apps/xpack/serializers/platform_serializer.py:36
+#: apps/xpack/serializers/platform_serializer.py:42
+#: apps/xpack/serializers/platform_serializer.py:48
+#: apps/xpack/serializers/platform_source_serializer.py:19
+msgid "Callback URL is required"
+msgstr "Callback URL 是必填項"
+
+#: apps/xpack/serializers/platform_serializer.py:33
+#: apps/xpack/serializers/platform_source_serializer.py:18
+msgid "App Secret is required"
+msgstr "App Secret 是必填項"
+
+#: apps/xpack/serializers/platform_serializer.py:35
+msgid "Verification Token is required"
+msgstr "Verification Token 是必填項"
+
+#: apps/xpack/serializers/platform_serializer.py:40
+msgid "Client ID is required"
+msgstr "Client ID 是必填項"
+
+#: apps/xpack/serializers/platform_serializer.py:41
+msgid "Client Secret is required"
+msgstr "Client Secret 是必填項"
+
+#: apps/xpack/serializers/platform_serializer.py:46
+#| msgid "Client Secret is required"
+msgid "Signing Secret is required"
+msgstr "Signing Secret 是必填項"
+
+#: apps/xpack/serializers/platform_serializer.py:47
+#| msgid "Token is required"
+msgid "Bot User Token is required"
+msgstr "Bot User Token 是必填項"
+
+#: apps/xpack/serializers/platform_serializer.py:68
+msgid "Check if the fields are correct"
+msgstr "檢查字段是否正確"
+
+#: apps/xpack/serializers/platform_serializer.py:114
+#: apps/xpack/views/platform.py:85 apps/xpack/views/platform.py:101
+#: community/apps/application/flow/step_node/application_node/i_application_node.py:13
+#: community/apps/application/serializers/application_serializers.py:335
+#: community/apps/application/serializers/application_serializers.py:581
+#: community/apps/application/serializers/application_serializers.py:696
+#: community/apps/application/serializers/application_serializers.py:791
+#: community/apps/application/serializers/application_serializers.py:1230
+#: community/apps/application/serializers/application_serializers.py:1272
+#: community/apps/application/serializers/application_statistics_serializers.py:26
+#: community/apps/application/serializers/application_version_serializers.py:35
+#: community/apps/application/serializers/application_version_serializers.py:59
+#: community/apps/application/serializers/chat_message_serializers.py:207
+#: community/apps/application/serializers/chat_message_serializers.py:270
+#: community/apps/application/serializers/chat_serializers.py:77
+#: community/apps/application/serializers/chat_serializers.py:102
+#: community/apps/application/serializers/chat_serializers.py:119
+#: community/apps/application/serializers/chat_serializers.py:287
+#: community/apps/application/serializers/chat_serializers.py:363
+#: community/apps/application/serializers/chat_serializers.py:440
+#: community/apps/application/swagger_api/application_api.py:87
+#: community/apps/application/swagger_api/application_api.py:101
+#: community/apps/application/swagger_api/application_api.py:112
+#: community/apps/application/swagger_api/application_api.py:143
+#: community/apps/application/swagger_api/application_api.py:392
+#: community/apps/application/swagger_api/application_api.py:413
+#: community/apps/application/swagger_api/application_api.py:424
+#: community/apps/application/swagger_api/application_statistics_api.py:21
+#: community/apps/application/swagger_api/application_version_api.py:42
+#: community/apps/application/swagger_api/application_version_api.py:56
+#: community/apps/application/swagger_api/chat_api.py:23
+#: community/apps/application/swagger_api/chat_api.py:33
+#: community/apps/application/swagger_api/chat_api.py:167
+#: community/apps/application/swagger_api/chat_api.py:168
+#: community/apps/application/swagger_api/chat_api.py:199
+#: community/apps/application/swagger_api/chat_api.py:222
+#: community/apps/application/swagger_api/chat_api.py:249
+#: community/apps/application/swagger_api/chat_api.py:281
+#: community/apps/application/swagger_api/chat_api.py:350
+#: community/apps/application/swagger_api/chat_api.py:410
+#: community/apps/application/swagger_api/chat_api.py:427
+#: community/apps/application/swagger_api/chat_api.py:460
+#: community/apps/application/views/chat_views.py:477
+msgid "Application ID"
+msgstr "應用 ID"
+
+#: apps/xpack/serializers/platform_serializer.py:116
+msgid "Platform type, for example: wechat"
+msgstr "平臺類型,例如:wechat"
+
+#: apps/xpack/serializers/platform_serializer.py:125
+#: apps/xpack/serializers/platform_serializer.py:126
+msgid "Platform type"
+msgstr "平臺類型"
+
+#: apps/xpack/serializers/platform_serializer.py:128
+msgid "Status"
+msgstr "狀態"
+
+#: apps/xpack/serializers/platform_serializer.py:138
+#: apps/xpack/serializers/platform_serializer.py:139
+msgid "Configuration information"
+msgstr "配置信息"
+
+#: apps/xpack/serializers/platform_serializer.py:191
+#, python-brace-format
+msgid "The platform configuration corresponding to {type} was not found"
+msgstr "未找到 {type} 對應的平臺配置"
+
+#: apps/xpack/serializers/platform_source_serializer.py:23
+#: apps/xpack/serializers/platform_source_serializer.py:32
+msgid "Corp ID is required"
+msgstr "Corp ID 是必填項"
+
+#: apps/xpack/serializers/platform_source_serializer.py:28
+#: apps/xpack/serializers/platform_source_serializer.py:33
+msgid "App Key is required"
+msgstr "App Key 是必填項"
+
+#: apps/xpack/serializers/platform_source_serializer.py:78
+msgid "Configuration information is wrong and failed to save"
+msgstr "配置信息錯誤,保存失敗"
+
+#: apps/xpack/serializers/platform_source_serializer.py:104
+msgid "Connection failed"
+msgstr "連接失敗"
+
+#: apps/xpack/serializers/platform_source_serializer.py:123
+msgid "Platform does not exist"
+msgstr "平臺不存在"
+
+#: apps/xpack/serializers/platform_source_serializer.py:134
+#| msgid "Unsupported file format"
+msgid "Unsupported platform type"
+msgstr "不支持的平臺類型"
+
+#: apps/xpack/serializers/qr_login/qr_login.py:28
+msgid "Team"
+msgstr "團隊成員"
+
+#: apps/xpack/serializers/system_params_serializers.py:63
+msgid "theme"
+msgstr "主題"
+
+#: apps/xpack/serializers/system_params_serializers.py:70
+msgid "website icon"
+msgstr "網站圖標"
+
+#: apps/xpack/serializers/system_params_serializers.py:77
+msgid "login logo"
+msgstr "登錄logo"
+
+#: apps/xpack/serializers/system_params_serializers.py:84
+msgid "Login background image"
+msgstr "登錄背景圖"
+
+#: apps/xpack/serializers/system_params_serializers.py:91
+msgid "website title"
+msgstr "網站標題"
+
+#: apps/xpack/serializers/system_params_serializers.py:98
+msgid "website slogan"
+msgstr "網站標語"
+
+#: apps/xpack/serializers/system_params_serializers.py:105
+msgid "Show user manual"
+msgstr "是否顯示用戶手冊"
+
+#: apps/xpack/serializers/system_params_serializers.py:112
+msgid "User manual address"
+msgstr "用戶手冊地址"
+
+#: apps/xpack/serializers/system_params_serializers.py:119
+msgid "Show forum"
+msgstr "是否顯示論壇"
+
+#: apps/xpack/serializers/system_params_serializers.py:126
+msgid "Forum address"
+msgstr "論壇地址"
+
+#: apps/xpack/serializers/system_params_serializers.py:133
+msgid "Show project"
+msgstr "是否顯示項目"
+
+#: apps/xpack/serializers/system_params_serializers.py:140
+msgid "Project address"
+msgstr "項目地址"
+
+#: apps/xpack/serializers/tools.py:58
+#, python-brace-format
+msgid ""
+"Thinking about 【{question}】...If you want me to continue answering, please "
+"reply {trigger_message}"
+msgstr ""
+"思考中【{question}】...如果您希望我繼續回答,請回復“ {trigger_message} ”。"
+
+#: apps/xpack/serializers/tools.py:158
+msgid ""
+"\n"
+" ------------\n"
+"[To be continued, reply \"Continue to answer the question]"
+msgstr ""
+"\n"
+" ------------\n"
+"【未完待續,回覆“問題繼續回答】"
+
+#: apps/xpack/serializers/tools.py:238
+#, python-brace-format
+msgid ""
+"To be continued, reply \"{trigger_message}\" to continue answering the "
+"question"
+msgstr "【未完待續,回覆“{trigger_message}”繼續回答問題】"
+
+#: apps/xpack/swagger_api/application_setting_api.py:79
+msgid "Custom theme {theme_color: , header_font_color: }"
+msgstr "自定義主題 {theme_color:, header_font_color: }"
+
+#: apps/xpack/swagger_api/application_setting_api.py:93
+msgid "Float location {top: 0, left: 0}"
+msgstr "浮窗位置 {top: 0, left: 0}"
+
+#: apps/xpack/swagger_api/application_setting_api.py:101
+#: apps/xpack/swagger_api/application_setting_api.py:102
+#: apps/xpack/swagger_api/auth_api.py:10 apps/xpack/swagger_api/auth_api.py:11
+#: apps/xpack/swagger_api/auth_api.py:81 apps/xpack/swagger_api/auth_api.py:82
+msgid "Authentication configuration"
+msgstr "認證配置"
+
+#: apps/xpack/swagger_api/application_setting_api.py:106
+#: apps/xpack/swagger_api/application_setting_api.py:107
+#: apps/xpack/swagger_api/auth_api.py:15 apps/xpack/swagger_api/auth_api.py:16
+#: apps/xpack/swagger_api/auth_api.py:30 apps/xpack/swagger_api/auth_api.py:87
+#: apps/xpack/swagger_api/auth_api.py:88 apps/xpack/views/auth.py:27
+#: apps/xpack/views/auth.py:28
+msgid "Authentication type"
+msgstr "認證類型"
+
+#: apps/xpack/swagger_api/application_setting_api.py:109
+#: apps/xpack/swagger_api/application_setting_api.py:110
+#: apps/xpack/swagger_api/auth_api.py:18 apps/xpack/swagger_api/auth_api.py:19
+#: apps/xpack/swagger_api/auth_api.py:93 apps/xpack/swagger_api/auth_api.py:94
+msgid "Configuration"
+msgstr "配置"
+
+#: apps/xpack/swagger_api/application_setting_api.py:112
+#: apps/xpack/swagger_api/application_setting_api.py:113
+#: apps/xpack/swagger_api/auth_api.py:21 apps/xpack/swagger_api/auth_api.py:22
+#: community/apps/common/swagger_api/common_api.py:72
+#: community/apps/common/swagger_api/common_api.py:73
+#: community/apps/dataset/serializers/document_serializers.py:819
+#: community/apps/dataset/serializers/document_serializers.py:820
+#: community/apps/dataset/serializers/document_serializers.py:838
+#: community/apps/dataset/serializers/document_serializers.py:839
+#: community/apps/dataset/serializers/paragraph_serializers.py:57
+#: community/apps/dataset/serializers/paragraph_serializers.py:71
+#: community/apps/dataset/serializers/paragraph_serializers.py:719
+#: community/apps/dataset/serializers/paragraph_serializers.py:720
+#: community/apps/dataset/swagger_api/problem_api.py:130
+#: community/apps/function_lib/serializers/function_lib_serializer.py:110
+#: community/apps/function_lib/serializers/function_lib_serializer.py:129
+#: community/apps/function_lib/serializers/function_lib_serializer.py:139
+#: community/apps/function_lib/swagger_api/function_lib_api.py:121
+#: community/apps/function_lib/swagger_api/function_lib_api.py:122
+#: community/apps/function_lib/swagger_api/function_lib_api.py:167
+#: community/apps/function_lib/swagger_api/function_lib_api.py:168
+#: community/apps/setting/serializers/team_serializers.py:46
+#: community/apps/users/serializers/user_serializers.py:473
+#: community/apps/users/serializers/user_serializers.py:496
+#: community/apps/users/serializers/user_serializers.py:584
+#: community/apps/users/serializers/user_serializers.py:585
+#: community/apps/users/serializers/user_serializers.py:721
+#: community/apps/users/serializers/user_serializers.py:737
+#: community/apps/users/serializers/user_serializers.py:738
+msgid "Is active"
+msgstr "是否可用"
+
+#: apps/xpack/swagger_api/auth_api.py:37
+#| msgid "Form Configuration"
+msgid "Wecom configuration"
+msgstr "企業微信配置"
+
+#: apps/xpack/swagger_api/auth_api.py:38
+#| msgid "Get function details"
+msgid "Wecom configuration details"
+msgstr "企業微信配置詳情"
+
+#: apps/xpack/swagger_api/auth_api.py:40 apps/xpack/swagger_api/auth_api.py:53
+msgid "Corp ID"
+msgstr ""
+
+#: apps/xpack/swagger_api/auth_api.py:41
+msgid "Agent ID"
+msgstr ""
+
+#: apps/xpack/swagger_api/auth_api.py:42 apps/xpack/swagger_api/auth_api.py:55
+#: apps/xpack/swagger_api/auth_api.py:67
+#| msgid "App Secret is required"
+msgid "App Secret"
+msgstr "App Secret"
+
+#: apps/xpack/swagger_api/auth_api.py:43 apps/xpack/swagger_api/auth_api.py:56
+#: apps/xpack/swagger_api/auth_api.py:68
+#| msgid "Callback URL is required"
+msgid "Callback URL"
+msgstr "Callback URL"
+
+#: apps/xpack/swagger_api/auth_api.py:50
+#| msgid "Configuration"
+msgid "Dingtalk configuration"
+msgstr "釘釘配置"
+
+#: apps/xpack/swagger_api/auth_api.py:51
+#| msgid "Get application details"
+msgid "Dingtalk configuration details"
+msgstr "釘釘配置詳情"
+
+#: apps/xpack/swagger_api/auth_api.py:54 apps/xpack/swagger_api/auth_api.py:66
+msgid "App Key"
+msgstr ""
+
+#: apps/xpack/swagger_api/auth_api.py:63
+#| msgid "Form Configuration"
+msgid "Feishu configuration"
+msgstr "飛書配置"
+
+#: apps/xpack/swagger_api/auth_api.py:64
+#| msgid "Get function details"
+msgid "Feishu configuration details"
+msgstr "飛書配置詳情"
+
+#: apps/xpack/swagger_api/license_api.py:22
+msgid "license status"
+msgstr "License 狀態"
+
+#: apps/xpack/swagger_api/license_api.py:24
+msgid ""
+"License status, possible values are: valid, invalid, expired, which "
+"respectively represent: valid, invalid, expired"
+msgstr ""
+"license狀態,可能值爲:valid、invalid、expired,分別代表:有效、無效、已過期"
+
+#: apps/xpack/swagger_api/license_api.py:26
+msgid "license details"
+msgstr "License 詳情"
+
+#: apps/xpack/swagger_api/license_api.py:30
+msgid "customer name"
+msgstr "客戶名稱"
+
+#: apps/xpack/swagger_api/license_api.py:31
+msgid "customer name. For example: *** company."
+msgstr "客戶名稱。例如:***公司。"
+
+#: apps/xpack/swagger_api/license_api.py:33
+msgid "independent software vendor"
+msgstr "獨立軟件供應商。"
+
+#: apps/xpack/swagger_api/license_api.py:35
+msgid ""
+"Independent Software Vendor. For example: *** Company, suitable for the "
+"embedded version of the product."
+msgstr "獨立軟件供應商。例如:***公司,適用於產品的嵌入式版本。"
+
+#: apps/xpack/swagger_api/license_api.py:37
+msgid "Authorization deadline."
+msgstr "授權截止時間"
+
+#: apps/xpack/swagger_api/license_api.py:39
+msgid ""
+"Authorization deadline. For example: 2020-12-31, this license will expire on "
+"2021-01-01."
+msgstr "授權截止時間。例如:2020-12-31,此license將在2021-01-01到期。"
+
+#: apps/xpack/swagger_api/license_api.py:41
+msgid "product name."
+msgstr "產品名稱"
+
+#: apps/xpack/swagger_api/license_api.py:43
+msgid "Product name. For example: JumpServer, CMP, etc."
+msgstr "產品名稱。例如:CMP、KO、JS、MS。"
+
+#: apps/xpack/swagger_api/license_api.py:45
+msgid "product version."
+msgstr "產品版本"
+
+#: apps/xpack/swagger_api/license_api.py:47
+msgid "Product version. For example: JumpServer 2.0, CMP 1.0, etc."
+msgstr "產品版本。例如:Standard、Enterprise,代表標準版、企業版。"
+
+#: apps/xpack/swagger_api/license_api.py:49
+msgid "license version."
+msgstr "License 版本"
+
+#: apps/xpack/swagger_api/license_api.py:51
+msgid "License version. For example: 1.0, 2.0, etc."
+msgstr "License版本。例如:1.0、2.0、3.0等。"
+
+#: apps/xpack/swagger_api/license_api.py:53
+msgid "authorization quantity."
+msgstr "認證數量"
+
+#: apps/xpack/swagger_api/license_api.py:55
+msgid ""
+"Authorization quantity. For example: 100, this license can be used by 100 "
+"users."
+msgstr "授權數量。例如:cmp授權的cpu數量,或JS授權的資產數量。"
+
+#: apps/xpack/swagger_api/license_api.py:57
+msgid "Serial number, the unique identifier of the License."
+msgstr "序列號,License唯一標識。"
+
+#: apps/xpack/swagger_api/license_api.py:59
+msgid ""
+"Serial number, the unique identifier of the license. The customer support "
+"portal will save the serial number after generating the license. If the "
+"serial number is not recorded in the customer support portal, the license "
+"will be regarded as an unknown source."
+msgstr ""
+"序列號,License唯一標識。客戶支持門戶生成License後會保存序列號,如果序列號在"
+"客戶支持門戶中沒有記錄,則此License將被視爲未知來源。"
+
+#: apps/xpack/swagger_api/license_api.py:61
+msgid "remarks"
+msgstr "備註"
+
+#: apps/xpack/swagger_api/license_api.py:63
+msgid ""
+"Remarks, record additional information, length limit is 50. For example, a "
+"customer purchases two identical JumpServer subscriptions and uses them in "
+"different computer rooms respectively. You can use this field to note the A "
+"computer room and B computer room to help distinguish the licenses."
+msgstr ""
+"備註,記錄額外的信息,長度限制50。例如某個客戶買了兩個同樣的JumpServer訂閱分"
+"別在不同機房使用,可以用這個字段備註A機房B機房,幫助區別License。"
+
+#: apps/xpack/swagger_api/operate_log.py:12
+#: apps/xpack/swagger_api/operate_log.py:13
+#: apps/xpack/swagger_api/operate_log.py:38
+#: apps/xpack/swagger_api/operate_log.py:39 apps/xpack/views/operate_log.py:24
+#: apps/xpack/views/operate_log.py:36
+msgid "Operate log"
+msgstr "操作日誌"
+
+#: apps/xpack/swagger_api/operate_log.py:23
+#: apps/xpack/swagger_api/operate_log.py:24
+msgid "menu_label"
+msgstr "操作選單"
+
+#: apps/xpack/swagger_api/operate_log.py:26
+#: apps/xpack/swagger_api/operate_log.py:27
+msgid "operate_label"
+msgstr "操作"
+
+#: apps/xpack/swagger_api/operate_log.py:42
+#: apps/xpack/swagger_api/operate_log.py:43
+#: community/apps/dataset/serializers/dataset_serializers.py:104
+msgid "id"
+msgstr ""
+
+#: apps/xpack/swagger_api/operate_log.py:60
+#: apps/xpack/swagger_api/operate_log.py:61
+#| msgid "license details"
+msgid "details"
+msgstr "詳情"
+
+#: apps/xpack/views/application_setting_views.py:22
+#: apps/xpack/views/application_setting_views.py:23
+#| msgid "Pro/Modify Application Settings"
+msgid "Modify Application Settings"
+msgstr "修改應用显示設置"
+
+#: apps/xpack/views/application_setting_views.py:24
+#: apps/xpack/views/application_setting_views.py:40
+msgid "Pro/Application/Public Access"
+msgstr "專業版/應用/公共訪問"
+
+#: apps/xpack/views/application_setting_views.py:37
+#: apps/xpack/views/application_setting_views.py:38
+#| msgid "Pro/Get Application Settings"
+msgid "Get Application Settings"
+msgstr "獲取應用詳情"
+
+#: apps/xpack/views/auth.py:29
+msgid "Authentication"
+msgstr "認證"
+
+#: apps/xpack/views/auth.py:40 apps/xpack/views/auth.py:41
+msgid "Add or modify authentication configuration"
+msgstr "添加或修改認證信息"
+
+#: apps/xpack/views/auth.py:44 apps/xpack/views/auth.py:58
+#: apps/xpack/views/auth.py:72
+msgid "System settings/login authentication"
+msgstr "系統設置/登錄認證"
+
+#: apps/xpack/views/auth.py:55 apps/xpack/views/auth.py:56
+msgid "Get authentication configuration"
+msgstr "獲取認證配置"
+
+#: apps/xpack/views/auth.py:69 apps/xpack/views/auth.py:70
+msgid "test connection"
+msgstr "測試連接"
+
+#: apps/xpack/views/auth.py:96 apps/xpack/views/auth.py:97
+#: community/apps/users/views/user.py:173
+#: community/apps/users/views/user.py:174
+msgid "Log in"
+msgstr "登錄"
+
+#: apps/xpack/views/auth.py:101 apps/xpack/views/auth.py:114
+#: apps/xpack/views/auth.py:130 apps/xpack/views/auth.py:146
+#: apps/xpack/views/auth.py:207 apps/xpack/views/auth.py:224
+#: apps/xpack/views/auth.py:242 apps/xpack/views/auth.py:260
+#: apps/xpack/views/auth.py:278 apps/xpack/views/auth.py:296
+msgid "Three-party login"
+msgstr "三方登錄"
+
+#: apps/xpack/views/auth.py:111 apps/xpack/views/auth.py:112
+msgid "CAS login"
+msgstr "CAS 登錄"
+
+#: apps/xpack/views/auth.py:127 apps/xpack/views/auth.py:128
+msgid "OIDC login"
+msgstr "OIDC 登錄"
+
+#: apps/xpack/views/auth.py:143 apps/xpack/views/auth.py:144
+msgid "OAuth2 login"
+msgstr "OAuth2 登錄"
+
+#: apps/xpack/views/auth.py:160 apps/xpack/views/auth.py:161
+#: apps/xpack/views/auth.py:162 apps/xpack/views/auth.py:170
+#: apps/xpack/views/auth.py:194 apps/xpack/views/auth.py:195
+#: apps/xpack/views/auth.py:196
+msgid "Get platform information"
+msgstr "獲取平臺信息"
+
+#: apps/xpack/views/auth.py:167 apps/xpack/views/auth.py:168
+msgid "Modify platform information"
+msgstr "修改平臺信息"
+
+#: apps/xpack/views/auth.py:175 apps/xpack/views/auth.py:176
+#: apps/xpack/views/auth.py:178
+msgid "Test platform connection"
+msgstr "測試平臺連接"
+
+#: apps/xpack/views/auth.py:185 apps/xpack/views/auth.py:186
+msgid "Scan code login type"
+msgstr "掃碼登錄類型"
+
+#: apps/xpack/views/auth.py:187
+msgid "Scan code to log in"
+msgstr "掃碼登錄"
+
+#: apps/xpack/views/auth.py:204 apps/xpack/views/auth.py:205
+msgid "DingTalk callback"
+msgstr "釘釘回調"
+
+#: apps/xpack/views/auth.py:221 apps/xpack/views/auth.py:222
+#| msgid "DingTalk callback"
+msgid "DingTalk OAuth2 callback"
+msgstr "釘釘回調"
+
+#: apps/xpack/views/auth.py:239 apps/xpack/views/auth.py:240
+msgid "Lark callback"
+msgstr "飛書回調"
+
+#: apps/xpack/views/auth.py:257 apps/xpack/views/auth.py:258
+#| msgid "Lark callback"
+msgid "Lark OAuth2 callback"
+msgstr "飛書回調"
+
+#: apps/xpack/views/auth.py:275 apps/xpack/views/auth.py:276
+msgid "Wecom callback"
+msgstr "企業微信回調"
+
+#: apps/xpack/views/auth.py:293 apps/xpack/views/auth.py:294
+#| msgid "Wecom callback"
+msgid "Wecom OAuth2 callback"
+msgstr "企業微信回調"
+
+#: apps/xpack/views/dataset_lark_views.py:22
+#: apps/xpack/views/dataset_lark_views.py:23
+#| msgid "Create a knowledge base"
+msgid "Create a lark knowledge base"
+msgstr "創建知識庫"
+
+#: apps/xpack/views/dataset_lark_views.py:26
+#: apps/xpack/views/dataset_lark_views.py:40
+#: community/apps/dataset/views/dataset.py:39
+#: community/apps/dataset/views/dataset.py:62
+#: community/apps/dataset/views/dataset.py:82
+#: community/apps/dataset/views/dataset.py:98
+#: community/apps/dataset/views/dataset.py:109
+#: community/apps/dataset/views/dataset.py:123
+#: community/apps/dataset/views/dataset.py:137
+#: community/apps/dataset/views/dataset.py:157
+#: community/apps/dataset/views/dataset.py:172
+#: community/apps/dataset/views/dataset.py:187
+#: community/apps/dataset/views/dataset.py:202
+#: community/apps/dataset/views/dataset.py:217
+#: community/apps/dataset/views/dataset.py:231
+#: community/apps/dataset/views/dataset.py:250
+msgid "Knowledge Base"
+msgstr "知識庫"
+
+#: apps/xpack/views/dataset_lark_views.py:36
+#: apps/xpack/views/dataset_lark_views.py:37
+#| msgid "Create a knowledge base"
+msgid "Update the lark knowledge base"
+msgstr "更新知識庫"
+
+#: apps/xpack/views/dataset_lark_views.py:53
+#: apps/xpack/views/dataset_lark_views.py:54
+#| msgid "Get a list of applications available in the knowledge base"
+msgid "Get the list of documents in the lark knowledge base"
+msgstr "獲取知識庫中文檔列表"
+
+#: apps/xpack/views/dataset_lark_views.py:57
+#: apps/xpack/views/dataset_lark_views.py:74
+#: apps/xpack/views/dataset_lark_views.py:90
+#: apps/xpack/views/dataset_lark_views.py:110
+#: community/apps/dataset/views/document.py:34
+#: community/apps/dataset/views/document.py:47
+#: community/apps/dataset/views/document.py:62
+#: community/apps/dataset/views/document.py:81
+#: community/apps/dataset/views/document.py:102
+#: community/apps/dataset/views/document.py:123
+#: community/apps/dataset/views/document.py:137
+#: community/apps/dataset/views/document.py:158
+#: community/apps/dataset/views/document.py:178
+#: community/apps/dataset/views/document.py:193
+#: community/apps/dataset/views/document.py:208
+#: community/apps/dataset/views/document.py:224
+#: community/apps/dataset/views/document.py:244
+#: community/apps/dataset/views/document.py:265
+#: community/apps/dataset/views/document.py:284
+#: community/apps/dataset/views/document.py:306
+#: community/apps/dataset/views/document.py:324
+#: community/apps/dataset/views/document.py:349
+#: community/apps/dataset/views/document.py:364
+#: community/apps/dataset/views/document.py:380
+#: community/apps/dataset/views/document.py:396
+#: community/apps/dataset/views/document.py:413
+#: community/apps/dataset/views/document.py:429
+#: community/apps/dataset/views/document.py:442
+#: community/apps/dataset/views/document.py:467
+msgid "Knowledge Base/Documentation"
+msgstr "知識庫/文檔"
+
+#: apps/xpack/views/dataset_lark_views.py:70
+#: apps/xpack/views/dataset_lark_views.py:71
+#| msgid "Create a knowledge base"
+msgid "Import documents to the lark knowledge base"
+msgstr "導入文檔到知識庫"
+
+#: apps/xpack/views/dataset_lark_views.py:86
+#: apps/xpack/views/dataset_lark_views.py:87
+#| msgid "Create document"
+msgid "Synchronize lark document"
+msgstr "同步飞书文檔"
+
+#: apps/xpack/views/dataset_lark_views.py:104
+#: apps/xpack/views/dataset_lark_views.py:105
+#| msgid "Batch sync documents"
+msgid "Batch sync lark documents"
+msgstr "批量同步飞书文檔"
+
+#: apps/xpack/views/display.py:17 apps/xpack/views/display.py:18
+msgid "View appearance settings"
+msgstr "查看外觀設置"
+
+#: apps/xpack/views/display.py:19 apps/xpack/views/display.py:33
+msgid "System Settings/Appearance Settings"
+msgstr "系統設置/外觀設置"
+
+#: apps/xpack/views/display.py:30 apps/xpack/views/display.py:31
+msgid "Update appearance settings"
+msgstr "更新外觀設置"
+
+#: apps/xpack/views/license.py:29 apps/xpack/views/license.py:30
+msgid "Get license information"
+msgstr "獲取 License 信息"
+
+#: apps/xpack/views/license.py:38 apps/xpack/views/license.py:39
+msgid "Update license information"
+msgstr "更新 License 信息"
+
+#: apps/xpack/views/license.py:44
+msgid "upload file"
+msgstr "上傳文件"
+
+#: apps/xpack/views/operate_log.py:21 apps/xpack/views/operate_log.py:22
+#| msgid "Get model parameter form"
+msgid "Get menu operate log"
+msgstr "獲取菜單操作日誌"
+
+#: apps/xpack/views/operate_log.py:33 apps/xpack/views/operate_log.py:34
+#| msgid "Get model parameter form"
+msgid "Get operate log"
+msgstr "獲取操作日誌"
+
+#: apps/xpack/views/platform.py:56 apps/xpack/views/platform.py:57
+msgid "Get platform configuration"
+msgstr "獲取平臺配置"
+
+#: apps/xpack/views/platform.py:59 apps/xpack/views/platform.py:67
+msgid "Application/application access"
+msgstr "應用/應用訪問"
+
+#: apps/xpack/views/platform.py:63 apps/xpack/views/platform.py:64
+msgid "Update platform configuration"
+msgstr "更新平臺配置"
+
+#: apps/xpack/views/platform.py:80 apps/xpack/views/platform.py:81
+msgid "Get platform status"
+msgstr "獲取平臺狀態"
+
+#: apps/xpack/views/platform.py:86
+msgid "Application/Get platform status"
+msgstr "應用/獲取平臺狀態"
+
+#: apps/xpack/views/platform.py:96 apps/xpack/views/platform.py:97
+msgid "Update platform status"
+msgstr "更新平臺狀態"
+
+#: apps/xpack/views/platform.py:103
+msgid "Application/Update platform status"
+msgstr "應用/更新平臺狀態"
+
+#: apps/xpack/views/system_api_key_views.py:28
+#: apps/xpack/views/system_api_key_views.py:29
+msgid "Get personal system API_KEY list"
+msgstr "獲取個人系統 API_KEY 列表"
+
+#: apps/xpack/views/system_api_key_views.py:30
+#: apps/xpack/views/system_api_key_views.py:39
+#: apps/xpack/views/system_api_key_views.py:53
+#: apps/xpack/views/system_api_key_views.py:62
+msgid "Personal system/API_KEY"
+msgstr "個人系統/API_KEY"
+
+#: apps/xpack/views/system_api_key_views.py:37
+#: apps/xpack/views/system_api_key_views.py:38
+msgid "Update personal system API_KEY"
+msgstr "更新個人系統 API_KEY"
+
+#: apps/xpack/views/system_api_key_views.py:51
+#: apps/xpack/views/system_api_key_views.py:52
+msgid "Delete personal system API_KEY"
+msgstr "刪除個人系統 API_KEY"
+
+#: apps/xpack/views/system_api_key_views.py:60
+#: apps/xpack/views/system_api_key_views.py:61
+msgid "Add personal system API_KEY"
+msgstr "添加個人系統 API_KEY"
+
+#: community/apps/application/chat_pipeline/step/chat_step/i_chat_step.py:27
+msgid "Model type error"
+msgstr "模型類型錯誤"
+
+#: community/apps/application/chat_pipeline/step/chat_step/i_chat_step.py:37
+#: community/apps/common/field/common.py:21
+#: community/apps/common/field/common.py:34
+msgid "Message type error"
+msgstr "消息類型錯誤"
+
+#: community/apps/application/chat_pipeline/step/chat_step/i_chat_step.py:56
+msgid "Conversation list"
+msgstr "對話列表"
+
+#: community/apps/application/chat_pipeline/step/chat_step/i_chat_step.py:57
+#: community/apps/application/chat_pipeline/step/reset_problem_step/i_reset_problem_step.py:30
+#: community/apps/application/flow/step_node/ai_chat_step_node/i_chat_node.py:19
+#: community/apps/application/flow/step_node/image_generate_step_node/i_image_generate_node.py:13
+#: community/apps/application/flow/step_node/image_understand_step_node/i_image_understand_node.py:13
+#: community/apps/application/flow/step_node/question_node/i_question_node.py:19
+#: community/apps/application/flow/step_node/speech_to_text_step_node/i_speech_to_text_node.py:13
+#: community/apps/application/flow/step_node/text_to_speech_step_node/i_text_to_speech_node.py:13
+#: community/apps/application/serializers/application_serializers.py:72
+#: community/apps/application/serializers/chat_serializers.py:365
+#: community/apps/application/swagger_api/application_api.py:53
+#: community/apps/application/swagger_api/application_api.py:185
+#: community/apps/application/swagger_api/application_api.py:186
+#: community/apps/application/swagger_api/application_api.py:334
+#: community/apps/application/swagger_api/application_api.py:335
+msgid "Model id"
+msgstr "模型 id"
+
+#: community/apps/application/chat_pipeline/step/chat_step/i_chat_step.py:59
+#: community/apps/application/chat_pipeline/step/generate_human_message_step/i_generate_human_message_step.py:30
+msgid "Paragraph List"
+msgstr "段落列表"
+
+#: community/apps/application/chat_pipeline/step/chat_step/i_chat_step.py:61
+#: community/apps/application/serializers/chat_message_serializers.py:201
+#: community/apps/application/serializers/chat_message_serializers.py:253
+#: community/apps/application/serializers/chat_serializers.py:76
+#: community/apps/application/serializers/chat_serializers.py:240
+#: community/apps/application/serializers/chat_serializers.py:439
+#: community/apps/application/serializers/chat_serializers.py:531
+#: community/apps/application/serializers/chat_serializers.py:587
+#: community/apps/application/serializers/chat_serializers.py:613
+#: community/apps/application/serializers/chat_serializers.py:672
+#: community/apps/application/serializers/chat_serializers.py:712
+#: community/apps/application/swagger_api/chat_api.py:38
+#: community/apps/application/swagger_api/chat_api.py:76
+#: community/apps/application/swagger_api/chat_api.py:171
+#: community/apps/application/swagger_api/chat_api.py:172
+#: community/apps/application/swagger_api/chat_api.py:286
+#: community/apps/application/swagger_api/chat_api.py:355
+#: community/apps/application/swagger_api/chat_api.py:432
+#: community/apps/application/swagger_api/chat_api.py:465
+#: community/apps/application/views/chat_views.py:482
+msgid "Conversation ID"
+msgstr "對話 ID"
+
+#: community/apps/application/chat_pipeline/step/chat_step/i_chat_step.py:63
+#: community/apps/application/flow/step_node/application_node/i_application_node.py:15
+#: community/apps/application/serializers/chat_message_serializers.py:254
+#: community/apps/application/serializers/chat_serializers.py:240
+msgid "User Questions"
+msgstr "用戶問題"
+
+#: community/apps/application/chat_pipeline/step/chat_step/i_chat_step.py:66
+msgid "Post-processor"
+msgstr "後置處理器"
+
+#: community/apps/application/chat_pipeline/step/chat_step/i_chat_step.py:69
+msgid "Completion Question"
+msgstr "補全問題"
+
+#: community/apps/application/chat_pipeline/step/chat_step/i_chat_step.py:71
+#: community/apps/application/serializers/chat_message_serializers.py:203
+msgid "Streaming Output"
+msgstr "流式輸出"
+
+#: community/apps/application/chat_pipeline/step/chat_step/i_chat_step.py:72
+#: community/apps/application/serializers/chat_message_serializers.py:208
+#: community/apps/application/serializers/chat_message_serializers.py:271
+#: community/apps/application/serializers/chat_serializers.py:103
+msgid "Client id"
+msgstr "客戶端 id"
+
+#: community/apps/application/chat_pipeline/step/chat_step/i_chat_step.py:73
+#: community/apps/application/serializers/chat_message_serializers.py:209
+#: community/apps/application/serializers/chat_message_serializers.py:272
+msgid "Client Type"
+msgstr "客戶端類型"
+
+#: community/apps/application/chat_pipeline/step/chat_step/i_chat_step.py:76
+#: community/apps/application/chat_pipeline/step/generate_human_message_step/i_generate_human_message_step.py:46
+#: community/apps/application/swagger_api/application_api.py:262
+msgid "No reference segment settings"
+msgstr "未查詢到引用分段"
+
+#: community/apps/application/chat_pipeline/step/chat_step/i_chat_step.py:78
+#: community/apps/application/chat_pipeline/step/reset_problem_step/i_reset_problem_step.py:31
+#: community/apps/application/chat_pipeline/step/search_dataset_step/i_search_dataset_step.py:48
+#: community/apps/application/serializers/application_serializers.py:70
+#: community/apps/application/serializers/application_serializers.py:511
+#: community/apps/application/serializers/application_serializers.py:582
+#: community/apps/application/serializers/application_serializers.py:627
+#: community/apps/application/serializers/application_serializers.py:697
+#: community/apps/application/serializers/application_serializers.py:718
+#: community/apps/application/serializers/application_serializers.py:792
+#: community/apps/application/serializers/application_serializers.py:1228
+#: community/apps/application/serializers/chat_serializers.py:118
+#: community/apps/application/serializers/chat_serializers.py:285
+#: community/apps/application/serializers/chat_serializers.py:338
+#: community/apps/application/serializers/chat_serializers.py:360
+#: community/apps/function_lib/serializers/function_lib_serializer.py:332
+#: community/apps/function_lib/serializers/function_lib_serializer.py:358
+#: community/apps/function_lib/serializers/function_lib_serializer.py:387
+msgid "User ID"
+msgstr "用戶 ID"
+
+#: community/apps/application/chat_pipeline/step/chat_step/i_chat_step.py:81
+#| msgid "Model parameter settings"
+msgid "Model settings"
+msgstr "模型參數設置"
+
+#: community/apps/application/chat_pipeline/step/chat_step/i_chat_step.py:84
+#: community/apps/application/flow/step_node/ai_chat_step_node/i_chat_node.py:31
+#: community/apps/application/flow/step_node/image_generate_step_node/i_image_generate_node.py:29
+#: community/apps/application/flow/step_node/image_understand_step_node/i_image_understand_node.py:27
+#: community/apps/application/flow/step_node/question_node/i_question_node.py:27
+#: community/apps/application/flow/step_node/text_to_speech_step_node/i_text_to_speech_node.py:19
+#: community/apps/application/serializers/chat_serializers.py:382
+msgid "Model parameter settings"
+msgstr "模型參數設置"
+
+#: community/apps/application/chat_pipeline/step/chat_step/i_chat_step.py:91
+msgid "message type error"
+msgstr "消息類型錯誤"
+
+#: community/apps/application/chat_pipeline/step/chat_step/impl/base_chat_step.py:226
+#: community/apps/application/chat_pipeline/step/chat_step/impl/base_chat_step.py:271
+msgid ""
+"Sorry, the AI model is not configured. Please go to the application to set "
+"up the AI model first."
+msgstr "抱歉,沒有配置 AI 模型,請先去應用中設置 AI 模型。"
+
+#: community/apps/application/chat_pipeline/step/generate_human_message_step/i_generate_human_message_step.py:27
+#: community/apps/application/chat_pipeline/step/reset_problem_step/i_reset_problem_step.py:25
+#: community/apps/application/chat_pipeline/step/search_dataset_step/i_search_dataset_step.py:25
+#: community/apps/application/serializers/chat_serializers.py:579
+msgid "question"
+msgstr "問題"
+
+#: community/apps/application/chat_pipeline/step/generate_human_message_step/i_generate_human_message_step.py:33
+#: community/apps/application/chat_pipeline/step/reset_problem_step/i_reset_problem_step.py:28
+msgid "History Questions"
+msgstr "歷史對答"
+
+#: community/apps/application/chat_pipeline/step/generate_human_message_step/i_generate_human_message_step.py:35
+#: community/apps/application/flow/step_node/ai_chat_step_node/i_chat_node.py:25
+#: community/apps/application/flow/step_node/image_generate_step_node/i_image_generate_node.py:21
+#: community/apps/application/flow/step_node/image_understand_step_node/i_image_understand_node.py:18
+#: community/apps/application/flow/step_node/question_node/i_question_node.py:24
+#: community/apps/application/swagger_api/application_api.py:55
+#: community/apps/application/swagger_api/application_api.py:56
+#: community/apps/application/swagger_api/application_api.py:188
+#: community/apps/application/swagger_api/application_api.py:189
+#: community/apps/application/swagger_api/application_api.py:337
+#: community/apps/application/swagger_api/application_api.py:338
+msgid "Number of multi-round conversations"
+msgstr "多輪對話數量"
+
+#: community/apps/application/chat_pipeline/step/generate_human_message_step/i_generate_human_message_step.py:38
+msgid "Maximum length of the knowledge base paragraph"
+msgstr "最大攜帶知識庫段落長度"
+
+#: community/apps/application/chat_pipeline/step/generate_human_message_step/i_generate_human_message_step.py:40
+#: community/apps/application/flow/step_node/ai_chat_step_node/i_chat_node.py:22
+#: community/apps/application/flow/step_node/image_understand_step_node/i_image_understand_node.py:16
+#: community/apps/application/flow/step_node/question_node/i_question_node.py:22
+#: community/apps/application/serializers/application_serializers.py:108
+#: community/apps/application/serializers/application_serializers.py:138
+#: community/apps/application/swagger_api/application_api.py:286
+#: community/apps/application/swagger_api/application_api.py:287
+msgid "Prompt word"
+msgstr "提示詞"
+
+#: community/apps/application/chat_pipeline/step/generate_human_message_step/i_generate_human_message_step.py:42
+#: community/apps/application/swagger_api/application_api.py:300
+#: community/apps/application/swagger_api/application_api.py:301
+msgid "System prompt words (role)"
+msgstr "系統提示詞(角色)"
+
+#: community/apps/application/chat_pipeline/step/generate_human_message_step/i_generate_human_message_step.py:44
+msgid "Completion problem"
+msgstr "補齊問題"
+
+#: community/apps/application/chat_pipeline/step/reset_problem_step/i_reset_problem_step.py:34
+#: community/apps/application/serializers/application_serializers.py:237
+msgid "Question completion prompt"
+msgstr "問題補全提示詞"
+
+#: community/apps/application/chat_pipeline/step/reset_problem_step/impl/base_reset_problem_step.py:20
+#: community/apps/application/serializers/chat_message_serializers.py:99
+#: community/apps/application/swagger_api/application_api.py:210
+#: community/apps/application/swagger_api/application_api.py:355
+#, python-brace-format
+msgid ""
+"() contains the user's question. Answer the guessed user's question based on "
+"the context ({question}) Requirement: Output a complete question and put it "
+"in the tag"
+msgstr ""
+"()裏面是用戶問題,根據上下文回答揣測用戶問題({question}) 要求: 輸出一個補全問"
+"題,並且放在 標籤中"
+
+#: community/apps/application/chat_pipeline/step/search_dataset_step/i_search_dataset_step.py:28
+msgid "System completes question text"
+msgstr "系統補全問題文本"
+
+#: community/apps/application/chat_pipeline/step/search_dataset_step/i_search_dataset_step.py:31
+#: community/apps/application/flow/step_node/search_dataset_node/i_search_dataset_node.py:39
+msgid "Dataset id list"
+msgstr "知識庫 ID 列表"
+
+#: community/apps/application/chat_pipeline/step/search_dataset_step/i_search_dataset_step.py:34
+msgid "List of document ids to exclude"
+msgstr "要排除的文檔 ID 列表"
+
+#: community/apps/application/chat_pipeline/step/search_dataset_step/i_search_dataset_step.py:37
+msgid "List of exclusion vector ids"
+msgstr "排除向量 ID 列表"
+
+#: community/apps/application/chat_pipeline/step/search_dataset_step/i_search_dataset_step.py:40
+#: community/apps/application/flow/step_node/reranker_node/i_reranker_node.py:21
+#: community/apps/application/flow/step_node/reranker_node/i_reranker_node.py:24
+#: community/apps/application/flow/step_node/search_dataset_node/i_search_dataset_node.py:24
+#: community/apps/application/serializers/application_serializers.py:121
+#: community/apps/application/serializers/chat_serializers.py:243
+#: community/apps/application/swagger_api/application_api.py:249
+#: community/apps/application/swagger_api/application_api.py:250
+msgid "Reference segment number"
+msgstr "引用分段數"
+
+#: community/apps/application/chat_pipeline/step/search_dataset_step/i_search_dataset_step.py:43
+#: community/apps/application/swagger_api/application_api.py:252
+#: community/apps/application/swagger_api/application_api.py:253
+msgid "Similarity"
+msgstr "相似度"
+
+#: community/apps/application/chat_pipeline/step/search_dataset_step/i_search_dataset_step.py:46
+#: community/apps/application/flow/step_node/search_dataset_node/i_search_dataset_node.py:30
+#: community/apps/application/serializers/application_serializers.py:129
+#: community/apps/application/serializers/application_serializers.py:590
+#: community/apps/dataset/serializers/dataset_serializers.py:576
+#| msgid "Retrieval pattern embedding|keywords|blend"
+msgid "The type only supports embedding|keywords|blend"
+msgstr "檢索模式 embedding|keywords|blend"
+
+#: community/apps/application/chat_pipeline/step/search_dataset_step/i_search_dataset_step.py:47
+#: community/apps/application/flow/step_node/search_dataset_node/i_search_dataset_node.py:31
+#: community/apps/application/serializers/application_serializers.py:130
+#: community/apps/application/serializers/application_serializers.py:591
+#: community/apps/application/swagger_api/application_api.py:259
+msgid "Retrieval Mode"
+msgstr "檢索方式"
+
+#: community/apps/application/chat_pipeline/step/search_dataset_step/impl/base_search_dataset_step.py:31
+#: community/apps/application/serializers/application_serializers.py:84
+#: community/apps/application/serializers/application_serializers.py:1026
+#: community/apps/application/serializers/application_serializers.py:1036
+#: community/apps/application/serializers/application_serializers.py:1046
+#: community/apps/dataset/serializers/dataset_serializers.py:801
+#: community/apps/dataset/serializers/document_serializers.py:746
+#: community/apps/setting/models_provider/tools.py:23
+msgid "Model does not exist"
+msgstr "模型不存在"
+
+#: community/apps/application/chat_pipeline/step/search_dataset_step/impl/base_search_dataset_step.py:33
+#, python-brace-format
+msgid "No permission to use this model {model_name}"
+msgstr "無權使用此模型 {model_name}"
+
+#: community/apps/application/chat_pipeline/step/search_dataset_step/impl/base_search_dataset_step.py:41
+msgid ""
+"The vector model of the associated knowledge base is inconsistent and the "
+"segmentation cannot be recalled."
+msgstr "關聯知識庫的向量模型不一致,無法召回分段。"
+
+#: community/apps/application/chat_pipeline/step/search_dataset_step/impl/base_search_dataset_step.py:43
+msgid "The knowledge base setting is wrong, please reset the knowledge base"
+msgstr "知識庫設置錯誤,請重新設置知識庫!"
+
+#: community/apps/application/flow/step_node/ai_chat_step_node/i_chat_node.py:21
+#: community/apps/application/flow/step_node/image_understand_step_node/i_image_understand_node.py:15
+#: community/apps/application/flow/step_node/question_node/i_question_node.py:21
+msgid "Role Setting"
+msgstr "角色設置"
+
+#: community/apps/application/flow/step_node/ai_chat_step_node/i_chat_node.py:28
+#: community/apps/application/flow/step_node/direct_reply_node/i_reply_node.py:24
+#: community/apps/application/flow/step_node/function_lib_node/i_function_lib_node.py:29
+#: community/apps/application/flow/step_node/function_node/i_function_node.py:47
+#: community/apps/application/flow/step_node/image_generate_step_node/i_image_generate_node.py:26
+#: community/apps/application/flow/step_node/image_understand_step_node/i_image_understand_node.py:22
+#: community/apps/application/flow/step_node/question_node/i_question_node.py:26
+#: community/apps/application/flow/step_node/speech_to_text_step_node/i_speech_to_text_node.py:15
+#: community/apps/application/flow/step_node/text_to_speech_step_node/i_text_to_speech_node.py:15
+msgid "Whether to return content"
+msgstr "是否返回內容"
+
+#: community/apps/application/flow/step_node/ai_chat_step_node/i_chat_node.py:35
+msgid "Context Type"
+msgstr "內容類型"
+
+#: community/apps/application/flow/step_node/application_node/i_application_node.py:16
+msgid "API Input Fields"
+msgstr "api 輸入字段"
+
+#: community/apps/application/flow/step_node/application_node/i_application_node.py:18
+msgid "User Input Fields"
+msgstr "用戶輸入字段"
+
+#: community/apps/application/flow/step_node/application_node/i_application_node.py:19
+#: community/apps/application/flow/step_node/image_understand_step_node/i_image_understand_node.py:24
+#: community/apps/application/serializers/application_serializers.py:698
+#: community/apps/application/serializers/chat_message_serializers.py:274
+#: community/apps/function_lib/serializers/function_lib_serializer.py:359
+msgid "picture"
+msgstr "圖片"
+
+#: community/apps/application/flow/step_node/application_node/i_application_node.py:20
+#: community/apps/application/flow/step_node/document_extract_node/i_document_extract_node.py:13
+#: community/apps/application/serializers/chat_message_serializers.py:275
+msgid "document"
+msgstr "文檔"
+
+#: community/apps/application/flow/step_node/application_node/i_application_node.py:21
+#: community/apps/application/serializers/chat_message_serializers.py:276
+msgid "Audio"
+msgstr "音頻"
+
+#: community/apps/application/flow/step_node/application_node/i_application_node.py:23
+#: community/apps/application/serializers/chat_message_serializers.py:278
+msgid "Child Nodes"
+msgstr "子節點"
+
+#: community/apps/application/flow/step_node/application_node/i_application_node.py:24
+#: community/apps/application/flow/step_node/form_node/i_form_node.py:21
+msgid "Form Data"
+msgstr "表單數據"
+
+#: community/apps/application/flow/step_node/application_node/i_application_node.py:58
+msgid ""
+"Parameter value error: The uploaded document lacks file_id, and the document "
+"upload fails"
+msgstr "參數值錯誤: 上傳的文檔中缺少 file_id,文檔上傳失敗"
+
+#: community/apps/application/flow/step_node/application_node/i_application_node.py:67
+msgid ""
+"Parameter value error: The uploaded image lacks file_id, and the image "
+"upload fails"
+msgstr "參數值錯誤: 上傳的圖片中缺少 file_id,圖片上傳失敗"
+
+#: community/apps/application/flow/step_node/application_node/i_application_node.py:77
+msgid ""
+"Parameter value error: The uploaded audio lacks file_id, and the audio "
+"upload fails."
+msgstr "參數值錯誤: 上傳的音頻中缺少file_id,音頻上傳失敗"
+
+#: community/apps/application/flow/step_node/condition_node/i_condition_node.py:19
+#: community/apps/application/serializers/chat_serializers.py:124
+msgid "Comparator"
+msgstr "比較器"
+
+#: community/apps/application/flow/step_node/condition_node/i_condition_node.py:20
+#: community/apps/application/swagger_api/application_api.py:271
+msgid "value"
+msgstr "值"
+
+#: community/apps/application/flow/step_node/condition_node/i_condition_node.py:21
+msgid "Fields"
+msgstr "字段"
+
+#: community/apps/application/flow/step_node/condition_node/i_condition_node.py:25
+msgid "Branch id"
+msgstr "分支 id"
+
+#: community/apps/application/flow/step_node/condition_node/i_condition_node.py:26
+msgid "Branch Type"
+msgstr "分支類型"
+
+#: community/apps/application/flow/step_node/condition_node/i_condition_node.py:27
+msgid "Condition or|and"
+msgstr "條件 or|and"
+
+#: community/apps/application/flow/step_node/direct_reply_node/i_reply_node.py:20
+msgid "Response Type"
+msgstr "響應類型"
+
+#: community/apps/application/flow/step_node/direct_reply_node/i_reply_node.py:21
+#: community/apps/application/flow/step_node/variable_assign_node/i_variable_assign_node.py:14
+msgid "Reference Field"
+msgstr "引用字段"
+
+#: community/apps/application/flow/step_node/direct_reply_node/i_reply_node.py:23
+msgid "Direct answer content"
+msgstr "直接回答內容"
+
+#: community/apps/application/flow/step_node/direct_reply_node/i_reply_node.py:30
+msgid "Reference field cannot be empty"
+msgstr "引用字段不能爲空"
+
+#: community/apps/application/flow/step_node/direct_reply_node/i_reply_node.py:32
+msgid "Reference field error"
+msgstr "引用字段錯誤"
+
+#: community/apps/application/flow/step_node/direct_reply_node/i_reply_node.py:35
+msgid "Content cannot be empty"
+msgstr "內容不能爲空"
+
+#: community/apps/application/flow/step_node/form_node/i_form_node.py:19
+msgid "Form Configuration"
+msgstr "表單配置"
+
+#: community/apps/application/flow/step_node/form_node/i_form_node.py:20
+msgid "Form output content"
+msgstr "表單輸出內容"
+
+#: community/apps/application/flow/step_node/function_lib_node/i_function_lib_node.py:22
+#: community/apps/application/flow/step_node/function_node/i_function_node.py:24
+msgid "Variable Name"
+msgstr "變量名"
+
+#: community/apps/application/flow/step_node/function_lib_node/i_function_lib_node.py:23
+#: community/apps/application/flow/step_node/function_node/i_function_node.py:34
+msgid "Variable Value"
+msgstr "變量值"
+
+#: community/apps/application/flow/step_node/function_lib_node/i_function_lib_node.py:27
+msgid "Library ID"
+msgstr "函數庫id"
+
+#: community/apps/application/flow/step_node/function_lib_node/i_function_lib_node.py:35
+msgid "The function has been deleted"
+msgstr "函數已被刪除"
+
+#: community/apps/application/flow/step_node/function_node/i_function_node.py:25
+msgid "Is this field required"
+msgstr "字段是否必填"
+
+#: community/apps/application/flow/step_node/function_node/i_function_node.py:28
+msgid "The field only supports string|int|dict|array|float"
+msgstr "字段只支持 string|int|dict|array|float"
+
+#: community/apps/application/flow/step_node/function_node/i_function_node.py:30
+#: community/apps/function_lib/serializers/function_lib_serializer.py:76
+#: community/apps/function_lib/swagger_api/function_lib_api.py:98
+#: community/apps/function_lib/swagger_api/function_lib_api.py:144
+#: community/apps/function_lib/swagger_api/function_lib_api.py:190
+msgid "source"
+msgstr "來源"
+
+#: community/apps/application/flow/step_node/function_node/i_function_node.py:32
+#: community/apps/function_lib/serializers/function_lib_serializer.py:78
+msgid "The field only supports custom|reference"
+msgstr "字段只支持 custom|reference"
+
+#: community/apps/application/flow/step_node/function_node/i_function_node.py:40
+#, python-brace-format
+msgid "{field}, this field is required."
+msgstr "{field}, 此字段爲必填項。"
+
+#: community/apps/application/flow/step_node/function_node/i_function_node.py:46
+#: community/apps/function_lib/views/function_lib_views.py:131
+#: community/apps/function_lib/views/function_lib_views.py:145
+msgid "function"
+msgstr "函數"
+
+#: community/apps/application/flow/step_node/image_generate_step_node/i_image_generate_node.py:15
+msgid "Prompt word (positive)"
+msgstr "提示詞(正向)"
+
+#: community/apps/application/flow/step_node/image_generate_step_node/i_image_generate_node.py:17
+msgid "Prompt word (negative)"
+msgstr "提示詞(負向)"
+
+#: community/apps/application/flow/step_node/image_generate_step_node/i_image_generate_node.py:24
+#: community/apps/application/flow/step_node/image_understand_step_node/i_image_understand_node.py:20
+msgid "Conversation storage type"
+msgstr "對話存儲類型"
+
+#: community/apps/application/flow/step_node/reranker_node/i_reranker_node.py:26
+#: community/apps/application/flow/step_node/search_dataset_node/i_search_dataset_node.py:33
+msgid "Maximum number of words in a quoted segment"
+msgstr "最大引用分段字數"
+
+#: community/apps/application/flow/step_node/search_dataset_node/i_search_dataset_node.py:27
+#: community/apps/common/swagger_api/common_api.py:36
+#: community/apps/dataset/serializers/dataset_serializers.py:573
+msgid "similarity"
+msgstr "相似度"
+
+#: community/apps/application/flow/step_node/speech_to_text_step_node/i_speech_to_text_node.py:17
+msgid "The audio file cannot be empty"
+msgstr "音頻文件不能爲空"
+
+#: community/apps/application/flow/step_node/speech_to_text_step_node/i_speech_to_text_node.py:31
+msgid ""
+"Parameter value error: The uploaded audio lacks file_id, and the audio "
+"upload fails"
+msgstr "參數值錯誤:上傳的音頻缺少file_id,音頻上傳失敗"
+
+#: community/apps/application/flow/step_node/text_to_speech_step_node/i_text_to_speech_node.py:17
+msgid "Text content"
+msgstr "文本內容"
+
+#: community/apps/application/flow/workflow_manage.py:107
+#, python-brace-format
+msgid "The branch {branch} of the {node} node needs to be connected"
+msgstr "{node}節點的{branch}分支需要連接"
+
+#: community/apps/application/flow/workflow_manage.py:113
+#, python-brace-format
+msgid "{node} Nodes cannot be considered as end nodes"
+msgstr "{node}節點不能當做結束節點"
+
+#: community/apps/application/flow/workflow_manage.py:123
+msgid "The next node that does not exist"
+msgstr "不存在的下一個節點"
+
+#: community/apps/application/flow/workflow_manage.py:137
+msgid "The starting node is required"
+msgstr "開始節點必填"
+
+#: community/apps/application/flow/workflow_manage.py:139
+msgid "There can only be one starting node"
+msgstr "開始節點只能有一個"
+
+#: community/apps/application/flow/workflow_manage.py:147
+#, python-brace-format
+msgid "The node {node} model does not exist"
+msgstr "節點{node}模型不存在"
+
+#: community/apps/application/flow/workflow_manage.py:157
+#, python-brace-format
+msgid "Node {node} is unavailable"
+msgstr "節點{node}不可用"
+
+#: community/apps/application/flow/workflow_manage.py:163
+#, python-brace-format
+msgid "The library ID of node {node} cannot be empty"
+msgstr "節點{node}函式程式庫id不能為空"
+
+#: community/apps/application/flow/workflow_manage.py:166
+#, python-brace-format
+msgid "The function library for node {node} is not available"
+msgstr "節點{node}函式程式庫不可用"
+
+#: community/apps/application/flow/workflow_manage.py:172
+msgid "Basic information node is required"
+msgstr "基本資訊節點必填"
+
+#: community/apps/application/flow/workflow_manage.py:174
+msgid "There can only be one basic information node"
+msgstr "基本資訊節點只能有一個"
+
+#: community/apps/application/serializers/application_serializers.py:75
+#: community/apps/application/serializers/chat_serializers.py:618
+#: community/apps/application/serializers/chat_serializers.py:677
+#: community/apps/application/serializers/chat_serializers.py:709
+#: community/apps/application/swagger_api/chat_api.py:365
+#: community/apps/application/swagger_api/chat_api.py:393
+#: community/apps/application/swagger_api/chat_api.py:394
+#: community/apps/application/swagger_api/chat_api.py:415
+#: community/apps/application/swagger_api/chat_api.py:494
+#: community/apps/application/swagger_api/chat_api.py:495
+msgid "Knowledge base id"
+msgstr "知識庫 id"
+
+#: community/apps/application/serializers/application_serializers.py:76
+msgid "Knowledge Base List"
+msgstr "知識庫列表"
+
+#: community/apps/application/serializers/application_serializers.py:90
+msgid "The knowledge base id does not exist"
+msgstr "知識庫 id 不存在"
+
+#: community/apps/application/serializers/application_serializers.py:107
+msgid "No reference status"
+msgstr "無引用狀態"
+
+#: community/apps/application/serializers/application_serializers.py:123
+msgid "Acquaintance"
+msgstr "相似度"
+
+#: community/apps/application/serializers/application_serializers.py:126
+#: community/apps/application/swagger_api/application_api.py:256
+#: community/apps/application/swagger_api/application_api.py:257
+msgid "Maximum number of quoted characters"
+msgstr "最多引用字符數"
+
+#: community/apps/application/serializers/application_serializers.py:133
+msgid "Segment settings not referenced"
+msgstr "未引用分段設置"
+
+#: community/apps/application/serializers/application_serializers.py:140
+msgid "Role prompts"
+msgstr "角色提示詞"
+
+#: community/apps/application/serializers/application_serializers.py:142
+#: community/apps/application/swagger_api/application_api.py:303
+#: community/apps/application/swagger_api/application_api.py:305
+msgid "No citation segmentation prompt"
+msgstr "無引用分段提示詞"
+
+#: community/apps/application/serializers/application_serializers.py:144
+msgid "Thinking process switch"
+msgstr "思考過程開關"
+
+#: community/apps/application/serializers/application_serializers.py:148
+msgid "The thinking process begins to mark"
+msgstr "思考過程開始標記"
+
+#: community/apps/application/serializers/application_serializers.py:151
+msgid "End of thinking process marker"
+msgstr "思考過程結束標記"
+
+#: community/apps/application/serializers/application_serializers.py:156
+#: community/apps/application/serializers/application_serializers.py:482
+#: community/apps/application/serializers/application_serializers.py:623
+#: community/apps/application/swagger_api/application_api.py:49
+#: community/apps/application/swagger_api/application_api.py:50
+#: community/apps/application/swagger_api/application_api.py:181
+#: community/apps/application/swagger_api/application_api.py:182
+#: community/apps/application/swagger_api/application_api.py:330
+#: community/apps/application/swagger_api/application_api.py:331
+#: community/apps/application/swagger_api/application_api.py:377
+msgid "Application Name"
+msgstr "應用名稱"
+
+#: community/apps/application/serializers/application_serializers.py:159
+#: community/apps/application/serializers/application_serializers.py:484
+#: community/apps/application/serializers/application_serializers.py:625
+#: community/apps/application/swagger_api/application_api.py:51
+#: community/apps/application/swagger_api/application_api.py:52
+#: community/apps/application/swagger_api/application_api.py:183
+#: community/apps/application/swagger_api/application_api.py:184
+#: community/apps/application/swagger_api/application_api.py:332
+#: community/apps/application/swagger_api/application_api.py:333
+#: community/apps/application/swagger_api/application_api.py:382
+msgid "Application Description"
+msgstr "應用描述"
+
+#: community/apps/application/serializers/application_serializers.py:160
+msgid "Workflow Objects"
+msgstr "工作流對象"
+
+#: community/apps/application/serializers/application_serializers.py:162
+#: community/apps/application/serializers/application_serializers.py:225
+#: community/apps/application/serializers/application_serializers.py:492
+#: community/apps/application/swagger_api/application_api.py:57
+#: community/apps/application/swagger_api/application_api.py:58
+#: community/apps/application/swagger_api/application_api.py:190
+#: community/apps/application/swagger_api/application_api.py:191
+#: community/apps/application/swagger_api/application_api.py:339
+#: community/apps/application/swagger_api/application_api.py:340
+msgid "Opening remarks"
+msgstr "開場白"
+
+#: community/apps/application/serializers/application_serializers.py:214
+#: community/apps/dataset/serializers/dataset_serializers.py:105
+#: community/apps/dataset/serializers/dataset_serializers.py:106
+msgid "application name"
+msgstr "應用名稱"
+
+#: community/apps/application/serializers/application_serializers.py:217
+msgid "application describe"
+msgstr "應用描述"
+
+#: community/apps/application/serializers/application_serializers.py:219
+#: community/apps/application/serializers/application_serializers.py:486
+msgid "Model"
+msgstr "模型"
+
+#: community/apps/application/serializers/application_serializers.py:223
+#: community/apps/application/serializers/application_serializers.py:490
+msgid "Historical chat records"
+msgstr "歷史聊天記錄"
+
+#: community/apps/application/serializers/application_serializers.py:228
+#: community/apps/application/serializers/application_serializers.py:494
+msgid "Related Knowledge Base"
+msgstr "關聯知識庫"
+
+#: community/apps/application/serializers/application_serializers.py:235
+#: community/apps/application/serializers/application_serializers.py:504
+#: community/apps/application/serializers/chat_serializers.py:379
+msgid "Question completion"
+msgstr "問題補全"
+
+#: community/apps/application/serializers/application_serializers.py:239
+#: community/apps/application/swagger_api/application_api.py:203
+#: community/apps/application/swagger_api/application_api.py:349
+msgid "Application Type"
+msgstr "應用類型"
+
+#: community/apps/application/serializers/application_serializers.py:243
+msgid "Application type only supports SIMPLE|WORK_FLOW"
+msgstr "應用類型只支持 SIMPLE|WORK_FLOW"
+
+#: community/apps/application/serializers/application_serializers.py:247
+#: community/apps/application/serializers/application_serializers.py:508
+msgid "Model parameters"
+msgstr "模型參數"
+
+#: community/apps/application/serializers/application_serializers.py:255
+msgid "Host"
+msgstr "主機"
+
+#: community/apps/application/serializers/application_serializers.py:256
+msgid "protocol"
+msgstr "協議"
+
+#: community/apps/application/serializers/application_serializers.py:339
+#: community/apps/application/swagger_api/application_api.py:153
+#: community/apps/application/swagger_api/application_api.py:154
+msgid "Reset Token"
+msgstr "重置 Token"
+
+#: community/apps/application/serializers/application_serializers.py:340
+msgid "Is it enabled"
+msgstr "是否開啓"
+
+#: community/apps/application/serializers/application_serializers.py:343
+#: community/apps/application/swagger_api/application_api.py:158
+#: community/apps/application/swagger_api/application_api.py:159
+msgid "Number of visits"
+msgstr "訪問次數"
+
+#: community/apps/application/serializers/application_serializers.py:345
+#: community/apps/application/swagger_api/application_api.py:160
+#: community/apps/application/swagger_api/application_api.py:161
+msgid "Whether to enable whitelist"
+msgstr "是否開啓白名單"
+
+#: community/apps/application/serializers/application_serializers.py:348
+#: community/apps/application/serializers/application_serializers.py:349
+#: community/apps/application/swagger_api/application_api.py:163
+#: community/apps/application/swagger_api/application_api.py:164
+msgid "Whitelist"
+msgstr "白名單"
+
+#: community/apps/application/serializers/application_serializers.py:352
+#: community/apps/application/swagger_api/application_api.py:166
+#: community/apps/application/swagger_api/application_api.py:167
+msgid "Whether to display knowledge sources"
+msgstr "是否顯示知識來源"
+
+#: community/apps/application/serializers/application_serializers.py:423
+msgid "access_token"
+msgstr "access_token"
+
+#: community/apps/application/serializers/application_serializers.py:425
+msgid "Certification Information"
+msgstr "認證信息"
+
+#: community/apps/application/serializers/application_serializers.py:462
+msgid "Invalid access_token"
+msgstr "無效的access_token"
+
+#: community/apps/application/serializers/application_serializers.py:473
+msgid "Wrong password"
+msgstr "密碼錯誤"
+
+#: community/apps/application/serializers/application_serializers.py:498
+msgid "Dataset settings"
+msgstr "知識庫設置"
+
+#: community/apps/application/serializers/application_serializers.py:501
+msgid "Model setup"
+msgstr "模型設置"
+
+#: community/apps/application/serializers/application_serializers.py:505
+msgid "Icon"
+msgstr "icon 圖標"
+
+#: community/apps/application/serializers/application_serializers.py:515
+#: community/apps/application/serializers/application_serializers.py:722
+#: community/apps/setting/serializers/valid_serializers.py:29
+msgid ""
+"The community version supports up to 5 applications. If you need more "
+"applications, please contact us (https://fit2cloud.com/)."
+msgstr ""
+"社區版最多支持 5 個應用,如需擁有更多應用,請聯繫我們(https://"
+"fit2cloud.com/)"
+
+#: community/apps/application/serializers/application_serializers.py:583
+msgid "Query text"
+msgstr "查詢文本"
+
+#: community/apps/application/serializers/application_serializers.py:585
+msgid "topN"
+msgstr "topN"
+
+#: community/apps/application/serializers/application_serializers.py:587
+msgid "Relevance"
+msgstr "相似度"
+
+#: community/apps/application/serializers/application_serializers.py:596
+#: community/apps/application/serializers/application_serializers.py:705
+#: community/apps/application/serializers/application_serializers.py:797
+msgid "Application id does not exist"
+msgstr "應用 ID 不存在"
+
+#: community/apps/application/serializers/application_serializers.py:628
+msgid "Select User ID"
+msgstr "選擇用戶 ID"
+
+#: community/apps/application/serializers/application_serializers.py:717
+#: community/apps/dataset/serializers/document_serializers.py:164
+#: community/apps/dataset/serializers/document_serializers.py:213
+#: community/apps/dataset/serializers/document_serializers.py:220
+#: community/apps/dataset/serializers/file_serializers.py:59
+#: community/apps/dataset/views/file.py:35
+#: community/apps/dataset/views/file.py:44
+#: community/apps/function_lib/serializers/function_lib_serializer.py:331
+msgid "file"
+msgstr "文件"
+
+#: community/apps/application/serializers/application_serializers.py:732
+#: community/apps/common/handle/impl/qa/zip_parse_qa_handle.py:62
+#: community/apps/common/handle/impl/zip_split_handle.py:56
+#: community/apps/dataset/serializers/document_serializers.py:874
+#: community/apps/dataset/serializers/document_serializers.py:882
+#: community/apps/function_lib/serializers/function_lib_serializer.py:343
+msgid "Unsupported file format"
+msgstr "文件格式不支持"
+
+#: community/apps/application/serializers/application_serializers.py:872
+msgid "work_flow is a required field"
+msgstr "work_flow是必填字段"
+
+#: community/apps/application/serializers/application_serializers.py:934
+#: community/apps/application/serializers/application_serializers.py:1076
+#, python-brace-format
+msgid "Unknown knowledge base id {dataset_id}, unable to associate"
+msgstr "未知的知識庫 id {dataset_id},無法關聯"
+
+#: community/apps/application/serializers/application_serializers.py:954
+msgid "Illegal User"
+msgstr "非法用戶"
+
+#: community/apps/application/serializers/application_serializers.py:1028
+#: community/apps/application/serializers/application_serializers.py:1038
+#: community/apps/application/serializers/application_serializers.py:1048
+#, python-brace-format
+msgid "No permission to use this model:{model_name}"
+msgstr "用戶沒有使用該模型:{model_name}的權限"
+
+#: community/apps/application/serializers/application_serializers.py:1259
+#: community/apps/application/swagger_api/chat_api.py:498
+#: community/apps/application/swagger_api/chat_api.py:499
+msgid "Availability"
+msgstr "是否可用"
+
+#: community/apps/application/serializers/application_serializers.py:1263
+#: community/apps/application/swagger_api/application_api.py:129
+#: community/apps/application/swagger_api/application_api.py:130
+msgid "Is cross-domain allowed"
+msgstr "是否允許跨域"
+
+#: community/apps/application/serializers/application_serializers.py:1268
+msgid "Cross-domain address"
+msgstr "跨域地址"
+
+#: community/apps/application/serializers/application_serializers.py:1269
+#: community/apps/application/swagger_api/application_api.py:131
+msgid "Cross-domain list"
+msgstr "跨域列表"
+
+#: community/apps/application/serializers/application_serializers.py:1274
+msgid "ApiKeyid"
+msgstr "ApiKey id"
+
+#: community/apps/application/serializers/application_serializers.py:1295
+msgid "APIKey does not exist"
+msgstr "APIKey 不存在"
+
+#: community/apps/application/serializers/application_version_serializers.py:30
+#: community/apps/application/swagger_api/application_version_api.py:24
+#: community/apps/application/swagger_api/application_version_api.py:25
+#: community/apps/application/swagger_api/application_version_api.py:47
+#: community/apps/application/swagger_api/application_version_api.py:70
+#: community/apps/application/swagger_api/application_version_api.py:71
+msgid "Version Name"
+msgstr "版本名稱"
+
+#: community/apps/application/serializers/application_version_serializers.py:37
+#: community/apps/application/serializers/chat_serializers.py:115
+#: community/apps/application/serializers/chat_serializers.py:240
+msgid "summary"
+msgstr "摘要"
+
+#: community/apps/application/serializers/application_version_serializers.py:61
+msgid "Workflow version id"
+msgstr "工作流版本 id"
+
+#: community/apps/application/serializers/application_version_serializers.py:71
+#: community/apps/application/serializers/application_version_serializers.py:86
+msgid "Workflow version does not exist"
+msgstr "工作流版本不存在"
+
+#: community/apps/application/serializers/chat_message_serializers.py:195
+#: community/apps/dataset/serializers/paragraph_serializers.py:47
+#: community/apps/dataset/serializers/paragraph_serializers.py:180
+#: community/apps/dataset/serializers/paragraph_serializers.py:692
+#: community/apps/dataset/serializers/paragraph_serializers.py:705
+#: community/apps/dataset/serializers/paragraph_serializers.py:706
+#: community/apps/dataset/serializers/problem_serializers.py:41
+#: community/apps/dataset/serializers/problem_serializers.py:52
+#: community/apps/dataset/serializers/problem_serializers.py:113
+#: community/apps/dataset/swagger_api/problem_api.py:24
+#: community/apps/dataset/swagger_api/problem_api.py:25
+#: community/apps/dataset/swagger_api/problem_api.py:109
+#: community/apps/dataset/swagger_api/problem_api.py:110
+#: community/apps/dataset/swagger_api/problem_api.py:126
+#: community/apps/dataset/swagger_api/problem_api.py:127
+#: community/apps/dataset/swagger_api/problem_api.py:154
+#: community/apps/dataset/swagger_api/problem_api.py:169
+msgid "content"
+msgstr "內容"
+
+#: community/apps/application/serializers/chat_message_serializers.py:196
+#: community/apps/setting/serializers/team_serializers.py:45
+#: community/apps/users/serializers/user_serializers.py:472
+#: community/apps/users/serializers/user_serializers.py:495
+#: community/apps/users/serializers/user_serializers.py:586
+msgid "Role"
+msgstr "角色"
+
+#: community/apps/application/serializers/chat_message_serializers.py:202
+msgid "Regenerate"
+msgstr "重新生成"
+
+#: community/apps/application/serializers/chat_message_serializers.py:256
+msgid "Is the answer in streaming mode"
+msgstr "是否流式回答"
+
+#: community/apps/application/serializers/chat_message_serializers.py:257
+msgid "Do you want to reply again"
+msgstr "是否重新回答"
+
+#: community/apps/application/serializers/chat_message_serializers.py:259
+#: community/apps/application/serializers/chat_serializers.py:442
+#: community/apps/application/serializers/chat_serializers.py:534
+#: community/apps/application/serializers/chat_serializers.py:590
+#: community/apps/application/serializers/chat_serializers.py:616
+#: community/apps/application/serializers/chat_serializers.py:675
+#: community/apps/application/swagger_api/chat_api.py:148
+#: community/apps/application/swagger_api/chat_api.py:149
+#: community/apps/application/swagger_api/chat_api.py:360
+#: community/apps/application/swagger_api/chat_api.py:437
+#: community/apps/application/swagger_api/chat_api.py:470
+msgid "Conversation record id"
+msgstr "對話記錄 ID"
+
+#: community/apps/application/serializers/chat_message_serializers.py:262
+msgid "Node id"
+msgstr "節點 ID"
+
+#: community/apps/application/serializers/chat_message_serializers.py:265
+#: community/apps/application/swagger_api/chat_api.py:142
+#: community/apps/application/swagger_api/chat_api.py:143
+msgid "Runtime node id"
+msgstr "運行時節點 ID"
+
+#: community/apps/application/serializers/chat_message_serializers.py:268
+msgid "Node parameters"
+msgstr "節點參數"
+
+#: community/apps/application/serializers/chat_message_serializers.py:273
+msgid "Global variables"
+msgstr "全局變量"
+
+#: community/apps/application/serializers/chat_message_serializers.py:286
+#: community/apps/application/serializers/chat_message_serializers.py:421
+#: community/apps/application/serializers/chat_serializers.py:469
+msgid "Conversation does not exist"
+msgstr "對話不存在"
+
+#: community/apps/application/serializers/chat_message_serializers.py:303
+msgid "The number of visits exceeds today's visits"
+msgstr "訪問次數超過今日訪問量"
+
+#: community/apps/application/serializers/chat_message_serializers.py:314
+msgid "The current model is not available"
+msgstr "當前模型不可用"
+
+#: community/apps/application/serializers/chat_message_serializers.py:316
+msgid "The model is downloading, please try again later"
+msgstr "模型正在下載中,請稍後再試"
+
+#: community/apps/application/serializers/chat_message_serializers.py:361
+#: community/apps/application/serializers/chat_serializers.py:599
+#: community/apps/application/serializers/chat_serializers.py:645
+#: community/apps/application/serializers/chat_serializers.py:694
+msgid "Conversation record does not exist"
+msgstr "對話記錄不存在"
+
+#: community/apps/application/serializers/chat_message_serializers.py:454
+#: community/apps/application/serializers/chat_serializers.py:314
+msgid "The application has not been published. Please use it after publishing."
+msgstr "應用未發佈,請發佈後使用"
+
+#: community/apps/application/serializers/chat_serializers.py:55
+msgid "node"
+msgstr "節點"
+
+#: community/apps/application/serializers/chat_serializers.py:56
+msgid "Connection"
+msgstr "連線"
+
+#: community/apps/application/serializers/chat_serializers.py:71
+#: community/apps/application/swagger_api/chat_api.py:48
+#: community/apps/application/swagger_api/chat_api.py:49
+#: community/apps/application/swagger_api/chat_api.py:169
+#: community/apps/application/swagger_api/chat_api.py:170
+#: community/apps/application/swagger_api/chat_api.py:256
+msgid "abstract"
+msgstr "摘要"
+
+#: community/apps/application/serializers/chat_serializers.py:121
+#: community/apps/application/swagger_api/chat_api.py:258
+msgid "Minimum number of likes"
+msgstr "最小點贊數"
+
+#: community/apps/application/serializers/chat_serializers.py:123
+#: community/apps/application/swagger_api/chat_api.py:260
+msgid "Minimum number of clicks"
+msgstr "最小點踩數"
+
+#: community/apps/application/serializers/chat_serializers.py:126
+msgid "Only supports and|or"
+msgstr "只支持 and|or"
+
+#: community/apps/application/serializers/chat_serializers.py:241
+msgid "Problem after optimization"
+msgstr "問題優化"
+
+#: community/apps/application/serializers/chat_serializers.py:242
+msgid "answer"
+msgstr "回答"
+
+#: community/apps/application/serializers/chat_serializers.py:242
+msgid "User feedback"
+msgstr "用戶回饋"
+
+#: community/apps/application/serializers/chat_serializers.py:244
+msgid "Section title + content"
+msgstr "分段標題+內容"
+
+#: community/apps/application/serializers/chat_serializers.py:245
+#: community/apps/application/views/chat_views.py:385
+#: community/apps/application/views/chat_views.py:386
+msgid "Annotation"
+msgstr "標註"
+
+#: community/apps/application/serializers/chat_serializers.py:245
+msgid "Consuming tokens"
+msgstr "消耗tokens"
+
+#: community/apps/application/serializers/chat_serializers.py:245
+msgid "Time consumed (s)"
+msgstr "耗時(s)"
+
+#: community/apps/application/serializers/chat_serializers.py:246
+msgid "Question Time"
+msgstr "提問時間"
+
+#: community/apps/application/serializers/chat_serializers.py:337
+msgid "Workflow"
+msgstr "工作流"
+
+#: community/apps/application/serializers/chat_serializers.py:369
+msgid "Multi-round conversation"
+msgstr "多輪對話"
+
+#: community/apps/application/serializers/chat_serializers.py:372
+msgid "Related Datasets"
+msgstr "關聯數據集"
+
+#: community/apps/application/serializers/chat_serializers.py:449
+#| msgid "Application authentication token"
+msgid "Application authentication information does not exist"
+msgstr "應用認證信息不存在"
+
+#: community/apps/application/serializers/chat_serializers.py:451
+#| msgid "Whether to display knowledge sources"
+msgid "Displaying knowledge sources is not enabled"
+msgstr "未開啓顯示知識來源"
+
+#: community/apps/application/serializers/chat_serializers.py:537
+msgid "Bidding Status"
+msgstr "投標狀態"
+
+#: community/apps/application/serializers/chat_serializers.py:546
+#| msgid "The task is being executed, please do not send it repeatedly."
+msgid ""
+"Voting on the current session minutes, please do not send repeated requests"
+msgstr "當前會話正在投票中,請勿重複發送請求"
+
+#: community/apps/application/serializers/chat_serializers.py:551
+#| msgid "Get a list of conversation records"
+msgid "Non-existent conversation chat_record_id"
+msgstr "不存在的對話 chat_record_id"
+
+#: community/apps/application/serializers/chat_serializers.py:568
+#| msgid "Already associated, please do not associate again"
+msgid "Already voted, please cancel first and then vote again"
+msgstr "已投票,請先取消再重新投票"
+
+#: community/apps/application/serializers/chat_serializers.py:575
+#: community/apps/application/swagger_api/chat_api.py:379
+#: community/apps/application/swagger_api/chat_api.py:380
+#: community/apps/dataset/swagger_api/problem_api.py:128
+#: community/apps/dataset/swagger_api/problem_api.py:129
+msgid "Section title"
+msgstr "段落標題"
+
+#: community/apps/application/serializers/chat_serializers.py:576
+#: community/apps/application/swagger_api/chat_api.py:381
+#: community/apps/application/swagger_api/chat_api.py:382
+#: community/apps/application/swagger_api/chat_api.py:483
+#: community/apps/application/swagger_api/chat_api.py:484
+#: community/apps/common/swagger_api/common_api.py:57
+#: community/apps/common/swagger_api/common_api.py:58
+msgid "Paragraph content"
+msgstr "段落內容"
+
+#: community/apps/application/serializers/chat_serializers.py:620
+#: community/apps/application/serializers/chat_serializers.py:679
+#: community/apps/application/serializers/chat_serializers.py:710
+#: community/apps/application/swagger_api/chat_api.py:370
+#: community/apps/application/swagger_api/chat_api.py:395
+#: community/apps/application/swagger_api/chat_api.py:396
+#: community/apps/application/swagger_api/chat_api.py:496
+#: community/apps/application/swagger_api/chat_api.py:497
+msgid "Document id"
+msgstr "文檔 ID"
+
+#: community/apps/application/serializers/chat_serializers.py:626
+#: community/apps/application/serializers/chat_serializers.py:717
+#: community/apps/dataset/serializers/paragraph_serializers.py:576
+msgid "The document id is incorrect"
+msgstr "文檔 id 不正確"
+
+#: community/apps/application/serializers/chat_serializers.py:681
+#: community/apps/application/swagger_api/chat_api.py:310
+#: community/apps/application/swagger_api/chat_api.py:311
+msgid "Paragraph id"
+msgstr "段落 ID"
+
+#: community/apps/application/serializers/chat_serializers.py:697
+#, python-brace-format
+msgid ""
+"The paragraph id is wrong. The current conversation record does not exist. "
+"[{paragraph_id}] paragraph id"
+msgstr "段落id錯誤。當前對話記錄不存在。[{paragraph_id}] 段落id"
+
+#: community/apps/application/serializers/chat_serializers.py:736
+#| msgid "Conversation record does not exist"
+msgid "Conversation records that do not exist"
+msgstr "對話記錄不存在"
+
+#: community/apps/application/swagger_api/application_api.py:24
+#: community/apps/application/views/chat_views.py:470
+#: community/apps/application/views/chat_views.py:471
+msgid "Upload files"
+msgstr "上傳文件"
+
+#: community/apps/application/swagger_api/application_api.py:35
+#: community/apps/application/swagger_api/application_api.py:36
+msgid "Application authentication token"
+msgstr "應用認證 token"
+
+#: community/apps/application/swagger_api/application_api.py:48
+#: community/apps/application/swagger_api/application_version_api.py:22
+#: community/apps/application/swagger_api/application_version_api.py:23
+msgid "Primary key id"
+msgstr "主鍵 id"
+
+#: community/apps/application/swagger_api/application_api.py:60
+msgid "Example List"
+msgstr "示例列表"
+
+#: community/apps/application/swagger_api/application_api.py:61
+#: community/apps/application/swagger_api/application_api.py:62
+msgid "Affiliation user"
+msgstr "所屬用戶"
+
+#: community/apps/application/swagger_api/application_api.py:64
+msgid "Is publish"
+msgstr "是否發佈"
+
+#: community/apps/application/swagger_api/application_api.py:66
+#: community/apps/application/swagger_api/application_api.py:67
+#: community/apps/application/swagger_api/application_version_api.py:28
+#: community/apps/application/swagger_api/application_version_api.py:29
+#: community/apps/application/swagger_api/chat_api.py:185
+#: community/apps/application/swagger_api/chat_api.py:186
+#: community/apps/application/swagger_api/chat_api.py:335
+#: community/apps/application/swagger_api/chat_api.py:336
+#: community/apps/application/swagger_api/chat_api.py:503
+#: community/apps/application/swagger_api/chat_api.py:504
+msgid "Creation time"
+msgstr "創建時間"
+
+#: community/apps/application/swagger_api/application_api.py:69
+#: community/apps/application/swagger_api/application_api.py:70
+#: community/apps/application/swagger_api/application_version_api.py:30
+#: community/apps/application/swagger_api/application_version_api.py:31
+#: community/apps/application/swagger_api/chat_api.py:332
+#: community/apps/application/swagger_api/chat_api.py:333
+#: community/apps/application/swagger_api/chat_api.py:500
+#: community/apps/application/swagger_api/chat_api.py:501
+msgid "Modification time"
+msgstr "修改時間"
+
+#: community/apps/application/swagger_api/application_api.py:74
+#: community/apps/application/swagger_api/application_api.py:194
+#: community/apps/application/swagger_api/application_api.py:195
+#: community/apps/application/swagger_api/application_api.py:343
+#: community/apps/application/swagger_api/application_api.py:344
+#: community/apps/application/swagger_api/chat_api.py:229
+#: community/apps/application/swagger_api/chat_api.py:230
+msgid "List of associated knowledge base IDs"
+msgstr "關聯知識庫 ID 列表"
+
+#: community/apps/application/swagger_api/application_api.py:76
+msgid "List of associated knowledge base IDs (returned when querying details)"
+msgstr "關聯知識庫ID列表(查詢詳情時返回)"
+
+#: community/apps/application/swagger_api/application_api.py:91
+msgid "Model Type"
+msgstr "模型類型"
+
+#: community/apps/application/swagger_api/application_api.py:117
+msgid "Application api_key id"
+msgstr "應用 api_key id"
+
+#: community/apps/application/swagger_api/application_api.py:126
+#: community/apps/application/swagger_api/application_api.py:127
+#: community/apps/application/swagger_api/application_api.py:156
+#: community/apps/application/swagger_api/application_api.py:157
+msgid "Is activation"
+msgstr "是否可用"
+
+#: community/apps/application/swagger_api/application_api.py:198
+#: community/apps/application/swagger_api/application_api.py:347
+#: community/apps/application/swagger_api/application_api.py:348
+msgid "Problem Optimization"
+msgstr "問題優化"
+
+#: community/apps/application/swagger_api/application_api.py:199
+msgid "Whether to enable problem optimization"
+msgstr "是否開啓問題優化"
+
+#: community/apps/application/swagger_api/application_api.py:204
+#: community/apps/application/swagger_api/application_api.py:350
+msgid "Application Type SIMPLE | WORK_FLOW"
+msgstr "應用類型 SIMPLE | WORK_FLOW"
+
+#: community/apps/application/swagger_api/application_api.py:207
+#: community/apps/application/swagger_api/application_api.py:208
+#: community/apps/application/swagger_api/application_api.py:352
+#: community/apps/application/swagger_api/application_api.py:353
+msgid "Question optimization tips"
+msgstr "問題優化提示詞"
+
+#: community/apps/application/swagger_api/application_api.py:211
+#: community/apps/application/swagger_api/application_api.py:212
+#: community/apps/application/swagger_api/application_api.py:356
+#: community/apps/application/swagger_api/application_api.py:357
+msgid "Text-to-speech model ID"
+msgstr "文本轉語音模型 ID"
+
+#: community/apps/application/swagger_api/application_api.py:213
+#: community/apps/application/swagger_api/application_api.py:214
+#: community/apps/application/swagger_api/application_api.py:358
+#: community/apps/application/swagger_api/application_api.py:359
+msgid "Speech-to-text model id"
+msgstr "語音轉文本模型 ID"
+
+#: community/apps/application/swagger_api/application_api.py:215
+#: community/apps/application/swagger_api/application_api.py:216
+#: community/apps/application/swagger_api/application_api.py:360
+#: community/apps/application/swagger_api/application_api.py:361
+msgid "Is speech-to-text enabled"
+msgstr "是否開啓語音轉文本"
+
+#: community/apps/application/swagger_api/application_api.py:217
+#: community/apps/application/swagger_api/application_api.py:218
+#: community/apps/application/swagger_api/application_api.py:362
+#: community/apps/application/swagger_api/application_api.py:363
+msgid "Is text-to-speech enabled"
+msgstr "是否開啓文本轉語音"
+
+#: community/apps/application/swagger_api/application_api.py:219
+#: community/apps/application/swagger_api/application_api.py:220
+#: community/apps/application/swagger_api/application_api.py:364
+#: community/apps/application/swagger_api/application_api.py:365
+msgid "Text-to-speech type"
+msgstr "文本轉語音類型"
+
+#: community/apps/application/swagger_api/application_api.py:233
+msgid "Node List"
+msgstr "節點列表"
+
+#: community/apps/application/swagger_api/application_api.py:236
+msgid "Connection List"
+msgstr "連線列表"
+
+#: community/apps/application/swagger_api/application_api.py:266
+msgid "state"
+msgstr "狀態"
+
+#: community/apps/application/swagger_api/application_api.py:268
+msgid "ai_questioning|designated_answer"
+msgstr "ai作答|指定答案"
+
+#: community/apps/application/swagger_api/application_api.py:273
+msgid ""
+"ai_questioning: is the title, designated_answer: is the designated answer "
+"content"
+msgstr "ai作答:就是題詞,指定回答:就是指定回答內容"
+
+#: community/apps/application/swagger_api/application_api.py:403
+#: community/apps/function_lib/swagger_api/function_lib_api.py:216
+msgid "Upload image files"
+msgstr "上傳圖片文件"
+
+#: community/apps/application/swagger_api/application_api.py:434
+#: community/apps/application/swagger_api/application_api.py:435
+msgid "Text"
+msgstr "文字"
+
+#: community/apps/application/swagger_api/application_statistics_api.py:41
+#: community/apps/application/swagger_api/application_statistics_api.py:42
+#: community/apps/application/swagger_api/chat_api.py:490
+#: community/apps/application/swagger_api/chat_api.py:491
+msgid "Number of Likes"
+msgstr "點贊數"
+
+#: community/apps/application/swagger_api/application_statistics_api.py:44
+#: community/apps/application/swagger_api/chat_api.py:492
+#: community/apps/application/swagger_api/chat_api.py:493
+msgid "Number of thumbs-downs"
+msgstr "點踩數"
+
+#: community/apps/application/swagger_api/application_statistics_api.py:45
+#: community/apps/application/swagger_api/application_statistics_api.py:46
+msgid "Number of tokens used"
+msgstr "token使用數量"
+
+#: community/apps/application/swagger_api/application_statistics_api.py:47
+#: community/apps/application/swagger_api/application_statistics_api.py:48
+msgid "Number of conversations"
+msgstr "對話次數"
+
+#: community/apps/application/swagger_api/application_statistics_api.py:49
+#: community/apps/application/swagger_api/application_statistics_api.py:50
+msgid "Number of customers"
+msgstr "客戶數量"
+
+#: community/apps/application/swagger_api/application_statistics_api.py:51
+#: community/apps/application/swagger_api/application_statistics_api.py:52
+msgid "Number of new customers"
+msgstr "客戶新增數量"
+
+#: community/apps/application/swagger_api/application_statistics_api.py:54
+#: community/apps/application/swagger_api/application_statistics_api.py:69
+#: community/apps/application/swagger_api/application_statistics_api.py:70
+msgid "time"
+msgstr "日期"
+
+#: community/apps/application/swagger_api/application_statistics_api.py:55
+msgid "Time, this field is only available when querying trends"
+msgstr "日期,只有查詢趨勢的時候纔有該字段"
+
+#: community/apps/application/swagger_api/application_statistics_api.py:66
+#: community/apps/application/swagger_api/application_statistics_api.py:83
+msgid "New quantity"
+msgstr "新增數量"
+
+#: community/apps/application/swagger_api/application_statistics_api.py:81
+#: community/apps/application/swagger_api/application_statistics_api.py:82
+msgid "Today's new quantity"
+msgstr "今日新增數量"
+
+#: community/apps/application/swagger_api/application_version_api.py:26
+#: community/apps/application/swagger_api/application_version_api.py:27
+msgid "Workflow data"
+msgstr "工作流數據"
+
+#: community/apps/application/swagger_api/application_version_api.py:61
+msgid "Application version id"
+msgstr "應用版本 id"
+
+#: community/apps/application/swagger_api/chat_api.py:61
+#: community/apps/application/swagger_api/chat_api.py:62
+#: community/apps/application/swagger_api/chat_api.py:92
+#: community/apps/dataset/serializers/problem_serializers.py:91
+msgid "problem"
+msgstr "問題"
+
+#: community/apps/application/swagger_api/chat_api.py:68
+msgid "Question content"
+msgstr "問題內容"
+
+#: community/apps/application/swagger_api/chat_api.py:72
+msgid "role"
+msgstr "角色"
+
+#: community/apps/application/swagger_api/chat_api.py:77
+#: community/apps/application/swagger_api/chat_api.py:93
+msgid "regenerate"
+msgstr "重新生成"
+
+#: community/apps/application/swagger_api/chat_api.py:79
+msgid "Stream Output"
+msgstr "流式輸出"
+
+#: community/apps/application/swagger_api/chat_api.py:94
+msgid "Is it streaming output"
+msgstr "是否流式輸出"
+
+#: community/apps/application/swagger_api/chat_api.py:96
+#: community/apps/application/swagger_api/chat_api.py:97
+#| msgid "Form Data"
+msgid "Form data"
+msgstr "表單數據"
+
+#: community/apps/application/swagger_api/chat_api.py:101
+#: community/apps/application/swagger_api/chat_api.py:102
+#| msgid "state list"
+msgid "Image list"
+msgstr "圖片列表"
+
+#: community/apps/application/swagger_api/chat_api.py:107
+msgid "Image name"
+msgstr "圖片名稱"
+
+#: community/apps/application/swagger_api/chat_api.py:109
+msgid "Image URL"
+msgstr "圖片網址"
+
+#: community/apps/application/swagger_api/chat_api.py:115
+#: community/apps/application/swagger_api/chat_api.py:116
+#: community/apps/dataset/views/document.py:133
+#: community/apps/dataset/views/document.py:134
+msgid "Document list"
+msgstr "文檔列表"
+
+#: community/apps/application/swagger_api/chat_api.py:122
+msgid "Document name"
+msgstr "文件名稱"
+
+#: community/apps/application/swagger_api/chat_api.py:124
+msgid "Document URL"
+msgstr "文件網址"
+
+#: community/apps/application/swagger_api/chat_api.py:129
+#: community/apps/application/swagger_api/chat_api.py:130
+#| msgid "id list"
+msgid "Audio list"
+msgstr "音頻列表"
+
+#: community/apps/application/swagger_api/chat_api.py:135
+msgid "Audio name"
+msgstr "音頻名稱"
+
+#: community/apps/application/swagger_api/chat_api.py:137
+msgid "Audio URL"
+msgstr "音頻網址"
+
+#: community/apps/application/swagger_api/chat_api.py:145
+#: community/apps/application/swagger_api/chat_api.py:146
+msgid "Node data"
+msgstr "節點數據"
+
+#: community/apps/application/swagger_api/chat_api.py:151
+#: community/apps/application/swagger_api/chat_api.py:152
+msgid "Child node"
+msgstr "子節點"
+
+#: community/apps/application/swagger_api/chat_api.py:173
+#: community/apps/application/swagger_api/chat_api.py:174
+msgid "Number of dialogue questions"
+msgstr "對話提問數量"
+
+#: community/apps/application/swagger_api/chat_api.py:176
+#: community/apps/application/swagger_api/chat_api.py:177
+msgid "Number of tags"
+msgstr "標記數量"
+
+#: community/apps/application/swagger_api/chat_api.py:178
+#: community/apps/application/swagger_api/chat_api.py:179
+#: community/apps/common/swagger_api/common_api.py:64
+#: community/apps/common/swagger_api/common_api.py:65
+#: community/apps/dataset/serializers/paragraph_serializers.py:711
+#: community/apps/dataset/serializers/paragraph_serializers.py:712
+msgid "Number of likes"
+msgstr "點贊數量"
+
+#: community/apps/application/swagger_api/chat_api.py:180
+#: community/apps/application/swagger_api/chat_api.py:181
+msgid "Number of clicks"
+msgstr "點踩數量"
+
+#: community/apps/application/swagger_api/chat_api.py:182
+#: community/apps/application/swagger_api/chat_api.py:183
+msgid "Change time"
+msgstr "修改時間"
+
+#: community/apps/application/swagger_api/chat_api.py:224
+msgid "Application ID, pass when modifying, do not pass when creating"
+msgstr "應用id,修改的時候傳,創建的時候不傳"
+
+#: community/apps/application/swagger_api/chat_api.py:225
+#: community/apps/application/swagger_api/chat_api.py:226
+msgid "Model ID"
+msgstr "模型 ID"
+
+#: community/apps/application/swagger_api/chat_api.py:232
+#: community/apps/application/swagger_api/chat_api.py:234
+msgid "Do you want to initiate multiple sessions"
+msgstr "是否開啓多輪會話"
+
+#: community/apps/application/swagger_api/chat_api.py:237
+msgid "Problem optimization"
+msgstr "問題優化"
+
+#: community/apps/application/swagger_api/chat_api.py:238
+msgid "Do you want to enable problem optimization"
+msgstr "是否開啓問題優化"
+
+#: community/apps/application/swagger_api/chat_api.py:254
+msgid "Historical days"
+msgstr "歷史天數"
+
+#: community/apps/application/swagger_api/chat_api.py:262
+msgid "or|and comparator"
+msgstr "or|and 比較器"
+
+#: community/apps/application/swagger_api/chat_api.py:266
+#| msgid "Start time"
+msgid "start time"
+msgstr "開始時間"
+
+#: community/apps/application/swagger_api/chat_api.py:291
+msgid "Is it ascending order"
+msgstr "是否升序"
+
+#: community/apps/application/swagger_api/chat_api.py:304
+msgid "Session log id"
+msgstr "會話日誌 id"
+
+#: community/apps/application/swagger_api/chat_api.py:305
+msgid "Conversation log id"
+msgstr "對話日誌 ID"
+
+#: community/apps/application/swagger_api/chat_api.py:306
+#: community/apps/application/swagger_api/chat_api.py:307
+#: community/apps/application/swagger_api/chat_api.py:446
+msgid "Voting Status"
+msgstr "投票狀態"
+
+#: community/apps/application/swagger_api/chat_api.py:308
+#: community/apps/application/swagger_api/chat_api.py:309
+msgid "Dataset id"
+msgstr "數據集 id"
+
+#: community/apps/application/swagger_api/chat_api.py:312
+#: community/apps/application/swagger_api/chat_api.py:313
+msgid "Resource ID"
+msgstr "資源 ID"
+
+#: community/apps/application/swagger_api/chat_api.py:314
+#: community/apps/application/swagger_api/chat_api.py:315
+msgid "Resource Type"
+msgstr "資源類型"
+
+#: community/apps/application/swagger_api/chat_api.py:317
+#: community/apps/application/swagger_api/chat_api.py:318
+msgid "Number of tokens consumed by the question"
+msgstr "問題消耗 token 數量"
+
+#: community/apps/application/swagger_api/chat_api.py:320
+#: community/apps/application/swagger_api/chat_api.py:321
+msgid "The number of tokens consumed by the answer"
+msgstr "答案消耗 token 數量"
+
+#: community/apps/application/swagger_api/chat_api.py:324
+#: community/apps/application/swagger_api/chat_api.py:325
+msgid "Improved annotation list"
+msgstr "改進標註列表"
+
+#: community/apps/application/swagger_api/chat_api.py:328
+msgid "Corresponding session Corresponding subscript"
+msgstr "對應會話對應下標"
+
+#: community/apps/application/swagger_api/chat_api.py:329
+msgid "Corresponding session id corresponding subscript"
+msgstr "對應會話id對應下標"
+
+#: community/apps/application/swagger_api/chat_api.py:397
+#: community/apps/application/swagger_api/chat_api.py:398
+msgid "Conversation id list"
+msgstr "會話 id 列表"
+
+#: community/apps/application/swagger_api/chat_api.py:447
+msgid "-1: Cancel vote | 0: Agree | 1: Oppose"
+msgstr "-1:取消投票|0:贊同|1:反對"
+
+#: community/apps/application/swagger_api/chat_api.py:485
+#: community/apps/application/swagger_api/chat_api.py:486
+#: community/apps/common/swagger_api/common_api.py:59
+#: community/apps/common/swagger_api/common_api.py:60
+#: community/apps/dataset/serializers/paragraph_serializers.py:687
+#: community/apps/dataset/serializers/paragraph_serializers.py:707
+#: community/apps/dataset/serializers/paragraph_serializers.py:708
+msgid "title"
+msgstr "標題"
+
+#: community/apps/application/swagger_api/chat_api.py:486
+#: community/apps/common/swagger_api/common_api.py:60
+msgid "Description of xxx"
+msgstr "xxx 描述"
+
+#: community/apps/application/swagger_api/chat_api.py:487
+#: community/apps/application/swagger_api/chat_api.py:488
+#: community/apps/common/swagger_api/common_api.py:61
+#: community/apps/common/swagger_api/common_api.py:62
+msgid "Number of hits"
+msgstr "命中數量"
+
+#: community/apps/application/views/application_version_views.py:28
+#: community/apps/application/views/application_version_views.py:29
+#: community/apps/application/views/application_views.py:489
+#: community/apps/application/views/application_views.py:490
+msgid "Get the application list"
+msgstr "獲取應用列表"
+
+#: community/apps/application/views/application_version_views.py:32
+#: community/apps/application/views/application_version_views.py:50
+#: community/apps/application/views/application_version_views.py:68
+#: community/apps/application/views/application_version_views.py:83
+msgid "Application/Version"
+msgstr "應用/版本"
+
+#: community/apps/application/views/application_version_views.py:45
+#: community/apps/application/views/application_version_views.py:46
+msgid "Get the list of application versions by page"
+msgstr "獲取應用版本列表分頁"
+
+#: community/apps/application/views/application_version_views.py:64
+#: community/apps/application/views/application_version_views.py:65
+msgid "Get application version details"
+msgstr "獲取應用版本詳情"
+
+#: community/apps/application/views/application_version_views.py:78
+#: community/apps/application/views/application_version_views.py:79
+msgid "Modify application version information"
+msgstr "修改應用版本信息"
+
+#: community/apps/application/views/application_views.py:42
+#: community/apps/application/views/application_views.py:43
+msgid "User Statistics"
+msgstr "用戶統計"
+
+#: community/apps/application/views/application_views.py:44
+#: community/apps/application/views/application_views.py:70
+#: community/apps/application/views/application_views.py:95
+#: community/apps/application/views/application_views.py:121
+msgid "Application/Statistics"
+msgstr "應用/統計"
+
+#: community/apps/application/views/application_views.py:68
+#: community/apps/application/views/application_views.py:69
+msgid "User demographic trends"
+msgstr "用戶統計趨勢"
+
+#: community/apps/application/views/application_views.py:93
+#: community/apps/application/views/application_views.py:94
+msgid "Conversation statistics"
+msgstr "對話相關統計"
+
+#: community/apps/application/views/application_views.py:119
+#: community/apps/application/views/application_views.py:120
+msgid "Dialogue-related statistical trends"
+msgstr "對話相關統計趨勢"
+
+#: community/apps/application/views/application_views.py:150
+#: community/apps/application/views/application_views.py:151
+msgid "Modify application icon"
+msgstr "修改應用圖標"
+
+#: community/apps/application/views/application_views.py:152
+#: community/apps/application/views/application_views.py:175
+#: community/apps/application/views/application_views.py:189
+#: community/apps/application/views/application_views.py:202
+#: community/apps/application/views/application_views.py:216
+#: community/apps/application/views/application_views.py:236
+#: community/apps/application/views/application_views.py:255
+#: community/apps/application/views/application_views.py:274
+#: community/apps/application/views/application_views.py:313
+#: community/apps/application/views/application_views.py:482
+#: community/apps/application/views/application_views.py:493
+#: community/apps/application/views/application_views.py:508
+#: community/apps/application/views/application_views.py:535
+#: community/apps/application/views/application_views.py:555
+#: community/apps/application/views/application_views.py:575
+#: community/apps/application/views/application_views.py:593
+#: community/apps/application/views/application_views.py:614
+#: community/apps/application/views/application_views.py:635
+#: community/apps/application/views/application_views.py:670
+msgid "Application"
+msgstr "應用"
+
+#: community/apps/application/views/application_views.py:173
+msgid "Import Application"
+msgstr "導入應用"
+
+#: community/apps/application/views/application_views.py:187
+msgid "Export Application"
+msgstr "導出應用"
+
+#: community/apps/application/views/application_views.py:200
+#: community/apps/application/views/application_views.py:201
+msgid "Get embedded js"
+msgstr "獲取嵌入 js"
+
+#: community/apps/application/views/application_views.py:214
+#: community/apps/application/views/application_views.py:215
+msgid "Get a list of models"
+msgstr "獲取模型列表"
+
+#: community/apps/application/views/application_views.py:234
+#: community/apps/application/views/application_views.py:235
+#: community/apps/setting/views/model.py:100
+#: community/apps/setting/views/model.py:101
+msgid "Get model parameter form"
+msgstr "獲取模型參數表單"
+
+#: community/apps/application/views/application_views.py:253
+#: community/apps/application/views/application_views.py:254
+msgid "Get a list of function libraries"
+msgstr "獲取函數庫列表"
+
+#: community/apps/application/views/application_views.py:272
+#: community/apps/application/views/application_views.py:273
+msgid "Get library details"
+msgstr "獲取函數庫詳情"
+
+#: community/apps/application/views/application_views.py:292
+#: community/apps/application/views/application_views.py:293
+msgid "Get the list of apps created by the current user"
+msgstr "獲取當前用戶創建的應用列表"
+
+#: community/apps/application/views/application_views.py:294
+#: community/apps/application/views/application_views.py:333
+#: community/apps/application/views/chat_views.py:74
+#: community/apps/application/views/chat_views.py:93
+#: community/apps/application/views/chat_views.py:105
+#: community/apps/application/views/chat_views.py:118
+#: community/apps/application/views/chat_views.py:347
+msgid "Application/Chat"
+msgstr "應用/對話"
+
+#: community/apps/application/views/application_views.py:311
+#: community/apps/application/views/application_views.py:312
+msgid "Get application data"
+msgstr "獲取應用數據"
+
+#: community/apps/application/views/application_views.py:331
+#: community/apps/application/views/application_views.py:332
+msgid "Get application related information"
+msgstr "獲取應用相關信息"
+
+#: community/apps/application/views/application_views.py:346
+#: community/apps/application/views/application_views.py:347
+msgid "Add ApiKey"
+msgstr "添加 ApiKey"
+
+#: community/apps/application/views/application_views.py:348
+#: community/apps/application/views/application_views.py:364
+#: community/apps/application/views/application_views.py:383
+#: community/apps/application/views/application_views.py:402
+msgid "Application/API_KEY"
+msgstr "應用/API_KEY"
+
+#: community/apps/application/views/application_views.py:362
+#: community/apps/application/views/application_views.py:363
+msgid "Get the application API_KEY list"
+msgstr "獲取應用 API_KEY 列表"
+
+#: community/apps/application/views/application_views.py:381
+#: community/apps/application/views/application_views.py:382
+msgid "Modify application API_KEY"
+msgstr "修改應用 API_KEY"
+
+#: community/apps/application/views/application_views.py:400
+#: community/apps/application/views/application_views.py:401
+msgid "Delete Application API_KEY"
+msgstr "刪除應用 API_KEY"
+
+#: community/apps/application/views/application_views.py:421
+#: community/apps/application/views/application_views.py:422
+msgid "Modify Application AccessToken"
+msgstr "修改應用訪問限制"
+
+#: community/apps/application/views/application_views.py:423
+#: community/apps/application/views/application_views.py:441
+msgid "Application/Public Access"
+msgstr "應用/公共訪問"
+
+#: community/apps/application/views/application_views.py:438
+#: community/apps/application/views/application_views.py:439
+msgid "Get the application AccessToken information"
+msgstr "獲取應用 AccessToken 信息"
+
+#: community/apps/application/views/application_views.py:462
+#: community/apps/application/views/application_views.py:463
+msgid "Application Certification"
+msgstr "應用認證"
+
+#: community/apps/application/views/application_views.py:465
+msgid "Application/Certification"
+msgstr "應用/認證"
+
+#: community/apps/application/views/application_views.py:479
+#: community/apps/application/views/application_views.py:480
+msgid "Create an application"
+msgstr "創建應用"
+
+#: community/apps/application/views/application_views.py:505
+msgid "Hit Test List"
+msgstr "命中測試列表"
+
+#: community/apps/application/views/application_views.py:530
+#: community/apps/application/views/application_views.py:531
+msgid "Publishing an application"
+msgstr "發佈應用"
+
+#: community/apps/application/views/application_views.py:551
+#: community/apps/application/views/application_views.py:552
+msgid "Deleting application"
+msgstr "刪除應用"
+
+#: community/apps/application/views/application_views.py:570
+#: community/apps/application/views/application_views.py:571
+msgid "Modify the application"
+msgstr "修改應用"
+
+#: community/apps/application/views/application_views.py:589
+#: community/apps/application/views/application_views.py:590
+msgid "Get application details"
+msgstr "獲取應用詳情"
+
+#: community/apps/application/views/application_views.py:609
+#: community/apps/application/views/application_views.py:610
+msgid "Get the knowledge base available to the current application"
+msgstr "獲取當前應用可用的知識庫"
+
+#: community/apps/application/views/application_views.py:630
+#: community/apps/application/views/application_views.py:631
+msgid "Get the application list by page"
+msgstr "獲取應用列表分頁"
+
+#: community/apps/application/views/application_views.py:665
+#: community/apps/application/views/application_views.py:666
+#| msgid "Text-to-speech type"
+msgid "text to speech"
+msgstr "文本轉語音"
+
+#: community/apps/application/views/chat_views.py:36
+#: community/apps/application/views/chat_views.py:37
+msgid "OpenAI Interface Dialogue"
+msgstr "openai接口對話"
+
+#: community/apps/application/views/chat_views.py:39
+msgid "OpenAI Dialogue"
+msgstr "openai對話"
+
+#: community/apps/application/views/chat_views.py:52
+#: community/apps/application/views/chat_views.py:53
+msgid "Export conversation"
+msgstr "導出對話"
+
+#: community/apps/application/views/chat_views.py:55
+#: community/apps/application/views/chat_views.py:156
+#: community/apps/application/views/chat_views.py:174
+#: community/apps/application/views/chat_views.py:197
+#: community/apps/application/views/chat_views.py:217
+#: community/apps/application/views/chat_views.py:235
+#: community/apps/application/views/chat_views.py:257
+#: community/apps/application/views/chat_views.py:282
+#: community/apps/application/views/chat_views.py:302
+#: community/apps/application/views/chat_views.py:324
+#: community/apps/application/views/chat_views.py:489
+msgid "Application/Conversation Log"
+msgstr "應用/對話日誌"
+
+#: community/apps/application/views/chat_views.py:71
+#: community/apps/application/views/chat_views.py:72
+msgid "Get the session id according to the application id"
+msgstr "獲取應用id對應的會話id"
+
+#: community/apps/application/views/chat_views.py:90
+#: community/apps/application/views/chat_views.py:91
+msgid "Get the workflow temporary session id"
+msgstr "獲取工作流臨時會話id"
+
+#: community/apps/application/views/chat_views.py:102
+#: community/apps/application/views/chat_views.py:103
+msgid "Get a temporary session id"
+msgstr "獲取臨時會話id"
+
+#: community/apps/application/views/chat_views.py:115
+#: community/apps/application/views/chat_views.py:116
+msgid "dialogue"
+msgstr "對話"
+
+#: community/apps/application/views/chat_views.py:152
+#: community/apps/application/views/chat_views.py:153
+msgid "Get the conversation list"
+msgstr "獲取對話列表"
+
+#: community/apps/application/views/chat_views.py:172
+#: community/apps/application/views/chat_views.py:173
+msgid "Delete a conversation"
+msgstr "刪除對話"
+
+#: community/apps/application/views/chat_views.py:192
+#: community/apps/application/views/chat_views.py:193
+msgid "Get client conversation list by paging"
+msgstr "獲取客戶對話列表分頁"
+
+#: community/apps/application/views/chat_views.py:215
+#: community/apps/application/views/chat_views.py:216
+msgid "Client deletes conversation"
+msgstr "客戶端刪除對話"
+
+#: community/apps/application/views/chat_views.py:232
+#: community/apps/application/views/chat_views.py:233
+msgid "Client modifies dialogue summary"
+msgstr "客戶端修改對話摘要"
+
+#: community/apps/application/views/chat_views.py:253
+#: community/apps/application/views/chat_views.py:254
+msgid "Get the conversation list by page"
+msgstr "獲取對話列表分頁"
+
+#: community/apps/application/views/chat_views.py:278
+#: community/apps/application/views/chat_views.py:279
+msgid "Get conversation record details"
+msgstr "獲取對話記錄詳情"
+
+#: community/apps/application/views/chat_views.py:298
+#: community/apps/application/views/chat_views.py:299
+msgid "Get a list of conversation records"
+msgstr "獲取對話記錄列表"
+
+#: community/apps/application/views/chat_views.py:319
+#: community/apps/application/views/chat_views.py:320
+msgid "Get the conversation history list by page"
+msgstr "獲取對話歷史列表分頁"
+
+#: community/apps/application/views/chat_views.py:342
+#: community/apps/application/views/chat_views.py:343
+msgid "Like, Dislike"
+msgstr "點贊,點踩"
+
+#: community/apps/application/views/chat_views.py:365
+#: community/apps/application/views/chat_views.py:366
+msgid "Get the list of marked paragraphs"
+msgstr "獲取標記段落列表"
+
+#: community/apps/application/views/chat_views.py:369
+#: community/apps/application/views/chat_views.py:390
+#: community/apps/application/views/chat_views.py:442
+msgid "Application/Conversation Log/Annotation"
+msgstr "應用/對話日誌/標註"
+
+#: community/apps/application/views/chat_views.py:412
+#: community/apps/application/views/chat_views.py:413
+msgid "Add to Knowledge Base"
+msgstr "添加到知識庫"
+
+#: community/apps/application/views/chat_views.py:416
+msgid "Application/Conversation Log/Add to Knowledge Base"
+msgstr "應用/對話日誌/添加到知識庫"
+
+#: community/apps/application/views/chat_views.py:438
+#: community/apps/application/views/chat_views.py:439
+msgid "Delete a Annotation"
+msgstr "刪除標註"
+
+#: community/apps/application/views/chat_views.py:487
+#: community/apps/dataset/views/file.py:28
+#: community/apps/dataset/views/file.py:29
+#: community/apps/dataset/views/file.py:34
+msgid "Upload file"
+msgstr "上傳文件"
+
+#: community/apps/common/auth/authenticate.py:62
+#: community/apps/common/auth/authenticate.py:83
+msgid "Not logged in, please log in first"
+msgstr "未登錄,請先登錄"
+
+#: community/apps/common/auth/authenticate.py:68
+#: community/apps/common/auth/authenticate.py:74
+#: community/apps/common/auth/authenticate.py:89
+#: community/apps/common/auth/authenticate.py:95
+msgid "Authentication information is incorrect! illegal user"
+msgstr "非法用戶!認證信息不正確"
+
+#: community/apps/common/auth/authentication.py:94
+msgid "No permission to access"
+msgstr "沒有權限訪問"
+
+#: community/apps/common/auth/handle/impl/application_key.py:23
+#: community/apps/common/auth/handle/impl/application_key.py:25
+msgid "Secret key is invalid"
+msgstr "secret key無效"
+
+#: community/apps/common/auth/handle/impl/public_access_token.py:48
+#: community/apps/common/auth/handle/impl/public_access_token.py:50
+#: community/apps/common/auth/handle/impl/public_access_token.py:52
+#: community/apps/common/auth/handle/impl/public_access_token.py:54
+msgid "Authentication information is incorrect"
+msgstr "認證信息不正確"
+
+#: community/apps/common/auth/handle/impl/user_token.py:34
+msgid "Login expired"
+msgstr "登錄過期"
+
+#: community/apps/common/constants/exception_code_constants.py:31
+msgid "The username or password is incorrect"
+msgstr "用戶名或密碼錯誤"
+
+#: community/apps/common/constants/exception_code_constants.py:32
+msgid "Please log in first and bring the user Token"
+msgstr "請先登錄並攜帶用戶Token"
+
+#: community/apps/common/constants/exception_code_constants.py:33
+#: community/apps/users/serializers/user_serializers.py:429
+msgid "Email sending failed"
+msgstr "郵箱發送失敗"
+
+#: community/apps/common/constants/exception_code_constants.py:34
+msgid "Email format error"
+msgstr "郵箱格式錯誤"
+
+#: community/apps/common/constants/exception_code_constants.py:35
+msgid "The email has been registered, please log in directly"
+msgstr "郵箱已註冊,請直接登錄"
+
+#: community/apps/common/constants/exception_code_constants.py:36
+msgid "The email is not registered, please register first"
+msgstr "郵箱未註冊,請先註冊"
+
+#: community/apps/common/constants/exception_code_constants.py:38
+msgid "The verification code is incorrect or the verification code has expired"
+msgstr "驗證碼錯誤或驗證碼已過期"
+
+#: community/apps/common/constants/exception_code_constants.py:39
+msgid "The username has been registered, please log in directly"
+msgstr "用戶名已註冊,請直接登錄"
+
+#: community/apps/common/constants/exception_code_constants.py:41
+msgid ""
+"The username cannot be empty and must be between 6 and 20 characters long."
+msgstr "用戶名不能爲空,且長度必須在6-20個字符之間。"
+
+#: community/apps/common/constants/exception_code_constants.py:43
+msgid "Password and confirmation password are inconsistent"
+msgstr "密碼和確認密碼不一致"
+
+#: community/apps/common/constants/permission_constants.py:61
+msgid "ADMIN"
+msgstr "管理員"
+
+#: community/apps/common/constants/permission_constants.py:61
+msgid "Admin, prefabs are not currently used"
+msgstr "管理員,預製目前不會使用"
+
+#: community/apps/common/constants/permission_constants.py:62
+msgid "USER"
+msgstr "用戶"
+
+#: community/apps/common/constants/permission_constants.py:62
+msgid "All user permissions"
+msgstr "所有用戶權限"
+
+#: community/apps/common/constants/permission_constants.py:63
+msgid "chat"
+msgstr "對話"
+
+#: community/apps/common/constants/permission_constants.py:63
+msgid "Only has application dialog interface permissions"
+msgstr "只擁有應用對話接口權限"
+
+#: community/apps/common/constants/permission_constants.py:64
+msgid "Apply private key"
+msgstr "應用私鑰"
+
+#: community/apps/common/event/__init__.py:30
+msgid "The download process was interrupted, please try again"
+msgstr "下載過程中斷,請重試"
+
+#: community/apps/common/event/listener_manage.py:91
+#, python-brace-format
+msgid "Query vector data: {paragraph_id_list} error {error} {traceback}"
+msgstr "向量數據查詢: {paragraph_id_list} 錯誤 {error} {traceback}"
+
+#: community/apps/common/event/listener_manage.py:96
+#, python-brace-format
+msgid "Start--->Embedding paragraph: {paragraph_id_list}"
+msgstr "開始--->嵌入段落: {paragraph_id_list}"
+
+#: community/apps/common/event/listener_manage.py:108
+#, python-brace-format
+msgid "Vectorized paragraph: {paragraph_id_list} error {error} {traceback}"
+msgstr "向量化段落: {paragraph_id_list} 錯誤 {error} {traceback}"
+
+#: community/apps/common/event/listener_manage.py:114
+#, python-brace-format
+msgid "End--->Embedding paragraph: {paragraph_id_list}"
+msgstr "結束--->嵌入段落: {paragraph_id_list}"
+
+#: community/apps/common/event/listener_manage.py:123
+#, python-brace-format
+msgid "Start--->Embedding paragraph: {paragraph_id}"
+msgstr "開始--->嵌入段落: {paragraph_id}"
+
+#: community/apps/common/event/listener_manage.py:148
+#, python-brace-format
+msgid "Vectorized paragraph: {paragraph_id} error {error} {traceback}"
+msgstr "向量化段落: {paragraph_id} 錯誤 {error} {traceback}"
+
+#: community/apps/common/event/listener_manage.py:153
+#, python-brace-format
+msgid "End--->Embedding paragraph: {paragraph_id}"
+msgstr "結束--->嵌入段落: {paragraph_id}"
+
+#: community/apps/common/event/listener_manage.py:269
+#, python-brace-format
+msgid "Start--->Embedding document: {document_id}"
+msgstr "開始--->嵌入文檔: {document_id}"
+
+#: community/apps/common/event/listener_manage.py:291
+#, python-brace-format
+msgid "Vectorized document: {document_id} error {error} {traceback}"
+msgstr "向量化文檔: {document_id} 錯誤 {error} {traceback}"
+
+#: community/apps/common/event/listener_manage.py:296
+#, python-brace-format
+msgid "End--->Embedding document: {document_id}"
+msgstr "結束--->嵌入文檔: {document_id}"
+
+#: community/apps/common/event/listener_manage.py:307
+#, python-brace-format
+msgid "Start--->Embedding dataset: {dataset_id}"
+msgstr "開始--->嵌入知識庫: {dataset_id}"
+
+#: community/apps/common/event/listener_manage.py:311
+#, python-brace-format
+msgid "Start--->Embedding document: {document_list}"
+msgstr "開始--->嵌入文檔: {document_list}"
+
+#: community/apps/common/event/listener_manage.py:315
+#: community/apps/embedding/task/embedding.py:123
+#, python-brace-format
+msgid "Vectorized dataset: {dataset_id} error {error} {traceback}"
+msgstr "向量化知識庫: {dataset_id} 錯誤 {error} {traceback}"
+
+#: community/apps/common/event/listener_manage.py:318
+#, python-brace-format
+msgid "End--->Embedding dataset: {dataset_id}"
+msgstr "結束--->嵌入知識庫: {dataset_id}"
+
+#: community/apps/common/field/common.py:45
+msgid "not a function"
+msgstr "不是函數"
+
+#: community/apps/common/forms/base_field.py:64
+#, python-brace-format
+msgid "The field {field_label} is required"
+msgstr "字段 {field_label} 是必填的"
+
+#: community/apps/common/forms/slider_field.py:56
+#, python-brace-format
+msgid "The {field_label} cannot be less than {min}"
+msgstr "字段 {field_label} 不能小於 {min}"
+
+#: community/apps/common/forms/slider_field.py:62
+#, python-brace-format
+msgid "The {field_label} cannot be greater than {max}"
+msgstr "{field_label} 不能大於 {max}"
+
+#: community/apps/common/handle/handle_exception.py:30
+msgid "Unknown exception"
+msgstr "未知異常"
+
+#: community/apps/common/handle/impl/pdf_split_handle.py:278
+#, python-brace-format
+msgid "This document has no preface and is treated as ordinary text: {e}"
+msgstr "文檔沒有前言,視爲普通文本: {e}"
+
+#: community/apps/common/init/init_doc.py:26
+#: community/apps/common/init/init_doc.py:45
+msgid "Intelligent customer service platform"
+msgstr "智能客服平臺"
+
+#: community/apps/common/job/clean_chat_job.py:25
+msgid "start clean chat log"
+msgstr "開始清理對話日誌"
+
+#: community/apps/common/job/clean_chat_job.py:71
+msgid "end clean chat log"
+msgstr "結束清理對話日誌"
+
+#: community/apps/common/job/clean_debug_file_job.py:21
+msgid "start clean debug file"
+msgstr "開始清理調試文件"
+
+#: community/apps/common/job/clean_debug_file_job.py:25
+msgid "end clean debug file"
+msgstr "結束清理調試文件"
+
+#: community/apps/common/job/client_access_num_job.py:25
+msgid "start reset access_num"
+msgstr "開始重置訪問次數"
+
+#: community/apps/common/job/client_access_num_job.py:27
+msgid "end reset access_num"
+msgstr "結束重置訪問次數"
+
+#: community/apps/common/log/log.py:37
+msgid "unknown"
+msgstr "未知的"
+
+#: community/apps/common/response/result.py:24
+msgid "Success"
+msgstr "成功"
+
+#: community/apps/common/response/result.py:36
+#: community/apps/common/response/result.py:80
+#: community/apps/common/response/result.py:82
+msgid "current page"
+msgstr "當前頁"
+
+#: community/apps/common/response/result.py:42
+#: community/apps/common/response/result.py:85
+#: community/apps/common/response/result.py:87
+msgid "page size"
+msgstr "每頁數量"
+
+#: community/apps/common/response/result.py:53
+#: community/apps/common/response/result.py:101
+#: community/apps/common/response/result.py:130
+msgid "response parameters"
+msgstr "響應參數"
+
+#: community/apps/common/response/result.py:59
+#: community/apps/common/response/result.py:107
+#: community/apps/common/response/result.py:136
+msgid "response code"
+msgstr "響應碼"
+
+#: community/apps/common/response/result.py:61
+#: community/apps/common/response/result.py:109
+#: community/apps/common/response/result.py:138
+msgid "success:200 fail:other"
+msgstr "成功:200 失敗:其他"
+
+#: community/apps/common/response/result.py:64
+#: community/apps/common/response/result.py:112
+#: community/apps/common/response/result.py:141
+msgid "prompt"
+msgstr "提示"
+
+#: community/apps/common/response/result.py:65
+#: community/apps/common/response/result.py:113
+#: community/apps/common/response/result.py:142
+msgid "success"
+msgstr "成功"
+
+#: community/apps/common/response/result.py:66
+#: community/apps/common/response/result.py:114
+#: community/apps/common/response/result.py:143
+msgid "error prompt"
+msgstr "錯誤提示"
+
+#: community/apps/common/response/result.py:72
+#: community/apps/common/response/result.py:74
+msgid "total number of data"
+msgstr "總條數"
+
+#: community/apps/common/swagger_api/common_api.py:24
+#: community/apps/dataset/serializers/dataset_serializers.py:569
+msgid "query text"
+msgstr "查詢文本"
+
+#: community/apps/common/swagger_api/common_api.py:42
+msgid "Retrieval pattern embedding|keywords|blend"
+msgstr "檢索模式 embedding|keywords|blend"
+
+#: community/apps/common/swagger_api/common_api.py:66
+#: community/apps/common/swagger_api/common_api.py:67
+msgid "Number of clicks and dislikes"
+msgstr "點踩數"
+
+#: community/apps/common/swagger_api/common_api.py:74
+#: community/apps/common/swagger_api/common_api.py:75
+msgid "relevance score"
+msgstr "相關性得分"
+
+#: community/apps/common/swagger_api/common_api.py:76
+#: community/apps/common/swagger_api/common_api.py:77
+msgid "Comprehensive score, used for ranking"
+msgstr "綜合得分,用於排序"
+
+#: community/apps/common/swagger_api/common_api.py:78
+#: community/apps/common/swagger_api/common_api.py:79
+#: community/apps/users/serializers/user_serializers.py:591
+#: community/apps/users/serializers/user_serializers.py:592
+msgid "Update time"
+msgstr "更新時間"
+
+#: community/apps/common/swagger_api/common_api.py:81
+#: community/apps/common/swagger_api/common_api.py:82
+#: community/apps/users/serializers/user_serializers.py:589
+#: community/apps/users/serializers/user_serializers.py:590
+msgid "Create time"
+msgstr "創建時間"
+
+#: community/apps/common/util/common.py:239
+msgid "Text-to-speech node, the text content must be of string type"
+msgstr "文字轉語音節點,文字內容必須是字串類型"
+
+#: community/apps/common/util/common.py:241
+msgid "Text-to-speech node, the text content cannot be empty"
+msgstr "文字轉語音節點,文字內容不能為空"
+
+#: community/apps/dataset/serializers/common_serializers.py:87
+msgid "source url"
+msgstr "文檔地址"
+
+#: community/apps/dataset/serializers/common_serializers.py:89
+#: community/apps/dataset/serializers/dataset_serializers.py:333
+#: community/apps/dataset/serializers/dataset_serializers.py:390
+#: community/apps/dataset/serializers/dataset_serializers.py:391
+#: community/apps/dataset/serializers/document_serializers.py:155
+#: community/apps/dataset/serializers/document_serializers.py:181
+msgid "selector"
+msgstr "選擇器"
+
+#: community/apps/dataset/serializers/common_serializers.py:96
+#: community/apps/dataset/serializers/dataset_serializers.py:341
+#, python-brace-format
+msgid "URL error, cannot parse [{source_url}]"
+msgstr "URL錯誤,無法解析 [{source_url}]"
+
+#: community/apps/dataset/serializers/common_serializers.py:105
+#: community/apps/dataset/serializers/common_serializers.py:124
+#: community/apps/dataset/serializers/common_serializers.py:125
+#: community/apps/dataset/serializers/document_serializers.py:85
+#: community/apps/dataset/swagger_api/document_api.py:23
+#: community/apps/dataset/swagger_api/document_api.py:24
+#: community/apps/dataset/swagger_api/document_api.py:49
+#: community/apps/dataset/swagger_api/document_api.py:50
+msgid "id list"
+msgstr "id 列表"
+
+#: community/apps/dataset/serializers/common_serializers.py:115
+#, python-brace-format
+msgid "The following id does not exist: {error_id_list}"
+msgstr "id不存在: {error_id_list}"
+
+#: community/apps/dataset/serializers/common_serializers.py:183
+#: community/apps/dataset/serializers/common_serializers.py:207
+msgid "The knowledge base is inconsistent with the vector model"
+msgstr "知識庫與向量模型不一致"
+
+#: community/apps/dataset/serializers/common_serializers.py:185
+#: community/apps/dataset/serializers/common_serializers.py:209
+msgid "Knowledge base setting error, please reset the knowledge base"
+msgstr "知識庫設置錯誤,請重新設置知識庫"
+
+#: community/apps/dataset/serializers/dataset_serializers.py:109
+#: community/apps/dataset/serializers/dataset_serializers.py:110
+#: community/apps/setting/serializers/model_apply_serializers.py:51
+msgid "model id"
+msgstr "模型 id"
+
+#: community/apps/dataset/serializers/dataset_serializers.py:112
+#: community/apps/dataset/serializers/dataset_serializers.py:114
+msgid "Whether to start multiple rounds of dialogue"
+msgstr "是否開啓多輪對話"
+
+#: community/apps/dataset/serializers/dataset_serializers.py:115
+#: community/apps/dataset/serializers/dataset_serializers.py:116
+msgid "opening remarks"
+msgstr "開場白"
+
+#: community/apps/dataset/serializers/dataset_serializers.py:118
+msgid "example"
+msgstr "示例"
+
+#: community/apps/dataset/serializers/dataset_serializers.py:119
+msgid "User id"
+msgstr "用戶 id"
+
+#: community/apps/dataset/serializers/dataset_serializers.py:121
+#: community/apps/dataset/serializers/dataset_serializers.py:122
+msgid "Whether to publish"
+msgstr "是否發佈"
+
+#: community/apps/dataset/serializers/dataset_serializers.py:124
+#: community/apps/dataset/serializers/dataset_serializers.py:125
+#: community/apps/dataset/serializers/dataset_serializers.py:304
+#: community/apps/dataset/serializers/dataset_serializers.py:305
+#: community/apps/dataset/serializers/dataset_serializers.py:366
+#: community/apps/dataset/serializers/dataset_serializers.py:367
+#: community/apps/dataset/serializers/dataset_serializers.py:511
+#: community/apps/dataset/serializers/dataset_serializers.py:512
+#: community/apps/dataset/serializers/dataset_serializers.py:942
+#: community/apps/dataset/serializers/dataset_serializers.py:943
+#: community/apps/dataset/serializers/document_serializers.py:824
+#: community/apps/dataset/serializers/document_serializers.py:825
+#: community/apps/dataset/serializers/paragraph_serializers.py:200
+#: community/apps/dataset/serializers/paragraph_serializers.py:201
+#: community/apps/dataset/serializers/paragraph_serializers.py:724
+#: community/apps/dataset/serializers/paragraph_serializers.py:725
+#: community/apps/dataset/swagger_api/problem_api.py:33
+#: community/apps/dataset/swagger_api/problem_api.py:34
+#: community/apps/dataset/swagger_api/problem_api.py:135
+#: community/apps/dataset/swagger_api/problem_api.py:136
+#: community/apps/function_lib/swagger_api/function_lib_api.py:32
+#: community/apps/function_lib/swagger_api/function_lib_api.py:33
+msgid "create time"
+msgstr "創建時間"
+
+#: community/apps/dataset/serializers/dataset_serializers.py:127
+#: community/apps/dataset/serializers/dataset_serializers.py:128
+#: community/apps/dataset/serializers/dataset_serializers.py:301
+#: community/apps/dataset/serializers/dataset_serializers.py:302
+#: community/apps/dataset/serializers/dataset_serializers.py:363
+#: community/apps/dataset/serializers/dataset_serializers.py:364
+#: community/apps/dataset/serializers/dataset_serializers.py:508
+#: community/apps/dataset/serializers/dataset_serializers.py:509
+#: community/apps/dataset/serializers/dataset_serializers.py:939
+#: community/apps/dataset/serializers/dataset_serializers.py:940
+#: community/apps/dataset/serializers/document_serializers.py:821
+#: community/apps/dataset/serializers/document_serializers.py:822
+#: community/apps/dataset/serializers/paragraph_serializers.py:197
+#: community/apps/dataset/serializers/paragraph_serializers.py:198
+#: community/apps/dataset/serializers/paragraph_serializers.py:721
+#: community/apps/dataset/serializers/paragraph_serializers.py:722
+#: community/apps/dataset/swagger_api/problem_api.py:30
+#: community/apps/dataset/swagger_api/problem_api.py:31
+#: community/apps/dataset/swagger_api/problem_api.py:132
+#: community/apps/dataset/swagger_api/problem_api.py:133
+#: community/apps/function_lib/swagger_api/function_lib_api.py:34
+#: community/apps/function_lib/swagger_api/function_lib_api.py:35
+msgid "update time"
+msgstr "更新時間"
+
+#: community/apps/dataset/serializers/dataset_serializers.py:257
+#: community/apps/dataset/serializers/dataset_serializers.py:260
+#: community/apps/dataset/serializers/document_serializers.py:211
+#: community/apps/dataset/serializers/document_serializers.py:218
+#: community/apps/dataset/serializers/document_serializers.py:987
+#: community/apps/dataset/serializers/document_serializers.py:1016
+msgid "file list"
+msgstr "文件列表"
+
+#: community/apps/dataset/serializers/dataset_serializers.py:269
+msgid "upload files "
+msgstr "上傳文件"
+
+#: community/apps/dataset/serializers/dataset_serializers.py:297
+#: community/apps/dataset/serializers/dataset_serializers.py:298
+#: community/apps/dataset/serializers/dataset_serializers.py:359
+#: community/apps/dataset/serializers/dataset_serializers.py:360
+#: community/apps/dataset/serializers/dataset_serializers.py:504
+#: community/apps/dataset/serializers/dataset_serializers.py:505
+#: community/apps/dataset/serializers/dataset_serializers.py:935
+#: community/apps/dataset/serializers/dataset_serializers.py:936
+#: community/apps/dataset/serializers/document_serializers.py:814
+#: community/apps/dataset/serializers/document_serializers.py:815
+msgid "char length"
+msgstr "字符長度"
+
+#: community/apps/dataset/serializers/dataset_serializers.py:299
+#: community/apps/dataset/serializers/dataset_serializers.py:300
+#: community/apps/dataset/serializers/dataset_serializers.py:361
+#: community/apps/dataset/serializers/dataset_serializers.py:362
+#: community/apps/dataset/serializers/dataset_serializers.py:506
+#: community/apps/dataset/serializers/dataset_serializers.py:507
+#: community/apps/dataset/serializers/dataset_serializers.py:937
+#: community/apps/dataset/serializers/dataset_serializers.py:938
+msgid "document count"
+msgstr "文檔數量"
+
+#: community/apps/dataset/serializers/dataset_serializers.py:308
+#: community/apps/dataset/serializers/dataset_serializers.py:309
+#: community/apps/dataset/serializers/dataset_serializers.py:370
+#: community/apps/dataset/serializers/dataset_serializers.py:371
+#: community/apps/dataset/serializers/dataset_serializers.py:515
+#: community/apps/dataset/serializers/dataset_serializers.py:516
+#: community/apps/dataset/serializers/document_serializers.py:290
+#: community/apps/dataset/serializers/document_serializers.py:485
+msgid "document list"
+msgstr "文檔列表"
+
+#: community/apps/dataset/serializers/dataset_serializers.py:327
+#: community/apps/dataset/serializers/dataset_serializers.py:388
+#: community/apps/dataset/serializers/dataset_serializers.py:389
+msgid "web source url"
+msgstr "web站點url"
+
+#: community/apps/dataset/serializers/dataset_serializers.py:414
+#: community/apps/setting/serializers/valid_serializers.py:26
+msgid ""
+"The community version supports up to 50 knowledge bases. If you need more "
+"knowledge bases, please contact us (https://fit2cloud.com/)."
+msgstr ""
+"社區版最多支持 50 個知識庫,如需擁有更多知識庫,請聯繫我們(https://"
+"fit2cloud.com/)。"
+
+#: community/apps/dataset/serializers/dataset_serializers.py:533
+#: community/apps/dataset/serializers/dataset_serializers.py:534
+msgid "documents"
+msgstr "文檔"
+
+#: community/apps/dataset/serializers/dataset_serializers.py:577
+msgid "search mode"
+msgstr "搜索模式"
+
+#: community/apps/dataset/serializers/dataset_serializers.py:582
+#: community/apps/dataset/serializers/dataset_serializers.py:618
+#: community/apps/dataset/serializers/dataset_serializers.py:706
+msgid "id does not exist"
+msgstr "ID 不存在"
+
+#: community/apps/dataset/serializers/dataset_serializers.py:609
+msgid "sync type"
+msgstr "同步類型"
+
+#: community/apps/dataset/serializers/dataset_serializers.py:611
+msgid "The synchronization type only supports:replace|complete"
+msgstr "同步類型只支持:replace|complete"
+
+#: community/apps/dataset/serializers/dataset_serializers.py:620
+#: community/apps/dataset/serializers/document_serializers.py:499
+msgid "Synchronization is only supported for web site types"
+msgstr "只有web站點類型才支持同步"
+
+#: community/apps/dataset/serializers/dataset_serializers.py:694
+msgid ""
+"Synchronization type->replace: replacement synchronization, complete: "
+"complete synchronization"
+msgstr "同步類型->replace:替換同步,complete:完整同步"
+
+#: community/apps/dataset/serializers/dataset_serializers.py:803
+#: community/apps/dataset/serializers/document_serializers.py:748
+#: community/apps/setting/models_provider/tools.py:25
+msgid "No permission to use this model"
+msgstr "無權限使用該模型"
+
+#: community/apps/dataset/serializers/dataset_serializers.py:815
+msgid "Failed to send the vectorization task, please try again later!"
+msgstr "向量化任務發送失敗,請稍後再試!"
+
+#: community/apps/dataset/serializers/dataset_serializers.py:911
+#: community/apps/dataset/serializers/document_serializers.py:846
+msgid "meta"
+msgstr "知識庫元數據"
+
+#: community/apps/dataset/serializers/dataset_serializers.py:913
+msgid "Knowledge base metadata->web:{source_url:xxx,selector:'xxx'},base:{}"
+msgstr "知識庫元數據->web:{source_url:xxx,selector:'xxx'},base:{}"
+
+#: community/apps/dataset/serializers/document_serializers.py:87
+#: community/apps/dataset/serializers/document_serializers.py:100
+#: community/apps/dataset/serializers/document_serializers.py:416
+#: community/apps/dataset/swagger_api/document_api.py:37
+#: community/apps/dataset/swagger_api/document_api.py:51
+msgid "task type"
+msgstr "任務類型"
+
+#: community/apps/dataset/serializers/document_serializers.py:95
+#: community/apps/dataset/serializers/document_serializers.py:108
+msgid "task type not support"
+msgstr "任務類型不支持"
+
+#: community/apps/dataset/serializers/document_serializers.py:115
+#: community/apps/dataset/serializers/document_serializers.py:188
+#: community/apps/dataset/serializers/document_serializers.py:200
+#: community/apps/dataset/serializers/document_serializers.py:201
+#: community/apps/dataset/serializers/document_serializers.py:412
+#: community/apps/dataset/serializers/document_serializers.py:476
+#: community/apps/dataset/serializers/document_serializers.py:836
+#: community/apps/dataset/serializers/document_serializers.py:837
+msgid "document name"
+msgstr "文檔名稱"
+
+#: community/apps/dataset/serializers/document_serializers.py:118
+msgid "The type only supports optimization|directly_return"
+msgstr "類型只支持 optimization|directly_return"
+
+#: community/apps/dataset/serializers/document_serializers.py:120
+#: community/apps/dataset/serializers/document_serializers.py:414
+#: community/apps/dataset/serializers/document_serializers.py:480
+#: community/apps/dataset/serializers/document_serializers.py:840
+#: community/apps/dataset/swagger_api/document_api.py:25
+msgid "hit handling method"
+msgstr "命中處理方法"
+
+#: community/apps/dataset/serializers/document_serializers.py:126
+#: community/apps/dataset/serializers/document_serializers.py:844
+#: community/apps/dataset/swagger_api/document_api.py:27
+msgid "directly return similarity"
+msgstr "直接返回相似度"
+
+#: community/apps/dataset/serializers/document_serializers.py:129
+#: community/apps/dataset/serializers/document_serializers.py:415
+msgid "document is active"
+msgstr "文檔是否可用"
+
+#: community/apps/dataset/serializers/document_serializers.py:150
+#: community/apps/dataset/serializers/document_serializers.py:152
+msgid "document url list"
+msgstr "文檔 url 列表"
+
+#: community/apps/dataset/serializers/document_serializers.py:178
+#: community/apps/dataset/serializers/document_serializers.py:179
+msgid "source url list"
+msgstr "文檔地址列表"
+
+#: community/apps/dataset/serializers/document_serializers.py:202
+#: community/apps/dataset/serializers/document_serializers.py:203
+msgid "paragraphs"
+msgstr "段落"
+
+#: community/apps/dataset/serializers/document_serializers.py:227
+msgid "The template type only supports excel|csv"
+msgstr "模版類型只支持 excel|csv"
+
+#: community/apps/dataset/serializers/document_serializers.py:237
+msgid "Export template type csv|excel"
+msgstr "導出模版類型 csv|excel"
+
+#: community/apps/dataset/serializers/document_serializers.py:289
+#: community/apps/dataset/serializers/paragraph_serializers.py:304
+#: community/apps/dataset/serializers/paragraph_serializers.py:436
+msgid "target dataset id"
+msgstr "目標知識庫 id"
+
+#: community/apps/dataset/serializers/document_serializers.py:391
+#: community/apps/dataset/serializers/paragraph_serializers.py:305
+#: community/apps/dataset/serializers/paragraph_serializers.py:441
+msgid "target document id"
+msgstr "目標文檔 id"
+
+#: community/apps/dataset/serializers/document_serializers.py:399
+#: community/apps/dataset/serializers/document_serializers.py:400
+msgid "document id list"
+msgstr "文檔 id 列表"
+
+#: community/apps/dataset/serializers/document_serializers.py:418
+msgid "order by"
+msgstr "排序"
+
+#: community/apps/dataset/serializers/document_serializers.py:653
+msgid "Section title (optional)"
+msgstr "分段標題(選填)"
+
+#: community/apps/dataset/serializers/document_serializers.py:654
+msgid ""
+"Section content (required, question answer, no more than 4096 characters)"
+msgstr "分段內容(必填,問題答案,最長不超過4096個字元)"
+
+#: community/apps/dataset/serializers/document_serializers.py:655
+msgid "Question (optional, one per line in the cell)"
+msgstr "問題(選填,儲存格內一行一個)"
+
+#: community/apps/dataset/serializers/document_serializers.py:765
+msgid "The task is being executed, please do not send it repeatedly."
+msgstr "任務正在執行中,請勿重複發送"
+
+#: community/apps/dataset/serializers/document_serializers.py:842
+msgid "ai optimization: optimization, direct return: directly_return"
+msgstr "ai優化: optimization, 直接返回: directly_return"
+
+#: community/apps/dataset/serializers/document_serializers.py:848
+msgid "Document metadata->web:{source_url:xxx,selector:'xxx'},base:{}"
+msgstr "文檔元數據->web:{source_url:xxx,selector:'xxx'},base:{}"
+
+#: community/apps/dataset/serializers/document_serializers.py:859
+msgid "dataset id not exist"
+msgstr "知識庫 id 不存在"
+
+#: community/apps/dataset/serializers/document_serializers.py:990
+#: community/apps/dataset/serializers/document_serializers.py:1020
+msgid "limit"
+msgstr "分段長度"
+
+#: community/apps/dataset/serializers/document_serializers.py:994
+#: community/apps/dataset/serializers/document_serializers.py:996
+msgid "patterns"
+msgstr "分段標識列表"
+
+#: community/apps/dataset/serializers/document_serializers.py:999
+msgid "Auto Clean"
+msgstr "自動清洗"
+
+#: community/apps/dataset/serializers/document_serializers.py:1006
+msgid "The maximum size of the uploaded file cannot exceed 100MB"
+msgstr "文件上傳最大大小不能超過100MB"
+
+#: community/apps/dataset/serializers/document_serializers.py:1025
+msgid "Segmented regular list"
+msgstr "分段正則列表"
+
+#: community/apps/dataset/serializers/document_serializers.py:1029
+#: community/apps/dataset/serializers/document_serializers.py:1030
+msgid "Whether to clear special characters"
+msgstr "是否清除特殊字符"
+
+#: community/apps/dataset/serializers/document_serializers.py:1049
+msgid "space"
+msgstr "空格"
+
+#: community/apps/dataset/serializers/document_serializers.py:1050
+msgid "semicolon"
+msgstr "分號"
+
+#: community/apps/dataset/serializers/document_serializers.py:1050
+msgid "comma"
+msgstr "逗號"
+
+#: community/apps/dataset/serializers/document_serializers.py:1051
+msgid "period"
+msgstr "句號"
+
+#: community/apps/dataset/serializers/document_serializers.py:1051
+msgid "enter"
+msgstr "回車"
+
+#: community/apps/dataset/serializers/document_serializers.py:1052
+msgid "blank line"
+msgstr "空行"
+
+#: community/apps/dataset/serializers/document_serializers.py:1165
+msgid "Hit handling method is required"
+msgstr "命中處理方式必填"
+
+#: community/apps/dataset/serializers/document_serializers.py:1167
+msgid "The hit processing method must be directly_return|optimization"
+msgstr "命中處理方式必須是 directly_return|optimization"
+
+#: community/apps/dataset/serializers/document_serializers.py:1213
+#: community/apps/dataset/serializers/paragraph_serializers.py:753
+msgid "The task is being executed, please do not send it again."
+msgstr "任務正在執行中,請勿重複發送"
+
+#: community/apps/dataset/serializers/file_serializers.py:82
+msgid "File not found"
+msgstr "文件不存在"
+
+#: community/apps/dataset/serializers/image_serializers.py:23
+msgid "image"
+msgstr "圖片"
+
+#: community/apps/dataset/serializers/image_serializers.py:42
+msgid "Image not found"
+msgstr "圖片不存在"
+
+#: community/apps/dataset/serializers/paragraph_serializers.py:52
+#: community/apps/dataset/serializers/paragraph_serializers.py:68
+#: community/apps/dataset/serializers/paragraph_serializers.py:69
+#: community/apps/dataset/serializers/paragraph_serializers.py:82
+#: community/apps/dataset/serializers/paragraph_serializers.py:85
+#: community/apps/dataset/serializers/paragraph_serializers.py:91
+#: community/apps/dataset/serializers/paragraph_serializers.py:93
+#: community/apps/dataset/serializers/paragraph_serializers.py:653
+msgid "section title"
+msgstr "段落標題"
+
+#: community/apps/dataset/serializers/paragraph_serializers.py:65
+#: community/apps/dataset/serializers/paragraph_serializers.py:66
+msgid "section content"
+msgstr "段落內容"
+
+#: community/apps/dataset/serializers/paragraph_serializers.py:73
+#: community/apps/dataset/serializers/paragraph_serializers.py:74
+#: community/apps/dataset/serializers/problem_serializers.py:88
+msgid "problem list"
+msgstr "問題列表"
+
+#: community/apps/dataset/serializers/paragraph_serializers.py:100
+#: community/apps/dataset/serializers/paragraph_serializers.py:172
+#: community/apps/dataset/serializers/paragraph_serializers.py:214
+#: community/apps/dataset/serializers/paragraph_serializers.py:276
+#: community/apps/dataset/serializers/paragraph_serializers.py:308
+#: community/apps/dataset/serializers/paragraph_serializers.py:456
+#: community/apps/dataset/serializers/paragraph_serializers.py:563
+#: community/apps/dataset/serializers/problem_serializers.py:57
+#: community/apps/dataset/swagger_api/problem_api.py:61
+msgid "paragraph id"
+msgstr "段落 id"
+
+#: community/apps/dataset/serializers/paragraph_serializers.py:105
+#: community/apps/dataset/serializers/paragraph_serializers.py:467
+msgid "Paragraph id does not exist"
+msgstr "段落 id 不存在"
+
+#: community/apps/dataset/serializers/paragraph_serializers.py:134
+msgid "Already associated, please do not associate again"
+msgstr "已經關聯,請勿重複關聯"
+
+#: community/apps/dataset/serializers/paragraph_serializers.py:191
+#: community/apps/dataset/serializers/paragraph_serializers.py:192
+msgid "question content"
+msgstr "問題內容"
+
+#: community/apps/dataset/serializers/paragraph_serializers.py:193
+#: community/apps/dataset/serializers/paragraph_serializers.py:709
+#: community/apps/dataset/swagger_api/problem_api.py:26
+msgid "hit num"
+msgstr "命中數量"
+
+#: community/apps/dataset/serializers/paragraph_serializers.py:210
+#: community/apps/dataset/serializers/paragraph_serializers.py:281
+#: community/apps/dataset/serializers/problem_serializers.py:39
+#: community/apps/dataset/serializers/problem_serializers.py:64
+#: community/apps/dataset/serializers/problem_serializers.py:194
+#: community/apps/dataset/swagger_api/problem_api.py:101
+msgid "problem id"
+msgstr "問題 id"
+
+#: community/apps/dataset/serializers/paragraph_serializers.py:222
+msgid "Paragraph does not exist"
+msgstr "段落不存在"
+
+#: community/apps/dataset/serializers/paragraph_serializers.py:224
+msgid "Problem does not exist"
+msgstr "問題不存在"
+
+#: community/apps/dataset/serializers/paragraph_serializers.py:306
+#: community/apps/dataset/serializers/paragraph_serializers.py:449
+#: community/apps/dataset/serializers/paragraph_serializers.py:450
+msgid "paragraph id list"
+msgstr "段落 id 列表"
+
+#: community/apps/dataset/serializers/paragraph_serializers.py:317
+msgid "The document to be migrated is consistent with the target document"
+msgstr "要遷移的文檔與目標文檔一致"
+
+#: community/apps/dataset/serializers/paragraph_serializers.py:319
+#, python-brace-format
+msgid "The document id does not exist [{document_id}]"
+msgstr "文檔 id 不存在 [{document_id}]"
+
+#: community/apps/dataset/serializers/paragraph_serializers.py:323
+#, python-brace-format
+msgid "The target document id does not exist [{document_id}]"
+msgstr "目標文檔 id 不存在 [{document_id}]"
+
+#: community/apps/dataset/serializers/paragraph_serializers.py:503
+msgid "Problem id does not exist"
+msgstr "問題 id 不存在"
+
+#: community/apps/dataset/serializers/paragraph_serializers.py:713
+#: community/apps/dataset/serializers/paragraph_serializers.py:714
+msgid "Number of dislikes"
+msgstr "點踩數量"
+
+#: community/apps/dataset/serializers/problem_serializers.py:50
+msgid "Issue ID is passed when modifying, not when creating."
+msgstr "問題 ID 在修改時傳遞,創建時不傳遞"
+
+#: community/apps/dataset/serializers/problem_serializers.py:62
+#: community/apps/dataset/swagger_api/problem_api.py:51
+#: community/apps/dataset/swagger_api/problem_api.py:52
+#: community/apps/dataset/swagger_api/problem_api.py:83
+#: community/apps/dataset/swagger_api/problem_api.py:84
+msgid "problem id list"
+msgstr "問題 id 列表"
+
+#: community/apps/dataset/swagger_api/document_api.py:38
+#: community/apps/dataset/swagger_api/document_api.py:52
+msgid "1|2|3 1:Vectorization|2:Generate issues|3:Synchronize documents"
+msgstr "1|2|3 1:向量化|2:生成問題|3:同步文檔"
+
+#: community/apps/dataset/swagger_api/document_api.py:64
+#: community/apps/dataset/swagger_api/document_api.py:65
+msgid "state list"
+msgstr "狀態列表"
+
+#: community/apps/dataset/swagger_api/image_api.py:22
+msgid "image file"
+msgstr "圖片文件"
+
+#: community/apps/dataset/swagger_api/problem_api.py:54
+#: community/apps/dataset/swagger_api/problem_api.py:55
+msgid "Associated paragraph information list"
+msgstr "關聯段落信息列表"
+
+#: community/apps/dataset/swagger_api/problem_api.py:131
+msgid "Hit num"
+msgstr "命中數量"
+
+#: community/apps/dataset/task/generate.py:95
+#, python-brace-format
+msgid ""
+"Generate issue based on document: {document_id} error {error}{traceback}"
+msgstr "基於文檔生成問題: {document_id} 錯誤 {error}{traceback}"
+
+#: community/apps/dataset/task/generate.py:99
+#, python-brace-format
+msgid "End--->Generate problem: {document_id}"
+msgstr "結束--->生成問題: {document_id}"
+
+#: community/apps/dataset/task/sync.py:29
+#: community/apps/dataset/task/sync.py:43
+#, python-brace-format
+msgid "Start--->Start synchronization web knowledge base:{dataset_id}"
+msgstr "開始--->開始同步web知識庫:{dataset_id}"
+
+#: community/apps/dataset/task/sync.py:34
+#: community/apps/dataset/task/sync.py:47
+#, python-brace-format
+msgid "End--->End synchronization web knowledge base:{dataset_id}"
+msgstr "結束--->結束同步web知識庫:{dataset_id}"
+
+#: community/apps/dataset/task/sync.py:36
+#: community/apps/dataset/task/sync.py:49
+#, python-brace-format
+msgid "Synchronize web knowledge base:{dataset_id} error{error}{traceback}"
+msgstr "同步web知識庫:{dataset_id} 錯誤{error}{traceback}"
+
+#: community/apps/dataset/task/tools.py:114
+#, python-brace-format
+msgid "Association problem failed {error}"
+msgstr "關聯問題失敗 {error}"
+
+#: community/apps/dataset/views/dataset.py:35
+#: community/apps/dataset/views/dataset.py:36
+msgid "Synchronize the knowledge base of the website"
+msgstr "同步Web網站知識庫"
+
+#: community/apps/dataset/views/dataset.py:57
+#: community/apps/dataset/views/dataset.py:58
+msgid "Create QA knowledge base"
+msgstr "創建QA知識庫"
+
+#: community/apps/dataset/views/dataset.py:77
+#: community/apps/dataset/views/dataset.py:78
+msgid "Create a web site knowledge base"
+msgstr "創建web站點知識庫"
+
+#: community/apps/dataset/views/dataset.py:93
+#: community/apps/dataset/views/dataset.py:94
+msgid "Get a list of applications available in the knowledge base"
+msgstr "獲取知識庫中可用的應用列表"
+
+#: community/apps/dataset/views/dataset.py:105
+#: community/apps/dataset/views/dataset.py:106
+msgid "Get a list of knowledge bases"
+msgstr "獲取知識庫列表"
+
+#: community/apps/dataset/views/dataset.py:119
+#: community/apps/dataset/views/dataset.py:120
+msgid "Create a knowledge base"
+msgstr "創建知識庫"
+
+#: community/apps/dataset/views/dataset.py:134
+msgid "Hit test list"
+msgstr "命中測試列表"
+
+#: community/apps/dataset/views/dataset.py:154
+msgid "Re-vectorize"
+msgstr "重新向量化"
+
+#: community/apps/dataset/views/dataset.py:170
+msgid "Export knowledge base"
+msgstr "導出知識庫"
+
+#: community/apps/dataset/views/dataset.py:184
+#: community/apps/dataset/views/dataset.py:185
+msgid "Export knowledge base containing images"
+msgstr "導出包含圖片的知識庫"
+
+#: community/apps/dataset/views/dataset.py:199
+msgid "Delete knowledge base"
+msgstr "刪除知識庫"
+
+#: community/apps/dataset/views/dataset.py:213
+#: community/apps/dataset/views/dataset.py:214
+msgid "Query knowledge base details based on knowledge base id"
+msgstr "根據知識庫id查詢知識庫詳情"
+
+#: community/apps/dataset/views/dataset.py:226
+#: community/apps/dataset/views/dataset.py:227
+msgid "Modify knowledge base information"
+msgstr "修改知識庫信息"
+
+#: community/apps/dataset/views/dataset.py:245
+#: community/apps/dataset/views/dataset.py:246
+#: community/apps/dataset/views/document.py:463
+#: community/apps/dataset/views/document.py:464
+msgid "Get the knowledge base paginated list"
+msgstr "獲取知識庫文檔分頁列表"
+
+#: community/apps/dataset/views/document.py:31
+#: community/apps/dataset/views/document.py:32
+msgid "Get QA template"
+msgstr "獲取問答模版"
+
+#: community/apps/dataset/views/document.py:44
+#: community/apps/dataset/views/document.py:45
+msgid "Get form template"
+msgstr "獲取表單模版"
+
+#: community/apps/dataset/views/document.py:57
+#: community/apps/dataset/views/document.py:58
+msgid "Create Web site documents"
+msgstr "創建web站點文檔"
+
+#: community/apps/dataset/views/document.py:77
+#: community/apps/dataset/views/document.py:78
+msgid "Import QA and create documentation"
+msgstr "導入問答並創建文檔"
+
+#: community/apps/dataset/views/document.py:98
+#: community/apps/dataset/views/document.py:99
+msgid "Import tables and create documents"
+msgstr "導入表格並創建文檔"
+
+#: community/apps/dataset/views/document.py:118
+#: community/apps/dataset/views/document.py:119
+msgid "Create document"
+msgstr "創建文檔"
+
+#: community/apps/dataset/views/document.py:152
+#: community/apps/dataset/views/document.py:153
+msgid "Modify document hit processing methods in batches"
+msgstr "批量修改文檔命中處理方式"
+
+#: community/apps/dataset/views/document.py:171
+#: community/apps/dataset/views/document.py:172
+msgid "Create documents in batches"
+msgstr "批量創建文檔"
+
+#: community/apps/dataset/views/document.py:187
+#: community/apps/dataset/views/document.py:188
+msgid "Batch sync documents"
+msgstr "批量同步文檔"
+
+#: community/apps/dataset/views/document.py:202
+#: community/apps/dataset/views/document.py:203
+msgid "Delete documents in batches"
+msgstr "批量刪除文檔"
+
+#: community/apps/dataset/views/document.py:220
+#: community/apps/dataset/views/document.py:221
+msgid "Synchronize web site types"
+msgstr "同步web站點類型"
+
+#: community/apps/dataset/views/document.py:239
+#: community/apps/dataset/views/document.py:240
+msgid "Cancel task"
+msgstr "取消任務"
+
+#: community/apps/dataset/views/document.py:260
+#: community/apps/dataset/views/document.py:261
+msgid "Cancel tasks in batches"
+msgstr "批量取消任務"
+
+#: community/apps/dataset/views/document.py:279
+#: community/apps/dataset/views/document.py:280
+msgid "Refresh document vector library"
+msgstr "文檔向量化"
+
+#: community/apps/dataset/views/document.py:300
+#: community/apps/dataset/views/document.py:301
+msgid "Batch refresh document vector library"
+msgstr "批量文檔向量化"
+
+#: community/apps/dataset/views/document.py:319
+#: community/apps/dataset/views/document.py:320
+msgid "Migrate documents in batches"
+msgstr "批量遷移文檔"
+
+#: community/apps/dataset/views/document.py:346
+#: community/apps/dataset/views/document.py:347
+msgid "Export document"
+msgstr "導出文檔"
+
+#: community/apps/dataset/views/document.py:361
+#: community/apps/dataset/views/document.py:362
+msgid "Export Zip document"
+msgstr "導出Zip文檔"
+
+#: community/apps/dataset/views/document.py:376
+#: community/apps/dataset/views/document.py:377
+msgid "Get document details"
+msgstr "獲取文檔詳情"
+
+#: community/apps/dataset/views/document.py:391
+#: community/apps/dataset/views/document.py:392
+msgid "Modify document"
+msgstr "修改文檔"
+
+#: community/apps/dataset/views/document.py:409
+#: community/apps/dataset/views/document.py:410
+msgid "Delete document"
+msgstr "刪除文檔"
+
+#: community/apps/dataset/views/document.py:427
+#: community/apps/dataset/views/document.py:428
+msgid "Get a list of segment IDs"
+msgstr "獲取分段id列表"
+
+#: community/apps/dataset/views/document.py:439
+#: community/apps/dataset/views/document.py:440
+msgid "Segmented document"
+msgstr "分段文檔"
+
+#: community/apps/dataset/views/file.py:42
+#: community/apps/dataset/views/file.py:43
+msgid "Get file"
+msgstr "獲取文件"
+
+#: community/apps/dataset/views/image.py:28
+#: community/apps/dataset/views/image.py:29
+#: community/apps/dataset/views/image.py:34
+msgid "Upload image"
+msgstr "上傳圖片"
+
+#: community/apps/dataset/views/image.py:35
+#: community/apps/dataset/views/image.py:44
+msgid "Image"
+msgstr "圖片"
+
+#: community/apps/dataset/views/image.py:42
+#: community/apps/dataset/views/image.py:43
+msgid "Get Image"
+msgstr "獲取圖片"
+
+#: community/apps/dataset/views/paragraph.py:28
+#: community/apps/dataset/views/paragraph.py:29
+msgid "Paragraph list"
+msgstr "段落列表"
+
+#: community/apps/dataset/views/paragraph.py:32
+#: community/apps/dataset/views/paragraph.py:51
+#: community/apps/dataset/views/paragraph.py:69
+#: community/apps/dataset/views/paragraph.py:85
+#: community/apps/dataset/views/paragraph.py:103
+#: community/apps/dataset/views/paragraph.py:121
+#: community/apps/dataset/views/paragraph.py:140
+#: community/apps/dataset/views/paragraph.py:156
+#: community/apps/dataset/views/paragraph.py:172
+#: community/apps/dataset/views/paragraph.py:193
+#: community/apps/dataset/views/paragraph.py:211
+#: community/apps/dataset/views/paragraph.py:238
+msgid "Knowledge Base/Documentation/Paragraph"
+msgstr "知識庫/文檔/段落"
+
+#: community/apps/dataset/views/paragraph.py:46
+#: community/apps/dataset/views/paragraph.py:47
+msgid "Create Paragraph"
+msgstr "創建段落"
+
+#: community/apps/dataset/views/paragraph.py:64
+#: community/apps/dataset/views/paragraph.py:65
+msgid "Add associated questions"
+msgstr "添加關聯問題"
+
+#: community/apps/dataset/views/paragraph.py:80
+#: community/apps/dataset/views/paragraph.py:81
+msgid "Get a list of paragraph questions"
+msgstr "獲取段落問題列表"
+
+#: community/apps/dataset/views/paragraph.py:99
+#: community/apps/dataset/views/paragraph.py:100
+msgid "Disassociation issue"
+msgstr "取消關聯問題"
+
+#: community/apps/dataset/views/paragraph.py:117
+#: community/apps/dataset/views/paragraph.py:118
+msgid "Related questions"
+msgstr "關聯問題"
+
+#: community/apps/dataset/views/paragraph.py:135
+#: community/apps/dataset/views/paragraph.py:136
+msgid "Modify paragraph data"
+msgstr "修改段落數據"
+
+#: community/apps/dataset/views/paragraph.py:152
+#: community/apps/dataset/views/paragraph.py:153
+msgid "Get paragraph details"
+msgstr "獲取段落詳情"
+
+#: community/apps/dataset/views/paragraph.py:168
+#: community/apps/dataset/views/paragraph.py:169
+msgid "Delete paragraph"
+msgstr "刪除段落"
+
+#: community/apps/dataset/views/paragraph.py:187
+#: community/apps/dataset/views/paragraph.py:188
+msgid "Delete paragraphs in batches"
+msgstr "批量刪除段落"
+
+#: community/apps/dataset/views/paragraph.py:206
+#: community/apps/dataset/views/paragraph.py:207
+msgid "Migrate paragraphs in batches"
+msgstr "批量遷移段落"
+
+#: community/apps/dataset/views/paragraph.py:233
+#: community/apps/dataset/views/paragraph.py:234
+msgid "Get paragraph list by pagination"
+msgstr "獲取分頁段落列表"
+
+#: community/apps/dataset/views/problem.py:28
+#: community/apps/dataset/views/problem.py:29
+msgid "Question list"
+msgstr "問題列表"
+
+#: community/apps/dataset/views/problem.py:32
+#: community/apps/dataset/views/problem.py:50
+#: community/apps/dataset/views/problem.py:68
+#: community/apps/dataset/views/problem.py:88
+#: community/apps/dataset/views/problem.py:103
+#: community/apps/dataset/views/problem.py:120
+#: community/apps/dataset/views/problem.py:136
+#: community/apps/dataset/views/problem.py:155
+msgid "Knowledge Base/Documentation/Paragraph/Question"
+msgstr "知識庫/文檔/段落/問題"
+
+#: community/apps/dataset/views/problem.py:45
+#: community/apps/dataset/views/problem.py:46
+msgid "Create question"
+msgstr "創建問題"
+
+#: community/apps/dataset/views/problem.py:64
+#: community/apps/dataset/views/problem.py:65
+msgid "Get a list of associated paragraphs"
+msgstr "獲取關聯段落列表"
+
+#: community/apps/dataset/views/problem.py:82
+#: community/apps/dataset/views/problem.py:83
+msgid "Batch deletion issues"
+msgstr "批量刪除問題"
+
+#: community/apps/dataset/views/problem.py:98
+#: community/apps/dataset/views/problem.py:99
+msgid "Batch associated paragraphs"
+msgstr "批量關聯段落"
+
+#: community/apps/dataset/views/problem.py:116
+#: community/apps/dataset/views/problem.py:117
+msgid "Delete question"
+msgstr "刪除問題"
+
+#: community/apps/dataset/views/problem.py:131
+#: community/apps/dataset/views/problem.py:132
+msgid "Modify question"
+msgstr "修改問題"
+
+#: community/apps/dataset/views/problem.py:150
+#: community/apps/dataset/views/problem.py:151
+msgid "Get the list of questions by page"
+msgstr "獲取分頁問題列表"
+
+#: community/apps/embedding/task/embedding.py:30
+#: community/apps/embedding/task/embedding.py:81
+#, python-brace-format
+msgid "Failed to obtain vector model: {error} {traceback}"
+msgstr "獲取向量模型失敗: {error} {traceback}"
+
+#: community/apps/embedding/task/embedding.py:110
+#, python-brace-format
+msgid "Start--->Vectorized dataset: {dataset_id}"
+msgstr "開始--->向量化知識庫: {dataset_id}"
+
+#: community/apps/embedding/task/embedding.py:114
+#, python-brace-format
+msgid "Dataset documentation: {document_names}"
+msgstr "知識庫文檔: {document_names}"
+
+#: community/apps/embedding/task/embedding.py:127
+#, python-brace-format
+msgid "End--->Vectorized dataset: {dataset_id}"
+msgstr "結束--->向量化知識庫: {dataset_id}"
+
+#: community/apps/function_lib/serializers/function_lib_serializer.py:70
+#: community/apps/function_lib/serializers/function_lib_serializer.py:83
+#: community/apps/function_lib/swagger_api/function_lib_api.py:68
+#: community/apps/function_lib/swagger_api/function_lib_api.py:69
+#: community/apps/function_lib/swagger_api/function_lib_api.py:84
+#: community/apps/function_lib/swagger_api/function_lib_api.py:85
+#: community/apps/function_lib/swagger_api/function_lib_api.py:130
+#: community/apps/function_lib/swagger_api/function_lib_api.py:131
+#: community/apps/function_lib/swagger_api/function_lib_api.py:176
+#: community/apps/function_lib/swagger_api/function_lib_api.py:177
+msgid "variable name"
+msgstr "變量名"
+
+#: community/apps/function_lib/serializers/function_lib_serializer.py:71
+#: community/apps/function_lib/swagger_api/function_lib_api.py:88
+#: community/apps/function_lib/swagger_api/function_lib_api.py:89
+#: community/apps/function_lib/swagger_api/function_lib_api.py:134
+#: community/apps/function_lib/swagger_api/function_lib_api.py:135
+#: community/apps/function_lib/swagger_api/function_lib_api.py:180
+#: community/apps/function_lib/swagger_api/function_lib_api.py:181
+msgid "required"
+msgstr "必填"
+
+#: community/apps/function_lib/serializers/function_lib_serializer.py:74
+msgid "fields only support string|int|dict|array|float"
+msgstr "字段只支持string|int|dict|array|float"
+
+#: community/apps/function_lib/serializers/function_lib_serializer.py:85
+#: community/apps/function_lib/swagger_api/function_lib_api.py:72
+#: community/apps/function_lib/swagger_api/function_lib_api.py:73
+msgid "variable value"
+msgstr "變量值"
+
+#: community/apps/function_lib/serializers/function_lib_serializer.py:93
+#: community/apps/function_lib/serializers/function_lib_serializer.py:104
+#: community/apps/function_lib/serializers/function_lib_serializer.py:119
+#: community/apps/function_lib/serializers/py_lint_serializer.py:23
+#: community/apps/function_lib/swagger_api/function_lib_api.py:28
+#: community/apps/function_lib/swagger_api/function_lib_api.py:29
+#: community/apps/function_lib/swagger_api/function_lib_api.py:75
+#: community/apps/function_lib/swagger_api/function_lib_api.py:76
+#: community/apps/function_lib/swagger_api/function_lib_api.py:117
+#: community/apps/function_lib/swagger_api/function_lib_api.py:118
+#: community/apps/function_lib/swagger_api/function_lib_api.py:163
+#: community/apps/function_lib/swagger_api/function_lib_api.py:164
+#: community/apps/function_lib/swagger_api/py_lint_api.py:22
+#: community/apps/function_lib/swagger_api/py_lint_api.py:23
+msgid "function content"
+msgstr "函數內容"
+
+#: community/apps/function_lib/serializers/function_lib_serializer.py:98
+#: community/apps/function_lib/serializers/function_lib_serializer.py:114
+#: community/apps/function_lib/serializers/function_lib_serializer.py:135
+#: community/apps/function_lib/serializers/function_lib_serializer.py:388
+#: community/apps/function_lib/swagger_api/function_lib_api.py:24
+#: community/apps/function_lib/swagger_api/function_lib_api.py:25
+#: community/apps/function_lib/swagger_api/function_lib_api.py:46
+#: community/apps/function_lib/swagger_api/function_lib_api.py:113
+#: community/apps/function_lib/swagger_api/function_lib_api.py:114
+#: community/apps/function_lib/swagger_api/function_lib_api.py:159
+#: community/apps/function_lib/swagger_api/function_lib_api.py:160
+msgid "function name"
+msgstr "函數名"
+
+#: community/apps/function_lib/serializers/function_lib_serializer.py:101
+#: community/apps/function_lib/serializers/function_lib_serializer.py:117
+#: community/apps/function_lib/serializers/function_lib_serializer.py:138
+#: community/apps/function_lib/swagger_api/function_lib_api.py:26
+#: community/apps/function_lib/swagger_api/function_lib_api.py:27
+#: community/apps/function_lib/swagger_api/function_lib_api.py:51
+#: community/apps/function_lib/swagger_api/function_lib_api.py:115
+#: community/apps/function_lib/swagger_api/function_lib_api.py:116
+#: community/apps/function_lib/swagger_api/function_lib_api.py:161
+#: community/apps/function_lib/swagger_api/function_lib_api.py:162
+msgid "function description"
+msgstr "函數描述"
+
+#: community/apps/function_lib/serializers/function_lib_serializer.py:232
+msgid "field has no value set"
+msgstr "字段沒有設置值"
+
+#: community/apps/function_lib/serializers/function_lib_serializer.py:248
+#: community/apps/function_lib/serializers/function_lib_serializer.py:253
+msgid "type error"
+msgstr "類型錯誤"
+
+#: community/apps/function_lib/serializers/function_lib_serializer.py:256
+#, python-brace-format
+msgid "Field: {name} Type: {_type} Value: {value} Type conversion error"
+msgstr "字段: {name} 類型: {_type} 值: {value} 類型轉換錯誤"
+
+#: community/apps/function_lib/serializers/function_lib_serializer.py:261
+msgid "function id"
+msgstr "函數 id"
+
+#: community/apps/function_lib/serializers/function_lib_serializer.py:267
+#: community/apps/function_lib/serializers/function_lib_serializer.py:303
+#: community/apps/function_lib/serializers/function_lib_serializer.py:366
+#: community/apps/function_lib/serializers/function_lib_serializer.py:396
+msgid "Function does not exist"
+msgstr "函數不存在"
+
+#: community/apps/function_lib/serializers/function_lib_serializer.py:357
+#: community/apps/function_lib/serializers/function_lib_serializer.py:386
+#| msgid "function"
+msgid "function ID"
+msgstr "函數 ID"
+
+#: community/apps/function_lib/swagger_api/function_lib_api.py:23
+#: community/apps/function_lib/swagger_api/function_lib_api.py:205
+msgid "ID"
+msgstr "ID"
+
+#: community/apps/function_lib/swagger_api/function_lib_api.py:30
+#: community/apps/function_lib/swagger_api/function_lib_api.py:31
+msgid "input field"
+msgstr "輸入字段"
+
+#: community/apps/function_lib/swagger_api/function_lib_api.py:62
+#: community/apps/function_lib/swagger_api/function_lib_api.py:78
+#: community/apps/function_lib/swagger_api/function_lib_api.py:124
+#: community/apps/function_lib/swagger_api/function_lib_api.py:170
+msgid "Input variable list"
+msgstr "輸入變量列表"
+
+#: community/apps/function_lib/swagger_api/function_lib_api.py:94
+#: community/apps/function_lib/swagger_api/function_lib_api.py:140
+#: community/apps/function_lib/swagger_api/function_lib_api.py:186
+msgid "Field type string|int|dict|array|float"
+msgstr "字段類型 string|int|dict|array|float"
+
+#: community/apps/function_lib/swagger_api/function_lib_api.py:100
+#: community/apps/function_lib/swagger_api/function_lib_api.py:146
+#: community/apps/function_lib/swagger_api/function_lib_api.py:192
+msgid "The source only supports custom|reference"
+msgstr "來源只支持custom|reference"
+
+#: community/apps/function_lib/views/function_lib_views.py:28
+#: community/apps/function_lib/views/function_lib_views.py:29
+msgid "Get function list"
+msgstr "獲取函數列表"
+
+#: community/apps/function_lib/views/function_lib_views.py:30
+#: community/apps/function_lib/views/function_lib_views.py:46
+#: community/apps/function_lib/views/function_lib_views.py:59
+#: community/apps/function_lib/views/function_lib_views.py:74
+#: community/apps/function_lib/views/function_lib_views.py:85
+#: community/apps/function_lib/views/function_lib_views.py:95
+#: community/apps/function_lib/views/function_lib_views.py:111
+#: community/apps/function_lib/views/py_lint.py:29
+msgid "Function"
+msgstr "函數庫"
+
+#: community/apps/function_lib/views/function_lib_views.py:43
+#: community/apps/function_lib/views/function_lib_views.py:44
+msgid "Create function"
+msgstr "創建函數"
+
+#: community/apps/function_lib/views/function_lib_views.py:56
+#: community/apps/function_lib/views/function_lib_views.py:57
+msgid "Debug function"
+msgstr "調試函數"
+
+#: community/apps/function_lib/views/function_lib_views.py:71
+#: community/apps/function_lib/views/function_lib_views.py:72
+msgid "Update function"
+msgstr "更新函數"
+
+#: community/apps/function_lib/views/function_lib_views.py:83
+#: community/apps/function_lib/views/function_lib_views.py:84
+msgid "Delete function"
+msgstr "刪除函數"
+
+#: community/apps/function_lib/views/function_lib_views.py:93
+#: community/apps/function_lib/views/function_lib_views.py:94
+msgid "Get function details"
+msgstr "獲取函數詳情"
+
+#: community/apps/function_lib/views/function_lib_views.py:106
+#: community/apps/function_lib/views/function_lib_views.py:107
+msgid "Get function list by pagination"
+msgstr "獲取分頁函數列表"
+
+#: community/apps/function_lib/views/function_lib_views.py:129
+#| msgid "not a function"
+msgid "Import function"
+msgstr "導入函數"
+
+#: community/apps/function_lib/views/function_lib_views.py:143
+#| msgid "not a function"
+msgid "Export function"
+msgstr "導出函數"
+
+#: community/apps/function_lib/views/py_lint.py:26
+#: community/apps/function_lib/views/py_lint.py:27
+msgid "Check code"
+msgstr "檢查代碼"
+
+#: community/apps/setting/models_provider/base_model_provider.py:66
+msgid "Model type cannot be empty"
+msgstr "模型類型不能爲空"
+
+#: community/apps/setting/models_provider/base_model_provider.py:91
+msgid "The current platform does not support downloading models"
+msgstr "當前平臺不支持下載模型"
+
+#: community/apps/setting/models_provider/base_model_provider.py:146
+msgid "LLM"
+msgstr "大語言模型"
+
+#: community/apps/setting/models_provider/base_model_provider.py:147
+msgid "Embedding Model"
+msgstr "向量模型"
+
+#: community/apps/setting/models_provider/base_model_provider.py:148
+msgid "Speech2Text"
+msgstr "語音識別"
+
+#: community/apps/setting/models_provider/base_model_provider.py:149
+msgid "TTS"
+msgstr "語音合成"
+
+#: community/apps/setting/models_provider/base_model_provider.py:150
+msgid "Vision Model"
+msgstr "圖片理解"
+
+#: community/apps/setting/models_provider/base_model_provider.py:151
+msgid "Image Generation"
+msgstr "圖片生成"
+
+#: community/apps/setting/models_provider/base_model_provider.py:152
+msgid "Rerank"
+msgstr "重排模型"
+
+#: community/apps/setting/models_provider/base_model_provider.py:226
+msgid "The model does not support"
+msgstr "模型不支持"
+
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/aliyun_bai_lian_model_provider.py:42
+msgid ""
+"With the GTE-Rerank text sorting series model developed by Alibaba Tongyi "
+"Lab, developers can integrate high-quality text retrieval and sorting "
+"through the LlamaIndex framework."
+msgstr ""
+"阿里巴巴通義實驗室開發的GTE-Rerank文本排序系列模型,開發者可以通過LlamaIndex"
+"框架進行集成高質量文本檢索、排序。"
+
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/aliyun_bai_lian_model_provider.py:45
+msgid ""
+"Chinese (including various dialects such as Cantonese), English, Japanese, "
+"and Korean support free switching between multiple languages."
+msgstr "中文(含粵語等各種方言)、英文、日語、韓語支持多個語種自由切換"
+
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/aliyun_bai_lian_model_provider.py:48
+msgid ""
+"CosyVoice is based on a new generation of large generative speech models, "
+"which can predict emotions, intonation, rhythm, etc. based on context, and "
+"has better anthropomorphic effects."
+msgstr ""
+"CosyVoice基於新一代生成式語音大模型,能根據上下文預測情緒、語調、韻律等,具有"
+"更好的擬人效果"
+
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/aliyun_bai_lian_model_provider.py:51
+msgid ""
+"Universal text vector is Tongyi Lab's multi-language text unified vector "
+"model based on the LLM base. It provides high-level vector services for "
+"multiple mainstream languages around the world and helps developers quickly "
+"convert text data into high-quality vector data."
+msgstr ""
+"通用文本向量,是通義實驗室基於LLM底座的多語言文本統一向量模型,面向全球多個主"
+"流語種,提供高水準的向量服務,幫助開發者將文本數據快速轉換爲高質量的向量數"
+"據。"
+
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/aliyun_bai_lian_model_provider.py:69
+#: community/apps/setting/models_provider/impl/qwen_model_provider/qwen_model_provider.py:40
+msgid ""
+"Tongyi Wanxiang - a large image model for text generation, supports "
+"bilingual input in Chinese and English, and supports the input of reference "
+"pictures for reference content or reference style migration. Key styles "
+"include but are not limited to watercolor, oil painting, Chinese painting, "
+"sketch, flat illustration, two-dimensional, and 3D. Cartoon."
+msgstr ""
+"通義萬相-文本生成圖像大模型,支持中英文雙語輸入,支持輸入參考圖片進行參考內容"
+"或者參考風格遷移,重點風格包括但不限於水彩、油畫、中國畫、素描、扁平插畫、二"
+"次元、3D卡通。"
+
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/aliyun_bai_lian_model_provider.py:95
+msgid "Alibaba Cloud Bailian"
+msgstr "阿里雲百鍊"
+
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/embedding.py:28
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/image.py:47
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/llm.py:40
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/reranker.py:28
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/stt.py:21
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:68
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:55
+#: community/apps/setting/models_provider/impl/anthropic_model_provider/credential/image.py:42
+#: community/apps/setting/models_provider/impl/anthropic_model_provider/credential/llm.py:47
+#: community/apps/setting/models_provider/impl/aws_bedrock_model_provider/credential/embedding.py:21
+#: community/apps/setting/models_provider/impl/aws_bedrock_model_provider/credential/llm.py:40
+#: community/apps/setting/models_provider/impl/azure_model_provider/credential/embedding.py:27
+#: community/apps/setting/models_provider/impl/azure_model_provider/credential/image.py:45
+#: community/apps/setting/models_provider/impl/azure_model_provider/credential/llm.py:47
+#: community/apps/setting/models_provider/impl/azure_model_provider/credential/stt.py:23
+#: community/apps/setting/models_provider/impl/azure_model_provider/credential/tti.py:58
+#: community/apps/setting/models_provider/impl/azure_model_provider/credential/tts.py:41
+#: community/apps/setting/models_provider/impl/deepseek_model_provider/credential/llm.py:47
+#: community/apps/setting/models_provider/impl/gemini_model_provider/credential/embedding.py:26
+#: community/apps/setting/models_provider/impl/gemini_model_provider/credential/image.py:41
+#: community/apps/setting/models_provider/impl/gemini_model_provider/credential/llm.py:47
+#: community/apps/setting/models_provider/impl/gemini_model_provider/credential/stt.py:21
+#: community/apps/setting/models_provider/impl/kimi_model_provider/credential/llm.py:47
+#: community/apps/setting/models_provider/impl/local_model_provider/credential/embedding.py:27
+#: community/apps/setting/models_provider/impl/local_model_provider/credential/reranker.py:28
+#: community/apps/setting/models_provider/impl/ollama_model_provider/credential/embedding.py:26
+#: community/apps/setting/models_provider/impl/ollama_model_provider/credential/image.py:39
+#: community/apps/setting/models_provider/impl/ollama_model_provider/credential/llm.py:44
+#: community/apps/setting/models_provider/impl/ollama_model_provider/credential/reranker.py:27
+#: community/apps/setting/models_provider/impl/ollama_model_provider/credential/reranker.py:31
+#: community/apps/setting/models_provider/impl/openai_model_provider/credential/embedding.py:26
+#: community/apps/setting/models_provider/impl/openai_model_provider/credential/image.py:44
+#: community/apps/setting/models_provider/impl/openai_model_provider/credential/llm.py:47
+#: community/apps/setting/models_provider/impl/openai_model_provider/credential/stt.py:22
+#: community/apps/setting/models_provider/impl/openai_model_provider/credential/tti.py:61
+#: community/apps/setting/models_provider/impl/openai_model_provider/credential/tts.py:40
+#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/image.py:47
+#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/llm.py:47
+#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/tti.py:68
+#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/embedding.py:26
+#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/image.py:44
+#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/llm.py:47
+#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/reranker.py:28
+#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/stt.py:22
+#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/tti.py:61
+#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/tts.py:40
+#: community/apps/setting/models_provider/impl/tencent_cloud_model_provider/credential/llm.py:47
+#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/embedding.py:19
+#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/image.py:47
+#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/llm.py:31
+#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:78
+#: community/apps/setting/models_provider/impl/vllm_model_provider/credential/embedding.py:26
+#: community/apps/setting/models_provider/impl/vllm_model_provider/credential/image.py:42
+#: community/apps/setting/models_provider/impl/vllm_model_provider/credential/llm.py:39
+#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/embedding.py:26
+#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/image.py:42
+#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/llm.py:47
+#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/stt.py:25
+#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/tti.py:41
+#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/tts.py:53
+#: community/apps/setting/models_provider/impl/wenxin_model_provider/credential/embedding.py:27
+#: community/apps/setting/models_provider/impl/wenxin_model_provider/credential/llm.py:46
+#: community/apps/setting/models_provider/impl/xf_model_provider/credential/embedding.py:27
+#: community/apps/setting/models_provider/impl/xf_model_provider/credential/image.py:29
+#: community/apps/setting/models_provider/impl/xf_model_provider/credential/llm.py:66
+#: community/apps/setting/models_provider/impl/xf_model_provider/credential/stt.py:24
+#: community/apps/setting/models_provider/impl/xf_model_provider/credential/tts.py:47
+#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/embedding.py:19
+#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/image.py:41
+#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/llm.py:39
+#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/reranker.py:25
+#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/stt.py:21
+#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/tti.py:59
+#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/tts.py:39
+#: community/apps/setting/models_provider/impl/zhipu_model_provider/credential/image.py:41
+#: community/apps/setting/models_provider/impl/zhipu_model_provider/credential/llm.py:47
+#: community/apps/setting/models_provider/impl/zhipu_model_provider/credential/tti.py:40
+#, python-brace-format
+msgid "{model_type} Model type is not supported"
+msgstr "模型類型 {model_type} 不支持"
+
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/embedding.py:32
+#, python-brace-format
+msgid "{key} is required"
+msgstr "{key} 是必填項"
+
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/embedding.py:37
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/image.py:56
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/llm.py:50
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/reranker.py:37
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/model/tti.py:43
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/model/tts.py:37
+#: community/apps/setting/models_provider/impl/anthropic_model_provider/credential/image.py:52
+#: community/apps/setting/models_provider/impl/anthropic_model_provider/credential/llm.py:57
+#: community/apps/setting/models_provider/impl/aws_bedrock_model_provider/credential/embedding.py:34
+#: community/apps/setting/models_provider/impl/aws_bedrock_model_provider/credential/llm.py:53
+#: community/apps/setting/models_provider/impl/azure_model_provider/credential/embedding.py:37
+#: community/apps/setting/models_provider/impl/azure_model_provider/credential/image.py:55
+#: community/apps/setting/models_provider/impl/azure_model_provider/credential/llm.py:57
+#: community/apps/setting/models_provider/impl/deepseek_model_provider/credential/llm.py:57
+#: community/apps/setting/models_provider/impl/gemini_model_provider/credential/embedding.py:36
+#: community/apps/setting/models_provider/impl/gemini_model_provider/credential/image.py:51
+#: community/apps/setting/models_provider/impl/gemini_model_provider/credential/llm.py:57
+#: community/apps/setting/models_provider/impl/gemini_model_provider/model/stt.py:43
+#: community/apps/setting/models_provider/impl/kimi_model_provider/credential/llm.py:57
+#: community/apps/setting/models_provider/impl/local_model_provider/credential/embedding.py:36
+#: community/apps/setting/models_provider/impl/local_model_provider/credential/reranker.py:37
+#: community/apps/setting/models_provider/impl/ollama_model_provider/credential/embedding.py:37
+#: community/apps/setting/models_provider/impl/ollama_model_provider/credential/reranker.py:44
+#: community/apps/setting/models_provider/impl/openai_model_provider/credential/embedding.py:36
+#: community/apps/setting/models_provider/impl/openai_model_provider/credential/image.py:54
+#: community/apps/setting/models_provider/impl/openai_model_provider/credential/llm.py:58
+#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/image.py:56
+#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/llm.py:56
+#: community/apps/setting/models_provider/impl/qwen_model_provider/model/tti.py:43
+#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/embedding.py:36
+#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/image.py:54
+#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/llm.py:58
+#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/reranker.py:37
+#: community/apps/setting/models_provider/impl/tencent_cloud_model_provider/credential/llm.py:58
+#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/embedding.py:23
+#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/image.py:56
+#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/llm.py:51
+#: community/apps/setting/models_provider/impl/tencent_model_provider/model/tti.py:54
+#: community/apps/setting/models_provider/impl/vllm_model_provider/credential/embedding.py:36
+#: community/apps/setting/models_provider/impl/vllm_model_provider/credential/llm.py:50
+#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/embedding.py:36
+#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/image.py:52
+#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/llm.py:57
+#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/model/tts.py:77
+#: community/apps/setting/models_provider/impl/wenxin_model_provider/credential/embedding.py:31
+#: community/apps/setting/models_provider/impl/wenxin_model_provider/credential/llm.py:60
+#: community/apps/setting/models_provider/impl/xf_model_provider/credential/embedding.py:31
+#: community/apps/setting/models_provider/impl/xf_model_provider/credential/llm.py:76
+#: community/apps/setting/models_provider/impl/xf_model_provider/model/tts.py:101
+#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/embedding.py:31
+#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/image.py:51
+#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/llm.py:50
+#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/reranker.py:34
+#: community/apps/setting/models_provider/impl/xinference_model_provider/model/tts.py:44
+#: community/apps/setting/models_provider/impl/zhipu_model_provider/credential/image.py:51
+#: community/apps/setting/models_provider/impl/zhipu_model_provider/credential/llm.py:56
+#: community/apps/setting/models_provider/impl/zhipu_model_provider/model/tti.py:49
+msgid "Hello"
+msgstr "你好!"
+
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/embedding.py:44
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/image.py:66
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/llm.py:58
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/reranker.py:44
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/stt.py:38
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:86
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:73
+#: community/apps/setting/models_provider/impl/anthropic_model_provider/credential/image.py:62
+#: community/apps/setting/models_provider/impl/anthropic_model_provider/credential/llm.py:65
+#: community/apps/setting/models_provider/impl/aws_bedrock_model_provider/credential/embedding.py:42
+#: community/apps/setting/models_provider/impl/aws_bedrock_model_provider/credential/llm.py:61
+#: community/apps/setting/models_provider/impl/azure_model_provider/credential/image.py:65
+#: community/apps/setting/models_provider/impl/azure_model_provider/credential/stt.py:40
+#: community/apps/setting/models_provider/impl/azure_model_provider/credential/tti.py:77
+#: community/apps/setting/models_provider/impl/azure_model_provider/credential/tts.py:58
+#: community/apps/setting/models_provider/impl/deepseek_model_provider/credential/llm.py:65
+#: community/apps/setting/models_provider/impl/gemini_model_provider/credential/embedding.py:43
+#: community/apps/setting/models_provider/impl/gemini_model_provider/credential/image.py:61
+#: community/apps/setting/models_provider/impl/gemini_model_provider/credential/llm.py:66
+#: community/apps/setting/models_provider/impl/gemini_model_provider/credential/stt.py:38
+#: community/apps/setting/models_provider/impl/kimi_model_provider/credential/llm.py:64
+#: community/apps/setting/models_provider/impl/local_model_provider/credential/embedding.py:44
+#: community/apps/setting/models_provider/impl/local_model_provider/credential/reranker.py:45
+#: community/apps/setting/models_provider/impl/ollama_model_provider/credential/reranker.py:51
+#: community/apps/setting/models_provider/impl/openai_model_provider/credential/embedding.py:43
+#: community/apps/setting/models_provider/impl/openai_model_provider/credential/image.py:64
+#: community/apps/setting/models_provider/impl/openai_model_provider/credential/llm.py:66
+#: community/apps/setting/models_provider/impl/openai_model_provider/credential/stt.py:39
+#: community/apps/setting/models_provider/impl/openai_model_provider/credential/tti.py:80
+#: community/apps/setting/models_provider/impl/openai_model_provider/credential/tts.py:58
+#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/image.py:66
+#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/llm.py:64
+#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/tti.py:86
+#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/embedding.py:43
+#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/image.py:64
+#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/llm.py:66
+#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/reranker.py:44
+#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/stt.py:39
+#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/tti.py:80
+#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/tts.py:58
+#: community/apps/setting/models_provider/impl/tencent_cloud_model_provider/credential/llm.py:66
+#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/embedding.py:30
+#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/image.py:66
+#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/llm.py:57
+#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:104
+#: community/apps/setting/models_provider/impl/vllm_model_provider/credential/embedding.py:43
+#: community/apps/setting/models_provider/impl/vllm_model_provider/credential/image.py:62
+#: community/apps/setting/models_provider/impl/vllm_model_provider/credential/llm.py:55
+#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/embedding.py:43
+#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/image.py:62
+#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/llm.py:66
+#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/stt.py:42
+#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/tti.py:58
+#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/tts.py:70
+#: community/apps/setting/models_provider/impl/wenxin_model_provider/credential/embedding.py:38
+#: community/apps/setting/models_provider/impl/xf_model_provider/credential/embedding.py:38
+#: community/apps/setting/models_provider/impl/xf_model_provider/credential/image.py:50
+#: community/apps/setting/models_provider/impl/xf_model_provider/credential/llm.py:84
+#: community/apps/setting/models_provider/impl/xf_model_provider/credential/stt.py:41
+#: community/apps/setting/models_provider/impl/xf_model_provider/credential/tts.py:65
+#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/image.py:60
+#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/reranker.py:40
+#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/stt.py:37
+#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/tti.py:77
+#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/tts.py:56
+#: community/apps/setting/models_provider/impl/zhipu_model_provider/credential/image.py:61
+#: community/apps/setting/models_provider/impl/zhipu_model_provider/credential/llm.py:64
+#: community/apps/setting/models_provider/impl/zhipu_model_provider/credential/tti.py:59
+#, python-brace-format
+msgid ""
+"Verification failed, please check whether the parameters are correct: {error}"
+msgstr "驗證失敗,請檢查參數是否正確: {error}"
+
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/image.py:22
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/llm.py:15
+#: community/apps/setting/models_provider/impl/anthropic_model_provider/credential/image.py:15
+#: community/apps/setting/models_provider/impl/anthropic_model_provider/credential/llm.py:22
+#: community/apps/setting/models_provider/impl/aws_bedrock_model_provider/credential/llm.py:14
+#: community/apps/setting/models_provider/impl/azure_model_provider/credential/image.py:17
+#: community/apps/setting/models_provider/impl/azure_model_provider/credential/llm.py:22
+#: community/apps/setting/models_provider/impl/deepseek_model_provider/credential/llm.py:22
+#: community/apps/setting/models_provider/impl/gemini_model_provider/credential/image.py:15
+#: community/apps/setting/models_provider/impl/gemini_model_provider/credential/llm.py:22
+#: community/apps/setting/models_provider/impl/kimi_model_provider/credential/llm.py:22
+#: community/apps/setting/models_provider/impl/ollama_model_provider/credential/image.py:12
+#: community/apps/setting/models_provider/impl/ollama_model_provider/credential/llm.py:20
+#: community/apps/setting/models_provider/impl/openai_model_provider/credential/image.py:17
+#: community/apps/setting/models_provider/impl/openai_model_provider/credential/llm.py:22
+#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/image.py:22
+#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/llm.py:22
+#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/image.py:17
+#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/llm.py:22
+#: community/apps/setting/models_provider/impl/tencent_cloud_model_provider/credential/llm.py:22
+#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/image.py:22
+#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/llm.py:14
+#: community/apps/setting/models_provider/impl/vllm_model_provider/credential/image.py:15
+#: community/apps/setting/models_provider/impl/vllm_model_provider/credential/llm.py:15
+#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/image.py:15
+#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/llm.py:22
+#: community/apps/setting/models_provider/impl/wenxin_model_provider/credential/llm.py:22
+#: community/apps/setting/models_provider/impl/xf_model_provider/credential/llm.py:22
+#: community/apps/setting/models_provider/impl/xf_model_provider/credential/llm.py:41
+#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/image.py:14
+#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/llm.py:15
+#: community/apps/setting/models_provider/impl/zhipu_model_provider/credential/image.py:15
+#: community/apps/setting/models_provider/impl/zhipu_model_provider/credential/llm.py:22
+msgid "Temperature"
+msgstr "溫度"
+
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/image.py:23
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/llm.py:16
+#: community/apps/setting/models_provider/impl/anthropic_model_provider/credential/image.py:16
+#: community/apps/setting/models_provider/impl/anthropic_model_provider/credential/llm.py:23
+#: community/apps/setting/models_provider/impl/aws_bedrock_model_provider/credential/llm.py:15
+#: community/apps/setting/models_provider/impl/azure_model_provider/credential/image.py:18
+#: community/apps/setting/models_provider/impl/azure_model_provider/credential/llm.py:23
+#: community/apps/setting/models_provider/impl/deepseek_model_provider/credential/llm.py:23
+#: community/apps/setting/models_provider/impl/gemini_model_provider/credential/image.py:16
+#: community/apps/setting/models_provider/impl/gemini_model_provider/credential/llm.py:23
+#: community/apps/setting/models_provider/impl/kimi_model_provider/credential/llm.py:23
+#: community/apps/setting/models_provider/impl/ollama_model_provider/credential/image.py:13
+#: community/apps/setting/models_provider/impl/ollama_model_provider/credential/llm.py:21
+#: community/apps/setting/models_provider/impl/openai_model_provider/credential/image.py:18
+#: community/apps/setting/models_provider/impl/openai_model_provider/credential/llm.py:23
+#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/image.py:23
+#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/llm.py:23
+#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/image.py:18
+#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/llm.py:23
+#: community/apps/setting/models_provider/impl/tencent_cloud_model_provider/credential/llm.py:23
+#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/image.py:23
+#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/llm.py:15
+#: community/apps/setting/models_provider/impl/vllm_model_provider/credential/image.py:16
+#: community/apps/setting/models_provider/impl/vllm_model_provider/credential/llm.py:16
+#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/image.py:16
+#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/llm.py:23
+#: community/apps/setting/models_provider/impl/wenxin_model_provider/credential/llm.py:23
+#: community/apps/setting/models_provider/impl/xf_model_provider/credential/llm.py:23
+#: community/apps/setting/models_provider/impl/xf_model_provider/credential/llm.py:42
+#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/image.py:15
+#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/llm.py:16
+#: community/apps/setting/models_provider/impl/zhipu_model_provider/credential/image.py:16
+#: community/apps/setting/models_provider/impl/zhipu_model_provider/credential/llm.py:23
+msgid ""
+"Higher values make the output more random, while lower values make it more "
+"focused and deterministic"
+msgstr "較高的數值會使輸出更加隨機,而較低的數值會使其更加集中和確定"
+
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/image.py:31
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/llm.py:24
+#: community/apps/setting/models_provider/impl/anthropic_model_provider/credential/image.py:24
+#: community/apps/setting/models_provider/impl/anthropic_model_provider/credential/llm.py:31
+#: community/apps/setting/models_provider/impl/aws_bedrock_model_provider/credential/llm.py:23
+#: community/apps/setting/models_provider/impl/azure_model_provider/credential/image.py:26
+#: community/apps/setting/models_provider/impl/azure_model_provider/credential/llm.py:31
+#: community/apps/setting/models_provider/impl/deepseek_model_provider/credential/llm.py:31
+#: community/apps/setting/models_provider/impl/gemini_model_provider/credential/image.py:24
+#: community/apps/setting/models_provider/impl/gemini_model_provider/credential/llm.py:31
+#: community/apps/setting/models_provider/impl/kimi_model_provider/credential/llm.py:31
+#: community/apps/setting/models_provider/impl/ollama_model_provider/credential/image.py:21
+#: community/apps/setting/models_provider/impl/ollama_model_provider/credential/llm.py:29
+#: community/apps/setting/models_provider/impl/openai_model_provider/credential/image.py:26
+#: community/apps/setting/models_provider/impl/openai_model_provider/credential/llm.py:31
+#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/image.py:31
+#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/llm.py:31
+#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/image.py:26
+#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/llm.py:31
+#: community/apps/setting/models_provider/impl/tencent_cloud_model_provider/credential/llm.py:31
+#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/image.py:31
+#: community/apps/setting/models_provider/impl/vllm_model_provider/credential/image.py:24
+#: community/apps/setting/models_provider/impl/vllm_model_provider/credential/llm.py:24
+#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/image.py:24
+#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/llm.py:31
+#: community/apps/setting/models_provider/impl/wenxin_model_provider/credential/llm.py:31
+#: community/apps/setting/models_provider/impl/xf_model_provider/credential/llm.py:31
+#: community/apps/setting/models_provider/impl/xf_model_provider/credential/llm.py:50
+#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/image.py:23
+#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/llm.py:24
+#: community/apps/setting/models_provider/impl/zhipu_model_provider/credential/image.py:24
+#: community/apps/setting/models_provider/impl/zhipu_model_provider/credential/llm.py:31
+msgid "Output the maximum Tokens"
+msgstr "輸出最大Token數"
+
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/image.py:32
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/llm.py:25
+#: community/apps/setting/models_provider/impl/anthropic_model_provider/credential/image.py:25
+#: community/apps/setting/models_provider/impl/anthropic_model_provider/credential/llm.py:32
+#: community/apps/setting/models_provider/impl/aws_bedrock_model_provider/credential/llm.py:24
+#: community/apps/setting/models_provider/impl/azure_model_provider/credential/image.py:27
+#: community/apps/setting/models_provider/impl/azure_model_provider/credential/llm.py:32
+#: community/apps/setting/models_provider/impl/deepseek_model_provider/credential/llm.py:32
+#: community/apps/setting/models_provider/impl/gemini_model_provider/credential/image.py:25
+#: community/apps/setting/models_provider/impl/gemini_model_provider/credential/llm.py:32
+#: community/apps/setting/models_provider/impl/kimi_model_provider/credential/llm.py:32
+#: community/apps/setting/models_provider/impl/ollama_model_provider/credential/image.py:22
+#: community/apps/setting/models_provider/impl/ollama_model_provider/credential/llm.py:30
+#: community/apps/setting/models_provider/impl/openai_model_provider/credential/image.py:27
+#: community/apps/setting/models_provider/impl/openai_model_provider/credential/llm.py:32
+#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/image.py:32
+#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/llm.py:32
+#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/image.py:27
+#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/llm.py:32
+#: community/apps/setting/models_provider/impl/tencent_cloud_model_provider/credential/llm.py:32
+#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/image.py:32
+#: community/apps/setting/models_provider/impl/vllm_model_provider/credential/image.py:25
+#: community/apps/setting/models_provider/impl/vllm_model_provider/credential/llm.py:25
+#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/image.py:25
+#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/llm.py:32
+#: community/apps/setting/models_provider/impl/wenxin_model_provider/credential/llm.py:32
+#: community/apps/setting/models_provider/impl/xf_model_provider/credential/llm.py:32
+#: community/apps/setting/models_provider/impl/xf_model_provider/credential/llm.py:51
+#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/image.py:24
+#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/llm.py:25
+#: community/apps/setting/models_provider/impl/zhipu_model_provider/credential/image.py:25
+#: community/apps/setting/models_provider/impl/zhipu_model_provider/credential/llm.py:32
+msgid "Specify the maximum number of tokens that the model can generate"
+msgstr "指定模型可以生成的最大 tokens 數"
+
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/image.py:51
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/llm.py:45
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/reranker.py:32
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/stt.py:26
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:72
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:60
+#: community/apps/setting/models_provider/impl/anthropic_model_provider/credential/image.py:47
+#: community/apps/setting/models_provider/impl/anthropic_model_provider/credential/llm.py:52
+#: community/apps/setting/models_provider/impl/azure_model_provider/credential/embedding.py:32
+#: community/apps/setting/models_provider/impl/azure_model_provider/credential/image.py:50
+#: community/apps/setting/models_provider/impl/azure_model_provider/credential/llm.py:52
+#: community/apps/setting/models_provider/impl/azure_model_provider/credential/stt.py:28
+#: community/apps/setting/models_provider/impl/azure_model_provider/credential/tti.py:63
+#: community/apps/setting/models_provider/impl/azure_model_provider/credential/tts.py:46
+#: community/apps/setting/models_provider/impl/deepseek_model_provider/credential/llm.py:52
+#: community/apps/setting/models_provider/impl/gemini_model_provider/credential/embedding.py:31
+#: community/apps/setting/models_provider/impl/gemini_model_provider/credential/image.py:46
+#: community/apps/setting/models_provider/impl/gemini_model_provider/credential/llm.py:52
+#: community/apps/setting/models_provider/impl/gemini_model_provider/credential/stt.py:26
+#: community/apps/setting/models_provider/impl/kimi_model_provider/credential/llm.py:52
+#: community/apps/setting/models_provider/impl/local_model_provider/credential/embedding.py:31
+#: community/apps/setting/models_provider/impl/local_model_provider/credential/reranker.py:32
+#: community/apps/setting/models_provider/impl/ollama_model_provider/credential/embedding.py:46
+#: community/apps/setting/models_provider/impl/ollama_model_provider/credential/llm.py:62
+#: community/apps/setting/models_provider/impl/ollama_model_provider/credential/reranker.py:63
+#: community/apps/setting/models_provider/impl/openai_model_provider/credential/embedding.py:31
+#: community/apps/setting/models_provider/impl/openai_model_provider/credential/image.py:49
+#: community/apps/setting/models_provider/impl/openai_model_provider/credential/llm.py:52
+#: community/apps/setting/models_provider/impl/openai_model_provider/credential/stt.py:27
+#: community/apps/setting/models_provider/impl/openai_model_provider/credential/tti.py:66
+#: community/apps/setting/models_provider/impl/openai_model_provider/credential/tts.py:45
+#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/image.py:51
+#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/llm.py:51
+#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/tti.py:72
+#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/embedding.py:31
+#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/image.py:49
+#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/llm.py:52
+#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/reranker.py:32
+#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/stt.py:27
+#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/tti.py:66
+#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/tts.py:45
+#: community/apps/setting/models_provider/impl/tencent_cloud_model_provider/credential/llm.py:52
+#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/image.py:51
+#: community/apps/setting/models_provider/impl/vllm_model_provider/credential/embedding.py:31
+#: community/apps/setting/models_provider/impl/vllm_model_provider/credential/image.py:47
+#: community/apps/setting/models_provider/impl/vllm_model_provider/credential/llm.py:65
+#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/embedding.py:31
+#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/image.py:47
+#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/llm.py:52
+#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/stt.py:30
+#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/tti.py:46
+#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/tts.py:58
+#: community/apps/setting/models_provider/impl/wenxin_model_provider/credential/llm.py:55
+#: community/apps/setting/models_provider/impl/wenxin_model_provider/credential/llm.py:72
+#: community/apps/setting/models_provider/impl/xf_model_provider/credential/image.py:34
+#: community/apps/setting/models_provider/impl/xf_model_provider/credential/llm.py:71
+#: community/apps/setting/models_provider/impl/xf_model_provider/credential/stt.py:29
+#: community/apps/setting/models_provider/impl/xf_model_provider/credential/tts.py:52
+#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/embedding.py:40
+#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/image.py:46
+#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/llm.py:59
+#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/reranker.py:29
+#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/stt.py:26
+#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/tti.py:64
+#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/tts.py:44
+#: community/apps/setting/models_provider/impl/zhipu_model_provider/credential/image.py:46
+#: community/apps/setting/models_provider/impl/zhipu_model_provider/credential/llm.py:51
+#: community/apps/setting/models_provider/impl/zhipu_model_provider/credential/tti.py:45
+#, python-brace-format
+msgid "{key} is required"
+msgstr "{key} 是必填項"
+
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:22
+#: community/apps/setting/models_provider/impl/azure_model_provider/credential/tti.py:15
+#: community/apps/setting/models_provider/impl/openai_model_provider/credential/tti.py:15
+#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/tti.py:22
+#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/tti.py:15
+#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/tti.py:15
+#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/tti.py:14
+#: community/apps/setting/models_provider/impl/zhipu_model_provider/credential/tti.py:15
+msgid "Image size"
+msgstr "圖片尺寸"
+
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:22
+#: community/apps/setting/models_provider/impl/azure_model_provider/credential/tti.py:15
+#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/tti.py:22
+msgid "Specify the size of the generated image, such as: 1024x1024"
+msgstr "指定生成圖片的尺寸, 如: 1024x1024"
+
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:34
+#: community/apps/setting/models_provider/impl/azure_model_provider/credential/tti.py:40
+#: community/apps/setting/models_provider/impl/openai_model_provider/credential/tti.py:43
+#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/tti.py:34
+#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/tti.py:43
+#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/tti.py:41
+msgid "Number of pictures"
+msgstr "圖片數量"
+
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:34
+#: community/apps/setting/models_provider/impl/azure_model_provider/credential/tti.py:40
+#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/tti.py:34
+msgid "Specify the number of generated images"
+msgstr "指定生成圖片的數量"
+
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:41
+#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/tti.py:41
+msgid "Style"
+msgstr "風格"
+
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:41
+#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/tti.py:41
+msgid "Specify the style of generated images"
+msgstr "指定生成圖片的風格"
+
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:45
+#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/tti.py:45
+msgid "Default value, the image style is randomly output by the model"
+msgstr "默認值,圖片風格由模型隨機輸出"
+
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:46
+#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/tti.py:46
+msgid "photography"
+msgstr "攝影"
+
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:47
+#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/tti.py:47
+msgid "Portraits"
+msgstr "人像寫真"
+
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:48
+#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/tti.py:48
+msgid "3D cartoon"
+msgstr "3D卡通"
+
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:49
+#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/tti.py:49
+msgid "animation"
+msgstr "動畫"
+
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:50
+#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/tti.py:50
+msgid "painting"
+msgstr "油畫"
+
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:51
+#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/tti.py:51
+msgid "watercolor"
+msgstr "水彩"
+
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:52
+#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/tti.py:52
+msgid "sketch"
+msgstr "素描"
+
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:53
+#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/tti.py:53
+msgid "Chinese painting"
+msgstr "中國畫"
+
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:54
+#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/tti.py:54
+msgid "flat illustration"
+msgstr "扁平插畫"
+
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:15
+#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/tts.py:15
+#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/tts.py:15
+msgid "timbre"
+msgstr "音色"
+
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:15
+#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/tts.py:15
+msgid "Chinese sounds can support mixed scenes of Chinese and English"
+msgstr "中文音色支持中英文混合場景"
+
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:20
+msgid "Long Xiaochun"
+msgstr "龍小淳"
+
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:21
+msgid "Long Xiaoxia"
+msgstr "龍小夏"
+
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:22
+msgid "Long Xiaochen"
+msgstr "龍小誠"
+
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:23
+msgid "Long Xiaobai"
+msgstr "龍小白"
+
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:24
+msgid "Long laotie"
+msgstr "龍老鐵"
+
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:25
+msgid "Long Shu"
+msgstr "龍書"
+
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:26
+msgid "Long Shuo"
+msgstr "龍碩"
+
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:27
+msgid "Long Jing"
+msgstr "龍婧"
+
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:28
+msgid "Long Miao"
+msgstr "龍妙"
+
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:29
+msgid "Long Yue"
+msgstr "龍悅"
+
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:30
+msgid "Long Yuan"
+msgstr "龍媛"
+
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:31
+msgid "Long Fei"
+msgstr "龍飛"
+
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:32
+msgid "Long Jielidou"
+msgstr "龍傑力豆"
+
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:33
+msgid "Long Tong"
+msgstr "龍彤"
+
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:34
+msgid "Long Xiang"
+msgstr "龍祥"
+
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:39
+#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/tts.py:33
+#: community/apps/setting/models_provider/impl/xf_model_provider/credential/tts.py:28
+msgid "speaking speed"
+msgstr "語速"
+
+#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:39
+msgid "[0.5,2], the default is 1, usually one decimal place is enough"
+msgstr "[0.5,2],默認爲1,通常一位小數就足夠了"
+
+#: community/apps/setting/models_provider/impl/anthropic_model_provider/credential/image.py:34
+#: community/apps/setting/models_provider/impl/anthropic_model_provider/credential/llm.py:74
+msgid "API URL"
+msgstr ""
+
+#: community/apps/setting/models_provider/impl/anthropic_model_provider/credential/image.py:35
+#: community/apps/setting/models_provider/impl/anthropic_model_provider/credential/llm.py:75
+msgid "API Key"
+msgstr ""
+
+#: community/apps/setting/models_provider/impl/aws_bedrock_model_provider/aws_bedrock_model_provider.py:36
+msgid ""
+"An update to Claude 2 that doubles the context window and improves "
+"reliability, hallucination rates, and evidence-based accuracy in long "
+"documents and RAG contexts."
+msgstr ""
+"Claude 2 的更新,採用雙倍的上下文窗口,並在長文檔和 RAG 上下文中提高可靠性、"
+"幻覺率和循證準確性。"
+
+#: community/apps/setting/models_provider/impl/aws_bedrock_model_provider/aws_bedrock_model_provider.py:43
+msgid ""
+"Anthropic is a powerful model that can handle a variety of tasks, from "
+"complex dialogue and creative content generation to detailed command "
+"obedience."
+msgstr ""
+"Anthropic 功能強大的模型,可處理各種任務,從複雜的對話和創意內容生成到詳細的"
+"指令服從。"
+
+#: community/apps/setting/models_provider/impl/aws_bedrock_model_provider/aws_bedrock_model_provider.py:50
+msgid ""
+"The Claude 3 Haiku is Anthropic's fastest and most compact model, with near-"
+"instant responsiveness. The model can answer simple queries and requests "
+"quickly. Customers will be able to build seamless AI experiences that mimic "
+"human interactions. Claude 3 Haiku can process images and return text "
+"output, and provides 200K context windows."
+msgstr ""
+"Claude 3 Haiku 是 Anthropic 最快速、最緊湊的模型,具有近乎即時的響應能力。該"
+"模型可以快速回答簡單的查詢和請求。客戶將能夠構建模仿人類交互的無縫人工智能體"
+"驗。 Claude 3 Haiku 可以處理圖像和返回文本輸出,並且提供 200K 上下文窗口。"
+
+#: community/apps/setting/models_provider/impl/aws_bedrock_model_provider/aws_bedrock_model_provider.py:57
+msgid ""
+"The Claude 3 Sonnet model from Anthropic strikes the ideal balance between "
+"intelligence and speed, especially when it comes to handling enterprise "
+"workloads. This model offers maximum utility while being priced lower than "
+"competing products, and it's been engineered to be a solid choice for "
+"deploying AI at scale."
+msgstr ""
+"Anthropic 推出的 Claude 3 Sonnet 模型在智能和速度之間取得理想的平衡,尤其是在"
+"處理企業工作負載方面。該模型提供最大的效用,同時價格低於競爭產品,並且其經過"
+"精心設計,是大規模部署人工智能的可靠選擇。"
+
+#: community/apps/setting/models_provider/impl/aws_bedrock_model_provider/aws_bedrock_model_provider.py:64
+msgid ""
+"The Claude 3.5 Sonnet raises the industry standard for intelligence, "
+"outperforming competing models and the Claude 3 Opus in extensive "
+"evaluations, with the speed and cost-effectiveness of our mid-range models."
+msgstr ""
+"Claude 3.5 Sonnet提高了智能的行業標準,在廣泛的評估中超越了競爭對手的型號和"
+"Claude 3 Opus,具有我們中端型號的速度和成本效益。"
+
+#: community/apps/setting/models_provider/impl/aws_bedrock_model_provider/aws_bedrock_model_provider.py:71
+msgid ""
+"A faster, more affordable but still very powerful model that can handle a "
+"range of tasks including casual conversation, text analysis, summarization "
+"and document question answering."
+msgstr ""
+"一種更快速、更實惠但仍然非常強大的模型,它可以處理一系列任務,包括隨意對話、"
+"文本分析、摘要和文檔問題回答。"
+
+#: community/apps/setting/models_provider/impl/aws_bedrock_model_provider/aws_bedrock_model_provider.py:78
+msgid ""
+"Titan Text Premier is the most powerful and advanced model in the Titan Text "
+"series, designed to deliver exceptional performance for a variety of "
+"enterprise applications. With its cutting-edge features, it delivers greater "
+"accuracy and outstanding results, making it an excellent choice for "
+"organizations looking for a top-notch text processing solution."
+msgstr ""
+"Titan Text Premier 是 Titan Text 系列中功能強大且先進的型號,旨在爲各種企業應"
+"用程序提供卓越的性能。憑藉其尖端功能,它提供了更高的準確性和出色的結果,使其"
+"成爲尋求一流文本處理解決方案的組織的絕佳選擇。"
+
+#: community/apps/setting/models_provider/impl/aws_bedrock_model_provider/aws_bedrock_model_provider.py:85
+msgid ""
+"Amazon Titan Text Lite is a lightweight, efficient model ideal for fine-"
+"tuning English-language tasks, including summarization and copywriting, "
+"where customers require smaller, more cost-effective, and highly "
+"customizable models."
+msgstr ""
+"Amazon Titan Text Lite 是一種輕量級的高效模型,非常適合英語任務的微調,包括摘"
+"要和文案寫作等,在這種場景下,客戶需要更小、更經濟高效且高度可定製的模型"
+
+#: community/apps/setting/models_provider/impl/aws_bedrock_model_provider/aws_bedrock_model_provider.py:91
+msgid ""
+"Amazon Titan Text Express has context lengths of up to 8,000 tokens, making "
+"it ideal for a variety of high-level general language tasks, such as open-"
+"ended text generation and conversational chat, as well as support in "
+"retrieval-augmented generation (RAG). At launch, the model is optimized for "
+"English, but other languages are supported."
+msgstr ""
+"Amazon Titan Text Express 的上下文長度長達 8000 個 tokens,因而非常適合各種高"
+"級常規語言任務,例如開放式文本生成和對話式聊天,以及檢索增強生成(RAG)中的支"
+"持。在發佈時,該模型針對英語進行了優化,但也支持其他語言。"
+
+#: community/apps/setting/models_provider/impl/aws_bedrock_model_provider/aws_bedrock_model_provider.py:97
+msgid ""
+"7B dense converter for rapid deployment and easy customization. Small in "
+"size yet powerful in a variety of use cases. Supports English and code, as "
+"well as 32k context windows."
+msgstr ""
+"7B 密集型轉換器,可快速部署,易於定製。體積雖小,但功能強大,適用於各種用例。"
+"支持英語和代碼,以及 32k 的上下文窗口。"
+
+#: community/apps/setting/models_provider/impl/aws_bedrock_model_provider/aws_bedrock_model_provider.py:103
+msgid ""
+"Advanced Mistral AI large-scale language model capable of handling any "
+"language task, including complex multilingual reasoning, text understanding, "
+"transformation, and code generation."
+msgstr ""
+"先進的 Mistral AI 大型語言模型,能夠處理任何語言任務,包括複雜的多語言推理、"
+"文本理解、轉換和代碼生成。"
+
+#: community/apps/setting/models_provider/impl/aws_bedrock_model_provider/aws_bedrock_model_provider.py:109
+msgid ""
+"Ideal for content creation, conversational AI, language understanding, R&D, "
+"and enterprise applications"
+msgstr "非常適合內容創作、會話式人工智能、語言理解、研發和企業應用"
+
+#: community/apps/setting/models_provider/impl/aws_bedrock_model_provider/aws_bedrock_model_provider.py:115
+msgid ""
+"Ideal for limited computing power and resources, edge devices, and faster "
+"training times."
+msgstr "非常適合有限的計算能力和資源、邊緣設備和更快的訓練時間。"
+
+#: community/apps/setting/models_provider/impl/aws_bedrock_model_provider/aws_bedrock_model_provider.py:123
+msgid ""
+"Titan Embed Text is the largest embedding model in the Amazon Titan Embed "
+"series and can handle various text embedding tasks, such as text "
+"classification, text similarity calculation, etc."
+msgstr ""
+"Titan Embed Text 是 Amazon Titan Embed 系列中最大的嵌入模型,可以處理各種文本"
+"嵌入任務,如文本分類、文本相似度計算等。"
+
+#: community/apps/setting/models_provider/impl/aws_bedrock_model_provider/credential/embedding.py:28
+#: community/apps/setting/models_provider/impl/aws_bedrock_model_provider/credential/llm.py:47
+#, python-brace-format
+msgid "The following fields are required: {keys}"
+msgstr "以下字段是必填項: {keys}"
+
+#: community/apps/setting/models_provider/impl/azure_model_provider/credential/embedding.py:44
+#: community/apps/setting/models_provider/impl/azure_model_provider/credential/llm.py:64
+msgid "Verification failed, please check whether the parameters are correct"
+msgstr "驗證失敗,請檢查參數是否正確"
+
+#: community/apps/setting/models_provider/impl/azure_model_provider/credential/tti.py:28
+#: community/apps/setting/models_provider/impl/openai_model_provider/credential/tti.py:29
+#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/tti.py:29
+#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/tti.py:28
+msgid "Picture quality"
+msgstr "圖片質量"
+
+#: community/apps/setting/models_provider/impl/azure_model_provider/credential/tts.py:17
+#: community/apps/setting/models_provider/impl/openai_model_provider/credential/tts.py:17
+#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/tts.py:17
+msgid ""
+"Try out the different sounds (Alloy, Echo, Fable, Onyx, Nova, and Sparkle) "
+"to find one that suits your desired tone and audience. The current voiceover "
+"is optimized for English."
+msgstr ""
+"嘗試不同的聲音(合金、回聲、寓言、縞瑪瑙、新星和閃光),找到一種適合您所需的"
+"音調和聽衆的聲音。當前的語音針對英語進行了優化。"
+
+#: community/apps/setting/models_provider/impl/deepseek_model_provider/deepseek_model_provider.py:24
+msgid "Good at common conversational tasks, supports 32K contexts"
+msgstr "擅長通用對話任務,支持 32K 上下文"
+
+#: community/apps/setting/models_provider/impl/deepseek_model_provider/deepseek_model_provider.py:29
+msgid "Good at handling programming tasks, supports 16K contexts"
+msgstr "擅長處理編程任務,支持 16K 上下文"
+
+#: community/apps/setting/models_provider/impl/gemini_model_provider/gemini_model_provider.py:32
+msgid "Latest Gemini 1.0 Pro model, updated with Google update"
+msgstr "最新的 Gemini 1.0 Pro 模型,隨 Google 更新而更新"
+
+#: community/apps/setting/models_provider/impl/gemini_model_provider/gemini_model_provider.py:36
+msgid "Latest Gemini 1.0 Pro Vision model, updated with Google update"
+msgstr "最新的Gemini 1.0 Pro Vision模型,隨Google更新而更新"
+
+#: community/apps/setting/models_provider/impl/gemini_model_provider/gemini_model_provider.py:43
+#: community/apps/setting/models_provider/impl/gemini_model_provider/gemini_model_provider.py:47
+#: community/apps/setting/models_provider/impl/gemini_model_provider/gemini_model_provider.py:54
+#: community/apps/setting/models_provider/impl/gemini_model_provider/gemini_model_provider.py:58
+msgid "Latest Gemini 1.5 Flash model, updated with Google updates"
+msgstr "最新的Gemini 1.5 Flash模型,隨Google更新而更新"
+
+#: community/apps/setting/models_provider/impl/gemini_model_provider/model/stt.py:53
+msgid "convert audio to text"
+msgstr "將音頻轉換爲文本"
+
+#: community/apps/setting/models_provider/impl/local_model_provider/credential/embedding.py:53
+#: community/apps/setting/models_provider/impl/local_model_provider/credential/reranker.py:54
+msgid "Model catalog"
+msgstr "模型目錄"
+
+#: community/apps/setting/models_provider/impl/local_model_provider/local_model_provider.py:39
+msgid "local model"
+msgstr "本地模型"
+
+#: community/apps/setting/models_provider/impl/ollama_model_provider/credential/embedding.py:30
+#: community/apps/setting/models_provider/impl/ollama_model_provider/credential/image.py:43
+#: community/apps/setting/models_provider/impl/ollama_model_provider/credential/llm.py:48
+#: community/apps/setting/models_provider/impl/ollama_model_provider/credential/reranker.py:35
+#: community/apps/setting/models_provider/impl/vllm_model_provider/credential/llm.py:43
+#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/embedding.py:24
+#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/llm.py:44
+msgid "API domain name is invalid"
+msgstr "API域名無效"
+
+#: community/apps/setting/models_provider/impl/ollama_model_provider/credential/embedding.py:35
+#: community/apps/setting/models_provider/impl/ollama_model_provider/credential/image.py:48
+#: community/apps/setting/models_provider/impl/ollama_model_provider/credential/llm.py:53
+#: community/apps/setting/models_provider/impl/ollama_model_provider/credential/reranker.py:40
+#: community/apps/setting/models_provider/impl/vllm_model_provider/credential/llm.py:47
+#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/embedding.py:30
+#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/llm.py:48
+msgid "The model does not exist, please download the model first"
+msgstr "模型不存在,請先下載模型"
+
+#: community/apps/setting/models_provider/impl/ollama_model_provider/ollama_model_provider.py:56
+msgid ""
+"Llama 2 is a set of pretrained and fine-tuned generative text models ranging "
+"in size from 7 billion to 70 billion. This is a repository of 7B pretrained "
+"models. Links to other models can be found in the index at the bottom."
+msgstr ""
+"Llama 2 是一組經過預訓練和微調的生成文本模型,其規模從 70 億到 700 億個不等。"
+"這是 7B 預訓練模型的存儲庫。其他模型的鏈接可以在底部的索引中找到。"
+
+#: community/apps/setting/models_provider/impl/ollama_model_provider/ollama_model_provider.py:60
+msgid ""
+"Llama 2 is a set of pretrained and fine-tuned generative text models ranging "
+"in size from 7 billion to 70 billion. This is a repository of 13B pretrained "
+"models. Links to other models can be found in the index at the bottom."
+msgstr ""
+"Llama 2 是一組經過預訓練和微調的生成文本模型,其規模從 70 億到 700 億個不等。"
+"這是 13B 預訓練模型的存儲庫。其他模型的鏈接可以在底部的索引中找到。"
+
+#: community/apps/setting/models_provider/impl/ollama_model_provider/ollama_model_provider.py:64
+msgid ""
+"Llama 2 is a set of pretrained and fine-tuned generative text models ranging "
+"in size from 7 billion to 70 billion. This is a repository of 70B pretrained "
+"models. Links to other models can be found in the index at the bottom."
+msgstr ""
+"Llama 2 是一組經過預訓練和微調的生成文本模型,其規模從 70 億到 700 億個不等。"
+"這是 70B 預訓練模型的存儲庫。其他模型的鏈接可以在底部的索引中找到。"
+
+#: community/apps/setting/models_provider/impl/ollama_model_provider/ollama_model_provider.py:68
+msgid ""
+"Since the Chinese alignment of Llama2 itself is weak, we use the Chinese "
+"instruction set to fine-tune meta-llama/Llama-2-13b-chat-hf with LoRA so "
+"that it has strong Chinese conversation capabilities."
+msgstr ""
+"由於Llama2本身的中文對齊較弱,我們採用中文指令集,對meta-llama/Llama-2-13b-"
+"chat-hf進行LoRA微調,使其具備較強的中文對話能力。"
+
+#: community/apps/setting/models_provider/impl/ollama_model_provider/ollama_model_provider.py:72
+msgid ""
+"Meta Llama 3: The most capable public product LLM to date. 8 billion "
+"parameters."
+msgstr "Meta Llama 3:迄今爲止最有能力的公開產品LLM。80億參數。"
+
+#: community/apps/setting/models_provider/impl/ollama_model_provider/ollama_model_provider.py:76
+msgid ""
+"Meta Llama 3: The most capable public product LLM to date. 70 billion "
+"parameters."
+msgstr "Meta Llama 3:迄今爲止最有能力的公開產品LLM。700億參數。"
+
+#: community/apps/setting/models_provider/impl/ollama_model_provider/ollama_model_provider.py:80
+msgid ""
+"Compared with previous versions, qwen 1.5 0.5b has significantly enhanced "
+"the model's alignment with human preferences and its multi-language "
+"processing capabilities. Models of all sizes support a context length of "
+"32768 tokens. 500 million parameters."
+msgstr ""
+"qwen 1.5 0.5b 相較於以往版本,模型與人類偏好的對齊程度以及多語言處理能力上有"
+"顯著增強。所有規模的模型都支持32768個tokens的上下文長度。5億參數。"
+
+#: community/apps/setting/models_provider/impl/ollama_model_provider/ollama_model_provider.py:84
+msgid ""
+"Compared with previous versions, qwen 1.5 1.8b has significantly enhanced "
+"the model's alignment with human preferences and its multi-language "
+"processing capabilities. Models of all sizes support a context length of "
+"32768 tokens. 1.8 billion parameters."
+msgstr ""
+"qwen 1.5 1.8b 相較於以往版本,模型與人類偏好的對齊程度以及多語言處理能力上有"
+"顯著增強。所有規模的模型都支持32768個tokens的上下文長度。18億參數。"
+
+#: community/apps/setting/models_provider/impl/ollama_model_provider/ollama_model_provider.py:88
+msgid ""
+"Compared with previous versions, qwen 1.5 4b has significantly enhanced the "
+"model's alignment with human preferences and its multi-language processing "
+"capabilities. Models of all sizes support a context length of 32768 tokens. "
+"4 billion parameters."
+msgstr ""
+"qwen 1.5 4b 相較於以往版本,模型與人類偏好的對齊程度以及多語言處理能力上有顯"
+"著增強。所有規模的模型都支持32768個tokens的上下文長度。40億參數。"
+
+#: community/apps/setting/models_provider/impl/ollama_model_provider/ollama_model_provider.py:93
+msgid ""
+"Compared with previous versions, qwen 1.5 7b has significantly enhanced the "
+"model's alignment with human preferences and its multi-language processing "
+"capabilities. Models of all sizes support a context length of 32768 tokens. "
+"7 billion parameters."
+msgstr ""
+"qwen 1.5 7b 相較於以往版本,模型與人類偏好的對齊程度以及多語言處理能力上有顯"
+"著增強。所有規模的模型都支持32768個tokens的上下文長度。70億參數。"
+
+#: community/apps/setting/models_provider/impl/ollama_model_provider/ollama_model_provider.py:97
+msgid ""
+"Compared with previous versions, qwen 1.5 14b has significantly enhanced the "
+"model's alignment with human preferences and its multi-language processing "
+"capabilities. Models of all sizes support a context length of 32768 tokens. "
+"14 billion parameters."
+msgstr ""
+"qwen 1.5 14b 相較於以往版本,模型與人類偏好的對齊程度以及多語言處理能力上有顯"
+"著增強。所有規模的模型都支持32768個tokens的上下文長度。140億參數。"
+
+#: community/apps/setting/models_provider/impl/ollama_model_provider/ollama_model_provider.py:101
+msgid ""
+"Compared with previous versions, qwen 1.5 32b has significantly enhanced the "
+"model's alignment with human preferences and its multi-language processing "
+"capabilities. Models of all sizes support a context length of 32768 tokens. "
+"32 billion parameters."
+msgstr ""
+"qwen 1.5 32b 相較於以往版本,模型與人類偏好的對齊程度以及多語言處理能力上有顯"
+"著增強。所有規模的模型都支持32768個tokens的上下文長度。320億參數。"
+
+#: community/apps/setting/models_provider/impl/ollama_model_provider/ollama_model_provider.py:105
+msgid ""
+"Compared with previous versions, qwen 1.5 72b has significantly enhanced the "
+"model's alignment with human preferences and its multi-language processing "
+"capabilities. Models of all sizes support a context length of 32768 tokens. "
+"72 billion parameters."
+msgstr ""
+"qwen 1.5 72b 相較於以往版本,模型與人類偏好的對齊程度以及多語言處理能力上有顯"
+"著增強。所有規模的模型都支持32768個tokens的上下文長度。720億參數。"
+
+#: community/apps/setting/models_provider/impl/ollama_model_provider/ollama_model_provider.py:109
+msgid ""
+"Compared with previous versions, qwen 1.5 110b has significantly enhanced "
+"the model's alignment with human preferences and its multi-language "
+"processing capabilities. Models of all sizes support a context length of "
+"32768 tokens. 110 billion parameters."
+msgstr ""
+"qwen 1.5 110b 相較於以往版本,模型與人類偏好的對齊程度以及多語言處理能力上有"
+"顯著增強。所有規模的模型都支持32768個tokens的上下文長度。1100億參數。"
+
+#: community/apps/setting/models_provider/impl/ollama_model_provider/ollama_model_provider.py:153
+#: community/apps/setting/models_provider/impl/ollama_model_provider/ollama_model_provider.py:193
+msgid ""
+"Phi-3 Mini is Microsoft's 3.8B parameter, lightweight, state-of-the-art open "
+"model."
+msgstr "Phi-3 Mini是Microsoft的3.8B參數,輕量級,最先進的開放模型。"
+
+#: community/apps/setting/models_provider/impl/ollama_model_provider/ollama_model_provider.py:162
+#: community/apps/setting/models_provider/impl/ollama_model_provider/ollama_model_provider.py:197
+msgid ""
+"A high-performance open embedding model with a large token context window."
+msgstr "一個具有大 tokens 上下文窗口的高性能開放嵌入模型。"
+
+#: community/apps/setting/models_provider/impl/openai_model_provider/credential/tti.py:16
+#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/tti.py:16
+msgid ""
+"The image generation endpoint allows you to create raw images based on text "
+"prompts. When using the DALL·E 3, the image size can be 1024x1024, 1024x1792 "
+"or 1792x1024 pixels."
+msgstr ""
+"圖像生成端點允許您根據文本提示創建原始圖像。使用 DALL·E 3 時,圖像的尺寸可以"
+"爲 1024x1024、1024x1792 或 1792x1024 像素。"
+
+#: community/apps/setting/models_provider/impl/openai_model_provider/credential/tti.py:29
+#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/tti.py:29
+msgid ""
+" \n"
+"By default, images are produced in standard quality, but with DALL·E 3 you "
+"can set quality: \"hd\" to enhance detail. Square, standard quality images "
+"are generated fastest.\n"
+" "
+msgstr ""
+"默認情況下,圖像以標準質量生成,但使用 DALL·E 3 時,您可以設置質量:“hd”以增"
+"強細節。方形、標準質量的圖像生成速度最快。"
+
+#: community/apps/setting/models_provider/impl/openai_model_provider/credential/tti.py:44
+#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/tti.py:44
+msgid ""
+"You can use DALL·E 3 to request 1 image at a time (requesting more images by "
+"issuing parallel requests), or use DALL·E 2 with the n parameter to request "
+"up to 10 images at a time."
+msgstr ""
+"您可以使用 DALL·E 3 一次請求 1 個圖像(通過發出並行請求來請求更多圖像),或者"
+"使用帶有 n 參數的 DALL·E 2 一次最多請求 10 個圖像。"
+
+#: community/apps/setting/models_provider/impl/openai_model_provider/openai_model_provider.py:35
+#: community/apps/setting/models_provider/impl/openai_model_provider/openai_model_provider.py:119
+#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/siliconCloud_model_provider.py:111
+msgid "The latest gpt-3.5-turbo, updated with OpenAI adjustments"
+msgstr "最新的gpt-3.5-turbo,隨OpenAI調整而更新"
+
+#: community/apps/setting/models_provider/impl/openai_model_provider/openai_model_provider.py:38
+msgid "Latest gpt-4, updated with OpenAI adjustments"
+msgstr "最新的gpt-4,隨OpenAI調整而更新"
+
+#: community/apps/setting/models_provider/impl/openai_model_provider/openai_model_provider.py:40
+#: community/apps/setting/models_provider/impl/openai_model_provider/openai_model_provider.py:99
+msgid ""
+"The latest GPT-4o, cheaper and faster than gpt-4-turbo, updated with OpenAI "
+"adjustments"
+msgstr "最新的GPT-4o,比gpt-4-turbo更便宜、更快,隨OpenAI調整而更新"
+
+#: community/apps/setting/models_provider/impl/openai_model_provider/openai_model_provider.py:43
+#: community/apps/setting/models_provider/impl/openai_model_provider/openai_model_provider.py:102
+msgid ""
+"The latest gpt-4o-mini, cheaper and faster than gpt-4o, updated with OpenAI "
+"adjustments"
+msgstr "最新的gpt-4o-mini,比gpt-4o更便宜、更快,隨OpenAI調整而更新"
+
+#: community/apps/setting/models_provider/impl/openai_model_provider/openai_model_provider.py:46
+msgid "The latest gpt-4-turbo, updated with OpenAI adjustments"
+msgstr "最新的gpt-4-turbo,隨OpenAI調整而更新"
+
+#: community/apps/setting/models_provider/impl/openai_model_provider/openai_model_provider.py:49
+msgid "The latest gpt-4-turbo-preview, updated with OpenAI adjustments"
+msgstr "最新的gpt-4-turbo-preview,隨OpenAI調整而更新"
+
+#: community/apps/setting/models_provider/impl/openai_model_provider/openai_model_provider.py:53
+msgid ""
+"gpt-3.5-turbo snapshot on January 25, 2024, supporting context length 16,385 "
+"tokens"
+msgstr "2024年1月25日的gpt-3.5-turbo快照,支持上下文長度16,385 tokens"
+
+#: community/apps/setting/models_provider/impl/openai_model_provider/openai_model_provider.py:57
+msgid ""
+"gpt-3.5-turbo snapshot on November 6, 2023, supporting context length 16,385 "
+"tokens"
+msgstr "2023年11月6日的gpt-3.5-turbo快照,支持上下文長度16,385 tokens"
+
+#: community/apps/setting/models_provider/impl/openai_model_provider/openai_model_provider.py:61
+msgid ""
+"[Legacy] gpt-3.5-turbo snapshot on June 13, 2023, will be deprecated on June "
+"13, 2024"
+msgstr "[Legacy] 2023年6月13日的gpt-3.5-turbo快照,將於2024年6月13日棄用"
+
+#: community/apps/setting/models_provider/impl/openai_model_provider/openai_model_provider.py:65
+msgid ""
+"gpt-4o snapshot on May 13, 2024, supporting context length 128,000 tokens"
+msgstr "2024年5月13日的gpt-4o快照,支持上下文長度128,000 tokens"
+
+#: community/apps/setting/models_provider/impl/openai_model_provider/openai_model_provider.py:69
+msgid ""
+"gpt-4-turbo snapshot on April 9, 2024, supporting context length 128,000 "
+"tokens"
+msgstr "2024年4月9日的gpt-4-turbo快照,支持上下文長度128,000 tokens"
+
+#: community/apps/setting/models_provider/impl/openai_model_provider/openai_model_provider.py:72
+msgid ""
+"gpt-4-turbo snapshot on January 25, 2024, supporting context length 128,000 "
+"tokens"
+msgstr "2024年1月25日的gpt-4-turbo快照,支持上下文長度128,000 tokens"
+
+#: community/apps/setting/models_provider/impl/openai_model_provider/openai_model_provider.py:75
+msgid ""
+"gpt-4-turbo snapshot on November 6, 2023, supporting context length 128,000 "
+"tokens"
+msgstr "2023年11月6日的gpt-4-turbo快照,支持上下文長度128,000 tokens"
+
+#: community/apps/setting/models_provider/impl/qwen_model_provider/qwen_model_provider.py:63
+msgid "Tongyi Qianwen"
+msgstr "通義千問"
+
+#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/model/reranker.py:46
+msgid "Please provide server URL"
+msgstr "請提供服務器URL"
+
+#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/model/reranker.py:49
+msgid "Please provide the model"
+msgstr "請提供模型"
+
+#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/model/reranker.py:52
+msgid "Please provide the API Key"
+msgstr "請提供API金鑰"
+
+#: community/apps/setting/models_provider/impl/tencent_cloud_model_provider/tencent_cloud_model_provider.py:58
+msgid "Tencent Cloud"
+msgstr "騰訊雲"
+
+#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/llm.py:41
+#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:88
+#, python-brace-format
+msgid "{keys} is required"
+msgstr "{keys} 是必填項"
+
+#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:14
+msgid "painting style"
+msgstr "繪畫風格"
+
+#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:14
+msgid "If not passed, the default value is 201 (Japanese anime style)"
+msgstr "如果未傳遞,則默認值爲201(日本動漫風格)"
+
+#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:18
+msgid "Not limited to style"
+msgstr "不限於風格"
+
+#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:19
+msgid "ink painting"
+msgstr "水墨畫"
+
+#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:20
+msgid "concept art"
+msgstr "概念藝術"
+
+#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:21
+msgid "Oil painting 1"
+msgstr "油畫1"
+
+#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:22
+msgid "Oil Painting 2 (Van Gogh)"
+msgstr "油畫2(梵高)"
+
+#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:23
+msgid "watercolor painting"
+msgstr "水彩畫"
+
+#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:24
+msgid "pixel art"
+msgstr "像素畫"
+
+#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:25
+msgid "impasto style"
+msgstr "厚塗風格"
+
+#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:26
+msgid "illustration"
+msgstr "插圖"
+
+#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:27
+msgid "paper cut style"
+msgstr "剪紙風格"
+
+#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:28
+msgid "Impressionism 1 (Monet)"
+msgstr "印象派1(莫奈)"
+
+#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:29
+msgid "Impressionism 2"
+msgstr "印象派2"
+
+#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:31
+msgid "classical portraiture"
+msgstr "古典肖像畫"
+
+#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:32
+msgid "black and white sketch"
+msgstr "黑白素描畫"
+
+#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:33
+msgid "cyberpunk"
+msgstr "賽博朋克"
+
+#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:34
+msgid "science fiction style"
+msgstr "科幻風格"
+
+#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:35
+msgid "dark style"
+msgstr "暗黑風格"
+
+#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:37
+msgid "vaporwave"
+msgstr "蒸汽波"
+
+#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:38
+msgid "Japanese animation"
+msgstr "日系動漫"
+
+#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:39
+msgid "monster style"
+msgstr "怪獸風格"
+
+#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:40
+msgid "Beautiful ancient style"
+msgstr "唯美古風"
+
+#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:41
+msgid "retro anime"
+msgstr "復古動漫"
+
+#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:42
+msgid "Game cartoon hand drawing"
+msgstr "遊戲卡通手繪"
+
+#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:43
+msgid "Universal realistic style"
+msgstr "通用寫實風格"
+
+#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:50
+msgid "Generate image resolution"
+msgstr "生成圖像分辨率"
+
+#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:50
+msgid "If not transmitted, the default value is 768:768."
+msgstr "不傳默認使用768:768。"
+
+#: community/apps/setting/models_provider/impl/tencent_model_provider/tencent_model_provider.py:38
+msgid ""
+"The most effective version of the current hybrid model, the trillion-level "
+"parameter scale MOE-32K long article model. Reaching the absolute leading "
+"level on various benchmarks, with complex instructions and reasoning, "
+"complex mathematical capabilities, support for function call, and "
+"application focus optimization in fields such as multi-language translation, "
+"finance, law, and medical care"
+msgstr ""
+"當前混元模型中效果最優版本,萬億級參數規模 MOE-32K 長文模型。在各種 "
+"benchmark 上達到絕對領先的水平,複雜指令和推理,具備複雜數學能力,支持 "
+"functioncall,在多語言翻譯、金融法律醫療等領域應用重點優化"
+
+#: community/apps/setting/models_provider/impl/tencent_model_provider/tencent_model_provider.py:45
+msgid ""
+"A better routing strategy is adopted to simultaneously alleviate the "
+"problems of load balancing and expert convergence. For long articles, the "
+"needle-in-a-haystack index reaches 99.9%"
+msgstr ""
+"採用更優的路由策略,同時緩解了負載均衡和專家趨同的問題。長文方面,大海撈針指"
+"標達到99.9%"
+
+#: community/apps/setting/models_provider/impl/tencent_model_provider/tencent_model_provider.py:51
+msgid ""
+"Upgraded to MOE structure, the context window is 256k, leading many open "
+"source models in multiple evaluation sets such as NLP, code, mathematics, "
+"industry, etc."
+msgstr ""
+"升級爲 MOE 結構,上下文窗口爲 256k ,在 NLP,代碼,數學,行業等多項評測集上領"
+"先衆多開源模型"
+
+#: community/apps/setting/models_provider/impl/tencent_model_provider/tencent_model_provider.py:57
+msgid ""
+"Hunyuan's latest version of the role-playing model, a role-playing model "
+"launched by Hunyuan's official fine-tuning training, is based on the Hunyuan "
+"model combined with the role-playing scene data set for additional training, "
+"and has better basic effects in role-playing scenes."
+msgstr ""
+"混元最新版角色扮演模型,混元官方精調訓練推出的角色扮演模型,基於混元模型結合"
+"角色扮演場景數據集進行增訓,在角色扮演場景具有更好的基礎效果"
+
+#: community/apps/setting/models_provider/impl/tencent_model_provider/tencent_model_provider.py:63
+msgid ""
+"Hunyuan's latest MOE architecture FunctionCall model has been trained with "
+"high-quality FunctionCall data and has a context window of 32K, leading in "
+"multiple dimensions of evaluation indicators."
+msgstr ""
+"混元最新 MOE 架構 FunctionCall 模型,經過高質量的 FunctionCall 數據訓練,上下"
+"文窗口達 32K,在多個維度的評測指標上處於領先。"
+
+#: community/apps/setting/models_provider/impl/tencent_model_provider/tencent_model_provider.py:69
+msgid ""
+"Hunyuan's latest code generation model, after training the base model with "
+"200B high-quality code data, and iterating on high-quality SFT data for half "
+"a year, the context long window length has been increased to 8K, and it "
+"ranks among the top in the automatic evaluation indicators of code "
+"generation in the five major languages; the five major languages In the "
+"manual high-quality evaluation of 10 comprehensive code tasks that consider "
+"all aspects, the performance is in the first echelon."
+msgstr ""
+"混元最新代碼生成模型,經過 200B 高質量代碼數據增訓基座模型,迭代半年高質量 "
+"SFT 數據訓練,上下文長窗口長度增大到 8K,五大語言代碼生成自動評測指標上位居前"
+"列;五大語言10項考量各方面綜合代碼任務人工高質量評測上,性能處於第一梯隊"
+
+#: community/apps/setting/models_provider/impl/tencent_model_provider/tencent_model_provider.py:77
+msgid ""
+"Tencent's Hunyuan Embedding interface can convert text into high-quality "
+"vector data. The vector dimension is 1024 dimensions."
+msgstr ""
+"騰訊混元 Embedding 接口,可以將文本轉化爲高質量的向量數據。向量維度爲1024維。"
+
+#: community/apps/setting/models_provider/impl/tencent_model_provider/tencent_model_provider.py:87
+msgid "Mixed element visual model"
+msgstr "混元視覺模型"
+
+#: community/apps/setting/models_provider/impl/tencent_model_provider/tencent_model_provider.py:94
+msgid "Hunyuan graph model"
+msgstr "混元生圖模型"
+
+#: community/apps/setting/models_provider/impl/tencent_model_provider/tencent_model_provider.py:125
+msgid "Tencent Hunyuan"
+msgstr "騰訊混元"
+
+#: community/apps/setting/models_provider/impl/vllm_model_provider/vllm_model_provider.py:24
+#: community/apps/setting/models_provider/impl/vllm_model_provider/vllm_model_provider.py:42
+msgid "Facebook’s 125M parameter model"
+msgstr "Facebook的125M參數模型"
+
+#: community/apps/setting/models_provider/impl/vllm_model_provider/vllm_model_provider.py:25
+msgid "BAAI’s 7B parameter model"
+msgstr "BAAI的7B參數模型"
+
+#: community/apps/setting/models_provider/impl/vllm_model_provider/vllm_model_provider.py:26
+msgid "BAAI’s 13B parameter mode"
+msgstr "BAAI的13B參數模型"
+
+#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/tti.py:16
+msgid ""
+"If the gap between width, height and 512 is too large, the picture rendering "
+"effect will be poor and the probability of excessive delay will increase "
+"significantly. Recommended ratio and corresponding width and height before "
+"super score: width*height"
+msgstr ""
+"寬、高與512差距過大,則出圖效果不佳、延遲過長概率顯著增加。超分前建議比例及對"
+"應寬高:width*height"
+
+#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/tts.py:23
+#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/tts.py:29
+msgid "Universal female voice"
+msgstr "通用女聲"
+
+#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/tts.py:25
+msgid "Supernatural timbre-ZiZi 2.0"
+msgstr "超自然音色-梓梓2.0"
+
+#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/tts.py:26
+msgid "Supernatural timbre-ZiZi"
+msgstr "超自然音色-梓梓"
+
+#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/tts.py:27
+msgid "Supernatural sound-Ranran 2.0"
+msgstr "超自然音色-燃燃2.0"
+
+#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/tts.py:28
+msgid "Supernatural sound-Ranran"
+msgstr "超自然音色-燃燃"
+
+#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/tts.py:30
+msgid "Universal male voice"
+msgstr "通用男聲"
+
+#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/tts.py:33
+msgid "[0.2,3], the default is 1, usually one decimal place is enough"
+msgstr "[0.2,3],默認爲1,通常保留一位小數即可"
+
+#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/volcanic_engine_model_provider.py:39
+#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/volcanic_engine_model_provider.py:44
+#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/volcanic_engine_model_provider.py:88
+msgid ""
+"The user goes to the model inference page of Volcano Ark to create an "
+"inference access point. Here, you need to enter ep-xxxxxxxxxx-yyyy to call "
+"it."
+msgstr ""
+"用戶前往火山方舟的模型推理頁面創建推理接入點,這裏需要輸入ep-xxxxxxxxxx-yyyy"
+"進行調用"
+
+#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/volcanic_engine_model_provider.py:59
+msgid "Universal 2.0-Vincent Diagram"
+msgstr "通用2.0-文生圖"
+
+#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/volcanic_engine_model_provider.py:64
+msgid "Universal 2.0Pro-Vincent Chart"
+msgstr "通用2.0Pro-文生圖"
+
+#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/volcanic_engine_model_provider.py:69
+msgid "Universal 1.4-Vincent Chart"
+msgstr "通用1.4-文生圖"
+
+#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/volcanic_engine_model_provider.py:74
+msgid "Animation 1.3.0-Vincent Picture"
+msgstr "動漫1.3.0-文生圖"
+
+#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/volcanic_engine_model_provider.py:79
+msgid "Animation 1.3.1-Vincent Picture"
+msgstr "動漫1.3.1-文生圖"
+
+#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/volcanic_engine_model_provider.py:113
+msgid "volcano engine"
+msgstr "火山引擎"
+
+#: community/apps/setting/models_provider/impl/wenxin_model_provider/credential/llm.py:51
+#, python-brace-format
+msgid "{model_name} The model does not support"
+msgstr "{model_name} 模型不支持"
+
+#: community/apps/setting/models_provider/impl/wenxin_model_provider/wenxin_model_provider.py:24
+#: community/apps/setting/models_provider/impl/wenxin_model_provider/wenxin_model_provider.py:53
+msgid ""
+"ERNIE-Bot-4 is a large language model independently developed by Baidu. It "
+"covers massive Chinese data and has stronger capabilities in dialogue Q&A, "
+"content creation and generation."
+msgstr ""
+"ERNIE-Bot-4是百度自行研發的大語言模型,覆蓋海量中文數據,具有更強的對話問答、"
+"內容創作生成等能力。"
+
+#: community/apps/setting/models_provider/impl/wenxin_model_provider/wenxin_model_provider.py:27
+msgid ""
+"ERNIE-Bot is a large language model independently developed by Baidu. It "
+"covers massive Chinese data and has stronger capabilities in dialogue Q&A, "
+"content creation and generation."
+msgstr ""
+"ERNIE-Bot是百度自行研發的大語言模型,覆蓋海量中文數據,具有更強的對話問答、內"
+"容創作生成等能力。"
+
+#: community/apps/setting/models_provider/impl/wenxin_model_provider/wenxin_model_provider.py:30
+msgid ""
+"ERNIE-Bot-turbo is a large language model independently developed by Baidu. "
+"It covers massive Chinese data, has stronger capabilities in dialogue Q&A, "
+"content creation and generation, and has a faster response speed."
+msgstr ""
+"ERNIE-Bot-turbo是百度自行研發的大語言模型,覆蓋海量中文數據,具有更強的對話問"
+"答、內容創作生成等能力,響應速度更快。"
+
+#: community/apps/setting/models_provider/impl/wenxin_model_provider/wenxin_model_provider.py:33
+msgid ""
+"BLOOMZ-7B is a well-known large language model in the industry. It was "
+"developed and open sourced by BigScience and can output text in 46 languages "
+"and 13 programming languages."
+msgstr ""
+"BLOOMZ-7B是業內知名的大語言模型,由BigScience研發並開源,能夠以46種語言和13種"
+"編程語言輸出文本。"
+
+#: community/apps/setting/models_provider/impl/wenxin_model_provider/wenxin_model_provider.py:39
+msgid ""
+"Llama-2-13b-chat was developed by Meta AI and is open source. It performs "
+"well in scenarios such as coding, reasoning and knowledge application. "
+"Llama-2-13b-chat is a native open source version with balanced performance "
+"and effect, suitable for conversation scenarios."
+msgstr ""
+"Llama-2-13b-chat由Meta AI研發並開源,在編碼、推理及知識應用等場景表現優秀,"
+"Llama-2-13b-chat是性能與效果均衡的原生開源版本,適用於對話場景。"
+
+#: community/apps/setting/models_provider/impl/wenxin_model_provider/wenxin_model_provider.py:42
+msgid ""
+"Llama-2-70b-chat was developed by Meta AI and is open source. It performs "
+"well in scenarios such as coding, reasoning, and knowledge application. "
+"Llama-2-70b-chat is a native open source version with high-precision effects."
+msgstr ""
+"Llama-2-70b-chat由Meta AI研發並開源,在編碼、推理及知識應用等場景表現優秀,"
+"Llama-2-70b-chat是高精度效果的原生開源版本。"
+
+#: community/apps/setting/models_provider/impl/wenxin_model_provider/wenxin_model_provider.py:45
+msgid ""
+"The Chinese enhanced version developed by the Qianfan team based on "
+"Llama-2-7b has performed well on Chinese knowledge bases such as CMMLU and C-"
+"EVAL."
+msgstr ""
+"千帆團隊在Llama-2-7b基礎上的中文增強版本,在CMMLU、C-EVAL等中文知識庫上表現優"
+"異。"
+
+#: community/apps/setting/models_provider/impl/wenxin_model_provider/wenxin_model_provider.py:49
+msgid ""
+"Embedding-V1 is a text representation model based on Baidu Wenxin large "
+"model technology. It can convert text into a vector form represented by "
+"numerical values and can be used in text retrieval, information "
+"recommendation, knowledge mining and other scenarios. Embedding-V1 provides "
+"the Embeddings interface, which can generate corresponding vector "
+"representations based on input content. You can call this interface to input "
+"text into the model and obtain the corresponding vector representation for "
+"subsequent text processing and analysis."
+msgstr ""
+"Embedding-V1是一個基於百度文心大模型技術的文本表示模型,可以將文本轉化爲用數"
+"值表示的向量形式,用於文本檢索、信息推薦、知識挖掘等場景。 Embedding-V1提供了"
+"Embeddings接口,可以根據輸入內容生成對應的向量表示。您可以通過調用該接口,將"
+"文本輸入到模型中,獲取到對應的向量表示,從而進行後續的文本處理和分析。"
+
+#: community/apps/setting/models_provider/impl/wenxin_model_provider/wenxin_model_provider.py:66
+msgid "Thousand sails large model"
+msgstr "千帆大模型"
+
+#: community/apps/setting/models_provider/impl/xf_model_provider/credential/image.py:42
+msgid "Please outline this picture"
+msgstr "請描述這張圖片"
+
+#: community/apps/setting/models_provider/impl/xf_model_provider/credential/tts.py:15
+msgid "Speaker"
+msgstr "發音人"
+
+#: community/apps/setting/models_provider/impl/xf_model_provider/credential/tts.py:16
+msgid ""
+"Speaker, optional value: Please go to the console to add a trial or purchase "
+"speaker. After adding, the speaker parameter value will be displayed."
+msgstr ""
+"發音人,可選值:請到控制檯添加試用或購買發音人,添加後即顯示發音人參數值"
+
+#: community/apps/setting/models_provider/impl/xf_model_provider/credential/tts.py:21
+msgid "iFlytek Xiaoyan"
+msgstr "訊飛小燕"
+
+#: community/apps/setting/models_provider/impl/xf_model_provider/credential/tts.py:22
+msgid "iFlytek Xujiu"
+msgstr "訊飛許久"
+
+#: community/apps/setting/models_provider/impl/xf_model_provider/credential/tts.py:23
+msgid "iFlytek Xiaoping"
+msgstr "訊飛小萍"
+
+#: community/apps/setting/models_provider/impl/xf_model_provider/credential/tts.py:24
+msgid "iFlytek Xiaojing"
+msgstr "訊飛小婧"
+
+#: community/apps/setting/models_provider/impl/xf_model_provider/credential/tts.py:25
+msgid "iFlytek Xuxiaobao"
+msgstr "訊飛許小寶"
+
+#: community/apps/setting/models_provider/impl/xf_model_provider/credential/tts.py:28
+msgid "Speech speed, optional value: [0-100], default is 50"
+msgstr "語速,可選值:[0-100],默認爲50"
+
+#: community/apps/setting/models_provider/impl/xf_model_provider/xf_model_provider.py:39
+#: community/apps/setting/models_provider/impl/xf_model_provider/xf_model_provider.py:50
+msgid "Chinese and English recognition"
+msgstr "中英文識別"
+
+#: community/apps/setting/models_provider/impl/xf_model_provider/xf_model_provider.py:66
+msgid "iFlytek Spark"
+msgstr "訊飛星火"
+
+#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/tti.py:15
+msgid ""
+"The image generation endpoint allows you to create raw images based on text "
+"prompts. The dimensions of the image can be 1024x1024, 1024x1792, or "
+"1792x1024 pixels."
+msgstr ""
+"圖像生成端點允許您根據文本提示創建原始圖像。圖像的尺寸可以爲 1024x1024、"
+"1024x1792 或 1792x1024 像素。"
+
+#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/tti.py:29
+msgid ""
+"By default, images are generated in standard quality, you can set quality: "
+"\"hd\" to enhance detail. Square, standard quality images are generated "
+"fastest."
+msgstr ""
+"默認情況下,圖像以標準質量生成,您可以設置質量:“hd”以增強細節。方形、標準質"
+"量的圖像生成速度最快。"
+
+#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/tti.py:42
+msgid ""
+"You can request 1 image at a time (requesting more images by making parallel "
+"requests), or up to 10 images at a time using the n parameter."
+msgstr ""
+"您可以一次請求 1 個圖像(通過發出並行請求來請求更多圖像),或者使用 n 參數一"
+"次最多請求 10 個圖像。"
+
+#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/tts.py:20
+msgid "Chinese female"
+msgstr "中文女"
+
+#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/tts.py:21
+msgid "Chinese male"
+msgstr "中文男"
+
+#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/tts.py:22
+msgid "Japanese male"
+msgstr "日語男"
+
+#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/tts.py:23
+msgid "Cantonese female"
+msgstr "粵語女"
+
+#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/tts.py:24
+msgid "English female"
+msgstr "英文女"
+
+#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/tts.py:25
+msgid "English male"
+msgstr "英文男"
+
+#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/tts.py:26
+msgid "Korean female"
+msgstr "韓語女"
+
+#: community/apps/setting/models_provider/impl/xinference_model_provider/xinference_model_provider.py:37
+msgid ""
+"Code Llama is a language model specifically designed for code generation."
+msgstr "Code Llama 是一個專門用於代碼生成的語言模型。"
+
+#: community/apps/setting/models_provider/impl/xinference_model_provider/xinference_model_provider.py:44
+msgid ""
+" \n"
+"Code Llama Instruct is a fine-tuned version of Code Llama's instructions, "
+"designed to perform specific tasks.\n"
+" "
+msgstr ""
+"Code Llama Instruct 是 Code Llama 的指令微調版本,專爲執行特定任務而設計。"
+
+#: community/apps/setting/models_provider/impl/xinference_model_provider/xinference_model_provider.py:53
+msgid ""
+"Code Llama Python is a language model specifically designed for Python code "
+"generation."
+msgstr "Code Llama Python 是一個專門用於 Python 代碼生成的語言模型。"
+
+#: community/apps/setting/models_provider/impl/xinference_model_provider/xinference_model_provider.py:60
+msgid ""
+"CodeQwen 1.5 is a language model for code generation with high performance."
+msgstr "CodeQwen 1.5 是一個用於代碼生成的語言模型,具有較高的性能。"
+
+#: community/apps/setting/models_provider/impl/xinference_model_provider/xinference_model_provider.py:67
+msgid "CodeQwen 1.5 Chat is a chat model version of CodeQwen 1.5."
+msgstr "CodeQwen 1.5 Chat 是一個聊天模型版本的 CodeQwen 1.5。"
+
+#: community/apps/setting/models_provider/impl/xinference_model_provider/xinference_model_provider.py:74
+msgid "Deepseek is a large-scale language model with 13 billion parameters."
+msgstr "Deepseek 是一個擁有130億參數的大規模語言模型。"
+
+#: community/apps/setting/models_provider/impl/zhipu_model_provider/credential/tti.py:16
+msgid ""
+"Image size, only cogview-3-plus supports this parameter. Optional range: "
+"[1024x1024,768x1344,864x1152,1344x768,1152x864,1440x720,720x1440], the "
+"default is 1024x1024."
+msgstr ""
+"圖片尺寸,僅 cogview-3-plus 支持該參數。可選範圍:"
+"[1024x1024,768x1344,864x1152,1344x768,1152x864,1440x720,720x1440],默認是"
+"1024x1024。"
+
+#: community/apps/setting/models_provider/impl/zhipu_model_provider/zhipu_model_provider.py:34
+msgid ""
+"Have strong multi-modal understanding capabilities. Able to understand up to "
+"five images simultaneously and supports video content understanding"
+msgstr "具有強大的多模態理解能力。能夠同時理解多達五張圖像,並支持視頻內容理解"
+
+#: community/apps/setting/models_provider/impl/zhipu_model_provider/zhipu_model_provider.py:37
+msgid ""
+"Focus on single picture understanding. Suitable for scenarios requiring "
+"efficient image analysis"
+msgstr "專注於單圖理解。適用於需要高效圖像解析的場景"
+
+#: community/apps/setting/models_provider/impl/zhipu_model_provider/zhipu_model_provider.py:40
+msgid ""
+"Focus on single picture understanding. Suitable for scenarios requiring "
+"efficient image analysis (free)"
+msgstr "專注於單圖理解。適用於需要高效圖像解析的場景(免費)"
+
+#: community/apps/setting/models_provider/impl/zhipu_model_provider/zhipu_model_provider.py:46
+msgid ""
+"Quickly and accurately generate images based on user text descriptions. "
+"Resolution supports 1024x1024"
+msgstr "根據用戶文字描述快速、精準生成圖像。分辨率支持1024x1024"
+
+#: community/apps/setting/models_provider/impl/zhipu_model_provider/zhipu_model_provider.py:49
+msgid ""
+"Generate high-quality images based on user text descriptions, supporting "
+"multiple image sizes"
+msgstr "根據用戶文字描述生成高質量圖像,支持多圖片尺寸"
+
+#: community/apps/setting/models_provider/impl/zhipu_model_provider/zhipu_model_provider.py:52
+msgid ""
+"Generate high-quality images based on user text descriptions, supporting "
+"multiple image sizes (free)"
+msgstr "根據用戶文字描述生成高質量圖像,支持多圖片尺寸(免費)"
+
+#: community/apps/setting/models_provider/impl/zhipu_model_provider/zhipu_model_provider.py:75
+msgid "zhipu AI"
+msgstr "智譜 AI"
+
+#: community/apps/setting/serializers/model_apply_serializers.py:32
+#: community/apps/setting/serializers/model_apply_serializers.py:37
+msgid "vector text"
+msgstr "向量文本"
+
+#: community/apps/setting/serializers/model_apply_serializers.py:33
+msgid "vector text list"
+msgstr "向量文本列表"
+
+#: community/apps/setting/serializers/model_apply_serializers.py:41
+msgid "text"
+msgstr "文本"
+
+#: community/apps/setting/serializers/model_apply_serializers.py:42
+msgid "metadata"
+msgstr "元數據"
+
+#: community/apps/setting/serializers/model_apply_serializers.py:47
+msgid "query"
+msgstr "查詢"
+
+#: community/apps/setting/serializers/provider_serializers.py:79
+#: community/apps/setting/serializers/provider_serializers.py:83
+#: community/apps/setting/serializers/provider_serializers.py:130
+#: community/apps/setting/serializers/provider_serializers.py:176
+#: community/apps/setting/serializers/provider_serializers.py:190
+#: community/apps/setting/swagger_api/provide_api.py:30
+#: community/apps/setting/swagger_api/provide_api.py:54
+#: community/apps/setting/swagger_api/provide_api.py:55
+#: community/apps/setting/swagger_api/provide_api.py:87
+#: community/apps/setting/swagger_api/provide_api.py:88
+#: community/apps/setting/swagger_api/provide_api.py:170
+msgid "model name"
+msgstr "模型名稱"
+
+#: community/apps/setting/serializers/provider_serializers.py:81
+#: community/apps/setting/serializers/provider_serializers.py:132
+#: community/apps/setting/serializers/provider_serializers.py:142
+#: community/apps/setting/serializers/provider_serializers.py:180
+#: community/apps/setting/swagger_api/provide_api.py:26
+#: community/apps/setting/swagger_api/provide_api.py:51
+#: community/apps/setting/swagger_api/provide_api.py:52
+#: community/apps/setting/swagger_api/provide_api.py:84
+#: community/apps/setting/swagger_api/provide_api.py:85
+#: community/apps/setting/swagger_api/provide_api.py:134
+#: community/apps/setting/swagger_api/provide_api.py:165
+msgid "model type"
+msgstr "模型類型"
+
+#: community/apps/setting/serializers/provider_serializers.py:85
+#: community/apps/setting/serializers/provider_serializers.py:178
+#: community/apps/setting/serializers/provider_serializers.py:402
+#: community/apps/setting/swagger_api/provide_api.py:35
+#: community/apps/setting/swagger_api/provide_api.py:57
+#: community/apps/setting/swagger_api/provide_api.py:58
+#: community/apps/setting/swagger_api/provide_api.py:79
+#: community/apps/setting/swagger_api/provide_api.py:80
+#: community/apps/setting/swagger_api/provide_api.py:105
+#: community/apps/setting/swagger_api/provide_api.py:129
+#: community/apps/setting/swagger_api/provide_api.py:160
+#: community/apps/setting/swagger_api/provide_api.py:179
+msgid "provider"
+msgstr "供應商"
+
+#: community/apps/setting/serializers/provider_serializers.py:87
+#: community/apps/setting/serializers/provider_serializers.py:134
+#: community/apps/setting/serializers/provider_serializers.py:182
+msgid "permission type"
+msgstr "權限類型"
+
+#: community/apps/setting/serializers/provider_serializers.py:89
+msgid "create user"
+msgstr "創建者"
+
+#: community/apps/setting/serializers/provider_serializers.py:138
+#: community/apps/setting/serializers/provider_serializers.py:186
+msgid "permissions only supportPUBLIC|PRIVATE"
+msgstr "權限類型只支持PUBLIC|PRIVATE"
+
+#: community/apps/setting/serializers/provider_serializers.py:145
+#: community/apps/setting/serializers/provider_serializers.py:196
+msgid "certification information"
+msgstr "認證信息"
+
+#: community/apps/setting/serializers/provider_serializers.py:193
+msgid "parameter configuration"
+msgstr "參數配置"
+
+#: community/apps/setting/serializers/provider_serializers.py:202
+#, python-brace-format
+msgid "Model name【{model_name}】already exists"
+msgstr "模型名稱【{model_name}】已存在"
+
+#: community/apps/setting/serializers/system_setting.py:29
+#: community/apps/setting/swagger_api/system_setting.py:25
+#: community/apps/setting/swagger_api/system_setting.py:26
+#: community/apps/setting/swagger_api/system_setting.py:57
+#: community/apps/setting/swagger_api/system_setting.py:58
+msgid "SMTP host"
+msgstr "SMTP 主機"
+
+#: community/apps/setting/serializers/system_setting.py:30
+#: community/apps/setting/swagger_api/system_setting.py:28
+#: community/apps/setting/swagger_api/system_setting.py:29
+#: community/apps/setting/swagger_api/system_setting.py:60
+#: community/apps/setting/swagger_api/system_setting.py:61
+msgid "SMTP port"
+msgstr "SMTP 端口"
+
+#: community/apps/setting/serializers/system_setting.py:31
+#: community/apps/setting/serializers/system_setting.py:35
+#: community/apps/setting/swagger_api/system_setting.py:31
+#: community/apps/setting/swagger_api/system_setting.py:32
+#: community/apps/setting/swagger_api/system_setting.py:43
+#: community/apps/setting/swagger_api/system_setting.py:44
+#: community/apps/setting/swagger_api/system_setting.py:63
+#: community/apps/setting/swagger_api/system_setting.py:64
+#: community/apps/setting/swagger_api/system_setting.py:75
+#: community/apps/setting/swagger_api/system_setting.py:76
+msgid "Sender's email"
+msgstr "發件人郵箱"
+
+#: community/apps/setting/serializers/system_setting.py:32
+#: community/apps/setting/swagger_api/system_setting.py:34
+#: community/apps/setting/swagger_api/system_setting.py:35
+#: community/apps/setting/swagger_api/system_setting.py:66
+#: community/apps/setting/swagger_api/system_setting.py:67
+#: community/apps/users/serializers/user_serializers.py:72
+#: community/apps/users/serializers/user_serializers.py:112
+#: community/apps/users/serializers/user_serializers.py:143
+#: community/apps/users/serializers/user_serializers.py:211
+#: community/apps/users/serializers/user_serializers.py:293
+#: community/apps/users/serializers/user_serializers.py:346
+#: community/apps/users/serializers/user_serializers.py:671
+#: community/apps/users/serializers/user_serializers.py:703
+#: community/apps/users/serializers/user_serializers.py:704
+#: community/apps/users/serializers/user_serializers.py:743
+#: community/apps/users/serializers/user_serializers.py:763
+#: community/apps/users/serializers/user_serializers.py:764
+#: community/apps/users/views/user.py:109
+#: community/apps/users/views/user.py:110
+#: community/apps/users/views/user.py:111
+#: community/apps/users/views/user.py:112
+msgid "Password"
+msgstr "密碼"
+
+#: community/apps/setting/serializers/system_setting.py:33
+#: community/apps/setting/swagger_api/system_setting.py:37
+#: community/apps/setting/swagger_api/system_setting.py:38
+#: community/apps/setting/swagger_api/system_setting.py:69
+#: community/apps/setting/swagger_api/system_setting.py:70
+msgid "Whether to enable TLS"
+msgstr "是否啓用 TLS"
+
+#: community/apps/setting/serializers/system_setting.py:34
+#: community/apps/setting/swagger_api/system_setting.py:40
+#: community/apps/setting/swagger_api/system_setting.py:41
+#: community/apps/setting/swagger_api/system_setting.py:72
+#: community/apps/setting/swagger_api/system_setting.py:73
+msgid "Whether to enable SSL"
+msgstr "是否啓用 SSL"
+
+#: community/apps/setting/serializers/system_setting.py:49
+msgid "Email verification failed"
+msgstr "郵箱驗證失敗"
+
+#: community/apps/setting/serializers/team_serializers.py:43
+#: community/apps/users/serializers/user_serializers.py:70
+#: community/apps/users/serializers/user_serializers.py:111
+#: community/apps/users/serializers/user_serializers.py:136
+#: community/apps/users/serializers/user_serializers.py:209
+#: community/apps/users/serializers/user_serializers.py:470
+#: community/apps/users/serializers/user_serializers.py:493
+#: community/apps/users/serializers/user_serializers.py:518
+#: community/apps/users/serializers/user_serializers.py:519
+#: community/apps/users/serializers/user_serializers.py:581
+#: community/apps/users/serializers/user_serializers.py:627
+#: community/apps/users/serializers/user_serializers.py:628
+#: community/apps/users/serializers/user_serializers.py:663
+#: community/apps/users/serializers/user_serializers.py:700
+#: community/apps/users/serializers/user_serializers.py:701
+msgid "Username"
+msgstr "用戶名"
+
+#: community/apps/setting/serializers/team_serializers.py:44
+#: community/apps/users/serializers/user_serializers.py:131
+#: community/apps/users/serializers/user_serializers.py:210
+#: community/apps/users/serializers/user_serializers.py:226
+#: community/apps/users/serializers/user_serializers.py:256
+#: community/apps/users/serializers/user_serializers.py:287
+#: community/apps/users/serializers/user_serializers.py:343
+#: community/apps/users/serializers/user_serializers.py:356
+#: community/apps/users/serializers/user_serializers.py:438
+#: community/apps/users/serializers/user_serializers.py:471
+#: community/apps/users/serializers/user_serializers.py:494
+#: community/apps/users/serializers/user_serializers.py:520
+#: community/apps/users/serializers/user_serializers.py:582
+#: community/apps/users/serializers/user_serializers.py:629
+#: community/apps/users/serializers/user_serializers.py:658
+#: community/apps/users/serializers/user_serializers.py:702
+#: community/apps/users/serializers/user_serializers.py:713
+#: community/apps/users/serializers/user_serializers.py:734
+msgid "Email"
+msgstr "郵箱"
+
+#: community/apps/setting/serializers/team_serializers.py:47
+#: community/apps/setting/serializers/team_serializers.py:148
+#: community/apps/setting/serializers/team_serializers.py:256
+msgid "team id"
+msgstr "團隊 id"
+
+#: community/apps/setting/serializers/team_serializers.py:48
+#: community/apps/setting/serializers/team_serializers.py:254
+#: community/apps/setting/serializers/team_serializers.py:324
+msgid "member id"
+msgstr "成員 id"
+
+#: community/apps/setting/serializers/team_serializers.py:54
+msgid "use"
+msgstr "使用"
+
+#: community/apps/setting/serializers/team_serializers.py:55
+msgid "manage"
+msgstr "管理"
+
+#: community/apps/setting/serializers/team_serializers.py:60
+msgid "Operation permissions USE, MANAGE permissions"
+msgstr "操作權限 USE, MANAGE 權限"
+
+#: community/apps/setting/serializers/team_serializers.py:63
+msgid "use permission"
+msgstr "使用權限"
+
+#: community/apps/setting/serializers/team_serializers.py:64
+msgid "use permission True|False"
+msgstr "使用權限 True|False"
+
+#: community/apps/setting/serializers/team_serializers.py:66
+msgid "manage permission"
+msgstr "管理權限"
+
+#: community/apps/setting/serializers/team_serializers.py:67
+msgid "manage permission True|False"
+msgstr "管理權限 True|False"
+
+#: community/apps/setting/serializers/team_serializers.py:73
+msgid "target id"
+msgstr "目標 id"
+
+#: community/apps/setting/serializers/team_serializers.py:82
+#: community/apps/setting/serializers/team_serializers.py:83
+msgid "dataset id/application id"
+msgstr "知識庫 id/應用 id"
+
+#: community/apps/setting/serializers/team_serializers.py:105
+msgid "Non-existent application|knowledge base id["
+msgstr "應用|知識庫 id[ 不存在"
+
+#: community/apps/setting/serializers/team_serializers.py:139
+#: community/apps/setting/serializers/team_serializers.py:140
+msgid "Permission data"
+msgstr "權限數據"
+
+#: community/apps/setting/serializers/team_serializers.py:157
+#: community/apps/setting/serializers/team_serializers.py:158
+msgid "user id list"
+msgstr "用戶 id 列表"
+
+#: community/apps/setting/serializers/team_serializers.py:168
+#: community/apps/setting/serializers/team_serializers.py:169
+msgid "Username or email"
+msgstr "用戶名或郵箱"
+
+#: community/apps/setting/serializers/team_serializers.py:217
+msgid "Username or email is required"
+msgstr "用戶名或郵箱是必填項"
+
+#: community/apps/setting/serializers/team_serializers.py:221
+#: community/apps/users/serializers/user_serializers.py:800
+msgid "User does not exist"
+msgstr "用戶不存在"
+
+#: community/apps/setting/serializers/team_serializers.py:224
+msgid "The current members already exist in the team, do not add them again."
+msgstr "當前成員已存在於團隊中,無需再次添加。"
+
+#: community/apps/setting/serializers/team_serializers.py:248
+msgid "member list"
+msgstr "成員列表"
+
+#: community/apps/setting/serializers/team_serializers.py:263
+msgid "The member does not exist, please add a member first"
+msgstr "成員不存在,請先添加成員"
+
+#: community/apps/setting/serializers/team_serializers.py:297
+msgid "Administrator rights do not allow modification"
+msgstr "管理員權限不允許修改"
+
+#: community/apps/setting/serializers/team_serializers.py:311
+msgid "Unable to remove team admin"
+msgstr "不支持移除團隊管理員"
+
+#: community/apps/setting/serializers/valid_serializers.py:32
+#: community/apps/users/serializers/user_serializers.py:190
+#: community/apps/users/serializers/user_serializers.py:777
+msgid ""
+"The community version supports up to 2 users. If you need more users, please "
+"contact us (https://fit2cloud.com/)."
+msgstr ""
+"社區版最多支持 2 個用戶,如需擁有更多用戶,請聯繫我們(https://"
+"fit2cloud.com/)。"
+
+#: community/apps/setting/serializers/valid_serializers.py:41
+#: community/apps/setting/swagger_api/valid_api.py:27
+msgid "check quantity"
+msgstr "檢查數量"
+
+#: community/apps/setting/swagger_api/provide_api.py:43
+#: community/apps/setting/swagger_api/provide_api.py:44
+#: community/apps/setting/swagger_api/provide_api.py:71
+#: community/apps/setting/swagger_api/provide_api.py:72
+#: community/apps/setting/swagger_api/provide_api.py:190
+#: community/apps/setting/swagger_api/provide_api.py:191
+msgid "parameters required to call the function"
+msgstr "調用函數所需要的參數"
+
+#: community/apps/setting/swagger_api/provide_api.py:60
+#: community/apps/setting/swagger_api/provide_api.py:61
+#: community/apps/setting/swagger_api/provide_api.py:90
+#: community/apps/setting/swagger_api/provide_api.py:91
+msgid "model certificate information"
+msgstr "模型認證信息"
+
+#: community/apps/setting/swagger_api/provide_api.py:114
+#: community/apps/setting/swagger_api/provide_api.py:115
+msgid "model type description"
+msgstr "模型類型描述"
+
+#: community/apps/setting/swagger_api/provide_api.py:115
+#| msgid "Create model"
+msgid "large language model"
+msgstr "大型語言模型"
+
+#: community/apps/setting/swagger_api/provide_api.py:116
+#: community/apps/setting/swagger_api/provide_api.py:117
+#: community/apps/setting/swagger_api/provide_api.py:147
+#: community/apps/setting/swagger_api/provide_api.py:148
+msgid "model type value"
+msgstr "模型類型值"
+
+#: community/apps/setting/swagger_api/provide_api.py:145
+#: community/apps/setting/swagger_api/provide_api.py:146
+msgid "model description"
+msgstr "模型描述"
+
+#: community/apps/setting/swagger_api/provide_api.py:184
+msgid "function that needs to be executed"
+msgstr "需要執行的函數"
+
+#: community/apps/setting/swagger_api/system_setting.py:19
+#: community/apps/setting/swagger_api/system_setting.py:20
+#: community/apps/setting/swagger_api/system_setting.py:51
+#: community/apps/setting/swagger_api/system_setting.py:52
+msgid "Email related parameters"
+msgstr "郵箱相關參數"
+
+#: community/apps/setting/swagger_api/valid_api.py:22
+msgid "Verification type: application|dataset|user"
+msgstr "認證類型:application|dataset|user"
+
+#: community/apps/setting/views/Team.py:27
+#: community/apps/setting/views/Team.py:28
+msgid "Get a list of team members"
+msgstr "獲取團隊成員列表"
+
+#: community/apps/setting/views/Team.py:30
+#: community/apps/setting/views/Team.py:40
+#: community/apps/setting/views/Team.py:54
+#: community/apps/setting/views/Team.py:68
+#: community/apps/setting/views/Team.py:80
+#: community/apps/setting/views/Team.py:92
+#: community/apps/users/serializers/user_serializers.py:198
+#: community/apps/users/serializers/user_serializers.py:791
+msgid "team"
+msgstr "團隊"
+
+#: community/apps/setting/views/Team.py:37
+#: community/apps/setting/views/Team.py:38
+msgid "Add member"
+msgstr "添加成員"
+
+#: community/apps/setting/views/Team.py:51
+#: community/apps/setting/views/Team.py:52
+msgid "Add members in batches"
+msgstr "批量添加成員"
+
+#: community/apps/setting/views/Team.py:65
+#: community/apps/setting/views/Team.py:66
+msgid "Get team member permissions"
+msgstr "獲取團隊成員權限"
+
+#: community/apps/setting/views/Team.py:76
+#: community/apps/setting/views/Team.py:77
+msgid "Update team member permissions"
+msgstr "更新團隊成員權限"
+
+#: community/apps/setting/views/Team.py:89
+#: community/apps/setting/views/Team.py:90
+msgid "Remove member"
+msgstr "移除成員"
+
+#: community/apps/setting/views/model.py:30
+#: community/apps/setting/views/model.py:31
+msgid "Create model"
+msgstr "創建模型"
+
+#: community/apps/setting/views/model.py:33
+#: community/apps/setting/views/model.py:45
+#: community/apps/setting/views/model.py:57
+#: community/apps/setting/views/model.py:74
+#: community/apps/setting/views/model.py:88
+#: community/apps/setting/views/model.py:103
+#: community/apps/setting/views/model.py:114
+#: community/apps/setting/views/model.py:129
+#: community/apps/setting/views/model.py:141
+#: community/apps/setting/views/model.py:151
+#: community/apps/setting/views/model.py:170
+#: community/apps/setting/views/model.py:180
+#: community/apps/setting/views/model.py:204
+#: community/apps/setting/views/model.py:219
+#: community/apps/setting/views/model.py:239
+#: community/apps/setting/views/model.py:257
+#: community/apps/setting/views/model_apply.py:26
+#: community/apps/setting/views/model_apply.py:36
+#: community/apps/setting/views/model_apply.py:46
+msgid "model"
+msgstr "模型設置"
+
+#: community/apps/setting/views/model.py:42
+#: community/apps/setting/views/model.py:43
+msgid "Download model, trial only with Ollama platform"
+msgstr "下載模型,僅支持 Ollama 平臺試用"
+
+#: community/apps/setting/views/model.py:54
+#: community/apps/setting/views/model.py:55
+msgid "Get model list"
+msgstr "獲取模型列表"
+
+#: community/apps/setting/views/model.py:71
+#: community/apps/setting/views/model.py:73
+msgid ""
+"Query model meta information, this interface does not carry authentication "
+"information"
+msgstr "查詢模型元信息,該接口不攜帶認證信息"
+
+#: community/apps/setting/views/model.py:86
+#: community/apps/setting/views/model.py:87
+msgid "Pause model download"
+msgstr "下載模型暫停"
+
+#: community/apps/setting/views/model.py:111
+#: community/apps/setting/views/model.py:112
+msgid "Save model parameter form"
+msgstr "保存模型參數表單"
+
+#: community/apps/setting/views/model.py:126
+#: community/apps/setting/views/model.py:127
+msgid "Update model"
+msgstr "更新模型"
+
+#: community/apps/setting/views/model.py:138
+#: community/apps/setting/views/model.py:139
+msgid "Delete model"
+msgstr "刪除模型"
+
+#: community/apps/setting/views/model.py:149
+#: community/apps/setting/views/model.py:150
+msgid "Query model details"
+msgstr "查詢模型詳情"
+
+#: community/apps/setting/views/model.py:166
+#: community/apps/setting/views/model.py:167
+msgid "Call the supplier function to obtain form data"
+msgstr "調用供應商函數,獲取表單數據"
+
+#: community/apps/setting/views/model.py:178
+#: community/apps/setting/views/model.py:179
+msgid "Get a list of model suppliers"
+msgstr "獲取模型供應商列表"
+
+#: community/apps/setting/views/model.py:200
+#: community/apps/setting/views/model.py:201
+msgid "Get a list of model types"
+msgstr "獲取模型類型列表"
+
+#: community/apps/setting/views/model.py:215
+#: community/apps/setting/views/model.py:216
+#: community/apps/setting/views/model.py:236
+#: community/apps/setting/views/model.py:254
+#: community/apps/setting/views/model.py:255
+msgid "Get the model creation form"
+msgstr "獲取模型創建表單"
+
+#: community/apps/setting/views/model.py:235
+msgid "Get model default parameters"
+msgstr "獲取模型默認參數"
+
+#: community/apps/setting/views/model_apply.py:23
+#: community/apps/setting/views/model_apply.py:24
+#: community/apps/setting/views/model_apply.py:33
+#: community/apps/setting/views/model_apply.py:34
+msgid "Vectorization documentation"
+msgstr "向量化文檔"
+
+#: community/apps/setting/views/model_apply.py:43
+#: community/apps/setting/views/model_apply.py:44
+msgid "Reorder documents"
+msgstr "重排序文檔"
+
+#: community/apps/setting/views/system_setting.py:29
+#: community/apps/setting/views/system_setting.py:30
+msgid "Create or update email settings"
+msgstr "創建或更新郵箱設置"
+
+#: community/apps/setting/views/system_setting.py:31
+#: community/apps/setting/views/system_setting.py:45
+#: community/apps/setting/views/system_setting.py:57
+msgid "Email settings"
+msgstr "郵箱設置"
+
+#: community/apps/setting/views/system_setting.py:41
+#: community/apps/setting/views/system_setting.py:42
+msgid "Test email settings"
+msgstr "測試郵箱設置"
+
+#: community/apps/setting/views/system_setting.py:54
+#: community/apps/setting/views/system_setting.py:55
+msgid "Get email settings"
+msgstr "獲取郵箱設置"
+
+#: community/apps/setting/views/valid.py:26
+#: community/apps/setting/views/valid.py:27
+msgid "Get verification results"
+msgstr "獲取認證結果"
+
+#: community/apps/users/serializers/user_serializers.py:62
+#: community/apps/users/serializers/user_serializers.py:63
+msgid "System version number"
+msgstr "系統版本號"
+
+#: community/apps/users/serializers/user_serializers.py:141
+#: community/apps/users/serializers/user_serializers.py:669
+msgid "Username must be 6-20 characters long"
+msgstr "用戶名必須是 6-20 個字符長"
+
+#: community/apps/users/serializers/user_serializers.py:148
+#: community/apps/users/serializers/user_serializers.py:156
+#: community/apps/users/serializers/user_serializers.py:676
+#: community/apps/users/serializers/user_serializers.py:748
+msgid ""
+"The password must be 6-20 characters long and must be a combination of "
+"letters, numbers, and special characters."
+msgstr "密碼必須是 6-20 個字符長,且必須是字母、數字和特殊字符的組合"
+
+#: community/apps/users/serializers/user_serializers.py:151
+#: community/apps/users/serializers/user_serializers.py:212
+#: community/apps/users/serializers/user_serializers.py:213
+#: community/apps/users/serializers/user_serializers.py:300
+#: community/apps/users/serializers/user_serializers.py:347
+#: community/apps/users/serializers/user_serializers.py:348
+#: community/apps/users/serializers/user_serializers.py:749
+#: community/apps/users/serializers/user_serializers.py:765
+#: community/apps/users/serializers/user_serializers.py:766
+msgid "Confirm Password"
+msgstr "確認密碼"
+
+#: community/apps/users/serializers/user_serializers.py:158
+#: community/apps/users/serializers/user_serializers.py:214
+#: community/apps/users/serializers/user_serializers.py:215
+#: community/apps/users/serializers/user_serializers.py:229
+#: community/apps/users/serializers/user_serializers.py:257
+#: community/apps/users/serializers/user_serializers.py:258
+#: community/apps/users/serializers/user_serializers.py:291
+#: community/apps/users/serializers/user_serializers.py:344
+#: community/apps/users/serializers/user_serializers.py:345
+#: community/apps/users/views/user.py:107
+#: community/apps/users/views/user.py:108
+msgid "Verification code"
+msgstr "驗證碼"
+
+#: community/apps/users/serializers/user_serializers.py:232
+#: community/apps/users/serializers/user_serializers.py:259
+#: community/apps/users/serializers/user_serializers.py:360
+#: community/apps/users/serializers/user_serializers.py:439
+msgid "Type"
+msgstr "類型"
+
+#: community/apps/users/serializers/user_serializers.py:236
+#: community/apps/users/serializers/user_serializers.py:362
+msgid "The type only supports register|reset_password"
+msgstr "該類型僅支持 register|reset_password"
+
+#: community/apps/users/serializers/user_serializers.py:266
+msgid "Is it successful"
+msgstr "是否成功"
+
+#: community/apps/users/serializers/user_serializers.py:268
+msgid "Error message"
+msgstr "錯誤信息"
+
+#: community/apps/users/serializers/user_serializers.py:280
+msgid "language only support:"
+msgstr "語言只支持:"
+
+#: community/apps/users/serializers/user_serializers.py:298
+#: community/apps/users/serializers/user_serializers.py:305
+#: community/apps/users/serializers/user_serializers.py:754
+msgid ""
+"The confirmation password must be 6-20 characters long and must be a "
+"combination of letters, numbers, and special characters."
+msgstr "確認密碼長度 6-20 個字符,必須是字母、數字、特殊字符組合"
+
+#: community/apps/users/serializers/user_serializers.py:380
+#, python-brace-format
+msgid "Do not send emails again within {seconds} seconds"
+msgstr "{seconds} 秒內請勿重複發送郵件"
+
+#: community/apps/users/serializers/user_serializers.py:410
+msgid ""
+"The email service has not been set up. Please contact the administrator to "
+"set up the email service in [Email Settings]."
+msgstr "郵箱服務未設置,請聯繫管理員在【郵箱設置】中設置郵箱服務"
+
+#: community/apps/users/serializers/user_serializers.py:421
+#, python-brace-format
+msgid "【Intelligent knowledge base question and answer system-{action}】"
+msgstr "【智能知識庫問答系統-{action}】"
+
+#: community/apps/users/serializers/user_serializers.py:422
+#: community/apps/users/views/user.py:194
+#: community/apps/users/views/user.py:195
+msgid "User registration"
+msgstr "用戶註冊"
+
+#: community/apps/users/serializers/user_serializers.py:422
+#: community/apps/users/views/user.py:212
+#: community/apps/users/views/user.py:213
+#: community/apps/users/views/user.py:301
+#: community/apps/users/views/user.py:302
+msgid "Change password"
+msgstr "修改密碼"
+
+#: community/apps/users/serializers/user_serializers.py:474
+#: community/apps/users/serializers/user_serializers.py:475
+msgid "Permissions"
+msgstr "權限列表"
+
+#: community/apps/users/serializers/user_serializers.py:509
+#: community/apps/users/serializers/user_serializers.py:610
+#: community/apps/users/serializers/user_serializers.py:618
+msgid "Email or username"
+msgstr "郵箱或用戶名"
+
+#: community/apps/users/serializers/user_serializers.py:560
+msgid "All"
+msgstr "全部"
+
+#: community/apps/users/serializers/user_serializers.py:561
+msgid "Me"
+msgstr "我的"
+
+#: community/apps/users/serializers/user_serializers.py:583
+#: community/apps/users/serializers/user_serializers.py:680
+#: community/apps/users/serializers/user_serializers.py:705
+#: community/apps/users/serializers/user_serializers.py:719
+#: community/apps/users/serializers/user_serializers.py:736
+msgid "Phone"
+msgstr "手機號"
+
+#: community/apps/users/serializers/user_serializers.py:587
+msgid "Source"
+msgstr "來源"
+
+#: community/apps/users/serializers/user_serializers.py:588
+#: community/apps/users/serializers/user_serializers.py:678
+#: community/apps/users/serializers/user_serializers.py:706
+#: community/apps/users/serializers/user_serializers.py:717
+#: community/apps/users/serializers/user_serializers.py:735
+msgid "Name"
+msgstr "名字"
+
+#: community/apps/users/serializers/user_serializers.py:727
+msgid "Email is already in use"
+msgstr "郵箱已被使用"
+
+#: community/apps/users/serializers/user_serializers.py:808
+msgid "Unable to delete administrator"
+msgstr "不能刪除管理員"
+
+#: community/apps/users/serializers/user_serializers.py:845
+msgid "Cannot modify administrator status"
+msgstr "不能修改管理員狀態"
+
+#: community/apps/users/views/user.py:37 community/apps/users/views/user.py:38
+msgid "Get MaxKB related information"
+msgstr "獲取 MaxKB 相關信息"
+
+#: community/apps/users/views/user.py:40
+msgid "System parameters"
+msgstr "系統參數"
+
+#: community/apps/users/views/user.py:50 community/apps/users/views/user.py:51
+msgid "Get current user information"
+msgstr "獲取當前用戶信息"
+
+#: community/apps/users/views/user.py:63 community/apps/users/views/user.py:64
+msgid "Get user list"
+msgstr "獲取用戶列表"
+
+#: community/apps/users/views/user.py:67 community/apps/users/views/user.py:90
+#: community/apps/users/views/user.py:116
+#: community/apps/users/views/user.py:136
+#: community/apps/users/views/user.py:152
+#: community/apps/users/views/user.py:178
+#: community/apps/users/views/user.py:199
+#: community/apps/users/views/user.py:217
+#: community/apps/users/views/user.py:234
+#: community/apps/users/views/user.py:249
+#: community/apps/users/views/user.py:373
+msgid "User"
+msgstr "用戶"
+
+#: community/apps/users/views/user.py:79 community/apps/users/views/user.py:80
+msgid "Switch Language"
+msgstr "切換語言"
+
+#: community/apps/users/views/user.py:101
+#: community/apps/users/views/user.py:102
+msgid "Modify current user password"
+msgstr "修改當前用戶密碼"
+
+#: community/apps/users/views/user.py:125
+msgid "Failed to change password"
+msgstr "修改密碼失敗"
+
+#: community/apps/users/views/user.py:133
+#: community/apps/users/views/user.py:134
+msgid "Send email to current user"
+msgstr "給當前用戶發送郵件"
+
+#: community/apps/users/views/user.py:149
+#: community/apps/users/views/user.py:150
+msgid "Sign out"
+msgstr "登出"
+
+#: community/apps/users/views/user.py:205
+msgid "Registration successful"
+msgstr "註冊成功"
+
+#: community/apps/users/views/user.py:229
+#: community/apps/users/views/user.py:230
+msgid "Check whether the verification code is correct"
+msgstr "檢查驗證碼是否正確"
+
+#: community/apps/users/views/user.py:244
+#: community/apps/users/views/user.py:245
+msgid "Send email"
+msgstr "發送郵件"
+
+#: community/apps/users/views/user.py:262
+#: community/apps/users/views/user.py:263
+msgid "Add user"
+msgstr "添加用戶"
+
+#: community/apps/users/views/user.py:266
+#: community/apps/users/views/user.py:282
+#: community/apps/users/views/user.py:306
+#: community/apps/users/views/user.py:324
+#: community/apps/users/views/user.py:338
+#: community/apps/users/views/user.py:354
+msgid "User management"
+msgstr "用戶管理"
+
+#: community/apps/users/views/user.py:280
+#: community/apps/users/views/user.py:281
+msgid "Get user paginated list"
+msgstr "獲取用戶分頁列表"
+
+#: community/apps/users/views/user.py:320
+#: community/apps/users/views/user.py:321
+msgid "Delete user"
+msgstr "刪除用戶"
+
+#: community/apps/users/views/user.py:334
+#: community/apps/users/views/user.py:335
+msgid "Get user information"
+msgstr "獲取用戶信息"
+
+#: community/apps/users/views/user.py:349
+#: community/apps/users/views/user.py:350
+msgid "Update user information"
+msgstr "更新用戶信息"
+
+#: community/apps/users/views/user.py:369
+#: community/apps/users/views/user.py:370
+msgid "Get user list by type"
+msgstr "按類型獲取用戶列表"
+
+#~ msgid "MaxKB table template.csv"
+#~ msgstr "MaxKB表格模版.csv"
+
+#~ msgid "MaxKB table template.xlsx"
+#~ msgstr "MaxKB表格模版.xlsx"
+
+msgid "Fail"
+msgstr "失敗"
+
+msgid "Menu"
+msgstr "操作菜單"
+
+msgid "Operate"
+msgstr "操作"
+
+msgid "Operate user"
+msgstr "操作用戶"
+
+msgid "Ip Address"
+msgstr "IP地址"
+
+msgid "API Details"
+msgstr "API詳情"
+
+msgid "Operate Time"
+msgstr "操作時間"
+
+msgid "System Settings/API Key"
+msgstr "系統 API Key"
+
+msgid "Appearance Settings"
+msgstr "外觀設置"
+
+msgid "Conversation Log"
+msgstr "對話日誌"
+
+msgid "login authentication"
+msgstr "登錄驗證"
+
+msgid "Paragraph"
+msgstr "段落"
+
+msgid "Batch generate related"
+msgstr "分段生成問題"
+
+msgid "Application access"
+msgstr "應用接入"
+
+msgid "Add internal function"
+msgstr "添加內置函數"
+
+msgid "Batch generate related documents"
+msgstr "批量生成問題"
+
+msgid "No permission to use this function {name}"
+msgstr "無權使用此函數{name}"
+
+msgid "Function {name} is unavailable"
+msgstr "函數{name} 不可用"
+
+msgid "Field: {name} Type: {_type} Value: {value} Type error"
+msgstr "欄位: {name} 類型: {_type} 值: {value} 類型錯誤"
+
+msgid "Field: {name} Type: {_type} Value: {value} Unsupported types"
+msgstr "欄位: {name} 類型: {_type} 值: {value} 不支持的類型"
+
+msgid "Field: {name} No value set"
+msgstr "欄位: {name} 未設定值"
+
+msgid "Generate related"
+msgstr "生成問題"
+
+msgid "Obtain graphical captcha"
+msgstr "獲取圖形驗證碼"
+
+msgid "Captcha code error or expiration"
+msgstr "驗證碼錯誤或過期"
+
+msgid "captcha"
+msgstr "驗證碼"
\ No newline at end of file
diff --git a/apps/ops/__init__.py b/apps/ops/__init__.py
new file mode 100644
index 00000000000..a02f13af3f6
--- /dev/null
+++ b/apps/ops/__init__.py
@@ -0,0 +1,9 @@
+# coding=utf-8
+"""
+ @project: MaxKB
+ @Author:虎
+ @file: __init__.py.py
+ @date:2024/8/16 14:47
+ @desc:
+"""
+from .celery import app as celery_app
diff --git a/apps/ops/celery/__init__.py b/apps/ops/celery/__init__.py
new file mode 100644
index 00000000000..ece1714bc8b
--- /dev/null
+++ b/apps/ops/celery/__init__.py
@@ -0,0 +1,33 @@
+# -*- coding: utf-8 -*-
+
+import os
+
+from celery import Celery
+from celery.schedules import crontab
+from kombu import Exchange, Queue
+from smartdoc import settings
+from .heartbeat import *
+
+# set the default Django settings module for the 'celery' program.
+os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'smartdoc.settings')
+
+app = Celery('MaxKB')
+
+configs = {k: v for k, v in settings.__dict__.items() if k.startswith('CELERY')}
+configs['worker_concurrency'] = 5
+# Using a string here means the worker will not have to
+# pickle the object when using Windows.
+# app.config_from_object('django.conf:settings', namespace='CELERY')
+
+configs["task_queues"] = [
+ Queue("celery", Exchange("celery"), routing_key="celery"),
+ Queue("model", Exchange("model"), routing_key="model")
+]
+app.namespace = 'CELERY'
+app.conf.update(
+ {key.replace('CELERY_', '') if key.replace('CELERY_', '').lower() == key.replace('CELERY_',
+ '') else key: configs.get(
+ key) for
+ key
+ in configs.keys()})
+app.autodiscover_tasks(lambda: [app_config.split('.')[0] for app_config in settings.INSTALLED_APPS])
diff --git a/apps/ops/celery/const.py b/apps/ops/celery/const.py
new file mode 100644
index 00000000000..2f887023fb3
--- /dev/null
+++ b/apps/ops/celery/const.py
@@ -0,0 +1,4 @@
+# -*- coding: utf-8 -*-
+#
+
+CELERY_LOG_MAGIC_MARK = b'\x00\x00\x00\x00\x00'
\ No newline at end of file
diff --git a/apps/ops/celery/decorator.py b/apps/ops/celery/decorator.py
new file mode 100644
index 00000000000..317a7f7aefa
--- /dev/null
+++ b/apps/ops/celery/decorator.py
@@ -0,0 +1,104 @@
+# -*- coding: utf-8 -*-
+#
+from functools import wraps
+
+_need_registered_period_tasks = []
+_after_app_ready_start_tasks = []
+_after_app_shutdown_clean_periodic_tasks = []
+
+
+def add_register_period_task(task):
+ _need_registered_period_tasks.append(task)
+
+
+def get_register_period_tasks():
+ return _need_registered_period_tasks
+
+
+def add_after_app_shutdown_clean_task(name):
+ _after_app_shutdown_clean_periodic_tasks.append(name)
+
+
+def get_after_app_shutdown_clean_tasks():
+ return _after_app_shutdown_clean_periodic_tasks
+
+
+def add_after_app_ready_task(name):
+ _after_app_ready_start_tasks.append(name)
+
+
+def get_after_app_ready_tasks():
+ return _after_app_ready_start_tasks
+
+
+def register_as_period_task(
+ crontab=None, interval=None, name=None,
+ args=(), kwargs=None,
+ description=''):
+ """
+ Warning: Task must have not any args and kwargs
+ :param crontab: "* * * * *"
+ :param interval: 60*60*60
+ :param args: ()
+ :param kwargs: {}
+    :param description: ""
+ :param name: ""
+ :return:
+ """
+ if crontab is None and interval is None:
+ raise SyntaxError("Must set crontab or interval one")
+
+ def decorate(func):
+ if crontab is None and interval is None:
+ raise SyntaxError("Interval and crontab must set one")
+
+ # Because when this decorator run, the task was not created,
+ # So we can't use func.name
+ task = '{func.__module__}.{func.__name__}'.format(func=func)
+ _name = name if name else task
+ add_register_period_task({
+ _name: {
+ 'task': task,
+ 'interval': interval,
+ 'crontab': crontab,
+ 'args': args,
+ 'kwargs': kwargs if kwargs else {},
+ 'description': description
+ }
+ })
+
+ @wraps(func)
+ def wrapper(*args, **kwargs):
+ return func(*args, **kwargs)
+
+ return wrapper
+
+ return decorate
+
+
+def after_app_ready_start(func):
+ # Because when this decorator run, the task was not created,
+ # So we can't use func.name
+ name = '{func.__module__}.{func.__name__}'.format(func=func)
+ if name not in _after_app_ready_start_tasks:
+ add_after_app_ready_task(name)
+
+ @wraps(func)
+ def decorate(*args, **kwargs):
+ return func(*args, **kwargs)
+
+ return decorate
+
+
+def after_app_shutdown_clean_periodic(func):
+ # Because when this decorator run, the task was not created,
+ # So we can't use func.name
+ name = '{func.__module__}.{func.__name__}'.format(func=func)
+ if name not in _after_app_shutdown_clean_periodic_tasks:
+ add_after_app_shutdown_clean_task(name)
+
+ @wraps(func)
+ def decorate(*args, **kwargs):
+ return func(*args, **kwargs)
+
+ return decorate
diff --git a/apps/ops/celery/heartbeat.py b/apps/ops/celery/heartbeat.py
new file mode 100644
index 00000000000..339a3c60a6c
--- /dev/null
+++ b/apps/ops/celery/heartbeat.py
@@ -0,0 +1,25 @@
+from pathlib import Path
+
+from celery.signals import heartbeat_sent, worker_ready, worker_shutdown
+
+
+@heartbeat_sent.connect
+def heartbeat(sender, **kwargs):
+ worker_name = sender.eventer.hostname.split('@')[0]
+ heartbeat_path = Path('/tmp/worker_heartbeat_{}'.format(worker_name))
+ heartbeat_path.touch()
+
+
+@worker_ready.connect
+def worker_ready(sender, **kwargs):
+ worker_name = sender.hostname.split('@')[0]
+ ready_path = Path('/tmp/worker_ready_{}'.format(worker_name))
+ ready_path.touch()
+
+
+@worker_shutdown.connect
+def worker_shutdown(sender, **kwargs):
+ worker_name = sender.hostname.split('@')[0]
+ for signal in ['ready', 'heartbeat']:
+ path = Path('/tmp/worker_{}_{}'.format(signal, worker_name))
+ path.unlink(missing_ok=True)
diff --git a/apps/ops/celery/logger.py b/apps/ops/celery/logger.py
new file mode 100644
index 00000000000..1b2843c2b85
--- /dev/null
+++ b/apps/ops/celery/logger.py
@@ -0,0 +1,225 @@
+from logging import StreamHandler
+from threading import get_ident
+
+from celery import current_task
+from celery.signals import task_prerun, task_postrun
+from django.conf import settings
+from kombu import Connection, Exchange, Queue, Producer
+from kombu.mixins import ConsumerMixin
+
+from .utils import get_celery_task_log_path
+from .const import CELERY_LOG_MAGIC_MARK
+
+routing_key = 'celery_log'
+celery_log_exchange = Exchange('celery_log_exchange', type='direct')
+celery_log_queue = [Queue('celery_log', celery_log_exchange, routing_key=routing_key)]
+
+
+class CeleryLoggerConsumer(ConsumerMixin):
+ def __init__(self):
+ self.connection = Connection(settings.CELERY_LOG_BROKER_URL)
+
+ def get_consumers(self, Consumer, channel):
+ return [Consumer(queues=celery_log_queue,
+ accept=['pickle', 'json'],
+ callbacks=[self.process_task])
+ ]
+
+ def handle_task_start(self, task_id, message):
+ pass
+
+ def handle_task_end(self, task_id, message):
+ pass
+
+ def handle_task_log(self, task_id, msg, message):
+ pass
+
+ def process_task(self, body, message):
+ action = body.get('action')
+ task_id = body.get('task_id')
+ msg = body.get('msg')
+ if action == CeleryLoggerProducer.ACTION_TASK_LOG:
+ self.handle_task_log(task_id, msg, message)
+ elif action == CeleryLoggerProducer.ACTION_TASK_START:
+ self.handle_task_start(task_id, message)
+ elif action == CeleryLoggerProducer.ACTION_TASK_END:
+ self.handle_task_end(task_id, message)
+
+
+class CeleryLoggerProducer:
+ ACTION_TASK_START, ACTION_TASK_LOG, ACTION_TASK_END = range(3)
+
+ def __init__(self):
+ self.connection = Connection(settings.CELERY_LOG_BROKER_URL)
+
+ @property
+ def producer(self):
+ return Producer(self.connection)
+
+ def publish(self, payload):
+ self.producer.publish(
+ payload, serializer='json', exchange=celery_log_exchange,
+ declare=[celery_log_exchange], routing_key=routing_key
+ )
+
+ def log(self, task_id, msg):
+ payload = {'task_id': task_id, 'msg': msg, 'action': self.ACTION_TASK_LOG}
+ return self.publish(payload)
+
+ def read(self):
+ pass
+
+ def flush(self):
+ pass
+
+ def task_end(self, task_id):
+ payload = {'task_id': task_id, 'action': self.ACTION_TASK_END}
+ return self.publish(payload)
+
+ def task_start(self, task_id):
+ payload = {'task_id': task_id, 'action': self.ACTION_TASK_START}
+ return self.publish(payload)
+
+
+class CeleryTaskLoggerHandler(StreamHandler):
+ terminator = '\r\n'
+
+ def __init__(self, *args, **kwargs):
+ super().__init__(*args, **kwargs)
+ task_prerun.connect(self.on_task_start)
+ task_postrun.connect(self.on_start_end)
+
+ @staticmethod
+ def get_current_task_id():
+ if not current_task:
+ return
+ task_id = current_task.request.root_id
+ return task_id
+
+ def on_task_start(self, sender, task_id, **kwargs):
+ return self.handle_task_start(task_id)
+
+ def on_start_end(self, sender, task_id, **kwargs):
+ return self.handle_task_end(task_id)
+
+ def after_task_publish(self, sender, body, **kwargs):
+ pass
+
+ def emit(self, record):
+ task_id = self.get_current_task_id()
+ if not task_id:
+ return
+ try:
+ self.write_task_log(task_id, record)
+ self.flush()
+ except Exception:
+ self.handleError(record)
+
+ def write_task_log(self, task_id, msg):
+ pass
+
+ def handle_task_start(self, task_id):
+ pass
+
+ def handle_task_end(self, task_id):
+ pass
+
+
+class CeleryThreadingLoggerHandler(CeleryTaskLoggerHandler):
+ @staticmethod
+ def get_current_thread_id():
+ return str(get_ident())
+
+ def emit(self, record):
+ thread_id = self.get_current_thread_id()
+ try:
+ self.write_thread_task_log(thread_id, record)
+ self.flush()
+ except ValueError:
+ self.handleError(record)
+
+ def write_thread_task_log(self, thread_id, msg):
+ pass
+
+ def handle_task_start(self, task_id):
+ pass
+
+ def handle_task_end(self, task_id):
+ pass
+
+ def handleError(self, record) -> None:
+ pass
+
+
+class CeleryTaskMQLoggerHandler(CeleryTaskLoggerHandler):
+ def __init__(self):
+ self.producer = CeleryLoggerProducer()
+ super().__init__(stream=None)
+
+ def write_task_log(self, task_id, record):
+ msg = self.format(record)
+ self.producer.log(task_id, msg)
+
+ def flush(self):
+ self.producer.flush()
+
+
+class CeleryTaskFileHandler(CeleryTaskLoggerHandler):
+ def __init__(self, *args, **kwargs):
+ self.f = None
+ super().__init__(*args, **kwargs)
+
+ def emit(self, record):
+ msg = self.format(record)
+ if not self.f or self.f.closed:
+ return
+ self.f.write(msg)
+ self.f.write(self.terminator)
+ self.flush()
+
+ def flush(self):
+ self.f and self.f.flush()
+
+ def handle_task_start(self, task_id):
+ log_path = get_celery_task_log_path(task_id)
+ self.f = open(log_path, 'a')
+
+ def handle_task_end(self, task_id):
+ self.f and self.f.close()
+
+
+class CeleryThreadTaskFileHandler(CeleryThreadingLoggerHandler):
+ def __init__(self, *args, **kwargs):
+ self.thread_id_fd_mapper = {}
+ self.task_id_thread_id_mapper = {}
+ super().__init__(*args, **kwargs)
+
+ def write_thread_task_log(self, thread_id, record):
+ f = self.thread_id_fd_mapper.get(thread_id, None)
+ if not f:
+ raise ValueError('Not found thread task file')
+ msg = self.format(record)
+ f.write(msg.encode())
+ f.write(self.terminator.encode())
+ f.flush()
+
+ def flush(self):
+ for f in self.thread_id_fd_mapper.values():
+ f.flush()
+
+ def handle_task_start(self, task_id):
+ print('handle_task_start')
+ log_path = get_celery_task_log_path(task_id)
+ thread_id = self.get_current_thread_id()
+ self.task_id_thread_id_mapper[task_id] = thread_id
+ f = open(log_path, 'ab')
+ self.thread_id_fd_mapper[thread_id] = f
+
+ def handle_task_end(self, task_id):
+ print('handle_task_end')
+ ident_id = self.task_id_thread_id_mapper.get(task_id, '')
+ f = self.thread_id_fd_mapper.pop(ident_id, None)
+ if f and not f.closed:
+ f.write(CELERY_LOG_MAGIC_MARK)
+ f.close()
+ self.task_id_thread_id_mapper.pop(task_id, None)
diff --git a/apps/ops/celery/signal_handler.py b/apps/ops/celery/signal_handler.py
new file mode 100644
index 00000000000..46671a0d8fa
--- /dev/null
+++ b/apps/ops/celery/signal_handler.py
@@ -0,0 +1,75 @@
+# -*- coding: utf-8 -*-
+#
+import logging
+import os
+
+from celery import subtask
+from celery.signals import (
+ worker_ready, worker_shutdown, after_setup_logger, task_revoked, task_prerun
+)
+from django.core.cache import cache
+from django_celery_beat.models import PeriodicTask
+
+from .decorator import get_after_app_ready_tasks, get_after_app_shutdown_clean_tasks
+from .logger import CeleryThreadTaskFileHandler
+
+logger = logging.getLogger(__file__)
+safe_str = lambda x: x
+
+
+@worker_ready.connect
+def on_app_ready(sender=None, headers=None, **kwargs):
+ if cache.get("CELERY_APP_READY", 0) == 1:
+ return
+ cache.set("CELERY_APP_READY", 1, 10)
+ tasks = get_after_app_ready_tasks()
+ logger.debug("Work ready signal recv")
+ logger.debug("Start need start task: [{}]".format(", ".join(tasks)))
+ for task in tasks:
+ periodic_task = PeriodicTask.objects.filter(task=task).first()
+ if periodic_task and not periodic_task.enabled:
+ logger.debug("Periodic task [{}] is disabled!".format(task))
+ continue
+ subtask(task).delay()
+
+
+def delete_files(directory):
+ if os.path.isdir(directory):
+ for filename in os.listdir(directory):
+ file_path = os.path.join(directory, filename)
+ if os.path.isfile(file_path):
+ os.remove(file_path)
+
+
+@worker_shutdown.connect
+def after_app_shutdown_periodic_tasks(sender=None, **kwargs):
+ if cache.get("CELERY_APP_SHUTDOWN", 0) == 1:
+ return
+ cache.set("CELERY_APP_SHUTDOWN", 1, 10)
+ tasks = get_after_app_shutdown_clean_tasks()
+ logger.debug("Worker shutdown signal recv")
+ logger.debug("Clean period tasks: [{}]".format(', '.join(tasks)))
+ PeriodicTask.objects.filter(name__in=tasks).delete()
+
+
+@after_setup_logger.connect
+def add_celery_logger_handler(sender=None, logger=None, loglevel=None, format=None, **kwargs):
+ if not logger:
+ return
+ task_handler = CeleryThreadTaskFileHandler()
+ task_handler.setLevel(loglevel)
+ formatter = logging.Formatter(format)
+ task_handler.setFormatter(formatter)
+ logger.addHandler(task_handler)
+
+
+@task_revoked.connect
+def on_task_revoked(request, terminated, signum, expired, **kwargs):
+    logger.debug('Task revoked signal recv, terminated: %s', terminated)
+
+
+@task_prerun.connect
+def on_task_start(sender, task_id, **kwargs):
+ pass
+ # sender.update_state(state='REVOKED',
+# meta={'exc_type': 'Exception', 'exc': 'Exception', 'message': '暂停任务', 'exc_message': ''})
diff --git a/apps/ops/celery/utils.py b/apps/ops/celery/utils.py
new file mode 100644
index 00000000000..288089f6f2e
--- /dev/null
+++ b/apps/ops/celery/utils.py
@@ -0,0 +1,68 @@
+# -*- coding: utf-8 -*-
+#
+import logging
+import os
+import uuid
+
+from django.conf import settings
+from django_celery_beat.models import (
+ PeriodicTasks
+)
+
+from smartdoc.const import PROJECT_DIR
+
+logger = logging.getLogger(__file__)
+
+
+def disable_celery_periodic_task(task_name):
+ from django_celery_beat.models import PeriodicTask
+ PeriodicTask.objects.filter(name=task_name).update(enabled=False)
+ PeriodicTasks.update_changed()
+
+
+def delete_celery_periodic_task(task_name):
+ from django_celery_beat.models import PeriodicTask
+ PeriodicTask.objects.filter(name=task_name).delete()
+ PeriodicTasks.update_changed()
+
+
+def get_celery_periodic_task(task_name):
+ from django_celery_beat.models import PeriodicTask
+ task = PeriodicTask.objects.filter(name=task_name).first()
+ return task
+
+
+def make_dirs(name, mode=0o755, exist_ok=False):
+ """ 默认权限设置为 0o755 """
+ return os.makedirs(name, mode=mode, exist_ok=exist_ok)
+
+
+def get_task_log_path(base_path, task_id, level=2):
+ task_id = str(task_id)
+ try:
+ uuid.UUID(task_id)
+    except ValueError:
+ return os.path.join(PROJECT_DIR, 'data', 'caution.txt')
+
+ rel_path = os.path.join(*task_id[:level], task_id + '.log')
+ path = os.path.join(base_path, rel_path)
+ make_dirs(os.path.dirname(path), exist_ok=True)
+ return path
+
+
+def get_celery_task_log_path(task_id):
+ return get_task_log_path(settings.CELERY_LOG_DIR, task_id)
+
+
+def get_celery_status():
+ from . import app
+ i = app.control.inspect()
+ ping_data = i.ping() or {}
+ active_nodes = [k for k, v in ping_data.items() if v.get('ok') == 'pong']
+ active_queue_worker = set([n.split('@')[0] for n in active_nodes if n])
+ # Celery Worker 数量: 2
+ if len(active_queue_worker) < 2:
+        logger.warning("Not all celery workers are running")
+ return False
+ else:
+ return True
diff --git a/apps/setting/migrations/0005_model_permission_type.py b/apps/setting/migrations/0005_model_permission_type.py
new file mode 100644
index 00000000000..dba081a1965
--- /dev/null
+++ b/apps/setting/migrations/0005_model_permission_type.py
@@ -0,0 +1,46 @@
+# Generated by Django 4.2.13 on 2024-07-15 15:23
+import json
+
+from django.db import migrations, models
+from django.db.models import QuerySet
+
+from common.util.rsa_util import rsa_long_encrypt
+from setting.models import Status, PermissionType
+from smartdoc.const import CONFIG
+
+default_embedding_model_id = '42f63a3d-427e-11ef-b3ec-a8a1595801ab'
+
+
+def save_default_embedding_model(apps, schema_editor):
+ ModelModel = apps.get_model('setting', 'Model')
+ cache_folder = CONFIG.get('EMBEDDING_MODEL_PATH')
+ model_name = CONFIG.get('EMBEDDING_MODEL_NAME')
+ credential = {'cache_folder': cache_folder}
+ model_credential_str = json.dumps(credential)
+ model = ModelModel(id=default_embedding_model_id, name='maxkb-embedding', status=Status.SUCCESS,
+ model_type="EMBEDDING", model_name=model_name, user_id='f0dd8f71-e4ee-11ee-8c84-a8a1595801ab',
+ provider='model_local_provider',
+ credential=rsa_long_encrypt(model_credential_str), meta={},
+ permission_type=PermissionType.PUBLIC)
+ model.save()
+
+
+def reverse_code_embedding_model(apps, schema_editor):
+ ModelModel = apps.get_model('setting', 'Model')
+ QuerySet(ModelModel).filter(id=default_embedding_model_id).delete()
+
+
+class Migration(migrations.Migration):
+ dependencies = [
+ ('setting', '0004_alter_model_credential'),
+ ]
+
+ operations = [
+ migrations.AddField(
+ model_name='model',
+ name='permission_type',
+ field=models.CharField(choices=[('PUBLIC', '公开'), ('PRIVATE', '私有')], default='PRIVATE', max_length=20,
+ verbose_name='权限类型'),
+ ),
+ migrations.RunPython(save_default_embedding_model, reverse_code_embedding_model)
+ ]
diff --git a/apps/setting/migrations/0006_alter_model_status.py b/apps/setting/migrations/0006_alter_model_status.py
new file mode 100644
index 00000000000..209f57c94d8
--- /dev/null
+++ b/apps/setting/migrations/0006_alter_model_status.py
@@ -0,0 +1,18 @@
+# Generated by Django 4.2.14 on 2024-07-23 18:14
+
+from django.db import migrations, models
+
+
+class Migration(migrations.Migration):
+
+ dependencies = [
+ ('setting', '0005_model_permission_type'),
+ ]
+
+ operations = [
+ migrations.AlterField(
+ model_name='model',
+ name='status',
+ field=models.CharField(choices=[('SUCCESS', '成功'), ('ERROR', '失败'), ('DOWNLOAD', '下载中'), ('PAUSE_DOWNLOAD', '暂停下载')], default='SUCCESS', max_length=20, verbose_name='设置类型'),
+ ),
+ ]
diff --git a/apps/setting/migrations/0007_model_model_params_form.py b/apps/setting/migrations/0007_model_model_params_form.py
new file mode 100644
index 00000000000..fa40b660d63
--- /dev/null
+++ b/apps/setting/migrations/0007_model_model_params_form.py
@@ -0,0 +1,18 @@
+# Generated by Django 4.2.15 on 2024-10-15 14:49
+
+from django.db import migrations, models
+
+
+class Migration(migrations.Migration):
+
+ dependencies = [
+ ('setting', '0006_alter_model_status'),
+ ]
+
+ operations = [
+ migrations.AddField(
+ model_name='model',
+ name='model_params_form',
+ field=models.JSONField(default=list, verbose_name='模型参数配置'),
+ ),
+ ]
diff --git a/apps/setting/migrations/0008_modelparam.py b/apps/setting/migrations/0008_modelparam.py
new file mode 100644
index 00000000000..8be3892b38b
--- /dev/null
+++ b/apps/setting/migrations/0008_modelparam.py
@@ -0,0 +1,25 @@
+# Generated by Django 4.2.15 on 2024-10-16 13:10
+
+from django.db import migrations, models
+
+
+class Migration(migrations.Migration):
+
+ dependencies = [
+ ('setting', '0007_model_model_params_form'),
+ ]
+
+ operations = [
+ migrations.CreateModel(
+ name='ModelParam',
+ fields=[
+ ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
+ ('label', models.CharField(max_length=128, verbose_name='参数')),
+ ('field', models.CharField(max_length=256, verbose_name='显示名称')),
+ ('default_value', models.CharField(max_length=1000, verbose_name='默认值')),
+ ('input_type', models.CharField(max_length=32, verbose_name='组件类型')),
+ ('attrs', models.JSONField(verbose_name='属性')),
+ ('required', models.BooleanField(verbose_name='必填')),
+ ],
+ ),
+ ]
diff --git a/apps/setting/migrations/0009_set_default_model_params_form.py b/apps/setting/migrations/0009_set_default_model_params_form.py
new file mode 100644
index 00000000000..6b4d4b4531f
--- /dev/null
+++ b/apps/setting/migrations/0009_set_default_model_params_form.py
@@ -0,0 +1,19 @@
+# Generated by Django 4.2.15 on 2024-10-15 14:49
+
+from django.db import migrations, models
+
+sql = """
+UPDATE "public"."model"
+SET "model_params_form" = '[{"attrs": {"max": 1, "min": 0.1, "step": 0.01, "precision": 2, "show-input": true, "show-input-controls": false}, "field": "temperature", "label": {"attrs": {"tooltip": "较高的数值会使输出更加随机,而较低的数值会使其更加集中和确定"}, "label": "温度", "input_type": "TooltipLabel", "props_info": {}}, "required": true, "input_type": "Slider", "props_info": {}, "trigger_type": "OPTION_LIST", "default_value": 0.5, "relation_show_field_dict": {}, "relation_trigger_field_dict": {}}, {"attrs": {"max": 100000, "min": 1, "step": 1, "precision": 0, "show-input": true, "show-input-controls": false}, "field": "max_tokens", "label": {"attrs": {"tooltip": "指定模型可生成的最大token个数"}, "label": "输出最大Tokens", "input_type": "TooltipLabel", "props_info": {}}, "required": true, "input_type": "Slider", "props_info": {}, "trigger_type": "OPTION_LIST", "default_value": 4096, "relation_show_field_dict": {}, "relation_trigger_field_dict": {}}]'
+WHERE jsonb_array_length(model_params_form)=0
+"""
+
+
+class Migration(migrations.Migration):
+ dependencies = [
+ ('setting', '0008_modelparam'),
+ ]
+
+ operations = [
+ migrations.RunSQL(sql)
+ ]
diff --git a/apps/setting/migrations/0010_log.py b/apps/setting/migrations/0010_log.py
new file mode 100644
index 00000000000..2ce90187131
--- /dev/null
+++ b/apps/setting/migrations/0010_log.py
@@ -0,0 +1,33 @@
+# Generated by Django 4.2.18 on 2025-03-25 03:22
+
+import common.encoder.encoder
+from django.db import migrations, models
+import uuid
+
+
+class Migration(migrations.Migration):
+
+ dependencies = [
+ ('setting', '0009_set_default_model_params_form'),
+ ]
+
+ operations = [
+ migrations.CreateModel(
+ name='Log',
+ fields=[
+ ('create_time', models.DateTimeField(auto_now_add=True, verbose_name='创建时间')),
+ ('update_time', models.DateTimeField(auto_now=True, verbose_name='修改时间')),
+ ('id', models.UUIDField(default=uuid.uuid1, editable=False, primary_key=True, serialize=False, verbose_name='主键id')),
+ ('menu', models.CharField(max_length=128, verbose_name='操作菜单')),
+ ('operate', models.CharField(max_length=128, verbose_name='操作')),
+ ('operation_object', models.JSONField(default=dict, encoder=common.encoder.encoder.SystemEncoder, verbose_name='操作对象')),
+ ('user', models.JSONField(default=dict, verbose_name='用户信息')),
+ ('status', models.IntegerField(verbose_name='状态')),
+ ('ip_address', models.CharField(max_length=128, verbose_name='ip地址')),
+ ('details', models.JSONField(default=dict, encoder=common.encoder.encoder.SystemEncoder, verbose_name='详情')),
+ ],
+ options={
+ 'db_table': 'log',
+ },
+ ),
+ ]
diff --git a/apps/setting/models/log_management.py b/apps/setting/models/log_management.py
new file mode 100644
index 00000000000..66de20468bc
--- /dev/null
+++ b/apps/setting/models/log_management.py
@@ -0,0 +1,38 @@
+# coding=utf-8
+"""
+ @project: MaxKB
+ @Author:虎
+ @file: log_management.py
+ @date:2025/3/17 9:54
+ @desc:
+"""
+import uuid
+
+from django.db import models
+
+from common.encoder.encoder import SystemEncoder
+from common.mixins.app_model_mixin import AppModelMixin
+
+
+class Log(AppModelMixin):
+ """
+ 审计日志
+ """
+ id = models.UUIDField(primary_key=True, max_length=128, default=uuid.uuid1, editable=False, verbose_name="主键id")
+
+ menu = models.CharField(max_length=128, verbose_name="操作菜单")
+
+ operate = models.CharField(max_length=128, verbose_name="操作")
+
+ operation_object = models.JSONField(verbose_name="操作对象", default=dict, encoder=SystemEncoder)
+
+ user = models.JSONField(verbose_name="用户信息", default=dict)
+
+ status = models.IntegerField(verbose_name="状态")
+
+ ip_address = models.CharField(max_length=128, verbose_name="ip地址")
+
+ details = models.JSONField(verbose_name="详情", default=dict, encoder=SystemEncoder)
+
+ class Meta:
+ db_table = "log"
diff --git a/apps/setting/models/model_management.py b/apps/setting/models/model_management.py
index 5bdd1b296e1..638161e4630 100644
--- a/apps/setting/models/model_management.py
+++ b/apps/setting/models/model_management.py
@@ -22,6 +22,20 @@ class Status(models.TextChoices):
DOWNLOAD = "DOWNLOAD", '下载中'
+ PAUSE_DOWNLOAD = "PAUSE_DOWNLOAD", '暂停下载'
+
+
+class PermissionType(models.TextChoices):
+ PUBLIC = "PUBLIC", '公开'
+ PRIVATE = "PRIVATE", "私有"
+
+class ModelParam(models.Model):
+ label = models.CharField(max_length=128, verbose_name="参数")
+ field = models.CharField(max_length=256, verbose_name="显示名称")
+ default_value = models.CharField(max_length=1000, verbose_name="默认值")
+ input_type = models.CharField(max_length=32, verbose_name="组件类型")
+ attrs = models.JSONField(verbose_name="属性")
+ required = models.BooleanField(verbose_name="必填")
class Model(AppModelMixin):
"""
@@ -46,6 +60,17 @@ class Model(AppModelMixin):
meta = models.JSONField(verbose_name="模型元数据,用于存储下载,或者错误信息", default=dict)
+ permission_type = models.CharField(max_length=20, verbose_name='权限类型', choices=PermissionType.choices,
+ default=PermissionType.PRIVATE)
+
+ model_params_form = models.JSONField(verbose_name="模型参数配置", default=list)
+
+
+ def is_permission(self, user_id):
+ if self.permission_type == PermissionType.PUBLIC or str(user_id) == str(self.user_id):
+ return True
+ return False
+
class Meta:
db_table = "model"
unique_together = ['name', 'user_id']
diff --git a/apps/setting/models_provider/__init__.py b/apps/setting/models_provider/__init__.py
index 53b7001e589..fb278630ad3 100644
--- a/apps/setting/models_provider/__init__.py
+++ b/apps/setting/models_provider/__init__.py
@@ -6,3 +6,89 @@
@date:2023/10/31 17:16
@desc:
"""
+import json
+from typing import Dict
+
+from common.util.rsa_util import rsa_long_decrypt
+from setting.models_provider.constants.model_provider_constants import ModelProvideConstants
+
+
+def get_model_(provider, model_type, model_name, credential, model_id, use_local=False, **kwargs):
+ """
+ 获取模型实例
+ @param provider: 供应商
+ @param model_type: 模型类型
+ @param model_name: 模型名称
+ @param credential: 认证信息
+ @param model_id: 模型id
+ @param use_local: 是否调用本地模型 只适用于本地供应商
+ @return: 模型实例
+ """
+ model = get_provider(provider).get_model(model_type, model_name,
+ json.loads(
+ rsa_long_decrypt(credential)),
+ model_id=model_id,
+ use_local=use_local,
+ streaming=True, **kwargs)
+ return model
+
+
+def get_model(model, **kwargs):
+ """
+ 获取模型实例
+ @param model: model 数据库Model实例对象
+ @return: 模型实例
+ """
+ return get_model_(model.provider, model.model_type, model.model_name, model.credential, str(model.id), **kwargs)
+
+
+def get_provider(provider):
+ """
+ 获取供应商实例
+ @param provider: 供应商字符串
+ @return: 供应商实例
+ """
+ return ModelProvideConstants[provider].value
+
+
+def get_model_list(provider, model_type):
+ """
+ 获取模型列表
+ @param provider: 供应商字符串
+ @param model_type: 模型类型
+ @return: 模型列表
+ """
+ return get_provider(provider).get_model_list(model_type)
+
+
+def get_model_credential(provider, model_type, model_name):
+ """
+ 获取模型认证实例
+ @param provider: 供应商字符串
+ @param model_type: 模型类型
+ @param model_name: 模型名称
+ @return: 认证实例对象
+ """
+ return get_provider(provider).get_model_credential(model_type, model_name)
+
+
+def get_model_type_list(provider):
+ """
+ 获取模型类型列表
+ @param provider: 供应商字符串
+ @return: 模型类型列表
+ """
+ return get_provider(provider).get_model_type_list()
+
+
+def is_valid_credential(provider, model_type, model_name, model_credential: Dict[str, object], model_params, raise_exception=False):
+ """
+ 校验模型认证参数
+ @param provider: 供应商字符串
+ @param model_type: 模型类型
+ @param model_name: 模型名称
+ @param model_credential: 模型认证数据
+ @param raise_exception: 是否抛出错误
+ @return: True|False
+ """
+ return get_provider(provider).is_valid_credential(model_type, model_name, model_credential, model_params, raise_exception)
diff --git a/apps/setting/models_provider/base_model_provider.py b/apps/setting/models_provider/base_model_provider.py
index 3796b5bbe5b..2b02bdc1fb1 100644
--- a/apps/setting/models_provider/base_model_provider.py
+++ b/apps/setting/models_provider/base_model_provider.py
@@ -9,11 +9,14 @@
from abc import ABC, abstractmethod
from enum import Enum
from functools import reduce
-from typing import Dict, Iterator
+from typing import Dict, Iterator, Type, List
-from langchain.chat_models.base import BaseChatModel
+from pydantic import BaseModel
from common.exception.app_exception import AppApiException
+from django.utils.translation import gettext_lazy as _
+
+from common.util.common import encryption
class DownModelChunkStatus(Enum):
@@ -47,39 +50,74 @@ def to_dict(self):
class IModelProvider(ABC):
+ @abstractmethod
+ def get_model_info_manage(self):
+ pass
@abstractmethod
def get_model_provide_info(self):
pass
- @abstractmethod
def get_model_type_list(self):
- pass
+ return self.get_model_info_manage().get_model_type_list()
- @abstractmethod
def get_model_list(self, model_type):
- pass
+ if model_type is None:
+ raise AppApiException(500, _('Model type cannot be empty'))
+ return self.get_model_info_manage().get_model_list_by_model_type(model_type)
- @abstractmethod
def get_model_credential(self, model_type, model_name):
- pass
+ model_info = self.get_model_info_manage().get_model_info(model_type, model_name)
+ return model_info.model_credential
- @abstractmethod
- def get_model(self, model_type, model_name, model_credential: Dict[str, object], **model_kwargs) -> BaseChatModel:
- pass
+ def get_model_params(self, model_type, model_name):
+ model_info = self.get_model_info_manage().get_model_info(model_type, model_name)
+ return model_info.model_credential
+
+ def is_valid_credential(self, model_type, model_name, model_credential: Dict[str, object],
+ model_params: Dict[str, object], raise_exception=False):
+ model_info = self.get_model_info_manage().get_model_info(model_type, model_name)
+ return model_info.model_credential.is_valid(model_type, model_name, model_credential, model_params, self,
+ raise_exception=raise_exception)
+
+ def get_model(self, model_type, model_name, model_credential: Dict[str, object], **model_kwargs) -> BaseModel:
+ model_info = self.get_model_info_manage().get_model_info(model_type, model_name)
+ return model_info.model_class.new_instance(model_type, model_name, model_credential, **model_kwargs)
- @abstractmethod
def get_dialogue_number(self):
- pass
+ return 3
def down_model(self, model_type: str, model_name, model_credential: Dict[str, object]) -> Iterator[DownModelChunk]:
- raise AppApiException(500, "当前平台不支持下载模型")
+ raise AppApiException(500, _('The current platform does not support downloading models'))
+
+
+class MaxKBBaseModel(ABC):
+ @staticmethod
+ @abstractmethod
+ def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs):
+ pass
+
+ @staticmethod
+ def is_cache_model():
+ return True
+
+ @staticmethod
+ def filter_optional_params(model_kwargs):
+ optional_params = {}
+ for key, value in model_kwargs.items():
+ if key not in ['model_id', 'use_local', 'streaming', 'show_ref_label']:
+ if key == 'extra_body' and isinstance(value, dict):
+ optional_params = {**optional_params, **value}
+ else:
+ optional_params[key] = value
+ return optional_params
class BaseModelCredential(ABC):
@abstractmethod
- def is_valid(self, model_type: str, model_name, model: Dict[str, object], raise_exception=False):
+ def is_valid(self, model_type: str, model_name, model: Dict[str, object], model_params, provider,
+ raise_exception=True):
pass
@abstractmethod
@@ -90,6 +128,13 @@ def encryption_dict(self, model_info: Dict[str, object]):
"""
pass
+ def get_model_params_setting_form(self, model_name):
+ """
+ 模型参数设置表单
+ :return:
+ """
+ pass
+
@staticmethod
def encryption(message: str):
"""
@@ -97,31 +142,28 @@ def encryption(message: str):
:param message:
:return:
"""
- max_pre_len = 8
- max_post_len = 4
- message_len = len(message)
- pre_len = int(message_len / 5 * 2)
- post_len = int(message_len / 5 * 1)
- pre_str = "".join([message[index] for index in
- range(0, max_pre_len if pre_len > max_pre_len else 1 if pre_len <= 0 else int(pre_len))])
- end_str = "".join(
- [message[index] for index in
- range(message_len - (int(post_len) if pre_len < max_post_len else max_post_len), message_len)])
- content = "***************"
- return pre_str + content + end_str
+ return encryption(message)
class ModelTypeConst(Enum):
- LLM = {'code': 'LLM', 'message': '大语言模型'}
+ LLM = {'code': 'LLM', 'message': _('LLM')}
+ EMBEDDING = {'code': 'EMBEDDING', 'message': _('Embedding Model')}
+ STT = {'code': 'STT', 'message': _('Speech2Text')}
+ TTS = {'code': 'TTS', 'message': _('TTS')}
+ IMAGE = {'code': 'IMAGE', 'message': _('Vision Model')}
+ TTI = {'code': 'TTI', 'message': _('Image Generation')}
+ RERANKER = {'code': 'RERANKER', 'message': _('Rerank')}
class ModelInfo:
def __init__(self, name: str, desc: str, model_type: ModelTypeConst, model_credential: BaseModelCredential,
+ model_class: Type[MaxKBBaseModel],
**keywords):
self.name = name
self.desc = desc
self.model_type = model_type.name
self.model_credential = model_credential
+ self.model_class = model_class
if keywords is not None:
for key in keywords.keys():
self.__setattr__(key, keywords.get(key))
@@ -143,10 +185,69 @@ def get_desc(self):
def get_model_type(self):
return self.model_type
+ def get_model_class(self):
+ return self.model_class
+
def to_dict(self):
return reduce(lambda x, y: {**x, **y},
[{attr: self.__getattribute__(attr)} for attr in vars(self) if
- not attr.startswith("__") and not attr == 'model_credential'], {})
+ not attr.startswith("__") and not attr == 'model_credential' and not attr == 'model_class'], {})
+
+
+class ModelInfoManage:
+ def __init__(self):
+ self.model_dict = {}
+ self.model_list = []
+ self.default_model_list = []
+ self.default_model_dict = {}
+
+ def append_model_info(self, model_info: ModelInfo):
+ self.model_list.append(model_info)
+ model_type_dict = self.model_dict.get(model_info.model_type)
+ if model_type_dict is None:
+ self.model_dict[model_info.model_type] = {model_info.name: model_info}
+ else:
+ model_type_dict[model_info.name] = model_info
+
+ def append_default_model_info(self, model_info: ModelInfo):
+ self.default_model_list.append(model_info)
+ self.default_model_dict[model_info.model_type] = model_info
+
+ def get_model_list(self):
+ return [model.to_dict() for model in self.model_list]
+
+ def get_model_list_by_model_type(self, model_type):
+ return [model.to_dict() for model in self.model_list if model.model_type == model_type]
+
+ def get_model_type_list(self):
+ return [{'key': _type.value.get('message'), 'value': _type.value.get('code')} for _type in ModelTypeConst if
+ len([model for model in self.model_list if model.model_type == _type.name]) > 0]
+
+ def get_model_info(self, model_type, model_name) -> ModelInfo:
+ model_info = self.model_dict.get(model_type, {}).get(model_name, self.default_model_dict.get(model_type))
+ if model_info is None:
+ raise AppApiException(500, _('The model does not support'))
+ return model_info
+
+ class builder:
+ def __init__(self):
+ self.modelInfoManage = ModelInfoManage()
+
+ def append_model_info(self, model_info: ModelInfo):
+ self.modelInfoManage.append_model_info(model_info)
+ return self
+
+ def append_model_info_list(self, model_info_list: List[ModelInfo]):
+ for model_info in model_info_list:
+ self.modelInfoManage.append_model_info(model_info)
+ return self
+
+ def append_default_model_info(self, model_info: ModelInfo):
+ self.modelInfoManage.append_default_model_info(model_info)
+ return self
+
+ def build(self):
+ return self.modelInfoManage
class ModelProvideInfo:
diff --git a/apps/setting/models_provider/constants/model_provider_constants.py b/apps/setting/models_provider/constants/model_provider_constants.py
index 0a7565f383b..e68b9361f0b 100644
--- a/apps/setting/models_provider/constants/model_provider_constants.py
+++ b/apps/setting/models_provider/constants/model_provider_constants.py
@@ -8,16 +8,32 @@
"""
from enum import Enum
+from setting.models_provider.impl.aliyun_bai_lian_model_provider.aliyun_bai_lian_model_provider import \
+ AliyunBaiLianModelProvider
+from setting.models_provider.impl.anthropic_model_provider.anthropic_model_provider import AnthropicModelProvider
+from setting.models_provider.impl.aws_bedrock_model_provider.aws_bedrock_model_provider import BedrockModelProvider
from setting.models_provider.impl.azure_model_provider.azure_model_provider import AzureModelProvider
+from setting.models_provider.impl.deepseek_model_provider.deepseek_model_provider import DeepSeekModelProvider
+from setting.models_provider.impl.gemini_model_provider.gemini_model_provider import GeminiModelProvider
+from setting.models_provider.impl.kimi_model_provider.kimi_model_provider import KimiModelProvider
from setting.models_provider.impl.ollama_model_provider.ollama_model_provider import OllamaModelProvider
from setting.models_provider.impl.openai_model_provider.openai_model_provider import OpenAIModelProvider
from setting.models_provider.impl.qwen_model_provider.qwen_model_provider import QwenModelProvider
+from setting.models_provider.impl.regolo_model_provider.regolo_model_provider import \
+ RegoloModelProvider
+from setting.models_provider.impl.siliconCloud_model_provider.siliconCloud_model_provider import \
+ SiliconCloudModelProvider
+from setting.models_provider.impl.tencent_cloud_model_provider.tencent_cloud_model_provider import \
+ TencentCloudModelProvider
+from setting.models_provider.impl.tencent_model_provider.tencent_model_provider import TencentModelProvider
+from setting.models_provider.impl.vllm_model_provider.vllm_model_provider import VllmModelProvider
+from setting.models_provider.impl.volcanic_engine_model_provider.volcanic_engine_model_provider import \
+ VolcanicEngineModelProvider
from setting.models_provider.impl.wenxin_model_provider.wenxin_model_provider import WenxinModelProvider
-from setting.models_provider.impl.kimi_model_provider.kimi_model_provider import KimiModelProvider
from setting.models_provider.impl.xf_model_provider.xf_model_provider import XunFeiModelProvider
+from setting.models_provider.impl.xinference_model_provider.xinference_model_provider import XinferenceModelProvider
from setting.models_provider.impl.zhipu_model_provider.zhipu_model_provider import ZhiPuModelProvider
-from setting.models_provider.impl.deepseek_model_provider.deepseek_model_provider import DeepSeekModelProvider
-from setting.models_provider.impl.gemini_model_provider.gemini_model_provider import GeminiModelProvider
+from setting.models_provider.impl.local_model_provider.local_model_provider import LocalModelProvider
class ModelProvideConstants(Enum):
@@ -31,3 +47,14 @@ class ModelProvideConstants(Enum):
model_xf_provider = XunFeiModelProvider()
model_deepseek_provider = DeepSeekModelProvider()
model_gemini_provider = GeminiModelProvider()
+ model_volcanic_engine_provider = VolcanicEngineModelProvider()
+ model_tencent_provider = TencentModelProvider()
+ model_tencent_cloud_provider = TencentCloudModelProvider()
+ model_aws_bedrock_provider = BedrockModelProvider()
+ model_local_provider = LocalModelProvider()
+ model_xinference_provider = XinferenceModelProvider()
+ model_vllm_provider = VllmModelProvider()
+ aliyun_bai_lian_model_provider = AliyunBaiLianModelProvider()
+ model_anthropic_provider = AnthropicModelProvider()
+ model_siliconCloud_provider = SiliconCloudModelProvider()
+ model_regolo_provider = RegoloModelProvider()
diff --git a/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/__init__.py b/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/__init__.py
new file mode 100644
index 00000000000..3c10c5535f7
--- /dev/null
+++ b/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/__init__.py
@@ -0,0 +1,8 @@
+# coding=utf-8
+"""
+ @project: MaxKB
+ @Author:虎
+ @file: __init__.py
+ @date:2024/9/9 17:42
+ @desc:
+"""
diff --git a/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/aliyun_bai_lian_model_provider.py b/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/aliyun_bai_lian_model_provider.py
new file mode 100644
index 00000000000..b1d72f0869a
--- /dev/null
+++ b/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/aliyun_bai_lian_model_provider.py
@@ -0,0 +1,117 @@
+# coding=utf-8
+"""
+ @project: MaxKB
+ @Author:虎
+ @file: aliyun_bai_lian_model_provider.py
+ @date:2024/9/9 17:43
+ @desc:
+"""
+import os
+
+from common.util.file_util import get_file_content
+from setting.models_provider.base_model_provider import ModelProvideInfo, ModelTypeConst, ModelInfo, IModelProvider, \
+ ModelInfoManage
+from setting.models_provider.impl.aliyun_bai_lian_model_provider.credential.embedding import \
+ AliyunBaiLianEmbeddingCredential
+from setting.models_provider.impl.aliyun_bai_lian_model_provider.credential.image import QwenVLModelCredential
+from setting.models_provider.impl.aliyun_bai_lian_model_provider.credential.llm import BaiLianLLMModelCredential
+from setting.models_provider.impl.aliyun_bai_lian_model_provider.credential.reranker import \
+ AliyunBaiLianRerankerCredential
+from setting.models_provider.impl.aliyun_bai_lian_model_provider.credential.stt import AliyunBaiLianSTTModelCredential
+from setting.models_provider.impl.aliyun_bai_lian_model_provider.credential.tti import QwenTextToImageModelCredential
+from setting.models_provider.impl.aliyun_bai_lian_model_provider.credential.tts import AliyunBaiLianTTSModelCredential
+from setting.models_provider.impl.aliyun_bai_lian_model_provider.model.embedding import AliyunBaiLianEmbedding
+from setting.models_provider.impl.aliyun_bai_lian_model_provider.model.image import QwenVLChatModel
+from setting.models_provider.impl.aliyun_bai_lian_model_provider.model.llm import BaiLianChatModel
+from setting.models_provider.impl.aliyun_bai_lian_model_provider.model.reranker import AliyunBaiLianReranker
+from setting.models_provider.impl.aliyun_bai_lian_model_provider.model.stt import AliyunBaiLianSpeechToText
+from setting.models_provider.impl.aliyun_bai_lian_model_provider.model.tti import QwenTextToImageModel
+from setting.models_provider.impl.aliyun_bai_lian_model_provider.model.tts import AliyunBaiLianTextToSpeech
+from smartdoc.conf import PROJECT_DIR
+from django.utils.translation import gettext as _, gettext
+
+aliyun_bai_lian_model_credential = AliyunBaiLianRerankerCredential()
+aliyun_bai_lian_tts_model_credential = AliyunBaiLianTTSModelCredential()
+aliyun_bai_lian_stt_model_credential = AliyunBaiLianSTTModelCredential()
+aliyun_bai_lian_embedding_model_credential = AliyunBaiLianEmbeddingCredential()
+aliyun_bai_lian_llm_model_credential = BaiLianLLMModelCredential()
+qwenvl_model_credential = QwenVLModelCredential()
+qwentti_model_credential = QwenTextToImageModelCredential()
+
+model_info_list = [ModelInfo('gte-rerank',
+ _('With the GTE-Rerank text sorting series model developed by Alibaba Tongyi Lab, developers can integrate high-quality text retrieval and sorting through the LlamaIndex framework.'),
+ ModelTypeConst.RERANKER, aliyun_bai_lian_model_credential, AliyunBaiLianReranker),
+ ModelInfo('paraformer-realtime-v2',
+ _('Chinese (including various dialects such as Cantonese), English, Japanese, and Korean support free switching between multiple languages.'),
+ ModelTypeConst.STT, aliyun_bai_lian_stt_model_credential, AliyunBaiLianSpeechToText),
+ ModelInfo('cosyvoice-v1',
+ _('CosyVoice is based on a new generation of large generative speech models, which can predict emotions, intonation, rhythm, etc. based on context, and has better anthropomorphic effects.'),
+ ModelTypeConst.TTS, aliyun_bai_lian_tts_model_credential, AliyunBaiLianTextToSpeech),
+ ModelInfo('text-embedding-v1',
+ _("Universal text vector is Tongyi Lab's multi-language text unified vector model based on the LLM base. It provides high-level vector services for multiple mainstream languages around the world and helps developers quickly convert text data into high-quality vector data."),
+ ModelTypeConst.EMBEDDING, aliyun_bai_lian_embedding_model_credential,
+ AliyunBaiLianEmbedding),
+ ModelInfo('qwen3-0.6b', '', ModelTypeConst.LLM, aliyun_bai_lian_llm_model_credential,
+ BaiLianChatModel),
+ ModelInfo('qwen3-1.7b', '', ModelTypeConst.LLM, aliyun_bai_lian_llm_model_credential,
+ BaiLianChatModel),
+ ModelInfo('qwen3-4b', '', ModelTypeConst.LLM, aliyun_bai_lian_llm_model_credential,
+ BaiLianChatModel),
+ ModelInfo('qwen3-8b', '', ModelTypeConst.LLM, aliyun_bai_lian_llm_model_credential,
+ BaiLianChatModel),
+ ModelInfo('qwen3-14b', '', ModelTypeConst.LLM, aliyun_bai_lian_llm_model_credential,
+ BaiLianChatModel),
+ ModelInfo('qwen3-32b', '', ModelTypeConst.LLM, aliyun_bai_lian_llm_model_credential,
+ BaiLianChatModel),
+ ModelInfo('qwen3-30b-a3b', '', ModelTypeConst.LLM, aliyun_bai_lian_llm_model_credential,
+ BaiLianChatModel),
+ ModelInfo('qwen3-235b-a22b', '', ModelTypeConst.LLM, aliyun_bai_lian_llm_model_credential,
+ BaiLianChatModel),
+
+ ModelInfo('qwen-turbo', '', ModelTypeConst.LLM, aliyun_bai_lian_llm_model_credential,
+ BaiLianChatModel),
+ ModelInfo('qwen-plus', '', ModelTypeConst.LLM, aliyun_bai_lian_llm_model_credential,
+ BaiLianChatModel),
+ ModelInfo('qwen-max', '', ModelTypeConst.LLM, aliyun_bai_lian_llm_model_credential,
+ BaiLianChatModel)
+ ]
+
+module_info_vl_list = [
+ ModelInfo('qwen-vl-max', '', ModelTypeConst.IMAGE, qwenvl_model_credential, QwenVLChatModel),
+ ModelInfo('qwen-vl-max-0809', '', ModelTypeConst.IMAGE, qwenvl_model_credential, QwenVLChatModel),
+ ModelInfo('qwen-vl-plus-0809', '', ModelTypeConst.IMAGE, qwenvl_model_credential, QwenVLChatModel),
+]
+module_info_tti_list = [
+ ModelInfo('wanx-v1',
+ _('Tongyi Wanxiang - a large image model for text generation, supports bilingual input in Chinese and English, and supports the input of reference pictures for reference content or reference style migration. Key styles include but are not limited to watercolor, oil painting, Chinese painting, sketch, flat illustration, two-dimensional, and 3D. Cartoon.'),
+ ModelTypeConst.TTI, qwentti_model_credential, QwenTextToImageModel),
+]
+
+model_info_manage = (
+ ModelInfoManage.builder()
+ .append_model_info_list(model_info_list)
+ .append_model_info_list(module_info_vl_list)
+ .append_default_model_info(module_info_vl_list[0])
+ .append_model_info_list(module_info_tti_list)
+ .append_default_model_info(module_info_tti_list[0])
+ .append_default_model_info(model_info_list[1])
+ .append_default_model_info(model_info_list[2])
+ .append_default_model_info(model_info_list[3])
+ .append_default_model_info(model_info_list[4])
+ .append_default_model_info(model_info_list[0])
+ .build()
+)
+
+
+class AliyunBaiLianModelProvider(IModelProvider):
+
+ def get_model_info_manage(self):
+ return model_info_manage
+
+ def get_model_provide_info(self):
+ return ModelProvideInfo(provider='aliyun_bai_lian_model_provider', name=gettext('Alibaba Cloud Bailian'),
+ icon=get_file_content(
+ os.path.join(PROJECT_DIR, "apps", "setting", 'models_provider', 'impl',
+ 'aliyun_bai_lian_model_provider',
+ 'icon',
+ 'aliyun_bai_lian_icon_svg')))
diff --git a/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/embedding.py b/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/embedding.py
new file mode 100644
index 00000000000..f8d527ff39d
--- /dev/null
+++ b/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/embedding.py
@@ -0,0 +1,53 @@
+# coding=utf-8
+"""
+ @project: MaxKB
+ @Author:虎
+ @file: embedding.py
+ @date:2024/10/16 17:01
+ @desc:
+"""
+import traceback
+from typing import Dict
+
+from django.utils.translation import gettext as _
+
+from common import forms
+from common.exception.app_exception import AppApiException
+from common.forms import BaseForm
+from setting.models_provider.base_model_provider import ValidCode, BaseModelCredential
+from setting.models_provider.impl.aliyun_bai_lian_model_provider.model.embedding import AliyunBaiLianEmbedding
+
+
+class AliyunBaiLianEmbeddingCredential(BaseForm, BaseModelCredential):
+
+ def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], model_params, provider,
+ raise_exception=False):
+ model_type_list = provider.get_model_type_list()
+ if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))):
+ raise AppApiException(ValidCode.valid_error.value,
+ _('{model_type} Model type is not supported').format(model_type=model_type))
+ for key in ['dashscope_api_key']:
+ if key not in model_credential:
+ if raise_exception:
+ raise AppApiException(ValidCode.valid_error.value, _('{key} is required').format(key=key))
+ else:
+ return False
+ try:
+ model: AliyunBaiLianEmbedding = provider.get_model(model_type, model_name, model_credential)
+ model.embed_query(_('Hello'))
+ except Exception as e:
+ traceback.print_exc()
+ if isinstance(e, AppApiException):
+ raise e
+ if raise_exception:
+ raise AppApiException(ValidCode.valid_error.value,
+ _('Verification failed, please check whether the parameters are correct: {error}').format(
+ error=str(e)))
+ else:
+ return False
+ return True
+
+ def encryption_dict(self, model: Dict[str, object]):
+ return {**model, 'dashscope_api_key': super().encryption(model.get('dashscope_api_key', ''))}
+
+ dashscope_api_key = forms.PasswordInputField('API Key', required=True)
diff --git a/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/image.py b/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/image.py
new file mode 100644
index 00000000000..3f3caafa0fd
--- /dev/null
+++ b/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/image.py
@@ -0,0 +1,78 @@
+# coding=utf-8
+"""
+ @project: MaxKB
+ @Author:虎
+ @file: llm.py
+ @date:2024/7/11 18:41
+ @desc:
+"""
+import traceback
+from typing import Dict
+
+from django.utils.translation import gettext_lazy as _, gettext
+from langchain_core.messages import HumanMessage
+
+from common import forms
+from common.exception.app_exception import AppApiException
+from common.forms import BaseForm, TooltipLabel
+from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode
+
+
+class QwenModelParams(BaseForm):
+ temperature = forms.SliderField(TooltipLabel(_('Temperature'),
+ _('Higher values make the output more random, while lower values make it more focused and deterministic')),
+ required=True, default_value=1.0,
+ _min=0.1,
+ _max=1.9,
+ _step=0.01,
+ precision=2)
+
+ max_tokens = forms.SliderField(
+ TooltipLabel(_('Output the maximum Tokens'),
+ _('Specify the maximum number of tokens that the model can generate')),
+ required=True, default_value=800,
+ _min=1,
+ _max=100000,
+ _step=1,
+ precision=0)
+
+
+class QwenVLModelCredential(BaseForm, BaseModelCredential):
+
+ def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], model_params, provider,
+ raise_exception=False):
+ model_type_list = provider.get_model_type_list()
+ if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))):
+ raise AppApiException(ValidCode.valid_error.value,
+ gettext('{model_type} Model type is not supported').format(model_type=model_type))
+ for key in ['api_key']:
+ if key not in model_credential:
+ if raise_exception:
+ raise AppApiException(ValidCode.valid_error.value, gettext('{key} is required').format(key=key))
+ else:
+ return False
+ try:
+ model = provider.get_model(model_type, model_name, model_credential, **model_params)
+ res = model.stream([HumanMessage(content=[{"type": "text", "text": gettext('Hello')}])])
+ for chunk in res:
+ print(chunk)
+ except Exception as e:
+ traceback.print_exc()
+ if isinstance(e, AppApiException):
+ raise e
+ if raise_exception:
+ raise AppApiException(ValidCode.valid_error.value,
+ gettext(
+ 'Verification failed, please check whether the parameters are correct: {error}').format(
+ error=str(e)))
+ else:
+ return False
+ return True
+
+ def encryption_dict(self, model: Dict[str, object]):
+ return {**model, 'api_key': super().encryption(model.get('api_key', ''))}
+
+ api_key = forms.PasswordInputField('API Key', required=True)
+
+ def get_model_params_setting_form(self, model_name):
+ return QwenModelParams()
diff --git a/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/llm.py b/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/llm.py
new file mode 100644
index 00000000000..9da30b72796
--- /dev/null
+++ b/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/llm.py
@@ -0,0 +1,100 @@
+# coding=utf-8
+import traceback
+from typing import Dict
+
+from langchain_core.messages import HumanMessage
+
+from common import forms
+from common.exception.app_exception import AppApiException
+from common.forms import BaseForm, TooltipLabel
+from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode
+from django.utils.translation import gettext_lazy as _, gettext
+
+
+class BaiLianLLMModelParams(BaseForm):
+ temperature = forms.SliderField(TooltipLabel(_('Temperature'),
+ _('Higher values make the output more random, while lower values make it more focused and deterministic')),
+ required=True, default_value=0.7,
+ _min=0.1,
+ _max=1.0,
+ _step=0.01,
+ precision=2)
+
+ max_tokens = forms.SliderField(
+ TooltipLabel(_('Output the maximum Tokens'),
+ _('Specify the maximum number of tokens that the model can generate')),
+ required=True, default_value=800,
+ _min=1,
+ _max=100000,
+ _step=1,
+ precision=0)
+
+
+class BaiLianLLMStreamModelParams(BaseForm):
+ temperature = forms.SliderField(TooltipLabel(_('Temperature'),
+ _('Higher values make the output more random, while lower values make it more focused and deterministic')),
+ required=True, default_value=0.7,
+ _min=0.1,
+ _max=1.0,
+ _step=0.01,
+ precision=2)
+
+ max_tokens = forms.SliderField(
+ TooltipLabel(_('Output the maximum Tokens'),
+ _('Specify the maximum number of tokens that the model can generate')),
+ required=True, default_value=800,
+ _min=1,
+ _max=100000,
+ _step=1,
+ precision=0)
+
+ stream = forms.SwitchField(label=TooltipLabel(_('Is the answer in streaming mode'),
+ _('Is the answer in streaming mode')),
+ required=True, default_value=True)
+
+
+class BaiLianLLMModelCredential(BaseForm, BaseModelCredential):
+
+ def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], model_params, provider,
+ raise_exception=False):
+ model_type_list = provider.get_model_type_list()
+ if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))):
+ raise AppApiException(ValidCode.valid_error.value,
+ gettext('{model_type} Model type is not supported').format(model_type=model_type))
+
+ for key in ['api_base', 'api_key']:
+ if key not in model_credential:
+ if raise_exception:
+ raise AppApiException(ValidCode.valid_error.value, gettext('{key} is required').format(key=key))
+ else:
+ return False
+ try:
+ model = provider.get_model(model_type, model_name, model_credential, **model_params)
+ if model_params.get('stream'):
+ for res in model.stream([HumanMessage(content=gettext('Hello'))]):
+ pass
+ else:
+ model.invoke([HumanMessage(content=gettext('Hello'))])
+ except Exception as e:
+ traceback.print_exc()
+ if isinstance(e, AppApiException):
+ raise e
+ if raise_exception:
+ raise AppApiException(ValidCode.valid_error.value,
+ gettext(
+ 'Verification failed, please check whether the parameters are correct: {error}').format(
+ error=str(e)))
+ else:
+ return False
+ return True
+
+ def encryption_dict(self, model: Dict[str, object]):
+ return {**model, 'api_key': super().encryption(model.get('api_key', ''))}
+
+ api_base = forms.TextInputField('API URL', required=True)
+ api_key = forms.PasswordInputField('API Key', required=True)
+
+ def get_model_params_setting_form(self, model_name):
+ if 'qwen3' in model_name:
+ return BaiLianLLMStreamModelParams()
+ return BaiLianLLMModelParams()
diff --git a/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/reranker.py b/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/reranker.py
new file mode 100644
index 00000000000..8386c562ef0
--- /dev/null
+++ b/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/reranker.py
@@ -0,0 +1,53 @@
+# coding=utf-8
+"""
+ @project: MaxKB
+ @Author:虎
+ @file: reranker.py
+ @date:2024/9/9 17:51
+ @desc:
+"""
+import traceback
+from typing import Dict
+
+from django.utils.translation import gettext as _
+from langchain_core.documents import Document
+
+from common import forms
+from common.exception.app_exception import AppApiException
+from common.forms import BaseForm
+from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode
+from setting.models_provider.impl.aliyun_bai_lian_model_provider.model.reranker import AliyunBaiLianReranker
+
+
+class AliyunBaiLianRerankerCredential(BaseForm, BaseModelCredential):
+
+ def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], model_params, provider,
+ raise_exception=False):
+ if not model_type == 'RERANKER':
+ raise AppApiException(ValidCode.valid_error.value,
+ _('{model_type} Model type is not supported').format(model_type=model_type))
+ for key in ['dashscope_api_key']:
+ if key not in model_credential:
+ if raise_exception:
+ raise AppApiException(ValidCode.valid_error.value, _('{key} is required').format(key=key))
+ else:
+ return False
+ try:
+ model: AliyunBaiLianReranker = provider.get_model(model_type, model_name, model_credential)
+ model.compress_documents([Document(page_content=_('Hello'))], _('Hello'))
+ except Exception as e:
+ traceback.print_exc()
+ if isinstance(e, AppApiException):
+ raise e
+ if raise_exception:
+ raise AppApiException(ValidCode.valid_error.value,
+ _('Verification failed, please check whether the parameters are correct: {error}').format(
+ error=str(e)))
+ else:
+ return False
+ return True
+
+ def encryption_dict(self, model: Dict[str, object]):
+ return {**model, 'dashscope_api_key': super().encryption(model.get('dashscope_api_key', ''))}
+
+ dashscope_api_key = forms.PasswordInputField('API Key', required=True)
diff --git a/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/stt.py b/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/stt.py
new file mode 100644
index 00000000000..e659b31af92
--- /dev/null
+++ b/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/stt.py
@@ -0,0 +1,48 @@
+# coding=utf-8
+import traceback
+from typing import Dict
+
+from django.utils.translation import gettext as _
+
+from common import forms
+from common.exception.app_exception import AppApiException
+from common.forms import BaseForm
+from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode
+
+
+class AliyunBaiLianSTTModelCredential(BaseForm, BaseModelCredential):
+ api_key = forms.PasswordInputField("API Key", required=True)
+
+ def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], model_params, provider,
+ raise_exception=False):
+ model_type_list = provider.get_model_type_list()
+ if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))):
+ raise AppApiException(ValidCode.valid_error.value,
+ _('{model_type} Model type is not supported').format(model_type=model_type))
+
+ for key in ['api_key']:
+ if key not in model_credential:
+ if raise_exception:
+ raise AppApiException(ValidCode.valid_error.value, _('{key} is required').format(key=key))
+ else:
+ return False
+ try:
+ model = provider.get_model(model_type, model_name, model_credential)
+ model.check_auth()
+ except Exception as e:
+ traceback.print_exc()
+ if isinstance(e, AppApiException):
+ raise e
+ if raise_exception:
+ raise AppApiException(ValidCode.valid_error.value,
+ _('Verification failed, please check whether the parameters are correct: {error}').format(
+ error=str(e)))
+ else:
+ return False
+ return True
+
+ def encryption_dict(self, model: Dict[str, object]):
+ return {**model, 'api_key': super().encryption(model.get('api_key', ''))}
+
+ def get_model_params_setting_form(self, model_name):
+ pass
diff --git a/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py b/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py
new file mode 100644
index 00000000000..cc904fe226f
--- /dev/null
+++ b/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py
@@ -0,0 +1,98 @@
+# coding=utf-8
+"""
+ @project: MaxKB
+ @Author:虎
+ @file: llm.py
+ @date:2024/7/11 18:41
+ @desc:
+"""
+import traceback
+from typing import Dict
+
+from django.utils.translation import gettext_lazy as _, gettext
+
+from common import forms
+from common.exception.app_exception import AppApiException
+from common.forms import BaseForm, TooltipLabel
+from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode
+
+
+class QwenModelParams(BaseForm):
+ size = forms.SingleSelect(
+ TooltipLabel(_('Image size'), _('Specify the size of the generated image, such as: 1024x1024')),
+ required=True,
+ default_value='1024*1024',
+ option_list=[
+ {'value': '1024*1024', 'label': '1024*1024'},
+ {'value': '720*1280', 'label': '720*1280'},
+ {'value': '768*1152', 'label': '768*1152'},
+ {'value': '1280*720', 'label': '1280*720'},
+ ],
+ text_field='label',
+ value_field='value')
+ n = forms.SliderField(
+ TooltipLabel(_('Number of pictures'), _('Specify the number of generated images')),
+ required=True, default_value=1,
+ _min=1,
+ _max=4,
+ _step=1,
+ precision=0)
+ style = forms.SingleSelect(
+ TooltipLabel(_('Style'), _('Specify the style of generated images')),
+ required=True,
+        default_value='<auto>',
+        option_list=[
+            {'value': '<auto>', 'label': _('Default value, the image style is randomly output by the model')},
+            {'value': '<photography>', 'label': _('photography')},
+            {'value': '<portrait>', 'label': _('Portraits')},
+            {'value': '<3d cartoon>', 'label': _('3D cartoon')},
+            {'value': '<anime>', 'label': _('animation')},
+            {'value': '<oil painting>', 'label': _('painting')},
+            {'value': '<watercolor>', 'label': _('watercolor')},
+            {'value': '<sketch>', 'label': _('sketch')},
+            {'value': '<chinese painting>', 'label': _('Chinese painting')},
+            {'value': '<flat illustration>', 'label': _('flat illustration')},
+ ],
+ text_field='label',
+ value_field='value'
+ )
+
+
+class QwenTextToImageModelCredential(BaseForm, BaseModelCredential):
+
+ def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], model_params, provider,
+ raise_exception=False):
+ model_type_list = provider.get_model_type_list()
+ if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))):
+ raise AppApiException(ValidCode.valid_error.value,
+ gettext('{model_type} Model type is not supported').format(model_type=model_type))
+ for key in ['api_key']:
+ if key not in model_credential:
+ if raise_exception:
+ raise AppApiException(ValidCode.valid_error.value, gettext('{key} is required').format(key=key))
+ else:
+ return False
+ try:
+ model = provider.get_model(model_type, model_name, model_credential, **model_params)
+ res = model.check_auth()
+ print(res)
+ except Exception as e:
+ traceback.print_exc()
+ if isinstance(e, AppApiException):
+ raise e
+ if raise_exception:
+ raise AppApiException(ValidCode.valid_error.value,
+ gettext(
+ 'Verification failed, please check whether the parameters are correct: {error}').format(
+ error=str(e)))
+ else:
+ return False
+ return True
+
+ def encryption_dict(self, model: Dict[str, object]):
+ return {**model, 'api_key': super().encryption(model.get('api_key', ''))}
+
+ api_key = forms.PasswordInputField('API Key', required=True)
+
+ def get_model_params_setting_form(self, model_name):
+ return QwenModelParams()
diff --git a/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py b/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py
new file mode 100644
index 00000000000..6e2c64c84ee
--- /dev/null
+++ b/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py
@@ -0,0 +1,83 @@
+# coding=utf-8
+import traceback
+from typing import Dict
+
+from django.utils.translation import gettext_lazy as _, gettext
+
+from common import forms
+from common.exception.app_exception import AppApiException
+from common.forms import BaseForm, TooltipLabel
+from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode
+
+
+class AliyunBaiLianTTSModelGeneralParams(BaseForm):
+ voice = forms.SingleSelect(
+ TooltipLabel(_('timbre'), _('Chinese sounds can support mixed scenes of Chinese and English')),
+ required=True, default_value='longxiaochun',
+ text_field='value',
+ value_field='value',
+ option_list=[
+ {'text': _('Long Xiaochun'), 'value': 'longxiaochun'},
+ {'text': _('Long Xiaoxia'), 'value': 'longxiaoxia'},
+            {'text': _('Long Xiaocheng'), 'value': 'longxiaocheng'},
+ {'text': _('Long Xiaobai'), 'value': 'longxiaobai'},
+ {'text': _('Long laotie'), 'value': 'longlaotie'},
+ {'text': _('Long Shu'), 'value': 'longshu'},
+ {'text': _('Long Shuo'), 'value': 'longshuo'},
+ {'text': _('Long Jing'), 'value': 'longjing'},
+ {'text': _('Long Miao'), 'value': 'longmiao'},
+ {'text': _('Long Yue'), 'value': 'longyue'},
+ {'text': _('Long Yuan'), 'value': 'longyuan'},
+ {'text': _('Long Fei'), 'value': 'longfei'},
+ {'text': _('Long Jielidou'), 'value': 'longjielidou'},
+ {'text': _('Long Tong'), 'value': 'longtong'},
+ {'text': _('Long Xiang'), 'value': 'longxiang'},
+ {'text': 'Stella', 'value': 'loongstella'},
+ {'text': 'Bella', 'value': 'loongbella'},
+ ])
+ speech_rate = forms.SliderField(
+ TooltipLabel(_('speaking speed'), _('[0.5,2], the default is 1, usually one decimal place is enough')),
+ required=True, default_value=1,
+ _min=0.5,
+ _max=2,
+ _step=0.1,
+ precision=1)
+
+
+class AliyunBaiLianTTSModelCredential(BaseForm, BaseModelCredential):
+ api_key = forms.PasswordInputField("API Key", required=True)
+
+ def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], model_params, provider,
+ raise_exception=False):
+ model_type_list = provider.get_model_type_list()
+ if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))):
+ raise AppApiException(ValidCode.valid_error.value,
+ gettext('{model_type} Model type is not supported').format(model_type=model_type))
+
+ for key in ['api_key']:
+ if key not in model_credential:
+ if raise_exception:
+ raise AppApiException(ValidCode.valid_error.value, gettext('{key} is required').format(key=key))
+ else:
+ return False
+ try:
+ model = provider.get_model(model_type, model_name, model_credential, **model_params)
+ model.check_auth()
+ except Exception as e:
+ traceback.print_exc()
+ if isinstance(e, AppApiException):
+ raise e
+ if raise_exception:
+ raise AppApiException(ValidCode.valid_error.value,
+ gettext(
+ 'Verification failed, please check whether the parameters are correct: {error}').format(
+ error=str(e)))
+ else:
+ return False
+ return True
+
+ def encryption_dict(self, model: Dict[str, object]):
+ return {**model, 'api_key': super().encryption(model.get('api_key', ''))}
+
+ def get_model_params_setting_form(self, model_name):
+ return AliyunBaiLianTTSModelGeneralParams()
diff --git a/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/icon/aliyun_bai_lian_icon_svg b/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/icon/aliyun_bai_lian_icon_svg
new file mode 100644
index 00000000000..0678828dd27
--- /dev/null
+++ b/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/icon/aliyun_bai_lian_icon_svg
@@ -0,0 +1 @@
+【icon】阿里百炼大模型
\ No newline at end of file
diff --git a/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/model/embedding.py b/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/model/embedding.py
new file mode 100644
index 00000000000..401d12ee924
--- /dev/null
+++ b/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/model/embedding.py
@@ -0,0 +1,66 @@
+# coding=utf-8
+"""
+ @project: MaxKB
+ @Author:虎
+ @file: embedding.py
+ @date:2024/10/16 16:34
+ @desc:
+"""
+from functools import reduce
+from typing import Dict, List
+
+from langchain_community.embeddings import DashScopeEmbeddings
+from langchain_community.embeddings.dashscope import embed_with_retry
+
+from setting.models_provider.base_model_provider import MaxKBBaseModel
+
+
+def proxy_embed_documents(texts: List[str], step_size, embed_documents):
+ value = [embed_documents(texts[start_index:start_index + step_size]) for start_index in
+ range(0, len(texts), step_size)]
+ return reduce(lambda x, y: [*x, *y], value, [])
+
+
+class AliyunBaiLianEmbedding(MaxKBBaseModel, DashScopeEmbeddings):
+ @staticmethod
+ def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs):
+ return AliyunBaiLianEmbedding(
+ model=model_name,
+ dashscope_api_key=model_credential.get('dashscope_api_key')
+ )
+
+ def embed_documents(self, texts: List[str]) -> List[List[float]]:
+ if self.model == 'text-embedding-v3':
+ return proxy_embed_documents(texts, 6, self._embed_documents)
+ return self._embed_documents(texts)
+
+ def _embed_documents(self, texts: List[str]) -> List[List[float]]:
+ """Call out to DashScope's embedding endpoint for embedding search docs.
+
+ Args:
+ texts: The list of texts to embed.
+ chunk_size: The chunk size of embeddings. If None, will use the chunk size
+ specified by the class.
+
+ Returns:
+ List of embeddings, one for each text.
+ """
+ embeddings = embed_with_retry(
+ self, input=texts, text_type="document", model=self.model
+ )
+ embedding_list = [item["embedding"] for item in embeddings]
+ return embedding_list
+
+ def embed_query(self, text: str) -> List[float]:
+ """Call out to DashScope's embedding endpoint for embedding query text.
+
+ Args:
+ text: The text to embed.
+
+ Returns:
+ Embedding for the text.
+ """
+ embedding = embed_with_retry(
+ self, input=[text], text_type="document", model=self.model
+ )[0]["embedding"]
+ return embedding
diff --git a/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/model/iat_mp3_16k.mp3 b/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/model/iat_mp3_16k.mp3
new file mode 100644
index 00000000000..75e744c8ff5
Binary files /dev/null and b/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/model/iat_mp3_16k.mp3 differ
diff --git a/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/model/image.py b/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/model/image.py
new file mode 100644
index 00000000000..7cda97f2388
--- /dev/null
+++ b/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/model/image.py
@@ -0,0 +1,22 @@
+# coding=utf-8
+
+from typing import Dict
+
+from setting.models_provider.base_model_provider import MaxKBBaseModel
+from setting.models_provider.impl.base_chat_open_ai import BaseChatOpenAI
+
+
+class QwenVLChatModel(MaxKBBaseModel, BaseChatOpenAI):
+
+ @staticmethod
+ def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs):
+ optional_params = MaxKBBaseModel.filter_optional_params(model_kwargs)
+ chat_tong_yi = QwenVLChatModel(
+ model_name=model_name,
+ openai_api_key=model_credential.get('api_key'),
+ openai_api_base='https://dashscope.aliyuncs.com/compatible-mode/v1',
+ streaming=True,
+ stream_usage=True,
+ extra_body=optional_params
+ )
+ return chat_tong_yi
diff --git a/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/model/llm.py b/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/model/llm.py
new file mode 100644
index 00000000000..ee3ee6488c2
--- /dev/null
+++ b/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/model/llm.py
@@ -0,0 +1,24 @@
+#!/usr/bin/env python
+# -*- coding: UTF-8 -*-
+from typing import Dict
+
+from setting.models_provider.base_model_provider import MaxKBBaseModel
+from setting.models_provider.impl.base_chat_open_ai import BaseChatOpenAI
+
+
+class BaiLianChatModel(MaxKBBaseModel, BaseChatOpenAI):
+ @staticmethod
+ def is_cache_model():
+ return False
+
+ @staticmethod
+ def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs):
+ optional_params = MaxKBBaseModel.filter_optional_params(model_kwargs)
+ if 'qwen-omni-turbo' in model_name or 'qwq' in model_name:
+ optional_params['streaming'] = True
+ return BaiLianChatModel(
+ model=model_name,
+ openai_api_base=model_credential.get('api_base'),
+ openai_api_key=model_credential.get('api_key'),
+ extra_body=optional_params
+ )
diff --git a/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/model/reranker.py b/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/model/reranker.py
new file mode 100644
index 00000000000..5c9bea4af2a
--- /dev/null
+++ b/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/model/reranker.py
@@ -0,0 +1,20 @@
+# coding=utf-8
+"""
+ @project: MaxKB
+ @Author:虎
+ @file: reranker.py
+ @date:2024/9/2 16:42
+ @desc:
+"""
+from typing import Dict
+
+from langchain_community.document_compressors import DashScopeRerank
+
+from setting.models_provider.base_model_provider import MaxKBBaseModel
+
+
+class AliyunBaiLianReranker(MaxKBBaseModel, DashScopeRerank):
+ @staticmethod
+ def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs):
+ return AliyunBaiLianReranker(model=model_name, dashscope_api_key=model_credential.get('dashscope_api_key'),
+ top_n=model_kwargs.get('top_n', 3))
diff --git a/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/model/stt.py b/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/model/stt.py
new file mode 100644
index 00000000000..ad17a32a95b
--- /dev/null
+++ b/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/model/stt.py
@@ -0,0 +1,75 @@
+import os
+import tempfile
+from typing import Dict
+
+import dashscope
+from dashscope.audio.asr import (Recognition)
+from pydub import AudioSegment
+
+from setting.models_provider.base_model_provider import MaxKBBaseModel
+from setting.models_provider.impl.base_stt import BaseSpeechToText
+
+
+class AliyunBaiLianSpeechToText(MaxKBBaseModel, BaseSpeechToText):
+ api_key: str
+ model: str
+
+ def __init__(self, **kwargs):
+ super().__init__(**kwargs)
+ self.api_key = kwargs.get('api_key')
+ self.model = kwargs.get('model')
+
+ @staticmethod
+ def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs):
+ optional_params = {}
+ if 'max_tokens' in model_kwargs and model_kwargs['max_tokens'] is not None:
+ optional_params['max_tokens'] = model_kwargs['max_tokens']
+ if 'temperature' in model_kwargs and model_kwargs['temperature'] is not None:
+ optional_params['temperature'] = model_kwargs['temperature']
+ if model_name == 'qwen-omni-turbo':
+ optional_params['streaming'] = True
+ return AliyunBaiLianSpeechToText(
+ model=model_name,
+ api_key=model_credential.get('api_key'),
+ **optional_params,
+ )
+
+ def check_auth(self):
+ cwd = os.path.dirname(os.path.abspath(__file__))
+ with open(f'{cwd}/iat_mp3_16k.mp3', 'rb') as f:
+ self.speech_to_text(f)
+
+ def speech_to_text(self, audio_file):
+ dashscope.api_key = self.api_key
+ recognition = Recognition(model=self.model,
+ format='mp3',
+ sample_rate=16000,
+ callback=None)
+ with tempfile.NamedTemporaryFile(delete=False) as temp_file:
+ # 将上传的文件保存到临时文件中
+ temp_file.write(audio_file.read())
+ # 获取临时文件的路径
+ temp_file_path = temp_file.name
+
+ try:
+ audio = AudioSegment.from_file(temp_file_path)
+ if audio.channels != 1:
+ audio = audio.set_channels(1)
+ audio = audio.set_frame_rate(16000)
+
+ # 将转换后的音频文件保存到临时文件中
+ audio.export(temp_file_path, format='mp3')
+ # 识别临时文件
+ result = recognition.call(temp_file_path)
+ text = ''
+ if result.status_code == 200:
+ result_sentence = result.get_sentence()
+ if result_sentence is not None:
+ for sentence in result_sentence:
+ text += sentence['text']
+ return text
+ else:
+ raise Exception('Error: ', result.message)
+ finally:
+ # 删除临时文件
+ os.remove(temp_file_path)
diff --git a/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/model/tti.py b/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/model/tti.py
new file mode 100644
index 00000000000..c39e1b3a7fc
--- /dev/null
+++ b/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/model/tti.py
@@ -0,0 +1,59 @@
+# coding=utf-8
+from http import HTTPStatus
+from typing import Dict
+
+from dashscope import ImageSynthesis
+from django.utils.translation import gettext
+from langchain_community.chat_models import ChatTongyi
+from langchain_core.messages import HumanMessage
+
+from setting.models_provider.base_model_provider import MaxKBBaseModel
+from setting.models_provider.impl.base_tti import BaseTextToImage
+
+
+class QwenTextToImageModel(MaxKBBaseModel, BaseTextToImage):
+ api_key: str
+ model_name: str
+ params: dict
+
+ def __init__(self, **kwargs):
+ super().__init__(**kwargs)
+ self.api_key = kwargs.get('api_key')
+ self.model_name = kwargs.get('model_name')
+ self.params = kwargs.get('params')
+
+ @staticmethod
+ def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs):
+ optional_params = {'params': {'size': '1024*1024', 'style': '', 'n': 1}}
+ for key, value in model_kwargs.items():
+ if key not in ['model_id', 'use_local', 'streaming']:
+ optional_params['params'][key] = value
+ chat_tong_yi = QwenTextToImageModel(
+ model_name=model_name,
+ api_key=model_credential.get('api_key'),
+ **optional_params,
+ )
+ return chat_tong_yi
+
+ def is_cache_model(self):
+ return False
+
+ def check_auth(self):
+ chat = ChatTongyi(api_key=self.api_key, model_name='qwen-max')
+ chat.invoke([HumanMessage([{"type": "text", "text": gettext('Hello')}])])
+
+ def generate_image(self, prompt: str, negative_prompt: str = None):
+ # api_base='https://dashscope.aliyuncs.com/compatible-mode/v1',
+ rsp = ImageSynthesis.call(api_key=self.api_key,
+ model=self.model_name,
+ prompt=prompt,
+ negative_prompt=negative_prompt,
+ **self.params)
+ file_urls = []
+ if rsp.status_code == HTTPStatus.OK:
+ for result in rsp.output.results:
+ file_urls.append(result.url)
+ else:
+ print('sync_call Failed, status_code: %s, code: %s, message: %s' %
+ (rsp.status_code, rsp.code, rsp.message))
+ return file_urls
diff --git a/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/model/tts.py b/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/model/tts.py
new file mode 100644
index 00000000000..60c1a77fad8
--- /dev/null
+++ b/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/model/tts.py
@@ -0,0 +1,57 @@
+from typing import Dict
+
+import dashscope
+
+from django.utils.translation import gettext as _
+
+from common.util.common import _remove_empty_lines
+from setting.models_provider.base_model_provider import MaxKBBaseModel
+from setting.models_provider.impl.base_tts import BaseTextToSpeech
+
+
+class AliyunBaiLianTextToSpeech(MaxKBBaseModel, BaseTextToSpeech):
+ api_key: str
+ model: str
+ params: dict
+
+ def __init__(self, **kwargs):
+ super().__init__(**kwargs)
+ self.api_key = kwargs.get('api_key')
+ self.model = kwargs.get('model')
+ self.params = kwargs.get('params')
+
+ @staticmethod
+ def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs):
+ optional_params = {'params': {'voice': 'longxiaochun', 'speech_rate': 1.0}}
+ for key, value in model_kwargs.items():
+ if key not in ['model_id', 'use_local', 'streaming']:
+ optional_params['params'][key] = value
+
+ return AliyunBaiLianTextToSpeech(
+ model=model_name,
+ api_key=model_credential.get('api_key'),
+ **optional_params,
+ )
+
+ def check_auth(self):
+ self.text_to_speech(_('Hello'))
+
+ def text_to_speech(self, text):
+ dashscope.api_key = self.api_key
+ text = _remove_empty_lines(text)
+ if 'sambert' in self.model:
+ from dashscope.audio.tts import SpeechSynthesizer
+ audio = SpeechSynthesizer.call(model=self.model, text=text, **self.params).get_audio_data()
+ else:
+ from dashscope.audio.tts_v2 import SpeechSynthesizer
+ synthesizer = SpeechSynthesizer(model=self.model, **self.params)
+ audio = synthesizer.call(text)
+ if audio is None:
+ raise Exception('Failed to generate audio')
+ if type(audio) == str:
+ print(audio)
+ raise Exception(audio)
+ return audio
+
+ def is_cache_model(self):
+ return False
diff --git a/apps/setting/models_provider/impl/anthropic_model_provider/__init__.py b/apps/setting/models_provider/impl/anthropic_model_provider/__init__.py
new file mode 100644
index 00000000000..2dc4ab10db4
--- /dev/null
+++ b/apps/setting/models_provider/impl/anthropic_model_provider/__init__.py
@@ -0,0 +1,8 @@
+# coding=utf-8
+"""
+ @project: maxkb
+ @Author:虎
+ @file: __init__.py
+ @date:2024/3/28 16:25
+ @desc:
+"""
diff --git a/apps/setting/models_provider/impl/anthropic_model_provider/anthropic_model_provider.py b/apps/setting/models_provider/impl/anthropic_model_provider/anthropic_model_provider.py
new file mode 100644
index 00000000000..7b3f91f1a32
--- /dev/null
+++ b/apps/setting/models_provider/impl/anthropic_model_provider/anthropic_model_provider.py
@@ -0,0 +1,62 @@
+# coding=utf-8
+"""
+ @project: maxkb
+ @Author:虎
+ @file: anthropic_model_provider.py
+ @date:2024/3/28 16:26
+ @desc:
+"""
+import os
+
+from common.util.file_util import get_file_content
+from setting.models_provider.base_model_provider import IModelProvider, ModelProvideInfo, ModelInfo, \
+ ModelTypeConst, ModelInfoManage
+from setting.models_provider.impl.anthropic_model_provider.credential.image import AnthropicImageModelCredential
+from setting.models_provider.impl.anthropic_model_provider.credential.llm import AnthropicLLMModelCredential
+from setting.models_provider.impl.anthropic_model_provider.model.image import AnthropicImage
+from setting.models_provider.impl.anthropic_model_provider.model.llm import AnthropicChatModel
+from smartdoc.conf import PROJECT_DIR
+
+openai_llm_model_credential = AnthropicLLMModelCredential()
+openai_image_model_credential = AnthropicImageModelCredential()
+
+model_info_list = [
+ ModelInfo('claude-3-opus-20240229', '', ModelTypeConst.LLM,
+ openai_llm_model_credential, AnthropicChatModel
+ ),
+ ModelInfo('claude-3-sonnet-20240229', '', ModelTypeConst.LLM, openai_llm_model_credential,
+ AnthropicChatModel),
+ ModelInfo('claude-3-haiku-20240307', '', ModelTypeConst.LLM, openai_llm_model_credential,
+ AnthropicChatModel),
+ ModelInfo('claude-3-5-sonnet-20240620', '', ModelTypeConst.LLM, openai_llm_model_credential,
+ AnthropicChatModel),
+ ModelInfo('claude-3-5-haiku-20241022', '', ModelTypeConst.LLM, openai_llm_model_credential,
+ AnthropicChatModel),
+ ModelInfo('claude-3-5-sonnet-20241022', '', ModelTypeConst.LLM, openai_llm_model_credential,
+ AnthropicChatModel),
+]
+
+image_model_info = [
+ ModelInfo('claude-3-5-sonnet-20241022', '', ModelTypeConst.IMAGE, openai_image_model_credential,
+ AnthropicImage),
+]
+
+model_info_manage = (
+ ModelInfoManage.builder()
+ .append_model_info_list(model_info_list)
+ .append_default_model_info(model_info_list[0])
+ .append_model_info_list(image_model_info)
+ .append_default_model_info(image_model_info[0])
+ .build()
+)
+
+
+class AnthropicModelProvider(IModelProvider):
+
+ def get_model_info_manage(self):
+ return model_info_manage
+
+ def get_model_provide_info(self):
+ return ModelProvideInfo(provider='model_anthropic_provider', name='Anthropic', icon=get_file_content(
+ os.path.join(PROJECT_DIR, "apps", "setting", 'models_provider', 'impl', 'anthropic_model_provider', 'icon',
+ 'anthropic_icon_svg')))
diff --git a/apps/setting/models_provider/impl/anthropic_model_provider/credential/image.py b/apps/setting/models_provider/impl/anthropic_model_provider/credential/image.py
new file mode 100644
index 00000000000..bf49c2c4578
--- /dev/null
+++ b/apps/setting/models_provider/impl/anthropic_model_provider/credential/image.py
@@ -0,0 +1,72 @@
+# coding=utf-8
+import traceback
+from typing import Dict
+
+from django.utils.translation import gettext_lazy as _, gettext
+from langchain_core.messages import HumanMessage
+
+from common import forms
+from common.exception.app_exception import AppApiException
+from common.forms import BaseForm, TooltipLabel
+from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode
+
+
+class AnthropicImageModelParams(BaseForm):
+ temperature = forms.SliderField(TooltipLabel(_('Temperature'),
+ _('Higher values make the output more random, while lower values make it more focused and deterministic')),
+ required=True, default_value=0.7,
+ _min=0.1,
+ _max=1.0,
+ _step=0.01,
+ precision=2)
+
+ max_tokens = forms.SliderField(
+ TooltipLabel(_('Output the maximum Tokens'),
+ _('Specify the maximum number of tokens that the model can generate')),
+ required=True, default_value=800,
+ _min=1,
+ _max=100000,
+ _step=1,
+ precision=0)
+
+
+class AnthropicImageModelCredential(BaseForm, BaseModelCredential):
+ api_base = forms.TextInputField(_('API URL'), required=True)
+ api_key = forms.PasswordInputField(_('API Key'), required=True)
+
+ def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], model_params, provider,
+ raise_exception=False):
+ model_type_list = provider.get_model_type_list()
+ if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))):
+ raise AppApiException(ValidCode.valid_error.value,
+ gettext('{model_type} Model type is not supported').format(model_type=model_type))
+
+ for key in ['api_base', 'api_key']:
+ if key not in model_credential:
+ if raise_exception:
+ raise AppApiException(ValidCode.valid_error.value, gettext('{key} is required').format(key=key))
+ else:
+ return False
+ try:
+ model = provider.get_model(model_type, model_name, model_credential)
+ res = model.stream([HumanMessage(content=[{"type": "text", "text": gettext("Hello")}])])
+ for chunk in res:
+ print(chunk)
+ except Exception as e:
+ traceback.print_exc()
+ if isinstance(e, AppApiException):
+ raise e
+ if raise_exception:
+ raise AppApiException(ValidCode.valid_error.value,
+ gettext(
+ 'Verification failed, please check whether the parameters are correct: {error}').format(
+ error=str(e)))
+ else:
+ return False
+ return True
+
+ def encryption_dict(self, model: Dict[str, object]):
+ return {**model, 'api_key': super().encryption(model.get('api_key', ''))}
+
+ def get_model_params_setting_form(self, model_name):
+ return AnthropicImageModelParams()
diff --git a/apps/setting/models_provider/impl/anthropic_model_provider/credential/llm.py b/apps/setting/models_provider/impl/anthropic_model_provider/credential/llm.py
new file mode 100644
index 00000000000..350cd9414f7
--- /dev/null
+++ b/apps/setting/models_provider/impl/anthropic_model_provider/credential/llm.py
@@ -0,0 +1,78 @@
+# coding=utf-8
+"""
+ @project: MaxKB
+ @Author:虎
+ @file: llm.py
+ @date:2024/7/11 18:32
+ @desc:
+"""
+import traceback
+from typing import Dict
+
+from langchain_core.messages import HumanMessage
+
+from common import forms
+from common.exception.app_exception import AppApiException
+from common.forms import BaseForm, TooltipLabel
+from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode
+from django.utils.translation import gettext_lazy as _, gettext
+
+
+class AnthropicLLMModelParams(BaseForm):
+ temperature = forms.SliderField(TooltipLabel(_('Temperature'),
+ _('Higher values make the output more random, while lower values make it more focused and deterministic')),
+ required=True, default_value=0.7,
+ _min=0.1,
+ _max=1.0,
+ _step=0.01,
+ precision=2)
+
+ max_tokens = forms.SliderField(
+ TooltipLabel(_('Output the maximum Tokens'),
+ _('Specify the maximum number of tokens that the model can generate')),
+ required=True, default_value=800,
+ _min=1,
+ _max=100000,
+ _step=1,
+ precision=0)
+
+
+class AnthropicLLMModelCredential(BaseForm, BaseModelCredential):
+
+ def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], model_params, provider,
+ raise_exception=False):
+ model_type_list = provider.get_model_type_list()
+ if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))):
+ raise AppApiException(ValidCode.valid_error.value,
+ gettext('{model_type} Model type is not supported').format(model_type=model_type))
+
+ for key in ['api_base', 'api_key']:
+ if key not in model_credential:
+ if raise_exception:
+ raise AppApiException(ValidCode.valid_error.value, gettext('{key} is required').format(key=key))
+ else:
+ return False
+ try:
+ model = provider.get_model(model_type, model_name, model_credential)
+ model.invoke([HumanMessage(content=gettext('Hello'))])
+ except Exception as e:
+ traceback.print_exc()
+ if isinstance(e, AppApiException):
+ raise e
+ if raise_exception:
+ raise AppApiException(ValidCode.valid_error.value,
+ gettext(
+ 'Verification failed, please check whether the parameters are correct: {error}').format(
+ error=str(e)))
+ else:
+ return False
+ return True
+
+ def encryption_dict(self, model: Dict[str, object]):
+ return {**model, 'api_key': super().encryption(model.get('api_key', ''))}
+
+ api_base = forms.TextInputField(_('API URL'), required=True)
+ api_key = forms.PasswordInputField(_('API Key'), required=True)
+
+ def get_model_params_setting_form(self, model_name):
+ return AnthropicLLMModelParams()
diff --git a/apps/setting/models_provider/impl/anthropic_model_provider/icon/anthropic_icon_svg b/apps/setting/models_provider/impl/anthropic_model_provider/icon/anthropic_icon_svg
new file mode 100644
index 00000000000..342d40be8a9
--- /dev/null
+++ b/apps/setting/models_provider/impl/anthropic_model_provider/icon/anthropic_icon_svg
@@ -0,0 +1 @@
+
\ No newline at end of file
diff --git a/apps/setting/models_provider/impl/anthropic_model_provider/model/image.py b/apps/setting/models_provider/impl/anthropic_model_provider/model/image.py
new file mode 100644
index 00000000000..9582522cc6e
--- /dev/null
+++ b/apps/setting/models_provider/impl/anthropic_model_provider/model/image.py
@@ -0,0 +1,26 @@
+from typing import Dict
+
+from langchain_anthropic import ChatAnthropic
+
+from common.config.tokenizer_manage_config import TokenizerManage
+from setting.models_provider.base_model_provider import MaxKBBaseModel
+
+
+def custom_get_token_ids(text: str):
+ tokenizer = TokenizerManage.get_tokenizer()
+ return tokenizer.encode(text)
+
+
+class AnthropicImage(MaxKBBaseModel, ChatAnthropic):
+
+ @staticmethod
+ def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs):
+ optional_params = MaxKBBaseModel.filter_optional_params(model_kwargs)
+ return AnthropicImage(
+ model=model_name,
+ anthropic_api_url=model_credential.get('api_base'),
+ anthropic_api_key=model_credential.get('api_key'),
+ # stream_options={"include_usage": True},
+ streaming=True,
+ **optional_params,
+ )
diff --git a/apps/setting/models_provider/impl/anthropic_model_provider/model/llm.py b/apps/setting/models_provider/impl/anthropic_model_provider/model/llm.py
new file mode 100644
index 00000000000..de055e1044e
--- /dev/null
+++ b/apps/setting/models_provider/impl/anthropic_model_provider/model/llm.py
@@ -0,0 +1,53 @@
+# coding=utf-8
+"""
+ @project: maxkb
+ @Author:虎
+ @file: llm.py
+ @date:2024/4/18 15:28
+ @desc:
+"""
+from typing import List, Dict
+
+from langchain_anthropic import ChatAnthropic
+from langchain_core.messages import BaseMessage, get_buffer_string
+
+from common.config.tokenizer_manage_config import TokenizerManage
+from setting.models_provider.base_model_provider import MaxKBBaseModel
+
+
+def custom_get_token_ids(text: str):
+ tokenizer = TokenizerManage.get_tokenizer()
+ return tokenizer.encode(text)
+
+
+class AnthropicChatModel(MaxKBBaseModel, ChatAnthropic):
+
+ @staticmethod
+ def is_cache_model():
+ return False
+
+ @staticmethod
+ def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs):
+ optional_params = MaxKBBaseModel.filter_optional_params(model_kwargs)
+ azure_chat_open_ai = AnthropicChatModel(
+ model=model_name,
+ anthropic_api_url=model_credential.get('api_base'),
+ anthropic_api_key=model_credential.get('api_key'),
+ **optional_params,
+ custom_get_token_ids=custom_get_token_ids
+ )
+ return azure_chat_open_ai
+
+ def get_num_tokens_from_messages(self, messages: List[BaseMessage]) -> int:
+ try:
+ return super().get_num_tokens_from_messages(messages)
+ except Exception as e:
+ tokenizer = TokenizerManage.get_tokenizer()
+ return sum([len(tokenizer.encode(get_buffer_string([m]))) for m in messages])
+
+ def get_num_tokens(self, text: str) -> int:
+ try:
+ return super().get_num_tokens(text)
+ except Exception as e:
+ tokenizer = TokenizerManage.get_tokenizer()
+ return len(tokenizer.encode(text))
diff --git a/apps/setting/models_provider/impl/aws_bedrock_model_provider/__init__.py b/apps/setting/models_provider/impl/aws_bedrock_model_provider/__init__.py
new file mode 100644
index 00000000000..8cb7f459eae
--- /dev/null
+++ b/apps/setting/models_provider/impl/aws_bedrock_model_provider/__init__.py
@@ -0,0 +1,2 @@
+#!/usr/bin/env python
+# -*- coding: UTF-8 -*-
diff --git a/apps/setting/models_provider/impl/aws_bedrock_model_provider/aws_bedrock_model_provider.py b/apps/setting/models_provider/impl/aws_bedrock_model_provider/aws_bedrock_model_provider.py
new file mode 100644
index 00000000000..e5bb0dd44a4
--- /dev/null
+++ b/apps/setting/models_provider/impl/aws_bedrock_model_provider/aws_bedrock_model_provider.py
@@ -0,0 +1,154 @@
+#!/usr/bin/env python
+# -*- coding: UTF-8 -*-
+
+import os
+from common.util.file_util import get_file_content
+from setting.models_provider.base_model_provider import (
+ IModelProvider, ModelProvideInfo, ModelInfo, ModelTypeConst, ModelInfoManage
+)
+from setting.models_provider.impl.aws_bedrock_model_provider.credential.embedding import BedrockEmbeddingCredential
+from setting.models_provider.impl.aws_bedrock_model_provider.credential.llm import BedrockLLMModelCredential
+from setting.models_provider.impl.aws_bedrock_model_provider.model.embedding import BedrockEmbeddingModel
+from setting.models_provider.impl.aws_bedrock_model_provider.model.llm import BedrockModel
+from smartdoc.conf import PROJECT_DIR
+from django.utils.translation import gettext as _
+
+
+def _create_model_info(model_name, description, model_type, credential_class, model_class):
+ return ModelInfo(
+ name=model_name,
+ desc=description,
+ model_type=model_type,
+ model_credential=credential_class(),
+ model_class=model_class
+ )
+
+
+def _get_aws_bedrock_icon_path():
+ return os.path.join(PROJECT_DIR, "apps", "setting", 'models_provider', 'impl', 'aws_bedrock_model_provider',
+ 'icon', 'bedrock_icon_svg')
+
+
+def _initialize_model_info():
+ model_info_list = [
+ _create_model_info(
+ 'anthropic.claude-v2:1',
+ _('An update to Claude 2 that doubles the context window and improves reliability, hallucination rates, and evidence-based accuracy in long documents and RAG contexts.'),
+ ModelTypeConst.LLM,
+ BedrockLLMModelCredential,
+ BedrockModel
+ ),
+ _create_model_info(
+ 'anthropic.claude-v2',
+ _('Anthropic is a powerful model that can handle a variety of tasks, from complex dialogue and creative content generation to detailed command obedience.'),
+ ModelTypeConst.LLM,
+ BedrockLLMModelCredential,
+ BedrockModel
+ ),
+ _create_model_info(
+ 'anthropic.claude-3-haiku-20240307-v1:0',
+ _("The Claude 3 Haiku is Anthropic's fastest and most compact model, with near-instant responsiveness. The model can answer simple queries and requests quickly. Customers will be able to build seamless AI experiences that mimic human interactions. Claude 3 Haiku can process images and return text output, and provides 200K context windows."),
+ ModelTypeConst.LLM,
+ BedrockLLMModelCredential,
+ BedrockModel
+ ),
+ _create_model_info(
+ 'anthropic.claude-3-sonnet-20240229-v1:0',
+ _("The Claude 3 Sonnet model from Anthropic strikes the ideal balance between intelligence and speed, especially when it comes to handling enterprise workloads. This model offers maximum utility while being priced lower than competing products, and it's been engineered to be a solid choice for deploying AI at scale."),
+ ModelTypeConst.LLM,
+ BedrockLLMModelCredential,
+ BedrockModel
+ ),
+ _create_model_info(
+ 'anthropic.claude-3-5-sonnet-20240620-v1:0',
+ _('The Claude 3.5 Sonnet raises the industry standard for intelligence, outperforming competing models and the Claude 3 Opus in extensive evaluations, with the speed and cost-effectiveness of our mid-range models.'),
+ ModelTypeConst.LLM,
+ BedrockLLMModelCredential,
+ BedrockModel
+ ),
+ _create_model_info(
+ 'anthropic.claude-instant-v1',
+ _('A faster, more affordable but still very powerful model that can handle a range of tasks including casual conversation, text analysis, summarization and document question answering.'),
+ ModelTypeConst.LLM,
+ BedrockLLMModelCredential,
+ BedrockModel
+ ),
+ _create_model_info(
+ 'amazon.titan-text-premier-v1:0',
+ _("Titan Text Premier is the most powerful and advanced model in the Titan Text series, designed to deliver exceptional performance for a variety of enterprise applications. With its cutting-edge features, it delivers greater accuracy and outstanding results, making it an excellent choice for organizations looking for a top-notch text processing solution."),
+ ModelTypeConst.LLM,
+ BedrockLLMModelCredential,
+ BedrockModel
+ ),
+ _create_model_info(
+ 'amazon.titan-text-lite-v1',
+ _('Amazon Titan Text Lite is a lightweight, efficient model ideal for fine-tuning English-language tasks, including summarization and copywriting, where customers require smaller, more cost-effective, and highly customizable models.'),
+ ModelTypeConst.LLM,
+ BedrockLLMModelCredential,
+ BedrockModel),
+ _create_model_info(
+ 'amazon.titan-text-express-v1',
+ _('Amazon Titan Text Express has context lengths of up to 8,000 tokens, making it ideal for a variety of high-level general language tasks, such as open-ended text generation and conversational chat, as well as support in retrieval-augmented generation (RAG). At launch, the model is optimized for English, but other languages are supported.'),
+ ModelTypeConst.LLM,
+ BedrockLLMModelCredential,
+ BedrockModel),
+ _create_model_info(
+ 'mistral.mistral-7b-instruct-v0:2',
+ _('7B dense converter for rapid deployment and easy customization. Small in size yet powerful in a variety of use cases. Supports English and code, as well as 32k context windows.'),
+ ModelTypeConst.LLM,
+ BedrockLLMModelCredential,
+ BedrockModel),
+ _create_model_info(
+ 'mistral.mistral-large-2402-v1:0',
+ _('Advanced Mistral AI large-scale language model capable of handling any language task, including complex multilingual reasoning, text understanding, transformation, and code generation.'),
+ ModelTypeConst.LLM,
+ BedrockLLMModelCredential,
+ BedrockModel),
+ _create_model_info(
+ 'meta.llama3-70b-instruct-v1:0',
+ _('Ideal for content creation, conversational AI, language understanding, R&D, and enterprise applications'),
+ ModelTypeConst.LLM,
+ BedrockLLMModelCredential,
+ BedrockModel),
+ _create_model_info(
+ 'meta.llama3-8b-instruct-v1:0',
+ _('Ideal for limited computing power and resources, edge devices, and faster training times.'),
+ ModelTypeConst.LLM,
+ BedrockLLMModelCredential,
+ BedrockModel),
+ ]
+ embedded_model_info_list = [
+ _create_model_info(
+ 'amazon.titan-embed-text-v1',
+ _('Titan Embed Text is the largest embedding model in the Amazon Titan Embed series and can handle various text embedding tasks, such as text classification, text similarity calculation, etc.'),
+ ModelTypeConst.EMBEDDING,
+ BedrockEmbeddingCredential,
+ BedrockEmbeddingModel
+ ),
+ ]
+
+ model_info_manage = ModelInfoManage.builder() \
+ .append_model_info_list(model_info_list) \
+ .append_default_model_info(model_info_list[0]) \
+ .append_model_info_list(embedded_model_info_list) \
+ .append_default_model_info(embedded_model_info_list[0]) \
+ .build()
+
+ return model_info_manage
+
+
+class BedrockModelProvider(IModelProvider):
+ def __init__(self):
+ self._model_info_manage = _initialize_model_info()
+
+ def get_model_info_manage(self):
+ return self._model_info_manage
+
+ def get_model_provide_info(self):
+ icon_path = _get_aws_bedrock_icon_path()
+ icon_data = get_file_content(icon_path)
+ return ModelProvideInfo(
+ provider='model_aws_bedrock_provider',
+ name='Amazon Bedrock',
+ icon=icon_data
+ )
diff --git a/apps/setting/models_provider/impl/aws_bedrock_model_provider/credential/embedding.py b/apps/setting/models_provider/impl/aws_bedrock_model_provider/credential/embedding.py
new file mode 100644
index 00000000000..380335ce060
--- /dev/null
+++ b/apps/setting/models_provider/impl/aws_bedrock_model_provider/credential/embedding.py
@@ -0,0 +1,53 @@
+import traceback
+from typing import Dict
+
+from django.utils.translation import gettext as _
+
+from common import forms
+from common.exception.app_exception import AppApiException
+from common.forms import BaseForm
+from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode
+from setting.models_provider.impl.aws_bedrock_model_provider.model.embedding import BedrockEmbeddingModel
+
+
+class BedrockEmbeddingCredential(BaseForm, BaseModelCredential):
+
+ def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], model_params, provider,
+ raise_exception=False):
+ model_type_list = provider.get_model_type_list()
+ if not any(mt.get('value') == model_type for mt in model_type_list):
+ if raise_exception:
+ raise AppApiException(ValidCode.valid_error.value,
+ _('{model_type} Model type is not supported').format(model_type=model_type))
+ return False
+
+ required_keys = ['region_name', 'access_key_id', 'secret_access_key']
+ if not all(key in model_credential for key in required_keys):
+ if raise_exception:
+ raise AppApiException(ValidCode.valid_error.value,
+ _('The following fields are required: {keys}').format(
+ keys=", ".join(required_keys)))
+ return False
+
+ try:
+ model: BedrockEmbeddingModel = provider.get_model(model_type, model_name, model_credential)
+ aa = model.embed_query(_('Hello'))
+ print(aa)
+ except AppApiException:
+ raise
+ except Exception as e:
+ traceback.print_exc()
+ if raise_exception:
+ raise AppApiException(ValidCode.valid_error.value,
+ _('Verification failed, please check whether the parameters are correct: {error}').format(
+ error=str(e)))
+ return False
+
+ return True
+
+ def encryption_dict(self, model: Dict[str, object]):
+ return {**model, 'secret_access_key': super().encryption(model.get('secret_access_key', ''))}
+
+ region_name = forms.TextInputField('Region Name', required=True)
+ access_key_id = forms.TextInputField('Access Key ID', required=True)
+ secret_access_key = forms.PasswordInputField('Secret Access Key', required=True)
diff --git a/apps/setting/models_provider/impl/aws_bedrock_model_provider/credential/llm.py b/apps/setting/models_provider/impl/aws_bedrock_model_provider/credential/llm.py
new file mode 100644
index 00000000000..cc8f81f43b3
--- /dev/null
+++ b/apps/setting/models_provider/impl/aws_bedrock_model_provider/credential/llm.py
@@ -0,0 +1,76 @@
+import traceback
+from typing import Dict
+
+from django.utils.translation import gettext_lazy as _, gettext
+from langchain_core.messages import HumanMessage
+
+from common import forms
+from common.exception.app_exception import AppApiException
+from common.forms import BaseForm, TooltipLabel
+from setting.models_provider.base_model_provider import ValidCode, BaseModelCredential
+
+
+class BedrockLLMModelParams(BaseForm):
+ temperature = forms.SliderField(TooltipLabel(_('Temperature'),
+ _('Higher values make the output more random, while lower values make it more focused and deterministic')),
+ required=True, default_value=0.7,
+ _min=0.1,
+ _max=1.0,
+ _step=0.01,
+ precision=2)
+
+ max_tokens = forms.SliderField(
+ TooltipLabel(_('Output the maximum Tokens'),
+ _('Specify the maximum number of tokens that the model can generate')),
+ required=True, default_value=1024,
+ _min=1,
+ _max=100000,
+ _step=1,
+ precision=0)
+
+
+class BedrockLLMModelCredential(BaseForm, BaseModelCredential):
+
+ def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], model_params, provider,
+ raise_exception=False):
+ model_type_list = provider.get_model_type_list()
+ if not any(mt.get('value') == model_type for mt in model_type_list):
+ if raise_exception:
+ raise AppApiException(ValidCode.valid_error.value,
+ gettext('{model_type} Model type is not supported').format(model_type=model_type))
+ return False
+
+ required_keys = ['region_name', 'access_key_id', 'secret_access_key']
+ if not all(key in model_credential for key in required_keys):
+ if raise_exception:
+ raise AppApiException(ValidCode.valid_error.value,
+ gettext('The following fields are required: {keys}').format(
+ keys=", ".join(required_keys)))
+ return False
+
+ try:
+ model = provider.get_model(model_type, model_name, model_credential, **model_params)
+ model.invoke([HumanMessage(content=gettext('Hello'))])
+ except AppApiException:
+ raise
+ except Exception as e:
+ traceback.print_exc()
+ if raise_exception:
+ raise AppApiException(ValidCode.valid_error.value,
+ gettext(
+ 'Verification failed, please check whether the parameters are correct: {error}').format(
+ error=str(e)))
+ return False
+
+ return True
+
+ def encryption_dict(self, model: Dict[str, object]):
+ return {**model, 'secret_access_key': super().encryption(model.get('secret_access_key', ''))}
+
+ region_name = forms.TextInputField('Region Name', required=True)
+ access_key_id = forms.TextInputField('Access Key ID', required=True)
+ secret_access_key = forms.PasswordInputField('Secret Access Key', required=True)
+ base_url = forms.TextInputField('Proxy URL', required=False)
+
+ def get_model_params_setting_form(self, model_name):
+ return BedrockLLMModelParams()
diff --git a/apps/setting/models_provider/impl/aws_bedrock_model_provider/icon/bedrock_icon_svg b/apps/setting/models_provider/impl/aws_bedrock_model_provider/icon/bedrock_icon_svg
new file mode 100644
index 00000000000..5f176a7d27d
--- /dev/null
+++ b/apps/setting/models_provider/impl/aws_bedrock_model_provider/icon/bedrock_icon_svg
@@ -0,0 +1 @@
+
\ No newline at end of file
diff --git a/apps/setting/models_provider/impl/aws_bedrock_model_provider/model/embedding.py b/apps/setting/models_provider/impl/aws_bedrock_model_provider/model/embedding.py
new file mode 100644
index 00000000000..1375422524c
--- /dev/null
+++ b/apps/setting/models_provider/impl/aws_bedrock_model_provider/model/embedding.py
@@ -0,0 +1,60 @@
+from langchain_community.embeddings import BedrockEmbeddings
+
+from setting.models_provider.base_model_provider import MaxKBBaseModel
+from typing import Dict, List
+
+from setting.models_provider.impl.aws_bedrock_model_provider.model.llm import _update_aws_credentials
+
+
+class BedrockEmbeddingModel(MaxKBBaseModel, BedrockEmbeddings):
+ def __init__(self, model_id: str, region_name: str, credentials_profile_name: str,
+ **kwargs):
+ super().__init__(model_id=model_id, region_name=region_name,
+ credentials_profile_name=credentials_profile_name, **kwargs)
+
+ @classmethod
+    def new_instance(cls, model_type: str, model_name: str, model_credential: Dict[str, str],
+                     **model_kwargs) -> 'BedrockEmbeddingModel':
+ _update_aws_credentials(model_credential['access_key_id'], model_credential['access_key_id'],
+ model_credential['secret_access_key'])
+ return cls(
+ model_id=model_name,
+ region_name=model_credential['region_name'],
+ credentials_profile_name=model_credential['access_key_id'],
+ )
+
+ def embed_documents(self, texts: List[str]) -> List[List[float]]:
+ """Compute doc embeddings using a Bedrock model.
+
+ Args:
+ texts: The list of texts to embed
+
+ Returns:
+ List of embeddings, one for each text.
+ """
+ results = []
+ for text in texts:
+ response = self._embedding_func(text)
+
+ if self.normalize:
+ response = self._normalize_vector(response)
+
+ results.append(response)
+
+ return results
+
+ def embed_query(self, text: str) -> List[float]:
+ """Compute query embeddings using a Bedrock model.
+
+ Args:
+ text: The text to embed.
+
+ Returns:
+ Embeddings for the text.
+ """
+ embedding = self._embedding_func(text)
+
+ if self.normalize:
+ return self._normalize_vector(embedding)
+
+ return embedding
diff --git a/apps/setting/models_provider/impl/aws_bedrock_model_provider/model/llm.py b/apps/setting/models_provider/impl/aws_bedrock_model_provider/model/llm.py
new file mode 100644
index 00000000000..7b0088a4ab4
--- /dev/null
+++ b/apps/setting/models_provider/impl/aws_bedrock_model_provider/model/llm.py
@@ -0,0 +1,104 @@
+import os
+import re
+from typing import Dict, List
+
+from botocore.config import Config
+from langchain_community.chat_models import BedrockChat
+from langchain_core.messages import BaseMessage, get_buffer_string
+
+from common.config.tokenizer_manage_config import TokenizerManage
+from setting.models_provider.base_model_provider import MaxKBBaseModel
+
+
+def get_max_tokens_keyword(model_name):
+ """
+ 根据模型名称返回正确的 max_tokens 关键字。
+
+ :param model_name: 模型名称字符串
+ :return: 对应的 max_tokens 关键字字符串
+ """
+ maxTokens = ["ai21.j2-ultra-v1", "ai21.j2-mid-v1"]
+ # max_tokens_to_sample = ["anthropic.claude-v2:1", "anthropic.claude-v2", "anthropic.claude-instant-v1"]
+ maxTokenCount = ["amazon.titan-text-lite-v1", "amazon.titan-text-express-v1"]
+ max_new_tokens = [
+ "us.meta.llama3-2-1b-instruct-v1:0", "us.meta.llama3-2-3b-instruct-v1:0", "us.meta.llama3-2-11b-instruct-v1:0",
+ "us.meta.llama3-2-90b-instruct-v1:0"]
+ if model_name in maxTokens:
+ return 'maxTokens'
+ elif model_name in maxTokenCount:
+ return 'maxTokenCount'
+ elif model_name in max_new_tokens:
+ return 'max_new_tokens'
+ else:
+ return 'max_tokens'
+
+
+class BedrockModel(MaxKBBaseModel, BedrockChat):
+
+ @staticmethod
+ def is_cache_model():
+ return False
+
+ def __init__(self, model_id: str, region_name: str, credentials_profile_name: str,
+ streaming: bool = False, config: Config = None, **kwargs):
+ super().__init__(model_id=model_id, region_name=region_name,
+ credentials_profile_name=credentials_profile_name, streaming=streaming, config=config,
+ **kwargs)
+
+ @classmethod
+ def new_instance(cls, model_type: str, model_name: str, model_credential: Dict[str, str],
+ **model_kwargs) -> 'BedrockModel':
+ optional_params = MaxKBBaseModel.filter_optional_params(model_kwargs)
+
+ config = {}
+ # 判断model_kwargs是否包含 base_url 且不为空
+ if 'base_url' in model_credential and model_credential['base_url']:
+ proxy_url = model_credential['base_url']
+ config = Config(
+ proxies={
+ 'http': proxy_url,
+ 'https': proxy_url
+ },
+ connect_timeout=60,
+ read_timeout=60
+ )
+ _update_aws_credentials(model_credential['access_key_id'], model_credential['access_key_id'],
+ model_credential['secret_access_key'])
+
+ return cls(
+ model_id=model_name,
+ region_name=model_credential['region_name'],
+ credentials_profile_name=model_credential['access_key_id'],
+ streaming=model_kwargs.pop('streaming', True),
+ model_kwargs=optional_params,
+ config=config
+ )
+
+ def get_num_tokens_from_messages(self, messages: List[BaseMessage]) -> int:
+ try:
+ return super().get_num_tokens_from_messages(messages)
+ except Exception as e:
+ tokenizer = TokenizerManage.get_tokenizer()
+ return sum([len(tokenizer.encode(get_buffer_string([m]))) for m in messages])
+
+ def get_num_tokens(self, text: str) -> int:
+ try:
+ return super().get_num_tokens(text)
+ except Exception as e:
+ tokenizer = TokenizerManage.get_tokenizer()
+ return len(tokenizer.encode(text))
+
+
+def _update_aws_credentials(profile_name, access_key_id, secret_access_key):
+ credentials_path = os.path.join(os.path.expanduser("~"), ".aws", "credentials")
+ os.makedirs(os.path.dirname(credentials_path), exist_ok=True)
+
+    content = open(credentials_path, 'r').read() if os.path.exists(credentials_path) else ''  # NOTE(review): handle closed only by GC — consider a with-block
+ pattern = rf'\n*\[{profile_name}\]\n*(aws_access_key_id = .*)\n*(aws_secret_access_key = .*)\n*'
+    content = re.sub(pattern, '', content)  # no DOTALL: '.*' must stop at line ends, or later profiles get swallowed
+
+ if not re.search(rf'\[{profile_name}\]', content):
+ content += f"\n[{profile_name}]\naws_access_key_id = {access_key_id}\naws_secret_access_key = {secret_access_key}\n"
+
+ with open(credentials_path, 'w') as file:
+ file.write(content)
diff --git a/apps/setting/models_provider/impl/azure_model_provider/azure_model_provider.py b/apps/setting/models_provider/impl/azure_model_provider/azure_model_provider.py
index 3164dd8ea3e..e249f0b7cd1 100644
--- a/apps/setting/models_provider/impl/azure_model_provider/azure_model_provider.py
+++ b/apps/setting/models_provider/impl/azure_model_provider/azure_model_provider.py
@@ -7,98 +7,110 @@
@desc:
"""
import os
-from typing import Dict
-from langchain.schema import HumanMessage
-
-from common import forms
-from common.exception.app_exception import AppApiException
-from common.forms import BaseForm
from common.util.file_util import get_file_content
-from setting.models_provider.base_model_provider import IModelProvider, ModelProvideInfo, BaseModelCredential, \
- ModelInfo, \
- ModelTypeConst, ValidCode
+from setting.models_provider.base_model_provider import IModelProvider, ModelProvideInfo, ModelInfo, \
+ ModelTypeConst, ModelInfoManage
+from setting.models_provider.impl.azure_model_provider.credential.embedding import AzureOpenAIEmbeddingCredential
+from setting.models_provider.impl.azure_model_provider.credential.image import AzureOpenAIImageModelCredential
+from setting.models_provider.impl.azure_model_provider.credential.llm import AzureLLMModelCredential
+from setting.models_provider.impl.azure_model_provider.credential.stt import AzureOpenAISTTModelCredential
+from setting.models_provider.impl.azure_model_provider.credential.tti import AzureOpenAITextToImageModelCredential
+from setting.models_provider.impl.azure_model_provider.credential.tts import AzureOpenAITTSModelCredential
from setting.models_provider.impl.azure_model_provider.model.azure_chat_model import AzureChatModel
+from setting.models_provider.impl.azure_model_provider.model.embedding import AzureOpenAIEmbeddingModel
+from setting.models_provider.impl.azure_model_provider.model.image import AzureOpenAIImage
+from setting.models_provider.impl.azure_model_provider.model.stt import AzureOpenAISpeechToText
+from setting.models_provider.impl.azure_model_provider.model.tti import AzureOpenAITextToImage
+from setting.models_provider.impl.azure_model_provider.model.tts import AzureOpenAITextToSpeech
from smartdoc.conf import PROJECT_DIR
-
-
-class DefaultAzureLLMModelCredential(BaseForm, BaseModelCredential):
-
- def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], raise_exception=False):
- model_type_list = AzureModelProvider().get_model_type_list()
- if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))):
- raise AppApiException(ValidCode.valid_error.value, f'{model_type} 模型类型不支持')
-
- for key in ['api_base', 'api_key', 'deployment_name', 'api_version']:
- if key not in model_credential:
- if raise_exception:
- raise AppApiException(ValidCode.valid_error.value, f'{key} 字段为必填字段')
- else:
- return False
- try:
- model = AzureModelProvider().get_model(model_type, model_name, model_credential)
- model.invoke([HumanMessage(content='你好')])
- except Exception as e:
- if isinstance(e, AppApiException):
- raise e
- if raise_exception:
- raise AppApiException(ValidCode.valid_error.value, '校验失败,请检查参数是否正确')
- else:
- return False
-
- return True
-
- def encryption_dict(self, model: Dict[str, object]):
- return {**model, 'api_key': super().encryption(model.get('api_key', ''))}
-
- api_version = forms.TextInputField("API 版本 (api_version)", required=True)
-
- api_base = forms.TextInputField('API 域名 (azure_endpoint)', required=True)
-
- api_key = forms.PasswordInputField("API Key (api_key)", required=True)
-
- deployment_name = forms.TextInputField("部署名 (deployment_name)", required=True)
-
-
-base_azure_llm_model_credential = DefaultAzureLLMModelCredential()
-
-model_dict = {
- 'deployment_name': ModelInfo('Azure OpenAI', '具体的基础模型由部署名决定', ModelTypeConst.LLM,
- base_azure_llm_model_credential, api_version='2024-02-15-preview'
- )
-}
+from django.utils.translation import gettext_lazy as _
+
+base_azure_llm_model_credential = AzureLLMModelCredential()
+base_azure_embedding_model_credential = AzureOpenAIEmbeddingCredential()
+base_azure_image_model_credential = AzureOpenAIImageModelCredential()
+base_azure_tti_model_credential = AzureOpenAITextToImageModelCredential()
+base_azure_tts_model_credential = AzureOpenAITTSModelCredential()
+base_azure_stt_model_credential = AzureOpenAISTTModelCredential()
+
+default_model_info = [
+ ModelInfo('Azure OpenAI', '', ModelTypeConst.LLM,
+ base_azure_llm_model_credential, AzureChatModel, api_version='2024-02-15-preview'
+ ),
+ ModelInfo('gpt-4', '', ModelTypeConst.LLM,
+ base_azure_llm_model_credential, AzureChatModel, api_version='2024-02-15-preview'
+ ),
+ ModelInfo('gpt-4o', '', ModelTypeConst.LLM,
+ base_azure_llm_model_credential, AzureChatModel, api_version='2024-02-15-preview'
+ ),
+ ModelInfo('gpt-4o-mini', '', ModelTypeConst.LLM,
+ base_azure_llm_model_credential, AzureChatModel, api_version='2024-02-15-preview'
+ ),
+]
+
+embedding_model_info = [
+ ModelInfo('text-embedding-3-large', '', ModelTypeConst.EMBEDDING,
+ base_azure_embedding_model_credential, AzureOpenAIEmbeddingModel, api_version='2023-05-15'
+ ),
+ ModelInfo('text-embedding-3-small', '', ModelTypeConst.EMBEDDING,
+ base_azure_embedding_model_credential, AzureOpenAIEmbeddingModel, api_version='2023-05-15'
+ ),
+ ModelInfo('text-embedding-ada-002', '', ModelTypeConst.EMBEDDING,
+ base_azure_embedding_model_credential, AzureOpenAIEmbeddingModel, api_version='2023-05-15'
+ ),
+]
+
+image_model_info = [
+ ModelInfo('gpt-4o', '', ModelTypeConst.IMAGE,
+ base_azure_image_model_credential, AzureOpenAIImage, api_version='2023-05-15'
+ ),
+ ModelInfo('gpt-4o-mini', '', ModelTypeConst.IMAGE,
+ base_azure_image_model_credential, AzureOpenAIImage, api_version='2023-05-15'
+ ),
+]
+
+tti_model_info = [
+ ModelInfo('dall-e-3', '', ModelTypeConst.TTI,
+ base_azure_tti_model_credential, AzureOpenAITextToImage, api_version='2023-05-15'
+ ),
+]
+
+tts_model_info = [
+ ModelInfo('tts', '', ModelTypeConst.TTS,
+ base_azure_tts_model_credential, AzureOpenAITextToSpeech, api_version='2023-05-15'
+ ),
+]
+
+stt_model_info = [
+ ModelInfo('whisper', '', ModelTypeConst.STT,
+ base_azure_stt_model_credential, AzureOpenAISpeechToText, api_version='2023-05-15'
+ ),
+]
+
+model_info_manage = (
+ ModelInfoManage.builder()
+ .append_default_model_info(default_model_info[0])
+ .append_model_info_list(default_model_info)
+ .append_model_info_list(embedding_model_info)
+ .append_default_model_info(embedding_model_info[0])
+ .append_model_info_list(image_model_info)
+ .append_default_model_info(image_model_info[0])
+ .append_model_info_list(stt_model_info)
+ .append_default_model_info(stt_model_info[0])
+ .append_model_info_list(tts_model_info)
+ .append_default_model_info(tts_model_info[0])
+ .append_model_info_list(tti_model_info)
+ .append_default_model_info(tti_model_info[0])
+ .build()
+)
class AzureModelProvider(IModelProvider):
- def get_dialogue_number(self):
- return 3
-
- def get_model(self, model_type, model_name, model_credential: Dict[str, object], **model_kwargs) -> AzureChatModel:
- azure_chat_open_ai = AzureChatModel(
- azure_endpoint=model_credential.get('api_base'),
- openai_api_version=model_credential.get('api_version', '2024-02-15-preview'),
- deployment_name=model_credential.get('deployment_name'),
- openai_api_key=model_credential.get('api_key'),
- openai_api_type="azure"
- )
- return azure_chat_open_ai
-
- def get_model_credential(self, model_type, model_name):
- if model_name in model_dict:
- return model_dict.get(model_name).model_credential
- return base_azure_llm_model_credential
+ def get_model_info_manage(self):
+ return model_info_manage
def get_model_provide_info(self):
return ModelProvideInfo(provider='model_azure_provider', name='Azure OpenAI', icon=get_file_content(
os.path.join(PROJECT_DIR, "apps", "setting", 'models_provider', 'impl', 'azure_model_provider', 'icon',
'azure_icon_svg')))
-
- def get_model_list(self, model_type: str):
- if model_type is None:
- raise AppApiException(500, '模型类型不能为空')
- return [model_dict.get(key).to_dict() for key in
- list(filter(lambda key: model_dict.get(key).model_type == model_type, model_dict.keys()))]
-
- def get_model_type_list(self):
- return [{'key': "大语言模型", 'value': "LLM"}]
diff --git a/apps/setting/models_provider/impl/azure_model_provider/credential/embedding.py b/apps/setting/models_provider/impl/azure_model_provider/credential/embedding.py
new file mode 100644
index 00000000000..471e6c38400
--- /dev/null
+++ b/apps/setting/models_provider/impl/azure_model_provider/credential/embedding.py
@@ -0,0 +1,57 @@
+# coding=utf-8
+"""
+ @project: MaxKB
+ @Author:虎
+ @file: llm.py
+ @date:2024/7/11 17:08
+ @desc:
+"""
+import traceback
+from typing import Dict
+
+from django.utils.translation import gettext as _
+
+from common import forms
+from common.exception.app_exception import AppApiException
+from common.forms import BaseForm
+from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode
+
+
+class AzureOpenAIEmbeddingCredential(BaseForm, BaseModelCredential):
+
+ def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], model_params, provider,
+ raise_exception=False):
+ model_type_list = provider.get_model_type_list()
+ if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))):
+ raise AppApiException(ValidCode.valid_error.value,
+ _('{model_type} Model type is not supported').format(model_type=model_type))
+
+ for key in ['api_base', 'api_key', 'api_version']:
+ if key not in model_credential:
+ if raise_exception:
+ raise AppApiException(ValidCode.valid_error.value, _('{key} is required').format(key=key))
+ else:
+ return False
+ try:
+ model = provider.get_model(model_type, model_name, model_credential)
+ model.embed_query(_('Hello'))
+ except Exception as e:
+ traceback.print_exc()
+ if isinstance(e, AppApiException):
+ raise e
+ if raise_exception:
+ raise AppApiException(ValidCode.valid_error.value,
+ _('Verification failed, please check whether the parameters are correct'))
+ else:
+ return False
+
+ return True
+
+ def encryption_dict(self, model: Dict[str, object]):
+ return {**model, 'api_key': super().encryption(model.get('api_key', ''))}
+
+ api_version = forms.TextInputField("Api Version", required=True)
+
+ api_base = forms.TextInputField('Azure Endpoint', required=True)
+
+ api_key = forms.PasswordInputField("API Key", required=True)
diff --git a/apps/setting/models_provider/impl/azure_model_provider/credential/image.py b/apps/setting/models_provider/impl/azure_model_provider/credential/image.py
new file mode 100644
index 00000000000..ee8e7b850d6
--- /dev/null
+++ b/apps/setting/models_provider/impl/azure_model_provider/credential/image.py
@@ -0,0 +1,75 @@
+# coding=utf-8
+import base64
+import os
+import traceback
+from typing import Dict
+
+from langchain_core.messages import HumanMessage
+
+from common import forms
+from common.exception.app_exception import AppApiException
+from common.forms import BaseForm, TooltipLabel
+from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode
+from django.utils.translation import gettext_lazy as _, gettext
+
+
+class AzureOpenAIImageModelParams(BaseForm):
+ temperature = forms.SliderField(TooltipLabel(_('Temperature'),
+ _('Higher values make the output more random, while lower values make it more focused and deterministic')),
+ required=True, default_value=0.7,
+ _min=0.1,
+ _max=1.0,
+ _step=0.01,
+ precision=2)
+
+ max_tokens = forms.SliderField(
+ TooltipLabel(_('Output the maximum Tokens'),
+ _('Specify the maximum number of tokens that the model can generate')),
+ required=True, default_value=800,
+ _min=1,
+ _max=100000,
+ _step=1,
+ precision=0)
+
+
+class AzureOpenAIImageModelCredential(BaseForm, BaseModelCredential):
+ api_version = forms.TextInputField("API Version", required=True)
+ api_base = forms.TextInputField('Azure Endpoint', required=True)
+ api_key = forms.PasswordInputField("API Key", required=True)
+
+ def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], model_params, provider,
+ raise_exception=False):
+ model_type_list = provider.get_model_type_list()
+ if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))):
+ raise AppApiException(ValidCode.valid_error.value,
+ gettext('{model_type} Model type is not supported').format(model_type=model_type))
+
+ for key in ['api_base', 'api_key', 'api_version']:
+ if key not in model_credential:
+ if raise_exception:
+ raise AppApiException(ValidCode.valid_error.value, gettext('{key} is required').format(key=key))
+ else:
+ return False
+ try:
+ model = provider.get_model(model_type, model_name, model_credential, **model_params)
+ res = model.stream([HumanMessage(content=[{"type": "text", "text": gettext('Hello')}])])
+            for chunk in res:
+                pass  # consume the stream to verify the model responds; output is irrelevant
+ except Exception as e:
+ traceback.print_exc()
+ if isinstance(e, AppApiException):
+ raise e
+ if raise_exception:
+ raise AppApiException(ValidCode.valid_error.value,
+ gettext(
+ 'Verification failed, please check whether the parameters are correct: {error}').format(
+ error=str(e)))
+ else:
+ return False
+ return True
+
+ def encryption_dict(self, model: Dict[str, object]):
+ return {**model, 'api_key': super().encryption(model.get('api_key', ''))}
+
+ def get_model_params_setting_form(self, model_name):
+ return AzureOpenAIImageModelParams()
diff --git a/apps/setting/models_provider/impl/azure_model_provider/credential/llm.py b/apps/setting/models_provider/impl/azure_model_provider/credential/llm.py
new file mode 100644
index 00000000000..ac17279240c
--- /dev/null
+++ b/apps/setting/models_provider/impl/azure_model_provider/credential/llm.py
@@ -0,0 +1,96 @@
+# coding=utf-8
+"""
+ @project: MaxKB
+ @Author:虎
+ @file: llm.py
+ @date:2024/7/11 17:08
+ @desc:
+"""
+import traceback
+from typing import Dict
+
+from langchain_core.messages import HumanMessage
+from openai import BadRequestError
+
+from common import forms
+from common.exception.app_exception import AppApiException
+from common.forms import BaseForm, TooltipLabel
+from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode
+from django.utils.translation import gettext_lazy as _, gettext
+
+
+class AzureLLMModelParams(BaseForm):
+ temperature = forms.SliderField(TooltipLabel(_('Temperature'),
+ _('Higher values make the output more random, while lower values make it more focused and deterministic')),
+ required=True, default_value=0.7,
+ _min=0.1,
+ _max=1.0,
+ _step=0.01,
+ precision=2)
+
+ max_tokens = forms.SliderField(
+ TooltipLabel(_('Output the maximum Tokens'),
+ _('Specify the maximum number of tokens that the model can generate')),
+ required=True, default_value=800,
+ _min=1,
+ _max=100000,
+ _step=1,
+ precision=0)
+
+
+class o3MiniLLMModelParams(BaseForm):
+ max_completion_tokens = forms.SliderField(
+ TooltipLabel(_('Output the maximum Tokens'),
+ _('Specify the maximum number of tokens that the model can generate')),
+ required=True, default_value=800,
+ _min=1,
+ _max=5000,
+ _step=1,
+ precision=0)
+
+
+class AzureLLMModelCredential(BaseForm, BaseModelCredential):
+
+ def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], model_params, provider,
+ raise_exception=False):
+ model_type_list = provider.get_model_type_list()
+ if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))):
+ raise AppApiException(ValidCode.valid_error.value,
+ gettext('{model_type} Model type is not supported').format(model_type=model_type))
+
+ for key in ['api_base', 'api_key', 'deployment_name', 'api_version']:
+ if key not in model_credential:
+ if raise_exception:
+ raise AppApiException(ValidCode.valid_error.value, gettext('{key} is required').format(key=key))
+ else:
+ return False
+ try:
+ model = provider.get_model(model_type, model_name, model_credential, **model_params)
+ model.invoke([HumanMessage(content=gettext('Hello'))])
+ except Exception as e:
+ traceback.print_exc()
+ if isinstance(e, AppApiException) or isinstance(e, BadRequestError):
+ raise e
+ if raise_exception:
+ raise AppApiException(ValidCode.valid_error.value,
+ gettext('Verification failed, please check whether the parameters are correct'))
+ else:
+ return False
+
+ return True
+
+ def encryption_dict(self, model: Dict[str, object]):
+ return {**model, 'api_key': super().encryption(model.get('api_key', ''))}
+
+ api_version = forms.TextInputField("API Version", required=True)
+
+ api_base = forms.TextInputField('Azure Endpoint', required=True)
+
+ api_key = forms.PasswordInputField("API Key", required=True)
+
+ deployment_name = forms.TextInputField("Deployment name", required=True)
+
+ def get_model_params_setting_form(self, model_name):
+ if 'o3' in model_name or 'o1' in model_name:
+ return o3MiniLLMModelParams()
+ return AzureLLMModelParams()
diff --git a/apps/setting/models_provider/impl/azure_model_provider/credential/stt.py b/apps/setting/models_provider/impl/azure_model_provider/credential/stt.py
new file mode 100644
index 00000000000..f1575dbefef
--- /dev/null
+++ b/apps/setting/models_provider/impl/azure_model_provider/credential/stt.py
@@ -0,0 +1,50 @@
+# coding=utf-8
+import traceback
+from typing import Dict
+
+from django.utils.translation import gettext as _
+
+from common import forms
+from common.exception.app_exception import AppApiException
+from common.forms import BaseForm
+from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode
+
+
+class AzureOpenAISTTModelCredential(BaseForm, BaseModelCredential):
+ api_version = forms.TextInputField("API Version", required=True)
+ api_base = forms.TextInputField('Azure Endpoint', required=True)
+ api_key = forms.PasswordInputField("API Key", required=True)
+
+ def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], model_params, provider,
+ raise_exception=False):
+ model_type_list = provider.get_model_type_list()
+ if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))):
+ raise AppApiException(ValidCode.valid_error.value,
+ _('{model_type} Model type is not supported').format(model_type=model_type))
+
+ for key in ['api_base', 'api_key', 'api_version']:
+ if key not in model_credential:
+ if raise_exception:
+ raise AppApiException(ValidCode.valid_error.value, _('{key} is required').format(key=key))
+ else:
+ return False
+ try:
+ model = provider.get_model(model_type, model_name, model_credential)
+ model.check_auth()
+ except Exception as e:
+ traceback.print_exc()
+ if isinstance(e, AppApiException):
+ raise e
+ if raise_exception:
+ raise AppApiException(ValidCode.valid_error.value,
+ _('Verification failed, please check whether the parameters are correct: {error}').format(
+ error=str(e)))
+ else:
+ return False
+ return True
+
+ def encryption_dict(self, model: Dict[str, object]):
+ return {**model, 'api_key': super().encryption(model.get('api_key', ''))}
+
+ def get_model_params_setting_form(self, model_name):
+ pass
diff --git a/apps/setting/models_provider/impl/azure_model_provider/credential/tti.py b/apps/setting/models_provider/impl/azure_model_provider/credential/tti.py
new file mode 100644
index 00000000000..34d0c510622
--- /dev/null
+++ b/apps/setting/models_provider/impl/azure_model_provider/credential/tti.py
@@ -0,0 +1,87 @@
+# coding=utf-8
+import traceback
+from typing import Dict
+
+from django.utils.translation import gettext_lazy as _, gettext
+
+from common import forms
+from common.exception.app_exception import AppApiException
+from common.forms import BaseForm, TooltipLabel
+from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode
+
+
+class AzureOpenAITTIModelParams(BaseForm):
+ size = forms.SingleSelect(
+ TooltipLabel(_('Image size'), _('Specify the size of the generated image, such as: 1024x1024')),
+ required=True,
+ default_value='1024x1024',
+ option_list=[
+ {'value': '1024x1024', 'label': '1024x1024'},
+ {'value': '1024x1792', 'label': '1024x1792'},
+ {'value': '1792x1024', 'label': '1792x1024'},
+ ],
+ text_field='label',
+ value_field='value'
+ )
+
+ quality = forms.SingleSelect(
+ TooltipLabel(_('Picture quality'), ''),
+ required=True,
+ default_value='standard',
+ option_list=[
+ {'value': 'standard', 'label': 'standard'},
+ {'value': 'hd', 'label': 'hd'},
+ ],
+ text_field='label',
+ value_field='value'
+ )
+
+ n = forms.SliderField(
+ TooltipLabel(_('Number of pictures'), _('Specify the number of generated images')),
+ required=True, default_value=1,
+ _min=1,
+ _max=10,
+ _step=1,
+ precision=0)
+
+
+class AzureOpenAITextToImageModelCredential(BaseForm, BaseModelCredential):
+ api_version = forms.TextInputField("API Version", required=True)
+ api_base = forms.TextInputField('Azure Endpoint', required=True)
+ api_key = forms.PasswordInputField("API Key", required=True)
+
+ def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], model_params, provider,
+ raise_exception=False):
+ model_type_list = provider.get_model_type_list()
+ if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))):
+ raise AppApiException(ValidCode.valid_error.value,
+ gettext('{model_type} Model type is not supported').format(model_type=model_type))
+
+ for key in ['api_base', 'api_key', 'api_version']:
+ if key not in model_credential:
+ if raise_exception:
+ raise AppApiException(ValidCode.valid_error.value, gettext('{key} is required').format(key=key))
+ else:
+ return False
+ try:
+ model = provider.get_model(model_type, model_name, model_credential, **model_params)
+            # Validate credentials via a lightweight auth check; result is unused.
+            model.check_auth()
+ except Exception as e:
+ traceback.print_exc()
+ if isinstance(e, AppApiException):
+ raise e
+ if raise_exception:
+ raise AppApiException(ValidCode.valid_error.value,
+ gettext(
+ 'Verification failed, please check whether the parameters are correct: {error}').format(
+ error=str(e)))
+ else:
+ return False
+ return True
+
+ def encryption_dict(self, model: Dict[str, object]):
+ return {**model, 'api_key': super().encryption(model.get('api_key', ''))}
+
+ def get_model_params_setting_form(self, model_name):
+ return AzureOpenAITTIModelParams()
diff --git a/apps/setting/models_provider/impl/azure_model_provider/credential/tts.py b/apps/setting/models_provider/impl/azure_model_provider/credential/tts.py
new file mode 100644
index 00000000000..a41365fecca
--- /dev/null
+++ b/apps/setting/models_provider/impl/azure_model_provider/credential/tts.py
@@ -0,0 +1,68 @@
+# coding=utf-8
+import traceback
+from typing import Dict
+
+from django.utils.translation import gettext_lazy as _, gettext
+
+from common import forms
+from common.exception.app_exception import AppApiException
+from common.forms import BaseForm, TooltipLabel
+from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode
+
+
+class AzureOpenAITTSModelGeneralParams(BaseForm):
+ # alloy, echo, fable, onyx, nova, shimmer
+ voice = forms.SingleSelect(
+ TooltipLabel('Voice',
+ _('Try out the different sounds (Alloy, Echo, Fable, Onyx, Nova, and Sparkle) to find one that suits your desired tone and audience. The current voiceover is optimized for English.')),
+ required=True, default_value='alloy',
+ text_field='value',
+ value_field='value',
+ option_list=[
+ {'text': 'alloy', 'value': 'alloy'},
+ {'text': 'echo', 'value': 'echo'},
+ {'text': 'fable', 'value': 'fable'},
+ {'text': 'onyx', 'value': 'onyx'},
+ {'text': 'nova', 'value': 'nova'},
+ {'text': 'shimmer', 'value': 'shimmer'},
+ ])
+
+
+class AzureOpenAITTSModelCredential(BaseForm, BaseModelCredential):
+ api_version = forms.TextInputField("API Version", required=True)
+ api_base = forms.TextInputField('Azure Endpoint', required=True)
+ api_key = forms.PasswordInputField("API Key", required=True)
+
+ def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], model_params, provider,
+ raise_exception=False):
+ model_type_list = provider.get_model_type_list()
+ if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))):
+ raise AppApiException(ValidCode.valid_error.value,
+ gettext('{model_type} Model type is not supported').format(model_type=model_type))
+
+ for key in ['api_base', 'api_key', 'api_version']:
+ if key not in model_credential:
+ if raise_exception:
+ raise AppApiException(ValidCode.valid_error.value, gettext('{key} is required').format(key=key))
+ else:
+ return False
+ try:
+ model = provider.get_model(model_type, model_name, model_credential, **model_params)
+ model.check_auth()
+ except Exception as e:
+ traceback.print_exc()
+ if isinstance(e, AppApiException):
+ raise e
+ if raise_exception:
+ raise AppApiException(ValidCode.valid_error.value, gettext(
+ 'Verification failed, please check whether the parameters are correct: {error}').format(
+ error=str(e)))
+ else:
+ return False
+ return True
+
+ def encryption_dict(self, model: Dict[str, object]):
+ return {**model, 'api_key': super().encryption(model.get('api_key', ''))}
+
+ def get_model_params_setting_form(self, model_name):
+ return AzureOpenAITTSModelGeneralParams()
diff --git a/apps/setting/models_provider/impl/azure_model_provider/icon/azure_icon_svg b/apps/setting/models_provider/impl/azure_model_provider/icon/azure_icon_svg
index d6499d2ad97..e5a2f98a0b0 100644
--- a/apps/setting/models_provider/impl/azure_model_provider/icon/azure_icon_svg
+++ b/apps/setting/models_provider/impl/azure_model_provider/icon/azure_icon_svg
@@ -1,9 +1 @@
-
-
-
-
-
-
-
-
-
+
\ No newline at end of file
diff --git a/apps/setting/models_provider/impl/azure_model_provider/model/azure_chat_model.py b/apps/setting/models_provider/impl/azure_model_provider/model/azure_chat_model.py
index 6388dbde27b..6046ae67ebf 100644
--- a/apps/setting/models_provider/impl/azure_model_provider/model/azure_chat_model.py
+++ b/apps/setting/models_provider/impl/azure_model_provider/model/azure_chat_model.py
@@ -6,15 +6,36 @@
@date:2024/4/28 11:45
@desc:
"""
-from typing import List
+
+from typing import List, Dict
from langchain_core.messages import BaseMessage, get_buffer_string
from langchain_openai import AzureChatOpenAI
from common.config.tokenizer_manage_config import TokenizerManage
+from setting.models_provider.base_model_provider import MaxKBBaseModel
+
+
+class AzureChatModel(MaxKBBaseModel, AzureChatOpenAI):
+ @staticmethod
+ def is_cache_model():
+ return False
+
+ @staticmethod
+ def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs):
+ optional_params = MaxKBBaseModel.filter_optional_params(model_kwargs)
+ return AzureChatModel(
+ azure_endpoint=model_credential.get('api_base'),
+ model_name=model_name,
+ openai_api_version=model_credential.get('api_version', '2024-02-15-preview'),
+ deployment_name=model_credential.get('deployment_name'),
+ openai_api_key=model_credential.get('api_key'),
+ openai_api_type="azure",
+ **optional_params,
+ streaming=True,
+ )
-class AzureChatModel(AzureChatOpenAI):
def get_num_tokens_from_messages(self, messages: List[BaseMessage]) -> int:
try:
return super().get_num_tokens_from_messages(messages)
diff --git a/apps/setting/models_provider/impl/azure_model_provider/model/embedding.py b/apps/setting/models_provider/impl/azure_model_provider/model/embedding.py
new file mode 100644
index 00000000000..f1b35db8ea8
--- /dev/null
+++ b/apps/setting/models_provider/impl/azure_model_provider/model/embedding.py
@@ -0,0 +1,25 @@
+# coding=utf-8
+"""
+ @project: MaxKB
+ @Author:虎
+ @file: embedding.py
+ @date:2024/7/12 17:44
+ @desc:
+"""
+from typing import Dict
+
+from langchain_openai import AzureOpenAIEmbeddings
+
+from setting.models_provider.base_model_provider import MaxKBBaseModel
+
+
+class AzureOpenAIEmbeddingModel(MaxKBBaseModel, AzureOpenAIEmbeddings):
+ @staticmethod
+ def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs):
+ return AzureOpenAIEmbeddingModel(
+ model=model_name,
+ openai_api_key=model_credential.get('api_key'),
+ azure_endpoint=model_credential.get('api_base'),
+ openai_api_version=model_credential.get('api_version'),
+ openai_api_type="azure",
+ )
diff --git a/apps/setting/models_provider/impl/azure_model_provider/model/image.py b/apps/setting/models_provider/impl/azure_model_provider/model/image.py
new file mode 100644
index 00000000000..14abab3af9a
--- /dev/null
+++ b/apps/setting/models_provider/impl/azure_model_provider/model/image.py
@@ -0,0 +1,42 @@
+from typing import Dict, List
+
+from langchain_core.messages import BaseMessage, get_buffer_string
+from langchain_openai import AzureChatOpenAI
+
+from common.config.tokenizer_manage_config import TokenizerManage
+from setting.models_provider.base_model_provider import MaxKBBaseModel
+
+
+def custom_get_token_ids(text: str):
+ tokenizer = TokenizerManage.get_tokenizer()
+ return tokenizer.encode(text)
+
+
+class AzureOpenAIImage(MaxKBBaseModel, AzureChatOpenAI):
+
+ @staticmethod
+ def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs):
+ optional_params = MaxKBBaseModel.filter_optional_params(model_kwargs)
+ return AzureOpenAIImage(
+ model_name=model_name,
+ openai_api_key=model_credential.get('api_key'),
+ azure_endpoint=model_credential.get('api_base'),
+ openai_api_version=model_credential.get('api_version'),
+ openai_api_type="azure",
+ streaming=True,
+ **optional_params,
+ )
+
+ def get_num_tokens_from_messages(self, messages: List[BaseMessage]) -> int:
+ try:
+ return super().get_num_tokens_from_messages(messages)
+ except Exception as e:
+ tokenizer = TokenizerManage.get_tokenizer()
+ return sum([len(tokenizer.encode(get_buffer_string([m]))) for m in messages])
+
+ def get_num_tokens(self, text: str) -> int:
+ try:
+ return super().get_num_tokens(text)
+ except Exception as e:
+ tokenizer = TokenizerManage.get_tokenizer()
+ return len(tokenizer.encode(text))
diff --git a/apps/setting/models_provider/impl/azure_model_provider/model/stt.py b/apps/setting/models_provider/impl/azure_model_provider/model/stt.py
new file mode 100644
index 00000000000..5a4aab5fd06
--- /dev/null
+++ b/apps/setting/models_provider/impl/azure_model_provider/model/stt.py
@@ -0,0 +1,62 @@
+import io
+from typing import Dict
+
+from openai import AzureOpenAI
+
+from common.config.tokenizer_manage_config import TokenizerManage
+from setting.models_provider.base_model_provider import MaxKBBaseModel
+from setting.models_provider.impl.base_stt import BaseSpeechToText
+
+
+def custom_get_token_ids(text: str):
+ tokenizer = TokenizerManage.get_tokenizer()
+ return tokenizer.encode(text)
+
+
+class AzureOpenAISpeechToText(MaxKBBaseModel, BaseSpeechToText):
+ api_base: str
+ api_key: str
+ api_version: str
+ model: str
+
+ def __init__(self, **kwargs):
+ super().__init__(**kwargs)
+ self.api_key = kwargs.get('api_key')
+ self.api_base = kwargs.get('api_base')
+ self.api_version = kwargs.get('api_version')
+
+ @staticmethod
+ def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs):
+ optional_params = {}
+ if 'max_tokens' in model_kwargs and model_kwargs['max_tokens'] is not None:
+ optional_params['max_tokens'] = model_kwargs['max_tokens']
+ if 'temperature' in model_kwargs and model_kwargs['temperature'] is not None:
+ optional_params['temperature'] = model_kwargs['temperature']
+ return AzureOpenAISpeechToText(
+ model=model_name,
+ api_base=model_credential.get('api_base'),
+ api_key=model_credential.get('api_key'),
+ api_version=model_credential.get('api_version'),
+ **optional_params,
+ )
+
+ def check_auth(self):
+ client = AzureOpenAI(
+ azure_endpoint=self.api_base,
+ api_key=self.api_key,
+ api_version=self.api_version
+ )
+ response_list = client.models.with_raw_response.list()
+ # print(response_list)
+
+ def speech_to_text(self, audio_file):
+ client = AzureOpenAI(
+ azure_endpoint=self.api_base,
+ api_key=self.api_key,
+ api_version=self.api_version
+ )
+ audio_data = audio_file.read()
+ buffer = io.BytesIO(audio_data)
+ buffer.name = "file.mp3" # this is the important line
+ res = client.audio.transcriptions.create(model=self.model, language="zh", file=buffer)
+ return res.text
diff --git a/apps/setting/models_provider/impl/azure_model_provider/model/tti.py b/apps/setting/models_provider/impl/azure_model_provider/model/tti.py
new file mode 100644
index 00000000000..fd5c6ffab6b
--- /dev/null
+++ b/apps/setting/models_provider/impl/azure_model_provider/model/tti.py
@@ -0,0 +1,61 @@
+from typing import Dict
+
+from openai import AzureOpenAI
+
+from common.config.tokenizer_manage_config import TokenizerManage
+from setting.models_provider.base_model_provider import MaxKBBaseModel
+from setting.models_provider.impl.base_tti import BaseTextToImage
+
+
+def custom_get_token_ids(text: str):
+ tokenizer = TokenizerManage.get_tokenizer()
+ return tokenizer.encode(text)
+
+
+class AzureOpenAITextToImage(MaxKBBaseModel, BaseTextToImage):
+ api_base: str
+ api_key: str
+ api_version: str
+ model: str
+ params: dict
+
+ def __init__(self, **kwargs):
+ super().__init__(**kwargs)
+ self.api_key = kwargs.get('api_key')
+ self.api_base = kwargs.get('api_base')
+ self.api_version = kwargs.get('api_version')
+ self.model = kwargs.get('model')
+ self.params = kwargs.get('params')
+
+ @staticmethod
+ def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs):
+ optional_params = {'params': {'size': '1024x1024', 'quality': 'standard', 'n': 1}}
+ for key, value in model_kwargs.items():
+ if key not in ['model_id', 'use_local', 'streaming']:
+ optional_params['params'][key] = value
+ return AzureOpenAITextToImage(
+ model=model_name,
+ api_base=model_credential.get('api_base'),
+ api_key=model_credential.get('api_key'),
+ api_version=model_credential.get('api_version'),
+ **optional_params,
+ )
+
+ def is_cache_model(self):
+ return False
+
+ def check_auth(self):
+ chat = AzureOpenAI(api_key=self.api_key, azure_endpoint=self.api_base, api_version=self.api_version)
+ response_list = chat.models.with_raw_response.list()
+
+ # self.generate_image('生成一个小猫图片')
+
+ def generate_image(self, prompt: str, negative_prompt: str = None):
+ chat = AzureOpenAI(api_key=self.api_key, azure_endpoint=self.api_base, api_version=self.api_version)
+ res = chat.images.generate(model=self.model, prompt=prompt, **self.params)
+ file_urls = []
+ for content in res.data:
+ url = content.url
+ file_urls.append(url)
+
+ return file_urls
diff --git a/apps/setting/models_provider/impl/azure_model_provider/model/tts.py b/apps/setting/models_provider/impl/azure_model_provider/model/tts.py
new file mode 100644
index 00000000000..fa0676534e6
--- /dev/null
+++ b/apps/setting/models_provider/impl/azure_model_provider/model/tts.py
@@ -0,0 +1,69 @@
+from typing import Dict
+
+from openai import AzureOpenAI
+
+from common.config.tokenizer_manage_config import TokenizerManage
+from common.util.common import _remove_empty_lines
+from setting.models_provider.base_model_provider import MaxKBBaseModel
+from setting.models_provider.impl.base_tts import BaseTextToSpeech
+
+
+def custom_get_token_ids(text: str):
+ tokenizer = TokenizerManage.get_tokenizer()
+ return tokenizer.encode(text)
+
+
+class AzureOpenAITextToSpeech(MaxKBBaseModel, BaseTextToSpeech):
+ api_base: str
+ api_key: str
+ api_version: str
+ model: str
+ params: dict
+
+ def __init__(self, **kwargs):
+ super().__init__(**kwargs)
+ self.api_key = kwargs.get('api_key')
+ self.api_base = kwargs.get('api_base')
+ self.api_version = kwargs.get('api_version')
+ self.model = kwargs.get('model')
+ self.params = kwargs.get('params')
+
+ @staticmethod
+ def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs):
+ optional_params = {'params': {'voice': 'alloy'}}
+ for key, value in model_kwargs.items():
+ if key not in ['model_id', 'use_local', 'streaming']:
+ optional_params['params'][key] = value
+ return AzureOpenAITextToSpeech(
+ model=model_name,
+ api_base=model_credential.get('api_base'),
+ api_key=model_credential.get('api_key'),
+ api_version=model_credential.get('api_version'),
+ **optional_params,
+ )
+
+ def check_auth(self):
+ client = AzureOpenAI(
+ azure_endpoint=self.api_base,
+ api_key=self.api_key,
+ api_version=self.api_version
+ )
+ response_list = client.models.with_raw_response.list()
+ # print(response_list)
+
+ def text_to_speech(self, text):
+ client = AzureOpenAI(
+ azure_endpoint=self.api_base,
+ api_key=self.api_key,
+ api_version=self.api_version
+ )
+ text = _remove_empty_lines(text)
+ with client.audio.speech.with_streaming_response.create(
+ model=self.model,
+ input=text,
+ **self.params
+ ) as response:
+ return response.read()
+
+ def is_cache_model(self):
+ return False
diff --git a/apps/setting/models_provider/impl/base_chat_open_ai.py b/apps/setting/models_provider/impl/base_chat_open_ai.py
new file mode 100644
index 00000000000..ccafc0e0bf2
--- /dev/null
+++ b/apps/setting/models_provider/impl/base_chat_open_ai.py
@@ -0,0 +1,199 @@
+# coding=utf-8
+from typing import Dict, Optional, Any, Iterator, cast, Union, Sequence, Callable, Mapping
+
+from langchain_core.language_models import LanguageModelInput
+from langchain_core.messages import BaseMessage, get_buffer_string, BaseMessageChunk, HumanMessageChunk, AIMessageChunk, \
+ SystemMessageChunk, FunctionMessageChunk, ChatMessageChunk
+from langchain_core.messages.ai import UsageMetadata
+from langchain_core.messages.tool import tool_call_chunk, ToolMessageChunk
+from langchain_core.outputs import ChatGenerationChunk
+from langchain_core.runnables import RunnableConfig, ensure_config
+from langchain_core.tools import BaseTool
+from langchain_openai import ChatOpenAI
+from langchain_openai.chat_models.base import _create_usage_metadata
+
+from common.config.tokenizer_manage_config import TokenizerManage
+
+
+def custom_get_token_ids(text: str):
+ tokenizer = TokenizerManage.get_tokenizer()
+ return tokenizer.encode(text)
+
+
+def _convert_delta_to_message_chunk(
+ _dict: Mapping[str, Any], default_class: type[BaseMessageChunk]
+) -> BaseMessageChunk:
+ id_ = _dict.get("id")
+ role = cast(str, _dict.get("role"))
+ content = cast(str, _dict.get("content") or "")
+ additional_kwargs: dict = {}
+ if 'reasoning_content' in _dict:
+ additional_kwargs['reasoning_content'] = _dict.get('reasoning_content')
+ if _dict.get("function_call"):
+ function_call = dict(_dict["function_call"])
+ if "name" in function_call and function_call["name"] is None:
+ function_call["name"] = ""
+ additional_kwargs["function_call"] = function_call
+ tool_call_chunks = []
+ if raw_tool_calls := _dict.get("tool_calls"):
+ additional_kwargs["tool_calls"] = raw_tool_calls
+ try:
+ tool_call_chunks = [
+ tool_call_chunk(
+ name=rtc["function"].get("name"),
+ args=rtc["function"].get("arguments"),
+ id=rtc.get("id"),
+ index=rtc["index"],
+ )
+ for rtc in raw_tool_calls
+ ]
+ except KeyError:
+ pass
+
+ if role == "user" or default_class == HumanMessageChunk:
+ return HumanMessageChunk(content=content, id=id_)
+ elif role == "assistant" or default_class == AIMessageChunk:
+ return AIMessageChunk(
+ content=content,
+ additional_kwargs=additional_kwargs,
+ id=id_,
+ tool_call_chunks=tool_call_chunks, # type: ignore[arg-type]
+ )
+ elif role in ("system", "developer") or default_class == SystemMessageChunk:
+ if role == "developer":
+ additional_kwargs = {"__openai_role__": "developer"}
+ else:
+ additional_kwargs = {}
+ return SystemMessageChunk(
+ content=content, id=id_, additional_kwargs=additional_kwargs
+ )
+ elif role == "function" or default_class == FunctionMessageChunk:
+ return FunctionMessageChunk(content=content, name=_dict["name"], id=id_)
+ elif role == "tool" or default_class == ToolMessageChunk:
+ return ToolMessageChunk(
+ content=content, tool_call_id=_dict["tool_call_id"], id=id_
+ )
+ elif role or default_class == ChatMessageChunk:
+ return ChatMessageChunk(content=content, role=role, id=id_)
+ else:
+ return default_class(content=content, id=id_) # type: ignore
+
+
+class BaseChatOpenAI(ChatOpenAI):
+ usage_metadata: dict = {}
+ custom_get_token_ids = custom_get_token_ids
+
+ def get_last_generation_info(self) -> Optional[Dict[str, Any]]:
+ return self.usage_metadata
+
+ def get_num_tokens_from_messages(
+ self,
+ messages: list[BaseMessage],
+ tools: Optional[
+ Sequence[Union[dict[str, Any], type, Callable, BaseTool]]
+ ] = None,
+ ) -> int:
+ if self.usage_metadata is None or self.usage_metadata == {}:
+ try:
+ return super().get_num_tokens_from_messages(messages)
+ except Exception as e:
+ tokenizer = TokenizerManage.get_tokenizer()
+ return sum([len(tokenizer.encode(get_buffer_string([m]))) for m in messages])
+ return self.usage_metadata.get('input_tokens', 0)
+
+ def get_num_tokens(self, text: str) -> int:
+ if self.usage_metadata is None or self.usage_metadata == {}:
+ try:
+ return super().get_num_tokens(text)
+ except Exception as e:
+ tokenizer = TokenizerManage.get_tokenizer()
+ return len(tokenizer.encode(text))
+ return self.get_last_generation_info().get('output_tokens', 0)
+
+ def _stream(self, *args: Any, **kwargs: Any) -> Iterator[ChatGenerationChunk]:
+ kwargs['stream_usage'] = True
+ for chunk in super()._stream(*args, **kwargs):
+ if chunk.message.usage_metadata is not None:
+ self.usage_metadata = chunk.message.usage_metadata
+ yield chunk
+
+ def _convert_chunk_to_generation_chunk(
+ self,
+ chunk: dict,
+ default_chunk_class: type,
+ base_generation_info: Optional[dict],
+ ) -> Optional[ChatGenerationChunk]:
+ if chunk.get("type") == "content.delta": # from beta.chat.completions.stream
+ return None
+ token_usage = chunk.get("usage")
+ choices = (
+ chunk.get("choices", [])
+ # from beta.chat.completions.stream
+ or chunk.get("chunk", {}).get("choices", [])
+ )
+
+ usage_metadata: Optional[UsageMetadata] = (
+ _create_usage_metadata(token_usage) if token_usage and token_usage.get("prompt_tokens") else None
+ )
+ if len(choices) == 0:
+ # logprobs is implicitly None
+ generation_chunk = ChatGenerationChunk(
+ message=default_chunk_class(content="", usage_metadata=usage_metadata)
+ )
+ return generation_chunk
+
+ choice = choices[0]
+ if choice["delta"] is None:
+ return None
+
+ message_chunk = _convert_delta_to_message_chunk(
+ choice["delta"], default_chunk_class
+ )
+ generation_info = {**base_generation_info} if base_generation_info else {}
+
+ if finish_reason := choice.get("finish_reason"):
+ generation_info["finish_reason"] = finish_reason
+ if model_name := chunk.get("model"):
+ generation_info["model_name"] = model_name
+ if system_fingerprint := chunk.get("system_fingerprint"):
+ generation_info["system_fingerprint"] = system_fingerprint
+
+ logprobs = choice.get("logprobs")
+ if logprobs:
+ generation_info["logprobs"] = logprobs
+
+ if usage_metadata and isinstance(message_chunk, AIMessageChunk):
+ message_chunk.usage_metadata = usage_metadata
+
+ generation_chunk = ChatGenerationChunk(
+ message=message_chunk, generation_info=generation_info or None
+ )
+ return generation_chunk
+
+ def invoke(
+ self,
+ input: LanguageModelInput,
+ config: Optional[RunnableConfig] = None,
+ *,
+ stop: Optional[list[str]] = None,
+ **kwargs: Any,
+ ) -> BaseMessage:
+ config = ensure_config(config)
+ chat_result = cast(
+ "ChatGeneration",
+ self.generate_prompt(
+ [self._convert_input(input)],
+ stop=stop,
+ callbacks=config.get("callbacks"),
+ tags=config.get("tags"),
+ metadata=config.get("metadata"),
+ run_name=config.get("run_name"),
+ run_id=config.pop("run_id", None),
+ **kwargs,
+ ).generations[0][0],
+
+ ).message
+
+ self.usage_metadata = chat_result.response_metadata[
+ 'token_usage'] if 'token_usage' in chat_result.response_metadata else chat_result.usage_metadata
+ return chat_result
diff --git a/apps/setting/models_provider/impl/base_stt.py b/apps/setting/models_provider/impl/base_stt.py
new file mode 100644
index 00000000000..aae72a559eb
--- /dev/null
+++ b/apps/setting/models_provider/impl/base_stt.py
@@ -0,0 +1,14 @@
+# coding=utf-8
+from abc import abstractmethod
+
+from pydantic import BaseModel
+
+
+class BaseSpeechToText(BaseModel):
+ @abstractmethod
+ def check_auth(self):
+ pass
+
+ @abstractmethod
+ def speech_to_text(self, audio_file):
+ pass
diff --git a/apps/setting/models_provider/impl/base_tti.py b/apps/setting/models_provider/impl/base_tti.py
new file mode 100644
index 00000000000..5e34d12cd11
--- /dev/null
+++ b/apps/setting/models_provider/impl/base_tti.py
@@ -0,0 +1,14 @@
+# coding=utf-8
+from abc import abstractmethod
+
+from pydantic import BaseModel
+
+
+class BaseTextToImage(BaseModel):
+ @abstractmethod
+ def check_auth(self):
+ pass
+
+ @abstractmethod
+ def generate_image(self, prompt: str, negative_prompt: str = None):
+ pass
diff --git a/apps/setting/models_provider/impl/base_tts.py b/apps/setting/models_provider/impl/base_tts.py
new file mode 100644
index 00000000000..6311f268653
--- /dev/null
+++ b/apps/setting/models_provider/impl/base_tts.py
@@ -0,0 +1,14 @@
+# coding=utf-8
+from abc import abstractmethod
+
+from pydantic import BaseModel
+
+
+class BaseTextToSpeech(BaseModel):
+ @abstractmethod
+ def check_auth(self):
+ pass
+
+ @abstractmethod
+ def text_to_speech(self, text):
+ pass
diff --git a/apps/setting/models_provider/impl/deepseek_model_provider/credential/llm.py b/apps/setting/models_provider/impl/deepseek_model_provider/credential/llm.py
new file mode 100644
index 00000000000..015deacee48
--- /dev/null
+++ b/apps/setting/models_provider/impl/deepseek_model_provider/credential/llm.py
@@ -0,0 +1,77 @@
+# coding=utf-8
+"""
+ @project: MaxKB
+ @Author:虎
+ @file: llm.py
+ @date:2024/7/11 17:51
+ @desc:
+"""
+import traceback
+from typing import Dict
+
+from django.utils.translation import gettext_lazy as _, gettext
+from langchain_core.messages import HumanMessage
+
+from common import forms
+from common.exception.app_exception import AppApiException
+from common.forms import BaseForm, TooltipLabel
+from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode
+
+
+class DeepSeekLLMModelParams(BaseForm):
+ temperature = forms.SliderField(TooltipLabel(_('Temperature'),
+ _('Higher values make the output more random, while lower values make it more focused and deterministic')),
+ required=True, default_value=0.7,
+ _min=0.1,
+ _max=1.0,
+ _step=0.01,
+ precision=2)
+
+ max_tokens = forms.SliderField(
+ TooltipLabel(_('Output the maximum Tokens'),
+ _('Specify the maximum number of tokens that the model can generate')),
+ required=True, default_value=800,
+ _min=1,
+ _max=100000,
+ _step=1,
+ precision=0)
+
+
+class DeepSeekLLMModelCredential(BaseForm, BaseModelCredential):
+
+ def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], model_params, provider,
+ raise_exception=False):
+ model_type_list = provider.get_model_type_list()
+ if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))):
+ raise AppApiException(ValidCode.valid_error.value,
+ gettext('{model_type} Model type is not supported').format(model_type=model_type))
+
+ for key in ['api_key']:
+ if key not in model_credential:
+ if raise_exception:
+ raise AppApiException(ValidCode.valid_error.value, gettext('{key} is required').format(key=key))
+ else:
+ return False
+ try:
+ model = provider.get_model(model_type, model_name, model_credential, **model_params)
+ model.invoke([HumanMessage(content=gettext('Hello'))])
+ except Exception as e:
+ traceback.print_exc()
+ if isinstance(e, AppApiException):
+ raise e
+ if raise_exception:
+ raise AppApiException(ValidCode.valid_error.value,
+ gettext(
+ 'Verification failed, please check whether the parameters are correct: {error}').format(
+ error=str(e)))
+ else:
+ return False
+ return True
+
+ def encryption_dict(self, model: Dict[str, object]):
+ return {**model, 'api_key': super().encryption(model.get('api_key', ''))}
+
+ api_key = forms.PasswordInputField('API Key', required=True)
+
+ def get_model_params_setting_form(self, model_name):
+ return DeepSeekLLMModelParams()
diff --git a/apps/setting/models_provider/impl/deepseek_model_provider/deepseek_model_provider.py b/apps/setting/models_provider/impl/deepseek_model_provider/deepseek_model_provider.py
index 3baa5f04ad7..0ebb2884932 100644
--- a/apps/setting/models_provider/impl/deepseek_model_provider/deepseek_model_provider.py
+++ b/apps/setting/models_provider/impl/deepseek_model_provider/deepseek_model_provider.py
@@ -7,91 +7,41 @@
@Date :5/12/24 7:40 AM
"""
import os
-from typing import Dict
-from langchain.schema import HumanMessage
-
-from common import forms
-from common.exception.app_exception import AppApiException
-from common.forms import BaseForm
from common.util.file_util import get_file_content
-from setting.models_provider.base_model_provider import IModelProvider, ModelProvideInfo, BaseModelCredential, \
- ModelInfo, ModelTypeConst, ValidCode
-from setting.models_provider.impl.deepseek_model_provider.model.deepseek_chat_model import DeepSeekChatModel
+from setting.models_provider.base_model_provider import IModelProvider, ModelProvideInfo, ModelInfo, ModelTypeConst, \
+ ModelInfoManage
+from setting.models_provider.impl.deepseek_model_provider.credential.llm import DeepSeekLLMModelCredential
+from setting.models_provider.impl.deepseek_model_provider.model.llm import DeepSeekChatModel
from smartdoc.conf import PROJECT_DIR
-
-
-class DeepSeekLLMModelCredential(BaseForm, BaseModelCredential):
-
- def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], raise_exception=False):
- model_type_list = DeepSeekModelProvider().get_model_type_list()
- if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))):
- raise AppApiException(ValidCode.valid_error.value, f'{model_type} 模型类型不支持')
-
- for key in ['api_key']:
- if key not in model_credential:
- if raise_exception:
- raise AppApiException(ValidCode.valid_error.value, f'{key} 字段为必填字段')
- else:
- return False
- try:
- model = DeepSeekModelProvider().get_model(model_type, model_name, model_credential)
- model.invoke([HumanMessage(content='你好')])
- except Exception as e:
- if isinstance(e, AppApiException):
- raise e
- if raise_exception:
- raise AppApiException(ValidCode.valid_error.value, f'校验失败,请检查参数是否正确: {str(e)}')
- else:
- return False
- return True
-
- def encryption_dict(self, model: Dict[str, object]):
- return {**model, 'api_key': super().encryption(model.get('api_key', ''))}
-
- api_key = forms.PasswordInputField('API Key', required=True)
-
+from django.utils.translation import gettext as _
deepseek_llm_model_credential = DeepSeekLLMModelCredential()
+deepseek_reasoner = ModelInfo('deepseek-reasoner', '', ModelTypeConst.LLM,
+ deepseek_llm_model_credential, DeepSeekChatModel
+ )
-model_dict = {
- 'deepseek-chat': ModelInfo('deepseek-chat', '擅长通用对话任务,支持 32K 上下文', ModelTypeConst.LLM,
- deepseek_llm_model_credential,
- ),
- 'deepseek-coder': ModelInfo('deepseek-coder', '擅长处理编程任务,支持 16K 上下文', ModelTypeConst.LLM,
- deepseek_llm_model_credential,
- ),
-}
+deepseek_chat = ModelInfo('deepseek-chat', _('Good at common conversational tasks, supports 32K contexts'),
+ ModelTypeConst.LLM,
+ deepseek_llm_model_credential, DeepSeekChatModel
+ )
+deepseek_coder = ModelInfo('deepseek-coder', _('Good at handling programming tasks, supports 16K contexts'),
+ ModelTypeConst.LLM,
+ deepseek_llm_model_credential,
+ DeepSeekChatModel)
-class DeepSeekModelProvider(IModelProvider):
+model_info_manage = ModelInfoManage.builder().append_model_info(deepseek_reasoner).append_model_info(deepseek_chat).append_model_info(
+ deepseek_coder).append_default_model_info(
+ deepseek_coder).build()
- def get_dialogue_number(self):
- return 3
- def get_model(self, model_type, model_name, model_credential: Dict[str, object], **model_kwargs) -> DeepSeekChatModel:
- deepseek_chat_open_ai = DeepSeekChatModel(
- model=model_name,
- openai_api_base='https://api.deepseek.com',
- openai_api_key=model_credential.get('api_key')
- )
- return deepseek_chat_open_ai
+class DeepSeekModelProvider(IModelProvider):
- def get_model_credential(self, model_type, model_name):
- if model_name in model_dict:
- return model_dict.get(model_name).model_credential
- return deepseek_llm_model_credential
+ def get_model_info_manage(self):
+ return model_info_manage
def get_model_provide_info(self):
return ModelProvideInfo(provider='model_deepseek_provider', name='DeepSeek', icon=get_file_content(
os.path.join(PROJECT_DIR, "apps", "setting", 'models_provider', 'impl', 'deepseek_model_provider', 'icon',
'deepseek_icon_svg')))
-
- def get_model_list(self, model_type: str):
- if model_type is None:
- raise AppApiException(500, '模型类型不能为空')
- return [model_dict.get(key).to_dict() for key in
- list(filter(lambda key: model_dict.get(key).model_type == model_type, model_dict.keys()))]
-
- def get_model_type_list(self):
- return [{'key': "大语言模型", 'value': "LLM"}]
diff --git a/apps/setting/models_provider/impl/deepseek_model_provider/model/deepseek_chat_model.py b/apps/setting/models_provider/impl/deepseek_model_provider/model/deepseek_chat_model.py
deleted file mode 100644
index b7a54b302d9..00000000000
--- a/apps/setting/models_provider/impl/deepseek_model_provider/model/deepseek_chat_model.py
+++ /dev/null
@@ -1,30 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: UTF-8 -*-
-"""
-@Project :MaxKB
-@File :deepseek_chat_model.py
-@Author :Brian Yang
-@Date :5/12/24 7:44 AM
-"""
-from typing import List
-
-from langchain_core.messages import BaseMessage, get_buffer_string
-from langchain_openai import ChatOpenAI
-
-from common.config.tokenizer_manage_config import TokenizerManage
-
-
-class DeepSeekChatModel(ChatOpenAI):
- def get_num_tokens_from_messages(self, messages: List[BaseMessage]) -> int:
- try:
- return super().get_num_tokens_from_messages(messages)
- except Exception as e:
- tokenizer = TokenizerManage.get_tokenizer()
- return sum([len(tokenizer.encode(get_buffer_string([m]))) for m in messages])
-
- def get_num_tokens(self, text: str) -> int:
- try:
- return super().get_num_tokens(text)
- except Exception as e:
- tokenizer = TokenizerManage.get_tokenizer()
- return len(tokenizer.encode(text))
diff --git a/apps/setting/models_provider/impl/deepseek_model_provider/model/llm.py b/apps/setting/models_provider/impl/deepseek_model_provider/model/llm.py
new file mode 100644
index 00000000000..081d648a716
--- /dev/null
+++ b/apps/setting/models_provider/impl/deepseek_model_provider/model/llm.py
@@ -0,0 +1,31 @@
+#!/usr/bin/env python
+# -*- coding: UTF-8 -*-
+"""
+@Project :MaxKB
+@File :llm.py
+@Author :Brian Yang
+@Date :5/12/24 7:44 AM
+"""
+from typing import Dict
+
+from setting.models_provider.base_model_provider import MaxKBBaseModel
+from setting.models_provider.impl.base_chat_open_ai import BaseChatOpenAI
+
+
+class DeepSeekChatModel(MaxKBBaseModel, BaseChatOpenAI):
+
+ @staticmethod
+ def is_cache_model():
+ return False
+
+ @staticmethod
+ def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs):
+ optional_params = MaxKBBaseModel.filter_optional_params(model_kwargs)
+
+ deepseek_chat_open_ai = DeepSeekChatModel(
+ model=model_name,
+ openai_api_base='https://api.deepseek.com',
+ openai_api_key=model_credential.get('api_key'),
+ extra_body=optional_params
+ )
+ return deepseek_chat_open_ai
diff --git a/apps/setting/models_provider/impl/gemini_model_provider/credential/embedding.py b/apps/setting/models_provider/impl/gemini_model_provider/credential/embedding.py
new file mode 100644
index 00000000000..22724ec08e9
--- /dev/null
+++ b/apps/setting/models_provider/impl/gemini_model_provider/credential/embedding.py
@@ -0,0 +1,52 @@
+# coding=utf-8
+"""
+ @project: MaxKB
+ @Author:虎
+ @file: embedding.py
+ @date:2024/7/12 16:45
+ @desc:
+"""
+import traceback
+from typing import Dict
+
+from django.utils.translation import gettext as _
+
+from common import forms
+from common.exception.app_exception import AppApiException
+from common.forms import BaseForm
+from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode
+
+
+class GeminiEmbeddingCredential(BaseForm, BaseModelCredential):
+ def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], model_params, provider,
+ raise_exception=True):
+ model_type_list = provider.get_model_type_list()
+ if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))):
+ raise AppApiException(ValidCode.valid_error.value,
+ _('{model_type} Model type is not supported').format(model_type=model_type))
+
+ for key in ['api_key']:
+ if key not in model_credential:
+ if raise_exception:
+ raise AppApiException(ValidCode.valid_error.value, _('{key} is required').format(key=key))
+ else:
+ return False
+ try:
+ model = provider.get_model(model_type, model_name, model_credential)
+ model.embed_query(_('Hello'))
+ except Exception as e:
+ traceback.print_exc()
+ if isinstance(e, AppApiException):
+ raise e
+ if raise_exception:
+ raise AppApiException(ValidCode.valid_error.value,
+ _('Verification failed, please check whether the parameters are correct: {error}').format(
+ error=str(e)))
+ else:
+ return False
+ return True
+
+ def encryption_dict(self, model: Dict[str, object]):
+ return {**model, 'api_key': super().encryption(model.get('api_key', ''))}
+
+ api_key = forms.PasswordInputField('API Key', required=True)
diff --git a/apps/setting/models_provider/impl/gemini_model_provider/credential/image.py b/apps/setting/models_provider/impl/gemini_model_provider/credential/image.py
new file mode 100644
index 00000000000..87d667a61c8
--- /dev/null
+++ b/apps/setting/models_provider/impl/gemini_model_provider/credential/image.py
@@ -0,0 +1,71 @@
+# coding=utf-8
+import traceback
+from typing import Dict
+
+from django.utils.translation import gettext_lazy as _, gettext
+from langchain_core.messages import HumanMessage
+
+from common import forms
+from common.exception.app_exception import AppApiException
+from common.forms import BaseForm, TooltipLabel
+from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode
+
+
+class GeminiImageModelParams(BaseForm):
+ temperature = forms.SliderField(TooltipLabel(_('Temperature'),
+ _('Higher values make the output more random, while lower values make it more focused and deterministic')),
+ required=True, default_value=0.7,
+ _min=0.1,
+ _max=1.0,
+ _step=0.01,
+ precision=2)
+
+ max_tokens = forms.SliderField(
+ TooltipLabel(_('Output the maximum Tokens'),
+ _('Specify the maximum number of tokens that the model can generate')),
+ required=True, default_value=800,
+ _min=1,
+ _max=100000,
+ _step=1,
+ precision=0)
+
+
+class GeminiImageModelCredential(BaseForm, BaseModelCredential):
+ api_key = forms.PasswordInputField('API Key', required=True)
+
+ def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], model_params, provider,
+ raise_exception=False):
+ model_type_list = provider.get_model_type_list()
+ if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))):
+ raise AppApiException(ValidCode.valid_error.value,
+ gettext('{model_type} Model type is not supported').format(model_type=model_type))
+
+ for key in ['api_key']:
+ if key not in model_credential:
+ if raise_exception:
+ raise AppApiException(ValidCode.valid_error.value, gettext('{key} is required').format(key=key))
+ else:
+ return False
+ try:
+ model = provider.get_model(model_type, model_name, model_credential, **model_params)
+ res = model.stream([HumanMessage(content=[{"type": "text", "text": gettext('Hello')}])])
+ for chunk in res:
+ print(chunk)
+ except Exception as e:
+ traceback.print_exc()
+ if isinstance(e, AppApiException):
+ raise e
+ if raise_exception:
+ raise AppApiException(ValidCode.valid_error.value,
+ gettext(
+ 'Verification failed, please check whether the parameters are correct: {error}').format(
+ error=str(e)))
+ else:
+ return False
+ return True
+
+ def encryption_dict(self, model: Dict[str, object]):
+ return {**model, 'api_key': super().encryption(model.get('api_key', ''))}
+
+ def get_model_params_setting_form(self, model_name):
+ return GeminiImageModelParams()
diff --git a/apps/setting/models_provider/impl/gemini_model_provider/credential/llm.py b/apps/setting/models_provider/impl/gemini_model_provider/credential/llm.py
new file mode 100644
index 00000000000..d02b9fdc356
--- /dev/null
+++ b/apps/setting/models_provider/impl/gemini_model_provider/credential/llm.py
@@ -0,0 +1,78 @@
+# coding=utf-8
+"""
+ @project: MaxKB
+ @Author:虎
+ @file: llm.py
+ @date:2024/7/11 17:57
+ @desc:
+"""
+import traceback
+from typing import Dict
+
+from django.utils.translation import gettext_lazy as _, gettext
+from langchain_core.messages import HumanMessage
+
+from common import forms
+from common.exception.app_exception import AppApiException
+from common.forms import BaseForm, TooltipLabel
+from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode
+
+
+class GeminiLLMModelParams(BaseForm):
+ temperature = forms.SliderField(TooltipLabel(_('Temperature'),
+ _('Higher values make the output more random, while lower values make it more focused and deterministic')),
+ required=True, default_value=0.7,
+ _min=0.1,
+ _max=1.0,
+ _step=0.01,
+ precision=2)
+
+ max_tokens = forms.SliderField(
+ TooltipLabel(_('Output the maximum Tokens'),
+ _('Specify the maximum number of tokens that the model can generate')),
+ required=True, default_value=800,
+ _min=1,
+ _max=100000,
+ _step=1,
+ precision=0)
+
+
+class GeminiLLMModelCredential(BaseForm, BaseModelCredential):
+
+ def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], model_params, provider,
+ raise_exception=False):
+ model_type_list = provider.get_model_type_list()
+ if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))):
+ raise AppApiException(ValidCode.valid_error.value,
+ gettext('{model_type} Model type is not supported').format(model_type=model_type))
+
+ for key in ['api_key']:
+ if key not in model_credential:
+ if raise_exception:
+ raise AppApiException(ValidCode.valid_error.value, gettext('{key} is required').format(key=key))
+ else:
+ return False
+ try:
+ model = provider.get_model(model_type, model_name, model_credential, **model_params)
+ res = model.invoke([HumanMessage(content=gettext('Hello'))])
+ print(res)
+ except Exception as e:
+ traceback.print_exc()
+ if isinstance(e, AppApiException):
+ raise e
+ if raise_exception:
+ raise AppApiException(ValidCode.valid_error.value,
+ gettext(
+ 'Verification failed, please check whether the parameters are correct: {error}').format(
+ error=str(e)))
+ else:
+ return False
+ return True
+
+ def encryption_dict(self, model: Dict[str, object]):
+ return {**model, 'api_key': super().encryption(model.get('api_key', ''))}
+
+ api_key = forms.PasswordInputField('API Key', required=True)
+
+ def get_model_params_setting_form(self, model_name):
+ return GeminiLLMModelParams()
diff --git a/apps/setting/models_provider/impl/gemini_model_provider/credential/stt.py b/apps/setting/models_provider/impl/gemini_model_provider/credential/stt.py
new file mode 100644
index 00000000000..0092d955e29
--- /dev/null
+++ b/apps/setting/models_provider/impl/gemini_model_provider/credential/stt.py
@@ -0,0 +1,48 @@
+# coding=utf-8
+import traceback
+from typing import Dict
+
+from django.utils.translation import gettext as _
+
+from common import forms
+from common.exception.app_exception import AppApiException
+from common.forms import BaseForm
+from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode
+
+
+class GeminiSTTModelCredential(BaseForm, BaseModelCredential):
+ api_key = forms.PasswordInputField('API Key', required=True)
+
+ def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], model_params, provider,
+ raise_exception=False):
+ model_type_list = provider.get_model_type_list()
+ if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))):
+ raise AppApiException(ValidCode.valid_error.value,
+ _('{model_type} Model type is not supported').format(model_type=model_type))
+
+ for key in ['api_key']:
+ if key not in model_credential:
+ if raise_exception:
+ raise AppApiException(ValidCode.valid_error.value, _('{key} is required').format(key=key))
+ else:
+ return False
+ try:
+ model = provider.get_model(model_type, model_name, model_credential)
+ model.check_auth()
+ except Exception as e:
+ traceback.print_exc()
+ if isinstance(e, AppApiException):
+ raise e
+ if raise_exception:
+ raise AppApiException(ValidCode.valid_error.value,
+ _('Verification failed, please check whether the parameters are correct: {error}').format(
+ error=str(e)))
+ else:
+ return False
+ return True
+
+ def encryption_dict(self, model: Dict[str, object]):
+ return {**model, 'api_key': super().encryption(model.get('api_key', ''))}
+
+ def get_model_params_setting_form(self, model_name):
+ pass
diff --git a/apps/setting/models_provider/impl/gemini_model_provider/gemini_model_provider.py b/apps/setting/models_provider/impl/gemini_model_provider/gemini_model_provider.py
index 5ddddf782af..0771b0c128d 100644
--- a/apps/setting/models_provider/impl/gemini_model_provider/gemini_model_provider.py
+++ b/apps/setting/models_provider/impl/gemini_model_provider/gemini_model_provider.py
@@ -7,93 +7,91 @@
@Date :5/13/24 7:47 AM
"""
import os
-from typing import Dict
-from langchain.schema import HumanMessage
-
-from common import forms
-from common.exception.app_exception import AppApiException
-from common.forms import BaseForm
from common.util.file_util import get_file_content
-from setting.models_provider.base_model_provider import IModelProvider, ModelProvideInfo, BaseModelCredential, \
- ModelInfo, ModelTypeConst, ValidCode
-from setting.models_provider.impl.gemini_model_provider.model.gemini_chat_model import GeminiChatModel
+from setting.models_provider.base_model_provider import IModelProvider, ModelProvideInfo, ModelInfo, ModelTypeConst, \
+ ModelInfoManage
+from setting.models_provider.impl.gemini_model_provider.credential.embedding import GeminiEmbeddingCredential
+from setting.models_provider.impl.gemini_model_provider.credential.image import GeminiImageModelCredential
+from setting.models_provider.impl.gemini_model_provider.credential.llm import GeminiLLMModelCredential
+from setting.models_provider.impl.gemini_model_provider.credential.stt import GeminiSTTModelCredential
+from setting.models_provider.impl.gemini_model_provider.model.embedding import GeminiEmbeddingModel
+from setting.models_provider.impl.gemini_model_provider.model.image import GeminiImage
+from setting.models_provider.impl.gemini_model_provider.model.llm import GeminiChatModel
+from setting.models_provider.impl.gemini_model_provider.model.stt import GeminiSpeechToText
from smartdoc.conf import PROJECT_DIR
-
-
-class GeminiLLMModelCredential(BaseForm, BaseModelCredential):
-
- def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], raise_exception=False):
- model_type_list = GeminiModelProvider().get_model_type_list()
- if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))):
- raise AppApiException(ValidCode.valid_error.value, f'{model_type} 模型类型不支持')
-
- for key in ['api_key']:
- if key not in model_credential:
- if raise_exception:
- raise AppApiException(ValidCode.valid_error.value, f'{key} 字段为必填字段')
- else:
- return False
- try:
- model = GeminiModelProvider().get_model(model_type, model_name, model_credential)
- model.invoke([HumanMessage(content='你好')])
- except Exception as e:
- if isinstance(e, AppApiException):
- raise e
- if raise_exception:
- raise AppApiException(ValidCode.valid_error.value, f'校验失败,请检查参数是否正确: {str(e)}')
- else:
- return False
- return True
-
- def encryption_dict(self, model: Dict[str, object]):
- return {**model, 'api_key': super().encryption(model.get('api_key', ''))}
-
- api_key = forms.PasswordInputField('API Key', required=True)
+from django.utils.translation import gettext as _
gemini_llm_model_credential = GeminiLLMModelCredential()
-
-model_dict = {
- 'gemini-1.0-pro': ModelInfo('gemini-1.0-pro', '最新的Gemini 1.0 Pro模型,随Google更新而更新',
- ModelTypeConst.LLM,
- gemini_llm_model_credential,
- ),
- 'gemini-1.0-pro-vision': ModelInfo('gemini-1.0-pro-vision', '最新的Gemini 1.0 Pro Vision模型,随Google更新而更新',
- ModelTypeConst.LLM,
- gemini_llm_model_credential,
- ),
-}
+gemini_image_model_credential = GeminiImageModelCredential()
+gemini_stt_model_credential = GeminiSTTModelCredential()
+gemini_embedding_model_credential = GeminiEmbeddingCredential()
+
+model_info_list = [
+ ModelInfo('gemini-1.0-pro', _('Latest Gemini 1.0 Pro model, updated with Google update'),
+ ModelTypeConst.LLM,
+ gemini_llm_model_credential,
+ GeminiChatModel),
+ ModelInfo('gemini-1.0-pro-vision', _('Latest Gemini 1.0 Pro Vision model, updated with Google update'),
+ ModelTypeConst.LLM,
+ gemini_llm_model_credential,
+ GeminiChatModel),
+]
+
+model_image_info_list = [
+ ModelInfo('gemini-1.5-flash', _('Latest Gemini 1.5 Flash model, updated with Google updates'),
+ ModelTypeConst.IMAGE,
+ gemini_image_model_credential,
+ GeminiImage),
+ ModelInfo('gemini-1.5-pro', _('Latest Gemini 1.5 Pro model, updated with Google updates'),
+ ModelTypeConst.IMAGE,
+ gemini_image_model_credential,
+ GeminiImage),
+]
+
+model_stt_info_list = [
+ ModelInfo('gemini-1.5-flash', _('Latest Gemini 1.5 Flash model, updated with Google updates'),
+ ModelTypeConst.STT,
+ gemini_stt_model_credential,
+ GeminiSpeechToText),
+ ModelInfo('gemini-1.5-pro', _('Latest Gemini 1.5 Pro model, updated with Google updates'),
+ ModelTypeConst.STT,
+ gemini_stt_model_credential,
+ GeminiSpeechToText),
+]
+
+model_embedding_info_list = [
+ ModelInfo('models/embedding-001', '',
+ ModelTypeConst.EMBEDDING,
+ gemini_embedding_model_credential,
+ GeminiEmbeddingModel),
+ ModelInfo('models/text-embedding-004', '',
+ ModelTypeConst.EMBEDDING,
+ gemini_embedding_model_credential,
+ GeminiEmbeddingModel),
+]
+
+model_info_manage = (
+ ModelInfoManage.builder()
+ .append_model_info_list(model_info_list)
+ .append_model_info_list(model_image_info_list)
+ .append_model_info_list(model_stt_info_list)
+ .append_model_info_list(model_embedding_info_list)
+ .append_default_model_info(model_info_list[0])
+ .append_default_model_info(model_image_info_list[0])
+ .append_default_model_info(model_stt_info_list[0])
+ .append_default_model_info(model_embedding_info_list[0])
+ .build()
+)
class GeminiModelProvider(IModelProvider):
- def get_dialogue_number(self):
- return 3
-
- def get_model(self, model_type, model_name, model_credential: Dict[str, object],
- **model_kwargs) -> GeminiChatModel:
- gemini_chat = GeminiChatModel(
- model=model_name,
- google_api_key=model_credential.get('api_key')
- )
- return gemini_chat
-
- def get_model_credential(self, model_type, model_name):
- if model_name in model_dict:
- return model_dict.get(model_name).model_credential
- return gemini_llm_model_credential
+ def get_model_info_manage(self):
+ return model_info_manage
def get_model_provide_info(self):
return ModelProvideInfo(provider='model_gemini_provider', name='Gemini', icon=get_file_content(
os.path.join(PROJECT_DIR, "apps", "setting", 'models_provider', 'impl', 'gemini_model_provider', 'icon',
'gemini_icon_svg')))
-
- def get_model_list(self, model_type: str):
- if model_type is None:
- raise AppApiException(500, '模型类型不能为空')
- return [model_dict.get(key).to_dict() for key in
- list(filter(lambda key: model_dict.get(key).model_type == model_type, model_dict.keys()))]
-
- def get_model_type_list(self):
- return [{'key': "大语言模型", 'value': "LLM"}]
diff --git a/apps/setting/models_provider/impl/gemini_model_provider/icon/gemini_icon_svg b/apps/setting/models_provider/impl/gemini_model_provider/icon/gemini_icon_svg
index 00c48a35944..3ff8bdaf4f4 100644
--- a/apps/setting/models_provider/impl/gemini_model_provider/icon/gemini_icon_svg
+++ b/apps/setting/models_provider/impl/gemini_model_provider/icon/gemini_icon_svg
@@ -1,10 +1,2 @@
-
-
-
-
-
-
-
-
-
-
+
+
\ No newline at end of file
diff --git a/apps/setting/models_provider/impl/gemini_model_provider/model/embedding.py b/apps/setting/models_provider/impl/gemini_model_provider/model/embedding.py
new file mode 100644
index 00000000000..5d82b07e99d
--- /dev/null
+++ b/apps/setting/models_provider/impl/gemini_model_provider/model/embedding.py
@@ -0,0 +1,22 @@
+# coding=utf-8
+"""
+ @project: MaxKB
+ @Author:虎
+ @file: embedding.py
+ @date:2024/7/12 17:44
+ @desc:
+"""
+from typing import Dict
+
+from langchain_google_genai import GoogleGenerativeAIEmbeddings
+
+from setting.models_provider.base_model_provider import MaxKBBaseModel
+
+
+class GeminiEmbeddingModel(MaxKBBaseModel, GoogleGenerativeAIEmbeddings):
+ @staticmethod
+ def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs):
+ return GeminiEmbeddingModel(
+ google_api_key=model_credential.get('api_key'),
+ model=model_name,
+ )
diff --git a/apps/setting/models_provider/impl/gemini_model_provider/model/gemini_chat_model.py b/apps/setting/models_provider/impl/gemini_model_provider/model/gemini_chat_model.py
deleted file mode 100644
index 7a972d9d531..00000000000
--- a/apps/setting/models_provider/impl/gemini_model_provider/model/gemini_chat_model.py
+++ /dev/null
@@ -1,30 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: UTF-8 -*-
-"""
-@Project :MaxKB
-@File :gemini_chat_model.py
-@Author :Brian Yang
-@Date :5/13/24 7:40 AM
-"""
-from typing import List
-
-from langchain_core.messages import BaseMessage, get_buffer_string
-from langchain_google_genai import ChatGoogleGenerativeAI
-
-from common.config.tokenizer_manage_config import TokenizerManage
-
-
-class GeminiChatModel(ChatGoogleGenerativeAI):
- def get_num_tokens_from_messages(self, messages: List[BaseMessage]) -> int:
- try:
- return super().get_num_tokens_from_messages(messages)
- except Exception as e:
- tokenizer = TokenizerManage.get_tokenizer()
- return sum([len(tokenizer.encode(get_buffer_string([m]))) for m in messages])
-
- def get_num_tokens(self, text: str) -> int:
- try:
- return super().get_num_tokens(text)
- except Exception as e:
- tokenizer = TokenizerManage.get_tokenizer()
- return len(tokenizer.encode(text))
diff --git a/apps/setting/models_provider/impl/gemini_model_provider/model/image.py b/apps/setting/models_provider/impl/gemini_model_provider/model/image.py
new file mode 100644
index 00000000000..2e48a81b23b
--- /dev/null
+++ b/apps/setting/models_provider/impl/gemini_model_provider/model/image.py
@@ -0,0 +1,24 @@
+from typing import Dict
+
+from langchain_google_genai import ChatGoogleGenerativeAI
+
+from common.config.tokenizer_manage_config import TokenizerManage
+from setting.models_provider.base_model_provider import MaxKBBaseModel
+
+
+def custom_get_token_ids(text: str):
+ tokenizer = TokenizerManage.get_tokenizer()
+ return tokenizer.encode(text)
+
+
+class GeminiImage(MaxKBBaseModel, ChatGoogleGenerativeAI):
+
+ @staticmethod
+ def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs):
+ optional_params = MaxKBBaseModel.filter_optional_params(model_kwargs)
+ return GeminiImage(
+ model=model_name,
+ google_api_key=model_credential.get('api_key'),
+ streaming=True,
+ **optional_params,
+ )
diff --git a/apps/setting/models_provider/impl/gemini_model_provider/model/llm.py b/apps/setting/models_provider/impl/gemini_model_provider/model/llm.py
new file mode 100644
index 00000000000..af23d0341a4
--- /dev/null
+++ b/apps/setting/models_provider/impl/gemini_model_provider/model/llm.py
@@ -0,0 +1,100 @@
+#!/usr/bin/env python
+# -*- coding: UTF-8 -*-
+"""
+@Project :MaxKB
+@File :llm.py
+@Author :Brian Yang
+@Date :5/13/24 7:40 AM
+"""
+from typing import List, Dict, Optional, Sequence, Union, Any, Iterator, cast
+
+from google.ai.generativelanguage_v1 import GenerateContentResponse
+from google.ai.generativelanguage_v1beta.types import (
+ Tool as GoogleTool,
+)
+from langchain_core.callbacks import CallbackManagerForLLMRun
+from langchain_core.messages import BaseMessage, get_buffer_string
+from langchain_core.outputs import ChatGenerationChunk
+from langchain_google_genai import ChatGoogleGenerativeAI
+from langchain_google_genai._function_utils import _ToolConfigDict, _ToolDict
+from langchain_google_genai.chat_models import _chat_with_retry, _response_to_result, \
+ _FunctionDeclarationType
+from langchain_google_genai._common import (
+ SafetySettingDict,
+)
+
+from common.config.tokenizer_manage_config import TokenizerManage
+from setting.models_provider.base_model_provider import MaxKBBaseModel
+
+
+class GeminiChatModel(MaxKBBaseModel, ChatGoogleGenerativeAI):
+
+ @staticmethod
+ def is_cache_model():
+ return False
+
+ @staticmethod
+ def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs):
+ optional_params = MaxKBBaseModel.filter_optional_params(model_kwargs)
+
+ gemini_chat = GeminiChatModel(
+ model=model_name,
+ google_api_key=model_credential.get('api_key'),
+ **optional_params
+ )
+ return gemini_chat
+
+ def get_last_generation_info(self) -> Optional[Dict[str, Any]]:
+ return self.__dict__.get('_last_generation_info')
+
+ def get_num_tokens_from_messages(self, messages: List[BaseMessage]) -> int:
+ try:
+ return self.get_last_generation_info().get('input_tokens', 0)
+ except Exception as e:
+ tokenizer = TokenizerManage.get_tokenizer()
+ return sum([len(tokenizer.encode(get_buffer_string([m]))) for m in messages])
+
+ def get_num_tokens(self, text: str) -> int:
+ try:
+ return self.get_last_generation_info().get('output_tokens', 0)
+ except Exception as e:
+ tokenizer = TokenizerManage.get_tokenizer()
+ return len(tokenizer.encode(text))
+
+ def _stream(
+ self,
+ messages: List[BaseMessage],
+ stop: Optional[List[str]] = None,
+ run_manager: Optional[CallbackManagerForLLMRun] = None,
+ *,
+ tools: Optional[Sequence[Union[_ToolDict, GoogleTool]]] = None,
+ functions: Optional[Sequence[_FunctionDeclarationType]] = None,
+ safety_settings: Optional[SafetySettingDict] = None,
+ tool_config: Optional[Union[Dict, _ToolConfigDict]] = None,
+ generation_config: Optional[Dict[str, Any]] = None,
+ **kwargs: Any,
+ ) -> Iterator[ChatGenerationChunk]:
+ request = self._prepare_request(
+ messages,
+ stop=stop,
+ tools=tools,
+ functions=functions,
+ safety_settings=safety_settings,
+ tool_config=tool_config,
+ generation_config=generation_config,
+ )
+ response: GenerateContentResponse = _chat_with_retry(
+ request=request,
+ generation_method=self.client.stream_generate_content,
+ **kwargs,
+ metadata=self.default_metadata,
+ )
+ for chunk in response:
+ _chat_result = _response_to_result(chunk, stream=True)
+ gen = cast(ChatGenerationChunk, _chat_result.generations[0])
+ if gen.message:
+ token_usage = gen.message.usage_metadata
+ self.__dict__.setdefault('_last_generation_info', {}).update(token_usage)
+ if run_manager:
+ run_manager.on_llm_new_token(gen.text)
+ yield gen
diff --git a/apps/setting/models_provider/impl/gemini_model_provider/model/stt.py b/apps/setting/models_provider/impl/gemini_model_provider/model/stt.py
new file mode 100644
index 00000000000..5d559ac0065
--- /dev/null
+++ b/apps/setting/models_provider/impl/gemini_model_provider/model/stt.py
@@ -0,0 +1,57 @@
+from typing import Dict
+
+from django.utils.translation import gettext as _
+from langchain_core.messages import HumanMessage
+from langchain_google_genai import ChatGoogleGenerativeAI
+
+from common.config.tokenizer_manage_config import TokenizerManage
+from setting.models_provider.base_model_provider import MaxKBBaseModel
+from setting.models_provider.impl.base_stt import BaseSpeechToText
+
+
+def custom_get_token_ids(text: str):
+ tokenizer = TokenizerManage.get_tokenizer()
+ return tokenizer.encode(text)
+
+
+class GeminiSpeechToText(MaxKBBaseModel, BaseSpeechToText):
+ api_key: str
+ model: str
+
+ def __init__(self, **kwargs):
+ super().__init__(**kwargs)
+ self.api_key = kwargs.get('api_key')
+
+ @staticmethod
+ def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs):
+ optional_params = {}
+ if 'max_tokens' in model_kwargs and model_kwargs['max_tokens'] is not None:
+ optional_params['max_tokens'] = model_kwargs['max_tokens']
+ if 'temperature' in model_kwargs and model_kwargs['temperature'] is not None:
+ optional_params['temperature'] = model_kwargs['temperature']
+ return GeminiSpeechToText(
+ model=model_name,
+ api_key=model_credential.get('api_key'),
+ **optional_params,
+ )
+
+ def check_auth(self):
+ client = ChatGoogleGenerativeAI(
+ model=self.model,
+ google_api_key=self.api_key
+ )
+ response_list = client.invoke(_('Hello'))
+ # print(response_list)
+
+ def speech_to_text(self, audio_file):
+ client = ChatGoogleGenerativeAI(
+ model=self.model,
+ google_api_key=self.api_key
+ )
+ audio_data = audio_file.read()
+ msg = HumanMessage(content=[
+ {'type': 'text', 'text': _('convert audio to text')},
+ {"type": "media", 'mime_type': 'audio/mp3', "data": audio_data}
+ ])
+ res = client.invoke([msg])
+ return res.content
diff --git a/apps/setting/models_provider/impl/kimi_model_provider/credential/llm.py b/apps/setting/models_provider/impl/kimi_model_provider/credential/llm.py
new file mode 100644
index 00000000000..9fbdc21f961
--- /dev/null
+++ b/apps/setting/models_provider/impl/kimi_model_provider/credential/llm.py
@@ -0,0 +1,77 @@
+# coding=utf-8
+"""
+ @project: MaxKB
+ @Author:虎
+ @file: llm.py
+ @date:2024/7/11 18:06
+ @desc:
+"""
+import traceback
+from typing import Dict
+
+from django.utils.translation import gettext_lazy as _, gettext
+from langchain_core.messages import HumanMessage
+
+from common import forms
+from common.exception.app_exception import AppApiException
+from common.forms import BaseForm, TooltipLabel
+from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode
+
+
+class KimiLLMModelParams(BaseForm):
+ temperature = forms.SliderField(TooltipLabel(_('Temperature'),
+ _('Higher values make the output more random, while lower values make it more focused and deterministic')),
+ required=True, default_value=0.3,
+ _min=0.1,
+ _max=1.0,
+ _step=0.01,
+ precision=2)
+
+ max_tokens = forms.SliderField(
+ TooltipLabel(_('Output the maximum Tokens'),
+ _('Specify the maximum number of tokens that the model can generate')),
+ required=True, default_value=1024,
+ _min=1,
+ _max=100000,
+ _step=1,
+ precision=0)
+
+
+class KimiLLMModelCredential(BaseForm, BaseModelCredential):
+
+ def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], model_params, provider,
+ raise_exception=False):
+ model_type_list = provider.get_model_type_list()
+ if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))):
+ raise AppApiException(ValidCode.valid_error.value,
+ gettext('{model_type} Model type is not supported').format(model_type=model_type))
+
+ for key in ['api_base', 'api_key']:
+ if key not in model_credential:
+ if raise_exception:
+ raise AppApiException(ValidCode.valid_error.value, gettext('{key} is required').format(key=key))
+ else:
+ return False
+ try:
+ model = provider.get_model(model_type, model_name, model_credential, **model_params)
+ model.invoke([HumanMessage(content=gettext('Hello'))])
+ except Exception as e:
+ traceback.print_exc()
+ if isinstance(e, AppApiException):
+ raise e
+ if raise_exception:
+ raise AppApiException(ValidCode.valid_error.value, gettext(
+ 'Verification failed, please check whether the parameters are correct: {error}').format(
+ error=str(e)))
+ else:
+ return False
+ return True
+
+ def encryption_dict(self, model: Dict[str, object]):
+ return {**model, 'api_key': super().encryption(model.get('api_key', ''))}
+
+ api_base = forms.TextInputField('API URL', required=True)
+ api_key = forms.PasswordInputField('API Key', required=True)
+
+ def get_model_params_setting_form(self, model_name):
+ return KimiLLMModelParams()
diff --git a/apps/setting/models_provider/impl/kimi_model_provider/icon/kimi_icon_svg b/apps/setting/models_provider/impl/kimi_model_provider/icon/kimi_icon_svg
index 80bfcabffb3..8bd2a78fe4f 100644
--- a/apps/setting/models_provider/impl/kimi_model_provider/icon/kimi_icon_svg
+++ b/apps/setting/models_provider/impl/kimi_model_provider/icon/kimi_icon_svg
@@ -1,9 +1 @@
-
-
-
-
-
-
-
-
-
\ No newline at end of file
+
\ No newline at end of file
diff --git a/apps/setting/models_provider/impl/kimi_model_provider/kimi_model_provider.py b/apps/setting/models_provider/impl/kimi_model_provider/kimi_model_provider.py
index 6394e5902b2..1347df46c64 100644
--- a/apps/setting/models_provider/impl/kimi_model_provider/kimi_model_provider.py
+++ b/apps/setting/models_provider/impl/kimi_model_provider/kimi_model_provider.py
@@ -7,103 +7,36 @@
@desc:
"""
import os
-from typing import Dict
-from langchain.schema import HumanMessage
-from langchain.chat_models.base import BaseChatModel
-
-
-from common import forms
-from common.exception.app_exception import AppApiException
-from common.forms import BaseForm
from common.util.file_util import get_file_content
-from setting.models_provider.base_model_provider import IModelProvider, ModelProvideInfo, BaseModelCredential, \
- ModelInfo, \
- ModelTypeConst, ValidCode
+from setting.models_provider.base_model_provider import IModelProvider, ModelProvideInfo, ModelInfo, \
+ ModelTypeConst, ModelInfoManage
+from setting.models_provider.impl.kimi_model_provider.credential.llm import KimiLLMModelCredential
+from setting.models_provider.impl.kimi_model_provider.model.llm import KimiChatModel
from smartdoc.conf import PROJECT_DIR
-from setting.models_provider.impl.kimi_model_provider.model.kimi_chat_model import KimiChatModel
-
-
-
-
-class KimiLLMModelCredential(BaseForm, BaseModelCredential):
-
- def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], raise_exception=False):
- model_type_list = KimiModelProvider().get_model_type_list()
- if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))):
- raise AppApiException(ValidCode.valid_error.value, f'{model_type} 模型类型不支持')
-
- for key in ['api_base', 'api_key']:
- if key not in model_credential:
- if raise_exception:
- raise AppApiException(ValidCode.valid_error.value, f'{key} 字段为必填字段')
- else:
- return False
- try:
- # llm_kimi = Moonshot(
- # model_name=model_name,
- # base_url=model_credential['api_base'],
- # moonshot_api_key=model_credential['api_key']
- # )
-
- model = KimiModelProvider().get_model(model_type, model_name, model_credential)
- model.invoke([HumanMessage(content='你好')])
- except Exception as e:
- if isinstance(e, AppApiException):
- raise e
- if raise_exception:
- raise AppApiException(ValidCode.valid_error.value, f'校验失败,请检查参数是否正确: {str(e)}')
- else:
- return False
- return True
-
- def encryption_dict(self, model: Dict[str, object]):
- return {**model, 'api_key': super().encryption(model.get('api_key', ''))}
-
- api_base = forms.TextInputField('API 域名', required=True)
- api_key = forms.PasswordInputField('API Key', required=True)
-
kimi_llm_model_credential = KimiLLMModelCredential()
-model_dict = {
- 'moonshot-v1-8k': ModelInfo('moonshot-v1-8k', '', ModelTypeConst.LLM, kimi_llm_model_credential,
- ),
- 'moonshot-v1-32k': ModelInfo('moonshot-v1-32k', '', ModelTypeConst.LLM, kimi_llm_model_credential,
- ),
- 'moonshot-v1-128k': ModelInfo('moonshot-v1-128k', '', ModelTypeConst.LLM, kimi_llm_model_credential,
- )
-}
+moonshot_v1_8k = ModelInfo('moonshot-v1-8k', '', ModelTypeConst.LLM, kimi_llm_model_credential,
+ KimiChatModel)
+moonshot_v1_32k = ModelInfo('moonshot-v1-32k', '', ModelTypeConst.LLM, kimi_llm_model_credential,
+ KimiChatModel)
+moonshot_v1_128k = ModelInfo('moonshot-v1-128k', '', ModelTypeConst.LLM, kimi_llm_model_credential,
+ KimiChatModel)
+
+model_info_manage = ModelInfoManage.builder().append_model_info(moonshot_v1_8k).append_model_info(
+ moonshot_v1_32k).append_default_model_info(moonshot_v1_128k).append_default_model_info(moonshot_v1_8k).build()
class KimiModelProvider(IModelProvider):
+ def get_model_info_manage(self):
+ return model_info_manage
+
def get_dialogue_number(self):
return 3
- def get_model(self, model_type, model_name, model_credential: Dict[str, object], **model_kwargs) -> BaseChatModel:
- kimi_chat_open_ai = KimiChatModel(
- openai_api_base=model_credential['api_base'],
- openai_api_key=model_credential['api_key'],
- model_name=model_name,
- )
- return kimi_chat_open_ai
-
- def get_model_credential(self, model_type, model_name):
- if model_name in model_dict:
- return model_dict.get(model_name).model_credential
- return kimi_llm_model_credential
-
def get_model_provide_info(self):
return ModelProvideInfo(provider='model_kimi_provider', name='Kimi', icon=get_file_content(
os.path.join(PROJECT_DIR, "apps", "setting", 'models_provider', 'impl', 'kimi_model_provider', 'icon',
'kimi_icon_svg')))
-
- def get_model_list(self, model_type: str):
- if model_type is None:
- raise AppApiException(500, '模型类型不能为空')
- return [model_dict.get(key).to_dict() for key in
- list(filter(lambda key: model_dict.get(key).model_type == model_type, model_dict.keys()))]
-
- def get_model_type_list(self):
- return [{'key': "大语言模型", 'value': "LLM"}]
diff --git a/apps/setting/models_provider/impl/kimi_model_provider/model/kimi_chat_model.py b/apps/setting/models_provider/impl/kimi_model_provider/model/kimi_chat_model.py
deleted file mode 100644
index deee11a020d..00000000000
--- a/apps/setting/models_provider/impl/kimi_model_provider/model/kimi_chat_model.py
+++ /dev/null
@@ -1,24 +0,0 @@
-# coding=utf-8
-"""
- @project: maxkb
- @Author:虎
- @file: kimi_chat_model.py
- @date:2023/11/10 17:45
- @desc:
-"""
-from typing import List
-
-from langchain_community.chat_models import ChatOpenAI
-from langchain_core.messages import BaseMessage, get_buffer_string
-
-from common.config.tokenizer_manage_config import TokenizerManage
-
-
-class KimiChatModel(ChatOpenAI):
- def get_num_tokens_from_messages(self, messages: List[BaseMessage]) -> int:
- tokenizer = TokenizerManage.get_tokenizer()
- return sum([len(tokenizer.encode(get_buffer_string([m]))) for m in messages])
-
- def get_num_tokens(self, text: str) -> int:
- tokenizer = TokenizerManage.get_tokenizer()
- return len(tokenizer.encode(text))
diff --git a/apps/setting/models_provider/impl/kimi_model_provider/model/llm.py b/apps/setting/models_provider/impl/kimi_model_provider/model/llm.py
new file mode 100644
index 00000000000..c0ce2ec029a
--- /dev/null
+++ b/apps/setting/models_provider/impl/kimi_model_provider/model/llm.py
@@ -0,0 +1,30 @@
+# coding=utf-8
+"""
+ @project: maxkb
+ @Author:虎
+ @file: llm.py
+ @date:2023/11/10 17:45
+ @desc:
+"""
+from typing import Dict
+
+from setting.models_provider.base_model_provider import MaxKBBaseModel
+from setting.models_provider.impl.base_chat_open_ai import BaseChatOpenAI
+
+
+class KimiChatModel(MaxKBBaseModel, BaseChatOpenAI):
+
+ @staticmethod
+ def is_cache_model():
+ return False
+
+ @staticmethod
+ def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs):
+ optional_params = MaxKBBaseModel.filter_optional_params(model_kwargs)
+ kimi_chat_open_ai = KimiChatModel(
+ openai_api_base=model_credential['api_base'],
+ openai_api_key=model_credential['api_key'],
+ model_name=model_name,
+ extra_body=optional_params,
+ )
+ return kimi_chat_open_ai
diff --git a/apps/setting/models_provider/impl/local_model_provider/__init__.py b/apps/setting/models_provider/impl/local_model_provider/__init__.py
new file mode 100644
index 00000000000..90a8d72c352
--- /dev/null
+++ b/apps/setting/models_provider/impl/local_model_provider/__init__.py
@@ -0,0 +1,8 @@
+# coding=utf-8
+"""
+ @project: MaxKB
+ @Author:虎
+ @file: __init__.py
+ @date:2024/7/10 17:48
+ @desc:
+"""
diff --git a/apps/setting/models_provider/impl/local_model_provider/credential/embedding.py b/apps/setting/models_provider/impl/local_model_provider/credential/embedding.py
new file mode 100644
index 00000000000..bbb431a6b3f
--- /dev/null
+++ b/apps/setting/models_provider/impl/local_model_provider/credential/embedding.py
@@ -0,0 +1,53 @@
+# coding=utf-8
+"""
+ @project: MaxKB
+ @Author:虎
+ @file: embedding.py
+ @date:2024/7/11 11:06
+ @desc:
+"""
+import traceback
+from typing import Dict
+
+from django.utils.translation import gettext_lazy as _, gettext
+
+from common import forms
+from common.exception.app_exception import AppApiException
+from common.forms import BaseForm
+from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode
+from setting.models_provider.impl.local_model_provider.model.embedding import LocalEmbedding
+
+
+class LocalEmbeddingCredential(BaseForm, BaseModelCredential):
+
+ def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], model_params, provider,
+ raise_exception=False):
+ if not model_type == 'EMBEDDING':
+ raise AppApiException(ValidCode.valid_error.value,
+ gettext('{model_type} Model type is not supported').format(model_type=model_type))
+ for key in ['cache_folder']:
+ if key not in model_credential:
+ if raise_exception:
+ raise AppApiException(ValidCode.valid_error.value, gettext('{key} is required').format(key=key))
+ else:
+ return False
+ try:
+ model: LocalEmbedding = provider.get_model(model_type, model_name, model_credential)
+ model.embed_query(gettext('Hello'))
+ except Exception as e:
+ traceback.print_exc()
+ if isinstance(e, AppApiException):
+ raise e
+ if raise_exception:
+ raise AppApiException(ValidCode.valid_error.value,
+ gettext(
+ 'Verification failed, please check whether the parameters are correct: {error}').format(
+ error=str(e)))
+ else:
+ return False
+ return True
+
+ def encryption_dict(self, model: Dict[str, object]):
+ return model
+
+ cache_folder = forms.TextInputField(_('Model catalog'), required=True)
diff --git a/apps/setting/models_provider/impl/local_model_provider/credential/reranker.py b/apps/setting/models_provider/impl/local_model_provider/credential/reranker.py
new file mode 100644
index 00000000000..4c1715a72c9
--- /dev/null
+++ b/apps/setting/models_provider/impl/local_model_provider/credential/reranker.py
@@ -0,0 +1,54 @@
+# coding=utf-8
+"""
+ @project: MaxKB
+ @Author:虎
+ @file: reranker.py
+ @date:2024/9/3 14:33
+ @desc:
+"""
+import traceback
+from typing import Dict
+
+from langchain_core.documents import Document
+
+from common import forms
+from common.exception.app_exception import AppApiException
+from common.forms import BaseForm
+from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode
+from setting.models_provider.impl.local_model_provider.model.reranker import LocalBaseReranker
+from django.utils.translation import gettext_lazy as _, gettext
+
+
+class LocalRerankerCredential(BaseForm, BaseModelCredential):
+
+ def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], model_params, provider,
+ raise_exception=False):
+ if not model_type == 'RERANKER':
+ raise AppApiException(ValidCode.valid_error.value,
+ gettext('{model_type} Model type is not supported').format(model_type=model_type))
+ for key in ['cache_dir']:
+ if key not in model_credential:
+ if raise_exception:
+ raise AppApiException(ValidCode.valid_error.value, gettext('{key} is required').format(key=key))
+ else:
+ return False
+ try:
+ model: LocalBaseReranker = provider.get_model(model_type, model_name, model_credential)
+ model.compress_documents([Document(page_content=gettext('Hello'))], gettext('Hello'))
+ except Exception as e:
+ traceback.print_exc()
+ if isinstance(e, AppApiException):
+ raise e
+ if raise_exception:
+ raise AppApiException(ValidCode.valid_error.value,
+ gettext(
+ 'Verification failed, please check whether the parameters are correct: {error}').format(
+ error=str(e)))
+ else:
+ return False
+ return True
+
+ def encryption_dict(self, model: Dict[str, object]):
+ return model
+
+ cache_dir = forms.TextInputField(_('Model catalog'), required=True)
diff --git a/apps/setting/models_provider/impl/local_model_provider/icon/local_icon_svg b/apps/setting/models_provider/impl/local_model_provider/icon/local_icon_svg
new file mode 100644
index 00000000000..62930faabd5
--- /dev/null
+++ b/apps/setting/models_provider/impl/local_model_provider/icon/local_icon_svg
@@ -0,0 +1 @@
+
\ No newline at end of file
diff --git a/apps/setting/models_provider/impl/local_model_provider/local_model_provider.py b/apps/setting/models_provider/impl/local_model_provider/local_model_provider.py
new file mode 100644
index 00000000000..b104e789c93
--- /dev/null
+++ b/apps/setting/models_provider/impl/local_model_provider/local_model_provider.py
@@ -0,0 +1,41 @@
+# coding=utf-8
+"""
+ @project: maxkb
+ @Author:虎
+ @file: zhipu_model_provider.py
+ @date:2024/04/19 13:5
+ @desc:
+"""
+import os
+
+from common.util.file_util import get_file_content
+from setting.models_provider.base_model_provider import ModelProvideInfo, ModelTypeConst, ModelInfo, IModelProvider, \
+ ModelInfoManage
+from setting.models_provider.impl.local_model_provider.credential.embedding import LocalEmbeddingCredential
+from setting.models_provider.impl.local_model_provider.credential.reranker import LocalRerankerCredential
+from setting.models_provider.impl.local_model_provider.model.embedding import LocalEmbedding
+from setting.models_provider.impl.local_model_provider.model.reranker import LocalReranker
+from smartdoc.conf import PROJECT_DIR
+from django.utils.translation import gettext as _
+
+embedding_text2vec_base_chinese = ModelInfo('shibing624/text2vec-base-chinese', '', ModelTypeConst.EMBEDDING,
+ LocalEmbeddingCredential(), LocalEmbedding)
+bge_reranker_v2_m3 = ModelInfo('BAAI/bge-reranker-v2-m3', '', ModelTypeConst.RERANKER,
+ LocalRerankerCredential(), LocalReranker)
+
+model_info_manage = (ModelInfoManage.builder().append_model_info(embedding_text2vec_base_chinese)
+ .append_default_model_info(embedding_text2vec_base_chinese)
+ .append_model_info(bge_reranker_v2_m3)
+ .append_default_model_info(bge_reranker_v2_m3)
+ .build())
+
+
+class LocalModelProvider(IModelProvider):
+
+ def get_model_info_manage(self):
+ return model_info_manage
+
+ def get_model_provide_info(self):
+ return ModelProvideInfo(provider='model_local_provider', name=_('local model'), icon=get_file_content(
+ os.path.join(PROJECT_DIR, "apps", "setting", 'models_provider', 'impl', 'local_model_provider', 'icon',
+ 'local_icon_svg')))
diff --git a/apps/setting/models_provider/impl/local_model_provider/model/embedding.py b/apps/setting/models_provider/impl/local_model_provider/model/embedding.py
new file mode 100644
index 00000000000..4d6c65b9f68
--- /dev/null
+++ b/apps/setting/models_provider/impl/local_model_provider/model/embedding.py
@@ -0,0 +1,62 @@
+# coding=utf-8
+"""
+ @project: MaxKB
+ @Author:虎
+ @file: embedding.py
+ @date:2024/7/11 14:06
+ @desc:
+"""
+from typing import Dict, List
+
+import requests
+from langchain_core.embeddings import Embeddings
+from pydantic import BaseModel
+from langchain_huggingface import HuggingFaceEmbeddings
+
+from setting.models_provider.base_model_provider import MaxKBBaseModel
+from smartdoc.const import CONFIG
+
+
+class WebLocalEmbedding(MaxKBBaseModel, BaseModel, Embeddings):
+ @staticmethod
+ def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs):
+ pass
+
+ model_id: str = None
+
+ def __init__(self, **kwargs):
+ super().__init__(**kwargs)
+ self.model_id = kwargs.get('model_id', None)
+
+ def embed_query(self, text: str) -> List[float]:
+ bind = f'{CONFIG.get("LOCAL_MODEL_HOST")}:{CONFIG.get("LOCAL_MODEL_PORT")}'
+ res = requests.post(f'{CONFIG.get("LOCAL_MODEL_PROTOCOL")}://{bind}/api/model/{self.model_id}/embed_query',
+ {'text': text})
+ result = res.json()
+ if result.get('code', 500) == 200:
+ return result.get('data')
+ raise Exception(result.get('message'))
+
+ def embed_documents(self, texts: List[str]) -> List[List[float]]:
+ bind = f'{CONFIG.get("LOCAL_MODEL_HOST")}:{CONFIG.get("LOCAL_MODEL_PORT")}'
+ res = requests.post(f'{CONFIG.get("LOCAL_MODEL_PROTOCOL")}://{bind}/api/model/{self.model_id}/embed_documents',
+ {'texts': texts})
+ result = res.json()
+ if result.get('code', 500) == 200:
+ return result.get('data')
+ raise Exception(result.get('message'))
+
+
+class LocalEmbedding(MaxKBBaseModel, HuggingFaceEmbeddings):
+
+ @staticmethod
+ def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs):
+ if model_kwargs.get('use_local', True):
+ return LocalEmbedding(model_name=model_name, cache_folder=model_credential.get('cache_folder'),
+ model_kwargs={'device': model_credential.get('device')},
+ encode_kwargs={'normalize_embeddings': True}
+ )
+ return WebLocalEmbedding(model_name=model_name, cache_folder=model_credential.get('cache_folder'),
+ model_kwargs={'device': model_credential.get('device')},
+ encode_kwargs={'normalize_embeddings': True},
+ **model_kwargs)
diff --git a/apps/setting/models_provider/impl/local_model_provider/model/reranker.py b/apps/setting/models_provider/impl/local_model_provider/model/reranker.py
new file mode 100644
index 00000000000..b941625b0e6
--- /dev/null
+++ b/apps/setting/models_provider/impl/local_model_provider/model/reranker.py
@@ -0,0 +1,101 @@
+# coding=utf-8
+"""
+ @project: MaxKB
+ @Author:虎
+ @file: reranker.py.py
+ @date:2024/9/2 16:42
+ @desc:
+"""
+from typing import Sequence, Optional, Dict, Any, ClassVar
+
+import requests
+import torch
+from langchain_core.callbacks import Callbacks
+from langchain_core.documents import BaseDocumentCompressor, Document
+from transformers import AutoModelForSequenceClassification, AutoTokenizer
+
+from setting.models_provider.base_model_provider import MaxKBBaseModel
+from smartdoc.const import CONFIG
+
+
+class LocalReranker(MaxKBBaseModel):
+ def __init__(self, model_name, top_n=3, cache_dir=None):
+ super().__init__()
+ self.model_name = model_name
+ self.cache_dir = cache_dir
+ self.top_n = top_n
+
+ @staticmethod
+ def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs):
+ if model_kwargs.get('use_local', True):
+ return LocalBaseReranker(model_name=model_name, cache_dir=model_credential.get('cache_dir'),
+ model_kwargs={'device': model_credential.get('device', 'cpu')}
+
+ )
+ return WebLocalBaseReranker(model_name=model_name, cache_dir=model_credential.get('cache_dir'),
+ model_kwargs={'device': model_credential.get('device')},
+ **model_kwargs)
+
+
+class WebLocalBaseReranker(MaxKBBaseModel, BaseDocumentCompressor):
+ @staticmethod
+ def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs):
+ pass
+
+ model_id: str = None
+
+ def __init__(self, **kwargs):
+ super().__init__(**kwargs)
+ self.model_id = kwargs.get('model_id', None)
+
+ def compress_documents(self, documents: Sequence[Document], query: str, callbacks: Optional[Callbacks] = None) -> \
+ Sequence[Document]:
+ if documents is None or len(documents) == 0:
+ return []
+ bind = f'{CONFIG.get("LOCAL_MODEL_HOST")}:{CONFIG.get("LOCAL_MODEL_PORT")}'
+ res = requests.post(
+ f'{CONFIG.get("LOCAL_MODEL_PROTOCOL")}://{bind}/api/model/{self.model_id}/compress_documents',
+ json={'documents': [{'page_content': document.page_content, 'metadata': document.metadata} for document in
+ documents], 'query': query}, headers={'Content-Type': 'application/json'})
+ result = res.json()
+ if result.get('code', 500) == 200:
+ return [Document(page_content=document.get('page_content'), metadata=document.get('metadata')) for document
+ in result.get('data')]
+ raise Exception(result.get('message'))
+
+
+class LocalBaseReranker(MaxKBBaseModel, BaseDocumentCompressor):
+ client: Any = None
+ tokenizer: Any = None
+ model: Optional[str] = None
+ cache_dir: Optional[str] = None
+ model_kwargs: Any = {}
+
+ def __init__(self, model_name, cache_dir=None, **model_kwargs):
+ super().__init__()
+ self.model = model_name
+ self.cache_dir = cache_dir
+ self.model_kwargs = model_kwargs
+ self.client = AutoModelForSequenceClassification.from_pretrained(self.model, cache_dir=self.cache_dir)
+ self.tokenizer = AutoTokenizer.from_pretrained(self.model, cache_dir=self.cache_dir)
+ self.client = self.client.to(self.model_kwargs.get('device', 'cpu'))
+ self.client.eval()
+
+ @staticmethod
+ def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs):
+ return LocalBaseReranker(model_name, cache_dir=model_credential.get('cache_dir'), **model_kwargs)
+
+ def compress_documents(self, documents: Sequence[Document], query: str, callbacks: Optional[Callbacks] = None) -> \
+ Sequence[Document]:
+ if documents is None or len(documents) == 0:
+ return []
+ with torch.no_grad():
+ inputs = self.tokenizer([[query, document.page_content] for document in documents], padding=True,
+ truncation=True, return_tensors='pt', max_length=512)
+ scores = [torch.sigmoid(s).float().item() for s in
+ self.client(**inputs, return_dict=True).logits.view(-1, ).float()]
+ result = [Document(page_content=documents[index].page_content, metadata={'relevance_score': scores[index]})
+ for index
+ in range(len(documents))]
+ result.sort(key=lambda row: row.metadata.get('relevance_score'), reverse=True)
+ return result
diff --git a/apps/setting/models_provider/impl/ollama_model_provider/credential/embedding.py b/apps/setting/models_provider/impl/ollama_model_provider/credential/embedding.py
new file mode 100644
index 00000000000..c422dba1c6e
--- /dev/null
+++ b/apps/setting/models_provider/impl/ollama_model_provider/credential/embedding.py
@@ -0,0 +1,49 @@
+# coding=utf-8
+"""
+ @project: MaxKB
+ @Author:虎
+ @file: embedding.py
+ @date:2024/7/12 15:10
+ @desc:
+"""
+from typing import Dict
+
+from django.utils.translation import gettext as _
+
+from common import forms
+from common.exception.app_exception import AppApiException
+from common.forms import BaseForm
+from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode
+from setting.models_provider.impl.local_model_provider.model.embedding import LocalEmbedding
+
+
+class OllamaEmbeddingModelCredential(BaseForm, BaseModelCredential):
+ def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], model_params, provider,
+ raise_exception=False):
+ model_type_list = provider.get_model_type_list()
+ if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))):
+ raise AppApiException(ValidCode.valid_error.value,
+ _('{model_type} Model type is not supported').format(model_type=model_type))
+ try:
+ model_list = provider.get_base_model_list(model_credential.get('api_base'))
+ except Exception as e:
+ raise AppApiException(ValidCode.valid_error.value, _('API domain name is invalid'))
+ exist = [model for model in (model_list.get('models') if model_list.get('models') is not None else []) if
+ model.get('model') == model_name or model.get('model').replace(":latest", "") == model_name]
+ if len(exist) == 0:
+ raise AppApiException(ValidCode.model_not_fount,
+ _('The model does not exist, please download the model first'))
+ model: LocalEmbedding = provider.get_model(model_type, model_name, model_credential)
+ model.embed_query(_('Hello'))
+ return True
+
+ def encryption_dict(self, model_info: Dict[str, object]):
+ return model_info
+
+ def build_model(self, model_info: Dict[str, object]):
+ for key in ['model']:
+ if key not in model_info:
+ raise AppApiException(500, _('{key} is required').format(key=key))
+ return self
+
+ api_base = forms.TextInputField('API URL', required=True)
diff --git a/apps/setting/models_provider/impl/ollama_model_provider/credential/image.py b/apps/setting/models_provider/impl/ollama_model_provider/credential/image.py
new file mode 100644
index 00000000000..2b50067b923
--- /dev/null
+++ b/apps/setting/models_provider/impl/ollama_model_provider/credential/image.py
@@ -0,0 +1,56 @@
+# coding=utf-8
+from typing import Dict
+
+from common import forms
+from common.exception.app_exception import AppApiException
+from common.forms import BaseForm, TooltipLabel
+from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode
+from django.utils.translation import gettext_lazy as _, gettext
+
+
+class OllamaImageModelParams(BaseForm):
+ temperature = forms.SliderField(TooltipLabel(_('Temperature'),
+ _('Higher values make the output more random, while lower values make it more focused and deterministic')),
+ required=True, default_value=0.7,
+ _min=0.1,
+ _max=1.0,
+ _step=0.01,
+ precision=2)
+
+ max_tokens = forms.SliderField(
+ TooltipLabel(_('Output the maximum Tokens'),
+ _('Specify the maximum number of tokens that the model can generate')),
+ required=True, default_value=800,
+ _min=1,
+ _max=100000,
+ _step=1,
+ precision=0)
+
+
+class OllamaImageModelCredential(BaseForm, BaseModelCredential):
+ api_base = forms.TextInputField('API URL', required=True)
+ api_key = forms.PasswordInputField('API Key', required=True)
+
+ def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], model_params, provider,
+ raise_exception=False):
+ model_type_list = provider.get_model_type_list()
+ if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))):
+ raise AppApiException(ValidCode.valid_error.value,
+ gettext('{model_type} Model type is not supported').format(model_type=model_type))
+ try:
+ model_list = provider.get_base_model_list(model_credential.get('api_base'))
+ except Exception as e:
+ raise AppApiException(ValidCode.valid_error.value, gettext('API domain name is invalid'))
+ exist = [model for model in (model_list.get('models') if model_list.get('models') is not None else []) if
+ model.get('model') == model_name or model.get('model').replace(":latest", "") == model_name]
+ if len(exist) == 0:
+ raise AppApiException(ValidCode.model_not_fount,
+ gettext('The model does not exist, please download the model first'))
+
+ return True
+
+ def encryption_dict(self, model: Dict[str, object]):
+ return {**model, 'api_key': super().encryption(model.get('api_key', ''))}
+
+ def get_model_params_setting_form(self, model_name):
+ return OllamaImageModelParams()
diff --git a/apps/setting/models_provider/impl/ollama_model_provider/credential/llm.py b/apps/setting/models_provider/impl/ollama_model_provider/credential/llm.py
new file mode 100644
index 00000000000..add06621937
--- /dev/null
+++ b/apps/setting/models_provider/impl/ollama_model_provider/credential/llm.py
@@ -0,0 +1,70 @@
+# coding=utf-8
+"""
+ @project: MaxKB
+ @Author:虎
+ @file: llm.py
+ @date:2024/7/11 18:19
+ @desc:
+"""
+from typing import Dict
+
+from django.utils.translation import gettext_lazy as _, gettext
+
+from common import forms
+from common.exception.app_exception import AppApiException
+from common.forms import BaseForm, TooltipLabel
+from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode
+
+
+class OllamaLLMModelParams(BaseForm):
+ temperature = forms.SliderField(TooltipLabel(_('Temperature'),
+ _('Higher values make the output more random, while lower values make it more focused and deterministic')),
+ required=True, default_value=0.3,
+ _min=0.1,
+ _max=1.0,
+ _step=0.01,
+ precision=2)
+
+ num_predict = forms.SliderField(
+ TooltipLabel(_('Output the maximum Tokens'),
+ _('Specify the maximum number of tokens that the model can generate')),
+ required=True, default_value=1024,
+ _min=1,
+ _max=100000,
+ _step=1,
+ precision=0)
+
+
+class OllamaLLMModelCredential(BaseForm, BaseModelCredential):
+ def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], model_params, provider,
+ raise_exception=False):
+ model_type_list = provider.get_model_type_list()
+ if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))):
+ raise AppApiException(ValidCode.valid_error.value,
+ gettext('{model_type} Model type is not supported').format(model_type=model_type))
+ try:
+ model_list = provider.get_base_model_list(model_credential.get('api_base'))
+ except Exception as e:
+ raise AppApiException(ValidCode.valid_error.value, gettext('API domain name is invalid'))
+ exist = [model for model in (model_list.get('models') if model_list.get('models') is not None else []) if
+ model.get('model') == model_name or model.get('model').replace(":latest", "") == model_name]
+ if len(exist) == 0:
+ raise AppApiException(ValidCode.model_not_fount,
+ gettext('The model does not exist, please download the model first'))
+ return True
+
+ def encryption_dict(self, model_info: Dict[str, object]):
+ return {**model_info, 'api_key': super().encryption(model_info.get('api_key', ''))}
+
+ def build_model(self, model_info: Dict[str, object]):
+ for key in ['api_key', 'model']:
+ if key not in model_info:
+ raise AppApiException(500, gettext('{key} is required').format(key=key))
+ self.api_key = model_info.get('api_key')
+ return self
+
+ api_base = forms.TextInputField('API URL', required=True)
+ api_key = forms.PasswordInputField('API Key', required=True)
+
+ def get_model_params_setting_form(self, model_name):
+ return OllamaLLMModelParams()
diff --git a/apps/setting/models_provider/impl/ollama_model_provider/credential/reranker.py b/apps/setting/models_provider/impl/ollama_model_provider/credential/reranker.py
new file mode 100644
index 00000000000..c2825aacb42
--- /dev/null
+++ b/apps/setting/models_provider/impl/ollama_model_provider/credential/reranker.py
@@ -0,0 +1,66 @@
+# coding=utf-8
+"""
+ @project: MaxKB
+ @Author:虎
+ @file: embedding.py
+ @date:2024/7/12 15:10
+ @desc:
+"""
+from typing import Dict
+
+from django.utils.translation import gettext as _
+
+from common import forms
+from common.exception.app_exception import AppApiException
+from common.forms import BaseForm
+from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode
+from setting.models_provider.impl.ollama_model_provider.model.reranker import OllamaReranker
+from langchain_core.documents import BaseDocumentCompressor, Document
+from django.utils.translation import gettext_lazy as _, gettext
+
+
+class OllamaReRankModelCredential(BaseForm, BaseModelCredential):
+ def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], model_params, provider,
+ raise_exception=False):
+ if not model_type == 'RERANKER':
+ raise AppApiException(ValidCode.valid_error.value,
+ gettext('{model_type} Model type is not supported').format(model_type=model_type))
+ model_type_list = provider.get_model_type_list()
+ if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))):
+ raise AppApiException(ValidCode.valid_error.value,
+ _('{model_type} Model type is not supported').format(model_type=model_type))
+ try:
+ model_list = provider.get_base_model_list(model_credential.get('api_base'))
+ except Exception as e:
+ raise AppApiException(ValidCode.valid_error.value, _('API domain name is invalid'))
+ exist = [model for model in (model_list.get('models') if model_list.get('models') is not None else []) if
+ model.get('model') == model_name or model.get('model').replace(":latest", "") == model_name]
+ if len(exist) == 0:
+ raise AppApiException(ValidCode.model_not_fount,
+ _('The model does not exist, please download the model first'))
+
+ try:
+ model: OllamaReranker = provider.get_model(model_type, model_name, model_credential)
+ model.compress_documents([Document(page_content=gettext('Hello'))], gettext('Hello'))
+ except Exception as e:
+ if isinstance(e, AppApiException):
+ raise e
+ if raise_exception:
+ raise AppApiException(ValidCode.valid_error.value,
+ gettext(
+ 'Verification failed, please check whether the parameters are correct: {error}').format(
+ error=str(e)))
+ else:
+ return False
+ return True
+
+ def encryption_dict(self, model_info: Dict[str, object]):
+ return model_info
+
+ def build_model(self, model_info: Dict[str, object]):
+ for key in ['model']:
+ if key not in model_info:
+ raise AppApiException(500, _('{key} is required').format(key=key))
+ return self
+
+ api_base = forms.TextInputField('API URL', required=True)
diff --git a/apps/setting/models_provider/impl/ollama_model_provider/model/embedding.py b/apps/setting/models_provider/impl/ollama_model_provider/model/embedding.py
new file mode 100644
index 00000000000..d1a68ebc7ae
--- /dev/null
+++ b/apps/setting/models_provider/impl/ollama_model_provider/model/embedding.py
@@ -0,0 +1,48 @@
+# coding=utf-8
+"""
+ @project: MaxKB
+ @Author:虎
+ @file: embedding.py
+ @date:2024/7/12 15:02
+ @desc:
+"""
+from typing import Dict, List
+
+from langchain_community.embeddings import OllamaEmbeddings
+
+from setting.models_provider.base_model_provider import MaxKBBaseModel
+
+
+class OllamaEmbedding(MaxKBBaseModel, OllamaEmbeddings):
+ @staticmethod
+ def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs):
+ return OllamaEmbedding(
+ model=model_name,
+ base_url=model_credential.get('api_base'),
+ )
+
+ def embed_documents(self, texts: List[str]) -> List[List[float]]:
+ """Embed documents using an Ollama deployed embedding model.
+
+ Args:
+ texts: The list of texts to embed.
+
+ Returns:
+ List of embeddings, one for each text.
+ """
+ instruction_pairs = [f"{text}" for text in texts]
+ embeddings = self._embed(instruction_pairs)
+ return embeddings
+
+ def embed_query(self, text: str) -> List[float]:
+ """Embed a query using a Ollama deployed embedding model.
+
+ Args:
+ text: The text to embed.
+
+ Returns:
+ Embeddings for the text.
+ """
+ instruction_pair = f"{text}"
+ embedding = self._embed([instruction_pair])[0]
+ return embedding
diff --git a/apps/setting/models_provider/impl/ollama_model_provider/model/image.py b/apps/setting/models_provider/impl/ollama_model_provider/model/image.py
new file mode 100644
index 00000000000..215ce0130d7
--- /dev/null
+++ b/apps/setting/models_provider/impl/ollama_model_provider/model/image.py
@@ -0,0 +1,32 @@
+from typing import Dict
+from urllib.parse import urlparse, ParseResult
+
+from setting.models_provider.base_model_provider import MaxKBBaseModel
+from setting.models_provider.impl.base_chat_open_ai import BaseChatOpenAI
+
+
+def get_base_url(url: str):
+ parse = urlparse(url)
+ result_url = ParseResult(scheme=parse.scheme, netloc=parse.netloc, path=parse.path, params='',
+ query='',
+ fragment='').geturl()
+ return result_url[:-1] if result_url.endswith("/") else result_url
+
+
+class OllamaImage(MaxKBBaseModel, BaseChatOpenAI):
+
+ @staticmethod
+ def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs):
+ api_base = model_credential.get('api_base', '')
+ base_url = get_base_url(api_base)
+ base_url = base_url if base_url.endswith('/v1') else (base_url + '/v1')
+ optional_params = MaxKBBaseModel.filter_optional_params(model_kwargs)
+ return OllamaImage(
+ model_name=model_name,
+ openai_api_base=base_url,
+ openai_api_key=model_credential.get('api_key'),
+ # stream_options={"include_usage": True},
+ streaming=True,
+ stream_usage=True,
+ extra_body=optional_params
+ )
diff --git a/apps/setting/models_provider/impl/ollama_model_provider/model/llm.py b/apps/setting/models_provider/impl/ollama_model_provider/model/llm.py
new file mode 100644
index 00000000000..6cd291ff3cc
--- /dev/null
+++ b/apps/setting/models_provider/impl/ollama_model_provider/model/llm.py
@@ -0,0 +1,48 @@
+# coding=utf-8
+"""
+ @project: maxkb
+ @Author:虎
+ @file: llm.py
+ @date:2024/3/6 11:48
+ @desc:
+"""
+from typing import List, Dict
+from urllib.parse import urlparse, ParseResult
+
+from langchain_core.messages import BaseMessage, get_buffer_string
+from langchain_ollama.chat_models import ChatOllama
+
+
+from common.config.tokenizer_manage_config import TokenizerManage
+from setting.models_provider.base_model_provider import MaxKBBaseModel
+
+
+def get_base_url(url: str):
+ parse = urlparse(url)
+ result_url = ParseResult(scheme=parse.scheme, netloc=parse.netloc, path=parse.path, params='',
+ query='',
+ fragment='').geturl()
+ return result_url[:-1] if result_url.endswith("/") else result_url
+
+
+class OllamaChatModel(MaxKBBaseModel, ChatOllama):
+ @staticmethod
+ def is_cache_model():
+ return False
+
+ @staticmethod
+ def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs):
+ api_base = model_credential.get('api_base', '')
+ base_url = get_base_url(api_base)
+ optional_params = MaxKBBaseModel.filter_optional_params(model_kwargs)
+
+ return OllamaChatModel(model=model_name, base_url=base_url,
+ stream=True, **optional_params)
+
+ def get_num_tokens_from_messages(self, messages: List[BaseMessage]) -> int:
+ tokenizer = TokenizerManage.get_tokenizer()
+ return sum([len(tokenizer.encode(get_buffer_string([m]))) for m in messages])
+
+ def get_num_tokens(self, text: str) -> int:
+ tokenizer = TokenizerManage.get_tokenizer()
+ return len(tokenizer.encode(text))
diff --git a/apps/setting/models_provider/impl/ollama_model_provider/model/ollama_chat_model.py b/apps/setting/models_provider/impl/ollama_model_provider/model/ollama_chat_model.py
deleted file mode 100644
index 86c5219d4a0..00000000000
--- a/apps/setting/models_provider/impl/ollama_model_provider/model/ollama_chat_model.py
+++ /dev/null
@@ -1,24 +0,0 @@
-# coding=utf-8
-"""
- @project: maxkb
- @Author:虎
- @file: ollama_chat_model.py
- @date:2024/3/6 11:48
- @desc:
-"""
-from typing import List
-
-from langchain_community.chat_models import ChatOpenAI
-from langchain_core.messages import BaseMessage, get_buffer_string
-
-from common.config.tokenizer_manage_config import TokenizerManage
-
-
-class OllamaChatModel(ChatOpenAI):
- def get_num_tokens_from_messages(self, messages: List[BaseMessage]) -> int:
- tokenizer = TokenizerManage.get_tokenizer()
- return sum([len(tokenizer.encode(get_buffer_string([m]))) for m in messages])
-
- def get_num_tokens(self, text: str) -> int:
- tokenizer = TokenizerManage.get_tokenizer()
- return len(tokenizer.encode(text))
diff --git a/apps/setting/models_provider/impl/ollama_model_provider/model/reranker.py b/apps/setting/models_provider/impl/ollama_model_provider/model/reranker.py
new file mode 100644
index 00000000000..9704537a54a
--- /dev/null
+++ b/apps/setting/models_provider/impl/ollama_model_provider/model/reranker.py
@@ -0,0 +1,49 @@
+from typing import Sequence, Optional, Any, Dict
+
+from langchain_community.embeddings import OllamaEmbeddings
+from langchain_core.callbacks import Callbacks
+from langchain_core.documents import Document
+from setting.models_provider.base_model_provider import MaxKBBaseModel
+from sklearn.metrics.pairwise import cosine_similarity
+from pydantic import BaseModel, Field
+
+
+class OllamaReranker(MaxKBBaseModel, OllamaEmbeddings, BaseModel):
+ top_n: Optional[int] = Field(3, description="Number of top documents to return")
+
+ @staticmethod
+ def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs):
+ optional_params = MaxKBBaseModel.filter_optional_params(model_kwargs)
+ return OllamaReranker(
+ model=model_name,
+ base_url=model_credential.get('api_base'),
+ **optional_params
+ )
+
+ def compress_documents(self, documents: Sequence[Document], query: str, callbacks: Optional[Callbacks] = None) -> \
+ Sequence[Document]:
+ """Rank documents based on their similarity to the query.
+
+ Args:
+ query: The query text.
+ documents: The list of document texts to rank.
+
+ Returns:
+ List of documents sorted by relevance to the query.
+ """
+ # 获取查询和文档的嵌入
+ query_embedding = self.embed_query(query)
+ documents = [doc.page_content for doc in documents]
+ document_embeddings = self.embed_documents(documents)
+ # 计算相似度
+ similarities = cosine_similarity([query_embedding], document_embeddings)[0]
+ ranked_docs = [(doc,_) for _, doc in sorted(zip(similarities, documents), reverse=True)][:self.top_n]
+ return [
+ Document(
+ page_content=doc, # 第一个值是文档内容
+ metadata={'relevance_score': score} # 第二个值是相似度分数
+ )
+ for doc, score in ranked_docs
+ ]
+
+
diff --git a/apps/setting/models_provider/impl/ollama_model_provider/ollama_model_provider.py b/apps/setting/models_provider/impl/ollama_model_provider/ollama_model_provider.py
index 73239921e45..f9de848e2fe 100644
--- a/apps/setting/models_provider/impl/ollama_model_provider/ollama_model_provider.py
+++ b/apps/setting/models_provider/impl/ollama_model_provider/ollama_model_provider.py
@@ -12,120 +12,204 @@
from urllib.parse import urlparse, ParseResult
import requests
-from langchain.chat_models.base import BaseChatModel
-
-from common import forms
-from common.exception.app_exception import AppApiException
-from common.forms import BaseForm
from common.util.file_util import get_file_content
from setting.models_provider.base_model_provider import IModelProvider, ModelProvideInfo, ModelInfo, ModelTypeConst, \
- BaseModelCredential, DownModelChunk, DownModelChunkStatus, ValidCode
-from setting.models_provider.impl.ollama_model_provider.model.ollama_chat_model import OllamaChatModel
+ BaseModelCredential, DownModelChunk, DownModelChunkStatus, ValidCode, ModelInfoManage
+from setting.models_provider.impl.ollama_model_provider.credential.embedding import OllamaEmbeddingModelCredential
+from setting.models_provider.impl.ollama_model_provider.credential.image import OllamaImageModelCredential
+from setting.models_provider.impl.ollama_model_provider.credential.llm import OllamaLLMModelCredential
+from setting.models_provider.impl.ollama_model_provider.credential.reranker import OllamaReRankModelCredential
+from setting.models_provider.impl.ollama_model_provider.model.embedding import OllamaEmbedding
+from setting.models_provider.impl.ollama_model_provider.model.image import OllamaImage
+from setting.models_provider.impl.ollama_model_provider.model.llm import OllamaChatModel
+from setting.models_provider.impl.ollama_model_provider.model.reranker import OllamaReranker
from smartdoc.conf import PROJECT_DIR
+from django.utils.translation import gettext as _
""
-
-class OllamaLLMModelCredential(BaseForm, BaseModelCredential):
- def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], raise_exception=False):
- model_type_list = OllamaModelProvider().get_model_type_list()
- if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))):
- raise AppApiException(ValidCode.valid_error.value, f'{model_type} 模型类型不支持')
- try:
- model_list = OllamaModelProvider.get_base_model_list(model_credential.get('api_base'))
- except Exception as e:
- raise AppApiException(ValidCode.valid_error.value, "API 域名无效")
- exist = [model for model in (model_list.get('models') if model_list.get('models') is not None else []) if
- model.get('model') == model_name or model.get('model').replace(":latest", "") == model_name]
- if len(exist) == 0:
- raise AppApiException(ValidCode.model_not_fount, "模型不存在,请先下载模型")
- return True
-
- def encryption_dict(self, model_info: Dict[str, object]):
- return {**model_info, 'api_key': super().encryption(model_info.get('api_key', ''))}
-
- def build_model(self, model_info: Dict[str, object]):
- for key in ['api_key', 'model']:
- if key not in model_info:
- raise AppApiException(500, f'{key} 字段为必填字段')
- self.api_key = model_info.get('api_key')
- return self
-
- api_base = forms.TextInputField('API 域名', required=True)
- api_key = forms.PasswordInputField('API Key', required=True)
-
-
ollama_llm_model_credential = OllamaLLMModelCredential()
+model_info_list = [
+ ModelInfo(
+ 'deepseek-r1:1.5b',
+ '',
+ ModelTypeConst.LLM, ollama_llm_model_credential, OllamaChatModel),
+ ModelInfo(
+ 'deepseek-r1:7b',
+ '',
+ ModelTypeConst.LLM, ollama_llm_model_credential, OllamaChatModel),
+ ModelInfo(
+ 'deepseek-r1:8b',
+ '',
+ ModelTypeConst.LLM, ollama_llm_model_credential, OllamaChatModel),
+ ModelInfo(
+ 'deepseek-r1:14b',
+ '',
+ ModelTypeConst.LLM, ollama_llm_model_credential, OllamaChatModel),
+ ModelInfo(
+ 'deepseek-r1:32b',
+ '',
+ ModelTypeConst.LLM, ollama_llm_model_credential, OllamaChatModel),
-model_dict = {
- 'llama2': ModelInfo(
+ ModelInfo(
'llama2',
- 'Llama 2 是一组经过预训练和微调的生成文本模型,其规模从 70 亿到 700 亿个不等。这是 7B 预训练模型的存储库。其他模型的链接可以在底部的索引中找到。',
- ModelTypeConst.LLM, ollama_llm_model_credential),
- 'llama2:13b': ModelInfo(
+ _('Llama 2 is a set of pretrained and fine-tuned generative text models ranging in size from 7 billion to 70 billion. This is a repository of 7B pretrained models. Links to other models can be found in the index at the bottom.'),
+ ModelTypeConst.LLM, ollama_llm_model_credential, OllamaChatModel),
+ ModelInfo(
'llama2:13b',
- 'Llama 2 是一组经过预训练和微调的生成文本模型,其规模从 70 亿到 700 亿个不等。这是 13B 预训练模型的存储库。其他模型的链接可以在底部的索引中找到。',
- ModelTypeConst.LLM, ollama_llm_model_credential),
- 'llama2:70b': ModelInfo(
+ _('Llama 2 is a set of pretrained and fine-tuned generative text models ranging in size from 7 billion to 70 billion. This is a repository of 13B pretrained models. Links to other models can be found in the index at the bottom.'),
+ ModelTypeConst.LLM, ollama_llm_model_credential, OllamaChatModel),
+ ModelInfo(
'llama2:70b',
- 'Llama 2 是一组经过预训练和微调的生成文本模型,其规模从 70 亿到 700 亿个不等。这是 70B 预训练模型的存储库。其他模型的链接可以在底部的索引中找到。',
- ModelTypeConst.LLM, ollama_llm_model_credential),
- 'llama2-chinese:13b': ModelInfo(
+ _('Llama 2 is a set of pretrained and fine-tuned generative text models ranging in size from 7 billion to 70 billion. This is a repository of 70B pretrained models. Links to other models can be found in the index at the bottom.'),
+ ModelTypeConst.LLM, ollama_llm_model_credential, OllamaChatModel),
+ ModelInfo(
'llama2-chinese:13b',
- '由于Llama2本身的中文对齐较弱,我们采用中文指令集,对meta-llama/Llama-2-13b-chat-hf进行LoRA微调,使其具备较强的中文对话能力。',
- ModelTypeConst.LLM, ollama_llm_model_credential),
- 'llama3:8b': ModelInfo(
+ _('Since the Chinese alignment of Llama2 itself is weak, we use the Chinese instruction set to fine-tune meta-llama/Llama-2-13b-chat-hf with LoRA so that it has strong Chinese conversation capabilities.'),
+ ModelTypeConst.LLM, ollama_llm_model_credential, OllamaChatModel),
+ ModelInfo(
'llama3:8b',
- 'Meta Llama 3:迄今为止最有能力的公开产品LLM。8亿参数。',
- ModelTypeConst.LLM, ollama_llm_model_credential),
- 'llama3:70b': ModelInfo(
+ _('Meta Llama 3: The most capable public product LLM to date. 8 billion parameters.'),
+ ModelTypeConst.LLM, ollama_llm_model_credential, OllamaChatModel),
+ ModelInfo(
'llama3:70b',
- 'Meta Llama 3:迄今为止最有能力的公开产品LLM。70亿参数。',
- ModelTypeConst.LLM, ollama_llm_model_credential),
- 'qwen:0.5b': ModelInfo(
+ _('Meta Llama 3: The most capable public product LLM to date. 70 billion parameters.'),
+ ModelTypeConst.LLM, ollama_llm_model_credential, OllamaChatModel),
+ ModelInfo(
'qwen:0.5b',
- 'qwen 1.5 0.5b 相较于以往版本,模型与人类偏好的对齐程度以及多语言处理能力上有显著增强。所有规模的模型都支持32768个tokens的上下文长度。0.5亿参数。',
- ModelTypeConst.LLM, ollama_llm_model_credential),
- 'qwen:1.8b': ModelInfo(
+ _("Compared with previous versions, qwen 1.5 0.5b has significantly enhanced the model's alignment with human preferences and its multi-language processing capabilities. Models of all sizes support a context length of 32768 tokens. 500 million parameters."),
+ ModelTypeConst.LLM, ollama_llm_model_credential, OllamaChatModel),
+ ModelInfo(
'qwen:1.8b',
- 'qwen 1.5 1.8b 相较于以往版本,模型与人类偏好的对齐程度以及多语言处理能力上有显著增强。所有规模的模型都支持32768个tokens的上下文长度。1.8亿参数。',
- ModelTypeConst.LLM, ollama_llm_model_credential),
- 'qwen:4b': ModelInfo(
+ _("Compared with previous versions, qwen 1.5 1.8b has significantly enhanced the model's alignment with human preferences and its multi-language processing capabilities. Models of all sizes support a context length of 32768 tokens. 1.8 billion parameters."),
+ ModelTypeConst.LLM, ollama_llm_model_credential, OllamaChatModel),
+ ModelInfo(
'qwen:4b',
- 'qwen 1.5 4b 相较于以往版本,模型与人类偏好的对齐程度以及多语言处理能力上有显著增强。所有规模的模型都支持32768个tokens的上下文长度。4亿参数。',
- ModelTypeConst.LLM, ollama_llm_model_credential),
- 'qwen:7b': ModelInfo(
+ _("Compared with previous versions, qwen 1.5 4b has significantly enhanced the model's alignment with human preferences and its multi-language processing capabilities. Models of all sizes support a context length of 32768 tokens. 4 billion parameters."),
+ ModelTypeConst.LLM, ollama_llm_model_credential, OllamaChatModel),
+
+ ModelInfo(
'qwen:7b',
- 'qwen 1.5 7b 相较于以往版本,模型与人类偏好的对齐程度以及多语1言处理能力上有显著增强。所有规模的模型都支持32768个tokens的上下文长度。7亿参数。',
- ModelTypeConst.LLM, ollama_llm_model_credential),
- 'qwen:14b': ModelInfo(
+ _("Compared with previous versions, qwen 1.5 7b has significantly enhanced the model's alignment with human preferences and its multi-language processing capabilities. Models of all sizes support a context length of 32768 tokens. 7 billion parameters."),
+ ModelTypeConst.LLM, ollama_llm_model_credential, OllamaChatModel),
+ ModelInfo(
'qwen:14b',
- 'qwen 1.5 14b 相较于以往版本,模型与人类偏好的对齐程度以及多语言处理能力上有显著增强。所有规模的模型都支持32768个tokens的上下文长度。14亿参数。',
- ModelTypeConst.LLM, ollama_llm_model_credential),
- 'qwen:32b': ModelInfo(
+ _("Compared with previous versions, qwen 1.5 14b has significantly enhanced the model's alignment with human preferences and its multi-language processing capabilities. Models of all sizes support a context length of 32768 tokens. 14 billion parameters."),
+ ModelTypeConst.LLM, ollama_llm_model_credential, OllamaChatModel),
+ ModelInfo(
'qwen:32b',
- 'qwen 1.5 32b 相较于以往版本,模型与人类偏好的对齐程度以及多语言处理能力上有显著增强。所有规模的模型都支持32768个tokens的上下文长度。32亿参数。',
- ModelTypeConst.LLM, ollama_llm_model_credential),
- 'qwen:72b': ModelInfo(
+ _("Compared with previous versions, qwen 1.5 32b has significantly enhanced the model's alignment with human preferences and its multi-language processing capabilities. Models of all sizes support a context length of 32768 tokens. 32 billion parameters."),
+ ModelTypeConst.LLM, ollama_llm_model_credential, OllamaChatModel),
+ ModelInfo(
'qwen:72b',
- 'qwen 1.5 72b 相较于以往版本,模型与人类偏好的对齐程度以及多语言处理能力上有显著增强。所有规模的模型都支持32768个tokens的上下文长度。72亿参数。',
- ModelTypeConst.LLM, ollama_llm_model_credential),
- 'qwen:110b': ModelInfo(
+ _("Compared with previous versions, qwen 1.5 72b has significantly enhanced the model's alignment with human preferences and its multi-language processing capabilities. Models of all sizes support a context length of 32768 tokens. 72 billion parameters."),
+ ModelTypeConst.LLM, ollama_llm_model_credential, OllamaChatModel),
+ ModelInfo(
'qwen:110b',
- 'qwen 1.5 110b 相较于以往版本,模型与人类偏好的对齐程度以及多语言处理能力上有显著增强。所有规模的模型都支持32768个tokens的上下文长度。110亿参数。',
- ModelTypeConst.LLM, ollama_llm_model_credential),
- 'phi3': ModelInfo(
+ _("Compared with previous versions, qwen 1.5 110b has significantly enhanced the model's alignment with human preferences and its multi-language processing capabilities. Models of all sizes support a context length of 32768 tokens. 110 billion parameters."),
+ ModelTypeConst.LLM, ollama_llm_model_credential, OllamaChatModel),
+ ModelInfo(
+ 'qwen2:72b-instruct',
+ '',
+ ModelTypeConst.LLM, ollama_llm_model_credential, OllamaChatModel),
+ ModelInfo(
+ 'qwen2:57b-a14b-instruct',
+ '',
+ ModelTypeConst.LLM, ollama_llm_model_credential, OllamaChatModel),
+ ModelInfo(
+ 'qwen2:7b-instruct',
+ '',
+ ModelTypeConst.LLM, ollama_llm_model_credential, OllamaChatModel),
+ ModelInfo(
+ 'qwen2.5:72b-instruct',
+ '',
+ ModelTypeConst.LLM, ollama_llm_model_credential, OllamaChatModel),
+ ModelInfo(
+ 'qwen2.5:32b-instruct',
+ '',
+ ModelTypeConst.LLM, ollama_llm_model_credential, OllamaChatModel),
+ ModelInfo(
+ 'qwen2.5:14b-instruct',
+ '',
+ ModelTypeConst.LLM, ollama_llm_model_credential, OllamaChatModel),
+ ModelInfo(
+ 'qwen2.5:7b-instruct',
+ '',
+ ModelTypeConst.LLM, ollama_llm_model_credential, OllamaChatModel),
+ ModelInfo(
+ 'qwen2.5:1.5b-instruct',
+ '',
+ ModelTypeConst.LLM, ollama_llm_model_credential, OllamaChatModel),
+ ModelInfo(
+ 'qwen2.5:0.5b-instruct',
+ '',
+ ModelTypeConst.LLM, ollama_llm_model_credential, OllamaChatModel),
+ ModelInfo(
+ 'qwen2.5:3b-instruct',
+ '',
+ ModelTypeConst.LLM, ollama_llm_model_credential, OllamaChatModel),
+ ModelInfo(
+ 'phi3',
+ _("Phi-3 Mini is Microsoft's 3.8B parameter, lightweight, state-of-the-art open model."),
+ ModelTypeConst.LLM, ollama_llm_model_credential, OllamaChatModel),
+]
+ollama_embedding_model_credential = OllamaEmbeddingModelCredential()
+ollama_image_model_credential = OllamaImageModelCredential()
+ollama_reranker_model_credential = OllamaReRankModelCredential()
+embedding_model_info = [
+ ModelInfo(
+ 'nomic-embed-text',
+ _('A high-performance open embedding model with a large token context window.'),
+ ModelTypeConst.EMBEDDING, ollama_embedding_model_credential, OllamaEmbedding),
+]
+reranker_model_info = [
+ ModelInfo(
+ 'linux6200/bge-reranker-v2-m3',
+ '',
+ ModelTypeConst.RERANKER, ollama_reranker_model_credential, OllamaReranker),
+]
+
+image_model_info = [
+ ModelInfo(
+ 'llava:7b',
+ '',
+ ModelTypeConst.IMAGE, ollama_image_model_credential, OllamaImage),
+ ModelInfo(
+ 'llava:13b',
+ '',
+ ModelTypeConst.IMAGE, ollama_image_model_credential, OllamaImage),
+ ModelInfo(
+ 'llava:34b',
+ '',
+ ModelTypeConst.IMAGE, ollama_image_model_credential, OllamaImage),
+]
+
+model_info_manage = (
+ ModelInfoManage.builder()
+ .append_model_info_list(model_info_list)
+ .append_model_info_list(embedding_model_info)
+ .append_default_model_info(ModelInfo(
'phi3',
- 'Phi-3 Mini是Microsoft的3.8B参数,轻量级,最先进的开放模型。',
- ModelTypeConst.LLM, ollama_llm_model_credential),
-}
+ _('Phi-3 Mini is Microsoft\'s 3.8B parameter, lightweight, state-of-the-art open model.'),
+ ModelTypeConst.LLM, ollama_llm_model_credential, OllamaChatModel))
+ .append_default_model_info(ModelInfo(
+ 'nomic-embed-text',
+ _('A high-performance open embedding model with a large token context window.'),
+ ModelTypeConst.EMBEDDING, ollama_embedding_model_credential, OllamaEmbedding), )
+ .append_model_info_list(image_model_info)
+ .append_default_model_info(image_model_info[0])
+ .append_model_info_list(reranker_model_info)
+ .append_default_model_info(reranker_model_info[0])
+ .build()
+)
def get_base_url(url: str):
parse = urlparse(url)
- return ParseResult(scheme=parse.scheme, netloc=parse.netloc, path='', params='',
- query='',
- fragment='').geturl()
+ result_url = ParseResult(scheme=parse.scheme, netloc=parse.netloc, path=parse.path, params='',
+ query='',
+ fragment='').geturl()
+ return result_url[:-1] if result_url.endswith("/") else result_url
def convert_to_down_model_chunk(row_str: str, chunk_index: int):
@@ -162,42 +246,20 @@ def convert(response_stream) -> Iterator[DownModelChunk]:
temp = ""
if len(temp) > 0:
- print(temp)
rows = [t for t in temp.split("\n") if len(t) > 0]
for row in rows:
yield convert_to_down_model_chunk(row, index)
class OllamaModelProvider(IModelProvider):
+ def get_model_info_manage(self):
+ return model_info_manage
+
def get_model_provide_info(self):
return ModelProvideInfo(provider='model_ollama_provider', name='Ollama', icon=get_file_content(
os.path.join(PROJECT_DIR, "apps", "setting", 'models_provider', 'impl', 'ollama_model_provider', 'icon',
'ollama_icon_svg')))
- def get_model_type_list(self):
- return [{'key': "大语言模型", 'value': "LLM"}]
-
- def get_model_list(self, model_type):
- if model_type is None:
- raise AppApiException(500, '模型类型不能为空')
- return [model_dict.get(key).to_dict() for key in
- list(filter(lambda key: model_dict.get(key).model_type == model_type, model_dict.keys()))]
-
- def get_model_credential(self, model_type, model_name):
- if model_name in model_dict:
- return model_dict.get(model_name).model_credential
- # 如果使用模型不在配置中,则使用默认认证
- return ollama_llm_model_credential
-
- def get_model(self, model_type, model_name, model_credential: Dict[str, object], **model_kwargs) -> BaseChatModel:
- api_base = model_credential.get('api_base')
- base_url = get_base_url(api_base)
- return OllamaChatModel(model=model_name, openai_api_base=(base_url + '/v1'),
- openai_api_key=model_credential.get('api_key'))
-
- def get_dialogue_number(self):
- return 2
-
@staticmethod
def get_base_model_list(api_base):
base_url = get_base_url(api_base)
@@ -206,7 +268,7 @@ def get_base_model_list(api_base):
return r.json()
def down_model(self, model_type: str, model_name, model_credential: Dict[str, object]) -> Iterator[DownModelChunk]:
- api_base = model_credential.get('api_base')
+ api_base = model_credential.get('api_base', '')
base_url = get_base_url(api_base)
r = requests.request(
method="POST",
diff --git a/apps/setting/models_provider/impl/openai_model_provider/credential/embedding.py b/apps/setting/models_provider/impl/openai_model_provider/credential/embedding.py
new file mode 100644
index 00000000000..31f18451864
--- /dev/null
+++ b/apps/setting/models_provider/impl/openai_model_provider/credential/embedding.py
@@ -0,0 +1,53 @@
+# coding=utf-8
+"""
+ @project: MaxKB
+ @Author:虎
+ @file: embedding.py
+ @date:2024/7/12 16:45
+ @desc:
+"""
+import traceback
+from typing import Dict
+
+from django.utils.translation import gettext as _
+
+from common import forms
+from common.exception.app_exception import AppApiException
+from common.forms import BaseForm
+from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode
+
+
+class OpenAIEmbeddingCredential(BaseForm, BaseModelCredential):
+ def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], model_params, provider,
+ raise_exception=True):
+ model_type_list = provider.get_model_type_list()
+ if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))):
+ raise AppApiException(ValidCode.valid_error.value,
+ _('{model_type} Model type is not supported').format(model_type=model_type))
+
+ for key in ['api_base', 'api_key']:
+ if key not in model_credential:
+ if raise_exception:
+ raise AppApiException(ValidCode.valid_error.value, _('{key} is required').format(key=key))
+ else:
+ return False
+ try:
+ model = provider.get_model(model_type, model_name, model_credential)
+ model.embed_query(_('Hello'))
+ except Exception as e:
+ traceback.print_exc()
+ if isinstance(e, AppApiException):
+ raise e
+ if raise_exception:
+ raise AppApiException(ValidCode.valid_error.value,
+ _('Verification failed, please check whether the parameters are correct: {error}').format(
+ error=str(e)))
+ else:
+ return False
+ return True
+
+ def encryption_dict(self, model: Dict[str, object]):
+ return {**model, 'api_key': super().encryption(model.get('api_key', ''))}
+
+ api_base = forms.TextInputField('API URL', required=True)
+ api_key = forms.PasswordInputField('API Key', required=True)
diff --git a/apps/setting/models_provider/impl/openai_model_provider/credential/image.py b/apps/setting/models_provider/impl/openai_model_provider/credential/image.py
new file mode 100644
index 00000000000..7cd7197f721
--- /dev/null
+++ b/apps/setting/models_provider/impl/openai_model_provider/credential/image.py
@@ -0,0 +1,74 @@
+# coding=utf-8
+import base64
+import os
+import traceback
+from typing import Dict
+
+from langchain_core.messages import HumanMessage
+
+from common import forms
+from common.exception.app_exception import AppApiException
+from common.forms import BaseForm, TooltipLabel
+from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode
+from django.utils.translation import gettext_lazy as _, gettext
+
+
+class OpenAIImageModelParams(BaseForm):
+ temperature = forms.SliderField(TooltipLabel(_('Temperature'),
+ _('Higher values make the output more random, while lower values make it more focused and deterministic')),
+ required=True, default_value=0.7,
+ _min=0.1,
+ _max=1.0,
+ _step=0.01,
+ precision=2)
+
+ max_tokens = forms.SliderField(
+ TooltipLabel(_('Output the maximum Tokens'),
+ _('Specify the maximum number of tokens that the model can generate')),
+ required=True, default_value=800,
+ _min=1,
+ _max=100000,
+ _step=1,
+ precision=0)
+
+
+class OpenAIImageModelCredential(BaseForm, BaseModelCredential):
+ api_base = forms.TextInputField('API URL', required=True)
+ api_key = forms.PasswordInputField('API Key', required=True)
+
+ def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], model_params, provider,
+ raise_exception=False):
+ model_type_list = provider.get_model_type_list()
+ if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))):
+ raise AppApiException(ValidCode.valid_error.value,
+ gettext('{model_type} Model type is not supported').format(model_type=model_type))
+
+ for key in ['api_base', 'api_key']:
+ if key not in model_credential:
+ if raise_exception:
+ raise AppApiException(ValidCode.valid_error.value, gettext('{key} is required').format(key=key))
+ else:
+ return False
+ try:
+ model = provider.get_model(model_type, model_name, model_credential, **model_params)
+ res = model.stream([HumanMessage(content=[{"type": "text", "text": gettext('Hello')}])])
+ for chunk in res:
+ print(chunk)
+ except Exception as e:
+ traceback.print_exc()
+ if isinstance(e, AppApiException):
+ raise e
+ if raise_exception:
+ raise AppApiException(ValidCode.valid_error.value,
+ gettext(
+ 'Verification failed, please check whether the parameters are correct: {error}').format(
+ error=str(e)))
+ else:
+ return False
+ return True
+
+ def encryption_dict(self, model: Dict[str, object]):
+ return {**model, 'api_key': super().encryption(model.get('api_key', ''))}
+
+ def get_model_params_setting_form(self, model_name):
+ return OpenAIImageModelParams()
diff --git a/apps/setting/models_provider/impl/openai_model_provider/credential/llm.py b/apps/setting/models_provider/impl/openai_model_provider/credential/llm.py
new file mode 100644
index 00000000000..eb862264899
--- /dev/null
+++ b/apps/setting/models_provider/impl/openai_model_provider/credential/llm.py
@@ -0,0 +1,80 @@
+# coding=utf-8
+"""
+ @project: MaxKB
+ @Author:虎
+ @file: llm.py
+ @date:2024/7/11 18:32
+ @desc:
+"""
+import traceback
+from typing import Dict
+
+from django.utils.translation import gettext_lazy as _, gettext
+from langchain_core.messages import HumanMessage
+from openai import BadRequestError
+
+from common import forms
+from common.exception.app_exception import AppApiException
+from common.forms import BaseForm, TooltipLabel
+from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode
+
+
+class OpenAILLMModelParams(BaseForm):
+ temperature = forms.SliderField(TooltipLabel(_('Temperature'),
+ _('Higher values make the output more random, while lower values make it more focused and deterministic')),
+ required=True, default_value=0.7,
+ _min=0.1,
+ _max=1.0,
+ _step=0.01,
+ precision=2)
+
+ max_tokens = forms.SliderField(
+ TooltipLabel(_('Output the maximum Tokens'),
+ _('Specify the maximum number of tokens that the model can generate')),
+ required=True, default_value=800,
+ _min=1,
+ _max=100000,
+ _step=1,
+ precision=0)
+
+
+class OpenAILLMModelCredential(BaseForm, BaseModelCredential):
+
+ def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], model_params, provider,
+ raise_exception=False):
+ model_type_list = provider.get_model_type_list()
+ if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))):
+ raise AppApiException(ValidCode.valid_error.value,
+ gettext('{model_type} Model type is not supported').format(model_type=model_type))
+
+ for key in ['api_base', 'api_key']:
+ if key not in model_credential:
+ if raise_exception:
+ raise AppApiException(ValidCode.valid_error.value, gettext('{key} is required').format(key=key))
+ else:
+ return False
+ try:
+
+ model = provider.get_model(model_type, model_name, model_credential, **model_params)
+ model.invoke([HumanMessage(content=gettext('Hello'))])
+ except Exception as e:
+ traceback.print_exc()
+ if isinstance(e, AppApiException) or isinstance(e, BadRequestError):
+ raise e
+ if raise_exception:
+ raise AppApiException(ValidCode.valid_error.value,
+ gettext(
+ 'Verification failed, please check whether the parameters are correct: {error}').format(
+ error=str(e)))
+ else:
+ return False
+ return True
+
+ def encryption_dict(self, model: Dict[str, object]):
+ return {**model, 'api_key': super().encryption(model.get('api_key', ''))}
+
+ api_base = forms.TextInputField('API URL', required=True)
+ api_key = forms.PasswordInputField('API Key', required=True)
+
+ def get_model_params_setting_form(self, model_name):
+ return OpenAILLMModelParams()
diff --git a/apps/setting/models_provider/impl/openai_model_provider/credential/stt.py b/apps/setting/models_provider/impl/openai_model_provider/credential/stt.py
new file mode 100644
index 00000000000..e198238b123
--- /dev/null
+++ b/apps/setting/models_provider/impl/openai_model_provider/credential/stt.py
@@ -0,0 +1,49 @@
+# coding=utf-8
+import traceback
+from typing import Dict
+
+from django.utils.translation import gettext as _
+
+from common import forms
+from common.exception.app_exception import AppApiException
+from common.forms import BaseForm
+from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode
+
+
+class OpenAISTTModelCredential(BaseForm, BaseModelCredential):
+ api_base = forms.TextInputField('API URL', required=True)
+ api_key = forms.PasswordInputField('API Key', required=True)
+
+ def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], model_params, provider,
+ raise_exception=False):
+ model_type_list = provider.get_model_type_list()
+ if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))):
+ raise AppApiException(ValidCode.valid_error.value,
+ _('{model_type} Model type is not supported').format(model_type=model_type))
+
+ for key in ['api_base', 'api_key']:
+ if key not in model_credential:
+ if raise_exception:
+ raise AppApiException(ValidCode.valid_error.value, _('{key} is required').format(key=key))
+ else:
+ return False
+ try:
+ model = provider.get_model(model_type, model_name, model_credential)
+ model.check_auth()
+ except Exception as e:
+ traceback.print_exc()
+ if isinstance(e, AppApiException):
+ raise e
+ if raise_exception:
+ raise AppApiException(ValidCode.valid_error.value,
+ _('Verification failed, please check whether the parameters are correct: {error}').format(
+ error=str(e)))
+ else:
+ return False
+ return True
+
+ def encryption_dict(self, model: Dict[str, object]):
+ return {**model, 'api_key': super().encryption(model.get('api_key', ''))}
+
+ def get_model_params_setting_form(self, model_name):
+ pass
diff --git a/apps/setting/models_provider/impl/openai_model_provider/credential/tti.py b/apps/setting/models_provider/impl/openai_model_provider/credential/tti.py
new file mode 100644
index 00000000000..cd2e342cd22
--- /dev/null
+++ b/apps/setting/models_provider/impl/openai_model_provider/credential/tti.py
@@ -0,0 +1,90 @@
+# coding=utf-8
+import traceback
+from typing import Dict
+
+from django.utils.translation import gettext_lazy as _, gettext
+
+from common import forms
+from common.exception.app_exception import AppApiException
+from common.forms import BaseForm, TooltipLabel
+from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode
+
+
+class OpenAITTIModelParams(BaseForm):
+ size = forms.SingleSelect(
+ TooltipLabel(_('Image size'),
+ _('The image generation endpoint allows you to create raw images based on text prompts. When using the DALL·E 3, the image size can be 1024x1024, 1024x1792 or 1792x1024 pixels.')),
+ required=True,
+ default_value='1024x1024',
+ option_list=[
+ {'value': '1024x1024', 'label': '1024x1024'},
+ {'value': '1024x1792', 'label': '1024x1792'},
+ {'value': '1792x1024', 'label': '1792x1024'},
+ ],
+ text_field='label',
+ value_field='value'
+ )
+
+ quality = forms.SingleSelect(
+ TooltipLabel(_('Picture quality'), _('''
+By default, images are produced in standard quality, but with DALL·E 3 you can set quality: "hd" to enhance detail. Square, standard quality images are generated fastest.
+ ''')),
+ required=True,
+ default_value='standard',
+ option_list=[
+ {'value': 'standard', 'label': 'standard'},
+ {'value': 'hd', 'label': 'hd'},
+ ],
+ text_field='label',
+ value_field='value'
+ )
+
+ n = forms.SliderField(
+ TooltipLabel(_('Number of pictures'),
+ _('You can use DALL·E 3 to request 1 image at a time (requesting more images by issuing parallel requests), or use DALL·E 2 with the n parameter to request up to 10 images at a time.')),
+ required=True, default_value=1,
+ _min=1,
+ _max=10,
+ _step=1,
+ precision=0)
+
+
+class OpenAITextToImageModelCredential(BaseForm, BaseModelCredential):
+ api_base = forms.TextInputField('API URL', required=True)
+ api_key = forms.PasswordInputField('API Key', required=True)
+
+ def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], model_params, provider,
+ raise_exception=False):
+ model_type_list = provider.get_model_type_list()
+ if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))):
+ raise AppApiException(ValidCode.valid_error.value,
+ gettext('{model_type} Model type is not supported').format(model_type=model_type))
+
+ for key in ['api_base', 'api_key']:
+ if key not in model_credential:
+ if raise_exception:
+ raise AppApiException(ValidCode.valid_error.value, gettext('{key} is required').format(key=key))
+ else:
+ return False
+ try:
+ model = provider.get_model(model_type, model_name, model_credential, **model_params)
+ res = model.check_auth()
+ print(res)
+ except Exception as e:
+ traceback.print_exc()
+ if isinstance(e, AppApiException):
+ raise e
+ if raise_exception:
+ raise AppApiException(ValidCode.valid_error.value,
+ gettext(
+ 'Verification failed, please check whether the parameters are correct: {error}').format(
+ error=str(e)))
+ else:
+ return False
+ return True
+
+ def encryption_dict(self, model: Dict[str, object]):
+ return {**model, 'api_key': super().encryption(model.get('api_key', ''))}
+
+ def get_model_params_setting_form(self, model_name):
+ return OpenAITTIModelParams()
diff --git a/apps/setting/models_provider/impl/openai_model_provider/credential/tts.py b/apps/setting/models_provider/impl/openai_model_provider/credential/tts.py
new file mode 100644
index 00000000000..57059197501
--- /dev/null
+++ b/apps/setting/models_provider/impl/openai_model_provider/credential/tts.py
@@ -0,0 +1,68 @@
+# coding=utf-8
+import traceback
+from typing import Dict
+
+from django.utils.translation import gettext_lazy as _, gettext
+
+from common import forms
+from common.exception.app_exception import AppApiException
+from common.forms import BaseForm, TooltipLabel
+from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode
+
+
+class OpenAITTSModelGeneralParams(BaseForm):
+ # alloy, echo, fable, onyx, nova, shimmer
+ voice = forms.SingleSelect(
+ TooltipLabel('Voice',
+ _('Try out the different sounds (Alloy, Echo, Fable, Onyx, Nova, and Shimmer) to find one that suits your desired tone and audience. The current voiceover is optimized for English.')),
+ required=True, default_value='alloy',
+ text_field='value',
+ value_field='value',
+ option_list=[
+ {'text': 'alloy', 'value': 'alloy'},
+ {'text': 'echo', 'value': 'echo'},
+ {'text': 'fable', 'value': 'fable'},
+ {'text': 'onyx', 'value': 'onyx'},
+ {'text': 'nova', 'value': 'nova'},
+ {'text': 'shimmer', 'value': 'shimmer'},
+ ])
+
+
+class OpenAITTSModelCredential(BaseForm, BaseModelCredential):
+ api_base = forms.TextInputField('API URL', required=True)
+ api_key = forms.PasswordInputField('API Key', required=True)
+
+ def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], model_params, provider,
+ raise_exception=False):
+ model_type_list = provider.get_model_type_list()
+ if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))):
+ raise AppApiException(ValidCode.valid_error.value,
+ gettext('{model_type} Model type is not supported').format(model_type=model_type))
+
+ for key in ['api_base', 'api_key']:
+ if key not in model_credential:
+ if raise_exception:
+ raise AppApiException(ValidCode.valid_error.value, gettext('{key} is required').format(key=key))
+ else:
+ return False
+ try:
+ model = provider.get_model(model_type, model_name, model_credential, **model_params)
+ model.check_auth()
+ except Exception as e:
+ traceback.print_exc()
+ if isinstance(e, AppApiException):
+ raise e
+ if raise_exception:
+ raise AppApiException(ValidCode.valid_error.value,
+ gettext(
+ 'Verification failed, please check whether the parameters are correct: {error}').format(
+ error=str(e)))
+ else:
+ return False
+ return True
+
+ def encryption_dict(self, model: Dict[str, object]):
+ return {**model, 'api_key': super().encryption(model.get('api_key', ''))}
+
+ def get_model_params_setting_form(self, model_name):
+ return OpenAITTSModelGeneralParams()
diff --git a/apps/setting/models_provider/impl/openai_model_provider/model/embedding.py b/apps/setting/models_provider/impl/openai_model_provider/model/embedding.py
new file mode 100644
index 00000000000..f95e78188f0
--- /dev/null
+++ b/apps/setting/models_provider/impl/openai_model_provider/model/embedding.py
@@ -0,0 +1,39 @@
+# coding=utf-8
+"""
+ @project: MaxKB
+ @Author:虎
+ @file: embedding.py
+ @date:2024/7/12 17:44
+ @desc:
+"""
+from typing import Dict, List
+
+import openai
+
+from setting.models_provider.base_model_provider import MaxKBBaseModel
+
+
+class OpenAIEmbeddingModel(MaxKBBaseModel):
+ model_name: str
+
+ def __init__(self, api_key, base_url, model_name: str):
+ self.client = openai.OpenAI(api_key=api_key, base_url=base_url).embeddings
+ self.model_name = model_name
+
+ @staticmethod
+ def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs):
+ return OpenAIEmbeddingModel(
+ api_key=model_credential.get('api_key'),
+ model_name=model_name,
+ base_url=model_credential.get('api_base'),
+ )
+
+ def embed_query(self, text: str):
+ res = self.embed_documents([text])
+ return res[0]
+
+ def embed_documents(
+ self, texts: List[str], chunk_size: int | None = None
+ ) -> List[List[float]]:
+ res = self.client.create(input=texts, model=self.model_name, encoding_format="float")
+ return [e.embedding for e in res.data]
diff --git a/apps/setting/models_provider/impl/openai_model_provider/model/image.py b/apps/setting/models_provider/impl/openai_model_provider/model/image.py
new file mode 100644
index 00000000000..7ac0906a786
--- /dev/null
+++ b/apps/setting/models_provider/impl/openai_model_provider/model/image.py
@@ -0,0 +1,20 @@
+from typing import Dict
+
+from setting.models_provider.base_model_provider import MaxKBBaseModel
+from setting.models_provider.impl.base_chat_open_ai import BaseChatOpenAI
+
+
+class OpenAIImage(MaxKBBaseModel, BaseChatOpenAI):
+
+ @staticmethod
+ def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs):
+ optional_params = MaxKBBaseModel.filter_optional_params(model_kwargs)
+ return OpenAIImage(
+ model_name=model_name,
+ openai_api_base=model_credential.get('api_base'),
+ openai_api_key=model_credential.get('api_key'),
+ # stream_options={"include_usage": True},
+ streaming=True,
+ stream_usage=True,
+ extra_body=optional_params
+ )
diff --git a/apps/setting/models_provider/impl/openai_model_provider/model/llm.py b/apps/setting/models_provider/impl/openai_model_provider/model/llm.py
new file mode 100644
index 00000000000..1893852100b
--- /dev/null
+++ b/apps/setting/models_provider/impl/openai_model_provider/model/llm.py
@@ -0,0 +1,57 @@
+# coding=utf-8
+"""
+ @project: maxkb
+ @Author:虎
+ @file: llm.py
+ @date:2024/4/18 15:28
+ @desc:
+"""
+from typing import List, Dict
+
+from langchain_core.messages import BaseMessage, get_buffer_string
+
+from common.config.tokenizer_manage_config import TokenizerManage
+from setting.models_provider.base_model_provider import MaxKBBaseModel
+from setting.models_provider.impl.base_chat_open_ai import BaseChatOpenAI
+
+
+def custom_get_token_ids(text: str):
+ tokenizer = TokenizerManage.get_tokenizer()
+ return tokenizer.encode(text)
+
+
+class OpenAIChatModel(MaxKBBaseModel, BaseChatOpenAI):
+
+ @staticmethod
+ def is_cache_model():
+ return False
+
+ @staticmethod
+ def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs):
+ optional_params = MaxKBBaseModel.filter_optional_params(model_kwargs)
+ streaming = model_kwargs.get('streaming', True)
+ if 'o1' in model_name:
+ streaming = False
+ azure_chat_open_ai = OpenAIChatModel(
+ model=model_name,
+ base_url=model_credential.get('api_base'),
+ api_key=model_credential.get('api_key'),
+ extra_body=optional_params,
+ streaming=streaming,
+ custom_get_token_ids=custom_get_token_ids
+ )
+ return azure_chat_open_ai
+
+ def get_num_tokens_from_messages(self, messages: List[BaseMessage]) -> int:
+ try:
+ return super().get_num_tokens_from_messages(messages)
+ except Exception as e:
+ tokenizer = TokenizerManage.get_tokenizer()
+ return sum([len(tokenizer.encode(get_buffer_string([m]))) for m in messages])
+
+ def get_num_tokens(self, text: str) -> int:
+ try:
+ return super().get_num_tokens(text)
+ except Exception as e:
+ tokenizer = TokenizerManage.get_tokenizer()
+ return len(tokenizer.encode(text))
diff --git a/apps/setting/models_provider/impl/openai_model_provider/model/openai_chat_model.py b/apps/setting/models_provider/impl/openai_model_provider/model/openai_chat_model.py
deleted file mode 100644
index 7271fe8adf9..00000000000
--- a/apps/setting/models_provider/impl/openai_model_provider/model/openai_chat_model.py
+++ /dev/null
@@ -1,30 +0,0 @@
-# coding=utf-8
-"""
- @project: maxkb
- @Author:虎
- @file: openai_chat_model.py
- @date:2024/4/18 15:28
- @desc:
-"""
-from typing import List
-
-from langchain_core.messages import BaseMessage, get_buffer_string
-from langchain_openai import ChatOpenAI
-
-from common.config.tokenizer_manage_config import TokenizerManage
-
-
-class OpenAIChatModel(ChatOpenAI):
- def get_num_tokens_from_messages(self, messages: List[BaseMessage]) -> int:
- try:
- return super().get_num_tokens_from_messages(messages)
- except Exception as e:
- tokenizer = TokenizerManage.get_tokenizer()
- return sum([len(tokenizer.encode(get_buffer_string([m]))) for m in messages])
-
- def get_num_tokens(self, text: str) -> int:
- try:
- return super().get_num_tokens(text)
- except Exception as e:
- tokenizer = TokenizerManage.get_tokenizer()
- return len(tokenizer.encode(text))
diff --git a/apps/setting/models_provider/impl/openai_model_provider/model/stt.py b/apps/setting/models_provider/impl/openai_model_provider/model/stt.py
new file mode 100644
index 00000000000..0b5f9a4b932
--- /dev/null
+++ b/apps/setting/models_provider/impl/openai_model_provider/model/stt.py
@@ -0,0 +1,59 @@
+import asyncio
+import io
+from typing import Dict
+
+from openai import OpenAI
+
+from common.config.tokenizer_manage_config import TokenizerManage
+from setting.models_provider.base_model_provider import MaxKBBaseModel
+from setting.models_provider.impl.base_stt import BaseSpeechToText
+
+
+def custom_get_token_ids(text: str):
+ tokenizer = TokenizerManage.get_tokenizer()
+ return tokenizer.encode(text)
+
+
+class OpenAISpeechToText(MaxKBBaseModel, BaseSpeechToText):
+ api_base: str
+ api_key: str
+ model: str
+
+ def __init__(self, **kwargs):
+ super().__init__(**kwargs)
+ self.api_key = kwargs.get('api_key')
+ self.api_base = kwargs.get('api_base')
+
+ @staticmethod
+ def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs):
+ optional_params = {}
+ if 'max_tokens' in model_kwargs and model_kwargs['max_tokens'] is not None:
+ optional_params['max_tokens'] = model_kwargs['max_tokens']
+ if 'temperature' in model_kwargs and model_kwargs['temperature'] is not None:
+ optional_params['temperature'] = model_kwargs['temperature']
+ return OpenAISpeechToText(
+ model=model_name,
+ api_base=model_credential.get('api_base'),
+ api_key=model_credential.get('api_key'),
+ **optional_params,
+ )
+
+ def check_auth(self):
+ client = OpenAI(
+ base_url=self.api_base,
+ api_key=self.api_key
+ )
+ response_list = client.models.with_raw_response.list()
+ # print(response_list)
+
+ def speech_to_text(self, audio_file):
+ client = OpenAI(
+ base_url=self.api_base,
+ api_key=self.api_key
+ )
+ audio_data = audio_file.read()
+ buffer = io.BytesIO(audio_data)
+ buffer.name = "file.mp3" # this is the important line
+ res = client.audio.transcriptions.create(model=self.model, language="zh", file=buffer)
+ return res.text
+
diff --git a/apps/setting/models_provider/impl/openai_model_provider/model/tti.py b/apps/setting/models_provider/impl/openai_model_provider/model/tti.py
new file mode 100644
index 00000000000..942afcf9f0d
--- /dev/null
+++ b/apps/setting/models_provider/impl/openai_model_provider/model/tti.py
@@ -0,0 +1,58 @@
+from typing import Dict
+
+from openai import OpenAI
+
+from common.config.tokenizer_manage_config import TokenizerManage
+from setting.models_provider.base_model_provider import MaxKBBaseModel
+from setting.models_provider.impl.base_tti import BaseTextToImage
+
+
+def custom_get_token_ids(text: str):
+ tokenizer = TokenizerManage.get_tokenizer()
+ return tokenizer.encode(text)
+
+
+class OpenAITextToImage(MaxKBBaseModel, BaseTextToImage):
+ api_base: str
+ api_key: str
+ model: str
+ params: dict
+
+ def __init__(self, **kwargs):
+ super().__init__(**kwargs)
+ self.api_key = kwargs.get('api_key')
+ self.api_base = kwargs.get('api_base')
+ self.model = kwargs.get('model')
+ self.params = kwargs.get('params')
+
+ @staticmethod
+ def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs):
+ optional_params = {'params': {'size': '1024x1024', 'quality': 'standard', 'n': 1}}
+ for key, value in model_kwargs.items():
+ if key not in ['model_id', 'use_local', 'streaming']:
+ optional_params['params'][key] = value
+ return OpenAITextToImage(
+ model=model_name,
+ api_base=model_credential.get('api_base'),
+ api_key=model_credential.get('api_key'),
+ **optional_params,
+ )
+
+ def is_cache_model(self):
+ return False
+
+ def check_auth(self):
+ chat = OpenAI(api_key=self.api_key, base_url=self.api_base)
+ response_list = chat.models.with_raw_response.list()
+
+ # self.generate_image('生成一个小猫图片')
+
+ def generate_image(self, prompt: str, negative_prompt: str = None):
+ chat = OpenAI(api_key=self.api_key, base_url=self.api_base)
+ res = chat.images.generate(model=self.model, prompt=prompt, **self.params)
+ file_urls = []
+ for content in res.data:
+ url = content.url
+ file_urls.append(url)
+
+ return file_urls
diff --git a/apps/setting/models_provider/impl/openai_model_provider/model/tts.py b/apps/setting/models_provider/impl/openai_model_provider/model/tts.py
new file mode 100644
index 00000000000..0eeab1d7cce
--- /dev/null
+++ b/apps/setting/models_provider/impl/openai_model_provider/model/tts.py
@@ -0,0 +1,64 @@
+from typing import Dict
+
+from openai import OpenAI
+
+from common.config.tokenizer_manage_config import TokenizerManage
+from common.util.common import _remove_empty_lines
+from setting.models_provider.base_model_provider import MaxKBBaseModel
+from setting.models_provider.impl.base_tts import BaseTextToSpeech
+
+
+def custom_get_token_ids(text: str):
+ tokenizer = TokenizerManage.get_tokenizer()
+ return tokenizer.encode(text)
+
+
+class OpenAITextToSpeech(MaxKBBaseModel, BaseTextToSpeech):
+ api_base: str
+ api_key: str
+ model: str
+ params: dict
+
+ def __init__(self, **kwargs):
+ super().__init__(**kwargs)
+ self.api_key = kwargs.get('api_key')
+ self.api_base = kwargs.get('api_base')
+ self.model = kwargs.get('model')
+ self.params = kwargs.get('params')
+
+ @staticmethod
+ def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs):
+ optional_params = {'params': {'voice': 'alloy'}}
+ for key, value in model_kwargs.items():
+ if key not in ['model_id', 'use_local', 'streaming']:
+ optional_params['params'][key] = value
+ return OpenAITextToSpeech(
+ model=model_name,
+ api_base=model_credential.get('api_base'),
+ api_key=model_credential.get('api_key'),
+ **optional_params,
+ )
+
+ def check_auth(self):
+ client = OpenAI(
+ base_url=self.api_base,
+ api_key=self.api_key
+ )
+ response_list = client.models.with_raw_response.list()
+ # print(response_list)
+
+ def text_to_speech(self, text):
+ client = OpenAI(
+ base_url=self.api_base,
+ api_key=self.api_key
+ )
+ text = _remove_empty_lines(text)
+ with client.audio.speech.with_streaming_response.create(
+ model=self.model,
+ input=text,
+ **self.params
+ ) as response:
+ return response.read()
+
+ def is_cache_model(self):
+ return False
diff --git a/apps/setting/models_provider/impl/openai_model_provider/openai_model_provider.py b/apps/setting/models_provider/impl/openai_model_provider/openai_model_provider.py
index 324e851cd19..a06d3b75f08 100644
--- a/apps/setting/models_provider/impl/openai_model_provider/openai_model_provider.py
+++ b/apps/setting/models_provider/impl/openai_model_provider/openai_model_provider.py
@@ -7,127 +7,141 @@
@desc:
"""
import os
-from typing import Dict
-from langchain.schema import HumanMessage
-
-from common import forms
-from common.exception.app_exception import AppApiException
-from common.forms import BaseForm
from common.util.file_util import get_file_content
-from setting.models_provider.base_model_provider import IModelProvider, ModelProvideInfo, BaseModelCredential, \
- ModelInfo, \
- ModelTypeConst, ValidCode
-from setting.models_provider.impl.openai_model_provider.model.openai_chat_model import OpenAIChatModel
+from setting.models_provider.base_model_provider import IModelProvider, ModelProvideInfo, ModelInfo, \
+ ModelTypeConst, ModelInfoManage
+from setting.models_provider.impl.openai_model_provider.credential.embedding import OpenAIEmbeddingCredential
+from setting.models_provider.impl.openai_model_provider.credential.image import OpenAIImageModelCredential
+from setting.models_provider.impl.openai_model_provider.credential.llm import OpenAILLMModelCredential
+from setting.models_provider.impl.openai_model_provider.credential.stt import OpenAISTTModelCredential
+from setting.models_provider.impl.openai_model_provider.credential.tti import OpenAITextToImageModelCredential
+from setting.models_provider.impl.openai_model_provider.credential.tts import OpenAITTSModelCredential
+from setting.models_provider.impl.openai_model_provider.model.embedding import OpenAIEmbeddingModel
+from setting.models_provider.impl.openai_model_provider.model.image import OpenAIImage
+from setting.models_provider.impl.openai_model_provider.model.llm import OpenAIChatModel
+from setting.models_provider.impl.openai_model_provider.model.stt import OpenAISpeechToText
+from setting.models_provider.impl.openai_model_provider.model.tti import OpenAITextToImage
+from setting.models_provider.impl.openai_model_provider.model.tts import OpenAITextToSpeech
from smartdoc.conf import PROJECT_DIR
-
-
-class OpenAILLMModelCredential(BaseForm, BaseModelCredential):
-
- def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], raise_exception=False):
- model_type_list = OpenAIModelProvider().get_model_type_list()
- if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))):
- raise AppApiException(ValidCode.valid_error.value, f'{model_type} 模型类型不支持')
-
- for key in ['api_base', 'api_key']:
- if key not in model_credential:
- if raise_exception:
- raise AppApiException(ValidCode.valid_error.value, f'{key} 字段为必填字段')
- else:
- return False
- try:
- model = OpenAIModelProvider().get_model(model_type, model_name, model_credential)
- model.invoke([HumanMessage(content='你好')])
- except Exception as e:
- if isinstance(e, AppApiException):
- raise e
- if raise_exception:
- raise AppApiException(ValidCode.valid_error.value, f'校验失败,请检查参数是否正确: {str(e)}')
- else:
- return False
- return True
-
- def encryption_dict(self, model: Dict[str, object]):
- return {**model, 'api_key': super().encryption(model.get('api_key', ''))}
-
- api_base = forms.TextInputField('API 域名', required=True)
- api_key = forms.PasswordInputField('API Key', required=True)
-
+from django.utils.translation import gettext_lazy as _
openai_llm_model_credential = OpenAILLMModelCredential()
-
-model_dict = {
- 'gpt-3.5-turbo': ModelInfo('gpt-3.5-turbo', '最新的gpt-3.5-turbo,随OpenAI调整而更新', ModelTypeConst.LLM,
- openai_llm_model_credential,
- ),
- 'gpt-4': ModelInfo('gpt-4', '最新的gpt-4,随OpenAI调整而更新', ModelTypeConst.LLM, openai_llm_model_credential,
- ),
- 'gpt-4o': ModelInfo('gpt-4o', '最新的GPT-4o,比gpt-4-turbo更便宜、更快,随OpenAI调整而更新',
- ModelTypeConst.LLM, openai_llm_model_credential,
- ),
- 'gpt-4-turbo': ModelInfo('gpt-4-turbo', '最新的gpt-4-turbo,随OpenAI调整而更新', ModelTypeConst.LLM,
- openai_llm_model_credential,
- ),
- 'gpt-4-turbo-preview': ModelInfo('gpt-4-turbo-preview', '最新的gpt-4-turbo-preview,随OpenAI调整而更新',
- ModelTypeConst.LLM, openai_llm_model_credential,
- ),
- 'gpt-3.5-turbo-0125': ModelInfo('gpt-3.5-turbo-0125',
- '2024年1月25日的gpt-3.5-turbo快照,支持上下文长度16,385 tokens', ModelTypeConst.LLM,
- openai_llm_model_credential,
- ),
- 'gpt-3.5-turbo-1106': ModelInfo('gpt-3.5-turbo-1106',
- '2023年11月6日的gpt-3.5-turbo快照,支持上下文长度16,385 tokens', ModelTypeConst.LLM,
- openai_llm_model_credential,
- ),
- 'gpt-3.5-turbo-0613': ModelInfo('gpt-3.5-turbo-0613',
- '[Legacy] 2023年6月13日的gpt-3.5-turbo快照,将于2024年6月13日弃用',
- ModelTypeConst.LLM, openai_llm_model_credential,
- ),
- 'gpt-4o-2024-05-13': ModelInfo('gpt-4o-2024-05-13',
- '2024年5月13日的gpt-4o快照,支持上下文长度128,000 tokens',
- ModelTypeConst.LLM, openai_llm_model_credential,
- ),
- 'gpt-4-turbo-2024-04-09': ModelInfo('gpt-4-turbo-2024-04-09',
- '2024年4月9日的gpt-4-turbo快照,支持上下文长度128,000 tokens',
- ModelTypeConst.LLM, openai_llm_model_credential,
- ),
- 'gpt-4-0125-preview': ModelInfo('gpt-4-0125-preview', '2024年1月25日的gpt-4-turbo快照,支持上下文长度128,000 tokens',
- ModelTypeConst.LLM, openai_llm_model_credential,
- ),
- 'gpt-4-1106-preview': ModelInfo('gpt-4-1106-preview', '2023年11月6日的gpt-4-turbo快照,支持上下文长度128,000 tokens',
- ModelTypeConst.LLM, openai_llm_model_credential,
- ),
-}
+openai_stt_model_credential = OpenAISTTModelCredential()
+openai_tts_model_credential = OpenAITTSModelCredential()
+openai_image_model_credential = OpenAIImageModelCredential()
+openai_tti_model_credential = OpenAITextToImageModelCredential()
+model_info_list = [
+ ModelInfo('gpt-3.5-turbo', _('The latest gpt-3.5-turbo, updated with OpenAI adjustments'), ModelTypeConst.LLM,
+ openai_llm_model_credential, OpenAIChatModel
+ ),
+ ModelInfo('gpt-4', _('Latest gpt-4, updated with OpenAI adjustments'), ModelTypeConst.LLM, openai_llm_model_credential,
+ OpenAIChatModel),
+ ModelInfo('gpt-4o', _('The latest GPT-4o, cheaper and faster than gpt-4-turbo, updated with OpenAI adjustments'),
+ ModelTypeConst.LLM, openai_llm_model_credential,
+ OpenAIChatModel),
+ ModelInfo('gpt-4o-mini', _('The latest gpt-4o-mini, cheaper and faster than gpt-4o, updated with OpenAI adjustments'),
+ ModelTypeConst.LLM, openai_llm_model_credential,
+ OpenAIChatModel),
+ ModelInfo('gpt-4-turbo', _('The latest gpt-4-turbo, updated with OpenAI adjustments'), ModelTypeConst.LLM,
+ openai_llm_model_credential,
+ OpenAIChatModel),
+ ModelInfo('gpt-4-turbo-preview', _('The latest gpt-4-turbo-preview, updated with OpenAI adjustments'),
+ ModelTypeConst.LLM, openai_llm_model_credential,
+ OpenAIChatModel),
+ ModelInfo('gpt-3.5-turbo-0125',
+ _('gpt-3.5-turbo snapshot on January 25, 2024, supporting context length 16,385 tokens'), ModelTypeConst.LLM,
+ openai_llm_model_credential,
+ OpenAIChatModel),
+ ModelInfo('gpt-3.5-turbo-1106',
+ _('gpt-3.5-turbo snapshot on November 6, 2023, supporting context length 16,385 tokens'), ModelTypeConst.LLM,
+ openai_llm_model_credential,
+ OpenAIChatModel),
+ ModelInfo('gpt-3.5-turbo-0613',
+ _('[Legacy] gpt-3.5-turbo snapshot on June 13, 2023, will be deprecated on June 13, 2024'),
+ ModelTypeConst.LLM, openai_llm_model_credential,
+ OpenAIChatModel),
+ ModelInfo('gpt-4o-2024-05-13',
+ _('gpt-4o snapshot on May 13, 2024, supporting context length 128,000 tokens'),
+ ModelTypeConst.LLM, openai_llm_model_credential,
+ OpenAIChatModel),
+ ModelInfo('gpt-4-turbo-2024-04-09',
+ _('gpt-4-turbo snapshot on April 9, 2024, supporting context length 128,000 tokens'),
+ ModelTypeConst.LLM, openai_llm_model_credential,
+ OpenAIChatModel),
+ ModelInfo('gpt-4-0125-preview', _('gpt-4-turbo snapshot on January 25, 2024, supporting context length 128,000 tokens'),
+ ModelTypeConst.LLM, openai_llm_model_credential,
+ OpenAIChatModel),
+ ModelInfo('gpt-4-1106-preview', _('gpt-4-turbo snapshot on November 6, 2023, supporting context length 128,000 tokens'),
+ ModelTypeConst.LLM, openai_llm_model_credential,
+ OpenAIChatModel),
+ ModelInfo('whisper-1', '',
+ ModelTypeConst.STT, openai_stt_model_credential,
+ OpenAISpeechToText),
+ ModelInfo('tts-1', '',
+ ModelTypeConst.TTS, openai_tts_model_credential,
+ OpenAITextToSpeech)
+]
+open_ai_embedding_credential = OpenAIEmbeddingCredential()
+model_info_embedding_list = [
+ ModelInfo('text-embedding-ada-002', '',
+ ModelTypeConst.EMBEDDING, open_ai_embedding_credential,
+ OpenAIEmbeddingModel),
+ ModelInfo('text-embedding-3-small', '',
+ ModelTypeConst.EMBEDDING, open_ai_embedding_credential,
+ OpenAIEmbeddingModel),
+ ModelInfo('text-embedding-3-large', '',
+ ModelTypeConst.EMBEDDING, open_ai_embedding_credential,
+ OpenAIEmbeddingModel)
+]
+
+model_info_image_list = [
+ ModelInfo('gpt-4o', _('The latest GPT-4o, cheaper and faster than gpt-4-turbo, updated with OpenAI adjustments'),
+ ModelTypeConst.IMAGE, openai_image_model_credential,
+ OpenAIImage),
+ ModelInfo('gpt-4o-mini', _('The latest gpt-4o-mini, cheaper and faster than gpt-4o, updated with OpenAI adjustments'),
+ ModelTypeConst.IMAGE, openai_image_model_credential,
+ OpenAIImage),
+]
+
+model_info_tti_list = [
+ ModelInfo('dall-e-2', '',
+ ModelTypeConst.TTI, openai_tti_model_credential,
+ OpenAITextToImage),
+ ModelInfo('dall-e-3', '',
+ ModelTypeConst.TTI, openai_tti_model_credential,
+ OpenAITextToImage),
+]
+
+model_info_manage = (
+ ModelInfoManage.builder()
+ .append_model_info_list(model_info_list)
+ .append_default_model_info(ModelInfo('gpt-3.5-turbo', _('The latest gpt-3.5-turbo, updated with OpenAI adjustments'), ModelTypeConst.LLM,
+ openai_llm_model_credential, OpenAIChatModel
+ ))
+ .append_model_info_list(model_info_embedding_list)
+ .append_default_model_info(model_info_embedding_list[0])
+ .append_model_info_list(model_info_image_list)
+ .append_default_model_info(model_info_image_list[0])
+ .append_model_info_list(model_info_tti_list)
+ .append_default_model_info(model_info_tti_list[0])
+ .append_default_model_info(ModelInfo('whisper-1', '',
+ ModelTypeConst.STT, openai_stt_model_credential,
+ OpenAISpeechToText)
+ )
+ .append_default_model_info(ModelInfo('tts-1', '',
+ ModelTypeConst.TTS, openai_tts_model_credential,
+ OpenAITextToSpeech))
+ .build()
+)
class OpenAIModelProvider(IModelProvider):
- def get_dialogue_number(self):
- return 3
-
- def get_model(self, model_type, model_name, model_credential: Dict[str, object], **model_kwargs) -> OpenAIChatModel:
- azure_chat_open_ai = OpenAIChatModel(
- model=model_name,
- openai_api_base=model_credential.get('api_base'),
- openai_api_key=model_credential.get('api_key')
- )
- return azure_chat_open_ai
-
- def get_model_credential(self, model_type, model_name):
- if model_name in model_dict:
- return model_dict.get(model_name).model_credential
- return openai_llm_model_credential
+ def get_model_info_manage(self):
+ return model_info_manage
def get_model_provide_info(self):
return ModelProvideInfo(provider='model_openai_provider', name='OpenAI', icon=get_file_content(
os.path.join(PROJECT_DIR, "apps", "setting", 'models_provider', 'impl', 'openai_model_provider', 'icon',
'openai_icon_svg')))
-
- def get_model_list(self, model_type: str):
- if model_type is None:
- raise AppApiException(500, '模型类型不能为空')
- return [model_dict.get(key).to_dict() for key in
- list(filter(lambda key: model_dict.get(key).model_type == model_type, model_dict.keys()))]
-
- def get_model_type_list(self):
- return [{'key': "大语言模型", 'value': "LLM"}]
diff --git a/apps/setting/models_provider/impl/qwen_model_provider/credential/image.py b/apps/setting/models_provider/impl/qwen_model_provider/credential/image.py
new file mode 100644
index 00000000000..3f3caafa0fd
--- /dev/null
+++ b/apps/setting/models_provider/impl/qwen_model_provider/credential/image.py
@@ -0,0 +1,78 @@
+# coding=utf-8
+"""
+ @project: MaxKB
+ @Author:虎
+ @file: llm.py
+ @date:2024/7/11 18:41
+ @desc:
+"""
+import traceback
+from typing import Dict
+
+from django.utils.translation import gettext_lazy as _, gettext
+from langchain_core.messages import HumanMessage
+
+from common import forms
+from common.exception.app_exception import AppApiException
+from common.forms import BaseForm, TooltipLabel
+from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode
+
+
+class QwenModelParams(BaseForm):
+ temperature = forms.SliderField(TooltipLabel(_('Temperature'),
+ _('Higher values make the output more random, while lower values make it more focused and deterministic')),
+ required=True, default_value=1.0,
+ _min=0.1,
+ _max=1.9,
+ _step=0.01,
+ precision=2)
+
+ max_tokens = forms.SliderField(
+ TooltipLabel(_('Output the maximum Tokens'),
+ _('Specify the maximum number of tokens that the model can generate')),
+ required=True, default_value=800,
+ _min=1,
+ _max=100000,
+ _step=1,
+ precision=0)
+
+
+class QwenVLModelCredential(BaseForm, BaseModelCredential):
+
+ def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], model_params, provider,
+ raise_exception=False):
+ model_type_list = provider.get_model_type_list()
+ if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))):
+ raise AppApiException(ValidCode.valid_error.value,
+ gettext('{model_type} Model type is not supported').format(model_type=model_type))
+ for key in ['api_key']:
+ if key not in model_credential:
+ if raise_exception:
+ raise AppApiException(ValidCode.valid_error.value, gettext('{key} is required').format(key=key))
+ else:
+ return False
+ try:
+ model = provider.get_model(model_type, model_name, model_credential, **model_params)
+ res = model.stream([HumanMessage(content=[{"type": "text", "text": gettext('Hello')}])])
+ for chunk in res:
+ print(chunk)
+ except Exception as e:
+ traceback.print_exc()
+ if isinstance(e, AppApiException):
+ raise e
+ if raise_exception:
+ raise AppApiException(ValidCode.valid_error.value,
+ gettext(
+ 'Verification failed, please check whether the parameters are correct: {error}').format(
+ error=str(e)))
+ else:
+ return False
+ return True
+
+ def encryption_dict(self, model: Dict[str, object]):
+ return {**model, 'api_key': super().encryption(model.get('api_key', ''))}
+
+ api_key = forms.PasswordInputField('API Key', required=True)
+
+ def get_model_params_setting_form(self, model_name):
+ return QwenModelParams()
diff --git a/apps/setting/models_provider/impl/qwen_model_provider/credential/llm.py b/apps/setting/models_provider/impl/qwen_model_provider/credential/llm.py
new file mode 100644
index 00000000000..6aced3340a2
--- /dev/null
+++ b/apps/setting/models_provider/impl/qwen_model_provider/credential/llm.py
@@ -0,0 +1,76 @@
+# coding=utf-8
+"""
+ @project: MaxKB
+ @Author:虎
+ @file: llm.py
+ @date:2024/7/11 18:41
+ @desc:
+"""
+import traceback
+from typing import Dict
+
+from django.utils.translation import gettext_lazy as _, gettext
+from langchain_core.messages import HumanMessage
+
+from common import forms
+from common.exception.app_exception import AppApiException
+from common.forms import BaseForm, TooltipLabel
+from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode
+
+
+class QwenModelParams(BaseForm):
+ temperature = forms.SliderField(TooltipLabel(_('Temperature'),
+ _('Higher values make the output more random, while lower values make it more focused and deterministic')),
+ required=True, default_value=1.0,
+ _min=0.1,
+ _max=1.9,
+ _step=0.01,
+ precision=2)
+
+ max_tokens = forms.SliderField(
+ TooltipLabel(_('Output the maximum Tokens'),
+ _('Specify the maximum number of tokens that the model can generate')),
+ required=True, default_value=800,
+ _min=1,
+ _max=100000,
+ _step=1,
+ precision=0)
+
+
+class OpenAILLMModelCredential(BaseForm, BaseModelCredential):
+
+ def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], model_params, provider,
+ raise_exception=False):
+ model_type_list = provider.get_model_type_list()
+ if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))):
+ raise AppApiException(ValidCode.valid_error.value,
+ gettext('{model_type} Model type is not supported').format(model_type=model_type))
+ for key in ['api_key']:
+ if key not in model_credential:
+ if raise_exception:
+ raise AppApiException(ValidCode.valid_error.value, gettext('{key} is required').format(key=key))
+ else:
+ return False
+ try:
+ model = provider.get_model(model_type, model_name, model_credential, **model_params)
+ model.invoke([HumanMessage(content=gettext('Hello'))])
+ except Exception as e:
+ traceback.print_exc()
+ if isinstance(e, AppApiException):
+ raise e
+ if raise_exception:
+ raise AppApiException(ValidCode.valid_error.value,
+ gettext(
+ 'Verification failed, please check whether the parameters are correct: {error}').format(
+ error=str(e)))
+ else:
+ return False
+ return True
+
+ def encryption_dict(self, model: Dict[str, object]):
+ return {**model, 'api_key': super().encryption(model.get('api_key', ''))}
+
+ api_key = forms.PasswordInputField('API Key', required=True)
+
+ def get_model_params_setting_form(self, model_name):
+ return QwenModelParams()
diff --git a/apps/setting/models_provider/impl/qwen_model_provider/credential/tti.py b/apps/setting/models_provider/impl/qwen_model_provider/credential/tti.py
new file mode 100644
index 00000000000..cc904fe226f
--- /dev/null
+++ b/apps/setting/models_provider/impl/qwen_model_provider/credential/tti.py
@@ -0,0 +1,98 @@
+# coding=utf-8
+"""
+ @project: MaxKB
+ @Author:虎
+ @file: llm.py
+ @date:2024/7/11 18:41
+ @desc:
+"""
+import traceback
+from typing import Dict
+
+from django.utils.translation import gettext_lazy as _, gettext
+
+from common import forms
+from common.exception.app_exception import AppApiException
+from common.forms import BaseForm, TooltipLabel
+from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode
+
+
+class QwenModelParams(BaseForm):
+ size = forms.SingleSelect(
+ TooltipLabel(_('Image size'), _('Specify the size of the generated image, such as: 1024x1024')),
+ required=True,
+ default_value='1024*1024',
+ option_list=[
+ {'value': '1024*1024', 'label': '1024*1024'},
+ {'value': '720*1280', 'label': '720*1280'},
+ {'value': '768*1152', 'label': '768*1152'},
+ {'value': '1280*720', 'label': '1280*720'},
+ ],
+ text_field='label',
+ value_field='value')
+ n = forms.SliderField(
+ TooltipLabel(_('Number of pictures'), _('Specify the number of generated images')),
+ required=True, default_value=1,
+ _min=1,
+ _max=4,
+ _step=1,
+ precision=0)
+ style = forms.SingleSelect(
+ TooltipLabel(_('Style'), _('Specify the style of generated images')),
+ required=True,
+ default_value='',
+ option_list=[
+ {'value': '', 'label': _('Default value, the image style is randomly output by the model')},
+ {'value': '', 'label': _('photography')},
+ {'value': '', 'label': _('Portraits')},
+ {'value': '<3d cartoon>', 'label': _('3D cartoon')},
+ {'value': '', 'label': _('animation')},
+ {'value': '', 'label': _('painting')},
+ {'value': '', 'label': _('watercolor')},
+ {'value': '', 'label': _('sketch')},
+ {'value': '', 'label': _('Chinese painting')},
+ {'value': '', 'label': _('flat illustration')},
+ ],
+ text_field='label',
+ value_field='value'
+ )
+
+
+class QwenTextToImageModelCredential(BaseForm, BaseModelCredential):
+
+ def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], model_params, provider,
+ raise_exception=False):
+ model_type_list = provider.get_model_type_list()
+ if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))):
+ raise AppApiException(ValidCode.valid_error.value,
+ gettext('{model_type} Model type is not supported').format(model_type=model_type))
+ for key in ['api_key']:
+ if key not in model_credential:
+ if raise_exception:
+ raise AppApiException(ValidCode.valid_error.value, gettext('{key} is required').format(key=key))
+ else:
+ return False
+ try:
+ model = provider.get_model(model_type, model_name, model_credential, **model_params)
+ res = model.check_auth()
+ print(res)
+ except Exception as e:
+ traceback.print_exc()
+ if isinstance(e, AppApiException):
+ raise e
+ if raise_exception:
+ raise AppApiException(ValidCode.valid_error.value,
+ gettext(
+ 'Verification failed, please check whether the parameters are correct: {error}').format(
+ error=str(e)))
+ else:
+ return False
+ return True
+
+ def encryption_dict(self, model: Dict[str, object]):
+ return {**model, 'api_key': super().encryption(model.get('api_key', ''))}
+
+ api_key = forms.PasswordInputField('API Key', required=True)
+
+ def get_model_params_setting_form(self, model_name):
+ return QwenModelParams()
diff --git a/apps/setting/models_provider/impl/qwen_model_provider/model/image.py b/apps/setting/models_provider/impl/qwen_model_provider/model/image.py
new file mode 100644
index 00000000000..bf3af0e3484
--- /dev/null
+++ b/apps/setting/models_provider/impl/qwen_model_provider/model/image.py
@@ -0,0 +1,25 @@
+# coding=utf-8
+
+from typing import Dict
+
+from setting.models_provider.base_model_provider import MaxKBBaseModel
+from setting.models_provider.impl.base_chat_open_ai import BaseChatOpenAI
+
+
+class QwenVLChatModel(MaxKBBaseModel, BaseChatOpenAI):
+ @staticmethod
+ def is_cache_model():
+ return False
+
+ @staticmethod
+ def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs):
+ optional_params = MaxKBBaseModel.filter_optional_params(model_kwargs)
+ chat_tong_yi = QwenVLChatModel(
+ model_name=model_name,
+ openai_api_key=model_credential.get('api_key'),
+ openai_api_base='https://dashscope.aliyuncs.com/compatible-mode/v1',
+ streaming=True,
+ stream_usage=True,
+ extra_body=optional_params
+ )
+ return chat_tong_yi
diff --git a/apps/setting/models_provider/impl/qwen_model_provider/model/llm.py b/apps/setting/models_provider/impl/qwen_model_provider/model/llm.py
new file mode 100644
index 00000000000..c4df28af9bb
--- /dev/null
+++ b/apps/setting/models_provider/impl/qwen_model_provider/model/llm.py
@@ -0,0 +1,31 @@
+# coding=utf-8
+"""
+ @project: maxkb
+ @Author:虎
+ @file: llm.py
+ @date:2024/4/28 11:44
+ @desc:
+"""
+from typing import Dict
+
+from setting.models_provider.base_model_provider import MaxKBBaseModel
+from setting.models_provider.impl.base_chat_open_ai import BaseChatOpenAI
+
+
+class QwenChatModel(MaxKBBaseModel, BaseChatOpenAI):
+ @staticmethod
+ def is_cache_model():
+ return False
+
+ @staticmethod
+ def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs):
+ optional_params = MaxKBBaseModel.filter_optional_params(model_kwargs)
+ chat_tong_yi = QwenChatModel(
+ model_name=model_name,
+ openai_api_key=model_credential.get('api_key'),
+ openai_api_base='https://dashscope.aliyuncs.com/compatible-mode/v1',
+ streaming=True,
+ stream_usage=True,
+ extra_body=optional_params
+ )
+ return chat_tong_yi
diff --git a/apps/setting/models_provider/impl/qwen_model_provider/model/qwen_chat_model.py b/apps/setting/models_provider/impl/qwen_model_provider/model/qwen_chat_model.py
deleted file mode 100644
index d3894d1d0aa..00000000000
--- a/apps/setting/models_provider/impl/qwen_model_provider/model/qwen_chat_model.py
+++ /dev/null
@@ -1,24 +0,0 @@
-# coding=utf-8
-"""
- @project: maxkb
- @Author:虎
- @file: qwen_chat_model.py
- @date:2024/4/28 11:44
- @desc:
-"""
-from typing import List
-
-from langchain_community.chat_models import ChatTongyi
-from langchain_core.messages import BaseMessage, get_buffer_string
-
-from common.config.tokenizer_manage_config import TokenizerManage
-
-
-class QwenChatModel(ChatTongyi):
- def get_num_tokens_from_messages(self, messages: List[BaseMessage]) -> int:
- tokenizer = TokenizerManage.get_tokenizer()
- return sum([len(tokenizer.encode(get_buffer_string([m]))) for m in messages])
-
- def get_num_tokens(self, text: str) -> int:
- tokenizer = TokenizerManage.get_tokenizer()
- return len(tokenizer.encode(text))
diff --git a/apps/setting/models_provider/impl/qwen_model_provider/model/tti.py b/apps/setting/models_provider/impl/qwen_model_provider/model/tti.py
new file mode 100644
index 00000000000..c39e1b3a7fc
--- /dev/null
+++ b/apps/setting/models_provider/impl/qwen_model_provider/model/tti.py
@@ -0,0 +1,59 @@
+# coding=utf-8
+from http import HTTPStatus
+from typing import Dict
+
+from dashscope import ImageSynthesis
+from django.utils.translation import gettext
+from langchain_community.chat_models import ChatTongyi
+from langchain_core.messages import HumanMessage
+
+from setting.models_provider.base_model_provider import MaxKBBaseModel
+from setting.models_provider.impl.base_tti import BaseTextToImage
+
+
+class QwenTextToImageModel(MaxKBBaseModel, BaseTextToImage):
+ api_key: str
+ model_name: str
+ params: dict
+
+ def __init__(self, **kwargs):
+ super().__init__(**kwargs)
+ self.api_key = kwargs.get('api_key')
+ self.model_name = kwargs.get('model_name')
+ self.params = kwargs.get('params')
+
+ @staticmethod
+ def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs):
+ optional_params = {'params': {'size': '1024*1024', 'style': '', 'n': 1}}
+ for key, value in model_kwargs.items():
+ if key not in ['model_id', 'use_local', 'streaming']:
+ optional_params['params'][key] = value
+ chat_tong_yi = QwenTextToImageModel(
+ model_name=model_name,
+ api_key=model_credential.get('api_key'),
+ **optional_params,
+ )
+ return chat_tong_yi
+
+ def is_cache_model(self):
+ return False
+
+ def check_auth(self):
+ chat = ChatTongyi(api_key=self.api_key, model_name='qwen-max')
+ chat.invoke([HumanMessage([{"type": "text", "text": gettext('Hello')}])])
+
+ def generate_image(self, prompt: str, negative_prompt: str = None):
+ # api_base='https://dashscope.aliyuncs.com/compatible-mode/v1',
+ rsp = ImageSynthesis.call(api_key=self.api_key,
+ model=self.model_name,
+ prompt=prompt,
+ negative_prompt=negative_prompt,
+ **self.params)
+ file_urls = []
+ if rsp.status_code == HTTPStatus.OK:
+ for result in rsp.output.results:
+ file_urls.append(result.url)
+ else:
+ print('sync_call Failed, status_code: %s, code: %s, message: %s' %
+ (rsp.status_code, rsp.code, rsp.message))
+ return file_urls
diff --git a/apps/setting/models_provider/impl/qwen_model_provider/qwen_model_provider.py b/apps/setting/models_provider/impl/qwen_model_provider/qwen_model_provider.py
index 179f90368b0..48328116356 100644
--- a/apps/setting/models_provider/impl/qwen_model_provider/qwen_model_provider.py
+++ b/apps/setting/models_provider/impl/qwen_model_provider/qwen_model_provider.py
@@ -7,87 +7,59 @@
@desc:
"""
import os
-from typing import Dict
-from langchain.schema import HumanMessage
-from langchain_community.chat_models.tongyi import ChatTongyi
-
-from common import forms
-from common.exception.app_exception import AppApiException
-from common.forms import BaseForm
from common.util.file_util import get_file_content
-from setting.models_provider.base_model_provider import ModelProvideInfo, ModelTypeConst, BaseModelCredential, \
- ModelInfo, IModelProvider, ValidCode
-from setting.models_provider.impl.qwen_model_provider.model.qwen_chat_model import QwenChatModel
+from setting.models_provider.base_model_provider import ModelProvideInfo, ModelTypeConst, ModelInfo, IModelProvider, \
+ ModelInfoManage
+from setting.models_provider.impl.qwen_model_provider.credential.image import QwenVLModelCredential
+from setting.models_provider.impl.qwen_model_provider.credential.llm import OpenAILLMModelCredential
+from setting.models_provider.impl.qwen_model_provider.credential.tti import QwenTextToImageModelCredential
+from setting.models_provider.impl.qwen_model_provider.model.image import QwenVLChatModel
+
+from setting.models_provider.impl.qwen_model_provider.model.llm import QwenChatModel
+from setting.models_provider.impl.qwen_model_provider.model.tti import QwenTextToImageModel
from smartdoc.conf import PROJECT_DIR
-
-
-class OpenAILLMModelCredential(BaseForm, BaseModelCredential):
-
- def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], raise_exception=False):
- model_type_list = QwenModelProvider().get_model_type_list()
- if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))):
- raise AppApiException(ValidCode.valid_error.value, f'{model_type} 模型类型不支持')
- for key in ['api_key']:
- if key not in model_credential:
- if raise_exception:
- raise AppApiException(ValidCode.valid_error.value, f'{key} 字段为必填字段')
- else:
- return False
- try:
- model = QwenModelProvider().get_model(model_type, model_name, model_credential)
- model.invoke([HumanMessage(content='你好')])
- except Exception as e:
- if isinstance(e, AppApiException):
- raise e
- if raise_exception:
- raise AppApiException(ValidCode.valid_error.value, f'校验失败,请检查参数是否正确: {str(e)}')
- else:
- return False
- return True
-
- def encryption_dict(self, model: Dict[str, object]):
- return {**model, 'api_key': super().encryption(model.get('api_key', ''))}
-
- api_key = forms.PasswordInputField('API Key', required=True)
-
+from django.utils.translation import gettext as _
qwen_model_credential = OpenAILLMModelCredential()
-
-model_dict = {
- 'qwen-turbo': ModelInfo('qwen-turbo', '', ModelTypeConst.LLM, qwen_model_credential),
- 'qwen-plus': ModelInfo('qwen-plus', '', ModelTypeConst.LLM, qwen_model_credential),
- 'qwen-max': ModelInfo('qwen-max', '', ModelTypeConst.LLM, qwen_model_credential)
-}
+qwenvl_model_credential = QwenVLModelCredential()
+qwentti_model_credential = QwenTextToImageModelCredential()
+
+module_info_list = [
+ ModelInfo('qwen-turbo', '', ModelTypeConst.LLM, qwen_model_credential, QwenChatModel),
+ ModelInfo('qwen-plus', '', ModelTypeConst.LLM, qwen_model_credential, QwenChatModel),
+ ModelInfo('qwen-max', '', ModelTypeConst.LLM, qwen_model_credential, QwenChatModel)
+]
+module_info_vl_list = [
+ ModelInfo('qwen-vl-max', '', ModelTypeConst.IMAGE, qwenvl_model_credential, QwenVLChatModel),
+ ModelInfo('qwen-vl-max-0809', '', ModelTypeConst.IMAGE, qwenvl_model_credential, QwenVLChatModel),
+ ModelInfo('qwen-vl-plus-0809', '', ModelTypeConst.IMAGE, qwenvl_model_credential, QwenVLChatModel),
+]
+module_info_tti_list = [
+ ModelInfo('wanx-v1',
+ _('Tongyi Wanxiang - a large image model for text generation, supports bilingual input in Chinese and English, and supports the input of reference pictures for reference content or reference style migration. Key styles include but are not limited to watercolor, oil painting, Chinese painting, sketch, flat illustration, two-dimensional, and 3D. Cartoon.'),
+ ModelTypeConst.TTI, qwentti_model_credential, QwenTextToImageModel),
+]
+
+model_info_manage = (
+ ModelInfoManage.builder()
+ .append_model_info_list(module_info_list)
+ .append_default_model_info(
+ ModelInfo('qwen-turbo', '', ModelTypeConst.LLM, qwen_model_credential, QwenChatModel))
+ .append_model_info_list(module_info_vl_list)
+ .append_default_model_info(module_info_vl_list[0])
+ .append_model_info_list(module_info_tti_list)
+ .append_default_model_info(module_info_tti_list[0])
+ .build()
+)
class QwenModelProvider(IModelProvider):
- def get_dialogue_number(self):
- return 3
-
- def get_model(self, model_type, model_name, model_credential: Dict[str, object], **model_kwargs) -> ChatTongyi:
- chat_tong_yi = QwenChatModel(
- model_name=model_name,
- dashscope_api_key=model_credential.get('api_key')
- )
- return chat_tong_yi
-
- def get_model_credential(self, model_type, model_name):
- if model_name in model_dict:
- return model_dict.get(model_name).model_credential
- return qwen_model_credential
+ def get_model_info_manage(self):
+ return model_info_manage
def get_model_provide_info(self):
- return ModelProvideInfo(provider='model_qwen_provider', name='通义千问', icon=get_file_content(
+ return ModelProvideInfo(provider='model_qwen_provider', name=_('Tongyi Qianwen'), icon=get_file_content(
os.path.join(PROJECT_DIR, "apps", "setting", 'models_provider', 'impl', 'qwen_model_provider', 'icon',
'qwen_icon_svg')))
-
- def get_model_list(self, model_type: str):
- if model_type is None:
- raise AppApiException(500, '模型类型不能为空')
- return [model_dict.get(key).to_dict() for key in
- list(filter(lambda key: model_dict.get(key).model_type == model_type, model_dict.keys()))]
-
- def get_model_type_list(self):
- return [{'key': "大语言模型", 'value': "LLM"}]
diff --git a/apps/setting/models_provider/impl/regolo_model_provider/__init__.py b/apps/setting/models_provider/impl/regolo_model_provider/__init__.py
new file mode 100644
index 00000000000..2dc4ab10db4
--- /dev/null
+++ b/apps/setting/models_provider/impl/regolo_model_provider/__init__.py
@@ -0,0 +1,8 @@
+# coding=utf-8
+"""
+ @project: maxkb
+ @Author:虎
+ @file: __init__.py.py
+ @date:2024/3/28 16:25
+ @desc:
+"""
diff --git a/apps/setting/models_provider/impl/regolo_model_provider/credential/embedding.py b/apps/setting/models_provider/impl/regolo_model_provider/credential/embedding.py
new file mode 100644
index 00000000000..ddea7fed52d
--- /dev/null
+++ b/apps/setting/models_provider/impl/regolo_model_provider/credential/embedding.py
@@ -0,0 +1,52 @@
+# coding=utf-8
+"""
+ @project: MaxKB
+ @Author:虎
+ @file: embedding.py
+ @date:2024/7/12 16:45
+ @desc:
+"""
+import traceback
+from typing import Dict
+
+from django.utils.translation import gettext as _
+
+from common import forms
+from common.exception.app_exception import AppApiException
+from common.forms import BaseForm
+from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode
+
+
+class RegoloEmbeddingCredential(BaseForm, BaseModelCredential):
+ def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], model_params, provider,
+ raise_exception=True):
+ model_type_list = provider.get_model_type_list()
+ if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))):
+ raise AppApiException(ValidCode.valid_error.value,
+ _('{model_type} Model type is not supported').format(model_type=model_type))
+
+ for key in ['api_key']:
+ if key not in model_credential:
+ if raise_exception:
+ raise AppApiException(ValidCode.valid_error.value, _('{key} is required').format(key=key))
+ else:
+ return False
+ try:
+ model = provider.get_model(model_type, model_name, model_credential)
+ model.embed_query(_('Hello'))
+ except Exception as e:
+ traceback.print_exc()
+ if isinstance(e, AppApiException):
+ raise e
+ if raise_exception:
+ raise AppApiException(ValidCode.valid_error.value,
+ _('Verification failed, please check whether the parameters are correct: {error}').format(
+ error=str(e)))
+ else:
+ return False
+ return True
+
+ def encryption_dict(self, model: Dict[str, object]):
+ return {**model, 'api_key': super().encryption(model.get('api_key', ''))}
+
+ api_key = forms.PasswordInputField('API Key', required=True)
diff --git a/apps/setting/models_provider/impl/regolo_model_provider/credential/image.py b/apps/setting/models_provider/impl/regolo_model_provider/credential/image.py
new file mode 100644
index 00000000000..5975c774806
--- /dev/null
+++ b/apps/setting/models_provider/impl/regolo_model_provider/credential/image.py
@@ -0,0 +1,74 @@
+# coding=utf-8
+import base64
+import os
+import traceback
+from typing import Dict
+
+from langchain_core.messages import HumanMessage
+
+from common import forms
+from common.exception.app_exception import AppApiException
+from common.forms import BaseForm, TooltipLabel
+from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode
+from django.utils.translation import gettext_lazy as _, gettext
+
+
+class RegoloImageModelParams(BaseForm):
+ temperature = forms.SliderField(TooltipLabel(_('Temperature'),
+ _('Higher values make the output more random, while lower values make it more focused and deterministic')),
+ required=True, default_value=0.7,
+ _min=0.1,
+ _max=1.0,
+ _step=0.01,
+ precision=2)
+
+ max_tokens = forms.SliderField(
+ TooltipLabel(_('Output the maximum Tokens'),
+ _('Specify the maximum number of tokens that the model can generate')),
+ required=True, default_value=800,
+ _min=1,
+ _max=100000,
+ _step=1,
+ precision=0)
+
+
+class RegoloImageModelCredential(BaseForm, BaseModelCredential):
+ api_base = forms.TextInputField('API URL', required=True)
+ api_key = forms.PasswordInputField('API Key', required=True)
+
+ def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], model_params, provider,
+ raise_exception=False):
+ model_type_list = provider.get_model_type_list()
+ if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))):
+ raise AppApiException(ValidCode.valid_error.value,
+ gettext('{model_type} Model type is not supported').format(model_type=model_type))
+
+ for key in ['api_key']:
+ if key not in model_credential:
+ if raise_exception:
+ raise AppApiException(ValidCode.valid_error.value, gettext('{key} is required').format(key=key))
+ else:
+ return False
+ try:
+ model = provider.get_model(model_type, model_name, model_credential, **model_params)
+ res = model.stream([HumanMessage(content=[{"type": "text", "text": gettext('Hello')}])])
+ for chunk in res:
+ print(chunk)
+ except Exception as e:
+ traceback.print_exc()
+ if isinstance(e, AppApiException):
+ raise e
+ if raise_exception:
+ raise AppApiException(ValidCode.valid_error.value,
+ gettext(
+ 'Verification failed, please check whether the parameters are correct: {error}').format(
+ error=str(e)))
+ else:
+ return False
+ return True
+
+ def encryption_dict(self, model: Dict[str, object]):
+ return {**model, 'api_key': super().encryption(model.get('api_key', ''))}
+
+ def get_model_params_setting_form(self, model_name):
+ return RegoloImageModelParams()
diff --git a/apps/setting/models_provider/impl/regolo_model_provider/credential/llm.py b/apps/setting/models_provider/impl/regolo_model_provider/credential/llm.py
new file mode 100644
index 00000000000..60eb4ff0abf
--- /dev/null
+++ b/apps/setting/models_provider/impl/regolo_model_provider/credential/llm.py
@@ -0,0 +1,78 @@
+# coding=utf-8
+"""
+ @project: MaxKB
+ @Author:虎
+ @file: llm.py
+ @date:2024/7/11 18:32
+ @desc:
+"""
+import traceback
+from typing import Dict
+
+from django.utils.translation import gettext_lazy as _, gettext
+from langchain_core.messages import HumanMessage
+
+from common import forms
+from common.exception.app_exception import AppApiException
+from common.forms import BaseForm, TooltipLabel
+from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode
+
+
+class RegoloLLMModelParams(BaseForm):
+ temperature = forms.SliderField(TooltipLabel(_('Temperature'),
+ _('Higher values make the output more random, while lower values make it more focused and deterministic')),
+ required=True, default_value=0.7,
+ _min=0.1,
+ _max=1.0,
+ _step=0.01,
+ precision=2)
+
+ max_tokens = forms.SliderField(
+ TooltipLabel(_('Output the maximum Tokens'),
+ _('Specify the maximum number of tokens that the model can generate')),
+ required=True, default_value=800,
+ _min=1,
+ _max=100000,
+ _step=1,
+ precision=0)
+
+
+class RegoloLLMModelCredential(BaseForm, BaseModelCredential):
+
+ def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], model_params, provider,
+ raise_exception=False):
+ model_type_list = provider.get_model_type_list()
+ if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))):
+ raise AppApiException(ValidCode.valid_error.value,
+ gettext('{model_type} Model type is not supported').format(model_type=model_type))
+
+ for key in ['api_key']:
+ if key not in model_credential:
+ if raise_exception:
+ raise AppApiException(ValidCode.valid_error.value, gettext('{key} is required').format(key=key))
+ else:
+ return False
+ try:
+
+ model = provider.get_model(model_type, model_name, model_credential, **model_params)
+ model.invoke([HumanMessage(content=gettext('Hello'))])
+ except Exception as e:
+ traceback.print_exc()
+ if isinstance(e, AppApiException):
+ raise e
+ if raise_exception:
+ raise AppApiException(ValidCode.valid_error.value,
+ gettext(
+ 'Verification failed, please check whether the parameters are correct: {error}').format(
+ error=str(e)))
+ else:
+ return False
+ return True
+
+ def encryption_dict(self, model: Dict[str, object]):
+ return {**model, 'api_key': super().encryption(model.get('api_key', ''))}
+
+ api_key = forms.PasswordInputField('API Key', required=True)
+
+ def get_model_params_setting_form(self, model_name):
+ return RegoloLLMModelParams()
diff --git a/apps/setting/models_provider/impl/regolo_model_provider/credential/tti.py b/apps/setting/models_provider/impl/regolo_model_provider/credential/tti.py
new file mode 100644
index 00000000000..88f46ce4143
--- /dev/null
+++ b/apps/setting/models_provider/impl/regolo_model_provider/credential/tti.py
@@ -0,0 +1,89 @@
+# coding=utf-8
+import traceback
+from typing import Dict
+
+from django.utils.translation import gettext_lazy as _, gettext
+
+from common import forms
+from common.exception.app_exception import AppApiException
+from common.forms import BaseForm, TooltipLabel
+from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode
+
+
+class RegoloTTIModelParams(BaseForm):
+ size = forms.SingleSelect(
+ TooltipLabel(_('Image size'),
+ _('The image generation endpoint allows you to create raw images based on text prompts. ')),
+ required=True,
+ default_value='1024x1024',
+ option_list=[
+ {'value': '1024x1024', 'label': '1024x1024'},
+ {'value': '1024x1792', 'label': '1024x1792'},
+ {'value': '1792x1024', 'label': '1792x1024'},
+ ],
+ text_field='label',
+ value_field='value'
+ )
+
+ quality = forms.SingleSelect(
+ TooltipLabel(_('Picture quality'), _('''
+By default, images are produced in standard quality.
+ ''')),
+ required=True,
+ default_value='standard',
+ option_list=[
+ {'value': 'standard', 'label': 'standard'},
+ {'value': 'hd', 'label': 'hd'},
+ ],
+ text_field='label',
+ value_field='value'
+ )
+
+ n = forms.SliderField(
+ TooltipLabel(_('Number of pictures'),
+ _('1 as default')),
+ required=True, default_value=1,
+ _min=1,
+ _max=10,
+ _step=1,
+ precision=0)
+
+
+class RegoloTextToImageModelCredential(BaseForm, BaseModelCredential):
+ api_key = forms.PasswordInputField('API Key', required=True)
+
+ def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], model_params, provider,
+ raise_exception=False):
+ model_type_list = provider.get_model_type_list()
+ if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))):
+ raise AppApiException(ValidCode.valid_error.value,
+ gettext('{model_type} Model type is not supported').format(model_type=model_type))
+
+ for key in ['api_key']:
+ if key not in model_credential:
+ if raise_exception:
+ raise AppApiException(ValidCode.valid_error.value, gettext('{key} is required').format(key=key))
+ else:
+ return False
+ try:
+ model = provider.get_model(model_type, model_name, model_credential, **model_params)
+ res = model.check_auth()
+ print(res)
+ except Exception as e:
+ traceback.print_exc()
+ if isinstance(e, AppApiException):
+ raise e
+ if raise_exception:
+ raise AppApiException(ValidCode.valid_error.value,
+ gettext(
+ 'Verification failed, please check whether the parameters are correct: {error}').format(
+ error=str(e)))
+ else:
+ return False
+ return True
+
+ def encryption_dict(self, model: Dict[str, object]):
+ return {**model, 'api_key': super().encryption(model.get('api_key', ''))}
+
+ def get_model_params_setting_form(self, model_name):
+ return RegoloTTIModelParams()
diff --git a/apps/setting/models_provider/impl/regolo_model_provider/icon/regolo_icon_svg b/apps/setting/models_provider/impl/regolo_model_provider/icon/regolo_icon_svg
new file mode 100644
index 00000000000..b69154451ad
--- /dev/null
+++ b/apps/setting/models_provider/impl/regolo_model_provider/icon/regolo_icon_svg
@@ -0,0 +1,64 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/apps/setting/models_provider/impl/regolo_model_provider/model/embedding.py b/apps/setting/models_provider/impl/regolo_model_provider/model/embedding.py
new file mode 100644
index 00000000000..b067b8eff29
--- /dev/null
+++ b/apps/setting/models_provider/impl/regolo_model_provider/model/embedding.py
@@ -0,0 +1,23 @@
+# coding=utf-8
+"""
+ @project: MaxKB
+ @Author:虎
+ @file: embedding.py
+ @date:2024/7/12 17:44
+ @desc:
+"""
+from typing import Dict
+
+from langchain_community.embeddings import OpenAIEmbeddings
+
+from setting.models_provider.base_model_provider import MaxKBBaseModel
+
+
+class RegoloEmbeddingModel(MaxKBBaseModel, OpenAIEmbeddings):
+ @staticmethod
+ def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs):
+ return RegoloEmbeddingModel(
+ api_key=model_credential.get('api_key'),
+ model=model_name,
+ openai_api_base="https://api.regolo.ai/v1",
+ )
diff --git a/apps/setting/models_provider/impl/regolo_model_provider/model/image.py b/apps/setting/models_provider/impl/regolo_model_provider/model/image.py
new file mode 100644
index 00000000000..f16768fad1e
--- /dev/null
+++ b/apps/setting/models_provider/impl/regolo_model_provider/model/image.py
@@ -0,0 +1,19 @@
+from typing import Dict
+
+from setting.models_provider.base_model_provider import MaxKBBaseModel
+from setting.models_provider.impl.base_chat_open_ai import BaseChatOpenAI
+
+
+class RegoloImage(MaxKBBaseModel, BaseChatOpenAI):
+
+ @staticmethod
+ def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs):
+ optional_params = MaxKBBaseModel.filter_optional_params(model_kwargs)
+ return RegoloImage(
+ model_name=model_name,
+ openai_api_base="https://api.regolo.ai/v1",
+ openai_api_key=model_credential.get('api_key'),
+ streaming=True,
+ stream_usage=True,
+ extra_body=optional_params
+ )
diff --git a/apps/setting/models_provider/impl/regolo_model_provider/model/llm.py b/apps/setting/models_provider/impl/regolo_model_provider/model/llm.py
new file mode 100644
index 00000000000..126a756a20d
--- /dev/null
+++ b/apps/setting/models_provider/impl/regolo_model_provider/model/llm.py
@@ -0,0 +1,38 @@
+# coding=utf-8
+"""
+ @project: maxkb
+ @Author:虎
+ @file: llm.py
+ @date:2024/4/18 15:28
+ @desc:
+"""
+from typing import List, Dict
+
+from langchain_core.messages import BaseMessage, get_buffer_string
+from langchain_openai.chat_models import ChatOpenAI
+
+from common.config.tokenizer_manage_config import TokenizerManage
+from setting.models_provider.base_model_provider import MaxKBBaseModel
+from setting.models_provider.impl.base_chat_open_ai import BaseChatOpenAI
+
+
+def custom_get_token_ids(text: str):
+ tokenizer = TokenizerManage.get_tokenizer()
+ return tokenizer.encode(text)
+
+
+class RegoloChatModel(MaxKBBaseModel, BaseChatOpenAI):
+
+ @staticmethod
+ def is_cache_model():
+ return False
+
+ @staticmethod
+ def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs):
+ optional_params = MaxKBBaseModel.filter_optional_params(model_kwargs)
+ return RegoloChatModel(
+ model=model_name,
+ openai_api_base="https://api.regolo.ai/v1",
+ openai_api_key=model_credential.get('api_key'),
+ extra_body=optional_params
+ )
diff --git a/apps/setting/models_provider/impl/regolo_model_provider/model/tti.py b/apps/setting/models_provider/impl/regolo_model_provider/model/tti.py
new file mode 100644
index 00000000000..a92527295ac
--- /dev/null
+++ b/apps/setting/models_provider/impl/regolo_model_provider/model/tti.py
@@ -0,0 +1,58 @@
+from typing import Dict
+
+from openai import OpenAI
+
+from common.config.tokenizer_manage_config import TokenizerManage
+from setting.models_provider.base_model_provider import MaxKBBaseModel
+from setting.models_provider.impl.base_tti import BaseTextToImage
+
+
+def custom_get_token_ids(text: str):
+ tokenizer = TokenizerManage.get_tokenizer()
+ return tokenizer.encode(text)
+
+
+class RegoloTextToImage(MaxKBBaseModel, BaseTextToImage):
+ api_base: str
+ api_key: str
+ model: str
+ params: dict
+
+ def __init__(self, **kwargs):
+ super().__init__(**kwargs)
+ self.api_key = kwargs.get('api_key')
+ self.api_base = "https://api.regolo.ai/v1"
+ self.model = kwargs.get('model')
+ self.params = kwargs.get('params')
+
+ @staticmethod
+ def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs):
+ optional_params = {'params': {'size': '1024x1024', 'quality': 'standard', 'n': 1}}
+ for key, value in model_kwargs.items():
+ if key not in ['model_id', 'use_local', 'streaming']:
+ optional_params['params'][key] = value
+ return RegoloTextToImage(
+ model=model_name,
+ api_base="https://api.regolo.ai/v1",
+ api_key=model_credential.get('api_key'),
+ **optional_params,
+ )
+
+ def is_cache_model(self):
+ return False
+
+ def check_auth(self):
+ chat = OpenAI(api_key=self.api_key, base_url=self.api_base)
+ response_list = chat.models.with_raw_response.list()
+
+        # self.generate_image('Generate a picture of a kitten')
+
+ def generate_image(self, prompt: str, negative_prompt: str = None):
+ chat = OpenAI(api_key=self.api_key, base_url=self.api_base)
+ res = chat.images.generate(model=self.model, prompt=prompt, **self.params)
+ file_urls = []
+ for content in res.data:
+ url = content.url
+ file_urls.append(url)
+
+ return file_urls
diff --git a/apps/setting/models_provider/impl/regolo_model_provider/regolo_model_provider.py b/apps/setting/models_provider/impl/regolo_model_provider/regolo_model_provider.py
new file mode 100644
index 00000000000..a5e7dc36550
--- /dev/null
+++ b/apps/setting/models_provider/impl/regolo_model_provider/regolo_model_provider.py
@@ -0,0 +1,89 @@
+# coding=utf-8
+"""
+ @project: maxkb
+ @Author:虎
+ @file: regolo_model_provider.py
+ @date:2024/3/28 16:26
+ @desc:
+"""
+import os
+
+from common.util.file_util import get_file_content
+from setting.models_provider.base_model_provider import IModelProvider, ModelProvideInfo, ModelInfo, \
+ ModelTypeConst, ModelInfoManage
+from setting.models_provider.impl.regolo_model_provider.credential.embedding import \
+ RegoloEmbeddingCredential
+from setting.models_provider.impl.regolo_model_provider.credential.llm import RegoloLLMModelCredential
+from setting.models_provider.impl.regolo_model_provider.credential.tti import \
+ RegoloTextToImageModelCredential
+from setting.models_provider.impl.regolo_model_provider.model.embedding import RegoloEmbeddingModel
+from setting.models_provider.impl.regolo_model_provider.model.llm import RegoloChatModel
+from setting.models_provider.impl.regolo_model_provider.model.tti import RegoloTextToImage
+from smartdoc.conf import PROJECT_DIR
+from django.utils.translation import gettext as _
+
+openai_llm_model_credential = RegoloLLMModelCredential()
+openai_tti_model_credential = RegoloTextToImageModelCredential()
+model_info_list = [
+ ModelInfo('Phi-4', '', ModelTypeConst.LLM,
+ openai_llm_model_credential, RegoloChatModel
+ ),
+ ModelInfo('DeepSeek-R1-Distill-Qwen-32B', '', ModelTypeConst.LLM,
+ openai_llm_model_credential,
+ RegoloChatModel),
+ ModelInfo('maestrale-chat-v0.4-beta', '',
+ ModelTypeConst.LLM, openai_llm_model_credential,
+ RegoloChatModel),
+ ModelInfo('Llama-3.3-70B-Instruct',
+ '',
+ ModelTypeConst.LLM, openai_llm_model_credential,
+ RegoloChatModel),
+ ModelInfo('Llama-3.1-8B-Instruct',
+ '',
+ ModelTypeConst.LLM, openai_llm_model_credential,
+ RegoloChatModel),
+ ModelInfo('DeepSeek-Coder-6.7B-Instruct', '',
+ ModelTypeConst.LLM, openai_llm_model_credential,
+ RegoloChatModel)
+]
+open_ai_embedding_credential = RegoloEmbeddingCredential()
+model_info_embedding_list = [
+ ModelInfo('gte-Qwen2', '',
+ ModelTypeConst.EMBEDDING, open_ai_embedding_credential,
+ RegoloEmbeddingModel),
+]
+
+model_info_tti_list = [
+ ModelInfo('FLUX.1-dev', '',
+ ModelTypeConst.TTI, openai_tti_model_credential,
+ RegoloTextToImage),
+ ModelInfo('sdxl-turbo', '',
+ ModelTypeConst.TTI, openai_tti_model_credential,
+ RegoloTextToImage),
+]
+model_info_manage = (
+ ModelInfoManage.builder()
+ .append_model_info_list(model_info_list)
+ .append_default_model_info(
+        ModelInfo('Phi-4', '', ModelTypeConst.LLM,
+ openai_llm_model_credential, RegoloChatModel
+ ))
+ .append_model_info_list(model_info_embedding_list)
+ .append_default_model_info(model_info_embedding_list[0])
+ .append_model_info_list(model_info_tti_list)
+ .append_default_model_info(model_info_tti_list[0])
+
+ .build()
+)
+
+
+class RegoloModelProvider(IModelProvider):
+
+ def get_model_info_manage(self):
+ return model_info_manage
+
+ def get_model_provide_info(self):
+ return ModelProvideInfo(provider='model_regolo_provider', name='Regolo', icon=get_file_content(
+ os.path.join(PROJECT_DIR, "apps", "setting", 'models_provider', 'impl', 'regolo_model_provider',
+ 'icon',
+ 'regolo_icon_svg')))
diff --git a/apps/setting/models_provider/impl/siliconCloud_model_provider/__init__.py b/apps/setting/models_provider/impl/siliconCloud_model_provider/__init__.py
new file mode 100644
index 00000000000..2dc4ab10db4
--- /dev/null
+++ b/apps/setting/models_provider/impl/siliconCloud_model_provider/__init__.py
@@ -0,0 +1,8 @@
+# coding=utf-8
+"""
+ @project: maxkb
+ @Author:虎
+ @file: __init__.py
+ @date:2024/3/28 16:25
+ @desc:
+"""
diff --git a/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/embedding.py b/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/embedding.py
new file mode 100644
index 00000000000..1a4f8d1d3ee
--- /dev/null
+++ b/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/embedding.py
@@ -0,0 +1,53 @@
+# coding=utf-8
+"""
+ @project: MaxKB
+ @Author:虎
+ @file: embedding.py
+ @date:2024/7/12 16:45
+ @desc:
+"""
+import traceback
+from typing import Dict
+
+from django.utils.translation import gettext as _
+
+from common import forms
+from common.exception.app_exception import AppApiException
+from common.forms import BaseForm
+from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode
+
+
+class SiliconCloudEmbeddingCredential(BaseForm, BaseModelCredential):
+ def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], model_params, provider,
+ raise_exception=True):
+ model_type_list = provider.get_model_type_list()
+ if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))):
+ raise AppApiException(ValidCode.valid_error.value,
+ _('{model_type} Model type is not supported').format(model_type=model_type))
+
+ for key in ['api_base', 'api_key']:
+ if key not in model_credential:
+ if raise_exception:
+ raise AppApiException(ValidCode.valid_error.value, _('{key} is required').format(key=key))
+ else:
+ return False
+ try:
+ model = provider.get_model(model_type, model_name, model_credential)
+ model.embed_query(_('Hello'))
+ except Exception as e:
+ traceback.print_exc()
+ if isinstance(e, AppApiException):
+ raise e
+ if raise_exception:
+ raise AppApiException(ValidCode.valid_error.value,
+ _('Verification failed, please check whether the parameters are correct: {error}').format(
+ error=str(e)))
+ else:
+ return False
+ return True
+
+ def encryption_dict(self, model: Dict[str, object]):
+ return {**model, 'api_key': super().encryption(model.get('api_key', ''))}
+
+ api_base = forms.TextInputField('API URL', required=True)
+ api_key = forms.PasswordInputField('API Key', required=True)
diff --git a/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/image.py b/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/image.py
new file mode 100644
index 00000000000..cb6c2ee9cd3
--- /dev/null
+++ b/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/image.py
@@ -0,0 +1,74 @@
+# coding=utf-8
+import base64
+import os
+import traceback
+from typing import Dict
+
+from langchain_core.messages import HumanMessage
+
+from common import forms
+from common.exception.app_exception import AppApiException
+from common.forms import BaseForm, TooltipLabel
+from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode
+from django.utils.translation import gettext_lazy as _, gettext
+
+
+class SiliconCloudImageModelParams(BaseForm):
+ temperature = forms.SliderField(TooltipLabel(_('Temperature'),
+ _('Higher values make the output more random, while lower values make it more focused and deterministic')),
+ required=True, default_value=0.7,
+ _min=0.1,
+ _max=1.0,
+ _step=0.01,
+ precision=2)
+
+ max_tokens = forms.SliderField(
+ TooltipLabel(_('Output the maximum Tokens'),
+ _('Specify the maximum number of tokens that the model can generate')),
+ required=True, default_value=800,
+ _min=1,
+ _max=100000,
+ _step=1,
+ precision=0)
+
+
+class SiliconCloudImageModelCredential(BaseForm, BaseModelCredential):
+ api_base = forms.TextInputField('API URL', required=True)
+ api_key = forms.PasswordInputField('API Key', required=True)
+
+ def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], model_params, provider,
+ raise_exception=False):
+ model_type_list = provider.get_model_type_list()
+ if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))):
+ raise AppApiException(ValidCode.valid_error.value,
+ gettext('{model_type} Model type is not supported').format(model_type=model_type))
+
+ for key in ['api_base', 'api_key']:
+ if key not in model_credential:
+ if raise_exception:
+ raise AppApiException(ValidCode.valid_error.value, gettext('{key} is required').format(key=key))
+ else:
+ return False
+ try:
+ model = provider.get_model(model_type, model_name, model_credential, **model_params)
+ res = model.stream([HumanMessage(content=[{"type": "text", "text": gettext('Hello')}])])
+ for chunk in res:
+ print(chunk)
+ except Exception as e:
+ traceback.print_exc()
+ if isinstance(e, AppApiException):
+ raise e
+ if raise_exception:
+ raise AppApiException(ValidCode.valid_error.value,
+ gettext(
+ 'Verification failed, please check whether the parameters are correct: {error}').format(
+ error=str(e)))
+ else:
+ return False
+ return True
+
+ def encryption_dict(self, model: Dict[str, object]):
+ return {**model, 'api_key': super().encryption(model.get('api_key', ''))}
+
+ def get_model_params_setting_form(self, model_name):
+ return SiliconCloudImageModelParams()
diff --git a/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/llm.py b/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/llm.py
new file mode 100644
index 00000000000..a7333eb4747
--- /dev/null
+++ b/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/llm.py
@@ -0,0 +1,79 @@
+# coding=utf-8
+"""
+ @project: MaxKB
+ @Author:虎
+ @file: llm.py
+ @date:2024/7/11 18:32
+ @desc:
+"""
+import traceback
+from typing import Dict
+
+from django.utils.translation import gettext_lazy as _, gettext
+from langchain_core.messages import HumanMessage
+
+from common import forms
+from common.exception.app_exception import AppApiException
+from common.forms import BaseForm, TooltipLabel
+from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode
+
+
+class SiliconCloudLLMModelParams(BaseForm):
+ temperature = forms.SliderField(TooltipLabel(_('Temperature'),
+ _('Higher values make the output more random, while lower values make it more focused and deterministic')),
+ required=True, default_value=0.7,
+ _min=0.1,
+ _max=1.0,
+ _step=0.01,
+ precision=2)
+
+ max_tokens = forms.SliderField(
+ TooltipLabel(_('Output the maximum Tokens'),
+ _('Specify the maximum number of tokens that the model can generate')),
+ required=True, default_value=800,
+ _min=1,
+ _max=100000,
+ _step=1,
+ precision=0)
+
+
+class SiliconCloudLLMModelCredential(BaseForm, BaseModelCredential):
+
+ def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], model_params, provider,
+ raise_exception=False):
+ model_type_list = provider.get_model_type_list()
+ if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))):
+ raise AppApiException(ValidCode.valid_error.value,
+ gettext('{model_type} Model type is not supported').format(model_type=model_type))
+
+ for key in ['api_base', 'api_key']:
+ if key not in model_credential:
+ if raise_exception:
+ raise AppApiException(ValidCode.valid_error.value, gettext('{key} is required').format(key=key))
+ else:
+ return False
+ try:
+
+ model = provider.get_model(model_type, model_name, model_credential, **model_params)
+ model.invoke([HumanMessage(content=gettext('Hello'))])
+ except Exception as e:
+ traceback.print_exc()
+ if isinstance(e, AppApiException):
+ raise e
+ if raise_exception:
+ raise AppApiException(ValidCode.valid_error.value,
+ gettext(
+ 'Verification failed, please check whether the parameters are correct: {error}').format(
+ error=str(e)))
+ else:
+ return False
+ return True
+
+ def encryption_dict(self, model: Dict[str, object]):
+ return {**model, 'api_key': super().encryption(model.get('api_key', ''))}
+
+ api_base = forms.TextInputField('API URL', required=True)
+ api_key = forms.PasswordInputField('API Key', required=True)
+
+ def get_model_params_setting_form(self, model_name):
+ return SiliconCloudLLMModelParams()
diff --git a/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/reranker.py b/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/reranker.py
new file mode 100644
index 00000000000..834aaf12034
--- /dev/null
+++ b/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/reranker.py
@@ -0,0 +1,53 @@
+# coding=utf-8
+"""
+ @project: MaxKB
+ @Author:虎
+ @file: reranker.py
+ @date:2024/9/9 17:51
+ @desc:
+"""
+import traceback
+from typing import Dict
+
+from django.utils.translation import gettext as _
+from langchain_core.documents import Document
+
+from common import forms
+from common.exception.app_exception import AppApiException
+from common.forms import BaseForm
+from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode
+from setting.models_provider.impl.siliconCloud_model_provider.model.reranker import SiliconCloudReranker
+
+
+class SiliconCloudRerankerCredential(BaseForm, BaseModelCredential):
+
+ def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], model_params, provider,
+ raise_exception=False):
+ if not model_type == 'RERANKER':
+ raise AppApiException(ValidCode.valid_error.value,
+ _('{model_type} Model type is not supported').format(model_type=model_type))
+ for key in ['api_base', 'api_key']:
+ if key not in model_credential:
+ if raise_exception:
+ raise AppApiException(ValidCode.valid_error.value, _('{key} is required').format(key=key))
+ else:
+ return False
+ try:
+ model: SiliconCloudReranker = provider.get_model(model_type, model_name, model_credential)
+ model.compress_documents([Document(page_content=_('Hello'))], _('Hello'))
+ except Exception as e:
+ traceback.print_exc()
+ if isinstance(e, AppApiException):
+ raise e
+ if raise_exception:
+ raise AppApiException(ValidCode.valid_error.value,
+ _('Verification failed, please check whether the parameters are correct: {error}').format(
+ error=str(e)))
+ else:
+ return False
+ return True
+
+ def encryption_dict(self, model: Dict[str, object]):
+ return {**model, 'api_key': super().encryption(model.get('api_key', ''))}
+ api_base = forms.TextInputField('API URL', required=True)
+ api_key = forms.PasswordInputField('API Key', required=True)
diff --git a/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/stt.py b/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/stt.py
new file mode 100644
index 00000000000..cba2e5d8736
--- /dev/null
+++ b/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/stt.py
@@ -0,0 +1,49 @@
+# coding=utf-8
+import traceback
+from typing import Dict
+
+from django.utils.translation import gettext as _
+
+from common import forms
+from common.exception.app_exception import AppApiException
+from common.forms import BaseForm
+from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode
+
+
+class SiliconCloudSTTModelCredential(BaseForm, BaseModelCredential):
+ api_base = forms.TextInputField('API URL', required=True)
+ api_key = forms.PasswordInputField('API Key', required=True)
+
+ def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], model_params, provider,
+ raise_exception=False):
+ model_type_list = provider.get_model_type_list()
+ if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))):
+ raise AppApiException(ValidCode.valid_error.value,
+ _('{model_type} Model type is not supported').format(model_type=model_type))
+
+ for key in ['api_base', 'api_key']:
+ if key not in model_credential:
+ if raise_exception:
+ raise AppApiException(ValidCode.valid_error.value, _('{key} is required').format(key=key))
+ else:
+ return False
+ try:
+ model = provider.get_model(model_type, model_name, model_credential)
+ model.check_auth()
+ except Exception as e:
+ traceback.print_exc()
+ if isinstance(e, AppApiException):
+ raise e
+ if raise_exception:
+ raise AppApiException(ValidCode.valid_error.value,
+ _('Verification failed, please check whether the parameters are correct: {error}').format(
+ error=str(e)))
+ else:
+ return False
+ return True
+
+ def encryption_dict(self, model: Dict[str, object]):
+ return {**model, 'api_key': super().encryption(model.get('api_key', ''))}
+
+ def get_model_params_setting_form(self, model_name):
+ pass
diff --git a/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/tti.py b/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/tti.py
new file mode 100644
index 00000000000..860bad4b9e2
--- /dev/null
+++ b/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/tti.py
@@ -0,0 +1,90 @@
+# coding=utf-8
+import traceback
+from typing import Dict
+
+from django.utils.translation import gettext_lazy as _, gettext
+
+from common import forms
+from common.exception.app_exception import AppApiException
+from common.forms import BaseForm, TooltipLabel
+from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode
+
+
+class SiliconCloudTTIModelParams(BaseForm):
+ size = forms.SingleSelect(
+ TooltipLabel(_('Image size'),
+ _('The image generation endpoint allows you to create raw images based on text prompts. When using the DALL·E 3, the image size can be 1024x1024, 1024x1792 or 1792x1024 pixels.')),
+ required=True,
+ default_value='1024x1024',
+ option_list=[
+ {'value': '1024x1024', 'label': '1024x1024'},
+ {'value': '1024x1792', 'label': '1024x1792'},
+ {'value': '1792x1024', 'label': '1792x1024'},
+ ],
+ text_field='label',
+ value_field='value'
+ )
+
+ quality = forms.SingleSelect(
+ TooltipLabel(_('Picture quality'), _('''
+By default, images are produced in standard quality, but with DALL·E 3 you can set quality: "hd" to enhance detail. Square, standard quality images are generated fastest.
+ ''')),
+ required=True,
+ default_value='standard',
+ option_list=[
+ {'value': 'standard', 'label': 'standard'},
+ {'value': 'hd', 'label': 'hd'},
+ ],
+ text_field='label',
+ value_field='value'
+ )
+
+ n = forms.SliderField(
+ TooltipLabel(_('Number of pictures'),
+ _('You can use DALL·E 3 to request 1 image at a time (requesting more images by issuing parallel requests), or use DALL·E 2 with the n parameter to request up to 10 images at a time.')),
+ required=True, default_value=1,
+ _min=1,
+ _max=10,
+ _step=1,
+ precision=0)
+
+
+class SiliconCloudTextToImageModelCredential(BaseForm, BaseModelCredential):
+ api_base = forms.TextInputField('API URL', required=True)
+ api_key = forms.PasswordInputField('API Key', required=True)
+
+ def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], model_params, provider,
+ raise_exception=False):
+ model_type_list = provider.get_model_type_list()
+ if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))):
+ raise AppApiException(ValidCode.valid_error.value,
+ gettext('{model_type} Model type is not supported').format(model_type=model_type))
+
+ for key in ['api_base', 'api_key']:
+ if key not in model_credential:
+ if raise_exception:
+ raise AppApiException(ValidCode.valid_error.value, gettext('{key} is required').format(key=key))
+ else:
+ return False
+ try:
+ model = provider.get_model(model_type, model_name, model_credential, **model_params)
+ res = model.check_auth()
+ print(res)
+ except Exception as e:
+ traceback.print_exc()
+ if isinstance(e, AppApiException):
+ raise e
+ if raise_exception:
+ raise AppApiException(ValidCode.valid_error.value,
+ gettext(
+ 'Verification failed, please check whether the parameters are correct: {error}').format(
+ error=str(e)))
+ else:
+ return False
+ return True
+
+ def encryption_dict(self, model: Dict[str, object]):
+ return {**model, 'api_key': super().encryption(model.get('api_key', ''))}
+
+ def get_model_params_setting_form(self, model_name):
+ return SiliconCloudTTIModelParams()
diff --git a/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/tts.py b/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/tts.py
new file mode 100644
index 00000000000..ffe003e9a42
--- /dev/null
+++ b/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/tts.py
@@ -0,0 +1,68 @@
+# coding=utf-8
+import traceback
+from typing import Dict
+
+from django.utils.translation import gettext_lazy as _, gettext
+
+from common import forms
+from common.exception.app_exception import AppApiException
+from common.forms import BaseForm, TooltipLabel
+from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode
+
+
+class SiliconCloudTTSModelGeneralParams(BaseForm):
+ # alloy, echo, fable, onyx, nova, shimmer
+ voice = forms.SingleSelect(
+ TooltipLabel('Voice',
+ _('Try out the different sounds (Alloy, Echo, Fable, Onyx, Nova, and Sparkle) to find one that suits your desired tone and audience. The current voiceover is optimized for English.')),
+ required=True, default_value='alloy',
+ text_field='value',
+ value_field='value',
+ option_list=[
+ {'text': 'alloy', 'value': 'alloy'},
+ {'text': 'echo', 'value': 'echo'},
+ {'text': 'fable', 'value': 'fable'},
+ {'text': 'onyx', 'value': 'onyx'},
+ {'text': 'nova', 'value': 'nova'},
+ {'text': 'shimmer', 'value': 'shimmer'},
+ ])
+
+
+class SiliconCloudTTSModelCredential(BaseForm, BaseModelCredential):
+ api_base = forms.TextInputField('API URL', required=True)
+ api_key = forms.PasswordInputField('API Key', required=True)
+
+ def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], model_params, provider,
+ raise_exception=False):
+ model_type_list = provider.get_model_type_list()
+ if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))):
+ raise AppApiException(ValidCode.valid_error.value,
+ gettext('{model_type} Model type is not supported').format(model_type=model_type))
+
+ for key in ['api_base', 'api_key']:
+ if key not in model_credential:
+ if raise_exception:
+ raise AppApiException(ValidCode.valid_error.value, gettext('{key} is required').format(key=key))
+ else:
+ return False
+ try:
+ model = provider.get_model(model_type, model_name, model_credential, **model_params)
+ model.check_auth()
+ except Exception as e:
+ traceback.print_exc()
+ if isinstance(e, AppApiException):
+ raise e
+ if raise_exception:
+ raise AppApiException(ValidCode.valid_error.value,
+ gettext(
+ 'Verification failed, please check whether the parameters are correct: {error}').format(
+ error=str(e)))
+ else:
+ return False
+ return True
+
+ def encryption_dict(self, model: Dict[str, object]):
+ return {**model, 'api_key': super().encryption(model.get('api_key', ''))}
+
+ def get_model_params_setting_form(self, model_name):
+ return SiliconCloudTTSModelGeneralParams()
diff --git a/apps/setting/models_provider/impl/siliconCloud_model_provider/icon/siliconCloud_icon_svg b/apps/setting/models_provider/impl/siliconCloud_model_provider/icon/siliconCloud_icon_svg
new file mode 100644
index 00000000000..339fff751d2
--- /dev/null
+++ b/apps/setting/models_provider/impl/siliconCloud_model_provider/icon/siliconCloud_icon_svg
@@ -0,0 +1,5 @@
+
+
+
+
+
diff --git a/apps/setting/models_provider/impl/siliconCloud_model_provider/model/embedding.py b/apps/setting/models_provider/impl/siliconCloud_model_provider/model/embedding.py
new file mode 100644
index 00000000000..e8b6c0f5cf1
--- /dev/null
+++ b/apps/setting/models_provider/impl/siliconCloud_model_provider/model/embedding.py
@@ -0,0 +1,23 @@
+# coding=utf-8
+"""
+ @project: MaxKB
+ @Author:虎
+ @file: embedding.py
+ @date:2024/7/12 17:44
+ @desc:
+"""
+from typing import Dict
+
+from langchain_community.embeddings import OpenAIEmbeddings
+
+from setting.models_provider.base_model_provider import MaxKBBaseModel
+
+
+class SiliconCloudEmbeddingModel(MaxKBBaseModel, OpenAIEmbeddings):
+ @staticmethod
+ def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs):
+ return SiliconCloudEmbeddingModel(
+ api_key=model_credential.get('api_key'),
+ model=model_name,
+ openai_api_base=model_credential.get('api_base'),
+ )
diff --git a/apps/setting/models_provider/impl/siliconCloud_model_provider/model/image.py b/apps/setting/models_provider/impl/siliconCloud_model_provider/model/image.py
new file mode 100644
index 00000000000..2ec0689d4d2
--- /dev/null
+++ b/apps/setting/models_provider/impl/siliconCloud_model_provider/model/image.py
@@ -0,0 +1,20 @@
+from typing import Dict
+
+from setting.models_provider.base_model_provider import MaxKBBaseModel
+from setting.models_provider.impl.base_chat_open_ai import BaseChatOpenAI
+
+
+class SiliconCloudImage(MaxKBBaseModel, BaseChatOpenAI):
+
+ @staticmethod
+ def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs):
+ optional_params = MaxKBBaseModel.filter_optional_params(model_kwargs)
+ return SiliconCloudImage(
+ model_name=model_name,
+ openai_api_base=model_credential.get('api_base'),
+ openai_api_key=model_credential.get('api_key'),
+ # stream_options={"include_usage": True},
+ streaming=True,
+ stream_usage=True,
+ extra_body=optional_params
+ )
diff --git a/apps/setting/models_provider/impl/siliconCloud_model_provider/model/llm.py b/apps/setting/models_provider/impl/siliconCloud_model_provider/model/llm.py
new file mode 100644
index 00000000000..6fb0c7816fa
--- /dev/null
+++ b/apps/setting/models_provider/impl/siliconCloud_model_provider/model/llm.py
@@ -0,0 +1,38 @@
+# coding=utf-8
+"""
+ @project: maxkb
+ @Author:虎
+ @file: llm.py
+ @date:2024/4/18 15:28
+ @desc:
+"""
+from typing import List, Dict
+
+from langchain_core.messages import BaseMessage, get_buffer_string
+from langchain_openai.chat_models import ChatOpenAI
+
+from common.config.tokenizer_manage_config import TokenizerManage
+from setting.models_provider.base_model_provider import MaxKBBaseModel
+from setting.models_provider.impl.base_chat_open_ai import BaseChatOpenAI
+
+
+def custom_get_token_ids(text: str):
+ tokenizer = TokenizerManage.get_tokenizer()
+ return tokenizer.encode(text)
+
+
+class SiliconCloudChatModel(MaxKBBaseModel, BaseChatOpenAI):
+
+ @staticmethod
+ def is_cache_model():
+ return False
+
+ @staticmethod
+ def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs):
+ optional_params = MaxKBBaseModel.filter_optional_params(model_kwargs)
+ return SiliconCloudChatModel(
+ model=model_name,
+ openai_api_base=model_credential.get('api_base'),
+ openai_api_key=model_credential.get('api_key'),
+ extra_body=optional_params
+ )
diff --git a/apps/setting/models_provider/impl/siliconCloud_model_provider/model/reranker.py b/apps/setting/models_provider/impl/siliconCloud_model_provider/model/reranker.py
new file mode 100644
index 00000000000..ef85cec5867
--- /dev/null
+++ b/apps/setting/models_provider/impl/siliconCloud_model_provider/model/reranker.py
@@ -0,0 +1,74 @@
+# coding=utf-8
+"""
+ @project: MaxKB
+ @Author:虎
+ @file: siliconcloud_reranker.py
+ @date:2024/9/10 9:45
+ @desc: SiliconCloud document rerank wrapper
+"""
+
+from typing import Sequence, Optional, Any, Dict
+import requests
+
+from langchain_core.callbacks import Callbacks
+from langchain_core.documents import BaseDocumentCompressor, Document
+
+from setting.models_provider.base_model_provider import MaxKBBaseModel
+from django.utils.translation import gettext as _
+
+
+class SiliconCloudReranker(MaxKBBaseModel, BaseDocumentCompressor):
+ api_base: Optional[str]
+ """SiliconCloud API URL"""
+ model: Optional[str]
+ """SiliconCloud 重排模型 ID"""
+ api_key: Optional[str]
+ """API Key"""
+
+    top_n: Optional[int] = 3  # keep the top-N most relevant results
+
+ @staticmethod
+ def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs):
+ return SiliconCloudReranker(
+ api_base=model_credential.get('api_base'),
+ model=model_name,
+ api_key=model_credential.get('api_key'),
+ top_n=model_kwargs.get('top_n', 3)
+ )
+
+ def compress_documents(self, documents: Sequence[Document], query: str, callbacks: Optional[Callbacks] = None) -> \
+ Sequence[Document]:
+ if not documents:
+ return []
+
+        # collect the raw document texts
+ texts = [doc.page_content for doc in documents]
+
+        # send the rerank request to the SiliconCloud API
+ headers = {
+ "Authorization": f"Bearer {self.api_key}",
+ "Content-Type": "application/json"
+ }
+ payload = {
+ "model": self.model,
+ "query": query,
+ "documents": texts,
+ "top_n": self.top_n,
+ "return_documents": True,
+ }
+
+ response = requests.post(f"{self.api_base}/rerank", json=payload, headers=headers)
+
+ if response.status_code != 200:
+ raise RuntimeError(f"SiliconCloud API 请求失败: {response.text}")
+
+ res = response.json()
+
+        # parse the response into Documents with relevance scores
+ return [
+ Document(
+ page_content=item.get('document', {}).get('text', ''),
+ metadata={'relevance_score': item.get('relevance_score')}
+ )
+ for item in res.get('results', [])
+ ]
diff --git a/apps/setting/models_provider/impl/siliconCloud_model_provider/model/stt.py b/apps/setting/models_provider/impl/siliconCloud_model_provider/model/stt.py
new file mode 100644
index 00000000000..4bb07da2135
--- /dev/null
+++ b/apps/setting/models_provider/impl/siliconCloud_model_provider/model/stt.py
@@ -0,0 +1,65 @@
+import io
+from typing import Dict
+
+from openai import OpenAI
+
+from common.config.tokenizer_manage_config import TokenizerManage
+from setting.models_provider.base_model_provider import MaxKBBaseModel
+from setting.models_provider.impl.base_stt import BaseSpeechToText
+
+
+def custom_get_token_ids(text: str):
+    # Tokenize with the shared tokenizer so token counting matches the rest of MaxKB.
+    tokenizer = TokenizerManage.get_tokenizer()
+    return tokenizer.encode(text)
+
+
+class SiliconCloudSpeechToText(MaxKBBaseModel, BaseSpeechToText):
+    """Speech-to-text model backed by the SiliconCloud OpenAI-compatible API."""
+    api_base: str  # base URL of the SiliconCloud endpoint
+    api_key: str  # API key used as the bearer token
+    model: str  # transcription model identifier
+
+    def __init__(self, **kwargs):
+        super().__init__(**kwargs)
+        self.api_key = kwargs.get('api_key')
+        self.api_base = kwargs.get('api_base')
+        # Keep 'model' in sync with the declared attribute (matches the tti/tts siblings).
+        self.model = kwargs.get('model')
+
+    @staticmethod
+    def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs):
+        """Build an instance from stored credentials; forwards optional sampling params."""
+        optional_params = {}
+        if model_kwargs.get('max_tokens') is not None:
+            optional_params['max_tokens'] = model_kwargs['max_tokens']
+        if model_kwargs.get('temperature') is not None:
+            optional_params['temperature'] = model_kwargs['temperature']
+        return SiliconCloudSpeechToText(
+            model=model_name,
+            api_base=model_credential.get('api_base'),
+            api_key=model_credential.get('api_key'),
+            **optional_params,
+        )
+
+    def check_auth(self):
+        """Validate credentials by listing models; raises on auth failure."""
+        client = OpenAI(
+            base_url=self.api_base,
+            api_key=self.api_key
+        )
+        client.models.with_raw_response.list()
+
+    def speech_to_text(self, audio_file):
+        """Transcribe audio_file (a readable binary stream) and return the text."""
+        client = OpenAI(
+            base_url=self.api_base,
+            api_key=self.api_key
+        )
+        audio_data = audio_file.read()
+        buffer = io.BytesIO(audio_data)
+        # The SDK needs a filename on the buffer to infer the upload type.
+        buffer.name = "file.mp3"
+        res = client.audio.transcriptions.create(model=self.model, language="zh", file=buffer)
+        return res.text
+
diff --git a/apps/setting/models_provider/impl/siliconCloud_model_provider/model/tti.py b/apps/setting/models_provider/impl/siliconCloud_model_provider/model/tti.py
new file mode 100644
index 00000000000..6849753c19d
--- /dev/null
+++ b/apps/setting/models_provider/impl/siliconCloud_model_provider/model/tti.py
@@ -0,0 +1,62 @@
+from typing import Dict
+
+from openai import OpenAI
+
+from common.config.tokenizer_manage_config import TokenizerManage
+from setting.models_provider.base_model_provider import MaxKBBaseModel
+from setting.models_provider.impl.base_tti import BaseTextToImage
+
+
+def custom_get_token_ids(text: str):
+    # Tokenize with the shared tokenizer so token counting matches the rest of MaxKB.
+    tokenizer = TokenizerManage.get_tokenizer()
+    return tokenizer.encode(text)
+
+
+class SiliconCloudTextToImage(MaxKBBaseModel, BaseTextToImage):
+    """Text-to-image model backed by the SiliconCloud OpenAI-compatible API."""
+    api_base: str  # base URL of the SiliconCloud endpoint
+    api_key: str  # API key used as the bearer token
+    model: str  # image model identifier
+    params: dict  # extra generation parameters forwarded to images.generate
+
+    def __init__(self, **kwargs):
+        super().__init__(**kwargs)
+        self.api_key = kwargs.get('api_key')
+        self.api_base = kwargs.get('api_base')
+        self.model = kwargs.get('model')
+        self.params = kwargs.get('params')
+
+    @staticmethod
+    def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs):
+        """Build an instance from stored credentials; unknown kwargs become generation params."""
+        optional_params = {'params': {'size': '1024x1024', 'quality': 'standard', 'n': 1}}
+        for key, value in model_kwargs.items():
+            # model_id/use_local/streaming are MaxKB bookkeeping, not API parameters
+            if key not in ['model_id', 'use_local', 'streaming']:
+                optional_params['params'][key] = value
+        return SiliconCloudTextToImage(
+            model=model_name,
+            api_base=model_credential.get('api_base'),
+            api_key=model_credential.get('api_key'),
+            **optional_params,
+        )
+
+    def is_cache_model(self):
+        # A fresh instance is created per use; do not cache.
+        return False
+
+    def check_auth(self):
+        """Validate credentials by listing models; raises on auth failure."""
+        client = OpenAI(api_key=self.api_key, base_url=self.api_base)
+        client.models.with_raw_response.list()
+
+    def generate_image(self, prompt: str, negative_prompt: str = None):
+        """Generate image(s) for prompt and return their URLs.
+
+        negative_prompt is accepted for interface compatibility but is not
+        forwarded by this provider.
+        """
+        client = OpenAI(api_key=self.api_key, base_url=self.api_base)
+        res = client.images.generate(model=self.model, prompt=prompt, **self.params)
+        return [content.url for content in res.data]
diff --git a/apps/setting/models_provider/impl/siliconCloud_model_provider/model/tts.py b/apps/setting/models_provider/impl/siliconCloud_model_provider/model/tts.py
new file mode 100644
index 00000000000..1b17cbb825a
--- /dev/null
+++ b/apps/setting/models_provider/impl/siliconCloud_model_provider/model/tts.py
@@ -0,0 +1,70 @@
+from typing import Dict
+
+from openai import OpenAI
+
+from common.config.tokenizer_manage_config import TokenizerManage
+from common.util.common import _remove_empty_lines
+from setting.models_provider.base_model_provider import MaxKBBaseModel
+from setting.models_provider.impl.base_tts import BaseTextToSpeech
+
+
+def custom_get_token_ids(text: str):
+    # Tokenize with the shared tokenizer so token counting matches the rest of MaxKB.
+    tokenizer = TokenizerManage.get_tokenizer()
+    return tokenizer.encode(text)
+
+
+class SiliconCloudTextToSpeech(MaxKBBaseModel, BaseTextToSpeech):
+    """Text-to-speech model backed by the SiliconCloud OpenAI-compatible API."""
+    api_base: str  # base URL of the SiliconCloud endpoint
+    api_key: str  # API key used as the bearer token
+    model: str  # speech model identifier
+    params: dict  # extra synthesis parameters (e.g. voice) for audio.speech.create
+
+    def __init__(self, **kwargs):
+        super().__init__(**kwargs)
+        self.api_key = kwargs.get('api_key')
+        self.api_base = kwargs.get('api_base')
+        self.model = kwargs.get('model')
+        self.params = kwargs.get('params')
+
+    @staticmethod
+    def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs):
+        """Build an instance from stored credentials; unknown kwargs become synthesis params."""
+        optional_params = {'params': {'voice': 'alloy'}}
+        for key, value in model_kwargs.items():
+            # model_id/use_local/streaming are MaxKB bookkeeping, not API parameters
+            if key not in ['model_id', 'use_local', 'streaming']:
+                optional_params['params'][key] = value
+        return SiliconCloudTextToSpeech(
+            model=model_name,
+            api_base=model_credential.get('api_base'),
+            api_key=model_credential.get('api_key'),
+            **optional_params,
+        )
+
+    def check_auth(self):
+        """Validate credentials by listing models; raises on auth failure."""
+        client = OpenAI(
+            base_url=self.api_base,
+            api_key=self.api_key
+        )
+        client.models.with_raw_response.list()
+
+    def text_to_speech(self, text):
+        """Synthesize text and return the complete audio payload as bytes."""
+        client = OpenAI(
+            base_url=self.api_base,
+            api_key=self.api_key
+        )
+        # Normalize input: drop empty lines before sending to the API.
+        text = _remove_empty_lines(text)
+        with client.audio.speech.with_streaming_response.create(
+                model=self.model,
+                input=text,
+                **self.params
+        ) as response:
+            return response.read()
+
+    def is_cache_model(self):
+        # A fresh instance is created per use; do not cache.
+        return False
diff --git a/apps/setting/models_provider/impl/siliconCloud_model_provider/siliconCloud_model_provider.py b/apps/setting/models_provider/impl/siliconCloud_model_provider/siliconCloud_model_provider.py
new file mode 100644
index 00000000000..a8e005ddcab
--- /dev/null
+++ b/apps/setting/models_provider/impl/siliconCloud_model_provider/siliconCloud_model_provider.py
@@ -0,0 +1,137 @@
+# coding=utf-8
+"""
+ @project: maxkb
+ @Author:虎
+    @file: siliconCloud_model_provider.py
+ @date:2024/3/28 16:26
+ @desc:
+"""
+import os
+
+from common.util.file_util import get_file_content
+from setting.models_provider.base_model_provider import IModelProvider, ModelProvideInfo, ModelInfo, \
+ ModelTypeConst, ModelInfoManage
+from setting.models_provider.impl.siliconCloud_model_provider.credential.embedding import \
+ SiliconCloudEmbeddingCredential
+from setting.models_provider.impl.siliconCloud_model_provider.credential.llm import SiliconCloudLLMModelCredential
+from setting.models_provider.impl.siliconCloud_model_provider.credential.reranker import SiliconCloudRerankerCredential
+from setting.models_provider.impl.siliconCloud_model_provider.credential.stt import SiliconCloudSTTModelCredential
+from setting.models_provider.impl.siliconCloud_model_provider.credential.tti import \
+ SiliconCloudTextToImageModelCredential
+from setting.models_provider.impl.siliconCloud_model_provider.model.embedding import SiliconCloudEmbeddingModel
+from setting.models_provider.impl.siliconCloud_model_provider.model.llm import SiliconCloudChatModel
+from setting.models_provider.impl.siliconCloud_model_provider.model.reranker import SiliconCloudReranker
+from setting.models_provider.impl.siliconCloud_model_provider.model.stt import SiliconCloudSpeechToText
+from setting.models_provider.impl.siliconCloud_model_provider.model.tti import SiliconCloudTextToImage
+from smartdoc.conf import PROJECT_DIR
+from django.utils.translation import gettext as _
+
+openai_llm_model_credential = SiliconCloudLLMModelCredential()
+openai_stt_model_credential = SiliconCloudSTTModelCredential()
+openai_reranker_model_credential = SiliconCloudRerankerCredential()
+openai_tti_model_credential = SiliconCloudTextToImageModelCredential()
+model_info_list = [
+ ModelInfo('deepseek-ai/DeepSeek-R1-Distill-Llama-8B', '', ModelTypeConst.LLM,
+ openai_llm_model_credential, SiliconCloudChatModel
+ ),
+ ModelInfo('deepseek-ai/DeepSeek-R1-Distill-Qwen-7B', '', ModelTypeConst.LLM,
+ openai_llm_model_credential,
+ SiliconCloudChatModel),
+ ModelInfo('deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B', '',
+ ModelTypeConst.LLM, openai_llm_model_credential,
+ SiliconCloudChatModel),
+ ModelInfo('Qwen/Qwen2.5-7B-Instruct',
+ '',
+ ModelTypeConst.LLM, openai_llm_model_credential,
+ SiliconCloudChatModel),
+ ModelInfo('Qwen/Qwen2.5-Coder-7B-Instruct', '',
+ ModelTypeConst.LLM, openai_llm_model_credential,
+ SiliconCloudChatModel),
+ ModelInfo('internlm/internlm2_5-7b-chat', '',
+ ModelTypeConst.LLM, openai_llm_model_credential,
+ SiliconCloudChatModel),
+ ModelInfo('Qwen/Qwen2-1.5B-Instruct', '',
+ ModelTypeConst.LLM, openai_llm_model_credential,
+ SiliconCloudChatModel),
+ ModelInfo('THUDM/glm-4-9b-chat', '',
+ ModelTypeConst.LLM, openai_llm_model_credential,
+ SiliconCloudChatModel),
+ ModelInfo('FunAudioLLM/SenseVoiceSmall', '',
+ ModelTypeConst.STT, openai_stt_model_credential,
+ SiliconCloudSpeechToText),
+]
+open_ai_embedding_credential = SiliconCloudEmbeddingCredential()
+model_info_embedding_list = [
+ ModelInfo('netease-youdao/bce-embedding-base_v1', '',
+ ModelTypeConst.EMBEDDING, open_ai_embedding_credential,
+ SiliconCloudEmbeddingModel),
+ ModelInfo('BAAI/bge-m3', '',
+ ModelTypeConst.EMBEDDING, open_ai_embedding_credential,
+ SiliconCloudEmbeddingModel),
+ ModelInfo('BAAI/bge-large-en-v1.5', '',
+ ModelTypeConst.EMBEDDING, open_ai_embedding_credential,
+ SiliconCloudEmbeddingModel),
+ ModelInfo('BAAI/bge-large-zh-v1.5', '',
+ ModelTypeConst.EMBEDDING, open_ai_embedding_credential,
+ SiliconCloudEmbeddingModel),
+]
+
+model_info_tti_list = [
+ ModelInfo('deepseek-ai/Janus-Pro-7B', '',
+ ModelTypeConst.TTI, openai_tti_model_credential,
+ SiliconCloudTextToImage),
+ ModelInfo('stabilityai/stable-diffusion-3-5-large', '',
+ ModelTypeConst.TTI, openai_tti_model_credential,
+ SiliconCloudTextToImage),
+ ModelInfo('black-forest-labs/FLUX.1-schnell', '',
+ ModelTypeConst.TTI, openai_tti_model_credential,
+ SiliconCloudTextToImage),
+ ModelInfo('stabilityai/stable-diffusion-3-medium', '',
+ ModelTypeConst.TTI, openai_tti_model_credential,
+ SiliconCloudTextToImage),
+ ModelInfo('stabilityai/stable-diffusion-xl-base-1.0', '',
+ ModelTypeConst.TTI, openai_tti_model_credential,
+ SiliconCloudTextToImage),
+ ModelInfo('stabilityai/stable-diffusion-2-1', '',
+ ModelTypeConst.TTI, openai_tti_model_credential,
+ SiliconCloudTextToImage),
+]
+model_rerank_list = [
+ ModelInfo('netease-youdao/bce-reranker-base_v1', '', ModelTypeConst.RERANKER,
+ openai_reranker_model_credential, SiliconCloudReranker
+ ),
+ ModelInfo('BAAI/bge-reranker-v2-m3', '', ModelTypeConst.RERANKER,
+ openai_reranker_model_credential, SiliconCloudReranker
+ ),
+]
+model_info_manage = (
+ ModelInfoManage.builder()
+ .append_model_info_list(model_info_list)
+ .append_default_model_info(
+ ModelInfo('gpt-3.5-turbo', _('The latest gpt-3.5-turbo, updated with OpenAI adjustments'), ModelTypeConst.LLM,
+ openai_llm_model_credential, SiliconCloudChatModel
+ ))
+ .append_model_info_list(model_info_embedding_list)
+ .append_default_model_info(model_info_embedding_list[0])
+ .append_model_info_list(model_info_tti_list)
+ .append_default_model_info(model_info_tti_list[0])
+ .append_default_model_info(ModelInfo('whisper-1', '',
+ ModelTypeConst.STT, openai_stt_model_credential,
+ SiliconCloudSpeechToText))
+ .append_model_info_list(model_rerank_list)
+ .append_default_model_info(model_rerank_list[0])
+
+ .build()
+)
+
+
+class SiliconCloudModelProvider(IModelProvider):
+
+ def get_model_info_manage(self):
+ return model_info_manage
+
+ def get_model_provide_info(self):
+ return ModelProvideInfo(provider='model_siliconCloud_provider', name='SILICONFLOW', icon=get_file_content(
+ os.path.join(PROJECT_DIR, "apps", "setting", 'models_provider', 'impl', 'siliconCloud_model_provider',
+ 'icon',
+ 'siliconCloud_icon_svg')))
diff --git a/apps/setting/models_provider/impl/tencent_cloud_model_provider/__init__.py b/apps/setting/models_provider/impl/tencent_cloud_model_provider/__init__.py
new file mode 100644
index 00000000000..2dc4ab10db4
--- /dev/null
+++ b/apps/setting/models_provider/impl/tencent_cloud_model_provider/__init__.py
@@ -0,0 +1,8 @@
+# coding=utf-8
+"""
+ @project: maxkb
+ @Author:虎
+ @file: __init__.py.py
+ @date:2024/3/28 16:25
+ @desc:
+"""
diff --git a/apps/setting/models_provider/impl/tencent_cloud_model_provider/credential/llm.py b/apps/setting/models_provider/impl/tencent_cloud_model_provider/credential/llm.py
new file mode 100644
index 00000000000..8d8e52d27ee
--- /dev/null
+++ b/apps/setting/models_provider/impl/tencent_cloud_model_provider/credential/llm.py
@@ -0,0 +1,79 @@
+# coding=utf-8
+"""
+ @project: MaxKB
+ @Author:虎
+ @file: llm.py
+ @date:2024/7/11 18:32
+    @desc: Credential and parameter forms for Tencent Cloud OpenAI-compatible LLMs
+"""
+import traceback
+from typing import Dict
+
+from django.utils.translation import gettext_lazy as _, gettext
+from langchain_core.messages import HumanMessage
+
+from common import forms
+from common.exception.app_exception import AppApiException
+from common.forms import BaseForm, TooltipLabel
+from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode
+
+
+class TencentCloudLLMModelParams(BaseForm):
+ temperature = forms.SliderField(TooltipLabel(_('Temperature'),
+ _('Higher values make the output more random, while lower values make it more focused and deterministic')),
+ required=True, default_value=0.7,
+ _min=0.1,
+ _max=1.0,
+ _step=0.01,
+ precision=2)
+
+ max_tokens = forms.SliderField(
+ TooltipLabel(_('Output the maximum Tokens'),
+ _('Specify the maximum number of tokens that the model can generate')),
+ required=True, default_value=800,
+ _min=1,
+ _max=100000,
+ _step=1,
+ precision=0)
+
+
+class TencentCloudLLMModelCredential(BaseForm, BaseModelCredential):
+
+ def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], model_params, provider,
+ raise_exception=False):
+ model_type_list = provider.get_model_type_list()
+ if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))):
+ raise AppApiException(ValidCode.valid_error.value,
+ gettext('{model_type} Model type is not supported').format(model_type=model_type))
+
+ for key in ['api_base', 'api_key']:
+ if key not in model_credential:
+ if raise_exception:
+ raise AppApiException(ValidCode.valid_error.value, gettext('{key} is required').format(key=key))
+ else:
+ return False
+ try:
+
+ model = provider.get_model(model_type, model_name, model_credential, **model_params)
+ model.invoke([HumanMessage(content=gettext('Hello'))])
+ except Exception as e:
+ traceback.print_exc()
+ if isinstance(e, AppApiException):
+ raise e
+ if raise_exception:
+ raise AppApiException(ValidCode.valid_error.value,
+ gettext(
+ 'Verification failed, please check whether the parameters are correct: {error}').format(
+ error=str(e)))
+ else:
+ return False
+ return True
+
+ def encryption_dict(self, model: Dict[str, object]):
+ return {**model, 'api_key': super().encryption(model.get('api_key', ''))}
+
+ api_base = forms.TextInputField('API URL', required=True)
+ api_key = forms.PasswordInputField('API Key', required=True)
+
+ def get_model_params_setting_form(self, model_name):
+ return TencentCloudLLMModelParams()
diff --git a/apps/setting/models_provider/impl/tencent_cloud_model_provider/icon/tencent_cloud_icon_svg b/apps/setting/models_provider/impl/tencent_cloud_model_provider/icon/tencent_cloud_icon_svg
new file mode 100644
index 00000000000..ff559eaff44
--- /dev/null
+++ b/apps/setting/models_provider/impl/tencent_cloud_model_provider/icon/tencent_cloud_icon_svg
@@ -0,0 +1,15 @@
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/apps/setting/models_provider/impl/tencent_cloud_model_provider/model/llm.py b/apps/setting/models_provider/impl/tencent_cloud_model_provider/model/llm.py
new file mode 100644
index 00000000000..cfcdf7aca21
--- /dev/null
+++ b/apps/setting/models_provider/impl/tencent_cloud_model_provider/model/llm.py
@@ -0,0 +1,40 @@
+# coding=utf-8
+"""
+    @project: maxkb
+    @Author:虎
+    @file: llm.py
+    @date:2024/4/18 15:28
+    @desc: Chat model wrapper for Tencent Cloud's OpenAI-compatible endpoint.
+"""
+from typing import Dict
+
+from common.config.tokenizer_manage_config import TokenizerManage
+from setting.models_provider.base_model_provider import MaxKBBaseModel
+from setting.models_provider.impl.base_chat_open_ai import BaseChatOpenAI
+
+
+def custom_get_token_ids(text: str):
+    # Tokenize with the shared tokenizer so token counting matches the rest of MaxKB.
+    tokenizer = TokenizerManage.get_tokenizer()
+    return tokenizer.encode(text)
+
+
+class TencentCloudChatModel(MaxKBBaseModel, BaseChatOpenAI):
+    """LangChain chat model targeting Tencent Cloud via the OpenAI protocol."""
+
+    @staticmethod
+    def is_cache_model():
+        # A fresh instance is created per use; do not cache.
+        return False
+
+    @staticmethod
+    def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs):
+        """Build a chat model from stored credentials and optional sampling params."""
+        optional_params = MaxKBBaseModel.filter_optional_params(model_kwargs)
+        return TencentCloudChatModel(
+            model=model_name,
+            openai_api_base=model_credential.get('api_base'),
+            openai_api_key=model_credential.get('api_key'),
+            extra_body=optional_params,
+            custom_get_token_ids=custom_get_token_ids
+        )
diff --git a/apps/setting/models_provider/impl/tencent_cloud_model_provider/tencent_cloud_model_provider.py b/apps/setting/models_provider/impl/tencent_cloud_model_provider/tencent_cloud_model_provider.py
new file mode 100644
index 00000000000..2781165b234
--- /dev/null
+++ b/apps/setting/models_provider/impl/tencent_cloud_model_provider/tencent_cloud_model_provider.py
@@ -0,0 +1,53 @@
+# coding=utf-8
+"""
+    @project: maxkb
+    @Author:虎
+    @file: tencent_cloud_model_provider.py
+    @date:2024/3/28 16:26
+    @desc: Registers Tencent Cloud (OpenAI-compatible) LLM models with MaxKB.
+"""
+import os
+
+from common.util.file_util import get_file_content
+from setting.models_provider.base_model_provider import IModelProvider, ModelProvideInfo, ModelInfo, \
+    ModelTypeConst, ModelInfoManage
+from setting.models_provider.impl.tencent_cloud_model_provider.credential.llm import TencentCloudLLMModelCredential
+from setting.models_provider.impl.tencent_cloud_model_provider.model.llm import TencentCloudChatModel
+from smartdoc.conf import PROJECT_DIR
+from django.utils.translation import gettext_lazy as _
+
+# Shared credential form for every Tencent Cloud LLM entry below.
+tencent_cloud_llm_model_credential = TencentCloudLLMModelCredential()
+model_info_list = [
+    ModelInfo('deepseek-v3', '', ModelTypeConst.LLM,
+              tencent_cloud_llm_model_credential, TencentCloudChatModel
+              ),
+    ModelInfo('deepseek-r1', '', ModelTypeConst.LLM,
+              tencent_cloud_llm_model_credential, TencentCloudChatModel
+              ),
+]
+
+model_info_manage = (
+    ModelInfoManage.builder()
+    .append_model_info_list(model_info_list)
+    .append_default_model_info(
+        ModelInfo('deepseek-v3', '', ModelTypeConst.LLM,
+                  tencent_cloud_llm_model_credential, TencentCloudChatModel
+                  ))
+    .build()
+)
+
+
+class TencentCloudModelProvider(IModelProvider):
+    """Model provider descriptor for Tencent Cloud."""
+
+    def get_model_info_manage(self):
+        # Expose the registry built once at import time.
+        return model_info_manage
+
+    def get_model_provide_info(self):
+        # Provider id, display name and inline SVG icon shown in the UI.
+        return ModelProvideInfo(provider='model_tencent_cloud_provider', name=_('Tencent Cloud'), icon=get_file_content(
+            os.path.join(PROJECT_DIR, "apps", "setting", 'models_provider', 'impl', 'tencent_cloud_model_provider',
+                         'icon',
+                         'tencent_cloud_icon_svg')))
diff --git a/apps/setting/models_provider/impl/tencent_model_provider/__init__.py b/apps/setting/models_provider/impl/tencent_model_provider/__init__.py
new file mode 100644
index 00000000000..8cb7f459eae
--- /dev/null
+++ b/apps/setting/models_provider/impl/tencent_model_provider/__init__.py
@@ -0,0 +1,2 @@
+#!/usr/bin/env python
+# -*- coding: UTF-8 -*-
diff --git a/apps/setting/models_provider/impl/tencent_model_provider/credential/embedding.py b/apps/setting/models_provider/impl/tencent_model_provider/credential/embedding.py
new file mode 100644
index 00000000000..4c500005b15
--- /dev/null
+++ b/apps/setting/models_provider/impl/tencent_model_provider/credential/embedding.py
@@ -0,0 +1,48 @@
+import traceback
+from typing import Dict
+
+from django.utils.translation import gettext as _
+
+from common import forms
+from common.exception.app_exception import AppApiException
+from common.forms import BaseForm
+from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode
+
+
+class TencentEmbeddingCredential(BaseForm, BaseModelCredential):
+    """Credential form (SecretId/SecretKey) for Tencent Hunyuan embedding models."""
+
+    def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], model_params, provider,
+                 raise_exception=True) -> bool:
+        """Validate the credential by issuing a real embed_query call.
+
+        NOTE(review): raise_exception defaults to True here while sibling
+        credential forms default to False — confirm this is intentional.
+        """
+        model_type_list = provider.get_model_type_list()
+        if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))):
+            raise AppApiException(ValidCode.valid_error.value,
+                                  _('{model_type} Model type is not supported').format(model_type=model_type))
+        self.valid_form(model_credential)
+        try:
+            model = provider.get_model(model_type, model_name, model_credential)
+            model.embed_query(_('Hello'))
+        except Exception as e:
+            traceback.print_exc()
+            if isinstance(e, AppApiException):
+                raise e
+            if raise_exception:
+                raise AppApiException(ValidCode.valid_error.value,
+                                      _('Verification failed, please check whether the parameters are correct: {error}').format(
+                                          error=str(e)))
+            else:
+                return False
+        return True
+
+    def encryption_dict(self, model: Dict[str, object]) -> Dict[str, object]:
+        # Only the secret key is encrypted before persisting.
+        encrypted_secret_key = super().encryption(model.get('SecretKey', ''))
+        return {**model, 'SecretKey': encrypted_secret_key}
+
+    SecretId = forms.PasswordInputField('SecretId', required=True)
+    SecretKey = forms.PasswordInputField('SecretKey', required=True)
diff --git a/apps/setting/models_provider/impl/tencent_model_provider/credential/image.py b/apps/setting/models_provider/impl/tencent_model_provider/credential/image.py
new file mode 100644
index 00000000000..257be9f67bf
--- /dev/null
+++ b/apps/setting/models_provider/impl/tencent_model_provider/credential/image.py
@@ -0,0 +1,78 @@
+# coding=utf-8
+"""
+ @project: MaxKB
+ @Author:虎
+    @file: image.py
+ @date:2024/7/11 18:41
+ @desc:
+"""
+import traceback
+from typing import Dict
+
+from django.utils.translation import gettext_lazy as _, gettext
+from langchain_core.messages import HumanMessage
+
+from common import forms
+from common.exception.app_exception import AppApiException
+from common.forms import BaseForm, TooltipLabel
+from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode
+
+
+class QwenModelParams(BaseForm):
+ temperature = forms.SliderField(TooltipLabel(_('Temperature'),
+ _('Higher values make the output more random, while lower values make it more focused and deterministic')),
+ required=True, default_value=1.0,
+ _min=0.1,
+ _max=1.9,
+ _step=0.01,
+ precision=2)
+
+ max_tokens = forms.SliderField(
+ TooltipLabel(_('Output the maximum Tokens'),
+ _('Specify the maximum number of tokens that the model can generate')),
+ required=True, default_value=800,
+ _min=1,
+ _max=100000,
+ _step=1,
+ precision=0)
+
+
+class TencentVisionModelCredential(BaseForm, BaseModelCredential):
+
+ def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], model_params, provider,
+ raise_exception=False):
+ model_type_list = provider.get_model_type_list()
+ if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))):
+ raise AppApiException(ValidCode.valid_error.value,
+ gettext('{model_type} Model type is not supported').format(model_type=model_type))
+ for key in ['api_key']:
+ if key not in model_credential:
+ if raise_exception:
+ raise AppApiException(ValidCode.valid_error.value, gettext('{key} is required').format(key=key))
+ else:
+ return False
+ try:
+ model = provider.get_model(model_type, model_name, model_credential, **model_params)
+ res = model.stream([HumanMessage(content=[{"type": "text", "text": gettext('Hello')}])])
+ for chunk in res:
+ print(chunk)
+ except Exception as e:
+ traceback.print_exc()
+ if isinstance(e, AppApiException):
+ raise e
+ if raise_exception:
+ raise AppApiException(ValidCode.valid_error.value,
+ gettext(
+ 'Verification failed, please check whether the parameters are correct: {error}').format(
+ error=str(e)))
+ else:
+ return False
+ return True
+
+ def encryption_dict(self, model: Dict[str, object]):
+ return {**model, 'api_key': super().encryption(model.get('api_key', ''))}
+
+ api_key = forms.PasswordInputField('API Key', required=True)
+
+ def get_model_params_setting_form(self, model_name):
+ return QwenModelParams()
diff --git a/apps/setting/models_provider/impl/tencent_model_provider/credential/llm.py b/apps/setting/models_provider/impl/tencent_model_provider/credential/llm.py
new file mode 100644
index 00000000000..4fa28ba3023
--- /dev/null
+++ b/apps/setting/models_provider/impl/tencent_model_provider/credential/llm.py
@@ -0,0 +1,70 @@
+# coding=utf-8
+import traceback
+
+from django.utils.translation import gettext_lazy as _, gettext
+from langchain_core.messages import HumanMessage
+
+from common import forms
+from common.exception.app_exception import AppApiException
+from common.forms import BaseForm, TooltipLabel
+from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode
+
+
+class TencentLLMModelParams(BaseForm):
+ temperature = forms.SliderField(TooltipLabel(_('Temperature'),
+ _('Higher values make the output more random, while lower values make it more focused and deterministic')),
+ required=True, default_value=0.5,
+ _min=0.1,
+ _max=2.0,
+ _step=0.01,
+ precision=2)
+
+
+class TencentLLMModelCredential(BaseForm, BaseModelCredential):
+ REQUIRED_FIELDS = ['hunyuan_app_id', 'hunyuan_secret_id', 'hunyuan_secret_key']
+
+ @classmethod
+ def _validate_model_type(cls, model_type, provider, raise_exception=False):
+ if not any(mt['value'] == model_type for mt in provider.get_model_type_list()):
+ if raise_exception:
+ raise AppApiException(ValidCode.valid_error.value,
+ gettext('{model_type} Model type is not supported').format(model_type=model_type))
+ return False
+ return True
+
+ @classmethod
+ def _validate_credential_fields(cls, model_credential, raise_exception=False):
+ missing_keys = [key for key in cls.REQUIRED_FIELDS if key not in model_credential]
+ if missing_keys:
+ if raise_exception:
+ raise AppApiException(ValidCode.valid_error.value,
+ gettext('{keys} is required').format(keys=", ".join(missing_keys)))
+ return False
+ return True
+
+ def is_valid(self, model_type, model_name, model_credential, model_params, provider, raise_exception=False):
+ if not (self._validate_model_type(model_type, provider, raise_exception) and
+ self._validate_credential_fields(model_credential, raise_exception)):
+ return False
+ try:
+ model = provider.get_model(model_type, model_name, model_credential, **model_params)
+ model.invoke([HumanMessage(content=gettext('Hello'))])
+ except Exception as e:
+ traceback.print_exc()
+ if raise_exception:
+ raise AppApiException(ValidCode.valid_error.value,
+ gettext(
+ 'Verification failed, please check whether the parameters are correct: {error}').format(
+ error=str(e)))
+ return False
+ return True
+
+ def encryption_dict(self, model):
+ return {**model, 'hunyuan_secret_key': super().encryption(model.get('hunyuan_secret_key', ''))}
+
+ hunyuan_app_id = forms.TextInputField('APP ID', required=True)
+ hunyuan_secret_id = forms.PasswordInputField('SecretId', required=True)
+ hunyuan_secret_key = forms.PasswordInputField('SecretKey', required=True)
+
+ def get_model_params_setting_form(self, model_name):
+ return TencentLLMModelParams()
diff --git a/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py b/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py
new file mode 100644
index 00000000000..60fcfbfc9b0
--- /dev/null
+++ b/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py
@@ -0,0 +1,116 @@
+# coding=utf-8
+import traceback
+
+from django.utils.translation import gettext_lazy as _, gettext
+
+from common import forms
+from common.exception.app_exception import AppApiException
+from common.forms import BaseForm, TooltipLabel
+from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode
+
+
+class TencentTTIModelParams(BaseForm):
+ Style = forms.SingleSelect(
+ TooltipLabel(_('painting style'), _('If not passed, the default value is 201 (Japanese anime style)')),
+ required=True,
+ default_value='201',
+ option_list=[
+ {'value': '000', 'label': _('Not limited to style')},
+ {'value': '101', 'label': _('ink painting')},
+ {'value': '102', 'label': _('concept art')},
+ {'value': '103', 'label': _('Oil painting 1')},
+ {'value': '118', 'label': _('Oil Painting 2 (Van Gogh)')},
+ {'value': '104', 'label': _('watercolor painting')},
+ {'value': '105', 'label': _('pixel art')},
+ {'value': '106', 'label': _('impasto style')},
+ {'value': '107', 'label': _('illustration')},
+ {'value': '108', 'label': _('paper cut style')},
+ {'value': '109', 'label': _('Impressionism 1 (Monet)')},
+ {'value': '119', 'label': _('Impressionism 2')},
+ {'value': '110', 'label': '2.5D'},
+ {'value': '111', 'label': _('classical portraiture')},
+ {'value': '112', 'label': _('black and white sketch')},
+ {'value': '113', 'label': _('cyberpunk')},
+ {'value': '114', 'label': _('science fiction style')},
+ {'value': '115', 'label': _('dark style')},
+ {'value': '116', 'label': '3D'},
+ {'value': '117', 'label': _('vaporwave')},
+ {'value': '201', 'label': _('Japanese animation')},
+ {'value': '202', 'label': _('monster style')},
+ {'value': '203', 'label': _('Beautiful ancient style')},
+ {'value': '204', 'label': _('retro anime')},
+ {'value': '301', 'label': _('Game cartoon hand drawing')},
+ {'value': '401', 'label': _('Universal realistic style')},
+ ],
+ value_field='value',
+ text_field='label'
+ )
+
+ Resolution = forms.SingleSelect(
+ TooltipLabel(_('Generate image resolution'), _('If not transmitted, the default value is 768:768.')),
+ required=True,
+ default_value='768:768',
+ option_list=[
+ {'value': '768:768', 'label': '768:768(1:1)'},
+ {'value': '768:1024', 'label': '768:1024(3:4)'},
+ {'value': '1024:768', 'label': '1024:768(4:3)'},
+ {'value': '1024:1024', 'label': '1024:1024(1:1)'},
+ {'value': '720:1280', 'label': '720:1280(9:16)'},
+ {'value': '1280:720', 'label': '1280:720(16:9)'},
+ {'value': '768:1280', 'label': '768:1280(3:5)'},
+ {'value': '1280:768', 'label': '1280:768(5:3)'},
+ {'value': '1080:1920', 'label': '1080:1920(9:16)'},
+ {'value': '1920:1080', 'label': '1920:1080(16:9)'},
+ ],
+ value_field='value',
+ text_field='label'
+ )
+
+
+class TencentTTIModelCredential(BaseForm, BaseModelCredential):
+ REQUIRED_FIELDS = ['hunyuan_secret_id', 'hunyuan_secret_key']
+
+ @classmethod
+ def _validate_model_type(cls, model_type, provider, raise_exception=False):
+ if not any(mt['value'] == model_type for mt in provider.get_model_type_list()):
+ if raise_exception:
+ raise AppApiException(ValidCode.valid_error.value,
+ gettext('{model_type} Model type is not supported').format(model_type=model_type))
+ return False
+ return True
+
+ @classmethod
+ def _validate_credential_fields(cls, model_credential, raise_exception=False):
+ missing_keys = [key for key in cls.REQUIRED_FIELDS if key not in model_credential]
+ if missing_keys:
+ if raise_exception:
+ raise AppApiException(ValidCode.valid_error.value,
+ gettext('{keys} is required').format(keys=", ".join(missing_keys)))
+ return False
+ return True
+
+ def is_valid(self, model_type, model_name, model_credential, model_params, provider, raise_exception=False):
+ if not (self._validate_model_type(model_type, provider, raise_exception) and
+ self._validate_credential_fields(model_credential, raise_exception)):
+ return False
+ try:
+ model = provider.get_model(model_type, model_name, model_credential, **model_params)
+ model.check_auth()
+ except Exception as e:
+ traceback.print_exc()
+ if raise_exception:
+ raise AppApiException(ValidCode.valid_error.value,
+ gettext(
+ 'Verification failed, please check whether the parameters are correct: {error}').format(
+ error=str(e)))
+ return False
+ return True
+
+ def encryption_dict(self, model):
+ return {**model, 'hunyuan_secret_key': super().encryption(model.get('hunyuan_secret_key', ''))}
+
+ hunyuan_secret_id = forms.PasswordInputField('SecretId', required=True)
+ hunyuan_secret_key = forms.PasswordInputField('SecretKey', required=True)
+
+ def get_model_params_setting_form(self, model_name):
+ return TencentTTIModelParams()
diff --git a/apps/setting/models_provider/impl/tencent_model_provider/icon/tencent_icon_svg b/apps/setting/models_provider/impl/tencent_model_provider/icon/tencent_icon_svg
new file mode 100644
index 00000000000..6cec08b74c2
--- /dev/null
+++ b/apps/setting/models_provider/impl/tencent_model_provider/icon/tencent_icon_svg
@@ -0,0 +1,5 @@
+
+
+
+
diff --git a/apps/setting/models_provider/impl/tencent_model_provider/model/embedding.py b/apps/setting/models_provider/impl/tencent_model_provider/model/embedding.py
new file mode 100644
index 00000000000..659a5ac12b4
--- /dev/null
+++ b/apps/setting/models_provider/impl/tencent_model_provider/model/embedding.py
@@ -0,0 +1,41 @@
+
+from typing import Dict, List
+
+from langchain_core.embeddings import Embeddings
+from tencentcloud.common import credential
+from tencentcloud.hunyuan.v20230901.hunyuan_client import HunyuanClient
+from tencentcloud.hunyuan.v20230901.models import GetEmbeddingRequest
+
+from setting.models_provider.base_model_provider import MaxKBBaseModel
+
+
+class TencentEmbeddingModel(MaxKBBaseModel, Embeddings):
+ def embed_documents(self, texts: List[str]) -> List[List[float]]:
+ return [self.embed_query(text) for text in texts]
+
+ def embed_query(self, text: str) -> List[float]:
+ request = GetEmbeddingRequest()
+ request.Input = text
+ res = self.client.GetEmbedding(request)
+ return res.Data[0].Embedding
+
+ def __init__(self, secret_id: str, secret_key: str, model_name: str):
+ self.secret_id = secret_id
+ self.secret_key = secret_key
+ self.model_name = model_name
+ cred = credential.Credential(
+ secret_id, secret_key
+ )
+ self.client = HunyuanClient(cred, "")
+
+ @staticmethod
+ def new_instance(model_type: str, model_name: str, model_credential: Dict[str, str], **model_kwargs):
+ return TencentEmbeddingModel(
+ secret_id=model_credential.get('SecretId'),
+ secret_key=model_credential.get('SecretKey'),
+ model_name=model_name,
+ )
+
+ def _generate_auth_token(self):
+ # Example method to generate an authentication token for the model API
+ return f"{self.secret_id}:{self.secret_key}"
diff --git a/apps/setting/models_provider/impl/tencent_model_provider/model/hunyuan.py b/apps/setting/models_provider/impl/tencent_model_provider/model/hunyuan.py
new file mode 100644
index 00000000000..9055c4cb1be
--- /dev/null
+++ b/apps/setting/models_provider/impl/tencent_model_provider/model/hunyuan.py
@@ -0,0 +1,280 @@
+import json
+import logging
+from typing import Any, Dict, Iterator, List, Mapping, Optional, Type
+
+from langchain_core.callbacks import CallbackManagerForLLMRun
+from langchain_core.language_models.chat_models import (
+ BaseChatModel,
+ generate_from_stream,
+)
+from langchain_core.messages import (
+ AIMessage,
+ AIMessageChunk,
+ BaseMessage,
+ BaseMessageChunk,
+ ChatMessage,
+ ChatMessageChunk,
+ HumanMessage,
+ HumanMessageChunk, SystemMessage,
+)
+from langchain_core.outputs import ChatGeneration, ChatGenerationChunk, ChatResult
+from pydantic import Field, SecretStr, root_validator
+from langchain_core.utils import (
+ convert_to_secret_str,
+ get_from_dict_or_env,
+ get_pydantic_field_names,
+ pre_init,
+)
+
+logger = logging.getLogger(__name__)
+
+
+def _convert_message_to_dict(message: BaseMessage) -> dict:
+ message_dict: Dict[str, Any]
+ if isinstance(message, ChatMessage):
+ message_dict = {"Role": message.role, "Content": message.content}
+ elif isinstance(message, HumanMessage):
+ message_dict = {"Role": "user", "Content": message.content}
+ elif isinstance(message, AIMessage):
+ message_dict = {"Role": "assistant", "Content": message.content}
+ elif isinstance(message, SystemMessage):
+ message_dict = {"Role": "system", "Content": message.content}
+ else:
+ raise TypeError(f"Got unknown type {message}")
+
+ return message_dict
+
+
+def _convert_dict_to_message(_dict: Mapping[str, Any]) -> BaseMessage:
+ role = _dict["Role"]
+ if role == "user":
+ return HumanMessage(content=_dict["Content"])
+ elif role == "assistant":
+ return AIMessage(content=_dict.get("Content", "") or "")
+ else:
+ return ChatMessage(content=_dict["Content"], role=role)
+
+
+def _convert_delta_to_message_chunk(
+ _dict: Mapping[str, Any], default_class: Type[BaseMessageChunk]
+) -> BaseMessageChunk:
+ role = _dict.get("Role")
+ content = _dict.get("Content") or ""
+
+ if role == "user" or default_class == HumanMessageChunk:
+ return HumanMessageChunk(content=content)
+ elif role == "assistant" or default_class == AIMessageChunk:
+ return AIMessageChunk(content=content)
+ elif role or default_class == ChatMessageChunk:
+ return ChatMessageChunk(content=content, role=role) # type: ignore[arg-type]
+ else:
+ return default_class(content=content) # type: ignore[call-arg]
+
+
+def _create_chat_result(response: Mapping[str, Any]) -> ChatResult:
+ generations = []
+ for choice in response["Choices"]:
+ message = _convert_dict_to_message(choice["Message"])
+ generations.append(ChatGeneration(message=message))
+
+ token_usage = response["Usage"]
+ llm_output = {"token_usage": token_usage}
+ return ChatResult(generations=generations, llm_output=llm_output)
+
+
+class ChatHunyuan(BaseChatModel):
+ """Tencent Hunyuan chat models API by Tencent.
+
+ For more information, see https://cloud.tencent.com/document/product/1729
+ """
+
+ @property
+ def lc_secrets(self) -> Dict[str, str]:
+ return {
+ "hunyuan_app_id": "HUNYUAN_APP_ID",
+ "hunyuan_secret_id": "HUNYUAN_SECRET_ID",
+ "hunyuan_secret_key": "HUNYUAN_SECRET_KEY",
+ }
+
+ @property
+ def lc_serializable(self) -> bool:
+ return True
+
+ hunyuan_app_id: Optional[int] = None
+ """Hunyuan App ID"""
+ hunyuan_secret_id: Optional[str] = None
+ """Hunyuan Secret ID"""
+ hunyuan_secret_key: Optional[SecretStr] = None
+ """Hunyuan Secret Key"""
+ streaming: bool = False
+ """Whether to stream the results or not."""
+ request_timeout: int = 60
+ """Timeout for requests to Hunyuan API. Default is 60 seconds."""
+ temperature: float = 1.0
+ """What sampling temperature to use."""
+ top_p: float = 1.0
+ """What probability mass to use."""
+ model: str = "hunyuan-lite"
+ """What Model to use.
+ Optional model:
+ - hunyuan-lite、
+ - hunyuan-standard
+ - hunyuan-standard-256K
+ - hunyuan-pro
+ - hunyuan-code
+ - hunyuan-role
+ - hunyuan-functioncall
+ - hunyuan-vision
+ """
+ stream_moderation: bool = False
+ """Whether to review the results or not when streaming is true."""
+ enable_enhancement: bool = True
+ """Whether to enhancement the results or not."""
+
+ model_kwargs: Dict[str, Any] = Field(default_factory=dict)
+ """Holds any model parameters valid for API call not explicitly specified."""
+
+ class Config:
+ """Configuration for this pydantic object."""
+
+ validate_by_name = True
+
+ @root_validator(pre=True)
+ def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]:
+ """Build extra kwargs from additional params that were passed in."""
+ all_required_field_names = get_pydantic_field_names(cls)
+ extra = values.get("model_kwargs", {})
+ for field_name in list(values):
+ if field_name in extra:
+ raise ValueError(f"Found {field_name} supplied twice.")
+ if field_name not in all_required_field_names:
+ logger.warning(
+ f"""WARNING! {field_name} is not default parameter.
+ {field_name} was transferred to model_kwargs.
+ Please confirm that {field_name} is what you intended."""
+ )
+ extra[field_name] = values.pop(field_name)
+
+ invalid_model_kwargs = all_required_field_names.intersection(extra.keys())
+ if invalid_model_kwargs:
+ raise ValueError(
+ f"Parameters {invalid_model_kwargs} should be specified explicitly. "
+ f"Instead they were passed in as part of `model_kwargs` parameter."
+ )
+
+ values["model_kwargs"] = extra
+ return values
+
+ @pre_init
+ def validate_environment(cls, values: Dict) -> Dict:
+ values["hunyuan_app_id"] = get_from_dict_or_env(
+ values,
+ "hunyuan_app_id",
+ "HUNYUAN_APP_ID",
+ )
+ values["hunyuan_secret_id"] = get_from_dict_or_env(
+ values,
+ "hunyuan_secret_id",
+ "HUNYUAN_SECRET_ID",
+ )
+ values["hunyuan_secret_key"] = convert_to_secret_str(
+ get_from_dict_or_env(
+ values,
+ "hunyuan_secret_key",
+ "HUNYUAN_SECRET_KEY",
+ )
+ )
+ return values
+
+ @property
+ def _default_params(self) -> Dict[str, Any]:
+ """Get the default parameters for calling Hunyuan API."""
+ normal_params = {
+ "Temperature": self.temperature,
+ "TopP": self.top_p,
+ "Model": self.model,
+ "Stream": self.streaming,
+ "StreamModeration": self.stream_moderation,
+ "EnableEnhancement": self.enable_enhancement,
+ }
+ return {**normal_params, **self.model_kwargs}
+
+ def _generate(
+ self,
+ messages: List[BaseMessage],
+ stop: Optional[List[str]] = None,
+ run_manager: Optional[CallbackManagerForLLMRun] = None,
+ **kwargs: Any,
+ ) -> ChatResult:
+ if self.streaming:
+ stream_iter = self._stream(
+ messages=messages, stop=stop, run_manager=run_manager, **kwargs
+ )
+ return generate_from_stream(stream_iter)
+
+ res = self._chat(messages, **kwargs)
+ return _create_chat_result(json.loads(res.to_json_string()))
+
+ usage_metadata: dict = {}
+
+ def _stream(
+ self,
+ messages: List[BaseMessage],
+ stop: Optional[List[str]] = None,
+ run_manager: Optional[CallbackManagerForLLMRun] = None,
+ **kwargs: Any,
+ ) -> Iterator[ChatGenerationChunk]:
+ res = self._chat(messages, **kwargs)
+
+ default_chunk_class = AIMessageChunk
+ for chunk in res:
+ chunk = chunk.get("data", "")
+ if len(chunk) == 0:
+ continue
+ response = json.loads(chunk)
+ if "error" in response:
+ raise ValueError(f"Error from Hunyuan api response: {response}")
+
+ for choice in response["Choices"]:
+ chunk = _convert_delta_to_message_chunk(
+ choice["Delta"], default_chunk_class
+ )
+ default_chunk_class = chunk.__class__
+ # FinishReason === stop
+ if choice.get("FinishReason") == "stop":
+ self.usage_metadata = response.get("Usage", {})
+ cg_chunk = ChatGenerationChunk(message=chunk)
+ if run_manager:
+ run_manager.on_llm_new_token(chunk.content, chunk=cg_chunk)
+ yield cg_chunk
+
+ def _chat(self, messages: List[BaseMessage], **kwargs: Any) -> Any:
+ if self.hunyuan_secret_key is None:
+ raise ValueError("Hunyuan secret key is not set.")
+
+ try:
+ from tencentcloud.common import credential
+ from tencentcloud.hunyuan.v20230901 import hunyuan_client, models
+ except ImportError:
+ raise ImportError(
+ "Could not import tencentcloud python package. "
+ "Please install it with `pip install tencentcloud-sdk-python`."
+ )
+
+ parameters = {**self._default_params, **kwargs}
+ cred = credential.Credential(
+ self.hunyuan_secret_id, str(self.hunyuan_secret_key.get_secret_value())
+ )
+ client = hunyuan_client.HunyuanClient(cred, "")
+ req = models.ChatCompletionsRequest()
+ params = {
+ "Messages": [_convert_message_to_dict(m) for m in messages],
+ **parameters,
+ }
+ req.from_json_string(json.dumps(params))
+ resp = client.ChatCompletions(req)
+ return resp
+
+ @property
+ def _llm_type(self) -> str:
+ return "hunyuan-chat"
diff --git a/apps/setting/models_provider/impl/tencent_model_provider/model/image.py b/apps/setting/models_provider/impl/tencent_model_provider/model/image.py
new file mode 100644
index 00000000000..6800cdd567c
--- /dev/null
+++ b/apps/setting/models_provider/impl/tencent_model_provider/model/image.py
@@ -0,0 +1,20 @@
+from typing import Dict
+
+from setting.models_provider.base_model_provider import MaxKBBaseModel
+from setting.models_provider.impl.base_chat_open_ai import BaseChatOpenAI
+
+
+class TencentVision(MaxKBBaseModel, BaseChatOpenAI):
+
+ @staticmethod
+ def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs):
+ optional_params = MaxKBBaseModel.filter_optional_params(model_kwargs)
+ return TencentVision(
+ model_name=model_name,
+ openai_api_base='https://api.hunyuan.cloud.tencent.com/v1',
+ openai_api_key=model_credential.get('api_key'),
+ # stream_options={"include_usage": True},
+ streaming=True,
+ stream_usage=True,
+ extra_body=optional_params
+ )
diff --git a/apps/setting/models_provider/impl/tencent_model_provider/model/llm.py b/apps/setting/models_provider/impl/tencent_model_provider/model/llm.py
new file mode 100644
index 00000000000..d462cb7af19
--- /dev/null
+++ b/apps/setting/models_provider/impl/tencent_model_provider/model/llm.py
@@ -0,0 +1,45 @@
+# coding=utf-8
+
+from typing import List, Dict, Optional, Any
+
+from langchain_core.messages import BaseMessage
+
+from setting.models_provider.base_model_provider import MaxKBBaseModel
+from setting.models_provider.impl.tencent_model_provider.model.hunyuan import ChatHunyuan
+
+
+class TencentModel(MaxKBBaseModel, ChatHunyuan):
+ @staticmethod
+ def is_cache_model():
+ return False
+
+ def __init__(self, model_name: str, credentials: Dict[str, str], streaming: bool = False, **kwargs):
+ hunyuan_app_id = credentials.get('hunyuan_app_id')
+ hunyuan_secret_id = credentials.get('hunyuan_secret_id')
+ hunyuan_secret_key = credentials.get('hunyuan_secret_key')
+
+ optional_params = MaxKBBaseModel.filter_optional_params(kwargs)
+
+ if not all([hunyuan_app_id, hunyuan_secret_id, hunyuan_secret_key]):
+ raise ValueError(
+ "All of 'hunyuan_app_id', 'hunyuan_secret_id', and 'hunyuan_secret_key' must be provided in credentials.")
+
+ super().__init__(model=model_name, hunyuan_app_id=hunyuan_app_id, hunyuan_secret_id=hunyuan_secret_id,
+ hunyuan_secret_key=hunyuan_secret_key, streaming=streaming,
+ temperature=optional_params.get('temperature', 1.0)
+ )
+
+ @staticmethod
+ def new_instance(model_type: str, model_name: str, model_credential: Dict[str, object],
+ **model_kwargs) -> 'TencentModel':
+ streaming = model_kwargs.pop('streaming', False)
+ return TencentModel(model_name=model_name, credentials=model_credential, streaming=streaming, **model_kwargs)
+
+ def get_last_generation_info(self) -> Optional[Dict[str, Any]]:
+ return self.usage_metadata
+
+ def get_num_tokens_from_messages(self, messages: List[BaseMessage]) -> int:
+ return self.usage_metadata.get('PromptTokens', 0)
+
+ def get_num_tokens(self, text: str) -> int:
+ return self.usage_metadata.get('CompletionTokens', 0)
diff --git a/apps/setting/models_provider/impl/tencent_model_provider/model/tti.py b/apps/setting/models_provider/impl/tencent_model_provider/model/tti.py
new file mode 100644
index 00000000000..c2e671e4930
--- /dev/null
+++ b/apps/setting/models_provider/impl/tencent_model_provider/model/tti.py
@@ -0,0 +1,92 @@
+# coding=utf-8
+
+import json
+from typing import Dict
+
+from django.utils.translation import gettext as _
+from tencentcloud.common import credential
+from tencentcloud.common.exception.tencent_cloud_sdk_exception import TencentCloudSDKException
+from tencentcloud.common.profile.client_profile import ClientProfile
+from tencentcloud.common.profile.http_profile import HttpProfile
+from tencentcloud.hunyuan.v20230901 import hunyuan_client, models
+
+from setting.models_provider.base_model_provider import MaxKBBaseModel
+from setting.models_provider.impl.base_tti import BaseTextToImage
+from setting.models_provider.impl.tencent_model_provider.model.hunyuan import ChatHunyuan
+
+
+class TencentTextToImageModel(MaxKBBaseModel, BaseTextToImage):
+ hunyuan_secret_id: str
+ hunyuan_secret_key: str
+ model: str
+ params: dict
+
+ @staticmethod
+ def is_cache_model():
+ return False
+
+ def __init__(self, **kwargs):
+ super().__init__(**kwargs)
+ self.hunyuan_secret_id = kwargs.get('hunyuan_secret_id')
+ self.hunyuan_secret_key = kwargs.get('hunyuan_secret_key')
+ self.model = kwargs.get('model_name')
+ self.params = kwargs.get('params')
+
+ @staticmethod
+ def new_instance(model_type: str, model_name: str, model_credential: Dict[str, object],
+ **model_kwargs) -> 'TencentTextToImageModel':
+ optional_params = {'params': {'Style': '201', 'Resolution': '768:768'}}
+ for key, value in model_kwargs.items():
+ if key not in ['model_id', 'use_local', 'streaming']:
+ optional_params['params'][key] = value
+ return TencentTextToImageModel(
+ model=model_name,
+ hunyuan_secret_id=model_credential.get('hunyuan_secret_id'),
+ hunyuan_secret_key=model_credential.get('hunyuan_secret_key'),
+ **optional_params
+ )
+
+ def check_auth(self):
+ chat = ChatHunyuan(hunyuan_app_id='111111',
+ hunyuan_secret_id=self.hunyuan_secret_id,
+ hunyuan_secret_key=self.hunyuan_secret_key,
+ model="hunyuan-standard")
+ res = chat.invoke(_('Hello'))
+ # print(res)
+
+ def generate_image(self, prompt: str, negative_prompt: str = None):
+ try:
+ # 实例化一个认证对象,入参需要传入腾讯云账户 SecretId 和 SecretKey,此处还需注意密钥对的保密
+ # 代码泄露可能会导致 SecretId 和 SecretKey 泄露,并威胁账号下所有资源的安全性。以下代码示例仅供参考,建议采用更安全的方式来使用密钥,请参见:https://cloud.tencent.com/document/product/1278/85305
+ # 密钥可前往官网控制台 https://console.cloud.tencent.com/cam/capi 进行获取
+ cred = credential.Credential(self.hunyuan_secret_id, self.hunyuan_secret_key)
+ # 实例化一个http选项,可选的,没有特殊需求可以跳过
+ httpProfile = HttpProfile()
+ httpProfile.endpoint = "hunyuan.tencentcloudapi.com"
+
+ # 实例化一个client选项,可选的,没有特殊需求可以跳过
+ clientProfile = ClientProfile()
+ clientProfile.httpProfile = httpProfile
+ # 实例化要请求产品的client对象,clientProfile是可选的
+ client = hunyuan_client.HunyuanClient(cred, "ap-guangzhou", clientProfile)
+
+ # 实例化一个请求对象,每个接口都会对应一个request对象
+ req = models.TextToImageLiteRequest()
+ params = {
+ "Prompt": prompt,
+ "NegativePrompt": negative_prompt,
+ "RspImgType": "url",
+ **self.params
+ }
+ req.from_json_string(json.dumps(params))
+
+ # 返回的resp是一个TextToImageLiteResponse的实例,与请求对象对应
+ resp = client.TextToImageLite(req)
+ # 输出json格式的字符串回包
+ print(resp.to_json_string())
+ file_urls = []
+
+ file_urls.append(resp.ResultImage)
+ return file_urls
+ except TencentCloudSDKException as err:
+ print(err)
diff --git a/apps/setting/models_provider/impl/tencent_model_provider/tencent_model_provider.py b/apps/setting/models_provider/impl/tencent_model_provider/tencent_model_provider.py
new file mode 100644
index 00000000000..e6a1c16fd4c
--- /dev/null
+++ b/apps/setting/models_provider/impl/tencent_model_provider/tencent_model_provider.py
@@ -0,0 +1,127 @@
+#!/usr/bin/env python
+# -*- coding: UTF-8 -*-
+
+import os
+from common.util.file_util import get_file_content
+from setting.models_provider.base_model_provider import (
+ IModelProvider, ModelProvideInfo, ModelInfo, ModelTypeConst, ModelInfoManage
+)
+from setting.models_provider.impl.tencent_model_provider.credential.embedding import TencentEmbeddingCredential
+from setting.models_provider.impl.tencent_model_provider.credential.image import TencentVisionModelCredential
+from setting.models_provider.impl.tencent_model_provider.credential.llm import TencentLLMModelCredential
+from setting.models_provider.impl.tencent_model_provider.credential.tti import TencentTTIModelCredential
+from setting.models_provider.impl.tencent_model_provider.model.embedding import TencentEmbeddingModel
+from setting.models_provider.impl.tencent_model_provider.model.image import TencentVision
+from setting.models_provider.impl.tencent_model_provider.model.llm import TencentModel
+from setting.models_provider.impl.tencent_model_provider.model.tti import TencentTextToImageModel
+from smartdoc.conf import PROJECT_DIR
+from django.utils.translation import gettext as _
+
+def _create_model_info(model_name, description, model_type, credential_class, model_class):
+ return ModelInfo(
+ name=model_name,
+ desc=description,
+ model_type=model_type,
+ model_credential=credential_class(),
+ model_class=model_class
+ )
+
+
+def _get_tencent_icon_path():
+ return os.path.join(PROJECT_DIR, "apps", "setting", 'models_provider', 'impl', 'tencent_model_provider',
+ 'icon', 'tencent_icon_svg')
+
+
+def _initialize_model_info():
+ model_info_list = [_create_model_info(
+ 'hunyuan-pro',
+ _('The most effective version of the current hybrid model, the trillion-level parameter scale MOE-32K long article model. Reaching the absolute leading level on various benchmarks, with complex instructions and reasoning, complex mathematical capabilities, support for function call, and application focus optimization in fields such as multi-language translation, finance, law, and medical care'),
+ ModelTypeConst.LLM,
+ TencentLLMModelCredential,
+ TencentModel
+ ),
+ _create_model_info(
+ 'hunyuan-standard',
+ _('A better routing strategy is adopted to simultaneously alleviate the problems of load balancing and expert convergence. For long articles, the needle-in-a-haystack index reaches 99.9%'),
+ ModelTypeConst.LLM,
+ TencentLLMModelCredential,
+ TencentModel),
+ _create_model_info(
+ 'hunyuan-lite',
+ _('Upgraded to MOE structure, the context window is 256k, leading many open source models in multiple evaluation sets such as NLP, code, mathematics, industry, etc.'),
+ ModelTypeConst.LLM,
+ TencentLLMModelCredential,
+ TencentModel),
+ _create_model_info(
+ 'hunyuan-role',
+ _("Hunyuan's latest version of the role-playing model, a role-playing model launched by Hunyuan's official fine-tuning training, is based on the Hunyuan model combined with the role-playing scene data set for additional training, and has better basic effects in role-playing scenes."),
+ ModelTypeConst.LLM,
+ TencentLLMModelCredential,
+ TencentModel),
+ _create_model_info(
+ 'hunyuan-functioncall',
+ _("Hunyuan's latest MOE architecture FunctionCall model has been trained with high-quality FunctionCall data and has a context window of 32K, leading in multiple dimensions of evaluation indicators."),
+ ModelTypeConst.LLM,
+ TencentLLMModelCredential,
+ TencentModel),
+ _create_model_info(
+ 'hunyuan-code',
+ _("Hunyuan's latest code generation model, after training the base model with 200B high-quality code data, and iterating on high-quality SFT data for half a year, the context long window length has been increased to 8K, and it ranks among the top in the automatic evaluation indicators of code generation in the five major languages; the five major languages In the manual high-quality evaluation of 10 comprehensive code tasks that consider all aspects, the performance is in the first echelon."),
+ ModelTypeConst.LLM,
+ TencentLLMModelCredential,
+ TencentModel),
+ ]
+
+ tencent_embedding_model_info = _create_model_info(
+ 'hunyuan-embedding',
+ _("Tencent's Hunyuan Embedding interface can convert text into high-quality vector data. The vector dimension is 1024 dimensions."),
+ ModelTypeConst.EMBEDDING,
+ TencentEmbeddingCredential,
+ TencentEmbeddingModel
+ )
+
+ model_info_embedding_list = [tencent_embedding_model_info]
+
+ model_info_vision_list = [_create_model_info(
+ 'hunyuan-vision',
+ _('Mixed element visual model'),
+ ModelTypeConst.IMAGE,
+ TencentVisionModelCredential,
+ TencentVision)]
+
+ model_info_tti_list = [_create_model_info(
+ 'hunyuan-dit',
+ _('Hunyuan graph model'),
+ ModelTypeConst.TTI,
+ TencentTTIModelCredential,
+ TencentTextToImageModel)]
+
+ model_info_manage = ModelInfoManage.builder() \
+ .append_model_info_list(model_info_list) \
+ .append_model_info_list(model_info_embedding_list) \
+ .append_model_info_list(model_info_vision_list) \
+ .append_default_model_info(model_info_vision_list[0]) \
+ .append_model_info_list(model_info_tti_list) \
+ .append_default_model_info(model_info_tti_list[0]) \
+ .append_default_model_info(model_info_list[0]) \
+ .append_default_model_info(tencent_embedding_model_info) \
+ .build()
+
+ return model_info_manage
+
+
+class TencentModelProvider(IModelProvider):
+ def __init__(self):
+ self._model_info_manage = _initialize_model_info()
+
+ def get_model_info_manage(self):
+ return self._model_info_manage
+
+ def get_model_provide_info(self):
+ icon_path = _get_tencent_icon_path()
+ icon_data = get_file_content(icon_path)
+ return ModelProvideInfo(
+ provider='model_tencent_provider',
+ name=_('Tencent Hunyuan'),
+ icon=icon_data
+ )
diff --git a/apps/setting/models_provider/impl/vllm_model_provider/__init__.py b/apps/setting/models_provider/impl/vllm_model_provider/__init__.py
new file mode 100644
index 00000000000..9bad5790a57
--- /dev/null
+++ b/apps/setting/models_provider/impl/vllm_model_provider/__init__.py
@@ -0,0 +1 @@
+# coding=utf-8
diff --git a/apps/setting/models_provider/impl/vllm_model_provider/credential/embedding.py b/apps/setting/models_provider/impl/vllm_model_provider/credential/embedding.py
new file mode 100644
index 00000000000..3ebccd20d53
--- /dev/null
+++ b/apps/setting/models_provider/impl/vllm_model_provider/credential/embedding.py
@@ -0,0 +1,53 @@
+# coding=utf-8
+"""
+ @project: MaxKB
+ @Author:虎
+ @file: embedding.py
+ @date:2024/7/12 16:45
+ @desc:
+"""
+import traceback
+from typing import Dict
+
+from django.utils.translation import gettext as _
+
+from common import forms
+from common.exception.app_exception import AppApiException
+from common.forms import BaseForm
+from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode
+
+
+class VllmEmbeddingCredential(BaseForm, BaseModelCredential):
+ def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], model_params, provider,
+ raise_exception=True):
+ model_type_list = provider.get_model_type_list()
+ if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))):
+ raise AppApiException(ValidCode.valid_error.value,
+ _('{model_type} Model type is not supported').format(model_type=model_type))
+
+ for key in ['api_base', 'api_key']:
+ if key not in model_credential:
+ if raise_exception:
+ raise AppApiException(ValidCode.valid_error.value, _('{key} is required').format(key=key))
+ else:
+ return False
+ try:
+ model = provider.get_model(model_type, model_name, model_credential)
+ model.embed_query(_('Hello'))
+ except Exception as e:
+ traceback.print_exc()
+ if isinstance(e, AppApiException):
+ raise e
+ if raise_exception:
+ raise AppApiException(ValidCode.valid_error.value,
+ _('Verification failed, please check whether the parameters are correct: {error}').format(
+ error=str(e)))
+ else:
+ return False
+ return True
+
+ def encryption_dict(self, model: Dict[str, object]):
+ return {**model, 'api_key': super().encryption(model.get('api_key', ''))}
+
+ api_base = forms.TextInputField('API URL', required=True)
+ api_key = forms.PasswordInputField('API Key', required=True)
diff --git a/apps/setting/models_provider/impl/vllm_model_provider/credential/image.py b/apps/setting/models_provider/impl/vllm_model_provider/credential/image.py
new file mode 100644
index 00000000000..68674fc0b5a
--- /dev/null
+++ b/apps/setting/models_provider/impl/vllm_model_provider/credential/image.py
@@ -0,0 +1,72 @@
+# coding=utf-8
+import traceback
+from typing import Dict
+
+from django.utils.translation import gettext_lazy as _, gettext
+from langchain_core.messages import HumanMessage
+
+from common import forms
+from common.exception.app_exception import AppApiException
+from common.forms import BaseForm, TooltipLabel
+from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode
+
+
+class VllmImageModelParams(BaseForm):
+ temperature = forms.SliderField(TooltipLabel(_('Temperature'),
+ _('Higher values make the output more random, while lower values make it more focused and deterministic')),
+ required=True, default_value=0.7,
+ _min=0.1,
+ _max=1.0,
+ _step=0.01,
+ precision=2)
+
+ max_tokens = forms.SliderField(
+ TooltipLabel(_('Output the maximum Tokens'),
+ _('Specify the maximum number of tokens that the model can generate')),
+ required=True, default_value=800,
+ _min=1,
+ _max=100000,
+ _step=1,
+ precision=0)
+
+
+class VllmImageModelCredential(BaseForm, BaseModelCredential):
+ api_base = forms.TextInputField('API URL', required=True)
+ api_key = forms.PasswordInputField('API Key', required=True)
+
+ def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], model_params, provider,
+ raise_exception=False):
+ model_type_list = provider.get_model_type_list()
+ if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))):
+ raise AppApiException(ValidCode.valid_error.value,
+ gettext('{model_type} Model type is not supported').format(model_type=model_type))
+
+ for key in ['api_base', 'api_key']:
+ if key not in model_credential:
+ if raise_exception:
+ raise AppApiException(ValidCode.valid_error.value, gettext('{key} is required').format(key=key))
+ else:
+ return False
+ try:
+ model = provider.get_model(model_type, model_name, model_credential, **model_params)
+ res = model.stream([HumanMessage(content=[{"type": "text", "text": "你好"}])])
+ for chunk in res:
+ print(chunk)
+ except Exception as e:
+ traceback.print_exc()
+ if isinstance(e, AppApiException):
+ raise e
+ if raise_exception:
+ raise AppApiException(ValidCode.valid_error.value,
+ gettext(
+ 'Verification failed, please check whether the parameters are correct: {error}').format(
+ error=str(e)))
+ else:
+ return False
+ return True
+
+ def encryption_dict(self, model: Dict[str, object]):
+ return {**model, 'api_key': super().encryption(model.get('api_key', ''))}
+
+ def get_model_params_setting_form(self, model_name):
+ return VllmImageModelParams()
diff --git a/apps/setting/models_provider/impl/vllm_model_provider/credential/llm.py b/apps/setting/models_provider/impl/vllm_model_provider/credential/llm.py
new file mode 100644
index 00000000000..6700b756d90
--- /dev/null
+++ b/apps/setting/models_provider/impl/vllm_model_provider/credential/llm.py
@@ -0,0 +1,73 @@
+# coding=utf-8
+import traceback
+from typing import Dict
+
+from django.utils.translation import gettext_lazy as _, gettext
+from langchain_core.messages import HumanMessage
+
+from common import forms
+from common.exception.app_exception import AppApiException
+from common.forms import BaseForm, TooltipLabel
+from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode
+
+
+class VLLMModelParams(BaseForm):
+ temperature = forms.SliderField(TooltipLabel(_('Temperature'),
+ _('Higher values make the output more random, while lower values make it more focused and deterministic')),
+ required=True, default_value=0.7,
+ _min=0.1,
+ _max=1.0,
+ _step=0.01,
+ precision=2)
+
+ max_tokens = forms.SliderField(
+ TooltipLabel(_('Output the maximum Tokens'),
+ _('Specify the maximum number of tokens that the model can generate')),
+ required=True, default_value=800,
+ _min=1,
+ _max=100000,
+ _step=1,
+ precision=0)
+
+
+class VLLMModelCredential(BaseForm, BaseModelCredential):
+ def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], model_params, provider,
+ raise_exception=False):
+ model_type_list = provider.get_model_type_list()
+ if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))):
+ raise AppApiException(ValidCode.valid_error.value,
+ gettext('{model_type} Model type is not supported').format(model_type=model_type))
+ try:
+ model_list = provider.get_base_model_list(model_credential.get('api_base'), model_credential.get('api_key'))
+ except Exception as e:
+ raise AppApiException(ValidCode.valid_error.value, gettext('API domain name is invalid'))
+ exist = provider.get_model_info_by_name(model_list, model_name)
+ if len(exist) == 0:
+ raise AppApiException(ValidCode.valid_error.value,
+ gettext('The model does not exist, please download the model first'))
+ model = provider.get_model(model_type, model_name, model_credential, **model_params)
+ try:
+ res = model.invoke([HumanMessage(content=gettext('Hello'))])
+ except Exception as e:
+ traceback.print_exc()
+ raise AppApiException(ValidCode.valid_error.value,
+ gettext(
+ 'Verification failed, please check whether the parameters are correct: {error}').format(
+ error=str(e)))
+ return True
+
+ def encryption_dict(self, model_info: Dict[str, object]):
+ return {**model_info, 'api_key': super().encryption(model_info.get('api_key', ''))}
+
+ def build_model(self, model_info: Dict[str, object]):
+ for key in ['api_key', 'model']:
+ if key not in model_info:
+ raise AppApiException(500, gettext('{key} is required').format(key=key))
+ self.api_key = model_info.get('api_key')
+ return self
+
+ api_base = forms.TextInputField('API URL', required=True)
+ api_key = forms.PasswordInputField('API Key', required=True)
+
+ def get_model_params_setting_form(self, model_name):
+ return VLLMModelParams()
diff --git a/apps/setting/models_provider/impl/vllm_model_provider/icon/vllm_icon_svg b/apps/setting/models_provider/impl/vllm_model_provider/icon/vllm_icon_svg
new file mode 100644
index 00000000000..1ad7d0a6db1
--- /dev/null
+++ b/apps/setting/models_provider/impl/vllm_model_provider/icon/vllm_icon_svg
@@ -0,0 +1,5 @@
+
+
+
+
diff --git a/apps/setting/models_provider/impl/vllm_model_provider/model/embedding.py b/apps/setting/models_provider/impl/vllm_model_provider/model/embedding.py
new file mode 100644
index 00000000000..616d9d9eee3
--- /dev/null
+++ b/apps/setting/models_provider/impl/vllm_model_provider/model/embedding.py
@@ -0,0 +1,23 @@
+# coding=utf-8
+"""
+ @project: MaxKB
+ @Author:虎
+ @file: embedding.py
+ @date:2024/7/12 17:44
+ @desc:
+"""
+from typing import Dict
+
+from langchain_community.embeddings import OpenAIEmbeddings
+
+from setting.models_provider.base_model_provider import MaxKBBaseModel
+
+
+class VllmEmbeddingModel(MaxKBBaseModel, OpenAIEmbeddings):
+ @staticmethod
+ def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs):
+ return VllmEmbeddingModel(
+ model=model_name,
+ openai_api_key=model_credential.get('api_key'),
+ openai_api_base=model_credential.get('api_base'),
+ )
diff --git a/apps/setting/models_provider/impl/vllm_model_provider/model/image.py b/apps/setting/models_provider/impl/vllm_model_provider/model/image.py
new file mode 100644
index 00000000000..c8cb0a84db9
--- /dev/null
+++ b/apps/setting/models_provider/impl/vllm_model_provider/model/image.py
@@ -0,0 +1,38 @@
+from typing import Dict, List
+
+from langchain_core.messages import get_buffer_string, BaseMessage
+
+from common.config.tokenizer_manage_config import TokenizerManage
+from setting.models_provider.base_model_provider import MaxKBBaseModel
+from setting.models_provider.impl.base_chat_open_ai import BaseChatOpenAI
+
+
+class VllmImage(MaxKBBaseModel, BaseChatOpenAI):
+
+ @staticmethod
+ def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs):
+ optional_params = MaxKBBaseModel.filter_optional_params(model_kwargs)
+ return VllmImage(
+ model_name=model_name,
+ openai_api_base=model_credential.get('api_base'),
+ openai_api_key=model_credential.get('api_key'),
+ # stream_options={"include_usage": True},
+ streaming=True,
+ stream_usage=True,
+ extra_body=optional_params
+ )
+
+ def is_cache_model(self):
+ return False
+
+ def get_num_tokens_from_messages(self, messages: List[BaseMessage]) -> int:
+ if self.usage_metadata is None or self.usage_metadata == {}:
+ tokenizer = TokenizerManage.get_tokenizer()
+ return sum([len(tokenizer.encode(get_buffer_string([m]))) for m in messages])
+ return self.usage_metadata.get('input_tokens', 0)
+
+ def get_num_tokens(self, text: str) -> int:
+ if self.usage_metadata is None or self.usage_metadata == {}:
+ tokenizer = TokenizerManage.get_tokenizer()
+ return len(tokenizer.encode(text))
+ return self.get_last_generation_info().get('output_tokens', 0)
diff --git a/apps/setting/models_provider/impl/vllm_model_provider/model/llm.py b/apps/setting/models_provider/impl/vllm_model_provider/model/llm.py
new file mode 100644
index 00000000000..4662a616965
--- /dev/null
+++ b/apps/setting/models_provider/impl/vllm_model_provider/model/llm.py
@@ -0,0 +1,57 @@
+# coding=utf-8
+
+from typing import Dict, Optional, Sequence, Union, Any, Callable
+from urllib.parse import urlparse, ParseResult
+
+from langchain_core.messages import BaseMessage, get_buffer_string
+from langchain_core.tools import BaseTool
+
+from common.config.tokenizer_manage_config import TokenizerManage
+from setting.models_provider.base_model_provider import MaxKBBaseModel
+from setting.models_provider.impl.base_chat_open_ai import BaseChatOpenAI
+
+
+def get_base_url(url: str):
+ parse = urlparse(url)
+ result_url = ParseResult(scheme=parse.scheme, netloc=parse.netloc, path=parse.path, params='',
+ query='',
+ fragment='').geturl()
+ return result_url[:-1] if result_url.endswith("/") else result_url
+
+
+class VllmChatModel(MaxKBBaseModel, BaseChatOpenAI):
+
+ @staticmethod
+ def is_cache_model():
+ return False
+
+ @staticmethod
+ def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs):
+ optional_params = MaxKBBaseModel.filter_optional_params(model_kwargs)
+ vllm_chat_open_ai = VllmChatModel(
+ model=model_name,
+ openai_api_base=model_credential.get('api_base'),
+ openai_api_key=model_credential.get('api_key'),
+ streaming=True,
+ stream_usage=True,
+ extra_body=optional_params
+ )
+ return vllm_chat_open_ai
+
+ def get_num_tokens_from_messages(
+ self,
+ messages: list[BaseMessage],
+ tools: Optional[
+ Sequence[Union[dict[str, Any], type, Callable, BaseTool]]
+ ] = None,
+ ) -> int:
+ if self.usage_metadata is None or self.usage_metadata == {}:
+ tokenizer = TokenizerManage.get_tokenizer()
+ return sum([len(tokenizer.encode(get_buffer_string([m]))) for m in messages])
+ return self.usage_metadata.get('input_tokens', 0)
+
+ def get_num_tokens(self, text: str) -> int:
+ if self.usage_metadata is None or self.usage_metadata == {}:
+ tokenizer = TokenizerManage.get_tokenizer()
+ return len(tokenizer.encode(text))
+ return self.get_last_generation_info().get('output_tokens', 0)
diff --git a/apps/setting/models_provider/impl/vllm_model_provider/vllm_model_provider.py b/apps/setting/models_provider/impl/vllm_model_provider/vllm_model_provider.py
new file mode 100644
index 00000000000..7dc6664a088
--- /dev/null
+++ b/apps/setting/models_provider/impl/vllm_model_provider/vllm_model_provider.py
@@ -0,0 +1,84 @@
+# coding=utf-8
+import os
+from urllib.parse import urlparse, ParseResult
+
+import requests
+
+from common.util.file_util import get_file_content
+from setting.models_provider.base_model_provider import IModelProvider, ModelProvideInfo, ModelInfo, ModelTypeConst, \
+ ModelInfoManage
+from setting.models_provider.impl.vllm_model_provider.credential.embedding import VllmEmbeddingCredential
+from setting.models_provider.impl.vllm_model_provider.credential.image import VllmImageModelCredential
+from setting.models_provider.impl.vllm_model_provider.credential.llm import VLLMModelCredential
+from setting.models_provider.impl.vllm_model_provider.model.embedding import VllmEmbeddingModel
+from setting.models_provider.impl.vllm_model_provider.model.image import VllmImage
+from setting.models_provider.impl.vllm_model_provider.model.llm import VllmChatModel
+from smartdoc.conf import PROJECT_DIR
+from django.utils.translation import gettext as _
+
+v_llm_model_credential = VLLMModelCredential()
+image_model_credential = VllmImageModelCredential()
+embedding_model_credential = VllmEmbeddingCredential()
+
+model_info_list = [
+ ModelInfo('facebook/opt-125m', _('Facebook’s 125M parameter model'), ModelTypeConst.LLM, v_llm_model_credential, VllmChatModel),
+ ModelInfo('BAAI/Aquila-7B', _('BAAI’s 7B parameter model'), ModelTypeConst.LLM, v_llm_model_credential, VllmChatModel),
+    ModelInfo('BAAI/AquilaChat-7B', _('BAAI’s 7B parameter chat model'), ModelTypeConst.LLM, v_llm_model_credential, VllmChatModel),
+
+]
+
+image_model_info_list = [
+ ModelInfo('Qwen/Qwen2-VL-2B-Instruct', '', ModelTypeConst.IMAGE, image_model_credential, VllmImage),
+]
+
+embedding_model_info_list = [
+ ModelInfo('HIT-TMG/KaLM-embedding-multilingual-mini-instruct-v1.5', '', ModelTypeConst.EMBEDDING, embedding_model_credential, VllmEmbeddingModel),
+]
+
+model_info_manage = (
+ ModelInfoManage.builder()
+ .append_model_info_list(model_info_list)
+ .append_default_model_info(ModelInfo('facebook/opt-125m',
+ _('Facebook’s 125M parameter model'),
+ ModelTypeConst.LLM, v_llm_model_credential, VllmChatModel))
+ .append_model_info_list(image_model_info_list)
+ .append_default_model_info(image_model_info_list[0])
+ .append_model_info_list(embedding_model_info_list)
+ .append_default_model_info(embedding_model_info_list[0])
+ .build()
+)
+
+
+def get_base_url(url: str):
+ parse = urlparse(url)
+ result_url = ParseResult(scheme=parse.scheme, netloc=parse.netloc, path=parse.path, params='',
+ query='',
+ fragment='').geturl()
+ return result_url[:-1] if result_url.endswith("/") else result_url
+
+
+class VllmModelProvider(IModelProvider):
+ def get_model_info_manage(self):
+ return model_info_manage
+
+ def get_model_provide_info(self):
+ return ModelProvideInfo(provider='model_vllm_provider', name='vLLM', icon=get_file_content(
+ os.path.join(PROJECT_DIR, "apps", "setting", 'models_provider', 'impl', 'vllm_model_provider', 'icon',
+ 'vllm_icon_svg')))
+
+ @staticmethod
+ def get_base_model_list(api_base, api_key):
+ base_url = get_base_url(api_base)
+ base_url = base_url if base_url.endswith('/v1') else (base_url + '/v1')
+ headers = {}
+ if api_key:
+ headers['Authorization'] = f"Bearer {api_key}"
+ r = requests.request(method="GET", url=f"{base_url}/models", headers=headers, timeout=5)
+ r.raise_for_status()
+ return r.json().get('data')
+
+ @staticmethod
+ def get_model_info_by_name(model_list, model_name):
+ if model_list is None:
+ return []
+ return [model for model in model_list if model.get('id') == model_name]
diff --git a/apps/setting/models_provider/impl/volcanic_engine_model_provider/__init__.py b/apps/setting/models_provider/impl/volcanic_engine_model_provider/__init__.py
new file mode 100644
index 00000000000..8cb7f459eae
--- /dev/null
+++ b/apps/setting/models_provider/impl/volcanic_engine_model_provider/__init__.py
@@ -0,0 +1,2 @@
+#!/usr/bin/env python
+# -*- coding: UTF-8 -*-
diff --git a/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/embedding.py b/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/embedding.py
new file mode 100644
index 00000000000..40cb8baee38
--- /dev/null
+++ b/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/embedding.py
@@ -0,0 +1,53 @@
+# coding=utf-8
+"""
+ @project: MaxKB
+ @Author:虎
+ @file: embedding.py
+ @date:2024/7/12 16:45
+ @desc:
+"""
+import traceback
+from typing import Dict
+
+from django.utils.translation import gettext as _
+
+from common import forms
+from common.exception.app_exception import AppApiException
+from common.forms import BaseForm
+from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode
+
+
+class VolcanicEmbeddingCredential(BaseForm, BaseModelCredential):
+ def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], model_params, provider,
+ raise_exception=True):
+ model_type_list = provider.get_model_type_list()
+ if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))):
+ raise AppApiException(ValidCode.valid_error.value,
+ _('{model_type} Model type is not supported').format(model_type=model_type))
+
+ for key in ['api_base', 'api_key']:
+ if key not in model_credential:
+ if raise_exception:
+ raise AppApiException(ValidCode.valid_error.value, _('{key} is required').format(key=key))
+ else:
+ return False
+ try:
+ model = provider.get_model(model_type, model_name, model_credential)
+ model.embed_query(_('Hello'))
+ except Exception as e:
+ traceback.print_exc()
+ if isinstance(e, AppApiException):
+ raise e
+ if raise_exception:
+ raise AppApiException(ValidCode.valid_error.value,
+ _('Verification failed, please check whether the parameters are correct: {error}').format(
+ error=str(e)))
+ else:
+ return False
+ return True
+
+ def encryption_dict(self, model: Dict[str, object]):
+ return {**model, 'api_key': super().encryption(model.get('api_key', ''))}
+
+ api_base = forms.TextInputField('API URL', required=True)
+ api_key = forms.PasswordInputField('API Key', required=True)
diff --git a/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/image.py b/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/image.py
new file mode 100644
index 00000000000..23d22cf932c
--- /dev/null
+++ b/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/image.py
@@ -0,0 +1,72 @@
+# coding=utf-8
+import traceback
+from typing import Dict
+
+from django.utils.translation import gettext_lazy as _, gettext
+from langchain_core.messages import HumanMessage
+
+from common import forms
+from common.exception.app_exception import AppApiException
+from common.forms import BaseForm, TooltipLabel
+from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode
+
+
+class VolcanicEngineImageModelParams(BaseForm):
+ temperature = forms.SliderField(TooltipLabel(_('Temperature'),
+ _('Higher values make the output more random, while lower values make it more focused and deterministic')),
+ required=True, default_value=0.95,
+ _min=0.1,
+ _max=1.0,
+ _step=0.01,
+ precision=2)
+
+ max_tokens = forms.SliderField(
+ TooltipLabel(_('Output the maximum Tokens'),
+ _('Specify the maximum number of tokens that the model can generate')),
+ required=True, default_value=1024,
+ _min=1,
+ _max=100000,
+ _step=1,
+ precision=0)
+
+
+class VolcanicEngineImageModelCredential(BaseForm, BaseModelCredential):
+ api_key = forms.PasswordInputField('API Key', required=True)
+ api_base = forms.TextInputField('API URL', required=True)
+
+ def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], model_params, provider,
+ raise_exception=False):
+ model_type_list = provider.get_model_type_list()
+ if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))):
+ raise AppApiException(ValidCode.valid_error.value,
+ gettext('{model_type} Model type is not supported').format(model_type=model_type))
+
+ for key in ['api_key', 'api_base']:
+ if key not in model_credential:
+ if raise_exception:
+ raise AppApiException(ValidCode.valid_error.value, gettext('{key} is required').format(key=key))
+ else:
+ return False
+ try:
+ model = provider.get_model(model_type, model_name, model_credential, **model_params)
+ res = model.stream([HumanMessage(content=[{"type": "text", "text": gettext('Hello')}])])
+ for chunk in res:
+ print(chunk)
+ except Exception as e:
+ traceback.print_exc()
+ if isinstance(e, AppApiException):
+ raise e
+ if raise_exception:
+ raise AppApiException(ValidCode.valid_error.value,
+ gettext(
+ 'Verification failed, please check whether the parameters are correct: {error}').format(
+ error=str(e)))
+ else:
+ return False
+ return True
+
+ def encryption_dict(self, model: Dict[str, object]):
+ return {**model, 'api_key': super().encryption(model.get('api_key', ''))}
+
+ def get_model_params_setting_form(self, model_name):
+ return VolcanicEngineImageModelParams()
diff --git a/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/llm.py b/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/llm.py
new file mode 100644
index 00000000000..3b7734da5b0
--- /dev/null
+++ b/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/llm.py
@@ -0,0 +1,79 @@
+# coding=utf-8
+"""
+ @project: MaxKB
+ @Author:虎
+ @file: llm.py
+ @date:2024/7/11 17:57
+ @desc:
+"""
+import traceback
+from typing import Dict
+
+from django.utils.translation import gettext_lazy as _, gettext
+from langchain_core.messages import HumanMessage
+
+from common import forms
+from common.exception.app_exception import AppApiException
+from common.forms import BaseForm, TooltipLabel
+from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode
+
+
+class VolcanicEngineLLMModelParams(BaseForm):
+ temperature = forms.SliderField(TooltipLabel(_('Temperature'),
+ _('Higher values make the output more random, while lower values make it more focused and deterministic')),
+ required=True, default_value=0.3,
+ _min=0.1,
+ _max=1.0,
+ _step=0.01,
+ precision=2)
+
+ max_tokens = forms.SliderField(
+ TooltipLabel(_('Output the maximum Tokens'),
+ _('Specify the maximum number of tokens that the model can generate')),
+ required=True, default_value=1024,
+ _min=1,
+ _max=100000,
+ _step=1,
+ precision=0)
+
+
+class VolcanicEngineLLMModelCredential(BaseForm, BaseModelCredential):
+
+ def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], model_params, provider,
+ raise_exception=False):
+ model_type_list = provider.get_model_type_list()
+ if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))):
+ raise AppApiException(ValidCode.valid_error.value,
+ gettext('{model_type} Model type is not supported').format(model_type=model_type))
+
+ for key in ['access_key_id', 'secret_access_key']:
+ if key not in model_credential:
+ if raise_exception:
+ raise AppApiException(ValidCode.valid_error.value, gettext('{key} is required').format(key=key))
+ else:
+ return False
+ try:
+ model = provider.get_model(model_type, model_name, model_credential, **model_params)
+ res = model.invoke([HumanMessage(content=gettext('Hello'))])
+ print(res)
+ except Exception as e:
+ traceback.print_exc()
+ if isinstance(e, AppApiException):
+ raise e
+ if raise_exception:
+ raise AppApiException(ValidCode.valid_error.value,
+ gettext(
+ 'Verification failed, please check whether the parameters are correct: {error}').format(
+ error=str(e)))
+ else:
+ return False
+ return True
+
+ def encryption_dict(self, model: Dict[str, object]):
+ return {**model, 'access_key_id': super().encryption(model.get('access_key_id', ''))}
+
+ access_key_id = forms.PasswordInputField('Access Key ID', required=True)
+ secret_access_key = forms.PasswordInputField('Secret Access Key', required=True)
+
+ def get_model_params_setting_form(self, model_name):
+ return VolcanicEngineLLMModelParams()
diff --git a/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/stt.py b/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/stt.py
new file mode 100644
index 00000000000..6aae433c373
--- /dev/null
+++ b/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/stt.py
@@ -0,0 +1,52 @@
+# coding=utf-8
+import traceback
+from typing import Dict
+
+from django.utils.translation import gettext as _
+
+from common import forms
+from common.exception.app_exception import AppApiException
+from common.forms import BaseForm
+from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode
+
+
+class VolcanicEngineSTTModelCredential(BaseForm, BaseModelCredential):
+ volcanic_api_url = forms.TextInputField('API URL', required=True,
+ default_value='wss://openspeech.bytedance.com/api/v2/asr')
+ volcanic_app_id = forms.TextInputField('App ID', required=True)
+ volcanic_token = forms.PasswordInputField('Access Token', required=True)
+ volcanic_cluster = forms.TextInputField('Cluster ID', required=True)
+
+ def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], model_params, provider,
+ raise_exception=False):
+ model_type_list = provider.get_model_type_list()
+ if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))):
+ raise AppApiException(ValidCode.valid_error.value,
+ _('{model_type} Model type is not supported').format(model_type=model_type))
+
+ for key in ['volcanic_api_url', 'volcanic_app_id', 'volcanic_token', 'volcanic_cluster']:
+ if key not in model_credential:
+ if raise_exception:
+ raise AppApiException(ValidCode.valid_error.value, _('{key} is required').format(key=key))
+ else:
+ return False
+ try:
+ model = provider.get_model(model_type, model_name, model_credential)
+ model.check_auth()
+ except Exception as e:
+ traceback.print_exc()
+ if isinstance(e, AppApiException):
+ raise e
+ if raise_exception:
+ raise AppApiException(ValidCode.valid_error.value,
+ _('Verification failed, please check whether the parameters are correct: {error}').format(
+ error=str(e)))
+ else:
+ return False
+ return True
+
+ def encryption_dict(self, model: Dict[str, object]):
+ return {**model, 'volcanic_token': super().encryption(model.get('volcanic_token', ''))}
+
+ def get_model_params_setting_form(self, model_name):
+ pass
diff --git a/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/tti.py b/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/tti.py
new file mode 100644
index 00000000000..98c119e21cb
--- /dev/null
+++ b/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/tti.py
@@ -0,0 +1,68 @@
+# coding=utf-8
+import traceback
+from typing import Dict
+
+from django.utils.translation import gettext_lazy as _, gettext
+
+from common import forms
+from common.exception.app_exception import AppApiException
+from common.forms import BaseForm, TooltipLabel
+from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode
+
+
+class VolcanicEngineTTIModelGeneralParams(BaseForm):
+ size = forms.SingleSelect(
+ TooltipLabel(_('Image size'),
+ _('If the gap between width, height and 512 is too large, the picture rendering effect will be poor and the probability of excessive delay will increase significantly. Recommended ratio and corresponding width and height before super score: width*height')),
+ required=True,
+ default_value='512*512',
+ option_list=[
+ {'value': '512*512', 'label': '512*512'},
+ {'value': '512*384', 'label': '512*384'},
+ {'value': '384*512', 'label': '384*512'},
+ {'value': '512*341', 'label': '512*341'},
+ {'value': '341*512', 'label': '341*512'},
+ {'value': '512*288', 'label': '512*288'},
+ {'value': '288*512', 'label': '288*512'},
+ ],
+ text_field='label',
+ value_field='value')
+
+
+class VolcanicEngineTTIModelCredential(BaseForm, BaseModelCredential):
+ access_key = forms.PasswordInputField('Access Key ID', required=True)
+ secret_key = forms.PasswordInputField('Secret Access Key', required=True)
+
+ def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], model_params, provider,
+ raise_exception=False):
+ model_type_list = provider.get_model_type_list()
+ if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))):
+ raise AppApiException(ValidCode.valid_error.value,
+ gettext('{model_type} Model type is not supported').format(model_type=model_type))
+
+ for key in ['access_key', 'secret_key']:
+ if key not in model_credential:
+ if raise_exception:
+ raise AppApiException(ValidCode.valid_error.value, gettext('{key} is required').format(key=key))
+ else:
+ return False
+ try:
+ model = provider.get_model(model_type, model_name, model_credential, **model_params)
+ model.check_auth()
+ except Exception as e:
+ traceback.print_exc()
+ if isinstance(e, AppApiException):
+ raise e
+ if raise_exception:
+ raise AppApiException(ValidCode.valid_error.value, gettext(
+ 'Verification failed, please check whether the parameters are correct: {error}').format(
+ error=str(e)))
+ else:
+ return False
+ return True
+
+ def encryption_dict(self, model: Dict[str, object]):
+ return {**model, 'secret_key': super().encryption(model.get('secret_key', ''))}
+
+ def get_model_params_setting_form(self, model_name):
+ return VolcanicEngineTTIModelGeneralParams()
diff --git a/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/tts.py b/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/tts.py
new file mode 100644
index 00000000000..4d0b68363ff
--- /dev/null
+++ b/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/tts.py
@@ -0,0 +1,78 @@
+# coding=utf-8
+import traceback
+from typing import Dict
+
+from django.utils.translation import gettext_lazy as _, gettext
+
+from common import forms
+from common.exception.app_exception import AppApiException
+from common.forms import BaseForm, TooltipLabel
+from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode
+
+
+class VolcanicEngineTTSModelGeneralParams(BaseForm):
+ voice_type = forms.SingleSelect(
+ TooltipLabel(_('timbre'), _('Chinese sounds can support mixed scenes of Chinese and English')),
+ required=True, default_value='zh_female_cancan_mars_bigtts',
+ text_field='value',
+ value_field='value',
+ option_list=[
+ {'text': '灿灿/Shiny', 'value': 'zh_female_cancan_mars_bigtts'},
+ {'text': '清新女声', 'value': 'zh_female_qingxinnvsheng_mars_bigtts'},
+ {'text': '爽快思思/Skye', 'value': 'zh_female_shuangkuaisisi_moon_bigtts'},
+            {'text': '湾区大叔', 'value': 'zh_female_wanqudashu_moon_bigtts'},
+ {'text': '呆萌川妹', 'value': 'zh_female_daimengchuanmei_moon_bigtts'},
+ {'text': '广州德哥', 'value': 'zh_male_guozhoudege_moon_bigtts'},
+ {'text': '北京小爷', 'value': 'zh_male_beijingxiaoye_moon_bigtts'},
+ {'text': '少年梓辛/Brayan', 'value': 'zh_male_shaonianzixin_moon_bigtts'},
+ {'text': '魅力女友', 'value': 'zh_female_meilinvyou_moon_bigtts'},
+ ])
+ speed_ratio = forms.SliderField(
+ TooltipLabel(_('speaking speed'), _('[0.2,3], the default is 1, usually one decimal place is enough')),
+ required=True, default_value=1,
+ _min=0.2,
+ _max=3,
+ _step=0.1,
+ precision=1)
+
+
+class VolcanicEngineTTSModelCredential(BaseForm, BaseModelCredential):
+ volcanic_api_url = forms.TextInputField('API URL', required=True,
+ default_value='wss://openspeech.bytedance.com/api/v1/tts/ws_binary')
+ volcanic_app_id = forms.TextInputField('App ID', required=True)
+ volcanic_token = forms.PasswordInputField('Access Token', required=True)
+ volcanic_cluster = forms.TextInputField('Cluster ID', required=True)
+
+ def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], model_params, provider,
+ raise_exception=False):
+ model_type_list = provider.get_model_type_list()
+ if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))):
+ raise AppApiException(ValidCode.valid_error.value,
+ gettext('{model_type} Model type is not supported').format(model_type=model_type))
+
+ for key in ['volcanic_api_url', 'volcanic_app_id', 'volcanic_token', 'volcanic_cluster']:
+ if key not in model_credential:
+ if raise_exception:
+ raise AppApiException(ValidCode.valid_error.value, gettext('{key} is required').format(key=key))
+ else:
+ return False
+ try:
+ model = provider.get_model(model_type, model_name, model_credential, **model_params)
+ model.check_auth()
+ except Exception as e:
+ traceback.print_exc()
+ if isinstance(e, AppApiException):
+ raise e
+ if raise_exception:
+ raise AppApiException(ValidCode.valid_error.value, gettext(
+ 'Verification failed, please check whether the parameters are correct: {error}').format(
+ error=str(e)))
+ else:
+ return False
+ return True
+
+ def encryption_dict(self, model: Dict[str, object]):
+ return {**model, 'volcanic_token': super().encryption(model.get('volcanic_token', ''))}
+
+ def get_model_params_setting_form(self, model_name):
+ return VolcanicEngineTTSModelGeneralParams()
diff --git a/apps/setting/models_provider/impl/volcanic_engine_model_provider/icon/volcanic_engine_icon_svg b/apps/setting/models_provider/impl/volcanic_engine_model_provider/icon/volcanic_engine_icon_svg
new file mode 100644
index 00000000000..05a1279ef4d
--- /dev/null
+++ b/apps/setting/models_provider/impl/volcanic_engine_model_provider/icon/volcanic_engine_icon_svg
@@ -0,0 +1,5 @@
+
+
+
+
diff --git a/apps/setting/models_provider/impl/volcanic_engine_model_provider/model/embedding.py b/apps/setting/models_provider/impl/volcanic_engine_model_provider/model/embedding.py
new file mode 100644
index 00000000000..b950beacf34
--- /dev/null
+++ b/apps/setting/models_provider/impl/volcanic_engine_model_provider/model/embedding.py
@@ -0,0 +1,16 @@
+from typing import Dict
+
+from langchain_openai import OpenAIEmbeddings
+
+from setting.models_provider.base_model_provider import MaxKBBaseModel
+
+
+class VolcanicEngineEmbeddingModel(MaxKBBaseModel, OpenAIEmbeddings):
+ @staticmethod
+ def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs):
+ return VolcanicEngineEmbeddingModel(
+ openai_api_key=model_credential.get('api_key'),
+ model=model_name,
+ openai_api_base=model_credential.get('api_base'),
+ check_embedding_ctx_length=False,
+ )
diff --git a/apps/setting/models_provider/impl/volcanic_engine_model_provider/model/iat_mp3_16k.mp3 b/apps/setting/models_provider/impl/volcanic_engine_model_provider/model/iat_mp3_16k.mp3
new file mode 100644
index 00000000000..75e744c8ff5
Binary files /dev/null and b/apps/setting/models_provider/impl/volcanic_engine_model_provider/model/iat_mp3_16k.mp3 differ
diff --git a/apps/setting/models_provider/impl/volcanic_engine_model_provider/model/image.py b/apps/setting/models_provider/impl/volcanic_engine_model_provider/model/image.py
new file mode 100644
index 00000000000..6e2517bd4ad
--- /dev/null
+++ b/apps/setting/models_provider/impl/volcanic_engine_model_provider/model/image.py
@@ -0,0 +1,20 @@
+from typing import Dict
+
+from setting.models_provider.base_model_provider import MaxKBBaseModel
+from setting.models_provider.impl.base_chat_open_ai import BaseChatOpenAI
+
+
+class VolcanicEngineImage(MaxKBBaseModel, BaseChatOpenAI):
+
+ @staticmethod
+ def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs):
+ optional_params = MaxKBBaseModel.filter_optional_params(model_kwargs)
+ return VolcanicEngineImage(
+ model_name=model_name,
+ openai_api_key=model_credential.get('api_key'),
+ openai_api_base=model_credential.get('api_base'),
+ # stream_options={"include_usage": True},
+ streaming=True,
+ stream_usage=True,
+ extra_body=optional_params
+ )
diff --git a/apps/setting/models_provider/impl/volcanic_engine_model_provider/model/llm.py b/apps/setting/models_provider/impl/volcanic_engine_model_provider/model/llm.py
new file mode 100644
index 00000000000..8f089f26988
--- /dev/null
+++ b/apps/setting/models_provider/impl/volcanic_engine_model_provider/model/llm.py
@@ -0,0 +1,21 @@
+from typing import List, Dict
+
+from setting.models_provider.base_model_provider import MaxKBBaseModel
+
+from setting.models_provider.impl.base_chat_open_ai import BaseChatOpenAI
+
+
+class VolcanicEngineChatModel(MaxKBBaseModel, BaseChatOpenAI):
+ @staticmethod
+ def is_cache_model():
+ return False
+
+ @staticmethod
+ def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs):
+ optional_params = MaxKBBaseModel.filter_optional_params(model_kwargs)
+ return VolcanicEngineChatModel(
+ model=model_name,
+ openai_api_base=model_credential.get('api_base'),
+ openai_api_key=model_credential.get('api_key'),
+ extra_body=optional_params
+ )
diff --git a/apps/setting/models_provider/impl/volcanic_engine_model_provider/model/stt.py b/apps/setting/models_provider/impl/volcanic_engine_model_provider/model/stt.py
new file mode 100644
index 00000000000..c441bee8ed3
--- /dev/null
+++ b/apps/setting/models_provider/impl/volcanic_engine_model_provider/model/stt.py
@@ -0,0 +1,343 @@
+# coding=utf-8
+
+"""
+requires Python 3.6 or later
+
+requires the third-party 'websockets' package (asyncio is in the standard library):
+pip install websockets
+"""
+import asyncio
+import base64
+import gzip
+import hmac
+import json
+import os
+import ssl
+import uuid
+import wave
+from hashlib import sha256
+from io import BytesIO
+from typing import Dict
+from urllib.parse import urlparse
+
+import websockets
+
+from setting.models_provider.base_model_provider import MaxKBBaseModel
+from setting.models_provider.impl.base_stt import BaseSpeechToText
+
+audio_format = "mp3" # wav 或者 mp3,根据实际音频格式设置
+
+PROTOCOL_VERSION = 0b0001
+DEFAULT_HEADER_SIZE = 0b0001
+
+PROTOCOL_VERSION_BITS = 4
+HEADER_BITS = 4
+MESSAGE_TYPE_BITS = 4
+MESSAGE_TYPE_SPECIFIC_FLAGS_BITS = 4
+MESSAGE_SERIALIZATION_BITS = 4
+MESSAGE_COMPRESSION_BITS = 4
+RESERVED_BITS = 8
+
+# Message Type:
+CLIENT_FULL_REQUEST = 0b0001
+CLIENT_AUDIO_ONLY_REQUEST = 0b0010
+SERVER_FULL_RESPONSE = 0b1001
+SERVER_ACK = 0b1011
+SERVER_ERROR_RESPONSE = 0b1111
+
+# Message Type Specific Flags
+NO_SEQUENCE = 0b0000 # no check sequence
+POS_SEQUENCE = 0b0001
+NEG_SEQUENCE = 0b0010
+NEG_SEQUENCE_1 = 0b0011
+
+# Message Serialization
+NO_SERIALIZATION = 0b0000
+JSON = 0b0001
+THRIFT = 0b0011
+CUSTOM_TYPE = 0b1111
+
+# Message Compression
+NO_COMPRESSION = 0b0000
+GZIP = 0b0001
+CUSTOM_COMPRESSION = 0b1111
+
+ssl_context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
+ssl_context.check_hostname = False
+ssl_context.verify_mode = ssl.CERT_NONE
+
+
+def generate_header(
+ version=PROTOCOL_VERSION,
+ message_type=CLIENT_FULL_REQUEST,
+ message_type_specific_flags=NO_SEQUENCE,
+ serial_method=JSON,
+ compression_type=GZIP,
+ reserved_data=0x00,
+ extension_header=bytes()
+):
+ """
+ protocol_version(4 bits), header_size(4 bits),
+ message_type(4 bits), message_type_specific_flags(4 bits)
+ serialization_method(4 bits) message_compression(4 bits)
+ reserved (8bits) 保留字段
+ header_extensions 扩展头(大小等于 8 * 4 * (header_size - 1) )
+ """
+ header = bytearray()
+ header_size = int(len(extension_header) / 4) + 1
+ header.append((version << 4) | header_size)
+ header.append((message_type << 4) | message_type_specific_flags)
+ header.append((serial_method << 4) | compression_type)
+ header.append(reserved_data)
+ header.extend(extension_header)
+ return header
+
+
+def generate_full_default_header():
+ return generate_header()
+
+
+def generate_audio_default_header():
+ return generate_header(
+ message_type=CLIENT_AUDIO_ONLY_REQUEST
+ )
+
+
+def generate_last_audio_default_header():
+ return generate_header(
+ message_type=CLIENT_AUDIO_ONLY_REQUEST,
+ message_type_specific_flags=NEG_SEQUENCE
+ )
+
+
+def parse_response(res):
+ """
+ protocol_version(4 bits), header_size(4 bits),
+ message_type(4 bits), message_type_specific_flags(4 bits)
+ serialization_method(4 bits) message_compression(4 bits)
+ reserved (8bits) 保留字段
+ header_extensions 扩展头(大小等于 8 * 4 * (header_size - 1) )
+ payload 类似与http 请求体
+ """
+ protocol_version = res[0] >> 4
+ header_size = res[0] & 0x0f
+ message_type = res[1] >> 4
+ message_type_specific_flags = res[1] & 0x0f
+ serialization_method = res[2] >> 4
+ message_compression = res[2] & 0x0f
+ reserved = res[3]
+ header_extensions = res[4:header_size * 4]
+ payload = res[header_size * 4:]
+ result = {}
+ payload_msg = None
+ payload_size = 0
+ if message_type == SERVER_FULL_RESPONSE:
+ payload_size = int.from_bytes(payload[:4], "big", signed=True)
+ payload_msg = payload[4:]
+ elif message_type == SERVER_ACK:
+ seq = int.from_bytes(payload[:4], "big", signed=True)
+ result['seq'] = seq
+ if len(payload) >= 8:
+ payload_size = int.from_bytes(payload[4:8], "big", signed=False)
+ payload_msg = payload[8:]
+ elif message_type == SERVER_ERROR_RESPONSE:
+ code = int.from_bytes(payload[:4], "big", signed=False)
+ result['code'] = code
+ payload_size = int.from_bytes(payload[4:8], "big", signed=False)
+ payload_msg = payload[8:]
+ print(f"Error code: {code}, message: {payload_msg}")
+ if payload_msg is None:
+ return result
+ if message_compression == GZIP:
+ payload_msg = gzip.decompress(payload_msg)
+ if serialization_method == JSON:
+ payload_msg = json.loads(str(payload_msg, "utf-8"))
+ elif serialization_method != NO_SERIALIZATION:
+ payload_msg = str(payload_msg, "utf-8")
+ result['payload_msg'] = payload_msg
+ result['payload_size'] = payload_size
+ return result
+
+
+def read_wav_info(data: bytes = None) -> (int, int, int, int, int):
+ with BytesIO(data) as _f:
+ wave_fp = wave.open(_f, 'rb')
+ nchannels, sampwidth, framerate, nframes = wave_fp.getparams()[:4]
+ wave_bytes = wave_fp.readframes(nframes)
+ return nchannels, sampwidth, framerate, nframes, len(wave_bytes)
+
+
+class VolcanicEngineSpeechToText(MaxKBBaseModel, BaseSpeechToText):
+ workflow: str = "audio_in,resample,partition,vad,fe,decode,itn,nlu_punctuate"
+ show_language: bool = False
+ show_utterances: bool = False
+ result_type: str = "full"
+ format: str = "mp3"
+ rate: int = 16000
+ language: str = "zh-CN"
+ bits: int = 16
+ channel: int = 1
+ codec: str = "raw"
+ audio_type: int = 1
+ secret: str = "access_secret"
+ auth_method: str = "token"
+ mp3_seg_size: int = 10000
+ success_code: int = 1000 # success code, default is 1000
+ seg_duration: int = 15000
+ nbest: int = 1
+
+ volcanic_app_id: str
+ volcanic_cluster: str
+ volcanic_api_url: str
+ volcanic_token: str
+
+ def __init__(self, **kwargs):
+ super().__init__(**kwargs)
+ self.volcanic_api_url = kwargs.get('volcanic_api_url')
+ self.volcanic_token = kwargs.get('volcanic_token')
+ self.volcanic_app_id = kwargs.get('volcanic_app_id')
+ self.volcanic_cluster = kwargs.get('volcanic_cluster')
+
+ @staticmethod
+ def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs):
+ optional_params = {}
+ if 'max_tokens' in model_kwargs and model_kwargs['max_tokens'] is not None:
+ optional_params['max_tokens'] = model_kwargs['max_tokens']
+ if 'temperature' in model_kwargs and model_kwargs['temperature'] is not None:
+ optional_params['temperature'] = model_kwargs['temperature']
+ return VolcanicEngineSpeechToText(
+ volcanic_api_url=model_credential.get('volcanic_api_url'),
+ volcanic_token=model_credential.get('volcanic_token'),
+ volcanic_app_id=model_credential.get('volcanic_app_id'),
+ volcanic_cluster=model_credential.get('volcanic_cluster'),
+ **optional_params
+ )
+
+ def construct_request(self, reqid):
+ req = {
+ 'app': {
+ 'appid': self.volcanic_app_id,
+ 'cluster': self.volcanic_cluster,
+ 'token': self.volcanic_token,
+ },
+ 'user': {
+ 'uid': 'uid'
+ },
+ 'request': {
+ 'reqid': reqid,
+ 'nbest': self.nbest,
+ 'workflow': self.workflow,
+ 'show_language': self.show_language,
+ 'show_utterances': self.show_utterances,
+ 'result_type': self.result_type,
+ "sequence": 1
+ },
+ 'audio': {
+ 'format': self.format,
+ 'rate': self.rate,
+ 'language': self.language,
+ 'bits': self.bits,
+ 'channel': self.channel,
+ 'codec': self.codec
+ }
+ }
+ return req
+
+ @staticmethod
+ def slice_data(data: bytes, chunk_size: int) -> (list, bool):
+ """
+ slice data
+ :param data: wav data
+ :param chunk_size: the segment size in one request
+ :return: segment data, last flag
+ """
+ data_len = len(data)
+ offset = 0
+ while offset + chunk_size < data_len:
+ yield data[offset: offset + chunk_size], False
+ offset += chunk_size
+ else:
+ yield data[offset: data_len], True
+
+ def _real_processor(self, request_params: dict) -> dict:
+ pass
+
+ def token_auth(self):
+ return {'Authorization': 'Bearer; {}'.format(self.volcanic_token)}
+
+ def signature_auth(self, data):
+ header_dicts = {
+ 'Custom': 'auth_custom',
+ }
+
+ url_parse = urlparse(self.volcanic_api_url)
+ input_str = 'GET {} HTTP/1.1\n'.format(url_parse.path)
+ auth_headers = 'Custom'
+ for header in auth_headers.split(','):
+ input_str += '{}\n'.format(header_dicts[header])
+ input_data = bytearray(input_str, 'utf-8')
+ input_data += data
+ mac = base64.urlsafe_b64encode(
+ hmac.new(self.secret.encode('utf-8'), input_data, digestmod=sha256).digest())
+ header_dicts['Authorization'] = 'HMAC256; access_token="{}"; mac="{}"; h="{}"'.format(self.volcanic_token,
+ str(mac, 'utf-8'),
+ auth_headers)
+ return header_dicts
+
+ async def segment_data_processor(self, wav_data: bytes, segment_size: int):
+ reqid = str(uuid.uuid4())
+ # 构建 full client request,并序列化压缩
+ request_params = self.construct_request(reqid)
+ payload_bytes = str.encode(json.dumps(request_params))
+ payload_bytes = gzip.compress(payload_bytes)
+ full_client_request = bytearray(generate_full_default_header())
+ full_client_request.extend((len(payload_bytes)).to_bytes(4, 'big')) # payload size(4 bytes)
+ full_client_request.extend(payload_bytes) # payload
+ header = None
+ if self.auth_method == "token":
+ header = self.token_auth()
+ elif self.auth_method == "signature":
+ header = self.signature_auth(full_client_request)
+ async with websockets.connect(self.volcanic_api_url, extra_headers=header, max_size=1000000000,
+ ssl=ssl_context) as ws:
+ # 发送 full client request
+ await ws.send(full_client_request)
+ res = await ws.recv()
+ result = parse_response(res)
+ if 'payload_msg' in result and result['payload_msg']['code'] != self.success_code:
+ raise Exception(
+ f"Error code: {result['payload_msg']['code']}, message: {result['payload_msg']['message']}")
+ for seq, (chunk, last) in enumerate(VolcanicEngineSpeechToText.slice_data(wav_data, segment_size), 1):
+ # if no compression, comment this line
+ payload_bytes = gzip.compress(chunk)
+ audio_only_request = bytearray(generate_audio_default_header())
+ if last:
+ audio_only_request = bytearray(generate_last_audio_default_header())
+ audio_only_request.extend((len(payload_bytes)).to_bytes(4, 'big')) # payload size(4 bytes)
+ audio_only_request.extend(payload_bytes) # payload
+ # 发送 audio-only client request
+ await ws.send(audio_only_request)
+ res = await ws.recv()
+ result = parse_response(res)
+ if 'payload_msg' in result and result['payload_msg']['code'] != self.success_code:
+ return result
+ return result['payload_msg']['result'][0]['text']
+
+ def check_auth(self):
+ cwd = os.path.dirname(os.path.abspath(__file__))
+ with open(f'{cwd}/iat_mp3_16k.mp3', 'rb') as f:
+ self.speech_to_text(f)
+
+ def speech_to_text(self, file):
+ data = file.read()
+ audio_data = bytes(data)
+ if self.format == "mp3":
+ segment_size = self.mp3_seg_size
+ return asyncio.run(self.segment_data_processor(audio_data, segment_size))
+ if self.format != "wav":
+ raise Exception("format should in wav or mp3")
+ nchannels, sampwidth, framerate, nframes, wav_len = read_wav_info(
+ audio_data)
+ size_per_sec = nchannels * sampwidth * framerate
+ segment_size = int(size_per_sec * self.seg_duration / 1000)
+ return asyncio.run(self.segment_data_processor(audio_data, segment_size))
diff --git a/apps/setting/models_provider/impl/volcanic_engine_model_provider/model/tti.py b/apps/setting/models_provider/impl/volcanic_engine_model_provider/model/tti.py
new file mode 100644
index 00000000000..dd021c64320
--- /dev/null
+++ b/apps/setting/models_provider/impl/volcanic_engine_model_provider/model/tti.py
@@ -0,0 +1,172 @@
+# coding=utf-8
+
+'''
+requires Python 3.6 or later
+
+requires the third-party 'requests' package:
+pip install requests
+
+'''
+
+import datetime
+import hashlib
+import hmac
+import json
+import sys
+from typing import Dict
+
+import requests
+
+from setting.models_provider.base_model_provider import MaxKBBaseModel
+from setting.models_provider.impl.base_tti import BaseTextToImage
+
+method = 'POST'
+host = 'visual.volcengineapi.com'
+region = 'cn-north-1'
+endpoint = 'https://visual.volcengineapi.com'
+service = 'cv'
+
+req_key_dict = {
+ 'general_v1.4': 'high_aes_general_v14',
+ 'general_v2.0': 'high_aes_general_v20',
+ 'general_v2.0_L': 'high_aes_general_v20_L',
+ 'anime_v1.3': 'high_aes',
+ 'anime_v1.3.1': 'high_aes',
+}
+
+
+def sign(key, msg):
+ return hmac.new(key, msg.encode('utf-8'), hashlib.sha256).digest()
+
+
+def getSignatureKey(key, dateStamp, regionName, serviceName):
+ kDate = sign(key.encode('utf-8'), dateStamp)
+ kRegion = sign(kDate, regionName)
+ kService = sign(kRegion, serviceName)
+ kSigning = sign(kService, 'request')
+ return kSigning
+
+
+def formatQuery(parameters):
+ request_parameters_init = ''
+ for key in sorted(parameters):
+ request_parameters_init += key + '=' + parameters[key] + '&'
+ request_parameters = request_parameters_init[:-1]
+ return request_parameters
+
+
+def signV4Request(access_key, secret_key, service, req_query, req_body):
+ if access_key is None or secret_key is None:
+ print('No access key is available.')
+ sys.exit()
+
+ t = datetime.datetime.utcnow()
+ current_date = t.strftime('%Y%m%dT%H%M%SZ')
+ # current_date = '20210818T095729Z'
+ datestamp = t.strftime('%Y%m%d') # Date w/o time, used in credential scope
+ canonical_uri = '/'
+ canonical_querystring = req_query
+ signed_headers = 'content-type;host;x-content-sha256;x-date'
+ payload_hash = hashlib.sha256(req_body.encode('utf-8')).hexdigest()
+ content_type = 'application/json'
+ canonical_headers = 'content-type:' + content_type + '\n' + 'host:' + host + \
+ '\n' + 'x-content-sha256:' + payload_hash + \
+ '\n' + 'x-date:' + current_date + '\n'
+ canonical_request = method + '\n' + canonical_uri + '\n' + canonical_querystring + \
+ '\n' + canonical_headers + '\n' + signed_headers + '\n' + payload_hash
+ # print(canonical_request)
+ algorithm = 'HMAC-SHA256'
+ credential_scope = datestamp + '/' + region + '/' + service + '/' + 'request'
+ string_to_sign = algorithm + '\n' + current_date + '\n' + credential_scope + '\n' + hashlib.sha256(
+ canonical_request.encode('utf-8')).hexdigest()
+ # print(string_to_sign)
+ signing_key = getSignatureKey(secret_key, datestamp, region, service)
+ # print(signing_key)
+ signature = hmac.new(signing_key, (string_to_sign).encode(
+ 'utf-8'), hashlib.sha256).hexdigest()
+ # print(signature)
+
+ authorization_header = algorithm + ' ' + 'Credential=' + access_key + '/' + \
+ credential_scope + ', ' + 'SignedHeaders=' + \
+ signed_headers + ', ' + 'Signature=' + signature
+ # print(authorization_header)
+ headers = {'X-Date': current_date,
+ 'Authorization': authorization_header,
+ 'X-Content-Sha256': payload_hash,
+ 'Content-Type': content_type
+ }
+ # print(headers)
+
+ # ************* SEND THE REQUEST *************
+ request_url = endpoint + '?' + canonical_querystring
+
+ print('\nBEGIN REQUEST++++++++++++++++++++++++++++++++++++')
+ print('Request URL = ' + request_url)
+ try:
+ r = requests.post(request_url, headers=headers, data=req_body)
+ except Exception as err:
+ print(f'error occurred: {err}')
+ raise
+ else:
+ print('\nRESPONSE++++++++++++++++++++++++++++++++++++')
+ print(f'Response code: {r.status_code}\n')
+ # 使用 replace 方法将 \u0026 替换为 &
+ resp_str = r.text.replace("\\u0026", "&")
+ if r.status_code != 200:
+ raise Exception(f'Error: {resp_str}')
+ print(f'Response body: {resp_str}\n')
+ return json.loads(resp_str)['data']['image_urls']
+
+
+class VolcanicEngineTextToImage(MaxKBBaseModel, BaseTextToImage):
+ access_key: str
+ secret_key: str
+ model_version: str
+ params: dict
+
+ def __init__(self, **kwargs):
+ super().__init__(**kwargs)
+ self.access_key = kwargs.get('access_key')
+ self.secret_key = kwargs.get('secret_key')
+ self.model_version = kwargs.get('model_version')
+ self.params = kwargs.get('params')
+
+ @staticmethod
+ def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs):
+ optional_params = {'params': {}}
+ for key, value in model_kwargs.items():
+ if key not in ['model_id', 'use_local', 'streaming']:
+ optional_params['params'][key] = value
+ return VolcanicEngineTextToImage(
+ model_version=model_name,
+ access_key=model_credential.get('access_key'),
+ secret_key=model_credential.get('secret_key'),
+ **optional_params
+ )
+
+ def check_auth(self):
+ res = self.generate_image('生成一张小猫图片')
+ print(res)
+
+ def generate_image(self, prompt: str, negative_prompt: str = None):
+ # 请求Query,按照接口文档中填入即可
+ query_params = {
+ 'Action': 'CVProcess',
+ 'Version': '2022-08-31',
+ }
+ formatted_query = formatQuery(query_params)
+ size = self.params.pop('size', '512*512').split('*')
+ body_params = {
+ "req_key": req_key_dict[self.model_version],
+ "prompt": prompt,
+ "model_version": self.model_version,
+ "return_url": True,
+ "width": int(size[0]),
+ "height": int(size[1]),
+ **self.params
+ }
+ formatted_body = json.dumps(body_params)
+ return signV4Request(self.access_key, self.secret_key, service, formatted_query, formatted_body)
+
+ def is_cache_model(self):
+ return False
diff --git a/apps/setting/models_provider/impl/volcanic_engine_model_provider/model/tts.py b/apps/setting/models_provider/impl/volcanic_engine_model_provider/model/tts.py
new file mode 100644
index 00000000000..4c1bfcc6f33
--- /dev/null
+++ b/apps/setting/models_provider/impl/volcanic_engine_model_provider/model/tts.py
@@ -0,0 +1,182 @@
+# coding=utf-8
+
+'''
+requires Python 3.6 or later
+
+requires the third-party 'websockets' package (asyncio is in the standard library):
+pip install websockets
+
+'''
+
+import asyncio
+import copy
+import gzip
+import json
+import re
+import ssl
+import uuid
+from typing import Dict
+
+import websockets
+from django.utils.translation import gettext as _
+
+from common.util.common import _remove_empty_lines
+from setting.models_provider.base_model_provider import MaxKBBaseModel
+from setting.models_provider.impl.base_tts import BaseTextToSpeech
+
+MESSAGE_TYPES = {11: "audio-only server response", 12: "frontend server response", 15: "error message from server"}
+MESSAGE_TYPE_SPECIFIC_FLAGS = {0: "no sequence number", 1: "sequence number > 0",
+ 2: "last message from server (seq < 0)", 3: "sequence number < 0"}
+MESSAGE_SERIALIZATION_METHODS = {0: "no serialization", 1: "JSON", 15: "custom type"}
+MESSAGE_COMPRESSIONS = {0: "no compression", 1: "gzip", 15: "custom compression method"}
+
+# version: b0001 (4 bits)
+# header size: b0001 (4 bits)
+# message type: b0001 (Full client request) (4bits)
+# message type specific flags: b0000 (none) (4bits)
+# message serialization method: b0001 (JSON) (4 bits)
+# message compression: b0001 (gzip) (4bits)
+# reserved data: 0x00 (1 byte)
+default_header = bytearray(b'\x11\x10\x11\x00')
+
+ssl_context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
+ssl_context.check_hostname = False
+ssl_context.verify_mode = ssl.CERT_NONE
+
+
+class VolcanicEngineTextToSpeech(MaxKBBaseModel, BaseTextToSpeech):
+ volcanic_app_id: str
+ volcanic_cluster: str
+ volcanic_api_url: str
+ volcanic_token: str
+ params: dict
+
+ def __init__(self, **kwargs):
+ super().__init__(**kwargs)
+ self.volcanic_api_url = kwargs.get('volcanic_api_url')
+ self.volcanic_token = kwargs.get('volcanic_token')
+ self.volcanic_app_id = kwargs.get('volcanic_app_id')
+ self.volcanic_cluster = kwargs.get('volcanic_cluster')
+ self.params = kwargs.get('params')
+
+ @staticmethod
+ def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs):
+ optional_params = {'params': {'voice_type': 'zh_female_cancan_mars_bigtts', 'speed_ratio': 1.0}}
+ for key, value in model_kwargs.items():
+ if key not in ['model_id', 'use_local', 'streaming']:
+ optional_params['params'][key] = value
+ return VolcanicEngineTextToSpeech(
+ volcanic_api_url=model_credential.get('volcanic_api_url'),
+ volcanic_token=model_credential.get('volcanic_token'),
+ volcanic_app_id=model_credential.get('volcanic_app_id'),
+ volcanic_cluster=model_credential.get('volcanic_cluster'),
+ **optional_params
+ )
+
+ def check_auth(self):
+ self.text_to_speech(_('Hello'))
+
+ def text_to_speech(self, text):
+ request_json = {
+ "app": {
+ "appid": self.volcanic_app_id,
+ "token": "access_token",
+ "cluster": self.volcanic_cluster
+ },
+ "user": {
+ "uid": "uid"
+ },
+ "audio": {
+ "encoding": "mp3",
+ "volume_ratio": 1.0,
+ "pitch_ratio": 1.0,
+ } | self.params,
+ "request": {
+ "reqid": str(uuid.uuid4()),
+ "text": '',
+ "text_type": "plain",
+ "operation": "xxx"
+ }
+ }
+ text = _remove_empty_lines(text)
+
+ return asyncio.run(self.submit(request_json, text))
+
+ def is_cache_model(self):
+ return False
+
+ def token_auth(self):
+ return {'Authorization': 'Bearer; {}'.format(self.volcanic_token)}
+
+ async def submit(self, request_json, text):
+ submit_request_json = copy.deepcopy(request_json)
+ submit_request_json["request"]["operation"] = "submit"
+ header = {"Authorization": f"Bearer; {self.volcanic_token}"}
+ result = b''
+ async with websockets.connect(self.volcanic_api_url, extra_headers=header, ping_interval=None,
+ ssl=ssl_context) as ws:
+ lines = [text[i:i + 200] for i in range(0, len(text), 200)]
+ for line in lines:
+ if self.is_table_format_chars_only(line):
+ continue
+ submit_request_json["request"]["reqid"] = str(uuid.uuid4())
+ submit_request_json["request"]["text"] = line
+ payload_bytes = str.encode(json.dumps(submit_request_json))
+ payload_bytes = gzip.compress(payload_bytes) # if no compression, comment this line
+ full_client_request = bytearray(default_header)
+ full_client_request.extend((len(payload_bytes)).to_bytes(4, 'big')) # payload size(4 bytes)
+ full_client_request.extend(payload_bytes) # payload
+ await ws.send(full_client_request)
+ result += await self.parse_response(ws)
+ return result
+
+ @staticmethod
+ def is_table_format_chars_only(s):
+ # 检查是否仅包含 "|", "-", 和空格字符
+ return bool(s) and re.fullmatch(r'[|\-\s]+', s)
+
+ @staticmethod
+ async def parse_response(ws):
+ result = b''
+ while True:
+ res = await ws.recv()
+ protocol_version = res[0] >> 4
+ header_size = res[0] & 0x0f
+ message_type = res[1] >> 4
+ message_type_specific_flags = res[1] & 0x0f
+ serialization_method = res[2] >> 4
+ message_compression = res[2] & 0x0f
+ reserved = res[3]
+ header_extensions = res[4:header_size * 4]
+ payload = res[header_size * 4:]
+ if header_size != 1:
+ # print(f" Header extensions: {header_extensions}")
+ pass
+ if message_type == 0xb: # audio-only server response
+ if message_type_specific_flags == 0: # no sequence number as ACK
+ continue
+ else:
+ sequence_number = int.from_bytes(payload[:4], "big", signed=True)
+ payload_size = int.from_bytes(payload[4:8], "big", signed=False)
+ payload = payload[8:]
+ result += payload
+ if sequence_number < 0:
+ break
+ else:
+ continue
+ elif message_type == 0xf:
+ code = int.from_bytes(payload[:4], "big", signed=False)
+ msg_size = int.from_bytes(payload[4:8], "big", signed=False)
+ error_msg = payload[8:]
+ if message_compression == 1:
+ error_msg = gzip.decompress(error_msg)
+ error_msg = str(error_msg, "utf-8")
+ raise Exception(f"Error code: {code}, message: {error_msg}")
+ elif message_type == 0xc:
+ msg_size = int.from_bytes(payload[:4], "big", signed=False)
+ payload = payload[4:]
+ if message_compression == 1:
+ payload = gzip.decompress(payload)
+ else:
+ break
+ return result
diff --git a/apps/setting/models_provider/impl/volcanic_engine_model_provider/volcanic_engine_model_provider.py b/apps/setting/models_provider/impl/volcanic_engine_model_provider/volcanic_engine_model_provider.py
new file mode 100644
index 00000000000..d963a144625
--- /dev/null
+++ b/apps/setting/models_provider/impl/volcanic_engine_model_provider/volcanic_engine_model_provider.py
@@ -0,0 +1,116 @@
+#!/usr/bin/env python
+# -*- coding: UTF-8 -*-
+"""
+@Project :MaxKB
+@File    :volcanic_engine_model_provider.py
+@Author :Brian Yang
+@Date :5/13/24 7:47 AM
+"""
+import os
+
+from common.util.file_util import get_file_content
+from setting.models_provider.base_model_provider import IModelProvider, ModelProvideInfo, ModelInfo, ModelTypeConst, \
+ ModelInfoManage
+from setting.models_provider.impl.openai_model_provider.credential.llm import OpenAILLMModelCredential
+from setting.models_provider.impl.volcanic_engine_model_provider.credential.embedding import VolcanicEmbeddingCredential
+from setting.models_provider.impl.volcanic_engine_model_provider.credential.image import \
+ VolcanicEngineImageModelCredential
+from setting.models_provider.impl.volcanic_engine_model_provider.credential.tti import VolcanicEngineTTIModelCredential
+from setting.models_provider.impl.volcanic_engine_model_provider.credential.tts import VolcanicEngineTTSModelCredential
+from setting.models_provider.impl.volcanic_engine_model_provider.model.embedding import VolcanicEngineEmbeddingModel
+from setting.models_provider.impl.volcanic_engine_model_provider.model.image import VolcanicEngineImage
+from setting.models_provider.impl.volcanic_engine_model_provider.model.llm import VolcanicEngineChatModel
+from setting.models_provider.impl.volcanic_engine_model_provider.credential.stt import VolcanicEngineSTTModelCredential
+from setting.models_provider.impl.volcanic_engine_model_provider.model.stt import VolcanicEngineSpeechToText
+from setting.models_provider.impl.volcanic_engine_model_provider.model.tti import VolcanicEngineTextToImage
+from setting.models_provider.impl.volcanic_engine_model_provider.model.tts import VolcanicEngineTextToSpeech
+
+from smartdoc.conf import PROJECT_DIR
+from django.utils.translation import gettext as _
+
+volcanic_engine_llm_model_credential = OpenAILLMModelCredential()
+volcanic_engine_stt_model_credential = VolcanicEngineSTTModelCredential()
+volcanic_engine_tts_model_credential = VolcanicEngineTTSModelCredential()
+volcanic_engine_image_model_credential = VolcanicEngineImageModelCredential()
+volcanic_engine_tti_model_credential = VolcanicEngineTTIModelCredential()
+
+model_info_list = [
+ ModelInfo('ep-xxxxxxxxxx-yyyy',
+ _('The user goes to the model inference page of Volcano Ark to create an inference access point. Here, you need to enter ep-xxxxxxxxxx-yyyy to call it.'),
+ ModelTypeConst.LLM,
+ volcanic_engine_llm_model_credential, VolcanicEngineChatModel
+ ),
+ ModelInfo('ep-xxxxxxxxxx-yyyy',
+ _('The user goes to the model inference page of Volcano Ark to create an inference access point. Here, you need to enter ep-xxxxxxxxxx-yyyy to call it.'),
+ ModelTypeConst.IMAGE,
+ volcanic_engine_image_model_credential, VolcanicEngineImage
+ ),
+ ModelInfo('asr',
+ '',
+ ModelTypeConst.STT,
+ volcanic_engine_stt_model_credential, VolcanicEngineSpeechToText
+ ),
+ ModelInfo('tts',
+ '',
+ ModelTypeConst.TTS,
+ volcanic_engine_tts_model_credential, VolcanicEngineTextToSpeech
+ ),
+ ModelInfo('general_v2.0',
+ _('Universal 2.0-Vincent Diagram'),
+ ModelTypeConst.TTI,
+ volcanic_engine_tti_model_credential, VolcanicEngineTextToImage
+ ),
+ ModelInfo('general_v2.0_L',
+ _('Universal 2.0Pro-Vincent Chart'),
+ ModelTypeConst.TTI,
+ volcanic_engine_tti_model_credential, VolcanicEngineTextToImage
+ ),
+ ModelInfo('general_v1.4',
+ _('Universal 1.4-Vincent Chart'),
+ ModelTypeConst.TTI,
+ volcanic_engine_tti_model_credential, VolcanicEngineTextToImage
+ ),
+ ModelInfo('anime_v1.3',
+ _('Animation 1.3.0-Vincent Picture'),
+ ModelTypeConst.TTI,
+ volcanic_engine_tti_model_credential, VolcanicEngineTextToImage
+ ),
+ ModelInfo('anime_v1.3.1',
+ _('Animation 1.3.1-Vincent Picture'),
+ ModelTypeConst.TTI,
+ volcanic_engine_tti_model_credential, VolcanicEngineTextToImage
+ ),
+]
+
+open_ai_embedding_credential = VolcanicEmbeddingCredential()
+model_info_embedding_list = [
+ ModelInfo('ep-xxxxxxxxxx-yyyy',
+ _('The user goes to the model inference page of Volcano Ark to create an inference access point. Here, you need to enter ep-xxxxxxxxxx-yyyy to call it.'),
+ ModelTypeConst.EMBEDDING, open_ai_embedding_credential,
+ VolcanicEngineEmbeddingModel)
+]
+
+model_info_manage = (
+ ModelInfoManage.builder()
+ .append_model_info_list(model_info_list)
+ .append_default_model_info(model_info_list[0])
+ .append_default_model_info(model_info_list[1])
+ .append_default_model_info(model_info_list[2])
+ .append_default_model_info(model_info_list[3])
+ .append_default_model_info(model_info_list[4])
+ .append_model_info_list(model_info_embedding_list)
+ .append_default_model_info(model_info_embedding_list[0])
+ .build()
+)
+
+
+class VolcanicEngineModelProvider(IModelProvider):
+
+ def get_model_info_manage(self):
+ return model_info_manage
+
+ def get_model_provide_info(self):
+ return ModelProvideInfo(provider='model_volcanic_engine_provider', name=_('volcano engine'), icon=get_file_content(
+ os.path.join(PROJECT_DIR, "apps", "setting", 'models_provider', 'impl', 'volcanic_engine_model_provider',
+ 'icon',
+ 'volcanic_engine_icon_svg')))
diff --git a/apps/setting/models_provider/impl/wenxin_model_provider/credential/embedding.py b/apps/setting/models_provider/impl/wenxin_model_provider/credential/embedding.py
new file mode 100644
index 00000000000..85359511d33
--- /dev/null
+++ b/apps/setting/models_provider/impl/wenxin_model_provider/credential/embedding.py
@@ -0,0 +1,49 @@
+# coding=utf-8
+"""
+ @project: MaxKB
+ @Author:虎
+ @file: embedding.py
+ @date:2024/10/17 15:40
+ @desc:
+"""
+import traceback
+from typing import Dict
+
+from django.utils.translation import gettext as _
+
+from common import forms
+from common.exception.app_exception import AppApiException
+from common.forms import BaseForm
+from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode
+
+
+class QianfanEmbeddingCredential(BaseForm, BaseModelCredential):
+
+ def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], model_params, provider,
+ raise_exception=False):
+ model_type_list = provider.get_model_type_list()
+ if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))):
+ raise AppApiException(ValidCode.valid_error.value,
+ _('{model_type} Model type is not supported').format(model_type=model_type))
+ self.valid_form(model_credential)
+ try:
+ model = provider.get_model(model_type, model_name, model_credential)
+ model.embed_query(_('Hello'))
+ except Exception as e:
+ traceback.print_exc()
+ if isinstance(e, AppApiException):
+ raise e
+ if raise_exception:
+ raise AppApiException(ValidCode.valid_error.value,
+ _('Verification failed, please check whether the parameters are correct: {error}').format(
+ error=str(e)))
+ else:
+ return False
+ return True
+
+ def encryption_dict(self, model: Dict[str, object]):
+ return {**model, 'qianfan_sk': super().encryption(model.get('qianfan_sk', ''))}
+
+ qianfan_ak = forms.PasswordInputField('API Key', required=True)
+
+ qianfan_sk = forms.PasswordInputField("Secret Key", required=True)
diff --git a/apps/setting/models_provider/impl/wenxin_model_provider/credential/llm.py b/apps/setting/models_provider/impl/wenxin_model_provider/credential/llm.py
new file mode 100644
index 00000000000..d4d379db3d5
--- /dev/null
+++ b/apps/setting/models_provider/impl/wenxin_model_provider/credential/llm.py
@@ -0,0 +1,82 @@
+# coding=utf-8
+"""
+ @project: MaxKB
+ @Author:虎
+ @file: llm.py
+ @date:2024/7/12 10:19
+ @desc:
+"""
+import traceback
+from typing import Dict
+
+from django.utils.translation import gettext_lazy as _, gettext
+from langchain_core.messages import HumanMessage
+
+from common import forms
+from common.exception.app_exception import AppApiException
+from common.forms import BaseForm, TooltipLabel
+from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode
+
+
+class WenxinLLMModelParams(BaseForm):
+ temperature = forms.SliderField(TooltipLabel(_('Temperature'),
+ _('Higher values make the output more random, while lower values make it more focused and deterministic')),
+ required=True, default_value=0.95,
+ _min=0.1,
+ _max=1.0,
+ _step=0.01,
+ precision=2)
+
+ max_output_tokens = forms.SliderField(
+ TooltipLabel(_('Output the maximum Tokens'),
+ _('Specify the maximum number of tokens that the model can generate')),
+ required=True, default_value=1024,
+ _min=2,
+ _max=100000,
+ _step=1,
+ precision=0)
+
+
+class WenxinLLMModelCredential(BaseForm, BaseModelCredential):
+ def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], model_params, provider,
+ raise_exception=False):
+ model_type_list = provider.get_model_type_list()
+ if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))):
+ raise AppApiException(ValidCode.valid_error.value,
+ gettext('{model_type} Model type is not supported').format(model_type=model_type))
+ model = provider.get_model(model_type, model_name, model_credential, **model_params)
+ model_info = [model.lower() for model in model.client.models()]
+ if not model_info.__contains__(model_name.lower()):
+ raise AppApiException(ValidCode.valid_error.value,
+ gettext('{model_name} The model does not support').format(model_name=model_name))
+ for key in ['api_key', 'secret_key']:
+ if key not in model_credential:
+ if raise_exception:
+ raise AppApiException(ValidCode.valid_error.value, gettext('{key} is required').format(key=key))
+ else:
+ return False
+ try:
+ model.invoke(
+ [HumanMessage(content=gettext('Hello'))])
+ except Exception as e:
+ traceback.print_exc()
+ raise e
+ return True
+
+ def encryption_dict(self, model_info: Dict[str, object]):
+ return {**model_info, 'secret_key': super().encryption(model_info.get('secret_key', ''))}
+
+ def build_model(self, model_info: Dict[str, object]):
+ for key in ['api_key', 'secret_key', 'model']:
+ if key not in model_info:
+ raise AppApiException(500, gettext('{key} is required').format(key=key))
+ self.api_key = model_info.get('api_key')
+ self.secret_key = model_info.get('secret_key')
+ return self
+
+ api_key = forms.PasswordInputField('API Key', required=True)
+
+ secret_key = forms.PasswordInputField("Secret Key", required=True)
+
+ def get_model_params_setting_form(self, model_name):
+ return WenxinLLMModelParams()
diff --git a/apps/setting/models_provider/impl/wenxin_model_provider/model/embedding.py b/apps/setting/models_provider/impl/wenxin_model_provider/model/embedding.py
new file mode 100644
index 00000000000..d46ac51eaab
--- /dev/null
+++ b/apps/setting/models_provider/impl/wenxin_model_provider/model/embedding.py
@@ -0,0 +1,23 @@
+# coding=utf-8
+"""
+ @project: MaxKB
+ @Author:虎
+ @file: embedding.py
+ @date:2024/10/17 16:48
+ @desc:
+"""
+from typing import Dict
+
+from langchain_community.embeddings import QianfanEmbeddingsEndpoint
+
+from setting.models_provider.base_model_provider import MaxKBBaseModel
+
+
+class QianfanEmbeddings(MaxKBBaseModel, QianfanEmbeddingsEndpoint):
+ @staticmethod
+ def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs):
+ return QianfanEmbeddings(
+ model=model_name,
+ qianfan_ak=model_credential.get('qianfan_ak'),
+ qianfan_sk=model_credential.get('qianfan_sk'),
+ )
diff --git a/apps/setting/models_provider/impl/wenxin_model_provider/model/llm.py b/apps/setting/models_provider/impl/wenxin_model_provider/model/llm.py
new file mode 100644
index 00000000000..1f23e5a185a
--- /dev/null
+++ b/apps/setting/models_provider/impl/wenxin_model_provider/model/llm.py
@@ -0,0 +1,76 @@
+# coding=utf-8
+"""
+ @project: maxkb
+ @Author:虎
+ @file: llm.py
+ @date:2023/11/10 17:45
+ @desc:
+"""
+from typing import List, Dict, Optional, Any, Iterator
+
+from langchain_community.chat_models.baidu_qianfan_endpoint import _convert_dict_to_message, QianfanChatEndpoint
+from langchain_core.callbacks import CallbackManagerForLLMRun
+from langchain_core.messages import (
+ AIMessageChunk,
+ BaseMessage,
+)
+from langchain_core.outputs import ChatGenerationChunk
+
+from setting.models_provider.base_model_provider import MaxKBBaseModel
+
+
+class QianfanChatModel(MaxKBBaseModel, QianfanChatEndpoint):
+ @staticmethod
+ def is_cache_model():
+ return False
+
+ @staticmethod
+ def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs):
+ optional_params = MaxKBBaseModel.filter_optional_params(model_kwargs)
+ return QianfanChatModel(model=model_name,
+ qianfan_ak=model_credential.get('api_key'),
+ qianfan_sk=model_credential.get('secret_key'),
+ streaming=model_kwargs.get('streaming', False),
+ init_kwargs=optional_params)
+
+ usage_metadata: dict = {}
+
+ def get_last_generation_info(self) -> Optional[Dict[str, Any]]:
+ return self.usage_metadata
+
+ def get_num_tokens_from_messages(self, messages: List[BaseMessage]) -> int:
+ return self.usage_metadata.get('prompt_tokens', 0)
+
+ def get_num_tokens(self, text: str) -> int:
+ return self.usage_metadata.get('completion_tokens', 0)
+
+ def _stream(
+ self,
+ messages: List[BaseMessage],
+ stop: Optional[List[str]] = None,
+ run_manager: Optional[CallbackManagerForLLMRun] = None,
+ **kwargs: Any,
+ ) -> Iterator[ChatGenerationChunk]:
+ kwargs = {**self.init_kwargs, **kwargs}
+ params = self._convert_prompt_msg_params(messages, **kwargs)
+ params["stop"] = stop
+ params["stream"] = True
+ for res in self.client.do(**params):
+ if res:
+ msg = _convert_dict_to_message(res)
+ additional_kwargs = msg.additional_kwargs.get("function_call", {})
+ if msg.content == "" or res.get("body").get("is_end"):
+ token_usage = res.get("body").get("usage")
+ self.usage_metadata = token_usage
+ chunk = ChatGenerationChunk(
+ text=res["result"],
+ message=AIMessageChunk( # type: ignore[call-arg]
+ content=msg.content,
+ role="assistant",
+ additional_kwargs=additional_kwargs,
+ ),
+ generation_info=msg.additional_kwargs,
+ )
+ if run_manager:
+ run_manager.on_llm_new_token(chunk.text, chunk=chunk)
+ yield chunk
diff --git a/apps/setting/models_provider/impl/wenxin_model_provider/model/qian_fan_chat_model.py b/apps/setting/models_provider/impl/wenxin_model_provider/model/qian_fan_chat_model.py
deleted file mode 100644
index b07e8a01ba1..00000000000
--- a/apps/setting/models_provider/impl/wenxin_model_provider/model/qian_fan_chat_model.py
+++ /dev/null
@@ -1,88 +0,0 @@
-# coding=utf-8
-"""
- @project: maxkb
- @Author:虎
- @file: qian_fan_chat_model.py
- @date:2023/11/10 17:45
- @desc:
-"""
-from typing import Optional, List, Any, Iterator, cast
-
-from langchain.callbacks.manager import CallbackManager
-from langchain.chat_models.base import BaseChatModel
-from langchain.load import dumpd
-from langchain.schema import LLMResult
-from langchain.schema.language_model import LanguageModelInput
-from langchain.schema.messages import BaseMessageChunk, BaseMessage, HumanMessage, AIMessage, get_buffer_string
-from langchain.schema.output import ChatGenerationChunk
-from langchain.schema.runnable import RunnableConfig
-from langchain_community.chat_models import QianfanChatEndpoint
-
-from common.config.tokenizer_manage_config import TokenizerManage
-
-
-class QianfanChatModel(QianfanChatEndpoint):
-
- def get_num_tokens_from_messages(self, messages: List[BaseMessage]) -> int:
- tokenizer = TokenizerManage.get_tokenizer()
- return sum([len(tokenizer.encode(get_buffer_string([m]))) for m in messages])
-
- def get_num_tokens(self, text: str) -> int:
- tokenizer = TokenizerManage.get_tokenizer()
- return len(tokenizer.encode(text))
-
- def stream(
- self,
- input: LanguageModelInput,
- config: Optional[RunnableConfig] = None,
- *,
- stop: Optional[List[str]] = None,
- **kwargs: Any,
- ) -> Iterator[BaseMessageChunk]:
- if len(input) % 2 == 0:
- input = [HumanMessage(content='padding'), *input]
- input = [
- HumanMessage(content=input[index].content) if index % 2 == 0 else AIMessage(content=input[index].content)
- for index in range(0, len(input))]
- if type(self)._stream == BaseChatModel._stream:
- # model doesn't implement streaming, so use default implementation
- yield cast(
- BaseMessageChunk, self.invoke(input, config=config, stop=stop, **kwargs)
- )
- else:
- config = config or {}
- messages = self._convert_input(input).to_messages()
- params = self._get_invocation_params(stop=stop, **kwargs)
- options = {"stop": stop, **kwargs}
- callback_manager = CallbackManager.configure(
- config.get("callbacks"),
- self.callbacks,
- self.verbose,
- config.get("tags"),
- self.tags,
- config.get("metadata"),
- self.metadata,
- )
- (run_manager,) = callback_manager.on_chat_model_start(
- dumpd(self),
- [messages],
- invocation_params=params,
- options=options,
- name=config.get("run_name"),
- )
- try:
- generation: Optional[ChatGenerationChunk] = None
- for chunk in self._stream(
- messages, stop=stop, run_manager=run_manager, **kwargs
- ):
- yield chunk.message
- if generation is None:
- generation = chunk
- assert generation is not None
- except BaseException as e:
- run_manager.on_llm_error(e)
- raise e
- else:
- run_manager.on_llm_end(
- LLMResult(generations=[[generation]]),
- )
diff --git a/apps/setting/models_provider/impl/wenxin_model_provider/wenxin_model_provider.py b/apps/setting/models_provider/impl/wenxin_model_provider/wenxin_model_provider.py
index 3d7c9a7d9ed..b07f90223f7 100644
--- a/apps/setting/models_provider/impl/wenxin_model_provider/wenxin_model_provider.py
+++ b/apps/setting/models_provider/impl/wenxin_model_provider/wenxin_model_provider.py
@@ -7,122 +7,62 @@
@desc:
"""
import os
-from typing import Dict
-from langchain.schema import HumanMessage
-from langchain_community.chat_models import QianfanChatEndpoint
-from qianfan import ChatCompletion
-
-from common import forms
-from common.exception.app_exception import AppApiException
-from common.forms import BaseForm
from common.util.file_util import get_file_content
-from setting.models_provider.base_model_provider import ModelProvideInfo, ModelTypeConst, BaseModelCredential, \
- ModelInfo, IModelProvider, ValidCode
-from setting.models_provider.impl.wenxin_model_provider.model.qian_fan_chat_model import QianfanChatModel
+from setting.models_provider.base_model_provider import ModelProvideInfo, ModelTypeConst, ModelInfo, IModelProvider, \
+ ModelInfoManage
+from setting.models_provider.impl.wenxin_model_provider.credential.embedding import QianfanEmbeddingCredential
+from setting.models_provider.impl.wenxin_model_provider.credential.llm import WenxinLLMModelCredential
+from setting.models_provider.impl.wenxin_model_provider.model.embedding import QianfanEmbeddings
+from setting.models_provider.impl.wenxin_model_provider.model.llm import QianfanChatModel
from smartdoc.conf import PROJECT_DIR
-
-
-class WenxinLLMModelCredential(BaseForm, BaseModelCredential):
- def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], raise_exception=False):
- model_type_list = WenxinModelProvider().get_model_type_list()
- if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))):
- raise AppApiException(ValidCode.valid_error.value, f'{model_type} 模型类型不支持')
- model_info = [model.lower() for model in ChatCompletion.models()]
- if not model_info.__contains__(model_name.lower()):
- raise AppApiException(ValidCode.valid_error.value, f'{model_name} 模型不支持')
- for key in ['api_key', 'secret_key']:
- if key not in model_credential:
- if raise_exception:
- raise AppApiException(ValidCode.valid_error.value, f'{key} 字段为必填字段')
- else:
- return False
- try:
- WenxinModelProvider().get_model(model_type, model_name, model_credential).invoke(
- [HumanMessage(content='你好')])
- except Exception as e:
- raise e
- return True
-
- def encryption_dict(self, model_info: Dict[str, object]):
- return {**model_info, 'secret_key': super().encryption(model_info.get('secret_key', ''))}
-
- def build_model(self, model_info: Dict[str, object]):
- for key in ['api_key', 'secret_key', 'model']:
- if key not in model_info:
- raise AppApiException(500, f'{key} 字段为必填字段')
- self.api_key = model_info.get('api_key')
- self.secret_key = model_info.get('secret_key')
- return self
-
- api_key = forms.PasswordInputField('API Key', required=True)
-
- secret_key = forms.PasswordInputField("Secret Key", required=True)
-
+from django.utils.translation import gettext as _
win_xin_llm_model_credential = WenxinLLMModelCredential()
-model_dict = {
- 'ERNIE-Bot-4': ModelInfo('ERNIE-Bot-4',
- 'ERNIE-Bot-4是百度自行研发的大语言模型,覆盖海量中文数据,具有更强的对话问答、内容创作生成等能力。',
- ModelTypeConst.LLM, win_xin_llm_model_credential),
-
- 'ERNIE-Bot': ModelInfo('ERNIE-Bot',
- 'ERNIE-Bot是百度自行研发的大语言模型,覆盖海量中文数据,具有更强的对话问答、内容创作生成等能力。',
- ModelTypeConst.LLM, win_xin_llm_model_credential),
-
- 'ERNIE-Bot-turbo': ModelInfo('ERNIE-Bot-turbo',
- 'ERNIE-Bot-turbo是百度自行研发的大语言模型,覆盖海量中文数据,具有更强的对话问答、内容创作生成等能力,响应速度更快。',
- ModelTypeConst.LLM, win_xin_llm_model_credential),
-
- 'BLOOMZ-7B': ModelInfo('BLOOMZ-7B',
- 'BLOOMZ-7B是业内知名的大语言模型,由BigScience研发并开源,能够以46种语言和13种编程语言输出文本。',
- ModelTypeConst.LLM, win_xin_llm_model_credential),
-
- 'Llama-2-7b-chat': ModelInfo('Llama-2-7b-chat',
- 'Llama-2-7b-chat由Meta AI研发并开源,在编码、推理及知识应用等场景表现优秀,Llama-2-7b-chat是高性能原生开源版本,适用于对话场景。',
- ModelTypeConst.LLM, win_xin_llm_model_credential),
-
- 'Llama-2-13b-chat': ModelInfo('Llama-2-13b-chat',
- 'Llama-2-13b-chat由Meta AI研发并开源,在编码、推理及知识应用等场景表现优秀,Llama-2-13b-chat是性能与效果均衡的原生开源版本,适用于对话场景。',
- ModelTypeConst.LLM, win_xin_llm_model_credential),
-
- 'Llama-2-70b-chat': ModelInfo('Llama-2-70b-chat',
- 'Llama-2-70b-chat由Meta AI研发并开源,在编码、推理及知识应用等场景表现优秀,Llama-2-70b-chat是高精度效果的原生开源版本。',
- ModelTypeConst.LLM, win_xin_llm_model_credential),
-
- 'Qianfan-Chinese-Llama-2-7B': ModelInfo('Qianfan-Chinese-Llama-2-7B',
- '千帆团队在Llama-2-7b基础上的中文增强版本,在CMMLU、C-EVAL等中文知识库上表现优异。',
- ModelTypeConst.LLM, win_xin_llm_model_credential)
-}
+qianfan_embedding_credential = QianfanEmbeddingCredential()
+model_info_list = [ModelInfo('ERNIE-Bot-4',
+ _('ERNIE-Bot-4 is a large language model independently developed by Baidu. It covers massive Chinese data and has stronger capabilities in dialogue Q&A, content creation and generation.'),
+ ModelTypeConst.LLM, win_xin_llm_model_credential, QianfanChatModel),
+ ModelInfo('ERNIE-Bot',
+ _('ERNIE-Bot is a large language model independently developed by Baidu. It covers massive Chinese data and has stronger capabilities in dialogue Q&A, content creation and generation.'),
+ ModelTypeConst.LLM, win_xin_llm_model_credential, QianfanChatModel),
+ ModelInfo('ERNIE-Bot-turbo',
+ _('ERNIE-Bot-turbo is a large language model independently developed by Baidu. It covers massive Chinese data, has stronger capabilities in dialogue Q&A, content creation and generation, and has a faster response speed.'),
+ ModelTypeConst.LLM, win_xin_llm_model_credential, QianfanChatModel),
+ ModelInfo('BLOOMZ-7B',
+ _('BLOOMZ-7B is a well-known large language model in the industry. It was developed and open sourced by BigScience and can output text in 46 languages and 13 programming languages.'),
+ ModelTypeConst.LLM, win_xin_llm_model_credential, QianfanChatModel),
+ ModelInfo('Llama-2-7b-chat',
+                         _('Llama-2-7b-chat was developed by Meta AI and is open source. It performs well in scenarios such as coding, reasoning and knowledge application. Llama-2-7b-chat is a high-performance native open source version suitable for conversation scenarios.'),
+ ModelTypeConst.LLM, win_xin_llm_model_credential, QianfanChatModel),
+ ModelInfo('Llama-2-13b-chat',
+ _('Llama-2-13b-chat was developed by Meta AI and is open source. It performs well in scenarios such as coding, reasoning and knowledge application. Llama-2-13b-chat is a native open source version with balanced performance and effect, suitable for conversation scenarios.'),
+ ModelTypeConst.LLM, win_xin_llm_model_credential, QianfanChatModel),
+ ModelInfo('Llama-2-70b-chat',
+ _('Llama-2-70b-chat was developed by Meta AI and is open source. It performs well in scenarios such as coding, reasoning, and knowledge application. Llama-2-70b-chat is a native open source version with high-precision effects.'),
+ ModelTypeConst.LLM, win_xin_llm_model_credential, QianfanChatModel),
+ ModelInfo('Qianfan-Chinese-Llama-2-7B',
+ _('The Chinese enhanced version developed by the Qianfan team based on Llama-2-7b has performed well on Chinese knowledge bases such as CMMLU and C-EVAL.'),
+ ModelTypeConst.LLM, win_xin_llm_model_credential, QianfanChatModel)
+ ]
+embedding_model_info = ModelInfo('Embedding-V1',
+ _('Embedding-V1 is a text representation model based on Baidu Wenxin large model technology. It can convert text into a vector form represented by numerical values and can be used in text retrieval, information recommendation, knowledge mining and other scenarios. Embedding-V1 provides the Embeddings interface, which can generate corresponding vector representations based on input content. You can call this interface to input text into the model and obtain the corresponding vector representation for subsequent text processing and analysis.'),
+ ModelTypeConst.EMBEDDING, qianfan_embedding_credential, QianfanEmbeddings)
+model_info_manage = ModelInfoManage.builder().append_model_info_list(model_info_list).append_default_model_info(
+ ModelInfo('ERNIE-Bot-4',
+ _('ERNIE-Bot-4 is a large language model independently developed by Baidu. It covers massive Chinese data and has stronger capabilities in dialogue Q&A, content creation and generation.'),
+ ModelTypeConst.LLM,
+ win_xin_llm_model_credential,
+ QianfanChatModel)).append_model_info(embedding_model_info).append_default_model_info(
+ embedding_model_info).build()
class WenxinModelProvider(IModelProvider):
- def get_dialogue_number(self):
- return 2
-
- def get_model(self, model_type, model_name, model_credential: Dict[str, object],
- **model_kwargs) -> QianfanChatEndpoint:
- return QianfanChatModel(model=model_name,
- qianfan_ak=model_credential.get('api_key'),
- qianfan_sk=model_credential.get('secret_key'),
- streaming=model_kwargs.get('streaming', False))
-
- def get_model_type_list(self):
- return [{'key': "大语言模型", 'value': "LLM"}]
-
- def get_model_list(self, model_type):
- if model_type is None:
- raise AppApiException(500, '模型类型不能为空')
- return [model_dict.get(key).to_dict() for key in
- list(filter(lambda key: model_dict.get(key).model_type == model_type, model_dict.keys()))]
-
- def get_model_credential(self, model_type, model_name):
- if model_name in model_dict:
- return model_dict.get(model_name).model_credential
- return win_xin_llm_model_credential
+ def get_model_info_manage(self):
+ return model_info_manage
def get_model_provide_info(self):
- return ModelProvideInfo(provider='model_wenxin_provider', name='千帆大模型', icon=get_file_content(
+ return ModelProvideInfo(provider='model_wenxin_provider', name=_('Thousand sails large model'), icon=get_file_content(
os.path.join(PROJECT_DIR, "apps", "setting", 'models_provider', 'impl', 'wenxin_model_provider', 'icon',
'azure_icon_svg')))
diff --git a/apps/setting/models_provider/impl/xf_model_provider/credential/embedding.py b/apps/setting/models_provider/impl/xf_model_provider/credential/embedding.py
new file mode 100644
index 00000000000..57f66d3ac88
--- /dev/null
+++ b/apps/setting/models_provider/impl/xf_model_provider/credential/embedding.py
@@ -0,0 +1,50 @@
+# coding=utf-8
+"""
+ @project: MaxKB
+ @Author:虎
+ @file: embedding.py
+ @date:2024/10/17 15:40
+ @desc:
+"""
+import traceback
+from typing import Dict
+
+from django.utils.translation import gettext as _
+
+from common import forms
+from common.exception.app_exception import AppApiException
+from common.forms import BaseForm
+from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode
+
+
+class XFEmbeddingCredential(BaseForm, BaseModelCredential):
+
+ def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], model_params, provider,
+ raise_exception=False):
+ model_type_list = provider.get_model_type_list()
+ if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))):
+ raise AppApiException(ValidCode.valid_error.value,
+ _('{model_type} Model type is not supported').format(model_type=model_type))
+ self.valid_form(model_credential)
+ try:
+ model = provider.get_model(model_type, model_name, model_credential)
+ model.embed_query(_('Hello'))
+ except Exception as e:
+ traceback.print_exc()
+ if isinstance(e, AppApiException):
+ raise e
+ if raise_exception:
+ raise AppApiException(ValidCode.valid_error.value,
+ _('Verification failed, please check whether the parameters are correct: {error}').format(
+ error=str(e)))
+ else:
+ return False
+ return True
+
+ def encryption_dict(self, model: Dict[str, object]):
+ return {**model, 'spark_api_secret': super().encryption(model.get('spark_api_secret', ''))}
+
+ base_url = forms.TextInputField('API URL', required=True, default_value="https://emb-cn-huabei-1.xf-yun.com/")
+ spark_app_id = forms.TextInputField('APP ID', required=True)
+ spark_api_key = forms.PasswordInputField("API Key", required=True)
+ spark_api_secret = forms.PasswordInputField('API Secret', required=True)
diff --git a/apps/setting/models_provider/impl/xf_model_provider/credential/image.py b/apps/setting/models_provider/impl/xf_model_provider/credential/image.py
new file mode 100644
index 00000000000..b68b84149be
--- /dev/null
+++ b/apps/setting/models_provider/impl/xf_model_provider/credential/image.py
@@ -0,0 +1,60 @@
+# coding=utf-8
+import base64
+import os
+import traceback
+from typing import Dict
+
+from django.utils.translation import gettext as _
+from langchain_core.messages import HumanMessage
+
+from common import forms
+from common.exception.app_exception import AppApiException
+from common.forms import BaseForm
+from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode
+from setting.models_provider.impl.xf_model_provider.model.image import ImageMessage
+
+
+class XunFeiImageModelCredential(BaseForm, BaseModelCredential):
+ spark_api_url = forms.TextInputField('API URL', required=True,
+ default_value='wss://spark-api.cn-huabei-1.xf-yun.com/v2.1/image')
+ spark_app_id = forms.TextInputField('APP ID', required=True)
+ spark_api_key = forms.PasswordInputField("API Key", required=True)
+ spark_api_secret = forms.PasswordInputField('API Secret', required=True)
+
+ def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], model_params, provider,
+ raise_exception=False):
+ model_type_list = provider.get_model_type_list()
+ if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))):
+ raise AppApiException(ValidCode.valid_error.value,
+ _('{model_type} Model type is not supported').format(model_type=model_type))
+
+ for key in ['spark_api_url', 'spark_app_id', 'spark_api_key', 'spark_api_secret']:
+ if key not in model_credential:
+ if raise_exception:
+ raise AppApiException(ValidCode.valid_error.value, _('{key} is required').format(key=key))
+ else:
+ return False
+ try:
+ model = provider.get_model(model_type, model_name, model_credential, **model_params)
+ cwd = os.path.dirname(os.path.abspath(__file__))
+ with open(f'{cwd}/img_1.png', 'rb') as f:
+ message_list = [ImageMessage(str(base64.b64encode(f.read()), 'utf-8')),
+ HumanMessage(_('Please outline this picture'))]
+ model.stream(message_list)
+ except Exception as e:
+ traceback.print_exc()
+ if isinstance(e, AppApiException):
+ raise e
+ if raise_exception:
+ raise AppApiException(ValidCode.valid_error.value,
+ _('Verification failed, please check whether the parameters are correct: {error}').format(
+ error=str(e)))
+ else:
+ return False
+ return True
+
+ def encryption_dict(self, model: Dict[str, object]):
+ return {**model, 'spark_api_secret': super().encryption(model.get('spark_api_secret', ''))}
+
+ def get_model_params_setting_form(self, model_name):
+ pass
diff --git a/apps/setting/models_provider/impl/xf_model_provider/credential/img_1.png b/apps/setting/models_provider/impl/xf_model_provider/credential/img_1.png
new file mode 100644
index 00000000000..ccb9d3b2035
Binary files /dev/null and b/apps/setting/models_provider/impl/xf_model_provider/credential/img_1.png differ
diff --git a/apps/setting/models_provider/impl/xf_model_provider/credential/llm.py b/apps/setting/models_provider/impl/xf_model_provider/credential/llm.py
new file mode 100644
index 00000000000..f62a7164ced
--- /dev/null
+++ b/apps/setting/models_provider/impl/xf_model_provider/credential/llm.py
@@ -0,0 +1,101 @@
+# coding=utf-8
+"""
+ @project: MaxKB
+ @Author:虎
+ @file: llm.py
+ @date:2024/7/12 10:29
+ @desc:
+"""
+import traceback
+from typing import Dict
+
+from django.utils.translation import gettext_lazy as _, gettext
+from langchain_core.messages import HumanMessage
+
+from common import forms
+from common.exception.app_exception import AppApiException
+from common.forms import BaseForm, TooltipLabel
+from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode
+
+
+class XunFeiLLMModelGeneralParams(BaseForm):
+ temperature = forms.SliderField(TooltipLabel(_('Temperature'),
+ _('Higher values make the output more random, while lower values make it more focused and deterministic')),
+ required=True, default_value=0.5,
+ _min=0.1,
+ _max=1.0,
+ _step=0.01,
+ precision=2)
+
+ max_tokens = forms.SliderField(
+ TooltipLabel(_('Output the maximum Tokens'),
+ _('Specify the maximum number of tokens that the model can generate')),
+ required=True, default_value=4096,
+ _min=1,
+ _max=100000,
+ _step=1,
+ precision=0)
+
+
+class XunFeiLLMModelProParams(BaseForm):
+ temperature = forms.SliderField(TooltipLabel(_('Temperature'),
+ _('Higher values make the output more random, while lower values make it more focused and deterministic')),
+ required=True, default_value=0.5,
+ _min=0.1,
+ _max=1.0,
+ _step=0.01,
+ precision=2)
+
+ max_tokens = forms.SliderField(
+ TooltipLabel(_('Output the maximum Tokens'),
+ _('Specify the maximum number of tokens that the model can generate')),
+ required=True, default_value=4096,
+ _min=1,
+ _max=100000,
+ _step=1,
+ precision=0)
+
+
+class XunFeiLLMModelCredential(BaseForm, BaseModelCredential):
+
+ def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], model_params, provider,
+ raise_exception=False):
+ model_type_list = provider.get_model_type_list()
+ if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))):
+ raise AppApiException(ValidCode.valid_error.value,
+ gettext('{model_type} Model type is not supported').format(model_type=model_type))
+
+ for key in ['spark_api_url', 'spark_app_id', 'spark_api_key', 'spark_api_secret']:
+ if key not in model_credential:
+ if raise_exception:
+ raise AppApiException(ValidCode.valid_error.value, gettext('{key} is required').format(key=key))
+ else:
+ return False
+ try:
+ model = provider.get_model(model_type, model_name, model_credential, **model_params)
+ model.invoke([HumanMessage(content=gettext('Hello'))])
+ except Exception as e:
+ traceback.print_exc()
+ if isinstance(e, AppApiException):
+ raise e
+ if raise_exception:
+ raise AppApiException(ValidCode.valid_error.value,
+ gettext(
+ 'Verification failed, please check whether the parameters are correct: {error}').format(
+ error=str(e)))
+ else:
+ return False
+ return True
+
+ def encryption_dict(self, model: Dict[str, object]):
+ return {**model, 'spark_api_secret': super().encryption(model.get('spark_api_secret', ''))}
+
+ spark_api_url = forms.TextInputField('API URL', required=True)
+ spark_app_id = forms.TextInputField('APP ID', required=True)
+ spark_api_key = forms.PasswordInputField("API Key", required=True)
+ spark_api_secret = forms.PasswordInputField('API Secret', required=True)
+
+ def get_model_params_setting_form(self, model_name):
+ if model_name == 'general' or model_name == 'pro-128k':
+ return XunFeiLLMModelGeneralParams()
+ return XunFeiLLMModelProParams()
diff --git a/apps/setting/models_provider/impl/xf_model_provider/credential/stt.py b/apps/setting/models_provider/impl/xf_model_provider/credential/stt.py
new file mode 100644
index 00000000000..44db3b15272
--- /dev/null
+++ b/apps/setting/models_provider/impl/xf_model_provider/credential/stt.py
@@ -0,0 +1,51 @@
+# coding=utf-8
+import traceback
+from typing import Dict
+
+from django.utils.translation import gettext as _
+
+from common import forms
+from common.exception.app_exception import AppApiException
+from common.forms import BaseForm
+from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode
+
+
+class XunFeiSTTModelCredential(BaseForm, BaseModelCredential):
+ spark_api_url = forms.TextInputField('API URL', required=True, default_value='wss://iat-api.xfyun.cn/v2/iat')
+ spark_app_id = forms.TextInputField('APP ID', required=True)
+ spark_api_key = forms.PasswordInputField("API Key", required=True)
+ spark_api_secret = forms.PasswordInputField('API Secret', required=True)
+
+ def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], model_params, provider,
+ raise_exception=False):
+ model_type_list = provider.get_model_type_list()
+ if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))):
+ raise AppApiException(ValidCode.valid_error.value,
+ _('{model_type} Model type is not supported').format(model_type=model_type))
+
+ for key in ['spark_api_url', 'spark_app_id', 'spark_api_key', 'spark_api_secret']:
+ if key not in model_credential:
+ if raise_exception:
+ raise AppApiException(ValidCode.valid_error.value, _('{key} is required').format(key=key))
+ else:
+ return False
+ try:
+ model = provider.get_model(model_type, model_name, model_credential)
+ model.check_auth()
+ except Exception as e:
+ traceback.print_exc()
+ if isinstance(e, AppApiException):
+ raise e
+ if raise_exception:
+ raise AppApiException(ValidCode.valid_error.value,
+ _('Verification failed, please check whether the parameters are correct: {error}').format(
+ error=str(e)))
+ else:
+ return False
+ return True
+
+ def encryption_dict(self, model: Dict[str, object]):
+ return {**model, 'spark_api_secret': super().encryption(model.get('spark_api_secret', ''))}
+
+ def get_model_params_setting_form(self, model_name):
+ pass
diff --git a/apps/setting/models_provider/impl/xf_model_provider/credential/tts.py b/apps/setting/models_provider/impl/xf_model_provider/credential/tts.py
new file mode 100644
index 00000000000..39463886264
--- /dev/null
+++ b/apps/setting/models_provider/impl/xf_model_provider/credential/tts.py
@@ -0,0 +1,75 @@
+# coding=utf-8
+import traceback
+from typing import Dict
+
+from django.utils.translation import gettext_lazy as _, gettext
+
+from common import forms
+from common.exception.app_exception import AppApiException
+from common.forms import BaseForm, TooltipLabel
+from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode
+
+
+class XunFeiTTSModelGeneralParams(BaseForm):
+ vcn = forms.SingleSelect(
+ TooltipLabel(_('Speaker'),
+ _('Speaker, optional value: Please go to the console to add a trial or purchase speaker. After adding, the speaker parameter value will be displayed.')),
+ required=True, default_value='xiaoyan',
+ text_field='value',
+ value_field='value',
+ option_list=[
+ {'text': _('iFlytek Xiaoyan'), 'value': 'xiaoyan'},
+ {'text': _('iFlytek Xujiu'), 'value': 'aisjiuxu'},
+ {'text': _('iFlytek Xiaoping'), 'value': 'aisxping'},
+ {'text': _('iFlytek Xiaojing'), 'value': 'aisjinger'},
+ {'text': _('iFlytek Xuxiaobao'), 'value': 'aisbabyxu'},
+ ])
+ speed = forms.SliderField(
+ TooltipLabel(_('speaking speed'), _('Speech speed, optional value: [0-100], default is 50')),
+ required=True, default_value=50,
+ _min=1,
+ _max=100,
+ _step=5,
+ precision=1)
+
+
+class XunFeiTTSModelCredential(BaseForm, BaseModelCredential):
+ spark_api_url = forms.TextInputField('API URL', required=True, default_value='wss://tts-api.xfyun.cn/v2/tts')
+ spark_app_id = forms.TextInputField('APP ID', required=True)
+ spark_api_key = forms.PasswordInputField("API Key", required=True)
+ spark_api_secret = forms.PasswordInputField('API Secret', required=True)
+
+ def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], model_params, provider,
+ raise_exception=False):
+ model_type_list = provider.get_model_type_list()
+ if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))):
+ raise AppApiException(ValidCode.valid_error.value,
+ gettext('{model_type} Model type is not supported').format(model_type=model_type))
+
+ for key in ['spark_api_url', 'spark_app_id', 'spark_api_key', 'spark_api_secret']:
+ if key not in model_credential:
+ if raise_exception:
+ raise AppApiException(ValidCode.valid_error.value, gettext('{key} is required').format(key=key))
+ else:
+ return False
+ try:
+ model = provider.get_model(model_type, model_name, model_credential, **model_params)
+ model.check_auth()
+ except Exception as e:
+ traceback.print_exc()
+ if isinstance(e, AppApiException):
+ raise e
+ if raise_exception:
+ raise AppApiException(ValidCode.valid_error.value,
+ gettext(
+ 'Verification failed, please check whether the parameters are correct: {error}').format(
+ error=str(e)))
+ else:
+ return False
+ return True
+
+ def encryption_dict(self, model: Dict[str, object]):
+ return {**model, 'spark_api_secret': super().encryption(model.get('spark_api_secret', ''))}
+
+ def get_model_params_setting_form(self, model_name):
+ return XunFeiTTSModelGeneralParams()
diff --git a/apps/setting/models_provider/impl/xf_model_provider/model/embedding.py b/apps/setting/models_provider/impl/xf_model_provider/model/embedding.py
new file mode 100644
index 00000000000..78cc04ceb4b
--- /dev/null
+++ b/apps/setting/models_provider/impl/xf_model_provider/model/embedding.py
@@ -0,0 +1,49 @@
+# coding=utf-8
+"""
+ @project: MaxKB
+ @Author:虎
+ @file: embedding.py
+ @date:2024/10/17 15:29
+ @desc:
+"""
+
+import base64
+import json
+from typing import Dict, Optional
+
+import numpy as np
+from langchain_community.embeddings import SparkLLMTextEmbeddings
+from numpy import ndarray
+
+from setting.models_provider.base_model_provider import MaxKBBaseModel
+
+
+class XFEmbedding(MaxKBBaseModel, SparkLLMTextEmbeddings):
+ @staticmethod
+ def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs):
+ return XFEmbedding(
+ spark_app_id=model_credential.get('spark_app_id'),
+ spark_api_key=model_credential.get('spark_api_key'),
+ spark_api_secret=model_credential.get('spark_api_secret')
+ )
+
+ @staticmethod
+ def _parser_message(
+ message: str,
+ ) -> Optional[ndarray]:
+ data = json.loads(message)
+ code = data["header"]["code"]
+ if code != 0:
+            # iFlytek's QPS limit tends to raise errors here, so using iFlytek's embedding model is not recommended
+ raise Exception(f"Request error: {code}, {data}")
+ else:
+ text_base = data["payload"]["feature"]["text"]
+ text_data = base64.b64decode(text_base)
+ dt = np.dtype(np.float32)
+ dt = dt.newbyteorder("<")
+ text = np.frombuffer(text_data, dtype=dt)
+ if len(text) > 2560:
+ array = text[:2560]
+ else:
+ array = text
+ return array
diff --git a/apps/setting/models_provider/impl/xf_model_provider/model/iat_mp3_16k.mp3 b/apps/setting/models_provider/impl/xf_model_provider/model/iat_mp3_16k.mp3
new file mode 100644
index 00000000000..75e744c8ff5
Binary files /dev/null and b/apps/setting/models_provider/impl/xf_model_provider/model/iat_mp3_16k.mp3 differ
diff --git a/apps/setting/models_provider/impl/xf_model_provider/model/image.py b/apps/setting/models_provider/impl/xf_model_provider/model/image.py
new file mode 100644
index 00000000000..b7813287de2
--- /dev/null
+++ b/apps/setting/models_provider/impl/xf_model_provider/model/image.py
@@ -0,0 +1,96 @@
+# coding=utf-8
+import base64
+import os
+from typing import Dict, Any, List, Optional, Iterator
+
+from langchain_core.messages import SystemMessage
+from langchain_community.chat_models.sparkllm import ChatSparkLLM, _convert_delta_to_message_chunk
+from langchain_core.callbacks import CallbackManagerForLLMRun
+from langchain_core.messages import BaseMessage, ChatMessage, HumanMessage, AIMessage, AIMessageChunk
+from langchain_core.outputs import ChatGenerationChunk
+
+from setting.models_provider.base_model_provider import MaxKBBaseModel
+
+
+class ImageMessage(HumanMessage):
+ content: str
+
+
+def convert_message_to_dict(message: BaseMessage) -> dict:
+ message_dict: Dict[str, Any]
+ if isinstance(message, ChatMessage):
+ message_dict = {"role": "user", "content": message.content}
+ elif isinstance(message, ImageMessage):
+ message_dict = {"role": "user", "content": message.content, "content_type": "image"}
+ elif isinstance(message, HumanMessage):
+ message_dict = {"role": "user", "content": message.content}
+ elif isinstance(message, AIMessage):
+ message_dict = {"role": "assistant", "content": message.content}
+ if "function_call" in message.additional_kwargs:
+ message_dict["function_call"] = message.additional_kwargs["function_call"]
+ # If function call only, content is None not empty string
+ if message_dict["content"] == "":
+ message_dict["content"] = None
+ if "tool_calls" in message.additional_kwargs:
+ message_dict["tool_calls"] = message.additional_kwargs["tool_calls"]
+ # If tool calls only, content is None not empty string
+ if message_dict["content"] == "":
+ message_dict["content"] = None
+ elif isinstance(message, SystemMessage):
+ message_dict = {"role": "system", "content": message.content}
+ else:
+ raise ValueError(f"Got unknown type {message}")
+
+ return message_dict
+
+
+class XFSparkImage(MaxKBBaseModel, ChatSparkLLM):
+ spark_app_id: str
+ spark_api_key: str
+ spark_api_secret: str
+ spark_api_url: str
+
+ @staticmethod
+ def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs):
+ optional_params = MaxKBBaseModel.filter_optional_params(model_kwargs)
+ return XFSparkImage(
+ spark_app_id=model_credential.get('spark_app_id'),
+ spark_api_key=model_credential.get('spark_api_key'),
+ spark_api_secret=model_credential.get('spark_api_secret'),
+ spark_api_url=model_credential.get('spark_api_url'),
+ **optional_params
+ )
+
+ @staticmethod
+ def generate_message(prompt: str, image) -> list[BaseMessage]:
+ if image is None:
+ cwd = os.path.dirname(os.path.abspath(__file__))
+ with open(f'{cwd}/img_1.png', 'rb') as f:
+ base64_image = base64.b64encode(f.read()).decode("utf-8")
+            return [ImageMessage(f'data:image/png;base64,{base64_image}'), HumanMessage(prompt)]
+ return [HumanMessage(prompt)]
+
+ def _stream(
+ self,
+ messages: List[BaseMessage],
+ stop: Optional[List[str]] = None,
+ run_manager: Optional[CallbackManagerForLLMRun] = None,
+ **kwargs: Any,
+ ) -> Iterator[ChatGenerationChunk]:
+ default_chunk_class = AIMessageChunk
+
+ self.client.arun(
+ [convert_message_to_dict(m) for m in messages],
+ self.spark_user_id,
+ self.model_kwargs,
+ streaming=True,
+ )
+ for content in self.client.subscribe(timeout=self.request_timeout):
+ if "data" not in content:
+ continue
+ delta = content["data"]
+ chunk = _convert_delta_to_message_chunk(delta, default_chunk_class)
+ cg_chunk = ChatGenerationChunk(message=chunk)
+ if run_manager:
+ run_manager.on_llm_new_token(str(chunk.content), chunk=cg_chunk)
+ yield cg_chunk
diff --git a/apps/setting/models_provider/impl/xf_model_provider/model/img_1.png b/apps/setting/models_provider/impl/xf_model_provider/model/img_1.png
new file mode 100644
index 00000000000..ccb9d3b2035
Binary files /dev/null and b/apps/setting/models_provider/impl/xf_model_provider/model/img_1.png differ
diff --git a/apps/setting/models_provider/impl/xf_model_provider/model/llm.py b/apps/setting/models_provider/impl/xf_model_provider/model/llm.py
new file mode 100644
index 00000000000..6380f752f61
--- /dev/null
+++ b/apps/setting/models_provider/impl/xf_model_provider/model/llm.py
@@ -0,0 +1,78 @@
+# coding=utf-8
+"""
+ @project: maxkb
+ @Author:虎
+ @file: __init__.py.py
+ @date:2024/04/19 15:55
+ @desc:
+"""
+from typing import List, Optional, Any, Iterator, Dict
+
+from langchain_community.chat_models.sparkllm import \
+ ChatSparkLLM, convert_message_to_dict, _convert_delta_to_message_chunk
+from langchain_core.callbacks import CallbackManagerForLLMRun
+from langchain_core.messages import BaseMessage, AIMessageChunk
+from langchain_core.outputs import ChatGenerationChunk
+
+from setting.models_provider.base_model_provider import MaxKBBaseModel
+
+
+class XFChatSparkLLM(MaxKBBaseModel, ChatSparkLLM):
+ @staticmethod
+ def is_cache_model():
+ return False
+
+ @staticmethod
+ def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs):
+ optional_params = MaxKBBaseModel.filter_optional_params(model_kwargs)
+ return XFChatSparkLLM(
+ spark_app_id=model_credential.get('spark_app_id'),
+ spark_api_key=model_credential.get('spark_api_key'),
+ spark_api_secret=model_credential.get('spark_api_secret'),
+ spark_api_url=model_credential.get('spark_api_url'),
+ spark_llm_domain=model_name,
+ streaming=model_kwargs.get('streaming', False),
+ **optional_params
+ )
+
+ usage_metadata: dict = {}
+
+ def get_last_generation_info(self) -> Optional[Dict[str, Any]]:
+ return self.usage_metadata
+
+ def get_num_tokens_from_messages(self, messages: List[BaseMessage]) -> int:
+ return self.usage_metadata.get('prompt_tokens', 0)
+
+ def get_num_tokens(self, text: str) -> int:
+ return self.usage_metadata.get('completion_tokens', 0)
+
+ def _stream(
+ self,
+ messages: List[BaseMessage],
+ stop: Optional[List[str]] = None,
+ run_manager: Optional[CallbackManagerForLLMRun] = None,
+ **kwargs: Any,
+ ) -> Iterator[ChatGenerationChunk]:
+ default_chunk_class = AIMessageChunk
+
+ self.client.arun(
+ [convert_message_to_dict(m) for m in messages],
+ self.spark_user_id,
+ self.model_kwargs,
+ True,
+ )
+ for content in self.client.subscribe(timeout=self.request_timeout):
+ if "data" in content:
+ delta = content["data"]
+ chunk = _convert_delta_to_message_chunk(delta, default_chunk_class)
+ cg_chunk = ChatGenerationChunk(message=chunk)
+ elif "usage" in content:
+ generation_info = content["usage"]
+ self.usage_metadata = generation_info
+ continue
+ else:
+ continue
+ if cg_chunk is not None:
+ if run_manager:
+ run_manager.on_llm_new_token(str(cg_chunk.message.content), chunk=cg_chunk)
+ yield cg_chunk
diff --git a/apps/setting/models_provider/impl/xf_model_provider/model/stt.py b/apps/setting/models_provider/impl/xf_model_provider/model/stt.py
new file mode 100644
index 00000000000..a1150bc83f2
--- /dev/null
+++ b/apps/setting/models_provider/impl/xf_model_provider/model/stt.py
@@ -0,0 +1,171 @@
+# -*- coding:utf-8 -*-
+#
+# 错误码链接:https://www.xfyun.cn/document/error-code (code返回错误码时必看)
+# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
+import asyncio
+import base64
+import datetime
+import hashlib
+import hmac
+import json
+import logging
+import os
+import ssl
+from datetime import datetime, UTC
+from typing import Dict
+from urllib.parse import urlencode, urlparse
+
+import websockets
+
+from setting.models_provider.base_model_provider import MaxKBBaseModel
+from setting.models_provider.impl.base_stt import BaseSpeechToText
+
+STATUS_FIRST_FRAME = 0 # 第一帧的标识
+STATUS_CONTINUE_FRAME = 1 # 中间帧标识
+STATUS_LAST_FRAME = 2 # 最后一帧的标识
+
+ssl_context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
+ssl_context.check_hostname = False
+ssl_context.verify_mode = ssl.CERT_NONE
+
+max_kb = logging.getLogger("max_kb")
+
+
+class XFSparkSpeechToText(MaxKBBaseModel, BaseSpeechToText):
+ spark_app_id: str
+ spark_api_key: str
+ spark_api_secret: str
+ spark_api_url: str
+
+ def __init__(self, **kwargs):
+ super().__init__(**kwargs)
+ self.spark_api_url = kwargs.get('spark_api_url')
+ self.spark_app_id = kwargs.get('spark_app_id')
+ self.spark_api_key = kwargs.get('spark_api_key')
+ self.spark_api_secret = kwargs.get('spark_api_secret')
+
+ @staticmethod
+ def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs):
+ optional_params = {}
+ if 'max_tokens' in model_kwargs and model_kwargs['max_tokens'] is not None:
+ optional_params['max_tokens'] = model_kwargs['max_tokens']
+ if 'temperature' in model_kwargs and model_kwargs['temperature'] is not None:
+ optional_params['temperature'] = model_kwargs['temperature']
+ return XFSparkSpeechToText(
+ spark_app_id=model_credential.get('spark_app_id'),
+ spark_api_key=model_credential.get('spark_api_key'),
+ spark_api_secret=model_credential.get('spark_api_secret'),
+ spark_api_url=model_credential.get('spark_api_url'),
+ **optional_params
+ )
+
+ # 生成url
+ def create_url(self):
+ url = self.spark_api_url
+ host = urlparse(url).hostname
+ # 生成RFC1123格式的时间戳
+ gmt_format = '%a, %d %b %Y %H:%M:%S GMT'
+ date = datetime.now(UTC).strftime(gmt_format)
+
+ # 拼接字符串
+ signature_origin = "host: " + host + "\n"
+ signature_origin += "date: " + date + "\n"
+        signature_origin += "GET " + urlparse(url).path + " " + "HTTP/1.1"
+ # 进行hmac-sha256进行加密
+ signature_sha = hmac.new(self.spark_api_secret.encode('utf-8'), signature_origin.encode('utf-8'),
+ digestmod=hashlib.sha256).digest()
+ signature_sha = base64.b64encode(signature_sha).decode(encoding='utf-8')
+
+ authorization_origin = "api_key=\"%s\", algorithm=\"%s\", headers=\"%s\", signature=\"%s\"" % (
+ self.spark_api_key, "hmac-sha256", "host date request-line", signature_sha)
+ authorization = base64.b64encode(authorization_origin.encode('utf-8')).decode(encoding='utf-8')
+ # 将请求的鉴权参数组合为字典
+ v = {
+ "authorization": authorization,
+ "date": date,
+ "host": host
+ }
+ # 拼接鉴权参数,生成url
+ url = url + '?' + urlencode(v)
+ # print("date: ",date)
+ # print("v: ",v)
+ # 此处打印出建立连接时候的url,参考本demo的时候可取消上方打印的注释,比对相同参数时生成的url与自己代码生成的url是否一致
+ # print('websocket url :', url)
+ return url
+
+ def check_auth(self):
+ cwd = os.path.dirname(os.path.abspath(__file__))
+ with open(f'{cwd}/iat_mp3_16k.mp3', 'rb') as f:
+ self.speech_to_text(f)
+
+ def speech_to_text(self, file):
+ async def handle():
+ async with websockets.connect(self.create_url(), max_size=1000000000, ssl=ssl_context) as ws:
+ # 发送 full client request
+ await self.send(ws, file)
+ return await self.handle_message(ws)
+
+ return asyncio.run(handle())
+
+ @staticmethod
+ async def handle_message(ws):
+ res = await ws.recv()
+ message = json.loads(res)
+ code = message["code"]
+ sid = message["sid"]
+ if code != 0:
+ errMsg = message["message"]
+ raise Exception(f"sid: {sid} call error: {errMsg} code is: {code}")
+ else:
+ data = message["data"]["result"]["ws"]
+ result = ""
+ for i in data:
+ for w in i["cw"]:
+ result += w["w"]
+ # print("sid:%s call success!,data is:%s" % (sid, json.dumps(data, ensure_ascii=False)))
+ return result
+
+ # 收到websocket连接建立的处理
+ async def send(self, ws, file):
+ frameSize = 8000 # 每一帧的音频大小
+ status = STATUS_FIRST_FRAME # 音频的状态信息,标识音频是第一帧,还是中间帧、最后一帧
+
+ while True:
+ buf = file.read(frameSize)
+ # 文件结束
+ if not buf:
+ status = STATUS_LAST_FRAME
+ # 第一帧处理
+ # 发送第一帧音频,带business 参数
+ # appid 必须带上,只需第一帧发送
+ if status == STATUS_FIRST_FRAME:
+ d = {
+ "common": {"app_id": self.spark_app_id},
+ "business": {
+ "domain": "iat",
+ "language": "zh_cn",
+ "accent": "mandarin",
+ "vinfo": 1,
+ "vad_eos": 10000
+ },
+ "data": {
+ "status": 0, "format": "audio/L16;rate=16000",
+ "audio": str(base64.b64encode(buf), 'utf-8'),
+ "encoding": "lame"}
+ }
+ d = json.dumps(d)
+ await ws.send(d)
+ status = STATUS_CONTINUE_FRAME
+ # 中间帧处理
+ elif status == STATUS_CONTINUE_FRAME:
+ d = {"data": {"status": 1, "format": "audio/L16;rate=16000",
+ "audio": str(base64.b64encode(buf), 'utf-8'),
+ "encoding": "lame"}}
+ await ws.send(json.dumps(d))
+ # 最后一帧处理
+ elif status == STATUS_LAST_FRAME:
+ d = {"data": {"status": 2, "format": "audio/L16;rate=16000",
+ "audio": str(base64.b64encode(buf), 'utf-8'),
+ "encoding": "lame"}}
+ await ws.send(json.dumps(d))
+ break
diff --git a/apps/setting/models_provider/impl/xf_model_provider/model/tts.py b/apps/setting/models_provider/impl/xf_model_provider/model/tts.py
new file mode 100644
index 00000000000..1db2b83b31b
--- /dev/null
+++ b/apps/setting/models_provider/impl/xf_model_provider/model/tts.py
@@ -0,0 +1,150 @@
+# -*- coding:utf-8 -*-
+#
+# author: iflytek
+#
+# 错误码链接:https://www.xfyun.cn/document/error-code (code返回错误码时必看)
+# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
+import asyncio
+import base64
+import datetime
+import hashlib
+import hmac
+import json
+import logging
+import ssl
+from datetime import datetime, UTC
+from typing import Dict
+from urllib.parse import urlencode, urlparse
+
+import websockets
+from django.utils.translation import gettext as _
+
+from common.util.common import _remove_empty_lines
+from setting.models_provider.base_model_provider import MaxKBBaseModel
+from setting.models_provider.impl.base_tts import BaseTextToSpeech
+
+max_kb = logging.getLogger("max_kb")
+
+STATUS_FIRST_FRAME = 0 # 第一帧的标识
+STATUS_CONTINUE_FRAME = 1 # 中间帧标识
+STATUS_LAST_FRAME = 2 # 最后一帧的标识
+
+ssl_context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
+ssl_context.check_hostname = False
+ssl_context.verify_mode = ssl.CERT_NONE
+
+
+class XFSparkTextToSpeech(MaxKBBaseModel, BaseTextToSpeech):
+ spark_app_id: str
+ spark_api_key: str
+ spark_api_secret: str
+ spark_api_url: str
+ params: dict
+
+ def __init__(self, **kwargs):
+ super().__init__(**kwargs)
+ self.spark_api_url = kwargs.get('spark_api_url')
+ self.spark_app_id = kwargs.get('spark_app_id')
+ self.spark_api_key = kwargs.get('spark_api_key')
+ self.spark_api_secret = kwargs.get('spark_api_secret')
+ self.params = kwargs.get('params')
+
+ @staticmethod
+ def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs):
+ optional_params = {'params': {'vcn': 'xiaoyan', 'speed': 50}}
+ for key, value in model_kwargs.items():
+ if key not in ['model_id', 'use_local', 'streaming']:
+ optional_params['params'][key] = value
+ return XFSparkTextToSpeech(
+ spark_app_id=model_credential.get('spark_app_id'),
+ spark_api_key=model_credential.get('spark_api_key'),
+ spark_api_secret=model_credential.get('spark_api_secret'),
+ spark_api_url=model_credential.get('spark_api_url'),
+ **optional_params
+ )
+
+ # 生成url
+ def create_url(self):
+ url = self.spark_api_url
+ host = urlparse(url).hostname
+ # 生成RFC1123格式的时间戳
+ gmt_format = '%a, %d %b %Y %H:%M:%S GMT'
+ date = datetime.now(UTC).strftime(gmt_format)
+
+ # 拼接字符串
+ signature_origin = "host: " + host + "\n"
+ signature_origin += "date: " + date + "\n"
+        signature_origin += "GET " + urlparse(url).path + " " + "HTTP/1.1"
+ # 进行hmac-sha256进行加密
+ signature_sha = hmac.new(self.spark_api_secret.encode('utf-8'), signature_origin.encode('utf-8'),
+ digestmod=hashlib.sha256).digest()
+ signature_sha = base64.b64encode(signature_sha).decode(encoding='utf-8')
+
+ authorization_origin = "api_key=\"%s\", algorithm=\"%s\", headers=\"%s\", signature=\"%s\"" % (
+ self.spark_api_key, "hmac-sha256", "host date request-line", signature_sha)
+ authorization = base64.b64encode(authorization_origin.encode('utf-8')).decode(encoding='utf-8')
+ # 将请求的鉴权参数组合为字典
+ v = {
+ "authorization": authorization,
+ "date": date,
+ "host": host
+ }
+ # 拼接鉴权参数,生成url
+ url = url + '?' + urlencode(v)
+ # print("date: ",date)
+ # print("v: ",v)
+ # 此处打印出建立连接时候的url,参考本demo的时候可取消上方打印的注释,比对相同参数时生成的url与自己代码生成的url是否一致
+ # print('websocket url :', url)
+ return url
+
+ def check_auth(self):
+ self.text_to_speech(_('Hello'))
+
+ def text_to_speech(self, text):
+
+ # 使用小语种须使用以下方式,此处的unicode指的是 utf16小端的编码方式,即"UTF-16LE"”
+ # self.Data = {"status": 2, "text": str(base64.b64encode(self.Text.encode('utf-16')), "UTF8")}
+ text = _remove_empty_lines(text)
+
+ async def handle():
+ async with websockets.connect(self.create_url(), max_size=1000000000, ssl=ssl_context) as ws:
+ # 发送 full client request
+ await self.send(ws, text)
+ return await self.handle_message(ws)
+
+ return asyncio.run(handle())
+
+ def is_cache_model(self):
+ return False
+
+ @staticmethod
+ async def handle_message(ws):
+ audio_bytes: bytes = b''
+ while True:
+ res = await ws.recv()
+ message = json.loads(res)
+ # print(message)
+ code = message["code"]
+ sid = message["sid"]
+
+ if code != 0:
+ errMsg = message["message"]
+ raise Exception(f"sid: {sid} call error: {errMsg} code is: {code}")
+ else:
+ audio = message["data"]["audio"]
+ audio = base64.b64decode(audio)
+ audio_bytes += audio
+ # 退出
+ if message["data"]["status"] == 2:
+ break
+ return audio_bytes
+
+ async def send(self, ws, text):
+ business = {"aue": "lame", "sfl": 1, "auf": "audio/L16;rate=16000", "tte": "utf8"}
+ d = {
+ "common": {"app_id": self.spark_app_id},
+ "business": business | self.params,
+ "data": {"status": 2, "text": str(base64.b64encode(text.encode('utf-8')), "UTF8")},
+ }
+ d = json.dumps(d)
+ await ws.send(d)
diff --git a/apps/setting/models_provider/impl/xf_model_provider/model/xf_chat_model.py b/apps/setting/models_provider/impl/xf_model_provider/model/xf_chat_model.py
deleted file mode 100644
index 3b6a22c4747..00000000000
--- a/apps/setting/models_provider/impl/xf_model_provider/model/xf_chat_model.py
+++ /dev/null
@@ -1,53 +0,0 @@
-# coding=utf-8
-"""
- @project: maxkb
- @Author:虎
- @file: __init__.py.py
- @date:2024/04/19 15:55
- @desc:
-"""
-
-from typing import List, Optional, Any, Iterator
-
-from langchain_community.chat_models import ChatSparkLLM
-from langchain_community.chat_models.sparkllm import _convert_message_to_dict, _convert_delta_to_message_chunk
-from langchain_core.callbacks import CallbackManagerForLLMRun
-from langchain_core.messages import BaseMessage, AIMessageChunk, get_buffer_string
-from langchain_core.outputs import ChatGenerationChunk
-
-from common.config.tokenizer_manage_config import TokenizerManage
-
-
-class XFChatSparkLLM(ChatSparkLLM):
- def get_num_tokens_from_messages(self, messages: List[BaseMessage]) -> int:
- tokenizer = TokenizerManage.get_tokenizer()
- return sum([len(tokenizer.encode(get_buffer_string([m]))) for m in messages])
-
- def get_num_tokens(self, text: str) -> int:
- tokenizer = TokenizerManage.get_tokenizer()
- return len(tokenizer.encode(text))
-
- def _stream(
- self,
- messages: List[BaseMessage],
- stop: Optional[List[str]] = None,
- run_manager: Optional[CallbackManagerForLLMRun] = None,
- **kwargs: Any,
- ) -> Iterator[ChatGenerationChunk]:
- default_chunk_class = AIMessageChunk
-
- self.client.arun(
- [_convert_message_to_dict(m) for m in messages],
- self.spark_user_id,
- self.model_kwargs,
- True,
- )
- for content in self.client.subscribe(timeout=self.request_timeout):
- if "data" not in content:
- continue
- delta = content["data"]
- chunk = _convert_delta_to_message_chunk(delta, default_chunk_class)
- cg_chunk = ChatGenerationChunk(message=chunk)
- if run_manager:
- run_manager.on_llm_new_token(str(chunk.content), chunk=cg_chunk)
- yield cg_chunk
diff --git a/apps/setting/models_provider/impl/xf_model_provider/xf_model_provider.py b/apps/setting/models_provider/impl/xf_model_provider/xf_model_provider.py
index 28059c5c69b..faf21144dc4 100644
--- a/apps/setting/models_provider/impl/xf_model_provider/xf_model_provider.py
+++ b/apps/setting/models_provider/impl/xf_model_provider/xf_model_provider.py
@@ -7,97 +7,62 @@
@desc:
"""
import os
-from typing import Dict
-
-from langchain.schema import HumanMessage
-from langchain_community.chat_models import ChatSparkLLM
+import ssl
-from common import forms
-from common.exception.app_exception import AppApiException
-from common.forms import BaseForm
from common.util.file_util import get_file_content
-from setting.models_provider.base_model_provider import ModelProvideInfo, ModelTypeConst, BaseModelCredential, \
- ModelInfo, IModelProvider, ValidCode
-from setting.models_provider.impl.xf_model_provider.model.xf_chat_model import XFChatSparkLLM
+from setting.models_provider.base_model_provider import ModelProvideInfo, ModelTypeConst, ModelInfo, IModelProvider, \
+ ModelInfoManage
+from setting.models_provider.impl.xf_model_provider.credential.embedding import XFEmbeddingCredential
+from setting.models_provider.impl.xf_model_provider.credential.image import XunFeiImageModelCredential
+from setting.models_provider.impl.xf_model_provider.credential.llm import XunFeiLLMModelCredential
+from setting.models_provider.impl.xf_model_provider.credential.stt import XunFeiSTTModelCredential
+from setting.models_provider.impl.xf_model_provider.credential.tts import XunFeiTTSModelCredential
+from setting.models_provider.impl.xf_model_provider.model.embedding import XFEmbedding
+from setting.models_provider.impl.xf_model_provider.model.image import XFSparkImage
+from setting.models_provider.impl.xf_model_provider.model.llm import XFChatSparkLLM
+from setting.models_provider.impl.xf_model_provider.model.stt import XFSparkSpeechToText
+from setting.models_provider.impl.xf_model_provider.model.tts import XFSparkTextToSpeech
from smartdoc.conf import PROJECT_DIR
-import ssl
+from django.utils.translation import gettext as _
ssl._create_default_https_context = ssl.create_default_context()
-
-class XunFeiLLMModelCredential(BaseForm, BaseModelCredential):
-
- def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], raise_exception=False):
- model_type_list = XunFeiModelProvider().get_model_type_list()
- if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))):
- raise AppApiException(ValidCode.valid_error.value, f'{model_type} 模型类型不支持')
-
- for key in ['spark_api_url', 'spark_app_id', 'spark_api_key', 'spark_api_secret']:
- if key not in model_credential:
- if raise_exception:
- raise AppApiException(ValidCode.valid_error.value, f'{key} 字段为必填字段')
- else:
- return False
- try:
- model = XunFeiModelProvider().get_model(model_type, model_name, model_credential)
- model.invoke([HumanMessage(content='你好')])
- except Exception as e:
- if isinstance(e, AppApiException):
- raise e
- if raise_exception:
- raise AppApiException(ValidCode.valid_error.value, f'校验失败,请检查参数是否正确: {str(e)}')
- else:
- return False
- return True
-
- def encryption_dict(self, model: Dict[str, object]):
- return {**model, 'spark_api_secret': super().encryption(model.get('spark_api_secret', ''))}
-
- spark_api_url = forms.TextInputField('API 域名', required=True)
- spark_app_id = forms.TextInputField('APP ID', required=True)
- spark_api_key = forms.PasswordInputField("API Key", required=True)
- spark_api_secret = forms.PasswordInputField('API Secret', required=True)
-
-
qwen_model_credential = XunFeiLLMModelCredential()
-
-model_dict = {
- 'generalv3.5': ModelInfo('generalv3.5', '', ModelTypeConst.LLM, qwen_model_credential),
- 'generalv3': ModelInfo('generalv3', '', ModelTypeConst.LLM, qwen_model_credential),
- 'generalv2': ModelInfo('generalv2', '', ModelTypeConst.LLM, qwen_model_credential)
-}
+stt_model_credential = XunFeiSTTModelCredential()
+image_model_credential = XunFeiImageModelCredential()
+tts_model_credential = XunFeiTTSModelCredential()
+embedding_model_credential = XFEmbeddingCredential()
+model_info_list = [
+ ModelInfo('generalv3.5', '', ModelTypeConst.LLM, qwen_model_credential, XFChatSparkLLM),
+ ModelInfo('generalv3', '', ModelTypeConst.LLM, qwen_model_credential, XFChatSparkLLM),
+ ModelInfo('generalv2', '', ModelTypeConst.LLM, qwen_model_credential, XFChatSparkLLM),
+ ModelInfo('iat', _('Chinese and English recognition'), ModelTypeConst.STT, stt_model_credential, XFSparkSpeechToText),
+ ModelInfo('tts', '', ModelTypeConst.TTS, tts_model_credential, XFSparkTextToSpeech),
+ ModelInfo('embedding', '', ModelTypeConst.EMBEDDING, embedding_model_credential, XFEmbedding)
+]
+
+model_info_manage = (
+ ModelInfoManage.builder()
+ .append_model_info_list(model_info_list)
+ .append_default_model_info(
+ ModelInfo('generalv3.5', '', ModelTypeConst.LLM, qwen_model_credential, XFChatSparkLLM))
+ .append_default_model_info(
+ ModelInfo('iat', _('Chinese and English recognition'), ModelTypeConst.STT, stt_model_credential, XFSparkSpeechToText),
+ )
+ .append_default_model_info(
+ ModelInfo('tts', '', ModelTypeConst.TTS, tts_model_credential, XFSparkTextToSpeech))
+ .append_default_model_info(
+ ModelInfo('embedding', '', ModelTypeConst.EMBEDDING, embedding_model_credential, XFEmbedding))
+ .build()
+)
class XunFeiModelProvider(IModelProvider):
- def get_dialogue_number(self):
- return 3
-
- def get_model(self, model_type, model_name, model_credential: Dict[str, object], **model_kwargs) -> XFChatSparkLLM:
- zhipuai_chat = XFChatSparkLLM(
- spark_app_id=model_credential.get('spark_app_id'),
- spark_api_key=model_credential.get('spark_api_key'),
- spark_api_secret=model_credential.get('spark_api_secret'),
- spark_api_url=model_credential.get('spark_api_url'),
- spark_llm_domain=model_name
- )
- return zhipuai_chat
-
- def get_model_credential(self, model_type, model_name):
- if model_name in model_dict:
- return model_dict.get(model_name).model_credential
- return qwen_model_credential
+ def get_model_info_manage(self):
+ return model_info_manage
def get_model_provide_info(self):
- return ModelProvideInfo(provider='model_xf_provider', name='讯飞星火', icon=get_file_content(
+ return ModelProvideInfo(provider='model_xf_provider', name=_('iFlytek Spark'), icon=get_file_content(
os.path.join(PROJECT_DIR, "apps", "setting", 'models_provider', 'impl', 'xf_model_provider', 'icon',
'xf_icon_svg')))
-
- def get_model_list(self, model_type: str):
- if model_type is None:
- raise AppApiException(500, '模型类型不能为空')
- return [model_dict.get(key).to_dict() for key in
- list(filter(lambda key: model_dict.get(key).model_type == model_type, model_dict.keys()))]
-
- def get_model_type_list(self):
- return [{'key': "大语言模型", 'value': "LLM"}]
diff --git a/apps/setting/models_provider/impl/xinference_model_provider/__init__.py b/apps/setting/models_provider/impl/xinference_model_provider/__init__.py
new file mode 100644
index 00000000000..9bad5790a57
--- /dev/null
+++ b/apps/setting/models_provider/impl/xinference_model_provider/__init__.py
@@ -0,0 +1 @@
+# coding=utf-8
diff --git a/apps/setting/models_provider/impl/xinference_model_provider/credential/embedding.py b/apps/setting/models_provider/impl/xinference_model_provider/credential/embedding.py
new file mode 100644
index 00000000000..e16319e7627
--- /dev/null
+++ b/apps/setting/models_provider/impl/xinference_model_provider/credential/embedding.py
@@ -0,0 +1,44 @@
+# coding=utf-8
+from typing import Dict
+
+from django.utils.translation import gettext as _
+
+from common import forms
+from common.exception.app_exception import AppApiException
+from common.forms import BaseForm
+from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode
+from setting.models_provider.impl.local_model_provider.model.embedding import LocalEmbedding
+
+
+class XinferenceEmbeddingModelCredential(BaseForm, BaseModelCredential):
+ def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], model_params, provider,
+ raise_exception=False):
+ model_type_list = provider.get_model_type_list()
+ if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))):
+ raise AppApiException(ValidCode.valid_error.value,
+ _('{model_type} Model type is not supported').format(model_type=model_type))
+ try:
+ model_list = provider.get_base_model_list(model_credential.get('api_base'), model_credential.get('api_key'),
+ 'embedding')
+ except Exception as e:
+ raise AppApiException(ValidCode.valid_error.value, _('API domain name is invalid'))
+ exist = provider.get_model_info_by_name(model_list, model_name)
+ model: LocalEmbedding = provider.get_model(model_type, model_name, model_credential)
+ if len(exist) == 0:
+ model.start_down_model_thread()
+ raise AppApiException(ValidCode.model_not_fount,
+ _('The model does not exist, please download the model first'))
+ model.embed_query(_('Hello'))
+ return True
+
+ def encryption_dict(self, model_info: Dict[str, object]):
+ return model_info
+
+ def build_model(self, model_info: Dict[str, object]):
+ for key in ['model']:
+ if key not in model_info:
+ raise AppApiException(500, _('{key} is required').format(key=key))
+ return self
+
+ api_base = forms.TextInputField('API URL', required=True)
+ api_key = forms.PasswordInputField('API Key', required=True)
diff --git a/apps/setting/models_provider/impl/xinference_model_provider/credential/image.py b/apps/setting/models_provider/impl/xinference_model_provider/credential/image.py
new file mode 100644
index 00000000000..2cd59a6c47e
--- /dev/null
+++ b/apps/setting/models_provider/impl/xinference_model_provider/credential/image.py
@@ -0,0 +1,70 @@
+# coding=utf-8
+from typing import Dict
+
+from django.utils.translation import gettext_lazy as _, gettext
+from langchain_core.messages import HumanMessage
+
+from common import forms
+from common.exception.app_exception import AppApiException
+from common.forms import BaseForm, TooltipLabel
+from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode
+
+
+class XinferenceImageModelParams(BaseForm):
+ temperature = forms.SliderField(TooltipLabel(_('Temperature'),
+ _('Higher values make the output more random, while lower values make it more focused and deterministic')),
+ required=True, default_value=0.7,
+ _min=0.1,
+ _max=1.0,
+ _step=0.01,
+ precision=2)
+
+ max_tokens = forms.SliderField(
+ TooltipLabel(_('Output the maximum Tokens'),
+ _('Specify the maximum number of tokens that the model can generate')),
+ required=True, default_value=800,
+ _min=1,
+ _max=100000,
+ _step=1,
+ precision=0)
+
+
+class XinferenceImageModelCredential(BaseForm, BaseModelCredential):
+ api_base = forms.TextInputField('API URL', required=True)
+ api_key = forms.PasswordInputField('API Key', required=True)
+
+ def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], model_params, provider,
+ raise_exception=False):
+ model_type_list = provider.get_model_type_list()
+ if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))):
+ raise AppApiException(ValidCode.valid_error.value,
+ gettext('{model_type} Model type is not supported').format(model_type=model_type))
+
+ for key in ['api_base', 'api_key']:
+ if key not in model_credential:
+ if raise_exception:
+ raise AppApiException(ValidCode.valid_error.value, gettext('{key} is required').format(key=key))
+ else:
+ return False
+ try:
+ model = provider.get_model(model_type, model_name, model_credential, **model_params)
+ res = model.stream([HumanMessage(content=[{"type": "text", "text": gettext('Hello')}])])
+ for chunk in res:
+ print(chunk)
+ except Exception as e:
+ if isinstance(e, AppApiException):
+ raise e
+ if raise_exception:
+ raise AppApiException(ValidCode.valid_error.value,
+ gettext(
+ 'Verification failed, please check whether the parameters are correct: {error}').format(
+ error=str(e)))
+ else:
+ return False
+ return True
+
+ def encryption_dict(self, model: Dict[str, object]):
+ return {**model, 'api_key': super().encryption(model.get('api_key', ''))}
+
+ def get_model_params_setting_form(self, model_name):
+ return XinferenceImageModelParams()
diff --git a/apps/setting/models_provider/impl/xinference_model_provider/credential/llm.py b/apps/setting/models_provider/impl/xinference_model_provider/credential/llm.py
new file mode 100644
index 00000000000..cfc28b42a05
--- /dev/null
+++ b/apps/setting/models_provider/impl/xinference_model_provider/credential/llm.py
@@ -0,0 +1,67 @@
+# coding=utf-8
+
+from typing import Dict
+
+from django.utils.translation import gettext_lazy as _, gettext
+from langchain_core.messages import HumanMessage
+
+from common import forms
+from common.exception.app_exception import AppApiException
+from common.forms import BaseForm, TooltipLabel
+from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode
+
+
+class XinferenceLLMModelParams(BaseForm):
+ temperature = forms.SliderField(TooltipLabel(_('Temperature'),
+ _('Higher values make the output more random, while lower values make it more focused and deterministic')),
+ required=True, default_value=0.7,
+ _min=0.1,
+ _max=1.0,
+ _step=0.01,
+ precision=2)
+
+ max_tokens = forms.SliderField(
+ TooltipLabel(_('Output the maximum Tokens'),
+ _('Specify the maximum number of tokens that the model can generate')),
+ required=True, default_value=800,
+ _min=1,
+ _max=100000,
+ _step=1,
+ precision=0)
+
+
+class XinferenceLLMModelCredential(BaseForm, BaseModelCredential):
+ def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], model_params, provider,
+ raise_exception=False):
+ model_type_list = provider.get_model_type_list()
+ if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))):
+ raise AppApiException(ValidCode.valid_error.value,
+ gettext('{model_type} Model type is not supported').format(model_type=model_type))
+ try:
+ model_list = provider.get_base_model_list(model_credential.get('api_base'), model_credential.get('api_key'),
+ model_type)
+ except Exception as e:
+ raise AppApiException(ValidCode.valid_error.value, gettext('API domain name is invalid'))
+ exist = provider.get_model_info_by_name(model_list, model_name)
+ if len(exist) == 0:
+ raise AppApiException(ValidCode.valid_error.value,
+ gettext('The model does not exist, please download the model first'))
+ model = provider.get_model(model_type, model_name, model_credential, **model_params)
+ model.invoke([HumanMessage(content=gettext('Hello'))])
+ return True
+
+ def encryption_dict(self, model_info: Dict[str, object]):
+ return {**model_info, 'api_key': super().encryption(model_info.get('api_key', ''))}
+
+ def build_model(self, model_info: Dict[str, object]):
+ for key in ['api_key', 'model']:
+ if key not in model_info:
+ raise AppApiException(500, gettext('{key} is required').format(key=key))
+ self.api_key = model_info.get('api_key')
+ return self
+
+ api_base = forms.TextInputField('API URL', required=True)
+ api_key = forms.PasswordInputField('API Key', required=True)
+
+ def get_model_params_setting_form(self, model_name):
+ return XinferenceLLMModelParams()
diff --git a/apps/setting/models_provider/impl/xinference_model_provider/credential/reranker.py b/apps/setting/models_provider/impl/xinference_model_provider/credential/reranker.py
new file mode 100644
index 00000000000..f1f3aca32d8
--- /dev/null
+++ b/apps/setting/models_provider/impl/xinference_model_provider/credential/reranker.py
@@ -0,0 +1,51 @@
+# coding=utf-8
+"""
+ @project: MaxKB
+ @Author:虎
+ @file: reranker.py
+ @date:2024/9/10 9:46
+ @desc:
+"""
+from typing import Dict
+
+from django.utils.translation import gettext as _
+from langchain_core.documents import Document
+
+from common import forms
+from common.exception.app_exception import AppApiException
+from common.forms import BaseForm
+from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode
+
+
+class XInferenceRerankerModelCredential(BaseForm, BaseModelCredential):
+ def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], model_params, provider,
+ raise_exception=True):
+ if not model_type == 'RERANKER':
+ raise AppApiException(ValidCode.valid_error.value,
+ _('{model_type} Model type is not supported').format(model_type=model_type))
+ for key in ['server_url']:
+ if key not in model_credential:
+ if raise_exception:
+ raise AppApiException(ValidCode.valid_error.value, _('{key} is required').format(key=key))
+ else:
+ return False
+ try:
+ model = provider.get_model(model_type, model_name, model_credential)
+ model.compress_documents([Document(page_content=_('Hello'))], _('Hello'))
+ except Exception as e:
+ if isinstance(e, AppApiException):
+ raise e
+ if raise_exception:
+ raise AppApiException(ValidCode.valid_error.value,
+ _('Verification failed, please check whether the parameters are correct: {error}').format(
+ error=str(e)))
+ else:
+ return False
+ return True
+
+ def encryption_dict(self, model_info: Dict[str, object]):
+ return model_info
+
+ server_url = forms.TextInputField('API URL', required=True)
+
+ api_key = forms.PasswordInputField('API Key', required=False)
diff --git a/apps/setting/models_provider/impl/xinference_model_provider/credential/stt.py b/apps/setting/models_provider/impl/xinference_model_provider/credential/stt.py
new file mode 100644
index 00000000000..39a4b675bd6
--- /dev/null
+++ b/apps/setting/models_provider/impl/xinference_model_provider/credential/stt.py
@@ -0,0 +1,47 @@
+# coding=utf-8
+from typing import Dict
+
+from django.utils.translation import gettext as _
+
+from common import forms
+from common.exception.app_exception import AppApiException
+from common.forms import BaseForm
+from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode
+
+
+class XInferenceSTTModelCredential(BaseForm, BaseModelCredential):
+ api_base = forms.TextInputField('API URL', required=True)
+ api_key = forms.PasswordInputField('API Key', required=True)
+
+ def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], model_params, provider,
+ raise_exception=False):
+ model_type_list = provider.get_model_type_list()
+ if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))):
+ raise AppApiException(ValidCode.valid_error.value,
+ _('{model_type} Model type is not supported').format(model_type=model_type))
+
+ for key in ['api_base', 'api_key']:
+ if key not in model_credential:
+ if raise_exception:
+ raise AppApiException(ValidCode.valid_error.value, _('{key} is required').format(key=key))
+ else:
+ return False
+ try:
+ model = provider.get_model(model_type, model_name, model_credential)
+ model.check_auth()
+ except Exception as e:
+ if isinstance(e, AppApiException):
+ raise e
+ if raise_exception:
+ raise AppApiException(ValidCode.valid_error.value,
+ _('Verification failed, please check whether the parameters are correct: {error}').format(
+ error=str(e)))
+ else:
+ return False
+ return True
+
+ def encryption_dict(self, model: Dict[str, object]):
+ return {**model, 'api_key': super().encryption(model.get('api_key', ''))}
+
+ def get_model_params_setting_form(self, model_name):
+ pass
diff --git a/apps/setting/models_provider/impl/xinference_model_provider/credential/tti.py b/apps/setting/models_provider/impl/xinference_model_provider/credential/tti.py
new file mode 100644
index 00000000000..b1b2e6c2e45
--- /dev/null
+++ b/apps/setting/models_provider/impl/xinference_model_provider/credential/tti.py
@@ -0,0 +1,87 @@
+# coding=utf-8
+from typing import Dict
+
+from django.utils.translation import gettext_lazy as _, gettext
+
+from common import forms
+from common.exception.app_exception import AppApiException
+from common.forms import BaseForm, TooltipLabel
+from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode
+
+
+class XinferenceTTIModelParams(BaseForm):
+ size = forms.SingleSelect(
+ TooltipLabel(_('Image size'),
+ _('The image generation endpoint allows you to create raw images based on text prompts. The dimensions of the image can be 1024x1024, 1024x1792, or 1792x1024 pixels.')),
+ required=True,
+ default_value='1024x1024',
+ option_list=[
+ {'value': '1024x1024', 'label': '1024x1024'},
+ {'value': '1024x1792', 'label': '1024x1792'},
+ {'value': '1792x1024', 'label': '1792x1024'},
+ ],
+ text_field='label',
+ value_field='value'
+ )
+
+ quality = forms.SingleSelect(
+ TooltipLabel(_('Picture quality'),
+ _('By default, images are generated in standard quality, you can set quality: "hd" to enhance detail. Square, standard quality images are generated fastest.')),
+ required=True,
+ default_value='standard',
+ option_list=[
+ {'value': 'standard', 'label': 'standard'},
+ {'value': 'hd', 'label': 'hd'},
+ ],
+ text_field='label',
+ value_field='value'
+ )
+
+ n = forms.SliderField(
+ TooltipLabel(_('Number of pictures'),
+ _('You can request 1 image at a time (requesting more images by making parallel requests), or up to 10 images at a time using the n parameter.')),
+ required=True, default_value=1,
+ _min=1,
+ _max=10,
+ _step=1,
+ precision=0)
+
+
+class XinferenceTextToImageModelCredential(BaseForm, BaseModelCredential):
+ api_base = forms.TextInputField('API URL', required=True)
+ api_key = forms.PasswordInputField('API Key', required=True)
+
+ def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], model_params, provider,
+ raise_exception=False):
+ model_type_list = provider.get_model_type_list()
+ if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))):
+ raise AppApiException(ValidCode.valid_error.value,
+ gettext('{model_type} Model type is not supported').format(model_type=model_type))
+
+ for key in ['api_base', 'api_key']:
+ if key not in model_credential:
+ if raise_exception:
+ raise AppApiException(ValidCode.valid_error.value, gettext('{key} is required').format(key=key))
+ else:
+ return False
+ try:
+ model = provider.get_model(model_type, model_name, model_credential, **model_params)
+ res = model.check_auth()
+ print(res)
+ except Exception as e:
+ if isinstance(e, AppApiException):
+ raise e
+ if raise_exception:
+ raise AppApiException(ValidCode.valid_error.value,
+ gettext(
+ 'Verification failed, please check whether the parameters are correct: {error}').format(
+ error=str(e)))
+ else:
+ return False
+ return True
+
+ def encryption_dict(self, model: Dict[str, object]):
+ return {**model, 'api_key': super().encryption(model.get('api_key', ''))}
+
+ def get_model_params_setting_form(self, model_name):
+ return XinferenceTTIModelParams()
diff --git a/apps/setting/models_provider/impl/xinference_model_provider/credential/tts.py b/apps/setting/models_provider/impl/xinference_model_provider/credential/tts.py
new file mode 100644
index 00000000000..13bb9678015
--- /dev/null
+++ b/apps/setting/models_provider/impl/xinference_model_provider/credential/tts.py
@@ -0,0 +1,66 @@
+# coding=utf-8
+from typing import Dict
+
+from django.utils.translation import gettext_lazy as _, gettext
+
+from common import forms
+from common.exception.app_exception import AppApiException
+from common.forms import BaseForm, TooltipLabel
+from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode
+
+
+class XInferenceTTSModelGeneralParams(BaseForm):
+ # ['中文女', '中文男', '日语男', '粤语女', '英文女', '英文男', '韩语女']
+ voice = forms.SingleSelect(
+ TooltipLabel(_('timbre'), ''),
+ required=True, default_value='中文女',
+ text_field='value',
+ value_field='value',
+ option_list=[
+ {'text': _('Chinese female'), 'value': '中文女'},
+ {'text': _('Chinese male'), 'value': '中文男'},
+ {'text': _('Japanese male'), 'value': '日语男'},
+ {'text': _('Cantonese female'), 'value': '粤语女'},
+ {'text': _('English female'), 'value': '英文女'},
+ {'text': _('English male'), 'value': '英文男'},
+ {'text': _('Korean female'), 'value': '韩语女'},
+ ])
+
+
+class XInferenceTTSModelCredential(BaseForm, BaseModelCredential):
+ api_base = forms.TextInputField('API URL', required=True)
+ api_key = forms.PasswordInputField('API Key', required=True)
+
+ def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], model_params, provider,
+ raise_exception=False):
+ model_type_list = provider.get_model_type_list()
+ if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))):
+ raise AppApiException(ValidCode.valid_error.value,
+ gettext('{model_type} Model type is not supported').format(model_type=model_type))
+
+ for key in ['api_base', 'api_key']:
+ if key not in model_credential:
+ if raise_exception:
+ raise AppApiException(ValidCode.valid_error.value, gettext('{key} is required').format(key=key))
+ else:
+ return False
+ try:
+ model = provider.get_model(model_type, model_name, model_credential, **model_params)
+ model.check_auth()
+ except Exception as e:
+ if isinstance(e, AppApiException):
+ raise e
+ if raise_exception:
+ raise AppApiException(ValidCode.valid_error.value,
+ gettext(
+ 'Verification failed, please check whether the parameters are correct: {error}').format(
+ error=str(e)))
+ else:
+ return False
+ return True
+
+ def encryption_dict(self, model: Dict[str, object]):
+ return {**model, 'api_key': super().encryption(model.get('api_key', ''))}
+
+ def get_model_params_setting_form(self, model_name):
+ return XInferenceTTSModelGeneralParams()
diff --git a/apps/setting/models_provider/impl/xinference_model_provider/icon/xinference_icon_svg b/apps/setting/models_provider/impl/xinference_model_provider/icon/xinference_icon_svg
new file mode 100644
index 00000000000..fc553ee3ccd
--- /dev/null
+++ b/apps/setting/models_provider/impl/xinference_model_provider/icon/xinference_icon_svg
@@ -0,0 +1,5 @@
+
+
+
+
diff --git a/apps/setting/models_provider/impl/xinference_model_provider/model/embedding.py b/apps/setting/models_provider/impl/xinference_model_provider/model/embedding.py
new file mode 100644
index 00000000000..73abc14cd54
--- /dev/null
+++ b/apps/setting/models_provider/impl/xinference_model_provider/model/embedding.py
@@ -0,0 +1,91 @@
+# coding=utf-8
+import threading
+from typing import Dict, Optional, List, Any
+
+from langchain_core.embeddings import Embeddings
+
+from setting.models_provider.base_model_provider import MaxKBBaseModel
+
+
+class XinferenceEmbedding(MaxKBBaseModel, Embeddings):
+ client: Any
+ server_url: Optional[str]
+ """URL of the xinference server"""
+ model_uid: Optional[str]
+ """UID of the launched model"""
+
+ @staticmethod
+ def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs):
+ return XinferenceEmbedding(
+ model_uid=model_name,
+ server_url=model_credential.get('api_base'),
+ api_key=model_credential.get('api_key'),
+ )
+
+ def down_model(self):
+ self.client.launch_model(model_name=self.model_uid, model_type="embedding")
+
+ def start_down_model_thread(self):
+ thread = threading.Thread(target=self.down_model)
+ thread.daemon = True
+ thread.start()
+
+ def __init__(
+ self, server_url: Optional[str] = None, model_uid: Optional[str] = None,
+ api_key: Optional[str] = None
+ ):
+ try:
+ from xinference.client import RESTfulClient
+ except ImportError:
+ try:
+ from xinference_client import RESTfulClient
+ except ImportError as e:
+ raise ImportError(
+ "Could not import RESTfulClient from xinference. Please install it"
+ " with `pip install xinference` or `pip install xinference_client`."
+ ) from e
+
+ if server_url is None:
+ raise ValueError("Please provide server URL")
+
+ if model_uid is None:
+ raise ValueError("Please provide the model UID")
+
+ self.server_url = server_url
+
+ self.model_uid = model_uid
+
+ self.api_key = api_key
+
+ self.client = RESTfulClient(server_url, api_key)
+
+ def embed_documents(self, texts: List[str]) -> List[List[float]]:
+ """Embed a list of documents using Xinference.
+ Args:
+ texts: The list of texts to embed.
+ Returns:
+ List of embeddings, one for each text.
+ """
+
+ model = self.client.get_model(self.model_uid)
+
+ embeddings = [
+ model.create_embedding(text)["data"][0]["embedding"] for text in texts
+ ]
+ return [list(map(float, e)) for e in embeddings]
+
+ def embed_query(self, text: str) -> List[float]:
+ """Embed a query of documents using Xinference.
+ Args:
+ text: The text to embed.
+ Returns:
+ Embeddings for the text.
+ """
+
+ model = self.client.get_model(self.model_uid)
+
+ embedding_res = model.create_embedding(text)
+
+ embedding = embedding_res["data"][0]["embedding"]
+
+ return list(map(float, embedding))
diff --git a/apps/setting/models_provider/impl/xinference_model_provider/model/image.py b/apps/setting/models_provider/impl/xinference_model_provider/model/image.py
new file mode 100644
index 00000000000..66a766ba8c0
--- /dev/null
+++ b/apps/setting/models_provider/impl/xinference_model_provider/model/image.py
@@ -0,0 +1,35 @@
+from typing import Dict, List
+
+from langchain_core.messages import BaseMessage, get_buffer_string
+
+from common.config.tokenizer_manage_config import TokenizerManage
+from setting.models_provider.base_model_provider import MaxKBBaseModel
+from setting.models_provider.impl.base_chat_open_ai import BaseChatOpenAI
+
+
+class XinferenceImage(MaxKBBaseModel, BaseChatOpenAI):
+
+ @staticmethod
+ def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs):
+ optional_params = MaxKBBaseModel.filter_optional_params(model_kwargs)
+ return XinferenceImage(
+ model_name=model_name,
+ openai_api_base=model_credential.get('api_base'),
+ openai_api_key=model_credential.get('api_key'),
+ # stream_options={"include_usage": True},
+ streaming=True,
+ stream_usage=True,
+ extra_body=optional_params
+ )
+
+ def get_num_tokens_from_messages(self, messages: List[BaseMessage]) -> int:
+ if self.usage_metadata is None or self.usage_metadata == {}:
+ tokenizer = TokenizerManage.get_tokenizer()
+ return sum([len(tokenizer.encode(get_buffer_string([m]))) for m in messages])
+ return self.usage_metadata.get('input_tokens', 0)
+
+ def get_num_tokens(self, text: str) -> int:
+ if self.usage_metadata is None or self.usage_metadata == {}:
+ tokenizer = TokenizerManage.get_tokenizer()
+ return len(tokenizer.encode(text))
+ return self.get_last_generation_info().get('output_tokens', 0)
diff --git a/apps/setting/models_provider/impl/xinference_model_provider/model/llm.py b/apps/setting/models_provider/impl/xinference_model_provider/model/llm.py
new file mode 100644
index 00000000000..9c0316ad20a
--- /dev/null
+++ b/apps/setting/models_provider/impl/xinference_model_provider/model/llm.py
@@ -0,0 +1,50 @@
+# coding=utf-8
+
+from typing import Dict, List
+from urllib.parse import urlparse, ParseResult
+
+from langchain_core.messages import BaseMessage, get_buffer_string
+
+from common.config.tokenizer_manage_config import TokenizerManage
+from setting.models_provider.base_model_provider import MaxKBBaseModel
+from setting.models_provider.impl.base_chat_open_ai import BaseChatOpenAI
+
+
+def get_base_url(url: str):
+ parse = urlparse(url)
+ result_url = ParseResult(scheme=parse.scheme, netloc=parse.netloc, path=parse.path, params='',
+ query='',
+ fragment='').geturl()
+ return result_url[:-1] if result_url.endswith("/") else result_url
+
+
+class XinferenceChatModel(MaxKBBaseModel, BaseChatOpenAI):
+
+ @staticmethod
+ def is_cache_model():
+ return False
+
+ @staticmethod
+ def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs):
+ api_base = model_credential.get('api_base', '')
+ base_url = get_base_url(api_base)
+ base_url = base_url if base_url.endswith('/v1') else (base_url + '/v1')
+ optional_params = MaxKBBaseModel.filter_optional_params(model_kwargs)
+ return XinferenceChatModel(
+ model=model_name,
+ openai_api_base=base_url,
+ openai_api_key=model_credential.get('api_key'),
+ extra_body=optional_params
+ )
+
+ def get_num_tokens_from_messages(self, messages: List[BaseMessage]) -> int:
+ if self.usage_metadata is None or self.usage_metadata == {}:
+ tokenizer = TokenizerManage.get_tokenizer()
+ return sum([len(tokenizer.encode(get_buffer_string([m]))) for m in messages])
+ return self.usage_metadata.get('input_tokens', 0)
+
+ def get_num_tokens(self, text: str) -> int:
+ if self.usage_metadata is None or self.usage_metadata == {}:
+ tokenizer = TokenizerManage.get_tokenizer()
+ return len(tokenizer.encode(text))
+ return self.get_last_generation_info().get('output_tokens', 0)
diff --git a/apps/setting/models_provider/impl/xinference_model_provider/model/reranker.py b/apps/setting/models_provider/impl/xinference_model_provider/model/reranker.py
new file mode 100644
index 00000000000..28c8d267839
--- /dev/null
+++ b/apps/setting/models_provider/impl/xinference_model_provider/model/reranker.py
@@ -0,0 +1,57 @@
+# coding=utf-8
+"""
+ @project: MaxKB
+ @Author:虎
+ @file: reranker.py
+ @date:2024/9/10 9:45
+ @desc:
+"""
+from typing import Sequence, Optional, Any, Dict
+
+from langchain_core.callbacks import Callbacks
+from langchain_core.documents import BaseDocumentCompressor, Document
+from xinference_client.client.restful.restful_client import RESTfulRerankModelHandle
+
+from setting.models_provider.base_model_provider import MaxKBBaseModel
+
+
+class XInferenceReranker(MaxKBBaseModel, BaseDocumentCompressor):
+ server_url: Optional[str]
+ """URL of the xinference server"""
+ model_uid: Optional[str]
+ """UID of the launched model"""
+ api_key: Optional[str]
+
+ @staticmethod
+ def is_cache_model():
+ return False
+ @staticmethod
+ def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs):
+ return XInferenceReranker(server_url=model_credential.get('server_url'), model_uid=model_name,
+ api_key=model_credential.get('api_key'), top_n=model_kwargs.get('top_n', 3))
+
+ top_n: Optional[int] = 3
+
+ def compress_documents(self, documents: Sequence[Document], query: str, callbacks: Optional[Callbacks] = None) -> \
+ Sequence[Document]:
+ if documents is None or len(documents) == 0:
+ return []
+ client: Any
+ if documents is None or len(documents) == 0:
+ return []
+ try:
+ from xinference.client import RESTfulClient
+ except ImportError:
+ try:
+ from xinference_client import RESTfulClient
+ except ImportError as e:
+ raise ImportError(
+ "Could not import RESTfulClient from xinference. Please install it"
+ " with `pip install xinference` or `pip install xinference_client`."
+ ) from e
+
+ client = RESTfulClient(self.server_url, self.api_key)
+ model: RESTfulRerankModelHandle = client.get_model(self.model_uid)
+ res = model.rerank([document.page_content for document in documents], query, self.top_n, return_documents=True)
+ return [Document(page_content=d.get('document', {}).get('text'),
+ metadata={'relevance_score': d.get('relevance_score')}) for d in res.get('results', [])]
diff --git a/apps/setting/models_provider/impl/xinference_model_provider/model/stt.py b/apps/setting/models_provider/impl/xinference_model_provider/model/stt.py
new file mode 100644
index 00000000000..ed0e98c3e1b
--- /dev/null
+++ b/apps/setting/models_provider/impl/xinference_model_provider/model/stt.py
@@ -0,0 +1,57 @@
+import io
+from typing import Dict
+
+from openai import OpenAI
+
+from common.config.tokenizer_manage_config import TokenizerManage
+from setting.models_provider.base_model_provider import MaxKBBaseModel
+from setting.models_provider.impl.base_stt import BaseSpeechToText
+
+
+def custom_get_token_ids(text: str):
+ tokenizer = TokenizerManage.get_tokenizer()
+ return tokenizer.encode(text)
+
+
+class XInferenceSpeechToText(MaxKBBaseModel, BaseSpeechToText):
+ api_base: str
+ api_key: str
+ model: str
+
+ def __init__(self, **kwargs):
+ super().__init__(**kwargs)
+ self.api_key = kwargs.get('api_key')
+ self.api_base = kwargs.get('api_base')
+
+ @staticmethod
+ def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs):
+ optional_params = {}
+ if 'max_tokens' in model_kwargs and model_kwargs['max_tokens'] is not None:
+ optional_params['max_tokens'] = model_kwargs['max_tokens']
+ if 'temperature' in model_kwargs and model_kwargs['temperature'] is not None:
+ optional_params['temperature'] = model_kwargs['temperature']
+ return XInferenceSpeechToText(
+ model=model_name,
+ api_base=model_credential.get('api_base'),
+ api_key=model_credential.get('api_key'),
+ **optional_params,
+ )
+
+ def check_auth(self):
+ client = OpenAI(
+ base_url=self.api_base,
+ api_key=self.api_key
+ )
+ response_list = client.models.with_raw_response.list()
+ # print(response_list)
+
+ def speech_to_text(self, audio_file):
+ client = OpenAI(
+ base_url=self.api_base,
+ api_key=self.api_key
+ )
+ audio_data = audio_file.read()
+ buffer = io.BytesIO(audio_data)
+ buffer.name = "file.mp3" # this is the important line
+ res = client.audio.transcriptions.create(model=self.model, language="zh", file=buffer)
+ return res.text
diff --git a/apps/setting/models_provider/impl/xinference_model_provider/model/tti.py b/apps/setting/models_provider/impl/xinference_model_provider/model/tti.py
new file mode 100644
index 00000000000..e050c6bf79b
--- /dev/null
+++ b/apps/setting/models_provider/impl/xinference_model_provider/model/tti.py
@@ -0,0 +1,63 @@
+import base64
+from typing import Dict
+
+from openai import OpenAI
+
+from common.config.tokenizer_manage_config import TokenizerManage
+from common.util.common import bytes_to_uploaded_file
+from dataset.serializers.file_serializers import FileSerializer
+from setting.models_provider.base_model_provider import MaxKBBaseModel
+from setting.models_provider.impl.base_tti import BaseTextToImage
+
+
+def custom_get_token_ids(text: str):
+ tokenizer = TokenizerManage.get_tokenizer()
+ return tokenizer.encode(text)
+
+
+class XinferenceTextToImage(MaxKBBaseModel, BaseTextToImage):
+ api_base: str
+ api_key: str
+ model: str
+ params: dict
+
+ def __init__(self, **kwargs):
+ super().__init__(**kwargs)
+ self.api_key = kwargs.get('api_key')
+ self.api_base = kwargs.get('api_base')
+ self.model = kwargs.get('model')
+ self.params = kwargs.get('params')
+
+ @staticmethod
+ def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs):
+ optional_params = {'params': {'size': '1024x1024', 'quality': 'standard', 'n': 1}}
+ for key, value in model_kwargs.items():
+ if key not in ['model_id', 'use_local', 'streaming']:
+ optional_params['params'][key] = value
+ return XinferenceTextToImage(
+ model=model_name,
+ api_base=model_credential.get('api_base'),
+ api_key=model_credential.get('api_key'),
+ **optional_params,
+ )
+
+ def is_cache_model(self):
+ return False
+
+ def check_auth(self):
+ self.generate_image('生成一个小猫图片')
+
+ def generate_image(self, prompt: str, negative_prompt: str = None):
+ chat = OpenAI(api_key=self.api_key, base_url=self.api_base)
+ res = chat.images.generate(model=self.model, prompt=prompt, response_format='b64_json', **self.params)
+ file_urls = []
+ # 临时文件
+ for img in res.data:
+ file = bytes_to_uploaded_file(base64.b64decode(img.b64_json), 'file_name.jpg')
+ meta = {
+ 'debug': True,
+ }
+ file_url = FileSerializer(data={'file': file, 'meta': meta}).upload()
+ file_urls.append(f'http://localhost:8080{file_url}')
+
+ return file_urls
diff --git a/apps/setting/models_provider/impl/xinference_model_provider/model/tts.py b/apps/setting/models_provider/impl/xinference_model_provider/model/tts.py
new file mode 100644
index 00000000000..416e6fab9fb
--- /dev/null
+++ b/apps/setting/models_provider/impl/xinference_model_provider/model/tts.py
@@ -0,0 +1,61 @@
+from typing import Dict
+
+from openai import OpenAI
+
+from common.config.tokenizer_manage_config import TokenizerManage
+from common.util.common import _remove_empty_lines
+from setting.models_provider.base_model_provider import MaxKBBaseModel
+from setting.models_provider.impl.base_tts import BaseTextToSpeech
+from django.utils.translation import gettext as _
+
+
+def custom_get_token_ids(text: str):
+ tokenizer = TokenizerManage.get_tokenizer()
+ return tokenizer.encode(text)
+
+
+class XInferenceTextToSpeech(MaxKBBaseModel, BaseTextToSpeech):
+ api_base: str
+ api_key: str
+ model: str
+ params: dict
+
+ def __init__(self, **kwargs):
+ super().__init__(**kwargs)
+ self.api_key = kwargs.get('api_key')
+ self.api_base = kwargs.get('api_base')
+ self.model = kwargs.get('model')
+ self.params = kwargs.get('params')
+
+ @staticmethod
+ def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs):
+ optional_params = {'params': {'voice': '中文女'}}
+ for key, value in model_kwargs.items():
+ if key not in ['model_id', 'use_local', 'streaming']:
+ optional_params['params'][key] = value
+ return XInferenceTextToSpeech(
+ model=model_name,
+ api_base=model_credential.get('api_base'),
+ api_key=model_credential.get('api_key'),
+ **optional_params,
+ )
+
+ def check_auth(self):
+ self.text_to_speech(_('Hello'))
+
+ def text_to_speech(self, text):
+ client = OpenAI(
+ base_url=self.api_base,
+ api_key=self.api_key
+ )
+ # ['中文女', '中文男', '日语男', '粤语女', '英文女', '英文男', '韩语女']
+ text = _remove_empty_lines(text)
+ with client.audio.speech.with_streaming_response.create(
+ model=self.model,
+ input=text,
+ **self.params
+ ) as response:
+ return response.read()
+
+ def is_cache_model(self):
+ return False
diff --git a/apps/setting/models_provider/impl/xinference_model_provider/xinference_model_provider.py b/apps/setting/models_provider/impl/xinference_model_provider/xinference_model_provider.py
new file mode 100644
index 00000000000..e2da1e66a68
--- /dev/null
+++ b/apps/setting/models_provider/impl/xinference_model_provider/xinference_model_provider.py
@@ -0,0 +1,585 @@
+# coding=utf-8
+import os
+from urllib.parse import urlparse, ParseResult
+
+import requests
+
+from common.util.file_util import get_file_content
+from setting.models_provider.base_model_provider import IModelProvider, ModelProvideInfo, ModelInfo, ModelTypeConst, \
+ ModelInfoManage
+from setting.models_provider.impl.xinference_model_provider.credential.embedding import \
+ XinferenceEmbeddingModelCredential
+from setting.models_provider.impl.xinference_model_provider.credential.image import XinferenceImageModelCredential
+from setting.models_provider.impl.xinference_model_provider.credential.llm import XinferenceLLMModelCredential
+from setting.models_provider.impl.xinference_model_provider.credential.reranker import XInferenceRerankerModelCredential
+from setting.models_provider.impl.xinference_model_provider.credential.stt import XInferenceSTTModelCredential
+from setting.models_provider.impl.xinference_model_provider.credential.tti import XinferenceTextToImageModelCredential
+from setting.models_provider.impl.xinference_model_provider.credential.tts import XInferenceTTSModelCredential
+from setting.models_provider.impl.xinference_model_provider.model.embedding import XinferenceEmbedding
+from setting.models_provider.impl.xinference_model_provider.model.image import XinferenceImage
+from setting.models_provider.impl.xinference_model_provider.model.llm import XinferenceChatModel
+from setting.models_provider.impl.xinference_model_provider.model.reranker import XInferenceReranker
+from setting.models_provider.impl.xinference_model_provider.model.stt import XInferenceSpeechToText
+from setting.models_provider.impl.xinference_model_provider.model.tti import XinferenceTextToImage
+from setting.models_provider.impl.xinference_model_provider.model.tts import XInferenceTextToSpeech
+from smartdoc.conf import PROJECT_DIR
+from django.utils.translation import gettext as _
+
+xinference_llm_model_credential = XinferenceLLMModelCredential()
+xinference_stt_model_credential = XInferenceSTTModelCredential()
+xinference_tts_model_credential = XInferenceTTSModelCredential()
+xinference_image_model_credential = XinferenceImageModelCredential()
+xinference_tti_model_credential = XinferenceTextToImageModelCredential()
+
+model_info_list = [
+ ModelInfo(
+ 'code-llama',
+ _('Code Llama is a language model specifically designed for code generation.'),
+ ModelTypeConst.LLM,
+ xinference_llm_model_credential,
+ XinferenceChatModel
+ ),
+ ModelInfo(
+ 'code-llama-instruct',
+ _('''
+Code Llama Instruct is a fine-tuned version of Code Llama's instructions, designed to perform specific tasks.
+ '''),
+ ModelTypeConst.LLM,
+ xinference_llm_model_credential,
+ XinferenceChatModel
+ ),
+ ModelInfo(
+ 'code-llama-python',
+ _('Code Llama Python is a language model specifically designed for Python code generation.'),
+ ModelTypeConst.LLM,
+ xinference_llm_model_credential,
+ XinferenceChatModel
+ ),
+ ModelInfo(
+ 'codeqwen1.5',
+ _('CodeQwen 1.5 is a language model for code generation with high performance.'),
+ ModelTypeConst.LLM,
+ xinference_llm_model_credential,
+ XinferenceChatModel
+ ),
+ ModelInfo(
+ 'codeqwen1.5-chat',
+ _('CodeQwen 1.5 Chat is a chat model version of CodeQwen 1.5.'),
+ ModelTypeConst.LLM,
+ xinference_llm_model_credential,
+ XinferenceChatModel
+ ),
+ ModelInfo(
+ 'deepseek',
+ _('Deepseek is a large-scale language model with 13 billion parameters.'),
+ ModelTypeConst.LLM,
+ xinference_llm_model_credential,
+ XinferenceChatModel
+ ),
+ ModelInfo(
+ 'deepseek-chat',
+ '',
+ ModelTypeConst.LLM,
+ xinference_llm_model_credential,
+ XinferenceChatModel
+ ),
+ ModelInfo(
+ 'deepseek-coder',
+ '',
+ ModelTypeConst.LLM,
+ xinference_llm_model_credential,
+ XinferenceChatModel
+ ),
+ ModelInfo(
+ 'deepseek-coder-instruct',
+ '',
+ ModelTypeConst.LLM,
+ xinference_llm_model_credential,
+ XinferenceChatModel
+ ),
+ ModelInfo(
+ 'deepseek-vl-chat',
+ '',
+ ModelTypeConst.LLM,
+ xinference_llm_model_credential,
+ XinferenceChatModel
+ ),
+ ModelInfo(
+ 'gpt-3.5-turbo',
+ '',
+ ModelTypeConst.LLM,
+ xinference_llm_model_credential,
+ XinferenceChatModel
+ ),
+ ModelInfo(
+ 'gpt-4',
+ '',
+ ModelTypeConst.LLM,
+ xinference_llm_model_credential,
+ XinferenceChatModel
+ ),
+ ModelInfo(
+ 'gpt-4-vision-preview',
+ '',
+ ModelTypeConst.LLM,
+ xinference_llm_model_credential,
+ XinferenceChatModel
+ ),
+ ModelInfo(
+ 'gpt4all',
+ '',
+ ModelTypeConst.LLM,
+ xinference_llm_model_credential,
+ XinferenceChatModel
+ ),
+ ModelInfo(
+ 'llama2',
+ '',
+ ModelTypeConst.LLM,
+ xinference_llm_model_credential,
+ XinferenceChatModel
+ ),
+ ModelInfo(
+ 'llama2-chat',
+ '',
+ ModelTypeConst.LLM,
+ xinference_llm_model_credential,
+ XinferenceChatModel
+ ),
+ ModelInfo(
+ 'llama2-chat-32k',
+ '',
+ ModelTypeConst.LLM,
+ xinference_llm_model_credential,
+ XinferenceChatModel
+ ),
+ ModelInfo(
+ 'qwen',
+ '',
+ ModelTypeConst.LLM,
+ xinference_llm_model_credential,
+ XinferenceChatModel
+ ),
+ ModelInfo(
+ 'qwen-chat',
+ '',
+ ModelTypeConst.LLM,
+ xinference_llm_model_credential,
+ XinferenceChatModel
+ ),
+ ModelInfo(
+ 'qwen-chat-32k',
+ '',
+ ModelTypeConst.LLM,
+ xinference_llm_model_credential,
+ XinferenceChatModel
+ ),
+ ModelInfo(
+ 'qwen-code',
+ '',
+ ModelTypeConst.LLM,
+ xinference_llm_model_credential,
+ XinferenceChatModel
+ ),
+ ModelInfo(
+ 'qwen-code-chat',
+ '',
+ ModelTypeConst.LLM,
+ xinference_llm_model_credential,
+ XinferenceChatModel
+ ),
+ ModelInfo(
+ 'qwen-vl',
+ '',
+ ModelTypeConst.LLM,
+ xinference_llm_model_credential,
+ XinferenceChatModel
+ ),
+ ModelInfo(
+ 'qwen-vl-chat',
+ '',
+ ModelTypeConst.LLM,
+ xinference_llm_model_credential,
+ XinferenceChatModel
+ ),
+ ModelInfo(
+ 'qwen2-instruct',
+ '',
+ ModelTypeConst.LLM,
+ xinference_llm_model_credential,
+ XinferenceChatModel
+ ),
+ ModelInfo(
+ 'qwen2-72b-instruct',
+ '',
+ ModelTypeConst.LLM,
+ xinference_llm_model_credential,
+ XinferenceChatModel
+ ),
+ ModelInfo(
+ 'qwen2-57b-a14b-instruct',
+ '',
+ ModelTypeConst.LLM,
+ xinference_llm_model_credential,
+ XinferenceChatModel
+ ),
+ ModelInfo(
+ 'qwen2-7b-instruct',
+ '',
+ ModelTypeConst.LLM,
+ xinference_llm_model_credential,
+ XinferenceChatModel
+ ),
+ ModelInfo(
+ 'qwen2.5-72b-instruct',
+ '',
+ ModelTypeConst.LLM,
+ xinference_llm_model_credential,
+ XinferenceChatModel
+ ),
+ ModelInfo(
+ 'qwen2.5-32b-instruct',
+ '',
+ ModelTypeConst.LLM,
+ xinference_llm_model_credential,
+ XinferenceChatModel
+ ),
+ ModelInfo(
+ 'qwen2.5-14b-instruct',
+ '',
+ ModelTypeConst.LLM,
+ xinference_llm_model_credential,
+ XinferenceChatModel
+ ),
+ ModelInfo(
+ 'qwen2.5-7b-instruct',
+ '',
+ ModelTypeConst.LLM,
+ xinference_llm_model_credential,
+ XinferenceChatModel
+ ),
+ ModelInfo(
+ 'qwen2.5-1.5b-instruct',
+ '',
+ ModelTypeConst.LLM,
+ xinference_llm_model_credential,
+ XinferenceChatModel
+ ),
+ ModelInfo(
+ 'qwen2.5-0.5b-instruct',
+ '',
+ ModelTypeConst.LLM,
+ xinference_llm_model_credential,
+ XinferenceChatModel
+ ),
+ ModelInfo(
+ 'qwen2.5-3b-instruct',
+ '',
+ ModelTypeConst.LLM,
+ xinference_llm_model_credential,
+ XinferenceChatModel
+ ),
+ ModelInfo(
+ 'minicpm-llama3-v-2_5',
+ '',
+ ModelTypeConst.LLM,
+ xinference_llm_model_credential,
+ XinferenceChatModel
+ ),
+]
+
+voice_model_info = [
+ ModelInfo(
+ 'CosyVoice-300M-SFT',
+ '',
+ ModelTypeConst.TTS,
+ xinference_tts_model_credential,
+ XInferenceTextToSpeech
+ ),
+ ModelInfo(
+ 'Belle-whisper-large-v3-zh',
+ '',
+ ModelTypeConst.STT,
+ xinference_stt_model_credential,
+ XInferenceSpeechToText
+ ),
+]
+
+image_model_info = [
+ ModelInfo(
+ 'qwen-vl-chat',
+ '',
+ ModelTypeConst.IMAGE,
+ xinference_image_model_credential,
+ XinferenceImage
+ ),
+ ModelInfo(
+ 'deepseek-vl-chat',
+ '',
+ ModelTypeConst.IMAGE,
+ xinference_image_model_credential,
+ XinferenceImage
+ ),
+ ModelInfo(
+ 'yi-vl-chat',
+ '',
+ ModelTypeConst.IMAGE,
+ xinference_image_model_credential,
+ XinferenceImage
+ ),
+ ModelInfo(
+ 'omnilmm',
+ '',
+ ModelTypeConst.IMAGE,
+ xinference_image_model_credential,
+ XinferenceImage
+ ),
+ ModelInfo(
+ 'internvl-chat',
+ '',
+ ModelTypeConst.IMAGE,
+ xinference_image_model_credential,
+ XinferenceImage
+ ),
+ ModelInfo(
+ 'cogvlm2',
+ '',
+ ModelTypeConst.IMAGE,
+ xinference_image_model_credential,
+ XinferenceImage
+ ),
+ ModelInfo(
+ 'MiniCPM-Llama3-V-2_5',
+ '',
+ ModelTypeConst.IMAGE,
+ xinference_image_model_credential,
+ XinferenceImage
+ ),
+ ModelInfo(
+ 'GLM-4V',
+ '',
+ ModelTypeConst.IMAGE,
+ xinference_image_model_credential,
+ XinferenceImage
+ ),
+ ModelInfo(
+ 'MiniCPM-V-2.6',
+ '',
+ ModelTypeConst.IMAGE,
+ xinference_image_model_credential,
+ XinferenceImage
+ ),
+ ModelInfo(
+ 'internvl2',
+ '',
+ ModelTypeConst.IMAGE,
+ xinference_image_model_credential,
+ XinferenceImage
+ ),
+ ModelInfo(
+ 'qwen2-vl-instruct',
+ '',
+ ModelTypeConst.IMAGE,
+ xinference_image_model_credential,
+ XinferenceImage
+ ),
+ ModelInfo(
+ 'llama-3.2-vision',
+ '',
+ ModelTypeConst.IMAGE,
+ xinference_image_model_credential,
+ XinferenceImage
+ ),
+ ModelInfo(
+ 'llama-3.2-vision-instruct',
+ '',
+ ModelTypeConst.IMAGE,
+ xinference_image_model_credential,
+ XinferenceImage
+ ),
+ ModelInfo(
+ 'glm-edge-v',
+ '',
+ ModelTypeConst.IMAGE,
+ xinference_image_model_credential,
+ XinferenceImage
+ ),
+]
+
+tti_model_info = [
+ ModelInfo(
+ 'sd-turbo',
+ '',
+ ModelTypeConst.TTI,
+ xinference_tti_model_credential,
+ XinferenceTextToImage
+ ),
+ ModelInfo(
+ 'sdxl-turbo',
+ '',
+ ModelTypeConst.TTI,
+ xinference_tti_model_credential,
+ XinferenceTextToImage
+ ),
+ ModelInfo(
+ 'stable-diffusion-v1.5',
+ '',
+ ModelTypeConst.TTI,
+ xinference_tti_model_credential,
+ XinferenceTextToImage
+ ),
+ ModelInfo(
+ 'stable-diffusion-xl-base-1.0',
+ '',
+ ModelTypeConst.TTI,
+ xinference_tti_model_credential,
+ XinferenceTextToImage
+ ),
+ ModelInfo(
+ 'sd3-medium',
+ '',
+ ModelTypeConst.TTI,
+ xinference_tti_model_credential,
+ XinferenceTextToImage
+ ),
+ ModelInfo(
+ 'FLUX.1-schnell',
+ '',
+ ModelTypeConst.TTI,
+ xinference_tti_model_credential,
+ XinferenceTextToImage
+ ),
+ ModelInfo(
+ 'FLUX.1-dev',
+ '',
+ ModelTypeConst.TTI,
+ xinference_tti_model_credential,
+ XinferenceTextToImage
+ ),
+]
+
+xinference_embedding_model_credential = XinferenceEmbeddingModelCredential()
+
+# 生成embedding_model_info列表
+embedding_model_info = [
+ ModelInfo('bce-embedding-base_v1', '', ModelTypeConst.EMBEDDING,
+ xinference_embedding_model_credential, XinferenceEmbedding),
+ ModelInfo('bge-base-en', '', ModelTypeConst.EMBEDDING,
+ xinference_embedding_model_credential, XinferenceEmbedding),
+ ModelInfo('bge-base-en-v1.5', '', ModelTypeConst.EMBEDDING,
+ xinference_embedding_model_credential, XinferenceEmbedding),
+ ModelInfo('bge-base-zh', '', ModelTypeConst.EMBEDDING,
+ xinference_embedding_model_credential, XinferenceEmbedding),
+ ModelInfo('bge-base-zh-v1.5', '', ModelTypeConst.EMBEDDING,
+ xinference_embedding_model_credential, XinferenceEmbedding),
+ ModelInfo('bge-large-en', '', ModelTypeConst.EMBEDDING,
+ xinference_embedding_model_credential, XinferenceEmbedding),
+ ModelInfo('bge-large-en-v1.5', '', ModelTypeConst.EMBEDDING,
+ xinference_embedding_model_credential, XinferenceEmbedding),
+ ModelInfo('bge-large-zh', '', ModelTypeConst.EMBEDDING,
+ xinference_embedding_model_credential, XinferenceEmbedding),
+ ModelInfo('bge-large-zh-noinstruct', '', ModelTypeConst.EMBEDDING,
+ xinference_embedding_model_credential, XinferenceEmbedding),
+ ModelInfo('bge-large-zh-v1.5', '', ModelTypeConst.EMBEDDING,
+ xinference_embedding_model_credential, XinferenceEmbedding),
+ ModelInfo('bge-m3', '', ModelTypeConst.EMBEDDING, xinference_embedding_model_credential,
+ XinferenceEmbedding),
+ ModelInfo('bge-small-en-v1.5', '', ModelTypeConst.EMBEDDING,
+ xinference_embedding_model_credential, XinferenceEmbedding),
+ ModelInfo('bge-small-zh', '', ModelTypeConst.EMBEDDING,
+ xinference_embedding_model_credential, XinferenceEmbedding),
+ ModelInfo('bge-small-zh-v1.5', '', ModelTypeConst.EMBEDDING,
+ xinference_embedding_model_credential, XinferenceEmbedding),
+ ModelInfo('e5-large-v2', '', ModelTypeConst.EMBEDDING,
+ xinference_embedding_model_credential, XinferenceEmbedding),
+ ModelInfo('gte-base', '', ModelTypeConst.EMBEDDING, xinference_embedding_model_credential,
+ XinferenceEmbedding),
+ ModelInfo('gte-large', '', ModelTypeConst.EMBEDDING, xinference_embedding_model_credential,
+ XinferenceEmbedding),
+ ModelInfo('jina-embeddings-v2-base-en', '', ModelTypeConst.EMBEDDING,
+ xinference_embedding_model_credential, XinferenceEmbedding),
+ ModelInfo('jina-embeddings-v2-base-zh', '', ModelTypeConst.EMBEDDING,
+ xinference_embedding_model_credential, XinferenceEmbedding),
+ ModelInfo('jina-embeddings-v2-small-en', '', ModelTypeConst.EMBEDDING,
+ xinference_embedding_model_credential, XinferenceEmbedding),
+ ModelInfo('m3e-base', '', ModelTypeConst.EMBEDDING, xinference_embedding_model_credential,
+ XinferenceEmbedding),
+ ModelInfo('m3e-large', '', ModelTypeConst.EMBEDDING, xinference_embedding_model_credential,
+ XinferenceEmbedding),
+ ModelInfo('m3e-small', '', ModelTypeConst.EMBEDDING, xinference_embedding_model_credential,
+ XinferenceEmbedding),
+ ModelInfo('multilingual-e5-large', '', ModelTypeConst.EMBEDDING,
+ xinference_embedding_model_credential, XinferenceEmbedding),
+ ModelInfo('text2vec-base-chinese', '', ModelTypeConst.EMBEDDING,
+ xinference_embedding_model_credential, XinferenceEmbedding),
+ ModelInfo('text2vec-base-chinese-paraphrase', '', ModelTypeConst.EMBEDDING,
+ xinference_embedding_model_credential, XinferenceEmbedding),
+ ModelInfo('text2vec-base-chinese-sentence', '', ModelTypeConst.EMBEDDING,
+ xinference_embedding_model_credential, XinferenceEmbedding),
+ ModelInfo('text2vec-base-multilingual', '', ModelTypeConst.EMBEDDING,
+ xinference_embedding_model_credential, XinferenceEmbedding),
+ ModelInfo('text2vec-large-chinese', '', ModelTypeConst.EMBEDDING,
+ xinference_embedding_model_credential, XinferenceEmbedding),
+]
+rerank_list = [ModelInfo('bce-reranker-base_v1',
+ '',
+ ModelTypeConst.RERANKER, XInferenceRerankerModelCredential(), XInferenceReranker)]
+model_info_manage = (
+ ModelInfoManage.builder()
+ .append_model_info_list(model_info_list)
+ .append_model_info_list(voice_model_info)
+ .append_default_model_info(voice_model_info[0])
+ .append_default_model_info(voice_model_info[1])
+ .append_default_model_info(ModelInfo('phi3',
+ '',
+ ModelTypeConst.LLM, xinference_llm_model_credential,
+ XinferenceChatModel))
+ .append_model_info_list(embedding_model_info)
+ .append_default_model_info(ModelInfo('',
+ '',
+ ModelTypeConst.EMBEDDING,
+ xinference_embedding_model_credential, XinferenceEmbedding))
+ .append_model_info_list(rerank_list)
+ .append_model_info_list(image_model_info)
+ .append_default_model_info(image_model_info[0])
+ .append_model_info_list(tti_model_info)
+ .append_default_model_info(tti_model_info[0])
+ .append_default_model_info(rerank_list[0])
+ .build()
+)
+
+
+def get_base_url(url: str):
+ parse = urlparse(url)
+ result_url = ParseResult(scheme=parse.scheme, netloc=parse.netloc, path=parse.path, params='',
+ query='',
+ fragment='').geturl()
+ return result_url[:-1] if result_url.endswith("/") else result_url
+
+
+class XinferenceModelProvider(IModelProvider):
+ def get_model_info_manage(self):
+ return model_info_manage
+
+ def get_model_provide_info(self):
+ return ModelProvideInfo(provider='model_xinference_provider', name='Xorbits Inference', icon=get_file_content(
+ os.path.join(PROJECT_DIR, "apps", "setting", 'models_provider', 'impl', 'xinference_model_provider', 'icon',
+ 'xinference_icon_svg')))
+
+ @staticmethod
+ def get_base_model_list(api_base, api_key, model_type):
+ base_url = get_base_url(api_base)
+ base_url = base_url if base_url.endswith('/v1') else (base_url + '/v1')
+ headers = {}
+ if api_key:
+ headers['Authorization'] = f"Bearer {api_key}"
+ r = requests.request(method="GET", url=f"{base_url}/models", headers=headers, timeout=5)
+ r.raise_for_status()
+ model_list = r.json().get('data')
+ return [model for model in model_list if model.get('model_type') == model_type]
+
+ @staticmethod
+ def get_model_info_by_name(model_list, model_name):
+ if model_list is None:
+ return []
+ return [model for model in model_list if model.get('model_name') == model_name or model.get('id') == model_name]
diff --git a/apps/setting/models_provider/impl/zhipu_model_provider/credential/image.py b/apps/setting/models_provider/impl/zhipu_model_provider/credential/image.py
new file mode 100644
index 00000000000..599526df7eb
--- /dev/null
+++ b/apps/setting/models_provider/impl/zhipu_model_provider/credential/image.py
@@ -0,0 +1,71 @@
+# coding=utf-8
+import traceback
+from typing import Dict
+
+from django.utils.translation import gettext_lazy as _, gettext
+from langchain_core.messages import HumanMessage
+
+from common import forms
+from common.exception.app_exception import AppApiException
+from common.forms import BaseForm, TooltipLabel
+from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode
+
+
+class ZhiPuImageModelParams(BaseForm):
+ temperature = forms.SliderField(TooltipLabel(_('Temperature'),
+ _('Higher values make the output more random, while lower values make it more focused and deterministic')),
+ required=True, default_value=0.95,
+ _min=0.1,
+ _max=1.0,
+ _step=0.01,
+ precision=2)
+
+ max_tokens = forms.SliderField(
+ TooltipLabel(_('Output the maximum Tokens'),
+ _('Specify the maximum number of tokens that the model can generate')),
+ required=True, default_value=1024,
+ _min=1,
+ _max=100000,
+ _step=1,
+ precision=0)
+
+
+class ZhiPuImageModelCredential(BaseForm, BaseModelCredential):
+ api_key = forms.PasswordInputField('API Key', required=True)
+
+ def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], model_params, provider,
+ raise_exception=False):
+ model_type_list = provider.get_model_type_list()
+ if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))):
+ raise AppApiException(ValidCode.valid_error.value,
+ gettext('{model_type} Model type is not supported').format(model_type=model_type))
+
+ for key in ['api_key']:
+ if key not in model_credential:
+ if raise_exception:
+ raise AppApiException(ValidCode.valid_error.value, gettext('{key} is required').format(key=key))
+ else:
+ return False
+ try:
+ model = provider.get_model(model_type, model_name, model_credential, **model_params)
+ res = model.stream([HumanMessage(content=[{"type": "text", "text": gettext('Hello')}])])
+ for chunk in res:
+ print(chunk)
+ except Exception as e:
+ traceback.print_exc()
+ if isinstance(e, AppApiException):
+ raise e
+ if raise_exception:
+ raise AppApiException(ValidCode.valid_error.value,
+ gettext(
+ 'Verification failed, please check whether the parameters are correct: {error}').format(
+ error=str(e)))
+ else:
+ return False
+ return True
+
+ def encryption_dict(self, model: Dict[str, object]):
+ return {**model, 'api_key': super().encryption(model.get('api_key', ''))}
+
+ def get_model_params_setting_form(self, model_name):
+ return ZhiPuImageModelParams()
diff --git a/apps/setting/models_provider/impl/zhipu_model_provider/credential/llm.py b/apps/setting/models_provider/impl/zhipu_model_provider/credential/llm.py
new file mode 100644
index 00000000000..e6dc74d6260
--- /dev/null
+++ b/apps/setting/models_provider/impl/zhipu_model_provider/credential/llm.py
@@ -0,0 +1,76 @@
+# coding=utf-8
+"""
+ @project: MaxKB
+ @Author:虎
+ @file: llm.py
+ @date:2024/7/12 10:46
+ @desc:
+"""
+import traceback
+from typing import Dict
+
+from django.utils.translation import gettext_lazy as _, gettext
+from langchain_core.messages import HumanMessage
+
+from common import forms
+from common.exception.app_exception import AppApiException
+from common.forms import BaseForm, TooltipLabel
+from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode
+
+
+class ZhiPuLLMModelParams(BaseForm):
+ temperature = forms.SliderField(TooltipLabel(_('Temperature'),
+ _('Higher values make the output more random, while lower values make it more focused and deterministic')),
+ required=True, default_value=0.95,
+ _min=0.1,
+ _max=1.0,
+ _step=0.01,
+ precision=2)
+
+ max_tokens = forms.SliderField(
+ TooltipLabel(_('Output the maximum Tokens'),
+ _('Specify the maximum number of tokens that the model can generate')),
+ required=True, default_value=1024,
+ _min=1,
+ _max=100000,
+ _step=1,
+ precision=0)
+
+
+class ZhiPuLLMModelCredential(BaseForm, BaseModelCredential):
+
+ def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], model_params, provider,
+ raise_exception=False):
+ model_type_list = provider.get_model_type_list()
+ if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))):
+ raise AppApiException(ValidCode.valid_error.value,
+ gettext('{model_type} Model type is not supported').format(model_type=model_type))
+ for key in ['api_key']:
+ if key not in model_credential:
+ if raise_exception:
+ raise AppApiException(ValidCode.valid_error.value, gettext('{key} is required').format(key=key))
+ else:
+ return False
+ try:
+ model = provider.get_model(model_type, model_name, model_credential, **model_params)
+ model.invoke([HumanMessage(content=gettext('Hello'))])
+ except Exception as e:
+ traceback.print_exc()
+ if isinstance(e, AppApiException):
+ raise e
+ if raise_exception:
+ raise AppApiException(ValidCode.valid_error.value,
+ gettext(
+ 'Verification failed, please check whether the parameters are correct: {error}').format(
+ error=str(e)))
+ else:
+ return False
+ return True
+
+ def encryption_dict(self, model: Dict[str, object]):
+ return {**model, 'api_key': super().encryption(model.get('api_key', ''))}
+
+ api_key = forms.PasswordInputField('API Key', required=True)
+
+ def get_model_params_setting_form(self, model_name):
+ return ZhiPuLLMModelParams()
diff --git a/apps/setting/models_provider/impl/zhipu_model_provider/credential/tti.py b/apps/setting/models_provider/impl/zhipu_model_provider/credential/tti.py
new file mode 100644
index 00000000000..38a88f26743
--- /dev/null
+++ b/apps/setting/models_provider/impl/zhipu_model_provider/credential/tti.py
@@ -0,0 +1,69 @@
+# coding=utf-8
+import traceback
+from typing import Dict
+
+from django.utils.translation import gettext_lazy as _, gettext
+
+from common import forms
+from common.exception.app_exception import AppApiException
+from common.forms import BaseForm, TooltipLabel
+from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode
+
+
+class ZhiPuTTIModelParams(BaseForm):
+ size = forms.SingleSelect(
+ TooltipLabel(_('Image size'),
+ _('Image size, only cogview-3-plus supports this parameter. Optional range: [1024x1024,768x1344,864x1152,1344x768,1152x864,1440x720,720x1440], the default is 1024x1024.')),
+ required=True,
+ default_value='1024x1024',
+ option_list=[
+ {'value': '1024x1024', 'label': '1024x1024'},
+ {'value': '768x1344', 'label': '768x1344'},
+ {'value': '864x1152', 'label': '864x1152'},
+ {'value': '1344x768', 'label': '1344x768'},
+ {'value': '1152x864', 'label': '1152x864'},
+ {'value': '1440x720', 'label': '1440x720'},
+ {'value': '720x1440', 'label': '720x1440'},
+ ],
+ text_field='label',
+ value_field='value')
+
+
+class ZhiPuTextToImageModelCredential(BaseForm, BaseModelCredential):
+ api_key = forms.PasswordInputField('API Key', required=True)
+
+ def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], model_params, provider,
+ raise_exception=False):
+ model_type_list = provider.get_model_type_list()
+ if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))):
+ raise AppApiException(ValidCode.valid_error.value,
+ gettext('{model_type} Model type is not supported').format(model_type=model_type))
+
+ for key in ['api_key']:
+ if key not in model_credential:
+ if raise_exception:
+ raise AppApiException(ValidCode.valid_error.value, gettext('{key} is required').format(key=key))
+ else:
+ return False
+ try:
+ model = provider.get_model(model_type, model_name, model_credential, **model_params)
+ res = model.check_auth()
+ print(res)
+ except Exception as e:
+ traceback.print_exc()
+ if isinstance(e, AppApiException):
+ raise e
+ if raise_exception:
+ raise AppApiException(ValidCode.valid_error.value,
+ gettext(
+ 'Verification failed, please check whether the parameters are correct: {error}').format(
+ error=str(e)))
+ else:
+ return False
+ return True
+
+ def encryption_dict(self, model: Dict[str, object]):
+ return {**model, 'api_key': super().encryption(model.get('api_key', ''))}
+
+ def get_model_params_setting_form(self, model_name):
+ return ZhiPuTTIModelParams()
diff --git a/apps/setting/models_provider/impl/zhipu_model_provider/model/image.py b/apps/setting/models_provider/impl/zhipu_model_provider/model/image.py
new file mode 100644
index 00000000000..6ac7830d8ff
--- /dev/null
+++ b/apps/setting/models_provider/impl/zhipu_model_provider/model/image.py
@@ -0,0 +1,20 @@
+from typing import Dict
+
+from setting.models_provider.base_model_provider import MaxKBBaseModel
+from setting.models_provider.impl.base_chat_open_ai import BaseChatOpenAI
+
+
+class ZhiPuImage(MaxKBBaseModel, BaseChatOpenAI):
+
+ @staticmethod
+ def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs):
+ optional_params = MaxKBBaseModel.filter_optional_params(model_kwargs)
+ return ZhiPuImage(
+ model_name=model_name,
+ openai_api_key=model_credential.get('api_key'),
+ openai_api_base='https://open.bigmodel.cn/api/paas/v4',
+ # stream_options={"include_usage": True},
+ streaming=True,
+ stream_usage=True,
+ extra_body=optional_params
+ )
diff --git a/apps/setting/models_provider/impl/zhipu_model_provider/model/llm.py b/apps/setting/models_provider/impl/zhipu_model_provider/model/llm.py
new file mode 100644
index 00000000000..03699321c82
--- /dev/null
+++ b/apps/setting/models_provider/impl/zhipu_model_provider/model/llm.py
@@ -0,0 +1,107 @@
+# coding=utf-8
+"""
+ @project: maxkb
+ @Author:虎
+ @file: llm.py
+ @date:2024/4/28 11:42
+ @desc:
+"""
+
+import json
+from collections.abc import Iterator
+from typing import Any, Dict, List, Optional
+
+from langchain_community.chat_models import ChatZhipuAI
+from langchain_community.chat_models.zhipuai import _truncate_params, _get_jwt_token, connect_sse, \
+ _convert_delta_to_message_chunk
+from langchain_core.callbacks import (
+ CallbackManagerForLLMRun,
+)
+from langchain_core.messages import (
+ AIMessageChunk,
+ BaseMessage
+)
+from langchain_core.outputs import ChatGenerationChunk
+
+from setting.models_provider.base_model_provider import MaxKBBaseModel
+
+
+class ZhipuChatModel(MaxKBBaseModel, ChatZhipuAI):
+ optional_params: dict
+
+ @staticmethod
+ def is_cache_model():
+ return False
+
+ @staticmethod
+ def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs):
+ optional_params = MaxKBBaseModel.filter_optional_params(model_kwargs)
+ zhipuai_chat = ZhipuChatModel(
+ api_key=model_credential.get('api_key'),
+ model=model_name,
+ streaming=model_kwargs.get('streaming', False),
+ optional_params=optional_params,
+ **optional_params,
+ )
+ return zhipuai_chat
+
+ usage_metadata: dict = {}
+
+ def get_last_generation_info(self) -> Optional[Dict[str, Any]]:
+ return self.usage_metadata
+
+ def get_num_tokens_from_messages(self, messages: List[BaseMessage]) -> int:
+ return self.usage_metadata.get('prompt_tokens', 0)
+
+ def get_num_tokens(self, text: str) -> int:
+ return self.usage_metadata.get('completion_tokens', 0)
+
+ def _stream(
+ self,
+ messages: List[BaseMessage],
+ stop: Optional[List[str]] = None,
+ run_manager: Optional[CallbackManagerForLLMRun] = None,
+ **kwargs: Any,
+ ) -> Iterator[ChatGenerationChunk]:
+ """Stream the chat response in chunks."""
+ if self.zhipuai_api_key is None:
+ raise ValueError("Did not find zhipuai_api_key.")
+ if self.zhipuai_api_base is None:
+ raise ValueError("Did not find zhipu_api_base.")
+ message_dicts, params = self._create_message_dicts(messages, stop)
+ payload = {**params, **kwargs, **self.optional_params, "messages": message_dicts, "stream": True}
+ _truncate_params(payload)
+ headers = {
+ "Authorization": _get_jwt_token(self.zhipuai_api_key),
+ "Accept": "application/json",
+ }
+
+ default_chunk_class = AIMessageChunk
+ import httpx
+
+ with httpx.Client(headers=headers, timeout=60) as client:
+ with connect_sse(
+ client, "POST", self.zhipuai_api_base, json=payload
+ ) as event_source:
+ for sse in event_source.iter_sse():
+ chunk = json.loads(sse.data)
+ if len(chunk["choices"]) == 0:
+ continue
+ choice = chunk["choices"][0]
+ generation_info = {}
+ if "usage" in chunk:
+ generation_info = chunk["usage"]
+ self.usage_metadata = generation_info
+ chunk = _convert_delta_to_message_chunk(
+ choice["delta"], default_chunk_class
+ )
+ finish_reason = choice.get("finish_reason", None)
+
+ chunk = ChatGenerationChunk(
+ message=chunk, generation_info=generation_info
+ )
+ yield chunk
+ if run_manager:
+ run_manager.on_llm_new_token(chunk.text, chunk=chunk)
+ if finish_reason is not None:
+ break
diff --git a/apps/setting/models_provider/impl/zhipu_model_provider/model/tti.py b/apps/setting/models_provider/impl/zhipu_model_provider/model/tti.py
new file mode 100644
index 00000000000..a3137a50e3e
--- /dev/null
+++ b/apps/setting/models_provider/impl/zhipu_model_provider/model/tti.py
@@ -0,0 +1,69 @@
+from typing import Dict
+
+from django.utils.translation import gettext
+from langchain_community.chat_models import ChatZhipuAI
+from langchain_core.messages import HumanMessage
+from zhipuai import ZhipuAI
+
+from common.config.tokenizer_manage_config import TokenizerManage
+from setting.models_provider.base_model_provider import MaxKBBaseModel
+from setting.models_provider.impl.base_tti import BaseTextToImage
+
+
+def custom_get_token_ids(text: str):
+ tokenizer = TokenizerManage.get_tokenizer()
+ return tokenizer.encode(text)
+
+
+class ZhiPuTextToImage(MaxKBBaseModel, BaseTextToImage):
+ api_key: str
+ model: str
+ params: dict
+
+ def __init__(self, **kwargs):
+ super().__init__(**kwargs)
+ self.api_key = kwargs.get('api_key')
+ self.model = kwargs.get('model')
+ self.params = kwargs.get('params')
+
+ @staticmethod
+ def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs):
+ optional_params = {'params': {'size': '1024x1024'}}
+ for key, value in model_kwargs.items():
+ if key not in ['model_id', 'use_local', 'streaming']:
+ optional_params['params'][key] = value
+ return ZhiPuTextToImage(
+ model=model_name,
+ api_key=model_credential.get('api_key'),
+ **optional_params,
+ )
+
+ def is_cache_model(self):
+ return False
+
+ def check_auth(self):
+ chat = ChatZhipuAI(
+ zhipuai_api_key=self.api_key,
+ model_name=self.model,
+ )
+ chat.invoke([HumanMessage([{"type": "text", "text": gettext('Hello')}])])
+
+ # self.generate_image('生成一个小猫图片')
+
+ def generate_image(self, prompt: str, negative_prompt: str = None):
+ # chat = ChatZhipuAI(
+ # zhipuai_api_key=self.api_key,
+ # model_name=self.model,
+ # )
+ chat = ZhipuAI(api_key=self.api_key)
+ response = chat.images.generations(
+ model=self.model, # 填写需要调用的模型编码
+ prompt=prompt, # 填写需要生成图片的文本
+ **self.params # 填写额外参数
+ )
+ file_urls = []
+ for content in response.data:
+ url = content.url
+ file_urls.append(url)
+
+ return file_urls
diff --git a/apps/setting/models_provider/impl/zhipu_model_provider/model/zhipu_chat_model.py b/apps/setting/models_provider/impl/zhipu_model_provider/model/zhipu_chat_model.py
deleted file mode 100644
index ceab8988d92..00000000000
--- a/apps/setting/models_provider/impl/zhipu_model_provider/model/zhipu_chat_model.py
+++ /dev/null
@@ -1,24 +0,0 @@
-# coding=utf-8
-"""
- @project: maxkb
- @Author:虎
- @file: zhipu_chat_model.py
- @date:2024/4/28 11:42
- @desc:
-"""
-from typing import List
-
-from langchain_community.chat_models import ChatZhipuAI
-from langchain_core.messages import BaseMessage, get_buffer_string
-
-from common.config.tokenizer_manage_config import TokenizerManage
-
-
-class ZhipuChatModel(ChatZhipuAI):
- def get_num_tokens_from_messages(self, messages: List[BaseMessage]) -> int:
- tokenizer = TokenizerManage.get_tokenizer()
- return sum([len(tokenizer.encode(get_buffer_string([m]))) for m in messages])
-
- def get_num_tokens(self, text: str) -> int:
- tokenizer = TokenizerManage.get_tokenizer()
- return len(tokenizer.encode(text))
diff --git a/apps/setting/models_provider/impl/zhipu_model_provider/zhipu_model_provider.py b/apps/setting/models_provider/impl/zhipu_model_provider/zhipu_model_provider.py
index ebbb3b46934..fcaa5447bfb 100644
--- a/apps/setting/models_provider/impl/zhipu_model_provider/zhipu_model_provider.py
+++ b/apps/setting/models_provider/impl/zhipu_model_provider/zhipu_model_provider.py
@@ -7,88 +7,71 @@
@desc:
"""
import os
-from typing import Dict
-from langchain.schema import HumanMessage
-from langchain_community.chat_models import ChatZhipuAI
-
-from common import forms
-from common.exception.app_exception import AppApiException
-from common.forms import BaseForm
from common.util.file_util import get_file_content
-from setting.models_provider.base_model_provider import ModelProvideInfo, ModelTypeConst, BaseModelCredential, \
- ModelInfo, IModelProvider, ValidCode
-from setting.models_provider.impl.zhipu_model_provider.model.zhipu_chat_model import ZhipuChatModel
+from setting.models_provider.base_model_provider import ModelProvideInfo, ModelTypeConst, ModelInfo, IModelProvider, \
+ ModelInfoManage
+from setting.models_provider.impl.zhipu_model_provider.credential.image import ZhiPuImageModelCredential
+from setting.models_provider.impl.zhipu_model_provider.credential.llm import ZhiPuLLMModelCredential
+from setting.models_provider.impl.zhipu_model_provider.credential.tti import ZhiPuTextToImageModelCredential
+from setting.models_provider.impl.zhipu_model_provider.model.image import ZhiPuImage
+from setting.models_provider.impl.zhipu_model_provider.model.llm import ZhipuChatModel
+from setting.models_provider.impl.zhipu_model_provider.model.tti import ZhiPuTextToImage
from smartdoc.conf import PROJECT_DIR
-
-
-class ZhiPuLLMModelCredential(BaseForm, BaseModelCredential):
-
- def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], raise_exception=False):
- model_type_list = ZhiPuModelProvider().get_model_type_list()
- if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))):
- raise AppApiException(ValidCode.valid_error.value, f'{model_type} 模型类型不支持')
- for key in ['api_key']:
- if key not in model_credential:
- if raise_exception:
- raise AppApiException(ValidCode.valid_error.value, f'{key} 字段为必填字段')
- else:
- return False
- try:
- model = ZhiPuModelProvider().get_model(model_type, model_name, model_credential)
- model.invoke([HumanMessage(content='你好')])
- except Exception as e:
- if isinstance(e, AppApiException):
- raise e
- if raise_exception:
- raise AppApiException(ValidCode.valid_error.value, f'校验失败,请检查参数是否正确: {str(e)}')
- else:
- return False
- return True
-
- def encryption_dict(self, model: Dict[str, object]):
- return {**model, 'api_key': super().encryption(model.get('api_key', ''))}
-
- api_key = forms.PasswordInputField('API Key', required=True)
-
+from django.utils.translation import gettext as _
qwen_model_credential = ZhiPuLLMModelCredential()
-
-model_dict = {
- 'glm-4': ModelInfo('glm-4', '', ModelTypeConst.LLM, qwen_model_credential),
- 'glm-4v': ModelInfo('glm-4v', '', ModelTypeConst.LLM, qwen_model_credential),
- 'glm-3-turbo': ModelInfo('glm-3-turbo', '', ModelTypeConst.LLM, qwen_model_credential)
-}
+zhipu_image_model_credential = ZhiPuImageModelCredential()
+zhipu_tti_model_credential = ZhiPuTextToImageModelCredential()
+
+model_info_list = [
+ ModelInfo('glm-4', '', ModelTypeConst.LLM, qwen_model_credential, ZhipuChatModel),
+ ModelInfo('glm-4v', '', ModelTypeConst.LLM, qwen_model_credential, ZhipuChatModel),
+ ModelInfo('glm-3-turbo', '', ModelTypeConst.LLM, qwen_model_credential, ZhipuChatModel)
+]
+
+model_info_image_list = [
+ ModelInfo('glm-4v-plus', _('Have strong multi-modal understanding capabilities. Able to understand up to five images simultaneously and supports video content understanding'),
+ ModelTypeConst.IMAGE, zhipu_image_model_credential,
+ ZhiPuImage),
+ ModelInfo('glm-4v', _('Focus on single picture understanding. Suitable for scenarios requiring efficient image analysis'),
+ ModelTypeConst.IMAGE, zhipu_image_model_credential,
+ ZhiPuImage),
+ ModelInfo('glm-4v-flash', _('Focus on single picture understanding. Suitable for scenarios requiring efficient image analysis (free)'),
+ ModelTypeConst.IMAGE, zhipu_image_model_credential,
+ ZhiPuImage),
+]
+
+model_info_tti_list = [
+ ModelInfo('cogview-3', _('Quickly and accurately generate images based on user text descriptions. Resolution supports 1024x1024'),
+ ModelTypeConst.TTI, zhipu_tti_model_credential,
+ ZhiPuTextToImage),
+ ModelInfo('cogview-3-plus', _('Generate high-quality images based on user text descriptions, supporting multiple image sizes'),
+ ModelTypeConst.TTI, zhipu_tti_model_credential,
+ ZhiPuTextToImage),
+ ModelInfo('cogview-3-flash', _('Generate high-quality images based on user text descriptions, supporting multiple image sizes (free)'),
+ ModelTypeConst.TTI, zhipu_tti_model_credential,
+ ZhiPuTextToImage),
+]
+
+model_info_manage = (
+ ModelInfoManage.builder()
+ .append_model_info_list(model_info_list)
+ .append_default_model_info(ModelInfo('glm-4', '', ModelTypeConst.LLM, qwen_model_credential, ZhipuChatModel))
+ .append_model_info_list(model_info_image_list)
+ .append_default_model_info(model_info_image_list[0])
+ .append_model_info_list(model_info_tti_list)
+ .append_default_model_info(model_info_tti_list[0])
+ .build()
+)
class ZhiPuModelProvider(IModelProvider):
- def get_dialogue_number(self):
- return 3
-
- def get_model(self, model_type, model_name, model_credential: Dict[str, object], **model_kwargs) -> ChatZhipuAI:
- zhipuai_chat = ZhipuChatModel(
- temperature=0.5,
- api_key=model_credential.get('api_key'),
- model=model_name
- )
- return zhipuai_chat
-
- def get_model_credential(self, model_type, model_name):
- if model_name in model_dict:
- return model_dict.get(model_name).model_credential
- return qwen_model_credential
+ def get_model_info_manage(self):
+ return model_info_manage
def get_model_provide_info(self):
- return ModelProvideInfo(provider='model_zhipu_provider', name='智谱AI', icon=get_file_content(
+ return ModelProvideInfo(provider='model_zhipu_provider', name=_('zhipu AI'), icon=get_file_content(
os.path.join(PROJECT_DIR, "apps", "setting", 'models_provider', 'impl', 'zhipu_model_provider', 'icon',
'zhipuai_icon_svg')))
-
- def get_model_list(self, model_type: str):
- if model_type is None:
- raise AppApiException(500, '模型类型不能为空')
- return [model_dict.get(key).to_dict() for key in
- list(filter(lambda key: model_dict.get(key).model_type == model_type, model_dict.keys()))]
-
- def get_model_type_list(self):
- return [{'key': "大语言模型", 'value': "LLM"}]
diff --git a/apps/setting/models_provider/tools.py b/apps/setting/models_provider/tools.py
new file mode 100644
index 00000000000..150e3d40018
--- /dev/null
+++ b/apps/setting/models_provider/tools.py
@@ -0,0 +1,37 @@
+# coding=utf-8
+"""
+ @project: MaxKB
+ @Author:虎
+ @file: tools.py
+ @date:2024/7/22 11:18
+ @desc:
+"""
+from django.db import connection
+from django.db.models import QuerySet
+
+from common.config.embedding_config import ModelManage
+from setting.models import Model
+from setting.models_provider import get_model
+from django.utils.translation import gettext_lazy as _
+
+
+def get_model_by_id(_id, user_id):
+ model = QuerySet(Model).filter(id=_id).first()
+ # 手动关闭数据库连接
+ connection.close()
+ if model is None:
+ raise Exception(_('Model does not exist'))
+ if model.permission_type == 'PRIVATE' and str(model.user_id) != str(user_id):
+ raise Exception(_('No permission to use this model') + f"{model.name}")
+ return model
+
+
+def get_model_instance_by_model_user_id(model_id, user_id, **kwargs):
+ """
+ 获取模型实例,根据模型相关数据
+ @param model_id: 模型id
+ @param user_id: 用户id
+ @return: 模型实例
+ """
+ model = get_model_by_id(model_id, user_id)
+ return ModelManage.get_model(model_id, lambda _id: get_model(model, **kwargs))
diff --git a/apps/setting/serializers/model_apply_serializers.py b/apps/setting/serializers/model_apply_serializers.py
new file mode 100644
index 00000000000..88609a18fd6
--- /dev/null
+++ b/apps/setting/serializers/model_apply_serializers.py
@@ -0,0 +1,76 @@
+# coding=utf-8
+"""
+ @project: MaxKB
+ @Author:虎
+ @file: model_apply_serializers.py
+ @date:2024/8/20 20:39
+ @desc:
+"""
+from django.db import connection
+from django.db.models import QuerySet
+from langchain_core.documents import Document
+from rest_framework import serializers
+
+from common.config.embedding_config import ModelManage
+from common.util.field_message import ErrMessage
+from setting.models import Model
+from setting.models_provider import get_model
+from django.utils.translation import gettext_lazy as _
+
+def get_embedding_model(model_id):
+ model = QuerySet(Model).filter(id=model_id).first()
+ # 手动关闭数据库连接
+ connection.close()
+ embedding_model = ModelManage.get_model(model_id,
+ lambda _id: get_model(model, use_local=True))
+ return embedding_model
+
+
+class EmbedDocuments(serializers.Serializer):
+ texts = serializers.ListField(required=True, child=serializers.CharField(required=True,
+ error_messages=ErrMessage.char(
+ _('vector text'))),
+ error_messages=ErrMessage.list(_('vector text list')))
+
+
+class EmbedQuery(serializers.Serializer):
+ text = serializers.CharField(required=True, error_messages=ErrMessage.char(_('vector text')))
+
+
+class CompressDocument(serializers.Serializer):
+ page_content = serializers.CharField(required=True, error_messages=ErrMessage.char(_('text')))
+ metadata = serializers.DictField(required=False, error_messages=ErrMessage.dict(_('metadata')))
+
+
+class CompressDocuments(serializers.Serializer):
+ documents = CompressDocument(required=True, many=True)
+ query = serializers.CharField(required=True, error_messages=ErrMessage.char(_('query')))
+
+
+class ModelApplySerializers(serializers.Serializer):
+ model_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid(_('model id')))
+
+ def embed_documents(self, instance, with_valid=True):
+ if with_valid:
+ self.is_valid(raise_exception=True)
+ EmbedDocuments(data=instance).is_valid(raise_exception=True)
+
+ model = get_embedding_model(self.data.get('model_id'))
+ return model.embed_documents(instance.getlist('texts'))
+
+ def embed_query(self, instance, with_valid=True):
+ if with_valid:
+ self.is_valid(raise_exception=True)
+ EmbedQuery(data=instance).is_valid(raise_exception=True)
+
+ model = get_embedding_model(self.data.get('model_id'))
+ return model.embed_query(instance.get('text'))
+
+ def compress_documents(self, instance, with_valid=True):
+ if with_valid:
+ self.is_valid(raise_exception=True)
+ CompressDocuments(data=instance).is_valid(raise_exception=True)
+ model = get_embedding_model(self.data.get('model_id'))
+ return [{'page_content': d.page_content, 'metadata': d.metadata} for d in model.compress_documents(
+ [Document(page_content=document.get('page_content'), metadata=document.get('metadata')) for document in
+ instance.get('documents')], instance.get('query'))]
diff --git a/apps/setting/serializers/provider_serializers.py b/apps/setting/serializers/provider_serializers.py
index 351a98c8a80..9f16901627e 100644
--- a/apps/setting/serializers/provider_serializers.py
+++ b/apps/setting/serializers/provider_serializers.py
@@ -7,21 +7,35 @@
@desc:
"""
import json
+import re
import threading
import time
import uuid
from typing import Dict
-from django.db.models import QuerySet
+from django.core import validators
+from django.db.models import QuerySet, Q
from rest_framework import serializers
from application.models import Application
+from common.config.embedding_config import ModelManage
from common.exception.app_exception import AppApiException
from common.util.field_message import ErrMessage
from common.util.rsa_util import rsa_long_decrypt, rsa_long_encrypt
-from setting.models.model_management import Model, Status
+from dataset.models import DataSet
+from setting.models.model_management import Model, Status, PermissionType
+from setting.models_provider import get_model, get_model_credential
from setting.models_provider.base_model_provider import ValidCode, DownModelChunkStatus
from setting.models_provider.constants.model_provider_constants import ModelProvideConstants
+from django.utils.translation import gettext_lazy as _
+
+
+def get_default_model_params_setting(provider, model_type, model_name):
+ credential = get_model_credential(provider, model_type, model_name)
+ setting_form = credential.get_model_params_setting_form(model_name)
+ if setting_form is not None:
+ return setting_form.to_form_list()
+ return []
class ModelPullManage:
@@ -36,6 +50,9 @@ def pull(model: Model, credential: Dict):
for chunk in response:
down_model_chunk[chunk.digest] = chunk.to_dict()
if time.time() - timestamp > 5:
+ model_new = QuerySet(Model).filter(id=model.id).first()
+ if model_new.status == Status.PAUSE_DOWNLOAD:
+ return
QuerySet(Model).filter(id=model.id).update(
meta={"down_model_chunk": list(down_model_chunk.values())})
timestamp = time.time()
@@ -56,49 +73,76 @@ def pull(model: Model, credential: Dict):
class ModelSerializer(serializers.Serializer):
class Query(serializers.Serializer):
- user_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid("用户id"))
+ user_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid(_('user id')))
+
+ name = serializers.CharField(required=False, max_length=64,
+ error_messages=ErrMessage.char(_('model name')))
- name = serializers.CharField(required=False, max_length=20,
- error_messages=ErrMessage.char("模型名称"))
+ model_type = serializers.CharField(required=False, error_messages=ErrMessage.char(_('model type')))
- model_type = serializers.CharField(required=False, error_messages=ErrMessage.char("模型类型"))
+ model_name = serializers.CharField(required=False, error_messages=ErrMessage.char(_('model name')))
- model_name = serializers.CharField(required=False, error_messages=ErrMessage.char("基础模型"))
+ provider = serializers.CharField(required=False, error_messages=ErrMessage.char(_('provider')))
- provider = serializers.CharField(required=False, error_messages=ErrMessage.char("供应商"))
+ permission_type = serializers.CharField(required=False, error_messages=ErrMessage.char(_('permission type')))
+
+ create_user = serializers.CharField(required=False, error_messages=ErrMessage.char(_('create user')))
def list(self, with_valid):
if with_valid:
self.is_valid(raise_exception=True)
user_id = self.data.get('user_id')
name = self.data.get('name')
- model_query_set = QuerySet(Model).filter(user_id=user_id)
+ create_user = self.data.get('create_user')
+ if create_user is not None:
+ # 当前用户能查看自己的模型,包括公开和私有的
+ if create_user == user_id:
+ model_query_set = QuerySet(Model).filter(Q(user_id=create_user))
+ # 当前用户能查看其他人的模型,只能查看公开的
+ else:
+ model_query_set = QuerySet(Model).filter(
+ (Q(user_id=self.data.get('create_user')) & Q(permission_type='PUBLIC')))
+ else:
+ model_query_set = QuerySet(Model).filter((Q(user_id=user_id) | Q(permission_type='PUBLIC')))
query_params = {}
if name is not None:
- query_params['name__contains'] = name
+ query_params['name__icontains'] = name
if self.data.get('model_type') is not None:
query_params['model_type'] = self.data.get('model_type')
if self.data.get('model_name') is not None:
query_params['model_name'] = self.data.get('model_name')
if self.data.get('provider') is not None:
query_params['provider'] = self.data.get('provider')
+ if self.data.get('permission_type') is not None:
+ query_params['permission_type'] = self.data.get('permission_type')
return [
{'id': str(model.id), 'provider': model.provider, 'name': model.name, 'model_type': model.model_type,
- 'model_name': model.model_name, 'status': model.status, 'meta': model.meta} for model in
+ 'model_name': model.model_name, 'status': model.status, 'meta': model.meta,
+ 'permission_type': model.permission_type, 'user_id': model.user_id, 'username': model.user.username}
+ for model in
model_query_set.filter(**query_params).order_by("-create_time")]
class Edit(serializers.Serializer):
- user_id = serializers.CharField(required=False, error_messages=ErrMessage.uuid("用户id"))
+ user_id = serializers.CharField(required=False, error_messages=ErrMessage.uuid(_('user id')))
+
+ name = serializers.CharField(required=False, max_length=64,
+ error_messages=ErrMessage.char(_("model name")))
- name = serializers.CharField(required=False, max_length=20,
- error_messages=ErrMessage.char("模型名称"))
+ model_type = serializers.CharField(required=False, error_messages=ErrMessage.char(_("model type")))
- model_type = serializers.CharField(required=False, error_messages=ErrMessage.char("模型类型"))
+ permission_type = serializers.CharField(required=False, error_messages=ErrMessage.char(_("permission type")),
+ validators=[
+ validators.RegexValidator(regex=re.compile("^PUBLIC|PRIVATE$"),
+ message=_(
+ "permissions only supportPUBLIC|PRIVATE"),
+ code=500)
+ ])
- model_name = serializers.CharField(required=False, error_messages=ErrMessage.char("模型类型"))
+ model_name = serializers.CharField(required=False, error_messages=ErrMessage.char(_("model type")))
- credential = serializers.DictField(required=False, error_messages=ErrMessage.dict("认证信息"))
+ credential = serializers.DictField(required=False,
+ error_messages=ErrMessage.dict(_("certification information")))
def is_valid(self, model=None, raise_exception=False):
super().is_valid(raise_exception=True)
@@ -115,43 +159,55 @@ def is_valid(self, model=None, raise_exception=False):
model_name = self.data.get(
'model_name')
credential = self.data.get('credential')
-
+ provider_handler = ModelProvideConstants[provider].value
model_credential = ModelProvideConstants[provider].value.get_model_credential(model_type,
model_name)
source_model_credential = json.loads(rsa_long_decrypt(model.credential))
source_encryption_model_credential = model_credential.encryption_dict(source_model_credential)
if credential is not None:
for k in source_encryption_model_credential.keys():
- if credential[k] == source_encryption_model_credential[k]:
+ if k in credential and credential[k] == source_encryption_model_credential[k]:
credential[k] = source_model_credential[k]
- return credential, model_credential
+ return credential, model_credential, provider_handler
class Create(serializers.Serializer):
- user_id = serializers.CharField(required=True, error_messages=ErrMessage.uuid("用户id"))
+ user_id = serializers.CharField(required=True, error_messages=ErrMessage.uuid(_("user id")))
+
+ name = serializers.CharField(required=True, max_length=64, error_messages=ErrMessage.char(_("model name")))
+
+ provider = serializers.CharField(required=True, error_messages=ErrMessage.char(_("provider")))
- name = serializers.CharField(required=True, max_length=20, error_messages=ErrMessage.char("模型名称"))
+ model_type = serializers.CharField(required=True, error_messages=ErrMessage.char(_("model type")))
- provider = serializers.CharField(required=True, error_messages=ErrMessage.char("供应商"))
+ permission_type = serializers.CharField(required=True, error_messages=ErrMessage.char(_("permission type")),
+ validators=[
+ validators.RegexValidator(regex=re.compile("^PUBLIC|PRIVATE$"),
+ message=_(
+ "permissions only supportPUBLIC|PRIVATE"),
+ code=500)
+ ])
- model_type = serializers.CharField(required=True, error_messages=ErrMessage.char("模型类型"))
+ model_name = serializers.CharField(required=True, error_messages=ErrMessage.char(_("model name")))
- model_name = serializers.CharField(required=True, error_messages=ErrMessage.char("基础模型"))
+ model_params_form = serializers.ListField(required=False, default=list,
+ error_messages=ErrMessage.char(_("parameter configuration")))
- credential = serializers.DictField(required=True, error_messages=ErrMessage.dict("认证信息"))
+ credential = serializers.DictField(required=True,
+ error_messages=ErrMessage.dict(_("certification information")))
def is_valid(self, *, raise_exception=False):
super().is_valid(raise_exception=True)
if QuerySet(Model).filter(user_id=self.data.get('user_id'),
name=self.data.get('name')).exists():
- raise AppApiException(500, f'模型名称【{self.data.get("name")}】已存在')
- # 校验模型认证数据
- ModelProvideConstants[self.data.get('provider')].value.get_model_credential(self.data.get('model_type'),
- self.data.get(
- 'model_name')).is_valid(
- self.data.get('model_type'),
- self.data.get('model_name'),
- self.data.get('credential'),
- raise_exception=True)
+ raise AppApiException(500, _('Model name【{model_name}】already exists').format(
+ model_name=self.data.get("name")))
+ default_params = {item['field']: item['default_value'] for item in self.data.get('model_params_form')}
+ ModelProvideConstants[self.data.get('provider')].value.is_valid_credential(self.data.get('model_type'),
+ self.data.get('model_name'),
+ self.data.get('credential'),
+ default_params,
+ raise_exception=True
+ )
def insert(self, user_id, with_valid=False):
status = Status.SUCCESS
@@ -168,10 +224,14 @@ def insert(self, user_id, with_valid=False):
provider = self.data.get('provider')
model_type = self.data.get('model_type')
model_name = self.data.get('model_name')
+ permission_type = self.data.get('permission_type')
+ model_params_form = self.data.get('model_params_form')
model_credential_str = json.dumps(credential)
model = Model(id=uuid.uuid1(), status=status, user_id=user_id, name=name,
credential=rsa_long_encrypt(model_credential_str),
- provider=provider, model_type=model_type, model_name=model_name)
+ provider=provider, model_type=model_type, model_name=model_name,
+ model_params_form=model_params_form,
+ permission_type=permission_type)
model.save()
if status == Status.DOWNLOAD:
thread = threading.Thread(target=ModelPullManage.pull, args=(model, credential))
@@ -187,12 +247,58 @@ def model_to_dict(model: Model):
'meta': model.meta,
'credential': ModelProvideConstants[model.provider].value.get_model_credential(model.model_type,
model.model_name).encryption_dict(
- credential)}
+ credential),
+ 'permission_type': model.permission_type}
+
+ class ModelParams(serializers.Serializer):
+ id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid("模型id"))
+
+ user_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid(_("user id")))
+
+ def is_valid(self, *, raise_exception=False):
+ super().is_valid(raise_exception=True)
+ model = QuerySet(Model).filter(id=self.data.get("id")).first()
+ if model is None:
+ raise AppApiException(500, '模型不存在')
+ if model.permission_type == PermissionType.PRIVATE and self.data.get('user_id') != str(model.user_id):
+ raise AppApiException(500, '没有权限访问到此模型')
+
+ def get_model_params(self, with_valid=True):
+ if with_valid:
+ self.is_valid(raise_exception=True)
+ model_id = self.data.get('id')
+ model = QuerySet(Model).filter(id=model_id).first()
+ # 已经保存过的模型参数表单
+ return model.model_params_form
+
+ class ModelParamsForm(serializers.Serializer):
+ id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid("模型id"))
+
+ user_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid(_("user id")))
+
+ def is_valid(self, *, raise_exception=False):
+ super().is_valid(raise_exception=True)
+ model = QuerySet(Model).filter(id=self.data.get("id")).first()
+ if model is None:
+ raise AppApiException(500, '模型不存在')
+ if model.permission_type == PermissionType.PRIVATE and self.data.get('user_id') != str(model.user_id):
+ raise AppApiException(500, '没有权限访问到此模型')
+
+ def save_model_params_form(self, model_params_form, with_valid=True):
+ if with_valid:
+ self.is_valid(raise_exception=True)
+ if model_params_form is None:
+ model_params_form = []
+ model_id = self.data.get('id')
+ model = QuerySet(Model).filter(id=model_id).first()
+ model.model_params_form = model_params_form
+ model.save()
+ return True
class Operate(serializers.Serializer):
id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid("模型id"))
- user_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid("用户id"))
+ user_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid(_("user id")))
def is_valid(self, *, raise_exception=False):
super().is_valid(raise_exception=True)
@@ -207,21 +313,53 @@ def one(self, with_valid=False):
return ModelSerializer.model_to_dict(model)
def one_meta(self, with_valid=False):
+ model = None
if with_valid:
- self.is_valid(raise_exception=True)
- model = QuerySet(Model).get(id=self.data.get('id'), user_id=self.data.get('user_id'))
+ super().is_valid(raise_exception=True)
+ model = QuerySet(Model).filter(id=self.data.get("id")).first()
+ if model is None:
+ raise AppApiException(500, _('Model does not exist'))
+ if model.permission_type == 'PRIVATE' and str(model.user_id) != str(self.data.get("user_id")):
+ raise Exception(_('No permission to use this model') + f"{model.name}")
+ if model is None:
+ model = QuerySet(Model).get(id=self.data.get('id'))
return {'id': str(model.id), 'provider': model.provider, 'name': model.name, 'model_type': model.model_type,
'model_name': model.model_name,
'status': model.status,
- 'meta': model.meta, }
+ 'meta': model.meta
+ }
def delete(self, with_valid=True):
if with_valid:
self.is_valid(raise_exception=True)
- application_list = QuerySet(Application).filter(model_id=self.data.get('id')).all()
- if len(application_list) > 0:
- raise AppApiException(500, f"该模型关联了{len(application_list)} 个应用,无法删除该模型。")
- QuerySet(Model).filter(id=self.data.get('id')).delete()
+ model_id = self.data.get('id')
+ model = Model.objects.filter(id=model_id).first()
+ if not model:
+ # 模型不存在,直接返回或抛出异常
+ raise AppApiException(500, "模型不存在")
+ if model.model_type == 'LLM':
+ application_count = Application.objects.filter(model_id=model_id).count()
+ if application_count > 0:
+ raise AppApiException(500, f"该模型关联了{application_count} 个应用,无法删除该模型。")
+ elif model.model_type == 'EMBEDDING':
+ dataset_count = DataSet.objects.filter(embedding_mode_id=model_id).count()
+ if dataset_count > 0:
+ raise AppApiException(500, f"该模型关联了{dataset_count} 个知识库,无法删除该模型。")
+ elif model.model_type == 'TTS':
+ dataset_count = Application.objects.filter(tts_model_id=model_id).count()
+ if dataset_count > 0:
+ raise AppApiException(500, f"该模型关联了{dataset_count} 个应用,无法删除该模型。")
+ elif model.model_type == 'STT':
+ dataset_count = Application.objects.filter(stt_model_id=model_id).count()
+ if dataset_count > 0:
+ raise AppApiException(500, f"该模型关联了{dataset_count} 个应用,无法删除该模型。")
+ model.delete()
+ return True
+
+ def pause_download(self, with_valid=True):
+ if with_valid:
+ self.is_valid(raise_exception=True)
+ QuerySet(Model).filter(id=self.data.get('id')).update(status=Status.PAUSE_DOWNLOAD)
return True
def edit(self, instance: Dict, user_id: str, with_valid=True):
@@ -232,22 +370,25 @@ def edit(self, instance: Dict, user_id: str, with_valid=True):
if model is None:
raise AppApiException(500, '不存在的id')
else:
- credential, model_credential = ModelSerializer.Edit(data={**instance, 'user_id': user_id}).is_valid(
+ credential, model_credential, provider_handler = ModelSerializer.Edit(
+ data={**instance, 'user_id': user_id}).is_valid(
model=model)
try:
model.status = Status.SUCCESS
+ default_params = {item['field']: item['default_value'] for item in model.model_params_form}
# 校验模型认证数据
- model_credential.is_valid(
- model.model_type,
- instance.get("model_name"),
- credential,
- raise_exception=True)
+ provider_handler.is_valid_credential(model.model_type,
+ instance.get("model_name"),
+ credential,
+ default_params,
+ raise_exception=True)
+
except AppApiException as e:
if e.code == ValidCode.model_not_fount:
model.status = Status.DOWNLOAD
else:
raise e
- update_keys = ['credential', 'name', 'model_type', 'model_name']
+ update_keys = ['credential', 'name', 'model_type', 'model_name', 'permission_type']
for update_key in update_keys:
if update_key in instance and instance.get(update_key) is not None:
if update_key == 'credential':
@@ -255,6 +396,8 @@ def edit(self, instance: Dict, user_id: str, with_valid=True):
model.__setattr__(update_key, rsa_long_encrypt(model_credential_str))
else:
model.__setattr__(update_key, instance.get(update_key))
+ # 修改模型时候删除缓存
+ ModelManage.delete_key(str(model.id))
model.save()
if model.status == Status.DOWNLOAD:
thread = threading.Thread(target=ModelPullManage.pull, args=(model, credential))
@@ -263,7 +406,7 @@ def edit(self, instance: Dict, user_id: str, with_valid=True):
class ProviderSerializer(serializers.Serializer):
- provider = serializers.CharField(required=True, error_messages=ErrMessage.char("供应商"))
+ provider = serializers.CharField(required=True, error_messages=ErrMessage.char(_("provider")))
method = serializers.CharField(required=True, error_messages=ErrMessage.char("执行函数名称"))
diff --git a/apps/setting/serializers/system_setting.py b/apps/setting/serializers/system_setting.py
index a66b15805f2..9f1525bbff3 100644
--- a/apps/setting/serializers/system_setting.py
+++ b/apps/setting/serializers/system_setting.py
@@ -13,6 +13,7 @@
from common.exception.app_exception import AppApiException
from common.util.field_message import ErrMessage
from setting.models.system_management import SystemSetting, SettingType
+from django.utils.translation import gettext_lazy as _
class SystemSettingSerializer(serializers.Serializer):
@@ -25,13 +26,13 @@ def one():
return system_setting.meta
class Create(serializers.Serializer):
- email_host = serializers.CharField(required=True, error_messages=ErrMessage.char("SMTP 主机"))
- email_port = serializers.IntegerField(required=True, error_messages=ErrMessage.char("SMTP 端口"))
- email_host_user = serializers.CharField(required=True, error_messages=ErrMessage.char("发件人邮箱"))
- email_host_password = serializers.CharField(required=True, error_messages=ErrMessage.char("密码"))
- email_use_tls = serializers.BooleanField(required=True, error_messages=ErrMessage.char("是否开启TLS"))
- email_use_ssl = serializers.BooleanField(required=True, error_messages=ErrMessage.char("是否开启SSL"))
- from_email = serializers.EmailField(required=True, error_messages=ErrMessage.char("发送人邮箱"))
+ email_host = serializers.CharField(required=True, error_messages=ErrMessage.char(_('SMTP host')))
+ email_port = serializers.IntegerField(required=True, error_messages=ErrMessage.char(_('SMTP port')))
+ email_host_user = serializers.CharField(required=True, error_messages=ErrMessage.char(_('Sender\'s email')))
+ email_host_password = serializers.CharField(required=True, error_messages=ErrMessage.char(_('Password')))
+ email_use_tls = serializers.BooleanField(required=True, error_messages=ErrMessage.char(_('Whether to enable TLS')))
+ email_use_ssl = serializers.BooleanField(required=True, error_messages=ErrMessage.char(_('Whether to enable SSL')))
+ from_email = serializers.EmailField(required=True, error_messages=ErrMessage.char(_('Sender\'s email')))
def is_valid(self, *, raise_exception=False):
super().is_valid(raise_exception=True)
@@ -45,7 +46,7 @@ def is_valid(self, *, raise_exception=False):
self.data.get("email_use_ssl")
).open()
except Exception as e:
- raise AppApiException(1004, "邮箱校验失败")
+ raise AppApiException(1004, _('Email verification failed'))
def update_or_save(self):
self.is_valid(raise_exception=True)
diff --git a/apps/setting/serializers/team_serializers.py b/apps/setting/serializers/team_serializers.py
index 46266bb35db..0b264e88b29 100644
--- a/apps/setting/serializers/team_serializers.py
+++ b/apps/setting/serializers/team_serializers.py
@@ -29,6 +29,7 @@
from smartdoc.conf import PROJECT_DIR
from users.models.user import User
from users.serializers.user_serializers import UserSerializer
+from django.utils.translation import gettext_lazy as _
user_cache = cache.caches['user_cache']
@@ -38,39 +39,39 @@ def get_response_body_api():
type=openapi.TYPE_OBJECT,
required=['id', 'username', 'email', 'role', 'is_active', 'team_id', 'member_id'],
properties={
- 'id': openapi.Schema(type=openapi.TYPE_STRING, title="用户id", description="用户id"),
- 'username': openapi.Schema(type=openapi.TYPE_STRING, title="用户名", description="用户名"),
- 'email': openapi.Schema(type=openapi.TYPE_STRING, title="邮箱", description="邮箱地址"),
- 'role': openapi.Schema(type=openapi.TYPE_STRING, title="角色", description="角色"),
- 'is_active': openapi.Schema(type=openapi.TYPE_STRING, title="是否可用", description="是否可用"),
- 'team_id': openapi.Schema(type=openapi.TYPE_STRING, title="团队id", description="团队id"),
- 'member_id': openapi.Schema(type=openapi.TYPE_STRING, title="成员id", description="成员id"),
+ 'id': openapi.Schema(type=openapi.TYPE_STRING, title=_('user id'), description=_('user id')),
+ 'username': openapi.Schema(type=openapi.TYPE_STRING, title=_('Username'), description=_('Username')),
+ 'email': openapi.Schema(type=openapi.TYPE_STRING, title=_('Email'), description=_('Email')),
+ 'role': openapi.Schema(type=openapi.TYPE_STRING, title=_('Role'), description=_('Role')),
+ 'is_active': openapi.Schema(type=openapi.TYPE_STRING, title=_('Is active'), description=_('Is active')),
+ 'team_id': openapi.Schema(type=openapi.TYPE_STRING, title=_('team id'), description=_('team id')),
+ 'member_id': openapi.Schema(type=openapi.TYPE_STRING, title=_('member id'), description=_('member id')),
}
)
class TeamMemberPermissionOperate(ApiMixin, serializers.Serializer):
- USE = serializers.BooleanField(required=True, error_messages=ErrMessage.boolean("使用"))
- MANAGE = serializers.BooleanField(required=True, error_messages=ErrMessage.boolean("管理"))
+ USE = serializers.BooleanField(required=True, error_messages=ErrMessage.boolean(_('use')))
+ MANAGE = serializers.BooleanField(required=True, error_messages=ErrMessage.boolean(_('manage')))
def get_request_body_api(self):
return openapi.Schema(type=openapi.TYPE_OBJECT,
- title="类型",
- description="操作权限USE,MANAGE权限",
+ title=_('type'),
+ description=_('Operation permissions USE, MANAGE permissions'),
properties={
'USE': openapi.Schema(type=openapi.TYPE_BOOLEAN,
- title="使用权限",
- description="使用权限 True|False"),
+ title=_('use permission'),
+ description=_('use permission True|False')),
'MANAGE': openapi.Schema(type=openapi.TYPE_BOOLEAN,
- title="管理权限",
- description="管理权限 True|False")
+ title=_('manage permission'),
+ description=_('manage permission True|False'))
}
)
class UpdateTeamMemberItemPermissionSerializer(ApiMixin, serializers.Serializer):
- target_id = serializers.CharField(required=True, error_messages=ErrMessage.char("目标id"))
- type = serializers.CharField(required=True, error_messages=ErrMessage.char("目标类型"))
+ target_id = serializers.CharField(required=True, error_messages=ErrMessage.char(_('target id')))
+ type = serializers.CharField(required=True, error_messages=ErrMessage.char(_('type')))
operate = TeamMemberPermissionOperate(required=True, many=False)
def get_request_body_api(self):
@@ -78,10 +79,10 @@ def get_request_body_api(self):
type=openapi.TYPE_OBJECT,
required=['id', 'type', 'operate'],
properties={
- 'target_id': openapi.Schema(type=openapi.TYPE_STRING, title="知识库/应用id",
- description="知识库或者应用的id"),
+ 'target_id': openapi.Schema(type=openapi.TYPE_STRING, title=_('dataset id/application id'),
+ description=_('dataset id/application id')),
'type': openapi.Schema(type=openapi.TYPE_STRING,
- title="类型",
+ title=_('type'),
description="DATASET|APPLICATION",
),
'operate': TeamMemberPermissionOperate().get_request_body_api()
@@ -100,7 +101,8 @@ def is_valid(self, *, user_id=None):
os.path.join(PROJECT_DIR, "apps", "setting", 'sql', 'check_member_permission_target_exists.sql')),
[json.dumps(permission_list), user_id, user_id])
if illegal_target_id_list is not None and len(illegal_target_id_list) > 0:
- raise AppApiException(500, '不存在的 应用|知识库id[' + str(illegal_target_id_list) + ']')
+ raise AppApiException(500,
+ _('Non-existent application|knowledge base id[') + str(illegal_target_id_list) + ']')
def update_or_save(self, member_id: str):
team_member_permission_list = self.data.get("team_member_permission_list")
@@ -134,8 +136,8 @@ def get_request_body_api(self):
required=['id'],
properties={
'team_member_permission_list':
- openapi.Schema(type=openapi.TYPE_ARRAY, title="权限数据",
- description="权限数据",
+ openapi.Schema(type=openapi.TYPE_ARRAY, title=_('Permission data'),
+ description=_('Permission data'),
items=UpdateTeamMemberItemPermissionSerializer().get_request_body_api()
),
}
@@ -143,7 +145,7 @@ def get_request_body_api(self):
class TeamMemberSerializer(ApiMixin, serializers.Serializer):
- team_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid("团队id"))
+ team_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid(_('team id')))
def is_valid(self, *, raise_exception=False):
super().is_valid(raise_exception=True)
@@ -152,8 +154,8 @@ def is_valid(self, *, raise_exception=False):
def get_bach_request_body_api():
return openapi.Schema(
type=openapi.TYPE_ARRAY,
- title="用户id列表",
- description="用户id列表",
+ title=_('user id list'),
+ description=_('user id list'),
items=openapi.Schema(type=openapi.TYPE_STRING)
)
@@ -163,12 +165,30 @@ def get_request_body_api():
type=openapi.TYPE_OBJECT,
required=['username_or_email'],
properties={
- 'username_or_email': openapi.Schema(type=openapi.TYPE_STRING, title="用户名或者邮箱",
- description="用户名或者邮箱"),
+ 'username_or_email': openapi.Schema(type=openapi.TYPE_STRING, title=_('Username or email'),
+ description=_('Username or email')),
}
)
+ @staticmethod
+ def get_response_body_api():
+ return openapi.Schema(
+ type=openapi.TYPE_OBJECT,
+ properties={
+ 'id': openapi.Schema(type=openapi.TYPE_STRING, title=_('user id'), description=_('user id')),
+ 'username': openapi.Schema(type=openapi.TYPE_STRING, title=_('Username'), description=_('Username')),
+ 'email': openapi.Schema(type=openapi.TYPE_STRING, title=_('Email'), description=_('Email')),
+ 'role': openapi.Schema(type=openapi.TYPE_STRING, title=_('Role'), description=_('Role')),
+ 'is_active': openapi.Schema(type=openapi.TYPE_STRING, title=_('Is active'),
+ description=_('Is active')),
+ 'team_id': openapi.Schema(type=openapi.TYPE_STRING, title=_('team id'), description=_('team id')),
+ 'user_id': openapi.Schema(type=openapi.TYPE_STRING, title=_('user id'), description=_('user id')),
+ 'type': openapi.Schema(type=openapi.TYPE_STRING, title=_('member type'),
+ description=_('member type manage|member')),
+ }
+ )
+
@transaction.atomic
def batch_add_member(self, user_id_list: List[str], with_valid=True):
"""
@@ -187,18 +207,20 @@ def batch_add_member(self, user_id_list: List[str], with_valid=True):
create_team_member_list = [
self.to_member_model(add_user_id, team_member_user_id_list, use_user_id_list, team_id) for add_user_id in
user_id_list]
- QuerySet(TeamMember).bulk_create(create_team_member_list) if len(create_team_member_list) > 0 else None
+ QuerySet(TeamMember).bulk_create(
+ [team_member for team_member in create_team_member_list if team_member is not None]) if len(
+ create_team_member_list) > 0 else None
return TeamMemberSerializer(
data={'team_id': self.data.get("team_id")}).list_member()
def to_member_model(self, add_user_id, team_member_user_id_list, use_user_id_list, user_id):
if use_user_id_list.__contains__(add_user_id):
if team_member_user_id_list.__contains__(add_user_id) or user_id == add_user_id:
- raise AppApiException(500, "团队中已存在当前成员,不要重复添加")
+ return None
else:
return TeamMember(team_id=self.data.get("team_id"), user_id=add_user_id)
else:
- raise AppApiException(500, "不存在的用户")
+ return None
def add_member(self, username_or_email: str, with_valid=True):
"""
@@ -210,14 +232,14 @@ def add_member(self, username_or_email: str, with_valid=True):
if with_valid:
self.is_valid(raise_exception=True)
if username_or_email is None:
- raise AppApiException(500, "用户名或者邮箱必填")
+ raise AppApiException(500, _('Username or email is required'))
user = QuerySet(User).filter(
Q(username=username_or_email) | Q(email=username_or_email)).first()
if user is None:
- raise AppApiException(500, "不存在的用户")
+ raise AppApiException(500, _('User does not exist'))
if QuerySet(TeamMember).filter(Q(team_id=self.data.get('team_id')) & Q(user=user)).exists() or self.data.get(
"team_id") == str(user.id):
- raise AppApiException(500, "团队中已存在当前成员,不要重复添加")
+ raise AppApiException(500, _('The current members already exist in the team, do not add them again.'))
TeamMember(team_id=self.data.get("team_id"), user=user).save()
return self.list_member(with_valid=False)
@@ -241,22 +263,22 @@ def list_member(self, with_valid=True):
def get_response_body_api(self):
return get_api_response(openapi.Schema(
- type=openapi.TYPE_ARRAY, title="成员列表", description="成员列表",
+ type=openapi.TYPE_ARRAY, title=_('member list'), description=_('member list'),
items=UserSerializer().get_response_body_api()
))
class Operate(ApiMixin, serializers.Serializer):
# 团队 成员id
- member_id = serializers.CharField(required=True, error_messages=ErrMessage.char("成员id"))
+ member_id = serializers.CharField(required=True, error_messages=ErrMessage.char(_('member id')))
# 团队id
- team_id = serializers.CharField(required=True, error_messages=ErrMessage.char("团队id"))
+ team_id = serializers.CharField(required=True, error_messages=ErrMessage.char(_('team id')))
def is_valid(self, *, raise_exception=True):
super().is_valid(raise_exception=True)
if self.data.get('member_id') != 'root' and not QuerySet(TeamMember).filter(
team_id=self.data.get('team_id'),
id=self.data.get('member_id')).exists():
- raise AppApiException(500, "不存在的成员,请先添加成员")
+ raise AppApiException(500, _('The member does not exist, please add a member first'))
return True
@@ -290,7 +312,7 @@ def edit(self, member_permission: Dict):
self.is_valid(raise_exception=True)
member_id = self.data.get("member_id")
if member_id == 'root':
- raise AppApiException(500, "管理员权限不允许修改")
+ raise AppApiException(500, _('Administrator rights do not allow modification'))
s = UpdateTeamMemberPermissionSerializer(data=member_permission)
s.is_valid(user_id=self.data.get("team_id"))
s.update_or_save(member_id)
@@ -304,7 +326,7 @@ def delete(self):
self.is_valid(raise_exception=True)
member_id = self.data.get("member_id")
if member_id == 'root':
- raise AppApiException(500, "无法移除团队管理员")
+ raise AppApiException(500, _('Unable to remove team admin'))
# 删除成员权限
QuerySet(TeamMemberPermission).filter(member_id=member_id).delete()
# 删除成员
@@ -317,4 +339,4 @@ def get_request_params_api():
in_=openapi.IN_PATH,
type=openapi.TYPE_STRING,
required=True,
- description='团队成员id')]
+ description=_('member id')), ]
diff --git a/apps/setting/serializers/valid_serializers.py b/apps/setting/serializers/valid_serializers.py
new file mode 100644
index 00000000000..1ddd393b51e
--- /dev/null
+++ b/apps/setting/serializers/valid_serializers.py
@@ -0,0 +1,55 @@
+# coding=utf-8
+"""
+ @project: MaxKB
+ @Author:虎
+ @file: valid_serializers.py
+ @date:2024/7/8 18:00
+ @desc:
+"""
+import re
+
+from django.core import validators
+from django.db.models import QuerySet
+from rest_framework import serializers
+
+from application.models import Application
+from common.exception.app_exception import AppApiException
+from common.models.db_model_manage import DBModelManage
+from common.util.field_message import ErrMessage
+from dataset.models import DataSet
+from users.models import User
+from django.utils.translation import gettext_lazy as _
+
+model_message_dict = {
+ 'dataset': {'model': DataSet, 'count': 50,
+ 'message': _(
+ 'The community version supports up to 50 knowledge bases. If you need more knowledge bases, please contact us (https://fit2cloud.com/).')},
+ 'application': {'model': Application, 'count': 5,
+ 'message': _(
+ 'The community version supports up to 5 applications. If you need more applications, please contact us (https://fit2cloud.com/).')},
+ 'user': {'model': User, 'count': 2,
+ 'message': _(
+ 'The community version supports up to 2 users. If you need more users, please contact us (https://fit2cloud.com/).')}
+}
+
+
+class ValidSerializer(serializers.Serializer):
+ valid_type = serializers.CharField(required=True, error_messages=ErrMessage.char(_('type')), validators=[
+ validators.RegexValidator(regex=re.compile("^application|dataset|user$"),
+ message="类型只支持:application|dataset|user", code=500)
+ ])
+ valid_count = serializers.IntegerField(required=True, error_messages=ErrMessage.integer(_('check quantity')))
+
+ def valid(self, is_valid=True):
+ if is_valid:
+ self.is_valid(raise_exception=True)
+ model_value = model_message_dict.get(self.data.get('valid_type'))
+ xpack_cache = DBModelManage.get_model('xpack_cache')
+ is_license_valid = xpack_cache.get('XPACK_LICENSE_IS_VALID', False) if xpack_cache is not None else False
+ if not is_license_valid:
+ if self.data.get('valid_count') != model_value.get('count'):
+ raise AppApiException(400, model_value.get('message'))
+ if QuerySet(
+ model_value.get('model')).count() >= model_value.get('count'):
+ raise AppApiException(400, model_value.get('message'))
+ return True
diff --git a/apps/setting/sql/get_member_permission.sql b/apps/setting/sql/get_member_permission.sql
index f6b2d953fcc..17edd80ad7f 100644
--- a/apps/setting/sql/get_member_permission.sql
+++ b/apps/setting/sql/get_member_permission.sql
@@ -8,7 +8,8 @@ FROM
"id",
"name",
'DATASET' AS "type",
- user_id
+ user_id,
+ "type" AS "icon"
FROM
dataset
WHERE
@@ -17,7 +18,8 @@ FROM
"id",
"name",
'APPLICATION' AS "type",
- user_id
+ user_id,
+ "icon" AS "icon"
FROM
application
WHERE
diff --git a/apps/setting/swagger_api/provide_api.py b/apps/setting/swagger_api/provide_api.py
index f68ac5be4d1..263b6c245c7 100644
--- a/apps/setting/swagger_api/provide_api.py
+++ b/apps/setting/swagger_api/provide_api.py
@@ -9,6 +9,7 @@
from drf_yasg import openapi
from common.mixins.api_mixin import ApiMixin
+from django.utils.translation import gettext_lazy as _
class ModelQueryApi(ApiMixin):
@@ -18,20 +19,20 @@ def get_request_params_api():
in_=openapi.IN_QUERY,
type=openapi.TYPE_STRING,
required=False,
- description='模型名称'),
+ description=_('name')),
openapi.Parameter(name='model_type', in_=openapi.IN_QUERY,
type=openapi.TYPE_STRING,
required=False,
- description='模型类型'),
+ description=_('model type')),
openapi.Parameter(name='model_name', in_=openapi.IN_QUERY,
type=openapi.TYPE_STRING,
required=False,
- description='基础模型名称'),
+ description=_('model name')),
openapi.Parameter(name='provider',
in_=openapi.IN_QUERY,
type=openapi.TYPE_STRING,
required=False,
- description='供应名称')
+ description=_('provider')),
]
@@ -39,22 +40,25 @@ class ModelEditApi(ApiMixin):
@staticmethod
def get_request_body_api():
return openapi.Schema(type=openapi.TYPE_OBJECT,
- title="调用函数所需要的参数",
- description="调用函数所需要的参数",
+ title=_('parameters required to call the function'),
+ description=_('parameters required to call the function'),
required=['provide', 'model_info'],
properties={
'name': openapi.Schema(type=openapi.TYPE_STRING,
- title="模型名称",
- description="模型名称"),
+ title=_('name'),
+ description=_('name')),
'model_type': openapi.Schema(type=openapi.TYPE_STRING,
- title="供应商",
- description="供应商"),
+ title=_('model type'),
+ description=_('model type')),
'model_name': openapi.Schema(type=openapi.TYPE_STRING,
- title="供应商",
- description="供应商"),
+ title=_('model name'),
+ description=_('model name')),
+ 'provider': openapi.Schema(type=openapi.TYPE_STRING,
+ title=_('provider'),
+ description=_('provider')),
'credential': openapi.Schema(type=openapi.TYPE_OBJECT,
- title="模型证书信息",
- description="模型证书信息")
+ title=_('model certificate information'),
+ description=_('model certificate information'))
}
)
@@ -64,25 +68,28 @@ class ModelCreateApi(ApiMixin):
@staticmethod
def get_request_body_api():
return openapi.Schema(type=openapi.TYPE_OBJECT,
- title="调用函数所需要的参数",
- description="调用函数所需要的参数",
+ title=_('parameters required to call the function'),
+ description=_('parameters required to call the function'),
required=['provide', 'model_info'],
properties={
'name': openapi.Schema(type=openapi.TYPE_STRING,
- title="模型名称",
- description="模型名称"),
+ title=_('name'),
+ description=_('name')),
'provider': openapi.Schema(type=openapi.TYPE_STRING,
- title="供应商",
- description="供应商"),
+ title=_('provider'),
+ description=_('provider')),
+ 'permission_type': openapi.Schema(type=openapi.TYPE_STRING, title=_('permission'),
+ description="PUBLIC|PRIVATE"),
'model_type': openapi.Schema(type=openapi.TYPE_STRING,
- title="供应商",
- description="供应商"),
+ title=_('model type'),
+ description=_('model type')),
'model_name': openapi.Schema(type=openapi.TYPE_STRING,
- title="供应商",
- description="供应商"),
+ title=_('model name'),
+ description=_('model name')),
'credential': openapi.Schema(type=openapi.TYPE_OBJECT,
- title="模型证书信息",
- description="模型证书信息")
+ title=_('model certificate information'),
+ description=_('model certificate information')),
+
}
)
@@ -95,7 +102,7 @@ def get_request_params_api():
in_=openapi.IN_QUERY,
type=openapi.TYPE_STRING,
required=True,
- description='供应名称'),
+ description=_('provider')),
]
@staticmethod
@@ -104,10 +111,10 @@ def get_response_body_api():
type=openapi.TYPE_OBJECT,
required=['key', 'value'],
properties={
- 'key': openapi.Schema(type=openapi.TYPE_STRING, title="模型类型描述",
- description="模型类型描述", default="大语言模型"),
- 'value': openapi.Schema(type=openapi.TYPE_STRING, title="模型类型值",
- description="模型类型值", default="LLM"),
+ 'key': openapi.Schema(type=openapi.TYPE_STRING, title=_('model type description'),
+ description=_('model type description'), default=_('large language model')),
+ 'value': openapi.Schema(type=openapi.TYPE_STRING, title=_('model type value'),
+ description=_('model type value'), default="LLM"),
}
)
@@ -119,12 +126,12 @@ def get_request_params_api():
in_=openapi.IN_QUERY,
type=openapi.TYPE_STRING,
required=True,
- description='供应名称'),
+ description=_('provider')),
openapi.Parameter(name='model_type',
in_=openapi.IN_QUERY,
type=openapi.TYPE_STRING,
required=True,
- description='模型类型'),
+ description=_('model type')),
]
@staticmethod
@@ -133,12 +140,12 @@ def get_response_body_api():
type=openapi.TYPE_OBJECT,
required=['name', 'desc', 'model_type'],
properties={
- 'name': openapi.Schema(type=openapi.TYPE_STRING, title="模型名称",
- description="模型名称", default="模型名称"),
- 'desc': openapi.Schema(type=openapi.TYPE_STRING, title="模型描述",
- description="模型描述", default="xxx模型"),
- 'model_type': openapi.Schema(type=openapi.TYPE_STRING, title="模型类型值",
- description="模型类型值", default="LLM"),
+ 'name': openapi.Schema(type=openapi.TYPE_STRING, title=_('name'),
+ description=_('name'), default=_('name')),
+ 'desc': openapi.Schema(type=openapi.TYPE_STRING, title=_('model description'),
+ description=_('model description')),
+ 'model_type': openapi.Schema(type=openapi.TYPE_STRING, title=_('model type value'),
+ description=_('model type value'), default="LLM"),
}
)
@@ -150,17 +157,17 @@ def get_request_params_api():
in_=openapi.IN_QUERY,
type=openapi.TYPE_STRING,
required=True,
- description='供应名称'),
+ description=_('provider')),
openapi.Parameter(name='model_type',
in_=openapi.IN_QUERY,
type=openapi.TYPE_STRING,
required=True,
- description='模型类型'),
+ description=_('model type')),
openapi.Parameter(name='model_name',
in_=openapi.IN_QUERY,
type=openapi.TYPE_STRING,
required=True,
- description='模型名称'),
+ description=_('model name')),
]
@staticmethod
@@ -169,17 +176,17 @@ def get_request_params_api():
in_=openapi.IN_PATH,
type=openapi.TYPE_STRING,
required=True,
- description='供应商'),
+ description=_('provider')),
openapi.Parameter(name='method',
in_=openapi.IN_PATH,
type=openapi.TYPE_STRING,
required=True,
- description='需要执行的函数'),
+ description=_('function that needs to be executed')),
]
@staticmethod
def get_request_body_api():
return openapi.Schema(type=openapi.TYPE_OBJECT,
- title="调用函数所需要的参数",
- description="调用函数所需要的参数",
+ title=_('parameters required to call the function'),
+ description=_('parameters required to call the function'),
)
diff --git a/apps/setting/swagger_api/system_setting.py b/apps/setting/swagger_api/system_setting.py
index 1246ff27d3a..282c20d206f 100644
--- a/apps/setting/swagger_api/system_setting.py
+++ b/apps/setting/swagger_api/system_setting.py
@@ -9,69 +9,70 @@
from drf_yasg import openapi
from common.mixins.api_mixin import ApiMixin
+from django.utils.translation import gettext_lazy as _
class SystemSettingEmailApi(ApiMixin):
@staticmethod
def get_request_body_api():
return openapi.Schema(type=openapi.TYPE_OBJECT,
- title="邮箱相关参数",
- description="邮箱相关参数",
+ title=_('Email related parameters'),
+ description=_('Email related parameters'),
required=['email_host', 'email_port', 'email_host_user', 'email_host_password',
'email_use_tls', 'email_use_ssl', 'from_email'],
properties={
'email_host': openapi.Schema(type=openapi.TYPE_STRING,
- title="SMTP 主机",
- description="SMTP 主机"),
+ title=_('SMTP host'),
+ description=_('SMTP host')),
'email_port': openapi.Schema(type=openapi.TYPE_NUMBER,
- title="SMTP 端口",
- description="SMTP 端口"),
+ title=_('SMTP port'),
+ description=_('SMTP port')),
'email_host_user': openapi.Schema(type=openapi.TYPE_STRING,
- title="发件人邮箱",
- description="发件人邮箱"),
+ title=_('Sender\'s email'),
+ description=_('Sender\'s email')),
'email_host_password': openapi.Schema(type=openapi.TYPE_STRING,
- title="密码",
- description="密码"),
+ title=_('Password'),
+ description=_('Password')),
'email_use_tls': openapi.Schema(type=openapi.TYPE_BOOLEAN,
- title="是否开启TLS",
- description="是否开启TLS"),
+ title=_('Whether to enable TLS'),
+ description=_('Whether to enable TLS')),
'email_use_ssl': openapi.Schema(type=openapi.TYPE_BOOLEAN,
- title="是否开启SSL",
- description="是否开启SSL"),
+ title=_('Whether to enable SSL'),
+ description=_('Whether to enable SSL')),
'from_email': openapi.Schema(type=openapi.TYPE_STRING,
- title="发送人邮箱",
- description="发送人邮箱")
+ title=_('Sender\'s email'),
+ description=_('Sender\'s email'))
}
)
@staticmethod
def get_response_body_api():
return openapi.Schema(type=openapi.TYPE_OBJECT,
- title="邮箱相关参数",
- description="邮箱相关参数",
+ title=_('Email related parameters'),
+ description=_('Email related parameters'),
required=['email_host', 'email_port', 'email_host_user', 'email_host_password',
'email_use_tls', 'email_use_ssl', 'from_email'],
properties={
'email_host': openapi.Schema(type=openapi.TYPE_STRING,
- title="SMTP 主机",
- description="SMTP 主机"),
+ title=_('SMTP host'),
+ description=_('SMTP host')),
'email_port': openapi.Schema(type=openapi.TYPE_NUMBER,
- title="SMTP 端口",
- description="SMTP 端口"),
+ title=_('SMTP port'),
+ description=_('SMTP port')),
'email_host_user': openapi.Schema(type=openapi.TYPE_STRING,
- title="发件人邮箱",
- description="发件人邮箱"),
+ title=_('Sender\'s email'),
+ description=_('Sender\'s email')),
'email_host_password': openapi.Schema(type=openapi.TYPE_STRING,
- title="密码",
- description="密码"),
+ title=_('Password'),
+ description=_('Password')),
'email_use_tls': openapi.Schema(type=openapi.TYPE_BOOLEAN,
- title="是否开启TLS",
- description="是否开启TLS"),
+ title=_('Whether to enable TLS'),
+ description=_('Whether to enable TLS')),
'email_use_ssl': openapi.Schema(type=openapi.TYPE_BOOLEAN,
- title="是否开启SSL",
- description="是否开启SSL"),
+ title=_('Whether to enable SSL'),
+ description=_('Whether to enable SSL')),
'from_email': openapi.Schema(type=openapi.TYPE_STRING,
- title="发送人邮箱",
- description="发送人邮箱")
+ title=_('Sender\'s email'),
+ description=_('Sender\'s email'))
}
)
diff --git a/apps/setting/swagger_api/valid_api.py b/apps/setting/swagger_api/valid_api.py
new file mode 100644
index 00000000000..f5bc5c9a294
--- /dev/null
+++ b/apps/setting/swagger_api/valid_api.py
@@ -0,0 +1,28 @@
+# coding=utf-8
+"""
+ @project: MaxKB
+ @Author:虎
+ @file: valid_api.py
+ @date:2024/7/8 17:52
+ @desc:
+"""
+from drf_yasg import openapi
+
+from common.mixins.api_mixin import ApiMixin
+from django.utils.translation import gettext_lazy as _
+
+
+class ValidApi(ApiMixin):
+ @staticmethod
+ def get_request_params_api():
+ return [openapi.Parameter(name='valid_type',
+ in_=openapi.IN_PATH,
+ type=openapi.TYPE_STRING,
+ required=True,
+ description=_('Verification type: application|dataset|user')),
+ openapi.Parameter(name='valid_count',
+ in_=openapi.IN_PATH,
+ type=openapi.TYPE_STRING,
+ required=True,
+ description=_('check quantity'))
+ ]
diff --git a/apps/setting/urls.py b/apps/setting/urls.py
index 42fea74ec7d..73fe9ba12db 100644
--- a/apps/setting/urls.py
+++ b/apps/setting/urls.py
@@ -1,3 +1,5 @@
+import os
+
from django.urls import path
from . import views
@@ -12,11 +14,26 @@
path('provider/model_type_list', views.Provide.ModelTypeList.as_view(), name="provider/model_type_list"),
path('provider/model_list', views.Provide.ModelList.as_view(),
name="provider/model_name_list"),
+ path('provider/model_params_form', views.Provide.ModelParamsForm.as_view(),
+ name="provider/model_params_form"),
path('provider/model_form', views.Provide.ModelForm.as_view(),
name="provider/model_form"),
path('model', views.Model.as_view(), name='model'),
+ path('model//model_params_form', views.Model.ModelParamsForm.as_view(),
+ name='model/model_params_form'),
path('model/', views.Model.Operate.as_view(), name='model/operate'),
+ path('model//pause_download', views.Model.PauseDownload.as_view(), name='model/operate'),
path('model//meta', views.Model.ModelMeta.as_view(), name='model/operate/meta'),
- path('email_setting', views.SystemSetting.Email.as_view(), name='email_setting')
+ path('email_setting', views.SystemSetting.Email.as_view(), name='email_setting'),
+ path('valid//', views.Valid.as_view())
]
+if os.environ.get('SERVER_NAME', 'web') == 'local_model':
+ urlpatterns += [
+ path('model//embed_documents', views.ModelApply.EmbedDocuments.as_view(),
+ name='model/embed_documents'),
+ path('model//embed_query', views.ModelApply.EmbedQuery.as_view(),
+ name='model/embed_query'),
+ path('model//compress_documents', views.ModelApply.CompressDocuments.as_view(),
+ name='model/embed_query'),
+ ]
diff --git a/apps/setting/views/Team.py b/apps/setting/views/Team.py
index 71710e3d67b..9c2ade72fbc 100644
--- a/apps/setting/views/Team.py
+++ b/apps/setting/views/Team.py
@@ -13,29 +13,36 @@
from common.auth import TokenAuth, has_permissions
from common.constants.permission_constants import PermissionConstants
+from common.log.log import log
from common.response import result
from setting.serializers.team_serializers import TeamMemberSerializer, get_response_body_api, \
UpdateTeamMemberPermissionSerializer
+from django.utils.translation import gettext_lazy as _
+
+from setting.views.common import get_member_operation_object, get_member_operation_object_batch
class TeamMember(APIView):
authentication_classes = [TokenAuth]
@action(methods=['GET'], detail=False)
- @swagger_auto_schema(operation_summary="获取团队成员列表",
- operation_id="获取团员成员列表",
+ @swagger_auto_schema(operation_summary=_('Get a list of team members'),
+ operation_id=_('Get a list of team members'),
responses=result.get_api_response(get_response_body_api()),
- tags=["团队"])
+ tags=[_('Team')])
@has_permissions(PermissionConstants.TEAM_READ)
def get(self, request: Request):
return result.success(TeamMemberSerializer(data={'team_id': str(request.user.id)}).list_member())
@action(methods=['POST'], detail=False)
- @swagger_auto_schema(operation_summary="添加成员",
- operation_id="添加成员",
+ @swagger_auto_schema(operation_summary=_('Add member'),
+ operation_id=_('Add member'),
request_body=TeamMemberSerializer().get_request_body_api(),
- tags=["团队"])
+ responses=result.get_default_response(),
+ tags=[_('Team')])
@has_permissions(PermissionConstants.TEAM_CREATE)
+ @log(menu='Team', operate='Add member',
+ get_operation_object=lambda r, k: {'name': r.data.get('username_or_email')})
def post(self, request: Request):
team = TeamMemberSerializer(data={'team_id': str(request.user.id)})
return result.success((team.add_member(**request.data)))
@@ -44,11 +51,14 @@ class Batch(APIView):
authentication_classes = [TokenAuth]
@action(methods=['POST'], detail=False)
- @swagger_auto_schema(operation_summary="批量添加成员",
- operation_id="批量添加成员",
+ @swagger_auto_schema(operation_summary=_('Add members in batches'),
+ operation_id=_('Add members in batches'),
request_body=TeamMemberSerializer.get_bach_request_body_api(),
- tags=["团队"])
+ responses=result.get_api_array_response(TeamMemberSerializer.get_response_body_api()),
+ tags=[_('Team')])
@has_permissions(PermissionConstants.TEAM_CREATE)
+ @log(menu='Team', operate='Add members in batches',
+ get_operation_object=lambda r, k: get_member_operation_object_batch(r.data))
def post(self, request: Request):
return result.success(
TeamMemberSerializer(data={'team_id': request.user.id}).batch_add_member(request.data))
@@ -57,34 +67,41 @@ class Operate(APIView):
authentication_classes = [TokenAuth]
@action(methods=['GET'], detail=False)
- @swagger_auto_schema(operation_summary="获取团队成员权限",
- operation_id="获取团队成员权限",
+ @swagger_auto_schema(operation_summary=_('Get team member permissions'),
+ operation_id=_('Get team member permissions'),
manual_parameters=TeamMemberSerializer.Operate.get_request_params_api(),
- tags=["团队"])
+ tags=[_('Team')])
@has_permissions(PermissionConstants.TEAM_READ)
def get(self, request: Request, member_id: str):
return result.success(TeamMemberSerializer.Operate(
data={'member_id': member_id, 'team_id': str(request.user.id)}).list_member_permission())
@action(methods=['PUT'], detail=False)
- @swagger_auto_schema(operation_summary="修改团队成员权限",
- operation_id="修改团队成员权限",
+ @swagger_auto_schema(operation_summary=_('Update team member permissions'),
+ operation_id=_('Update team member permissions'),
request_body=UpdateTeamMemberPermissionSerializer().get_request_body_api(),
+ responses=result.get_default_response(),
manual_parameters=TeamMemberSerializer.Operate.get_request_params_api(),
- tags=["团队"]
+ tags=[_('Team')]
)
@has_permissions(PermissionConstants.TEAM_EDIT)
+ @log(menu='Team', operate='Update team member permissions',
+ get_operation_object=lambda r, k: get_member_operation_object(k.get('member_id'))
+ )
def put(self, request: Request, member_id: str):
return result.success(TeamMemberSerializer.Operate(
data={'member_id': member_id, 'team_id': str(request.user.id)}).edit(request.data))
@action(methods=['DELETE'], detail=False)
- @swagger_auto_schema(operation_summary="移除成员",
- operation_id="移除成员",
+ @swagger_auto_schema(operation_summary=_('Remove member'),
+ operation_id=_('Remove member'),
manual_parameters=TeamMemberSerializer.Operate.get_request_params_api(),
- tags=["团队"]
+ responses=result.get_default_response(),
+ tags=[_('Team')]
)
@has_permissions(PermissionConstants.TEAM_DELETE)
+ @log(menu='Team', operate='Remove member',
+ get_operation_object=lambda r, k: get_member_operation_object(k.get('member_id')))
def delete(self, request: Request, member_id: str):
return result.success(TeamMemberSerializer.Operate(
data={'member_id': member_id, 'team_id': str(request.user.id)}).delete())
diff --git a/apps/setting/views/__init__.py b/apps/setting/views/__init__.py
index 0885ef978f8..4fe505635c6 100644
--- a/apps/setting/views/__init__.py
+++ b/apps/setting/views/__init__.py
@@ -9,3 +9,5 @@
from .Team import *
from .model import *
from .system_setting import *
+from .valid import *
+from .model_apply import *
diff --git a/apps/setting/views/common.py b/apps/setting/views/common.py
new file mode 100644
index 00000000000..ccc68e8ffa6
--- /dev/null
+++ b/apps/setting/views/common.py
@@ -0,0 +1,78 @@
+# coding=utf-8
+"""
+ @project: MaxKB
+ @Author:虎
+ @file: common.py
+ @date:2025/3/25 16:26
+ @desc:
+"""
+from django.db.models import QuerySet
+
+from common.util.common import encryption
+from setting.models import Model
+from users.models import User
+
+
+def get_model_operation_object(model_id):
+ model_model = QuerySet(model=Model).filter(id=model_id).first()
+ if model_model is not None:
+ return {
+ "name": model_model.name
+ }
+ return {}
+
+
+def get_member_operation_object(member_id):
+ user_model = QuerySet(model=User).filter(id=member_id).first()
+ if user_model is not None:
+ return {
+ "name": user_model.username
+ }
+ return {}
+
+
+def get_member_operation_object_batch(member_id_list):
+ user_model_list = QuerySet(model=User).filter(id__in=member_id_list)
+ if user_model_list is not None:
+ return {
+ "name": f'[{",".join([user.username for user in user_model_list])}]',
+ "user_list": [{'name': user.username} for user in user_model_list]
+ }
+ return {}
+
+
+def encryption_str(_value):
+ if isinstance(_value, str):
+ return encryption(_value)
+ return _value
+
+
+def encryption_credential(credential):
+ if isinstance(credential, dict):
+ return {key: encryption_str(credential.get(key)) for key in credential}
+ return credential
+
+
+def get_edit_model_details(request):
+ path = request.path
+ body = request.data
+ query = request.query_params
+ credential = body.get('credential', {})
+ credential_encryption_ed = encryption_credential(credential)
+ return {
+ 'path': path,
+ 'body': {**body, 'credential': credential_encryption_ed},
+ 'query': query
+ }
+
+
+def get_email_details(request):
+ path = request.path
+ body = request.data
+ query = request.query_params
+ email_host_password = body.get('email_host_password', '')
+ return {
+ 'path': path,
+ 'body': {**body, 'email_host_password': encryption_str(email_host_password)},
+ 'query': query
+ }
diff --git a/apps/setting/views/model.py b/apps/setting/views/model.py
index 7ba0304fcc0..4fe13d1d95f 100644
--- a/apps/setting/views/model.py
+++ b/apps/setting/views/model.py
@@ -13,32 +13,42 @@
from common.auth import TokenAuth, has_permissions
from common.constants.permission_constants import PermissionConstants
+from common.log.log import log
from common.response import result
from common.util.common import query_params_to_single_dict
from setting.models_provider.constants.model_provider_constants import ModelProvideConstants
-from setting.serializers.provider_serializers import ProviderSerializer, ModelSerializer
+from setting.serializers.provider_serializers import ProviderSerializer, ModelSerializer, \
+ get_default_model_params_setting
from setting.swagger_api.provide_api import ProvideApi, ModelCreateApi, ModelQueryApi, ModelEditApi
+from django.utils.translation import gettext_lazy as _
+
+from setting.views.common import get_model_operation_object, get_edit_model_details
class Model(APIView):
authentication_classes = [TokenAuth]
@action(methods=['POST'], detail=False)
- @swagger_auto_schema(operation_summary="创建模型",
- operation_id="创建模型",
- request_body=ModelCreateApi.get_request_body_api()
- , tags=["模型"])
+ @swagger_auto_schema(operation_summary=_('Create model'),
+ operation_id=_('Create model'),
+ request_body=ModelCreateApi.get_request_body_api(),
+ manual_parameters=result.get_api_response(ModelCreateApi.get_request_body_api())
+ , tags=[_('model')])
@has_permissions(PermissionConstants.MODEL_CREATE)
+ @log(menu='model', operate='Create model',
+ get_operation_object=lambda r, k: {'name': r.data.get('name')},
+ get_details=get_edit_model_details)
def post(self, request: Request):
return result.success(
ModelSerializer.Create(data={**request.data, 'user_id': str(request.user.id)}).insert(request.user.id,
with_valid=True))
@action(methods=['PUT'], detail=False)
- @swagger_auto_schema(operation_summary="下载模型,只试用与Ollama平台",
- operation_id="下载模型,只试用与Ollama平台",
- request_body=ModelCreateApi.get_request_body_api()
- , tags=["模型"])
+ @swagger_auto_schema(operation_summary=_('Download model, trial only with Ollama platform'),
+ operation_id=_('Download model, trial only with Ollama platform'),
+ request_body=ModelCreateApi.get_request_body_api(),
+ responses=result.get_api_response(ModelCreateApi.get_request_body_api())
+ , tags=[_('model')])
@has_permissions(PermissionConstants.MODEL_CREATE)
def put(self, request: Request):
return result.success(
@@ -46,10 +56,10 @@ def put(self, request: Request):
with_valid=True))
@action(methods=['GET'], detail=False)
- @swagger_auto_schema(operation_summary="获取模型列表",
- operation_id="获取模型列表",
+ @swagger_auto_schema(operation_summary=_('Get model list'),
+ operation_id=_('Get model list'),
manual_parameters=ModelQueryApi.get_request_params_api()
- , tags=["模型"])
+ , tags=[_('model')])
@has_permissions(PermissionConstants.MODEL_READ)
def get(self, request: Request):
return result.success(
@@ -61,42 +71,88 @@ class ModelMeta(APIView):
authentication_classes = [TokenAuth]
@action(methods=['GET'], detail=False)
- @swagger_auto_schema(operation_summary="查询模型meta信息,该接口不携带认证信息",
- operation_id="查询模型meta信息,该接口不携带认证信息",
- tags=["模型"])
+ @swagger_auto_schema(operation_summary=_(
+ 'Query model meta information, this interface does not carry authentication information'),
+ operation_id=_(
+ 'Query model meta information, this interface does not carry authentication information'),
+ tags=[_('model')])
@has_permissions(PermissionConstants.MODEL_READ)
def get(self, request: Request, model_id: str):
return result.success(
ModelSerializer.Operate(data={'id': model_id, 'user_id': request.user.id}).one_meta(with_valid=True))
+ class PauseDownload(APIView):
+ authentication_classes = [TokenAuth]
+
+ @action(methods=['PUT'], detail=False)
+ @swagger_auto_schema(operation_summary=_('Pause model download'),
+ operation_id=_('Pause model download'),
+ tags=[_('model')])
+ @has_permissions(PermissionConstants.MODEL_CREATE)
+ def put(self, request: Request, model_id: str):
+ return result.success(
+ ModelSerializer.Operate(data={'id': model_id, 'user_id': request.user.id}).pause_download())
+
+ class ModelParamsForm(APIView):
+ authentication_classes = [TokenAuth]
+
+ @action(methods=['GET'], detail=False)
+ @swagger_auto_schema(operation_summary=_('Get model parameter form'),
+ operation_id=_('Get model parameter form'),
+ manual_parameters=ProvideApi.ModelForm.get_request_params_api(),
+ tags=[_('model')])
+ @has_permissions(PermissionConstants.MODEL_READ)
+ def get(self, request: Request, model_id: str):
+ return result.success(
+ ModelSerializer.ModelParams(data={'id': model_id, 'user_id': request.user.id}).get_model_params())
+
+ @action(methods=['PUT'], detail=False)
+ @swagger_auto_schema(operation_summary=_('Save model parameter form'),
+ operation_id=_('Save model parameter form'),
+ manual_parameters=ProvideApi.ModelForm.get_request_params_api(),
+ tags=[_('model')])
+ @has_permissions(PermissionConstants.MODEL_READ)
+ @log(menu='model', operate='Save model parameter form',
+ get_operation_object=lambda r, k: get_model_operation_object(k.get('model_id')))
+ def put(self, request: Request, model_id: str):
+ return result.success(
+ ModelSerializer.ModelParamsForm(data={'id': model_id, 'user_id': request.user.id})
+ .save_model_params_form(request.data))
+
class Operate(APIView):
authentication_classes = [TokenAuth]
@action(methods=['PUT'], detail=False)
- @swagger_auto_schema(operation_summary="修改模型",
- operation_id="修改模型",
- request_body=ModelEditApi.get_request_body_api()
- , tags=["模型"])
+ @swagger_auto_schema(operation_summary=_('Update model'),
+ operation_id=_('Update model'),
+ request_body=ModelEditApi.get_request_body_api(),
+ responses=result.get_api_response(ModelEditApi.get_request_body_api())
+ , tags=[_('model')])
@has_permissions(PermissionConstants.MODEL_CREATE)
+ @log(menu='model', operate='Update model',
+ get_operation_object=lambda r, k: get_model_operation_object(k.get('model_id')),
+ get_details=get_edit_model_details)
def put(self, request: Request, model_id: str):
return result.success(
ModelSerializer.Operate(data={'id': model_id, 'user_id': request.user.id}).edit(request.data,
str(request.user.id)))
@action(methods=['DELETE'], detail=False)
- @swagger_auto_schema(operation_summary="删除模型",
- operation_id="删除模型",
+ @swagger_auto_schema(operation_summary=_('Delete model'),
+ operation_id=_('Delete model'),
responses=result.get_default_response()
- , tags=["模型"])
+ , tags=[_('model')])
@has_permissions(PermissionConstants.MODEL_DELETE)
+ @log(menu='model', operate='Delete model',
+ get_operation_object=lambda r, k: get_model_operation_object(k.get('model_id')))
def delete(self, request: Request, model_id: str):
return result.success(
ModelSerializer.Operate(data={'id': model_id, 'user_id': request.user.id}).delete())
@action(methods=['GET'], detail=False)
- @swagger_auto_schema(operation_summary="查询模型详细信息",
- operation_id="查询模型详细信息",
- tags=["模型"])
+ @swagger_auto_schema(operation_summary=_('Query model details'),
+ operation_id=_('Query model details'),
+ tags=[_('model')])
@has_permissions(PermissionConstants.MODEL_READ)
def get(self, request: Request, model_id: str):
return result.success(
@@ -110,22 +166,32 @@ class Exec(APIView):
authentication_classes = [TokenAuth]
@action(methods=['POST'], detail=False)
- @swagger_auto_schema(operation_summary="调用供应商函数,获取表单数据",
- operation_id="调用供应商函数,获取表单数据",
+ @swagger_auto_schema(operation_summary=_('Call the supplier function to obtain form data'),
+ operation_id=_('Call the supplier function to obtain form data'),
manual_parameters=ProvideApi.get_request_params_api(),
- request_body=ProvideApi.get_request_body_api()
- , tags=["模型"])
+ request_body=ProvideApi.get_request_body_api(),
+ responses=result.get_api_response(ProvideApi.get_request_body_api())
+ , tags=[_('model')])
@has_permissions(PermissionConstants.MODEL_READ)
+ @log(menu='model', operate='Call the supplier function to obtain form data')
def post(self, request: Request, provider: str, method: str):
return result.success(
ProviderSerializer(data={'provider': provider, 'method': method}).exec(request.data, with_valid=True))
@action(methods=['GET'], detail=False)
- @swagger_auto_schema(operation_summary="获取模型供应商数据",
- operation_id="获取模型供应商列表"
- , tags=["模型"])
+ @swagger_auto_schema(operation_summary=_('Get a list of model suppliers'),
+ operation_id=_('Get a list of model suppliers')
+ , tags=[_('model')])
@has_permissions(PermissionConstants.MODEL_READ)
def get(self, request: Request):
+ model_type = request.query_params.get('model_type')
+ if model_type:
+ providers = []
+ for key in ModelProvideConstants.__members__:
+ if len([item for item in ModelProvideConstants[key].value.get_model_type_list() if
+ item['value'] == model_type]) > 0:
+ providers.append(ModelProvideConstants[key].value.get_model_provide_info().to_dict())
+ return result.success(providers)
return result.success(
[ModelProvideConstants[key].value.get_model_provide_info().to_dict() for key in
ModelProvideConstants.__members__])
@@ -134,11 +200,11 @@ class ModelTypeList(APIView):
authentication_classes = [TokenAuth]
@action(methods=['GET'], detail=False)
- @swagger_auto_schema(operation_summary="获取模型类型列表",
- operation_id="获取模型类型类型列表",
+ @swagger_auto_schema(operation_summary=_('Get a list of model types'),
+ operation_id=_('Get a list of model types'),
manual_parameters=ProvideApi.ModelTypeList.get_request_params_api(),
responses=result.get_api_array_response(ProvideApi.ModelTypeList.get_response_body_api())
- , tags=["模型"])
+ , tags=[_('model')])
@has_permissions(PermissionConstants.MODEL_READ)
def get(self, request: Request):
provider = request.query_params.get('provider')
@@ -148,11 +214,11 @@ class ModelList(APIView):
authentication_classes = [TokenAuth]
@action(methods=['GET'], detail=False)
- @swagger_auto_schema(operation_summary="获取模型列表",
- operation_id="获取模型创建表单",
+ @swagger_auto_schema(operation_summary=_('Get the model creation form'),
+ operation_id=_('Get the model creation form'),
manual_parameters=ProvideApi.ModelList.get_request_params_api(),
responses=result.get_api_array_response(ProvideApi.ModelList.get_response_body_api())
- , tags=["模型"]
+ , tags=[_('model')]
)
@has_permissions(PermissionConstants.MODEL_READ)
def get(self, request: Request):
@@ -163,14 +229,32 @@ def get(self, request: Request):
ModelProvideConstants[provider].value.get_model_list(
model_type))
+ class ModelParamsForm(APIView):
+ authentication_classes = [TokenAuth]
+
+ @action(methods=['GET'], detail=False)
+ @swagger_auto_schema(operation_summary=_('Get model default parameters'),
+ operation_id=_('Get the model creation form'),
+ manual_parameters=ProvideApi.ModelList.get_request_params_api(),
+ responses=result.get_api_array_response(ProvideApi.ModelList.get_response_body_api())
+ , tags=[_('model')]
+ )
+ @has_permissions(PermissionConstants.MODEL_READ)
+ def get(self, request: Request):
+ provider = request.query_params.get('provider')
+ model_type = request.query_params.get('model_type')
+ model_name = request.query_params.get('model_name')
+
+ return result.success(get_default_model_params_setting(provider, model_type, model_name))
+
class ModelForm(APIView):
authentication_classes = [TokenAuth]
@action(methods=['GET'], detail=False)
- @swagger_auto_schema(operation_summary="获取模型创建表单",
- operation_id="获取模型创建表单",
+ @swagger_auto_schema(operation_summary=_('Get the model creation form'),
+ operation_id=_('Get the model creation form'),
manual_parameters=ProvideApi.ModelForm.get_request_params_api(),
- tags=["模型"])
+ tags=[_('model')])
@has_permissions(PermissionConstants.MODEL_READ)
def get(self, request: Request):
provider = request.query_params.get('provider')
diff --git a/apps/setting/views/model_apply.py b/apps/setting/views/model_apply.py
new file mode 100644
index 00000000000..73fb699f039
--- /dev/null
+++ b/apps/setting/views/model_apply.py
@@ -0,0 +1,49 @@
+# coding=utf-8
+"""
+ @project: MaxKB
+ @Author:虎
+ @file: model_apply.py
+ @date:2024/8/20 20:38
+ @desc:
+"""
+from urllib.request import Request
+
+from drf_yasg.utils import swagger_auto_schema
+from rest_framework.decorators import action
+from rest_framework.views import APIView
+
+from common.response import result
+from setting.serializers.model_apply_serializers import ModelApplySerializers
+from django.utils.translation import gettext_lazy as _
+
+
+class ModelApply(APIView):
+ class EmbedDocuments(APIView):
+ @action(methods=['POST'], detail=False)
+ @swagger_auto_schema(operation_summary=_('Vectorization documentation'),
+ operation_id=_('Vectorization documentation'),
+ responses=result.get_default_response(),
+ tags=[_('model')])
+ def post(self, request: Request, model_id):
+ return result.success(
+ ModelApplySerializers(data={'model_id': model_id}).embed_documents(request.data))
+
+ class EmbedQuery(APIView):
+ @action(methods=['POST'], detail=False)
+ @swagger_auto_schema(operation_summary=_('Vectorization documentation'),
+ operation_id=_('Vectorization documentation'),
+ responses=result.get_default_response(),
+ tags=[_('model')])
+ def post(self, request: Request, model_id):
+ return result.success(
+ ModelApplySerializers(data={'model_id': model_id}).embed_query(request.data))
+
+ class CompressDocuments(APIView):
+ @action(methods=['POST'], detail=False)
+ @swagger_auto_schema(operation_summary=_('Reorder documents'),
+ operation_id=_('Reorder documents'),
+ responses=result.get_default_response(),
+ tags=[_('model')])
+ def post(self, request: Request, model_id):
+ return result.success(
+ ModelApplySerializers(data={'model_id': model_id}).compress_documents(request.data))
diff --git a/apps/setting/views/system_setting.py b/apps/setting/views/system_setting.py
index e08a4702e3a..64dda262e08 100644
--- a/apps/setting/views/system_setting.py
+++ b/apps/setting/views/system_setting.py
@@ -14,9 +14,13 @@
from common.auth import TokenAuth, has_permissions
from common.constants.permission_constants import RoleConstants
+from common.log.log import log
from common.response import result
from setting.serializers.system_setting import SystemSettingSerializer
from setting.swagger_api.system_setting import SystemSettingEmailApi
+from django.utils.translation import gettext_lazy as _
+
+from setting.views.common import get_email_details
class SystemSetting(APIView):
@@ -24,33 +28,39 @@ class Email(APIView):
authentication_classes = [TokenAuth]
@action(methods=['PUT'], detail=False)
- @swagger_auto_schema(operation_summary="创建或者修改邮箱设置",
- operation_id="创建或者修改邮箱设置",
- request_body=SystemSettingEmailApi.get_request_body_api(), tags=["邮箱设置"],
+ @swagger_auto_schema(operation_summary=_('Create or update email settings'),
+ operation_id=_('Create or update email settings'),
+ request_body=SystemSettingEmailApi.get_request_body_api(), tags=[_('Email settings')],
responses=result.get_api_response(SystemSettingEmailApi.get_response_body_api()))
@has_permissions(RoleConstants.ADMIN)
+ @log(menu='Email settings', operate='Create or update email settings',
+ get_details=get_email_details
+ )
def put(self, request: Request):
return result.success(
SystemSettingSerializer.EmailSerializer.Create(
data=request.data).update_or_save())
@action(methods=['POST'], detail=False)
- @swagger_auto_schema(operation_summary="测试邮箱设置",
- operation_id="测试邮箱设置",
+ @swagger_auto_schema(operation_summary=_('Test email settings'),
+ operation_id=_('Test email settings'),
request_body=SystemSettingEmailApi.get_request_body_api(),
responses=result.get_default_response(),
- tags=["邮箱设置"])
+ tags=[_('Email settings')])
@has_permissions(RoleConstants.ADMIN)
+ @log(menu='Email settings', operate='Test email settings',
+ get_details=get_email_details
+ )
def post(self, request: Request):
return result.success(
SystemSettingSerializer.EmailSerializer.Create(
data=request.data).is_valid())
@action(methods=['GET'], detail=False)
- @swagger_auto_schema(operation_summary="获取邮箱设置",
- operation_id="获取邮箱设置",
+ @swagger_auto_schema(operation_summary=_('Get email settings'),
+ operation_id=_('Get email settings'),
responses=result.get_api_response(SystemSettingEmailApi.get_response_body_api()),
- tags=["邮箱设置"])
+ tags=[_('Email settings')])
@has_permissions(RoleConstants.ADMIN)
def get(self, request: Request):
return result.success(
diff --git a/apps/setting/views/valid.py b/apps/setting/views/valid.py
new file mode 100644
index 00000000000..c52b8905ef2
--- /dev/null
+++ b/apps/setting/views/valid.py
@@ -0,0 +1,33 @@
+# coding=utf-8
+"""
+ @project: MaxKB
+ @Author:虎
+ @file: valid.py
+ @date:2024/7/8 17:50
+ @desc:
+"""
+from drf_yasg.utils import swagger_auto_schema
+from rest_framework.decorators import action
+from rest_framework.request import Request
+from rest_framework.views import APIView
+
+from common.auth import TokenAuth, has_permissions
+from common.constants.permission_constants import RoleConstants
+from common.response import result
+from setting.serializers.valid_serializers import ValidSerializer
+from setting.swagger_api.valid_api import ValidApi
+from django.utils.translation import gettext_lazy as _
+
+
+class Valid(APIView):
+ authentication_classes = [TokenAuth]
+
+ @action(methods=['GET'], detail=False)
+ @swagger_auto_schema(operation_summary=_('Get verification results'),
+ operation_id=_('Get verification results'),
+ manual_parameters=ValidApi.get_request_params_api(),
+ responses=result.get_default_response()
+ , tags=["校验"])
+ @has_permissions(RoleConstants.ADMIN, RoleConstants.USER)
+ def get(self, request: Request, valid_type: str, valid_count: int):
+ return result.success(ValidSerializer(data={'valid_type': valid_type, 'valid_count': valid_count}).valid())
diff --git a/apps/smartdoc/conf.py b/apps/smartdoc/conf.py
index 27e1e8b08fa..8da97883ca9 100644
--- a/apps/smartdoc/conf.py
+++ b/apps/smartdoc/conf.py
@@ -13,6 +13,7 @@
import re
from importlib import import_module
from urllib.parse import urljoin, urlparse
+
import yaml
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
@@ -75,25 +76,25 @@ class DoesNotExist(Exception):
class Config(dict):
defaults = {
# 数据库相关配置
- "DB_HOST": "",
- "DB_PORT": "",
- "DB_USER": "",
- "DB_PASSWORD": "",
- "DB_ENGINE": "django.db.backends.postgresql_psycopg2",
- # 邮件相关配置
- "EMAIL_ADDRESS": "",
- "EMAIL_USE_TLS": False,
- "EMAIL_USE_SSL": True,
- "EMAIL_HOST": "",
- "EMAIL_PORT": 465,
- "EMAIL_HOST_USER": "",
- "EMAIL_HOST_PASSWORD": "",
+ "DB_HOST": "127.0.0.1",
+ "DB_PORT": 5432,
+ "DB_USER": "root",
+ "DB_PASSWORD": "Password123@postgres",
+ "DB_ENGINE": "dj_db_conn_pool.backends.postgresql",
+ "DB_MAX_OVERFLOW": 80,
+ 'LANGUAGE_CODE': 'zh-CN',
# 向量模型
"EMBEDDING_MODEL_NAME": "shibing624/text2vec-base-chinese",
"EMBEDDING_DEVICE": "cpu",
"EMBEDDING_MODEL_PATH": os.path.join(PROJECT_DIR, 'models'),
# 向量库配置
- "VECTOR_STORE_NAME": 'pg_vector'
+ "VECTOR_STORE_NAME": 'pg_vector',
+ "DEBUG": False,
+ 'SANDBOX': False,
+ 'LOCAL_MODEL_HOST': '127.0.0.1',
+ 'LOCAL_MODEL_PORT': '11636',
+ 'LOCAL_MODEL_PROTOCOL': "http",
+ 'LOCAL_MODEL_HOST_WORKER': 1
}
@@ -110,9 +111,17 @@ def get_db_setting(self) -> dict:
"PORT": self.get('DB_PORT'),
"USER": self.get('DB_USER'),
"PASSWORD": self.get('DB_PASSWORD'),
- "ENGINE": self.get('DB_ENGINE')
+ "ENGINE": self.get('DB_ENGINE'),
+ "POOL_OPTIONS": {
+ "POOL_SIZE": 20,
+ "MAX_OVERFLOW": int(self.get('DB_MAX_OVERFLOW')),
+ 'RECYCLE': 30 * 60
+ }
}
+ def get_language_code(self):
+ return self.get('LANGUAGE_CODE', 'zh-CN')
+
def __init__(self, *args):
super().__init__(*args)
@@ -180,8 +189,36 @@ def load_from_yml(self):
loaded = self.from_yaml(i)
if loaded:
return True
+ msg = f"""
- return False
+ Error: No config file found.
+
+ You can run `cp config_example.yml {self.root_path}/config.yml`, and edit it.
+
+ """
+ raise ImportError(msg)
+
+ def load_from_env(self):
+ keys = os.environ.keys()
+ config = {key.replace('MAXKB_', ''): os.environ.get(key) for key in keys if key.startswith('MAXKB_')}
+ if len(config.keys()) <= 0:
+ msg = f"""
+
+ Error: No config env found.
+
+ Please set environment variables
+ MAXKB_CONFIG_TYPE: 配置文件读取方式 FILE: 使用配置文件配置 ENV: 使用ENV配置
+ MAXKB_DB_NAME: 数据库名称
+ MAXKB_DB_HOST: 数据库主机
+ MAXKB_DB_PORT: 数据库端口
+ MAXKB_DB_USER: 数据库用户名
+ MAXKB_DB_PASSWORD: 数据库密码
+ MAXKB_EMBEDDING_MODEL_PATH: 向量模型目录
+ MAXKB_EMBEDDING_MODEL_NAME: 向量模型名称
+ """
+ raise ImportError(msg)
+ self.from_mapping(config)
+ return True
@classmethod
def load_user_config(cls, root_path=None, config_class=None):
@@ -190,15 +227,10 @@ def load_user_config(cls, root_path=None, config_class=None):
if not root_path:
root_path = PROJECT_DIR
manager = cls(root_path=root_path)
- if manager.load_from_yml():
- config = manager.config
+ config_type = os.environ.get('MAXKB_CONFIG_TYPE')
+ if config_type is None or config_type != 'ENV':
+ manager.load_from_yml()
else:
- msg = f"""
-
- Error: No config file found.
-
- You can run `cp config_example.yml {root_path}/config.yml`, and edit it.
-
- """
- raise ImportError(msg)
+ manager.load_from_env()
+ config = manager.config
return config
diff --git a/apps/smartdoc/settings/__init__.py b/apps/smartdoc/settings/__init__.py
index 2908253e386..4e7ea78e3b3 100644
--- a/apps/smartdoc/settings/__init__.py
+++ b/apps/smartdoc/settings/__init__.py
@@ -8,3 +8,5 @@
"""
from .base import *
from .logging import *
+from .auth import *
+from .lib import *
diff --git a/apps/smartdoc/settings/auth.py b/apps/smartdoc/settings/auth.py
new file mode 100644
index 00000000000..077f98b3abb
--- /dev/null
+++ b/apps/smartdoc/settings/auth.py
@@ -0,0 +1,19 @@
+# coding=utf-8
+"""
+ @project: MaxKB
+ @Author:虎
+ @file: auth.py
+ @date:2024/7/9 18:47
+ @desc:
+"""
+USER_TOKEN_AUTH = 'common.auth.handle.impl.user_token.UserToken'
+
+PUBLIC_ACCESS_TOKEN_AUTH = 'common.auth.handle.impl.public_access_token.PublicAccessToken'
+
+APPLICATION_KEY_AUTH = 'common.auth.handle.impl.application_key.ApplicationKey'
+
+AUTH_HANDLES = [
+ USER_TOKEN_AUTH,
+ PUBLIC_ACCESS_TOKEN_AUTH,
+ APPLICATION_KEY_AUTH
+]
diff --git a/apps/smartdoc/settings/base.py b/apps/smartdoc/settings/base.py
index 04e8810e5ee..de81420798a 100644
--- a/apps/smartdoc/settings/base.py
+++ b/apps/smartdoc/settings/base.py
@@ -3,13 +3,15 @@
import os
from pathlib import Path
+from PIL import Image
+
from ..const import CONFIG, PROJECT_DIR
mimetypes.add_type("text/css", ".css", True)
mimetypes.add_type("text/javascript", ".js", True)
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
-
+Image.MAX_IMAGE_PIXELS = 20000000000
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/4.2/howto/deployment/checklist/
@@ -24,6 +26,8 @@
'default': CONFIG.get_db_setting()
}
+SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
+
# Application definition
INSTALLED_APPS = [
@@ -38,24 +42,30 @@
'rest_framework',
"drf_yasg", # swagger 接口
'django_filters', # 条件过滤
- 'django_apscheduler'
+ 'django_apscheduler',
+ 'common',
+ 'function_lib',
+ 'django_celery_beat'
]
MIDDLEWARE = [
+ 'django.middleware.locale.LocaleMiddleware',
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
+ 'common.middleware.gzip.GZipMiddleware',
'common.middleware.static_headers_middleware.StaticHeadersMiddleware',
- 'common.middleware.cross_domain_middleware.CrossDomainMiddleware'
-
+ 'common.middleware.cross_domain_middleware.CrossDomainMiddleware',
+ 'common.middleware.doc_headers_middleware.DocHeadersMiddleware'
]
JWT_AUTH = {
'JWT_EXPIRATION_DELTA': datetime.timedelta(seconds=60 * 60 * 2) # <-- 设置token有效时间
}
+APPS_DIR = os.path.join(PROJECT_DIR, 'apps')
ROOT_URLCONF = 'smartdoc.urls'
# FORCE_SCRIPT_NAME
TEMPLATES = [
@@ -76,6 +86,7 @@
SWAGGER_SETTINGS = {
'DEFAULT_AUTO_SCHEMA_CLASS': 'common.config.swagger_conf.CustomSwaggerAutoSchema',
+ 'DEFAULT_GENERATOR_CLASS': 'common.config.swagger_conf.CustomOpenAPISchemaGenerator',
"DEFAULT_MODEL_RENDERING": "example",
'USE_SESSION_AUTH': False,
'SECURITY_DEFINITIONS': {
@@ -91,9 +102,20 @@
CACHES = {
"default": {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
+ 'LOCATION': 'unique-snowflake',
+ 'TIMEOUT': 60 * 30,
+ 'OPTIONS': {
+ 'MAX_ENTRIES': 150,
+ 'CULL_FREQUENCY': 5,
+ }
+ },
+ 'default_file': {
+ 'BACKEND': 'common.cache.file_cache.FileCache',
+ 'LOCATION': os.path.join(PROJECT_DIR, 'data', 'cache', "default_file_cache") # 文件夹路径
},
- 'model_cache': {
- 'BACKEND': 'common.cache.mem_cache.MemCache'
+ 'chat_cache': {
+ 'BACKEND': 'common.cache.file_cache.FileCache',
+ 'LOCATION': os.path.join(PROJECT_DIR, 'data', 'cache', "chat_cache") # 文件夹路径
},
# 存储用户信息
'user_cache': {
@@ -105,8 +127,9 @@
'BACKEND': 'common.cache.file_cache.FileCache',
'LOCATION': os.path.join(PROJECT_DIR, 'data', 'cache', "token_cache") # 文件夹路径
},
- "chat_cache": {
- 'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
+ 'captcha_cache': {
+ 'BACKEND': 'common.cache.file_cache.FileCache',
+ 'LOCATION': os.path.join(PROJECT_DIR, 'data', 'cache', "captcha_cache") # 文件夹路径
}
}
@@ -156,13 +179,31 @@
# Internationalization
# https://docs.djangoproject.com/en/4.2/topics/i18n/
-LANGUAGE_CODE = 'en-us'
-
TIME_ZONE = CONFIG.get_time_zone()
+# 启用国际化
USE_I18N = True
-USE_TZ = False
+# 启用本地化
+USE_L10N = True
+
+# 启用时区
+USE_TZ = True
+
+# 默认语言
+LANGUAGE_CODE = CONFIG.get("LANGUAGE_CODE")
+
+# 支持的语言
+LANGUAGES = [
+ ('en', 'English'),
+ ('zh', '中文简体'),
+ ('zh-hant', '中文繁体')
+]
+
+# 翻译文件路径
+LOCALE_PATHS = [
+ os.path.join(BASE_DIR.parent, 'locales')
+]
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/4.2/howto/static-files/
diff --git a/apps/smartdoc/settings/lib.py b/apps/smartdoc/settings/lib.py
new file mode 100644
index 00000000000..fc1d3244f47
--- /dev/null
+++ b/apps/smartdoc/settings/lib.py
@@ -0,0 +1,48 @@
+# coding=utf-8
+"""
+ @project: MaxKB
+ @Author:虎
+ @file: lib.py
+ @date:2024/8/16 17:12
+ @desc:
+"""
+import os
+import shutil
+
+from smartdoc.const import CONFIG, PROJECT_DIR
+
+# celery相关配置
+celery_data_dir = os.path.join(PROJECT_DIR, 'data', 'celery_task')
+if not os.path.exists(celery_data_dir) or not os.path.isdir(celery_data_dir):
+ os.makedirs(celery_data_dir)
+broker_path = os.path.join(celery_data_dir, "celery_db.sqlite3")
+backend_path = os.path.join(celery_data_dir, "celery_results.sqlite3")
+# 使用sql_lite 当做broker 和 响应接收
+CELERY_BROKER_URL = f'sqla+sqlite:///{broker_path}'
+CELERY_result_backend = f'db+sqlite:///{backend_path}'
+CELERY_timezone = CONFIG.TIME_ZONE
+CELERY_ENABLE_UTC = False
+CELERY_task_serializer = 'pickle'
+CELERY_result_serializer = 'pickle'
+CELERY_accept_content = ['json', 'pickle']
+CELERY_RESULT_EXPIRES = 600
+CELERY_WORKER_TASK_LOG_FORMAT = '%(asctime).19s %(message)s'
+CELERY_WORKER_LOG_FORMAT = '%(asctime).19s %(message)s'
+CELERY_TASK_EAGER_PROPAGATES = True
+CELERY_WORKER_REDIRECT_STDOUTS = True
+CELERY_WORKER_REDIRECT_STDOUTS_LEVEL = "INFO"
+CELERY_TASK_SOFT_TIME_LIMIT = 3600
+CELERY_WORKER_CANCEL_LONG_RUNNING_TASKS_ON_CONNECTION_LOSS = True
+CELERY_ACKS_LATE = True
+celery_once_path = os.path.join(celery_data_dir, "celery_once")
+try:
+ if os.path.exists(celery_once_path) and os.path.isdir(celery_once_path):
+ shutil.rmtree(celery_once_path)
+except Exception as e:
+ pass
+CELERY_ONCE = {
+ 'backend': 'celery_once.backends.File',
+ 'settings': {'location': celery_once_path}
+}
+CELERY_BROKER_CONNECTION_RETRY_ON_STARTUP = True
+CELERY_LOG_DIR = os.path.join(PROJECT_DIR, 'logs', 'celery')
diff --git a/apps/smartdoc/settings/logging.py b/apps/smartdoc/settings/logging.py
index 2627f12014b..9c3df8c159a 100644
--- a/apps/smartdoc/settings/logging.py
+++ b/apps/smartdoc/settings/logging.py
@@ -91,7 +91,7 @@
},
'sqlalchemy': {
'handlers': ['console', 'file', 'syslog'],
- 'level': LOG_LEVEL,
+ 'level': "ERROR",
'propagate': False,
},
'django.db.backends': {
@@ -114,6 +114,11 @@
'level': LOG_LEVEL,
'propagate': False,
},
+ 'common.event': {
+ 'handlers': ['console', 'file'],
+ 'level': "DEBUG",
+ 'propagate': False,
+ },
}
}
diff --git a/apps/smartdoc/urls.py b/apps/smartdoc/urls.py
index 9e85a1874a5..b243809cc77 100644
--- a/apps/smartdoc/urls.py
+++ b/apps/smartdoc/urls.py
@@ -5,13 +5,13 @@
https://docs.djangoproject.com/en/4.2/topics/http/urls/
Examples:
Function views
- 1. Add an import: forms my_app import views
+ 1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
- 1. Add an import: forms other_app.views import Home
+ 1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
- 1. Import the include() function: forms django.urls import include, path
+ 1. Import the include() function_lib: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
import os
@@ -19,32 +19,23 @@
from django.http import HttpResponse
from django.urls import path, re_path, include
from django.views import static
-from drf_yasg import openapi
-from drf_yasg.views import get_schema_view
-from rest_framework import permissions, status
+from rest_framework import status
-from common.auth import AnonymousAuthentication
+from application.urls import urlpatterns as application_urlpatterns
+from common.cache_data.static_resource_cache import get_index_html
+from common.constants.cache_code_constants import CacheCodeConstants
+from common.init.init_doc import init_doc
from common.response.result import Result
+from common.util.cache_util import get_cache
from smartdoc import settings
from smartdoc.conf import PROJECT_DIR
-schema_view = get_schema_view(
-
- openapi.Info(
- title="Python API",
- default_version='v1',
- description="智能客服平台",
- ),
- public=True,
- permission_classes=[permissions.AllowAny],
- authentication_classes=[AnonymousAuthentication]
-)
-
urlpatterns = [
path("api/", include("users.urls")),
path("api/", include("dataset.urls")),
path("api/", include("setting.urls")),
- path("api/", include("application.urls"))
+ path("api/", include("application.urls")),
+ path("api/", include("function_lib.urls"))
]
@@ -70,23 +61,14 @@ def page_not_found(request, exception):
"""
if request.path.startswith("/api/"):
return Result(response_status=status.HTTP_404_NOT_FOUND, code=404, message="找不到接口")
- else:
- index_path = os.path.join(PROJECT_DIR, 'apps', "static", 'ui', 'index.html')
- if not os.path.exists(index_path):
- return HttpResponse("页面不存在", status=404)
- file = open(index_path, "r", encoding='utf-8')
- content = file.read()
- file.close()
- if request.path.startswith('/ui/chat/'):
- return HttpResponse(content, status=200)
- return HttpResponse(content, status=200, headers={'X-Frame-Options': 'DENY'})
+ index_path = os.path.join(PROJECT_DIR, 'apps', "static", 'ui', 'index.html')
+ if not os.path.exists(index_path):
+ return HttpResponse("页面不存在", status=404)
+ content = get_index_html(index_path)
+ if request.path.startswith('/ui/chat/'):
+ return HttpResponse(content, status=200)
+ return HttpResponse(content, status=200, headers={'X-Frame-Options': 'DENY'})
handler404 = page_not_found
-
-urlpatterns += [
- re_path(r'^doc(?P\.json|\.yaml)$', schema_view.without_ui(cache_timeout=0),
- name='schema-json'), # 导出
- path('doc/', schema_view.with_ui('swagger', cache_timeout=0), name='schema-swagger-ui'),
- path('redoc/', schema_view.with_ui('redoc', cache_timeout=0), name='schema-redoc'),
-]
+init_doc(urlpatterns, application_urlpatterns)
diff --git a/apps/smartdoc/wsgi.py b/apps/smartdoc/wsgi.py
index e04ec52744d..6c7c6811587 100644
--- a/apps/smartdoc/wsgi.py
+++ b/apps/smartdoc/wsgi.py
@@ -19,9 +19,10 @@
def post_handler():
from common import event
from common import job
+ from common.models.db_model_manage import DBModelManage
event.run()
- event.ListenerManagement.init_embedding_model_signal.send()
job.run()
+ DBModelManage.init()
post_handler()
diff --git a/apps/users/apps.py b/apps/users/apps.py
index 1ea7bf62ffe..8e08561521a 100644
--- a/apps/users/apps.py
+++ b/apps/users/apps.py
@@ -5,3 +5,5 @@ class UsersConfig(AppConfig):
default_auto_field = 'django.db.models.BigAutoField'
name = 'users'
+ def ready(self):
+ from ops.celery import signal_handler
diff --git a/apps/users/migrations/0003_user_source.py b/apps/users/migrations/0003_user_source.py
new file mode 100644
index 00000000000..7292cc1b595
--- /dev/null
+++ b/apps/users/migrations/0003_user_source.py
@@ -0,0 +1,18 @@
+# Generated by Django 4.2.13 on 2024-07-11 19:16
+
+from django.db import migrations, models
+
+
+class Migration(migrations.Migration):
+
+ dependencies = [
+ ('users', '0002_user_create_time_user_update_time'),
+ ]
+
+ operations = [
+ migrations.AddField(
+ model_name='user',
+ name='source',
+ field=models.CharField(default='LOCAL', max_length=10, verbose_name='来源'),
+ ),
+ ]
diff --git a/apps/users/migrations/0004_alter_user_email.py b/apps/users/migrations/0004_alter_user_email.py
new file mode 100644
index 00000000000..c77416ba1d3
--- /dev/null
+++ b/apps/users/migrations/0004_alter_user_email.py
@@ -0,0 +1,18 @@
+# Generated by Django 4.2.13 on 2024-07-16 17:03
+
+from django.db import migrations, models
+
+
+class Migration(migrations.Migration):
+
+ dependencies = [
+ ('users', '0003_user_source'),
+ ]
+
+ operations = [
+ migrations.AlterField(
+ model_name='user',
+ name='email',
+ field=models.EmailField(blank=True, max_length=254, null=True, unique=True, verbose_name='邮箱'),
+ ),
+ ]
diff --git a/apps/users/migrations/0005_user_language.py b/apps/users/migrations/0005_user_language.py
new file mode 100644
index 00000000000..d2d3092d7a9
--- /dev/null
+++ b/apps/users/migrations/0005_user_language.py
@@ -0,0 +1,17 @@
+# Generated by Django 4.2.15 on 2025-01-20 06:59
+
+from django.db import migrations, models
+
+
+class Migration(migrations.Migration):
+ dependencies = [
+ ('users', '0004_alter_user_email'),
+ ]
+
+ operations = [
+ migrations.AddField(
+ model_name='user',
+ name='language',
+ field=models.CharField(default=None, null=True, max_length=10, verbose_name='语言'),
+ ),
+ ]
diff --git a/apps/users/models/user.py b/apps/users/models/user.py
index 08b5d8f2c03..4e3fd35adf7 100644
--- a/apps/users/models/user.py
+++ b/apps/users/models/user.py
@@ -20,6 +20,12 @@
__all__ = ["User", "password_encrypt", 'get_user_dynamics_permission']
+from smartdoc.const import CONFIG
+
+
+def get_language():
+ return CONFIG.get_language_code()
+
def password_encrypt(raw_password):
"""
@@ -63,13 +69,15 @@ def get_user_dynamics_permission(user_id: str):
class User(AppModelMixin):
id = models.UUIDField(primary_key=True, max_length=128, default=uuid.uuid1, editable=False, verbose_name="主键id")
- email = models.EmailField(unique=True, verbose_name="邮箱")
+ email = models.EmailField(unique=True, null=True, blank=True, verbose_name="邮箱")
phone = models.CharField(max_length=20, verbose_name="电话", default="")
nick_name = models.CharField(max_length=150, verbose_name="昵称", default="")
username = models.CharField(max_length=150, unique=True, verbose_name="用户名")
password = models.CharField(max_length=150, verbose_name="密码")
role = models.CharField(max_length=150, verbose_name="角色")
+ source = models.CharField(max_length=10, verbose_name="来源", default="LOCAL")
is_active = models.BooleanField(default=True)
+ language = models.CharField(max_length=10, verbose_name="语言", null=True, default=None)
create_time = models.DateTimeField(verbose_name="创建时间", auto_now_add=True, null=True)
update_time = models.DateTimeField(verbose_name="修改时间", auto_now=True, null=True)
diff --git a/apps/users/serializers/user_serializers.py b/apps/users/serializers/user_serializers.py
index 2d0da7f5651..96a4bb390a0 100644
--- a/apps/users/serializers/user_serializers.py
+++ b/apps/users/serializers/user_serializers.py
@@ -6,17 +6,22 @@
@date:2023/9/5 16:32
@desc:
"""
+import base64
import datetime
import os
import random
import re
import uuid
+from captcha.image import ImageCaptcha
+from django.conf import settings
from django.core import validators, signing, cache
from django.core.mail import send_mail
from django.core.mail.backends.smtp import EmailBackend
from django.db import transaction
-from django.db.models import Q, QuerySet
+from django.db.models import Q, QuerySet, Prefetch
+from django.utils.translation import get_language
+from django.utils.translation import gettext_lazy as _, to_locale
from drf_yasg import openapi
from rest_framework import serializers
@@ -25,25 +30,52 @@
from common.constants.exception_code_constants import ExceptionCodeConstants
from common.constants.permission_constants import RoleConstants, get_permission_list_by_role
from common.db.search import page_search
-from common.event import ListenerManagement
from common.exception.app_exception import AppApiException
from common.mixins.api_mixin import ApiMixin
+from common.models.db_model_manage import DBModelManage
from common.response.result import get_api_response
+from common.util.common import valid_license, get_random_chars
from common.util.field_message import ErrMessage
from common.util.lock import lock
from dataset.models import DataSet, Document, Paragraph, Problem, ProblemParagraphMapping
+from embedding.task import delete_embedding_by_dataset_id_list
+from function_lib.models.function import FunctionLib
from setting.models import Team, SystemSetting, SettingType, Model, TeamMember, TeamMemberPermission
from smartdoc.conf import PROJECT_DIR
from users.models.user import User, password_encrypt, get_user_dynamics_permission
user_cache = cache.caches['user_cache']
+captcha_cache = cache.caches['captcha_cache']
+
+
+class CaptchaSerializer(ApiMixin, serializers.Serializer):
+ @staticmethod
+ def get_response_body_api():
+ return get_api_response(openapi.Schema(
+ type=openapi.TYPE_STRING,
+ title="captcha",
+ default="xxxx",
+ description="captcha"
+ ))
+
+ @staticmethod
+ def generate():
+ chars = get_random_chars()
+ image = ImageCaptcha()
+ data = image.generate(chars)
+ captcha = base64.b64encode(data.getbuffer())
+ captcha_cache.set(f"LOGIN:{chars.lower()}", chars, timeout=5 * 60)
+ return 'data:image/png;base64,' + captcha.decode()
class SystemSerializer(ApiMixin, serializers.Serializer):
@staticmethod
def get_profile():
version = os.environ.get('MAXKB_VERSION')
- return {'version': version}
+ xpack_cache = DBModelManage.get_model('xpack_cache')
+ return {'version': version, 'IS_XPACK': hasattr(settings, 'IS_XPACK'),
+ 'XPACK_LICENSE_IS_VALID': False if xpack_cache is None else xpack_cache.get('XPACK_LICENSE_IS_VALID',
+ False)}
@staticmethod
def get_response_body_api():
@@ -51,24 +83,31 @@ def get_response_body_api():
type=openapi.TYPE_OBJECT,
required=[],
properties={
- 'version': openapi.Schema(type=openapi.TYPE_STRING, title="系统版本号", description="系统版本号"),
+ 'version': openapi.Schema(type=openapi.TYPE_STRING, title=_("System version number"),
+ description=_("System version number")),
}
)
class LoginSerializer(ApiMixin, serializers.Serializer):
username = serializers.CharField(required=True,
- error_messages=ErrMessage.char("用户名"))
+ error_messages=ErrMessage.char(_("Username")))
+
+ password = serializers.CharField(required=True, error_messages=ErrMessage.char(_("Password")))
- password = serializers.CharField(required=True, error_messages=ErrMessage.char("密码"))
+ captcha = serializers.CharField(required=True, error_messages=ErrMessage.char(_("captcha")))
def is_valid(self, *, raise_exception=False):
"""
校验参数
- :param raise_exception: 是否抛出异常 只能是True
- :return: 用户信息
+ :param raise_exception: Whether to throw an exception can only be True
+ :return: User information
"""
super().is_valid(raise_exception=True)
+ captcha = self.data.get('captcha')
+ captcha_value = captcha_cache.get(f"LOGIN:{captcha.lower()}")
+ if captcha_value is None:
+ raise AppApiException(1005, _("Captcha code error or expiration"))
username = self.data.get("username")
password = password_encrypt(self.data.get("password"))
user = QuerySet(User).filter(Q(username=username,
@@ -77,13 +116,13 @@ def is_valid(self, *, raise_exception=False):
if user is None:
raise ExceptionCodeConstants.INCORRECT_USERNAME_AND_PASSWORD.value.to_app_api_exception()
if not user.is_active:
- raise AppApiException(1005, "用户已被禁用,请联系管理员!")
+ raise AppApiException(1005, _("The user has been disabled, please contact the administrator!"))
return user
def get_user_token(self):
"""
- 获取用户Token
- :return: 用户Token(认证信息)
+ Get user token
+ :return: User Token (authentication information)
"""
user = self.is_valid()
token = signing.dumps({'username': user.username, 'id': str(user.id), 'email': user.email,
@@ -99,8 +138,9 @@ def get_request_body_api(self):
type=openapi.TYPE_OBJECT,
required=['username', 'password'],
properties={
- 'username': openapi.Schema(type=openapi.TYPE_STRING, title="用户名", description="用户名"),
- 'password': openapi.Schema(type=openapi.TYPE_STRING, title="密码", description="密码")
+ 'username': openapi.Schema(type=openapi.TYPE_STRING, title=_("Username"), description=_("Username")),
+ 'password': openapi.Schema(type=openapi.TYPE_STRING, title=_("Password"), description=_("Password")),
+ 'captcha': openapi.Schema(type=openapi.TYPE_STRING, title=_("captcha"), description=_("captcha"))
}
)
@@ -115,36 +155,38 @@ def get_response_body_api(self):
class RegisterSerializer(ApiMixin, serializers.Serializer):
"""
- 注册请求对象
+ Register request object
"""
email = serializers.EmailField(
required=True,
- error_messages=ErrMessage.char("邮箱"),
+ error_messages=ErrMessage.char(_("Email")),
validators=[validators.EmailValidator(message=ExceptionCodeConstants.EMAIL_FORMAT_ERROR.value.message,
code=ExceptionCodeConstants.EMAIL_FORMAT_ERROR.value.code)])
username = serializers.CharField(required=True,
- error_messages=ErrMessage.char("用户名"),
+ error_messages=ErrMessage.char(_("Username")),
max_length=20,
min_length=6,
validators=[
- validators.RegexValidator(regex=re.compile("^[a-zA-Z][a-zA-Z0-9_]{5,20}$"),
- message="用户名字符数为 6-20 个字符,必须以字母开头,可使用字母、数字、下划线等")
+ validators.RegexValidator(regex=re.compile("^.{6,20}$"),
+ message=_("Username must be 6-20 characters long"))
])
- password = serializers.CharField(required=True, error_messages=ErrMessage.char("密码"),
+ password = serializers.CharField(required=True, error_messages=ErrMessage.char(_("Password")),
validators=[validators.RegexValidator(regex=re.compile(
"^(?![a-zA-Z]+$)(?![A-Z0-9]+$)(?![A-Z_!@#$%^&*`~.()-+=]+$)(?![a-z0-9]+$)(?![a-z_!@#$%^&*`~()-+=]+$)"
"(?![0-9_!@#$%^&*`~()-+=]+$)[a-zA-Z0-9_!@#$%^&*`~.()-+=]{6,20}$")
- , message="密码长度6-20个字符,必须字母、数字、特殊字符组合")])
+ , message=_(
+ "The password must be 6-20 characters long and must be a combination of letters, numbers, and special characters."))])
re_password = serializers.CharField(required=True,
- error_messages=ErrMessage.char("确认密码"),
+ error_messages=ErrMessage.char(_("Confirm Password")),
validators=[validators.RegexValidator(regex=re.compile(
"^(?![a-zA-Z]+$)(?![A-Z0-9]+$)(?![A-Z_!@#$%^&*`~.()-+=]+$)(?![a-z0-9]+$)(?![a-z_!@#$%^&*`~()-+=]+$)"
"(?![0-9_!@#$%^&*`~()-+=]+$)[a-zA-Z0-9_!@#$%^&*`~.()-+=]{6,20}$")
- , message="确认密码长度6-20个字符,必须字母、数字、特殊字符组合")])
+ , message=_(
+ "The password must be 6-20 characters long and must be a combination of letters, numbers, and special characters."))])
- code = serializers.CharField(required=True, error_messages=ErrMessage.char("验证码"))
+ code = serializers.CharField(required=True, error_messages=ErrMessage.char(_("Verification code")))
class Meta:
model = User
@@ -174,19 +216,19 @@ def is_valid(self, *, raise_exception=False):
return True
+ @valid_license(model=User, count=2,
+ message=_(
+ "The community version supports up to 2 users. If you need more users, please contact us (https://fit2cloud.com/)."))
@transaction.atomic
def save(self, **kwargs):
m = User(
**{'id': uuid.uuid1(), 'email': self.data.get("email"), 'username': self.data.get("username"),
'role': RoleConstants.USER.name})
m.set_password(self.data.get("password"))
- # 插入用户
m.save()
- # 初始化用户团队
- Team(**{'user': m, 'name': m.username + '的团队'}).save()
+ Team(**{'user': m, 'name': m.username + _("team")}).save()
email = self.data.get("email")
code_cache_key = email + ":register"
- # 删除验证码缓存
user_cache.delete(code_cache_key)
@staticmethod
@@ -195,11 +237,13 @@ def get_request_body_api():
type=openapi.TYPE_OBJECT,
required=['username', 'email', 'password', 're_password', 'code'],
properties={
- 'username': openapi.Schema(type=openapi.TYPE_STRING, title="用户名", description="用户名"),
- 'email': openapi.Schema(type=openapi.TYPE_STRING, title="邮箱", description="邮箱地址"),
- 'password': openapi.Schema(type=openapi.TYPE_STRING, title="密码", description="密码"),
- 're_password': openapi.Schema(type=openapi.TYPE_STRING, title="确认密码", description="确认密码"),
- 'code': openapi.Schema(type=openapi.TYPE_STRING, title="验证码", description="验证码")
+ 'username': openapi.Schema(type=openapi.TYPE_STRING, title=_("Username"), description=_("Username")),
+ 'email': openapi.Schema(type=openapi.TYPE_STRING, title=_("Email"), description=_("Email")),
+ 'password': openapi.Schema(type=openapi.TYPE_STRING, title=_("Password"), description=_("Password")),
+ 're_password': openapi.Schema(type=openapi.TYPE_STRING, title=_("Confirm Password"),
+ description=_("Confirm Password")),
+ 'code': openapi.Schema(type=openapi.TYPE_STRING, title=_("Verification code"),
+ description=_("Verification code"))
}
)
@@ -210,16 +254,18 @@ class CheckCodeSerializer(ApiMixin, serializers.Serializer):
"""
email = serializers.EmailField(
required=True,
- error_messages=ErrMessage.char("邮箱"),
+ error_messages=ErrMessage.char(_("Email")),
validators=[validators.EmailValidator(message=ExceptionCodeConstants.EMAIL_FORMAT_ERROR.value.message,
code=ExceptionCodeConstants.EMAIL_FORMAT_ERROR.value.code)])
- code = serializers.CharField(required=True, error_messages=ErrMessage.char("验证码"))
+ code = serializers.CharField(required=True, error_messages=ErrMessage.char(_("Verification code")))
type = serializers.CharField(required=True,
- error_messages=ErrMessage.char("类型"),
+ error_messages=ErrMessage.char(_("Type")),
validators=[
validators.RegexValidator(regex=re.compile("^register|reset_password$"),
- message="类型只支持register|reset_password", code=500)
+ message=_(
+ "The type only supports register|reset_password"),
+ code=500)
])
def is_valid(self, *, raise_exception=False):
@@ -238,40 +284,56 @@ def get_request_body_api(self):
type=openapi.TYPE_OBJECT,
required=['email', 'code', 'type'],
properties={
- 'email': openapi.Schema(type=openapi.TYPE_STRING, title="邮箱", description="邮箱地址"),
- 'code': openapi.Schema(type=openapi.TYPE_STRING, title="验证码", description="验证码"),
- 'type': openapi.Schema(type=openapi.TYPE_STRING, title="类型", description="register|reset_password")
+ 'email': openapi.Schema(type=openapi.TYPE_STRING, title=_("Email"), description=_("Email")),
+ 'code': openapi.Schema(type=openapi.TYPE_STRING, title=_("Verification code"),
+ description=_("Verification code")),
+ 'type': openapi.Schema(type=openapi.TYPE_STRING, title=_("Type"), description="register|reset_password")
}
)
def get_response_body_api(self):
return get_api_response(openapi.Schema(
type=openapi.TYPE_BOOLEAN,
- title="是否成功",
+ title=_('Is it successful'),
default=True,
- description="错误提示"))
+ description=_('Error message')))
+
+
+class SwitchLanguageSerializer(serializers.Serializer):
+ user_id = serializers.UUIDField(required=True, error_messages=ErrMessage.char(_('user id')), )
+ language = serializers.CharField(required=True, error_messages=ErrMessage.char(_('language')))
+
+ def switch(self):
+ self.is_valid(raise_exception=True)
+ language = self.data.get('language')
+ support_language_list = ['zh-CN', 'zh-Hant', 'en-US']
+ if not support_language_list.__contains__(language):
+ raise AppApiException(500, _('language only support:') + ','.join(support_language_list))
+ QuerySet(User).filter(id=self.data.get('user_id')).update(language=language)
class RePasswordSerializer(ApiMixin, serializers.Serializer):
email = serializers.EmailField(
required=True,
- error_messages=ErrMessage.char("邮箱"),
+ error_messages=ErrMessage.char(_("Email")),
validators=[validators.EmailValidator(message=ExceptionCodeConstants.EMAIL_FORMAT_ERROR.value.message,
code=ExceptionCodeConstants.EMAIL_FORMAT_ERROR.value.code)])
- code = serializers.CharField(required=True, error_messages=ErrMessage.char("验证码"))
+ code = serializers.CharField(required=True, error_messages=ErrMessage.char(_("Verification code")))
- password = serializers.CharField(required=True, error_messages=ErrMessage.char("密码"),
+ password = serializers.CharField(required=True, error_messages=ErrMessage.char(_("Password")),
validators=[validators.RegexValidator(regex=re.compile(
"^(?![a-zA-Z]+$)(?![A-Z0-9]+$)(?![A-Z_!@#$%^&*`~.()-+=]+$)(?![a-z0-9]+$)(?![a-z_!@#$%^&*`~()-+=]+$)"
"(?![0-9_!@#$%^&*`~()-+=]+$)[a-zA-Z0-9_!@#$%^&*`~.()-+=]{6,20}$")
- , message="确认密码长度6-20个字符,必须字母、数字、特殊字符组合")])
+ , message=_(
+ "The confirmation password must be 6-20 characters long and must be a combination of letters, numbers, and special characters."))])
- re_password = serializers.CharField(required=True, error_messages=ErrMessage.char("确认密码"),
+ re_password = serializers.CharField(required=True, error_messages=ErrMessage.char(_("Confirm Password")),
validators=[validators.RegexValidator(regex=re.compile(
"^(?![a-zA-Z]+$)(?![A-Z0-9]+$)(?![A-Z_!@#$%^&*`~.()-+=]+$)(?![a-z0-9]+$)(?![a-z_!@#$%^&*`~()-+=]+$)"
"(?![0-9_!@#$%^&*`~()-+=]+$)[a-zA-Z0-9_!@#$%^&*`~.()-+=]{6,20}$")
- , message="确认密码长度6-20个字符,必须字母、数字、特殊字符组合")]
+ , message=_(
+ "The confirmation password must be 6-20 characters long and must be a combination of letters, numbers, and special characters."))]
)
class Meta:
@@ -309,10 +371,12 @@ def get_request_body_api(self):
type=openapi.TYPE_OBJECT,
required=['email', 'code', "password", 're_password'],
properties={
- 'email': openapi.Schema(type=openapi.TYPE_STRING, title="邮箱", description="邮箱地址"),
- 'code': openapi.Schema(type=openapi.TYPE_STRING, title="验证码", description="验证码"),
- 'password': openapi.Schema(type=openapi.TYPE_STRING, title="密码", description="密码"),
- 're_password': openapi.Schema(type=openapi.TYPE_STRING, title="确认密码", description="确认密码")
+ 'email': openapi.Schema(type=openapi.TYPE_STRING, title=_("Email"), description=_("Email")),
+ 'code': openapi.Schema(type=openapi.TYPE_STRING, title=_("Verification code"),
+ description=_("Verification code")),
+ 'password': openapi.Schema(type=openapi.TYPE_STRING, title=_("Password"), description=_("Password")),
+ 're_password': openapi.Schema(type=openapi.TYPE_STRING, title=_("Confirm Password"),
+ description=_("Confirm Password"))
}
)
@@ -320,13 +384,13 @@ def get_request_body_api(self):
class SendEmailSerializer(ApiMixin, serializers.Serializer):
email = serializers.EmailField(
required=True
- , error_messages=ErrMessage.char("邮箱"),
+ , error_messages=ErrMessage.char(_("Email")),
validators=[validators.EmailValidator(message=ExceptionCodeConstants.EMAIL_FORMAT_ERROR.value.message,
code=ExceptionCodeConstants.EMAIL_FORMAT_ERROR.value.code)])
- type = serializers.CharField(required=True, error_messages=ErrMessage.char("类型"), validators=[
+ type = serializers.CharField(required=True, error_messages=ErrMessage.char(_("Type")), validators=[
validators.RegexValidator(regex=re.compile("^register|reset_password$"),
- message="类型只支持register|reset_password", code=500)
+ message=_("The type only supports register|reset_password"), code=500)
])
class Meta:
@@ -344,7 +408,8 @@ def is_valid(self, *, raise_exception=False):
code_cache_key_lock = code_cache_key + "_lock"
ttl = user_cache.ttl(code_cache_key_lock)
if ttl is not None:
- raise AppApiException(500, f"{ttl.total_seconds()}秒内请勿重复发送邮件")
+ raise AppApiException(500, _("Do not send emails again within {seconds} seconds").format(
+ seconds=int(ttl.total_seconds())))
return True
def send(self):
@@ -359,8 +424,10 @@ def send(self):
code = "".join(list(map(lambda i: random.choice(['1', '2', '3', '4', '5', '6', '7', '8', '9', '0'
]), range(6))))
# 获取邮件模板
- file = open(os.path.join(PROJECT_DIR, "apps", "common", 'template', 'email_template.html'), "r",
- encoding='utf-8')
+ language = get_language()
+ file = open(
+ os.path.join(PROJECT_DIR, "apps", "common", 'template', f'email_template_{to_locale(language)}.html'), "r",
+ encoding='utf-8')
content = file.read()
file.close()
code_cache_key = email + ":" + state
@@ -370,7 +437,8 @@ def send(self):
system_setting = QuerySet(SystemSetting).filter(type=SettingType.EMAIL.value).first()
if system_setting is None:
user_cache.delete(code_cache_key_lock)
- raise AppApiException(1004, "邮箱未设置,请联系管理员设置")
+ raise AppApiException(1004,
+ _("The email service has not been set up. Please contact the administrator to set up the email service in [Email Settings]."))
try:
connection = EmailBackend(system_setting.meta.get("email_host"),
system_setting.meta.get('email_port'),
@@ -381,14 +449,15 @@ def send(self):
system_setting.meta.get('email_use_ssl')
)
# 发送邮件
- send_mail(f'【MaxKB 智能知识库-{"用户注册" if state == "register" else "修改密码"}】',
- '',
- html_message=f'{content.replace("${code}", code)}',
- from_email=system_setting.meta.get('from_email'),
- recipient_list=[email], fail_silently=False, connection=connection)
+ send_mail(_('【Intelligent knowledge base question and answer system-{action}】').format(
+ action=_('User registration') if state == 'register' else _('Change password')),
+ '',
+ html_message=f'{content.replace("${code}", code)}',
+ from_email=system_setting.meta.get('from_email'),
+ recipient_list=[email], fail_silently=False, connection=connection)
except Exception as e:
user_cache.delete(code_cache_key_lock)
- raise AppApiException(500, f"{str(e)}邮件发送失败")
+ raise AppApiException(500, f"{str(e)}" + _("Email sending failed"))
user_cache.set(code_cache_key, code, timeout=datetime.timedelta(minutes=30))
return True
@@ -397,8 +466,8 @@ def get_request_body_api(self):
type=openapi.TYPE_OBJECT,
required=['email', 'type'],
properties={
- 'email': openapi.Schema(type=openapi.TYPE_STRING, title="邮箱", description="邮箱地址"),
- 'type': openapi.Schema(type=openapi.TYPE_STRING, title="类型", description="register|reset_password")
+ 'email': openapi.Schema(type=openapi.TYPE_STRING, title=_("Email"), description=_('Email')),
+ 'type': openapi.Schema(type=openapi.TYPE_STRING, title=_('Type'), description="register|reset_password")
}
)
@@ -419,7 +488,8 @@ def get_user_profile(user: User):
permission_list += [p.value for p in get_permission_list_by_role(RoleConstants[user.role])]
return {'id': user.id, 'username': user.username, 'email': user.email, 'role': user.role,
'permissions': [str(p) for p in permission_list],
- 'is_edit_password': user.password == 'd880e722c47a34d8e9fce789fc62389d' if user.role == 'ADMIN' else False}
+ 'is_edit_password': user.password == 'd880e722c47a34d8e9fce789fc62389d' if user.role == 'ADMIN' else False,
+ 'language': user.language}
@staticmethod
def get_response_body_api():
@@ -427,12 +497,13 @@ def get_response_body_api():
type=openapi.TYPE_OBJECT,
required=['id', 'username', 'email', 'role', 'is_active'],
properties={
- 'id': openapi.Schema(type=openapi.TYPE_STRING, title="用户id", description="用户id"),
- 'username': openapi.Schema(type=openapi.TYPE_STRING, title="用户名", description="用户名"),
- 'email': openapi.Schema(type=openapi.TYPE_STRING, title="邮箱", description="邮箱地址"),
- 'role': openapi.Schema(type=openapi.TYPE_STRING, title="角色", description="角色"),
- 'is_active': openapi.Schema(type=openapi.TYPE_STRING, title="是否可用", description="是否可用"),
- "permissions": openapi.Schema(type=openapi.TYPE_ARRAY, title="权限列表", description="权限列表",
+ 'id': openapi.Schema(type=openapi.TYPE_STRING, title="ID", description="ID"),
+ 'username': openapi.Schema(type=openapi.TYPE_STRING, title=_("Username"), description=_("Username")),
+ 'email': openapi.Schema(type=openapi.TYPE_STRING, title=_("Email"), description=_("Email")),
+ 'role': openapi.Schema(type=openapi.TYPE_STRING, title=_("Role"), description=_("Role")),
+ 'is_active': openapi.Schema(type=openapi.TYPE_STRING, title=_("Is active"), description=_("Is active")),
+ "permissions": openapi.Schema(type=openapi.TYPE_ARRAY, title=_("Permissions"),
+ description=_("Permissions"),
items=openapi.Schema(type=openapi.TYPE_STRING))
}
)
@@ -449,11 +520,11 @@ def get_response_body_api(self):
type=openapi.TYPE_OBJECT,
required=['id', 'username', 'email', 'role', 'is_active'],
properties={
- 'id': openapi.Schema(type=openapi.TYPE_STRING, title="用户id", description="用户id"),
- 'username': openapi.Schema(type=openapi.TYPE_STRING, title="用户名", description="用户名"),
- 'email': openapi.Schema(type=openapi.TYPE_STRING, title="邮箱", description="邮箱地址"),
- 'role': openapi.Schema(type=openapi.TYPE_STRING, title="角色", description="角色"),
- 'is_active': openapi.Schema(type=openapi.TYPE_STRING, title="是否可用", description="是否可用")
+ 'id': openapi.Schema(type=openapi.TYPE_STRING, title="ID", description="ID"),
+ 'username': openapi.Schema(type=openapi.TYPE_STRING, title=_("Username"), description=_("Username")),
+ 'email': openapi.Schema(type=openapi.TYPE_STRING, title=_("Email"), description=_("Email")),
+ 'role': openapi.Schema(type=openapi.TYPE_STRING, title=_("Role"), description=_("Role")),
+ 'is_active': openapi.Schema(type=openapi.TYPE_STRING, title=_("Is active"), description=_("Is active"))
}
)
@@ -466,7 +537,7 @@ def get_request_params_api():
in_=openapi.IN_QUERY,
type=openapi.TYPE_STRING,
required=True,
- description='邮箱或者用户名')]
+ description=_("Email or username"))]
@staticmethod
def get_response_body_api():
@@ -474,9 +545,10 @@ def get_response_body_api():
type=openapi.TYPE_OBJECT,
required=['username', 'email', 'id'],
properties={
- 'id': openapi.Schema(type=openapi.TYPE_STRING, title='用户主键id', description="用户主键id"),
- 'username': openapi.Schema(type=openapi.TYPE_STRING, title="用户名", description="用户名"),
- 'email': openapi.Schema(type=openapi.TYPE_STRING, title="邮箱", description="邮箱地址")
+ 'id': openapi.Schema(type=openapi.TYPE_STRING, title='ID', description="ID"),
+ 'username': openapi.Schema(type=openapi.TYPE_STRING, title=_("Username"),
+ description=_("Username")),
+ 'email': openapi.Schema(type=openapi.TYPE_STRING, title=_("Email"), description=_("Email"))
}
)
@@ -484,14 +556,50 @@ def list(self, with_valid=True):
if with_valid:
self.is_valid(raise_exception=True)
email_or_username = self.data.get('email_or_username')
- return [{'id': user_model.id, 'username': user_model.username, 'email': user_model.email} for user_model in
+ return [{'id': user_model.id, 'username': user_model.username, 'email': user_model.email} for user_model
+ in
QuerySet(User).filter(Q(username=email_or_username) | Q(email=email_or_username))]
+ def listByType(self, type, user_id):
+ teamIds = TeamMember.objects.filter(user_id=user_id).values_list('id', flat=True)
+ targets = TeamMemberPermission.objects.filter(
+ member_id__in=teamIds,
+ auth_target_type=type,
+ operate__contains=['USE']
+ ).values_list('target', flat=True)
+ prefetch_users = Prefetch('user', queryset=User.objects.only('id', 'username'))
+
+ user_list = []
+ if type == 'DATASET':
+ user_list = DataSet.objects.filter(
+ Q(id__in=targets) | Q(user_id=user_id)
+ ).prefetch_related(prefetch_users).distinct('user_id')
+ elif type == 'APPLICATION':
+ user_list = Application.objects.filter(
+ Q(id__in=targets) | Q(user_id=user_id)
+ ).prefetch_related(prefetch_users).distinct('user_id')
+ elif type == 'FUNCTION':
+ user_list = FunctionLib.objects.filter(
+ Q(permission_type='PUBLIC') | Q(user_id=user_id)
+ ).prefetch_related(prefetch_users).distinct('user_id')
+
+ other_users = [
+ {'id': app.user.id, 'username': app.user.username}
+ for app in user_list if app.user.id != user_id
+ ]
+ users = [
+ {'id': 'all', 'username': _('All')},
+ {'id': user_id, 'username': _('Me')}
+ ]
+ users.extend(other_users)
+ return users
+
class UserInstanceSerializer(ApiMixin, serializers.ModelSerializer):
class Meta:
model = User
- fields = ['id', 'username', 'email', 'phone', 'is_active', 'role', 'nick_name', 'create_time', 'update_time']
+ fields = ['id', 'username', 'email', 'phone', 'is_active', 'role', 'nick_name', 'create_time', 'update_time',
+ 'source']
@staticmethod
def get_response_body_api():
@@ -500,15 +608,19 @@ def get_response_body_api():
required=['id', 'username', 'email', 'phone', 'is_active', 'role', 'nick_name', 'create_time',
'update_time'],
properties={
- 'id': openapi.Schema(type=openapi.TYPE_STRING, title="用户id", description="用户id"),
- 'username': openapi.Schema(type=openapi.TYPE_STRING, title="用户名", description="用户名"),
- 'email': openapi.Schema(type=openapi.TYPE_STRING, title="邮箱", description="邮箱地址"),
- 'phone': openapi.Schema(type=openapi.TYPE_STRING, title="手机号", description="手机号"),
- 'is_active': openapi.Schema(type=openapi.TYPE_BOOLEAN, title="是否激活", description="是否激活"),
- 'role': openapi.Schema(type=openapi.TYPE_STRING, title="角色", description="角色"),
- 'nick_name': openapi.Schema(type=openapi.TYPE_STRING, title="姓名", description="姓名"),
- 'create_time': openapi.Schema(type=openapi.TYPE_STRING, title="创建时间", description="修改时间"),
- 'update_time': openapi.Schema(type=openapi.TYPE_STRING, title="修改时间", description="修改时间")
+ 'id': openapi.Schema(type=openapi.TYPE_STRING, title="ID", description="ID"),
+ 'username': openapi.Schema(type=openapi.TYPE_STRING, title=_("Username"), description=_("Username")),
+ 'email': openapi.Schema(type=openapi.TYPE_STRING, title=_("Email"), description=_("Email")),
+ 'phone': openapi.Schema(type=openapi.TYPE_STRING, title=_("Phone"), description=_("Phone")),
+ 'is_active': openapi.Schema(type=openapi.TYPE_BOOLEAN, title=_("Is active"),
+ description=_("Is active")),
+ 'role': openapi.Schema(type=openapi.TYPE_STRING, title=_("Role"), description=_("Role")),
+ 'source': openapi.Schema(type=openapi.TYPE_STRING, title=_("Source"), description=_("Source")),
+ 'nick_name': openapi.Schema(type=openapi.TYPE_STRING, title=_("Name"), description=_("Name")),
+ 'create_time': openapi.Schema(type=openapi.TYPE_STRING, title=_("Create time"),
+ description=_("Create time")),
+ 'update_time': openapi.Schema(type=openapi.TYPE_STRING, title=_("Update time"),
+ description=_("Update time"))
}
)
@@ -518,7 +630,7 @@ def get_request_params_api():
in_=openapi.IN_PATH,
type=openapi.TYPE_STRING,
required=True,
- description='用户名id')
+ description='ID')
]
@@ -526,7 +638,7 @@ def get_request_params_api():
class UserManageSerializer(serializers.Serializer):
class Query(ApiMixin, serializers.Serializer):
email_or_username = serializers.CharField(required=False, allow_null=True,
- error_messages=ErrMessage.char("邮箱或者用户名"))
+ error_messages=ErrMessage.char(_('Email or username')))
@staticmethod
def get_request_params_api():
@@ -534,7 +646,7 @@ def get_request_params_api():
in_=openapi.IN_QUERY,
type=openapi.TYPE_STRING,
required=False,
- description='邮箱或者用户名')]
+ description=_("Email or username"))]
@staticmethod
def get_response_body_api():
@@ -542,9 +654,10 @@ def get_response_body_api():
type=openapi.TYPE_OBJECT,
required=['username', 'email', 'id'],
properties={
- 'id': openapi.Schema(type=openapi.TYPE_STRING, title='用户主键id', description="用户主键id"),
- 'username': openapi.Schema(type=openapi.TYPE_STRING, title="用户名", description="用户名"),
- 'email': openapi.Schema(type=openapi.TYPE_STRING, title="邮箱", description="邮箱地址")
+ 'id': openapi.Schema(type=openapi.TYPE_STRING, title='ID', description="ID"),
+ 'username': openapi.Schema(type=openapi.TYPE_STRING, title=_("Username"),
+ description=_("Username")),
+ 'email': openapi.Schema(type=openapi.TYPE_STRING, title=_("Email"), description=_("Email"))
}
)
@@ -573,27 +686,29 @@ def page(self, current_page: int, page_size: int, with_valid=True):
class UserInstance(ApiMixin, serializers.Serializer):
email = serializers.EmailField(
required=True,
- error_messages=ErrMessage.char("邮箱"),
+ error_messages=ErrMessage.char(_("Email")),
validators=[validators.EmailValidator(message=ExceptionCodeConstants.EMAIL_FORMAT_ERROR.value.message,
code=ExceptionCodeConstants.EMAIL_FORMAT_ERROR.value.code)])
username = serializers.CharField(required=True,
- error_messages=ErrMessage.char("用户名"),
+ error_messages=ErrMessage.char(_("Username")),
max_length=20,
min_length=6,
validators=[
- validators.RegexValidator(regex=re.compile("^[a-zA-Z][a-zA-Z0-9_]{5,20}$"),
- message="用户名字符数为 6-20 个字符,必须以字母开头,可使用字母、数字、下划线等")
+ validators.RegexValidator(regex=re.compile("^.{6,20}$"),
+ message=_(
+ 'Username must be 6-20 characters long'))
])
- password = serializers.CharField(required=True, error_messages=ErrMessage.char("密码"),
+ password = serializers.CharField(required=True, error_messages=ErrMessage.char(_("Password")),
validators=[validators.RegexValidator(regex=re.compile(
"^(?![a-zA-Z]+$)(?![A-Z0-9]+$)(?![A-Z_!@#$%^&*`~.()-+=]+$)(?![a-z0-9]+$)(?![a-z_!@#$%^&*`~()-+=]+$)"
"(?![0-9_!@#$%^&*`~()-+=]+$)[a-zA-Z0-9_!@#$%^&*`~.()-+=]{6,20}$")
- , message="密码长度6-20个字符,必须字母、数字、特殊字符组合")])
+ , message=_(
+ "The password must be 6-20 characters long and must be a combination of letters, numbers, and special characters."))])
- nick_name = serializers.CharField(required=False, error_messages=ErrMessage.char("姓名"), max_length=64,
+ nick_name = serializers.CharField(required=False, error_messages=ErrMessage.char(_("Name")), max_length=64,
allow_null=True, allow_blank=True)
- phone = serializers.CharField(required=False, error_messages=ErrMessage.char("手机号"), max_length=20,
+ phone = serializers.CharField(required=False, error_messages=ErrMessage.char(_("Phone")), max_length=20,
allow_null=True, allow_blank=True)
def is_valid(self, *, raise_exception=True):
@@ -613,55 +728,61 @@ def get_request_body_api():
type=openapi.TYPE_OBJECT,
required=['username', 'email', 'password'],
properties={
- 'username': openapi.Schema(type=openapi.TYPE_STRING, title="用户名", description="用户名"),
- 'email': openapi.Schema(type=openapi.TYPE_STRING, title="邮箱", description="邮箱地址"),
- 'password': openapi.Schema(type=openapi.TYPE_STRING, title="密码", description="密码"),
- 'phone': openapi.Schema(type=openapi.TYPE_STRING, title="手机号", description="手机号"),
- 'nick_name': openapi.Schema(type=openapi.TYPE_STRING, title="姓名", description="姓名")
+ 'username': openapi.Schema(type=openapi.TYPE_STRING, title=_("Username"),
+ description=_("Username")),
+ 'email': openapi.Schema(type=openapi.TYPE_STRING, title=_("Email"), description=_("Email")),
+ 'password': openapi.Schema(type=openapi.TYPE_STRING, title=_("Password"),
+ description=_("Password")),
+ 'phone': openapi.Schema(type=openapi.TYPE_STRING, title=_("Phone"), description=_("Phone")),
+ 'nick_name': openapi.Schema(type=openapi.TYPE_STRING, title=_("Name"), description=_("Name"))
}
)
class UserEditInstance(ApiMixin, serializers.Serializer):
email = serializers.EmailField(
required=False,
- error_messages=ErrMessage.char("邮箱"),
+ error_messages=ErrMessage.char(_("Email")),
validators=[validators.EmailValidator(message=ExceptionCodeConstants.EMAIL_FORMAT_ERROR.value.message,
code=ExceptionCodeConstants.EMAIL_FORMAT_ERROR.value.code)])
- nick_name = serializers.CharField(required=False, error_messages=ErrMessage.char("姓名"), max_length=64,
+ nick_name = serializers.CharField(required=False, error_messages=ErrMessage.char(_("Name")), max_length=64,
allow_null=True, allow_blank=True)
- phone = serializers.CharField(required=False, error_messages=ErrMessage.char("手机号"), max_length=20,
+ phone = serializers.CharField(required=False, error_messages=ErrMessage.char(_("Phone")), max_length=20,
allow_null=True, allow_blank=True)
- is_active = serializers.BooleanField(required=False, error_messages=ErrMessage.char("是否可用"))
+ is_active = serializers.BooleanField(required=False, error_messages=ErrMessage.char(_("Is active")))
def is_valid(self, *, user_id=None, raise_exception=False):
super().is_valid(raise_exception=True)
- if QuerySet(User).filter(email=self.data.get('email')).exclude(id=user_id).exists():
- raise AppApiException(1004, "邮箱已经被使用")
+ if self.data.get('email') is not None and QuerySet(User).filter(email=self.data.get('email')).exclude(
+ id=user_id).exists():
+ raise AppApiException(1004, _('Email is already in use'))
@staticmethod
def get_request_body_api():
return openapi.Schema(
type=openapi.TYPE_OBJECT,
properties={
- 'email': openapi.Schema(type=openapi.TYPE_STRING, title="邮箱", description="邮箱"),
- 'nick_name': openapi.Schema(type=openapi.TYPE_STRING, title="姓名", description="姓名"),
- 'phone': openapi.Schema(type=openapi.TYPE_STRING, title="手机号", description="手机号"),
- 'is_active': openapi.Schema(type=openapi.TYPE_BOOLEAN, title="是否可用", description="是否可用"),
+ 'email': openapi.Schema(type=openapi.TYPE_STRING, title=_("Email"), description=_("Email")),
+ 'nick_name': openapi.Schema(type=openapi.TYPE_STRING, title=_("Name"), description=_("Name")),
+ 'phone': openapi.Schema(type=openapi.TYPE_STRING, title=_("Phone"), description=_("Phone")),
+ 'is_active': openapi.Schema(type=openapi.TYPE_BOOLEAN, title=_("Is active"),
+ description=_("Is active")),
}
)
class RePasswordInstance(ApiMixin, serializers.Serializer):
- password = serializers.CharField(required=True, error_messages=ErrMessage.char("密码"),
+ password = serializers.CharField(required=True, error_messages=ErrMessage.char(_("Password")),
validators=[validators.RegexValidator(regex=re.compile(
"^(?![a-zA-Z]+$)(?![A-Z0-9]+$)(?![A-Z_!@#$%^&*`~.()-+=]+$)(?![a-z0-9]+$)(?![a-z_!@#$%^&*`~()-+=]+$)"
"(?![0-9_!@#$%^&*`~()-+=]+$)[a-zA-Z0-9_!@#$%^&*`~.()-+=]{6,20}$")
- , message="密码长度6-20个字符,必须字母、数字、特殊字符组合")])
- re_password = serializers.CharField(required=True, error_messages=ErrMessage.char("确认密码"),
+ , message=_(
+ "The password must be 6-20 characters long and must be a combination of letters, numbers, and special characters."))])
+ re_password = serializers.CharField(required=True, error_messages=ErrMessage.char(_("Confirm Password")),
validators=[validators.RegexValidator(regex=re.compile(
"^(?![a-zA-Z]+$)(?![A-Z0-9]+$)(?![A-Z_!@#$%^&*`~.()-+=]+$)(?![a-z0-9]+$)(?![a-z_!@#$%^&*`~()-+=]+$)"
"(?![0-9_!@#$%^&*`~()-+=]+$)[a-zA-Z0-9_!@#$%^&*`~.()-+=]{6,20}$")
- , message="确认密码长度6-20个字符,必须字母、数字、特殊字符组合")]
+ , message=_(
+ "The confirmation password must be 6-20 characters long and must be a combination of letters, numbers, and special characters."))]
)
@staticmethod
@@ -670,9 +791,10 @@ def get_request_body_api():
type=openapi.TYPE_OBJECT,
required=['password', 're_password'],
properties={
- 'password': openapi.Schema(type=openapi.TYPE_STRING, title="密码", description="密码"),
- 're_password': openapi.Schema(type=openapi.TYPE_STRING, title="确认密码",
- description="确认密码"),
+ 'password': openapi.Schema(type=openapi.TYPE_STRING, title=_("Password"),
+ description=_("Password")),
+ 're_password': openapi.Schema(type=openapi.TYPE_STRING, title=_("Confirm Password"),
+ description=_("Confirm Password")),
}
)
@@ -681,6 +803,9 @@ def is_valid(self, *, raise_exception=False):
if self.data.get('password') != self.data.get('re_password'):
raise ExceptionCodeConstants.PASSWORD_NOT_EQ_RE_PASSWORD.value.to_app_api_exception()
+ @valid_license(model=User, count=2,
+ message=_(
+ 'The community version supports up to 2 users. If you need more users, please contact us (https://fit2cloud.com/).'))
@transaction.atomic
def save(self, instance, with_valid=True):
if with_valid:
@@ -690,20 +815,20 @@ def save(self, instance, with_valid=True):
phone="" if instance.get('phone') is None else instance.get('phone'),
nick_name="" if instance.get('nick_name') is None else instance.get('nick_name')
, username=instance.get('username'), password=password_encrypt(instance.get('password')),
- role=RoleConstants.USER.name,
+ role=RoleConstants.USER.name, source="LOCAL",
is_active=True)
user.save()
# 初始化用户团队
- Team(**{'user': user, 'name': user.username + '的团队'}).save()
+ Team(**{'user': user, 'name': user.username + _('team')}).save()
return UserInstanceSerializer(user).data
class Operate(serializers.Serializer):
- id = serializers.UUIDField(required=True, error_messages=ErrMessage.char("用户id"))
+ id = serializers.UUIDField(required=True, error_messages=ErrMessage.char("ID"))
def is_valid(self, *, raise_exception=False):
super().is_valid(raise_exception=True)
if not QuerySet(User).filter(id=self.data.get('id')).exists():
- raise AppApiException(1004, "用户不存在")
+ raise AppApiException(1004, _('User does not exist'))
@transaction.atomic
def delete(self, with_valid=True):
@@ -711,7 +836,7 @@ def delete(self, with_valid=True):
self.is_valid(raise_exception=True)
user = QuerySet(User).filter(id=self.data.get('id')).first()
if user.role == RoleConstants.ADMIN.name:
- raise AppApiException(1004, "无法删除管理员")
+ raise AppApiException(1004, _('Unable to delete administrator'))
user_id = self.data.get('id')
team_member_list = QuerySet(TeamMember).filter(Q(user_id=user_id) | Q(team_id=user_id))
@@ -729,7 +854,7 @@ def delete(self, with_valid=True):
QuerySet(Paragraph).filter(dataset_id__in=dataset_id_list).delete()
QuerySet(ProblemParagraphMapping).filter(dataset_id__in=dataset_id_list).delete()
QuerySet(Problem).filter(dataset_id__in=dataset_id_list).delete()
- ListenerManagement.delete_embedding_by_dataset_id_list_signal.send(dataset_id_list)
+ delete_embedding_by_dataset_id_list(dataset_id_list)
dataset_list.delete()
# 删除团队
QuerySet(Team).filter(user_id=self.data.get('id')).delete()
@@ -748,7 +873,7 @@ def edit(self, instance, with_valid=True):
user = QuerySet(User).filter(id=self.data.get('id')).first()
if user.role == RoleConstants.ADMIN.name and 'is_active' in instance and instance.get(
'is_active') is not None:
- raise AppApiException(1004, "不能修改管理员状态")
+ raise AppApiException(1004, _('Cannot modify administrator status'))
update_keys = ['email', 'nick_name', 'phone', 'is_active']
for update_key in update_keys:
if update_key in instance and instance.get(update_key) is not None:
diff --git a/apps/users/task/__init__.py b/apps/users/task/__init__.py
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/apps/users/urls.py b/apps/users/urls.py
index 55388d894c1..a9d1e134c90 100644
--- a/apps/users/urls.py
+++ b/apps/users/urls.py
@@ -6,6 +6,8 @@
urlpatterns = [
path('profile', views.Profile.as_view()),
path('user', views.User.as_view(), name="profile"),
+ path('user/captcha', views.CaptchaView.as_view(), name='captcha'),
+ path('user/language', views.SwitchUserLanguageView.as_view(), name='language'),
path('user/list', views.User.Query.as_view()),
path('user/login', views.Login.as_view(), name='login'),
path('user/logout', views.Logout.as_view(), name='logout'),
@@ -21,4 +23,5 @@
name="user_manage_re_password"),
path("user_manage//", views.UserManage.Page.as_view(),
name="user_manage_re_password"),
+ path('user/list/', views.UserListView.as_view()),
]
diff --git a/apps/users/views/common.py b/apps/users/views/common.py
new file mode 100644
index 00000000000..c3f086b7b1c
--- /dev/null
+++ b/apps/users/views/common.py
@@ -0,0 +1,32 @@
+# coding=utf-8
+"""
+ @project: MaxKB
+ @Author:虎
+ @file: common.py
+ @date:2025/3/25 16:46
+ @desc:
+"""
+from common.util.common import encryption
+from users.models import User
+from django.db.models import QuerySet
+
+
+def get_user_operation_object(user_id):
+ user_model = QuerySet(model=User).filter(id=user_id).first()
+ if user_model is not None:
+ return {
+ "name": user_model.username
+ }
+ return {}
+
+
+def get_re_password_details(request):
+ path = request.path
+ body = request.data
+ query = request.query_params
+ return {
+ 'path': path,
+ 'body': {**body, 'password': encryption(body.get('password', '')),
+ 're_password': encryption(body.get('re_password', ''))},
+ 'query': query
+ }
diff --git a/apps/users/views/user.py b/apps/users/views/user.py
index e691ff4b989..d4b9f8a6a41 100644
--- a/apps/users/views/user.py
+++ b/apps/users/views/user.py
@@ -7,6 +7,7 @@
@desc:
"""
from django.core import cache
+from django.utils.translation import gettext_lazy as _
from drf_yasg import openapi
from drf_yasg.utils import swagger_auto_schema
from rest_framework.decorators import action
@@ -18,11 +19,15 @@
from common.auth.authenticate import TokenAuth
from common.auth.authentication import has_permissions
from common.constants.permission_constants import PermissionConstants, CompareConstants, ViewPermission, RoleConstants
+from common.log.log import log
from common.response import result
+from common.util.common import encryption
from smartdoc.settings import JWT_AUTH
from users.serializers.user_serializers import RegisterSerializer, LoginSerializer, CheckCodeSerializer, \
RePasswordSerializer, \
- SendEmailSerializer, UserProfile, UserSerializer, UserManageSerializer, UserInstanceSerializer, SystemSerializer
+ SendEmailSerializer, UserProfile, UserSerializer, UserManageSerializer, UserInstanceSerializer, SystemSerializer, \
+ SwitchLanguageSerializer, CaptchaSerializer
+from users.views.common import get_user_operation_object, get_re_password_details
user_cache = cache.caches['user_cache']
token_cache = cache.caches['token_cache']
@@ -30,10 +35,10 @@
class Profile(APIView):
@action(methods=['GET'], detail=False)
- @swagger_auto_schema(operation_summary="获取MaxKB相关信息",
- operation_id="获取MaxKB相关信息",
+ @swagger_auto_schema(operation_summary=_("Get MaxKB related information"),
+ operation_id=_("Get MaxKB related information"),
responses=result.get_api_response(SystemSerializer.get_response_body_api()),
- tags=['系统参数'])
+ tags=[_('System parameters')])
def get(self, request: Request):
return result.success(SystemSerializer.get_profile())
@@ -42,10 +47,10 @@ class User(APIView):
authentication_classes = [TokenAuth]
@action(methods=['GET'], detail=False)
- @swagger_auto_schema(operation_summary="获取当前用户信息",
- operation_id="获取当前用户信息",
+ @swagger_auto_schema(operation_summary=_("Get current user information"),
+ operation_id=_("Get current user information"),
responses=result.get_api_response(UserProfile.get_response_body_api()),
- tags=['用户'])
+ tags=[])
@has_permissions(PermissionConstants.USER_READ)
def get(self, request: Request):
return result.success(UserProfile.get_user_profile(request.user))
@@ -54,35 +59,63 @@ class Query(APIView):
authentication_classes = [TokenAuth]
@action(methods=['GET'], detail=False)
- @swagger_auto_schema(operation_summary="获取用户列表",
- operation_id="获取用户列表",
+ @swagger_auto_schema(operation_summary=_("Get user list"),
+ operation_id=_("Get user list"),
manual_parameters=UserSerializer.Query.get_request_params_api(),
responses=result.get_api_array_response(UserSerializer.Query.get_response_body_api()),
- tags=['用户'])
+ tags=[_("User management")])
@has_permissions(PermissionConstants.USER_READ)
def get(self, request: Request):
return result.success(
UserSerializer.Query(data={'email_or_username': request.query_params.get('email_or_username')}).list())
+class SwitchUserLanguageView(APIView):
+ authentication_classes = [TokenAuth]
+
+ @action(methods=['POST'], detail=False)
+ @swagger_auto_schema(operation_summary=_("Switch Language"),
+ operation_id=_("Switch Language"),
+ request_body=openapi.Schema(
+ type=openapi.TYPE_OBJECT,
+ required=['language'],
+ properties={
+ 'language': openapi.Schema(type=openapi.TYPE_STRING, title=_("language"),
+ description=_("language")),
+ }
+ ),
+ responses=result.get_default_response(),
+ tags=[_("User management")])
+ @log(menu='User management', operate='Switch Language',
+ get_operation_object=lambda r, k: {'name': r.user.username})
+ def post(self, request: Request):
+ data = {**request.data, 'user_id': request.user.id}
+ return result.success(SwitchLanguageSerializer(data=data).switch())
+
+
class ResetCurrentUserPasswordView(APIView):
authentication_classes = [TokenAuth]
@action(methods=['POST'], detail=False)
- @swagger_auto_schema(operation_summary="修改当前用户密码",
- operation_id="修改当前用户密码",
+ @swagger_auto_schema(operation_summary=_("Modify current user password"),
+ operation_id=_("Modify current user password"),
request_body=openapi.Schema(
type=openapi.TYPE_OBJECT,
required=['email', 'code', "password", 're_password'],
properties={
- 'code': openapi.Schema(type=openapi.TYPE_STRING, title="验证码", description="验证码"),
- 'password': openapi.Schema(type=openapi.TYPE_STRING, title="密码", description="密码"),
- 're_password': openapi.Schema(type=openapi.TYPE_STRING, title="密码",
- description="密码")
+ 'code': openapi.Schema(type=openapi.TYPE_STRING, title=_("Verification code"),
+ description=_("Verification code")),
+ 'password': openapi.Schema(type=openapi.TYPE_STRING, title=_("Password"),
+ description=_("Password")),
+ 're_password': openapi.Schema(type=openapi.TYPE_STRING, title=_("Password"),
+ description=_("Password"))
}
),
- responses=RePasswordSerializer().get_response_body_api(),
- tags=['用户'])
+ responses=result.get_default_response(),
+ tags=[_("User management")])
+ @log(menu='User management', operate='Modify current user password',
+ get_operation_object=lambda r, k: {'name': r.user.username},
+ get_details=get_re_password_details)
def post(self, request: Request):
data = {'email': request.user.email}
data.update(request.data)
@@ -90,7 +123,7 @@ def post(self, request: Request):
if serializer_obj.reset_password():
token_cache.delete(request.META.get('HTTP_AUTHORIZATION'))
return result.success(True)
- return result.error("修改密码失败")
+ return result.error(_("Failed to change password"))
class SendEmailToCurrentUserView(APIView):
@@ -98,10 +131,12 @@ class SendEmailToCurrentUserView(APIView):
@action(methods=['POST'], detail=False)
@permission_classes((AllowAny,))
- @swagger_auto_schema(operation_summary="发送邮件到当前用户",
- operation_id="发送邮件到当前用户",
+ @swagger_auto_schema(operation_summary=_("Send email to current user"),
+ operation_id=_("Send email to current user"),
responses=SendEmailSerializer().get_response_body_api(),
- tags=['用户'])
+ tags=[_("User management")])
+ @log(menu='User management', operate='Send email to current user',
+ get_operation_object=lambda r, k: {'name': r.user.username})
def post(self, request: Request):
serializer_obj = SendEmailSerializer(data={'email': request.user.email, 'type': "reset_password"})
if serializer_obj.is_valid(raise_exception=True):
@@ -113,24 +148,52 @@ class Logout(APIView):
@action(methods=['POST'], detail=False)
@permission_classes((AllowAny,))
- @swagger_auto_schema(operation_summary="登出",
- operation_id="登出",
+ @swagger_auto_schema(operation_summary=_("Sign out"),
+ operation_id=_("Sign out"),
responses=SendEmailSerializer().get_response_body_api(),
- tags=['用户'])
+ tags=[_("User management")])
+ @log(menu='User management', operate='Sign out',
+ get_operation_object=lambda r, k: {'name': r.user.username})
def post(self, request: Request):
token_cache.delete(request.META.get('HTTP_AUTHORIZATION'))
return result.success(True)
+def _get_details(request):
+ path = request.path
+ body = request.data
+ query = request.query_params
+ return {
+ 'path': path,
+ 'body': {**body, 'password': encryption(body.get('password', ''))},
+ 'query': query
+ }
+
+
+class CaptchaView(APIView):
+
+ @action(methods=['GET'], detail=False)
+ @swagger_auto_schema(operation_summary=_("Obtain graphical captcha"),
+ operation_id=_("Obtain graphical captcha"),
+ responses=CaptchaSerializer().get_response_body_api(),
+ security=[],
+ tags=[_("User management")])
+ def get(self, request: Request):
+ return result.success(CaptchaSerializer().generate())
+
+
class Login(APIView):
@action(methods=['POST'], detail=False)
- @swagger_auto_schema(operation_summary="登录",
- operation_id="登录",
+ @swagger_auto_schema(operation_summary=_("Log in"),
+ operation_id=_("Log in"),
request_body=LoginSerializer().get_request_body_api(),
responses=LoginSerializer().get_response_body_api(),
security=[],
- tags=['用户'])
+ tags=[_("User management")])
+ @log(menu='User management', operate='Log in', get_user=lambda r: {'username': r.data.get('username', None)},
+ get_details=_get_details,
+ get_operation_object=lambda r, k: {'name': r.data.get('username')})
def post(self, request: Request):
login_request = LoginSerializer(data=request.data)
# 校验请求参数
@@ -144,29 +207,36 @@ class Register(APIView):
@action(methods=['POST'], detail=False)
@permission_classes((AllowAny,))
- @swagger_auto_schema(operation_summary="用户注册",
- operation_id="用户注册",
+ @swagger_auto_schema(operation_summary=_("User registration"),
+ operation_id=_("User registration"),
request_body=RegisterSerializer().get_request_body_api(),
responses=RegisterSerializer().get_response_body_api(),
security=[],
- tags=['用户'])
+ tags=[_("User management")])
+ @log(menu='User management', operate='User registration',
+ get_operation_object=lambda r, k: {'name': r.data.get('username', None)},
+ get_user=lambda r: {'user_name': r.data.get('username', None)})
def post(self, request: Request):
serializer_obj = RegisterSerializer(data=request.data)
if serializer_obj.is_valid(raise_exception=True):
serializer_obj.save()
- return result.success("注册成功")
+ return result.success(_("Registration successful"))
class RePasswordView(APIView):
@action(methods=['POST'], detail=False)
@permission_classes((AllowAny,))
- @swagger_auto_schema(operation_summary="修改密码",
- operation_id="修改密码",
+ @swagger_auto_schema(operation_summary=_("Change password"),
+ operation_id=_("Change password"),
request_body=RePasswordSerializer().get_request_body_api(),
responses=RePasswordSerializer().get_response_body_api(),
security=[],
- tags=['用户'])
+ tags=[_("User management")])
+ @log(menu='User management', operate='Change password',
+ get_operation_object=lambda r, k: {'name': r.data.get('email', None)},
+ get_user=lambda r: {'user_name': None, 'email': r.data.get('email', None)},
+ get_details=get_re_password_details)
def post(self, request: Request):
serializer_obj = RePasswordSerializer(data=request.data)
return result.success(serializer_obj.reset_password())
@@ -176,12 +246,15 @@ class CheckCode(APIView):
@action(methods=['POST'], detail=False)
@permission_classes((AllowAny,))
- @swagger_auto_schema(operation_summary="校验验证码是否正确",
- operation_id="校验验证码是否正确",
+ @swagger_auto_schema(operation_summary=_("Check whether the verification code is correct"),
+ operation_id=_("Check whether the verification code is correct"),
request_body=CheckCodeSerializer().get_request_body_api(),
responses=CheckCodeSerializer().get_response_body_api(),
security=[],
- tags=['用户'])
+ tags=[_("User management")])
+ @log(menu='User management', operate='Check whether the verification code is correct',
+ get_operation_object=lambda r, k: {'name': r.data.get('email', None)},
+ get_user=lambda r: {'user_name': None, 'email': r.data.get('email', None)})
def post(self, request: Request):
return result.success(CheckCodeSerializer(data=request.data).is_valid(raise_exception=True))
@@ -189,12 +262,15 @@ def post(self, request: Request):
class SendEmail(APIView):
@action(methods=['POST'], detail=False)
- @swagger_auto_schema(operation_summary="发送邮件",
- operation_id="发送邮件",
+ @swagger_auto_schema(operation_summary=_("Send email"),
+ operation_id=_("Send email"),
request_body=SendEmailSerializer().get_request_body_api(),
responses=SendEmailSerializer().get_response_body_api(),
security=[],
- tags=['用户'])
+ tags=[_("User management")])
+ @log(menu='User management', operate='Send email',
+ get_operation_object=lambda r, k: {'name': r.data.get('email', None)},
+ get_user=lambda r: {'user_name': None, 'email': r.data.get('email', None)})
def post(self, request: Request):
serializer_obj = SendEmailSerializer(data=request.data)
if serializer_obj.is_valid(raise_exception=True):
@@ -205,16 +281,18 @@ class UserManage(APIView):
authentication_classes = [TokenAuth]
@action(methods=['POST'], detail=False)
- @swagger_auto_schema(operation_summary="添加用户",
- operation_id="添加用户",
+ @swagger_auto_schema(operation_summary=_("Add user"),
+ operation_id=_("Add user"),
request_body=UserManageSerializer.UserInstance.get_request_body_api(),
responses=result.get_api_response(UserInstanceSerializer.get_response_body_api()),
- tags=["用户管理"]
+ tags=[_("User management")]
)
@has_permissions(ViewPermission(
[RoleConstants.ADMIN],
[PermissionConstants.USER_READ],
compare=CompareConstants.AND))
+ @log(menu='User management', operate='Add user',
+ get_operation_object=lambda r, k: {'name': r.data.get('username', None)})
def post(self, request: Request):
return result.success(UserManageSerializer().save(request.data))
@@ -222,9 +300,9 @@ class Page(APIView):
authentication_classes = [TokenAuth]
@action(methods=['GET'], detail=False)
- @swagger_auto_schema(operation_summary="获取用户分页列表",
- operation_id="获取用户分页列表",
- tags=["用户管理"],
+ @swagger_auto_schema(operation_summary=_("Get user paginated list"),
+ operation_id=_("Get user paginated list"),
+ tags=[_("User management")],
manual_parameters=UserManageSerializer.Query.get_request_params_api(),
responses=result.get_page_api_response(UserInstanceSerializer.get_response_body_api()),
)
@@ -242,16 +320,19 @@ class RePassword(APIView):
authentication_classes = [TokenAuth]
@action(methods=['PUT'], detail=False)
- @swagger_auto_schema(operation_summary="修改密码",
- operation_id="修改密码",
+ @swagger_auto_schema(operation_summary=_("Change password"),
+ operation_id=_("Change password"),
manual_parameters=UserInstanceSerializer.get_request_params_api(),
request_body=UserManageSerializer.RePasswordInstance.get_request_body_api(),
responses=result.get_default_response(),
- tags=["用户管理"])
+ tags=[_("User management")])
@has_permissions(ViewPermission(
[RoleConstants.ADMIN],
[PermissionConstants.USER_READ],
compare=CompareConstants.AND))
+ @log(menu='User management', operate='Change password',
+ get_operation_object=lambda r, k: get_user_operation_object(k.get('user_id')),
+ get_details=get_re_password_details)
def put(self, request: Request, user_id):
return result.success(
UserManageSerializer.Operate(data={'id': user_id}).re_password(request.data, with_valid=True))
@@ -260,24 +341,26 @@ class Operate(APIView):
authentication_classes = [TokenAuth]
@action(methods=['DELETE'], detail=False)
- @swagger_auto_schema(operation_summary="删除用户",
- operation_id="删除用户",
+ @swagger_auto_schema(operation_summary=_("Delete user"),
+ operation_id=_("Delete user"),
manual_parameters=UserInstanceSerializer.get_request_params_api(),
responses=result.get_default_response(),
- tags=["用户管理"])
+ tags=[_("User management")])
@has_permissions(ViewPermission(
[RoleConstants.ADMIN],
[PermissionConstants.USER_READ],
compare=CompareConstants.AND))
+ @log(menu='User management', operate='Delete user',
+ get_operation_object=lambda r, k: get_user_operation_object(k.get('user_id')))
def delete(self, request: Request, user_id):
return result.success(UserManageSerializer.Operate(data={'id': user_id}).delete(with_valid=True))
@action(methods=['GET'], detail=False)
- @swagger_auto_schema(operation_summary="获取用户信息",
- operation_id="获取用户信息",
+ @swagger_auto_schema(operation_summary=_("Get user information"),
+ operation_id=_("Get user information"),
manual_parameters=UserInstanceSerializer.get_request_params_api(),
responses=result.get_api_response(UserInstanceSerializer.get_response_body_api()),
- tags=["用户管理"]
+ tags=[_("User management")]
)
@has_permissions(ViewPermission(
[RoleConstants.ADMIN],
@@ -287,17 +370,32 @@ def get(self, request: Request, user_id):
return result.success(UserManageSerializer.Operate(data={'id': user_id}).one(with_valid=True))
@action(methods=['PUT'], detail=False)
- @swagger_auto_schema(operation_summary="修改用户信息",
- operation_id="修改用户信息",
+ @swagger_auto_schema(operation_summary=_("Update user information"),
+ operation_id=_("Update user information"),
manual_parameters=UserInstanceSerializer.get_request_params_api(),
request_body=UserManageSerializer.UserEditInstance.get_request_body_api(),
responses=result.get_api_response(UserInstanceSerializer.get_response_body_api()),
- tags=["用户管理"]
+ tags=[_("User management")]
)
@has_permissions(ViewPermission(
[RoleConstants.ADMIN],
[PermissionConstants.USER_READ],
compare=CompareConstants.AND))
+ @log(menu='User management', operate='Update user information',
+ get_operation_object=lambda r, k: get_user_operation_object(k.get('user_id')))
def put(self, request: Request, user_id):
return result.success(
UserManageSerializer.Operate(data={'id': user_id}).edit(request.data, with_valid=True))
+
+
+class UserListView(APIView):
+ authentication_classes = [TokenAuth]
+
+ @swagger_auto_schema(operation_summary=_("Get user list by type"),
+ operation_id=_("Get user list by type"),
+ manual_parameters=UserSerializer.Query.get_request_params_api(),
+ responses=result.get_api_array_response(UserSerializer.Query.get_response_body_api()),
+ tags=[_("User management")])
+ @has_permissions(PermissionConstants.USER_READ)
+ def get(self, request: Request, type):
+ return result.success(UserSerializer().listByType(type, request.user.id))
diff --git a/config_example.yml b/config_example.yml
index b4310642751..824de3aa44b 100644
--- a/config_example.yml
+++ b/config_example.yml
@@ -1,12 +1,3 @@
-# 邮箱配置
-EMAIL_ADDRESS:
-EMAIL_USE_TLS: False
-EMAIL_USE_SSL: True
-EMAIL_HOST: smtp.qq.com
-EMAIL_PORT: 465
-EMAIL_HOST_USER:
-EMAIL_HOST_PASSWORD:
-
# 数据库链接信息
DB_NAME: maxkb
DB_HOST: localhost
@@ -18,3 +9,4 @@ DB_ENGINE: django.db.backends.postgresql_psycopg2
DEBUG: false
TIME_ZONE: Asia/Shanghai
+
diff --git a/installer/Dockerfile b/installer/Dockerfile
index 6462a8f5d53..796e8535ef3 100644
--- a/installer/Dockerfile
+++ b/installer/Dockerfile
@@ -1,11 +1,11 @@
-FROM ghcr.io/1panel-dev/maxkb-vector-model:v1.0.1 as vector-model
-FROM node:18-alpine3.18 as web-build
+FROM ghcr.io/1panel-dev/maxkb-vector-model:v1.0.1 AS vector-model
+FROM node:18-alpine3.18 AS web-build
COPY ui ui
RUN cd ui && \
npm install && \
npm run build && \
rm -rf ./node_modules
-FROM ghcr.io/1panel-dev/maxkb-python-pg:python3.11-pg15.6 as stage-build
+FROM ghcr.io/1panel-dev/maxkb-python-pg:python3.11-pg15.8 AS stage-build
ARG DEPENDENCIES=" \
python3-pip"
@@ -17,40 +17,63 @@ RUN apt-get update && \
COPY . /opt/maxkb/app
RUN mkdir -p /opt/maxkb/app /opt/maxkb/model /opt/maxkb/conf && \
- cp -f /opt/maxkb/app/installer/config.yaml /opt/maxkb/conf && \
rm -rf /opt/maxkb/app/ui
+
COPY --from=web-build ui /opt/maxkb/app/ui
WORKDIR /opt/maxkb/app
RUN python3 -m venv /opt/py3 && \
- pip install poetry --break-system-packages && \
+ pip install poetry==1.8.5 --break-system-packages && \
poetry config virtualenvs.create false && \
. /opt/py3/bin/activate && \
- if [ "$(uname -m)" = "x86_64" ]; then sed -i 's/^torch.*/torch = {version = "^2.2.1+cpu", source = "pytorch"}/g' pyproject.toml; fi && \
- poetry install
+ if [ "$(uname -m)" = "x86_64" ]; then sed -i 's/^torch.*/torch = {version = "2.6.0+cpu", source = "pytorch"}/g' pyproject.toml; fi && \
+ poetry install && \
+ export MAXKB_CONFIG_TYPE=ENV && python3 /opt/maxkb/app/apps/manage.py compilemessages
-FROM ghcr.io/1panel-dev/maxkb-python-pg:python3.11-pg15.6
+FROM ghcr.io/1panel-dev/maxkb-python-pg:python3.11-pg15.8
ARG DOCKER_IMAGE_TAG=dev \
BUILD_AT \
GITHUB_COMMIT
-ENV MAXKB_VERSION ${DOCKER_IMAGE_TAG} (build at ${BUILD_AT}, commit: ${GITHUB_COMMIT})
+ENV MAXKB_VERSION="${DOCKER_IMAGE_TAG} (build at ${BUILD_AT}, commit: ${GITHUB_COMMIT})" \
+ MAXKB_CONFIG_TYPE=ENV \
+ MAXKB_DB_NAME=maxkb \
+ MAXKB_DB_HOST=127.0.0.1 \
+ MAXKB_DB_PORT=5432 \
+ MAXKB_DB_USER=root \
+ MAXKB_DB_PASSWORD=Password123@postgres \
+ MAXKB_DB_MAX_OVERFLOW=80 \
+ MAXKB_EMBEDDING_MODEL_NAME=/opt/maxkb/model/embedding/shibing624_text2vec-base-chinese \
+ MAXKB_EMBEDDING_MODEL_PATH=/opt/maxkb/model/embedding \
+ MAXKB_SANDBOX=1 \
+ LANG=en_US.UTF-8 \
+ PATH=/opt/py3/bin:$PATH \
+ POSTGRES_USER=root \
+ POSTGRES_PASSWORD=Password123@postgres \
+ POSTGRES_MAX_CONNECTIONS=1000 \
+ PIP_TARGET=/opt/maxkb/app/sandbox/python-packages \
+ PYTHONPATH=/opt/maxkb/app/sandbox/python-packages \
+ PYTHONUNBUFFERED=1
+
WORKDIR /opt/maxkb/app
COPY --from=stage-build /opt/maxkb /opt/maxkb
COPY --from=stage-build /opt/py3 /opt/py3
COPY --from=vector-model /opt/maxkb/app/model /opt/maxkb/model
-ENV LANG=en_US.UTF-8 \
- PATH=/opt/py3/bin:$PATH
-
-ENV POSTGRES_USER root
-ENV POSTGRES_PASSWORD Password123@postgres
-
RUN chmod 755 /opt/maxkb/app/installer/run-maxkb.sh && \
cp -r /opt/maxkb/model/base/hub /opt/maxkb/model/tokenizer && \
cp -f /opt/maxkb/app/installer/run-maxkb.sh /usr/bin/run-maxkb.sh && \
- cp -f /opt/maxkb/app/installer/init.sql /docker-entrypoint-initdb.d
+ cp -f /opt/maxkb/app/installer/init.sql /docker-entrypoint-initdb.d && \
+ curl -L --connect-timeout 120 -m 1800 https://resource.fit2cloud.com/maxkb/ffmpeg/get-ffmpeg-linux | sh && \
+ mkdir -p /opt/maxkb/app/sandbox/python-packages && \
+ find /opt/maxkb/app -mindepth 1 -not -name 'sandbox' -exec chmod 700 {} + && \
+ chmod 755 /tmp && \
+ useradd --no-create-home --home /opt/maxkb/app/sandbox sandbox -g root && \
+ chown -R sandbox:root /opt/maxkb/app/sandbox && \
+ chmod g-x /usr/local/bin/* /usr/bin/* /bin/* /usr/sbin/* /sbin/* /usr/lib/postgresql/15/bin/* && \
+ chmod g+x /usr/local/bin/python* && \
+ find /etc/ -type f ! -path '/etc/resolv.conf' ! -path '/etc/hosts' | xargs chmod g-rx
EXPOSE 8080
ENTRYPOINT ["bash", "-c"]
-CMD [ "/usr/bin/run-maxkb.sh" ]
\ No newline at end of file
+CMD [ "/usr/bin/run-maxkb.sh" ]
diff --git a/installer/Dockerfile-python-pg b/installer/Dockerfile-python-pg
index eb250146881..f871ac4ef4f 100644
--- a/installer/Dockerfile-python-pg
+++ b/installer/Dockerfile-python-pg
@@ -1,14 +1,19 @@
-FROM postgres:15.6-bookworm
+FROM python:3.11-slim-bullseye AS python-stage
+FROM postgres:15.8-bullseye
ARG DEPENDENCIES=" \
+ libexpat1-dev \
+ libffi-dev \
curl \
+ ca-certificates \
vim \
- python3.11-mini \
- python3.11-venv \
+ gettext \
postgresql-15-pgvector"
RUN ln -sf /usr/share/zoneinfo/Asia/Shanghai /etc/localtime && \
echo "Asia/Shanghai" > /etc/timezone && \
apt-get update && apt-get install -y --no-install-recommends $DEPENDENCIES && \
apt-get clean all && \
- rm -rf /var/lib/apt/lists/*
\ No newline at end of file
+ rm -rf /var/lib/apt/lists/*
+
+COPY --from=python-stage /usr/local /usr/local
\ No newline at end of file
diff --git a/installer/Dockerfile-vector-model b/installer/Dockerfile-vector-model
index b44cadc454c..a7326614bc8 100644
--- a/installer/Dockerfile-vector-model
+++ b/installer/Dockerfile-vector-model
@@ -1,4 +1,4 @@
-FROM python:3.11-slim-bookworm as vector-model
+FROM python:3.11-slim-bookworm AS vector-model
COPY installer/install_model.py install_model.py
RUN pip3 install --upgrade pip setuptools && \
diff --git a/installer/config.yaml b/installer/config.yaml
index c9f45db869f..8127fc9ab67 100644
--- a/installer/config.yaml
+++ b/installer/config.yaml
@@ -13,7 +13,7 @@ DB_HOST: 127.0.0.1
DB_PORT: 5432
DB_USER: root
DB_PASSWORD: Password123@postgres
-DB_ENGINE: django.db.backends.postgresql_psycopg2
+DB_ENGINE: dj_db_conn_pool.backends.postgresql
EMBEDDING_MODEL_PATH: /opt/maxkb/model/embedding
EMBEDDING_MODEL_NAME: /opt/maxkb/model/embedding/shibing624_text2vec-base-chinese
diff --git a/installer/run-maxkb.sh b/installer/run-maxkb.sh
index 597da7f025c..238875e06b1 100644
--- a/installer/run-maxkb.sh
+++ b/installer/run-maxkb.sh
@@ -1,7 +1,7 @@
#!/bin/bash
-
+rm -f /opt/maxkb/app/tmp/*.pid
# Start postgresql
-docker-entrypoint.sh postgres &
+docker-entrypoint.sh postgres -c max_connections=${POSTGRES_MAX_CONNECTIONS} &
sleep 10
# Wait postgresql
until pg_isready --host=127.0.0.1; do sleep 1 && echo "waiting for postgres"; done
diff --git a/installer/start-maxkb.sh b/installer/start-maxkb.sh
new file mode 100644
index 00000000000..4e88eff52b6
--- /dev/null
+++ b/installer/start-maxkb.sh
@@ -0,0 +1,3 @@
+#!/bin/bash
+rm -f /opt/maxkb/app/tmp/*.pid
+python /opt/maxkb/app/main.py start
\ No newline at end of file
diff --git a/main.py b/main.py
index dbe48e7e32d..5c6413ee4a4 100644
--- a/main.py
+++ b/main.py
@@ -2,6 +2,7 @@
import logging
import os
import sys
+import time
import django
from django.core import management
@@ -43,8 +44,40 @@ def perform_db_migrate():
def start_services():
- management.call_command('migrate')
- management.call_command('runserver', "0.0.0.0:8080")
+ services = args.services if isinstance(args.services, list) else [args.services]
+ start_args = []
+ if args.daemon:
+ start_args.append('--daemon')
+ if args.force:
+ start_args.append('--force')
+ if args.worker:
+ start_args.extend(['--worker', str(args.worker)])
+ else:
+ worker = os.environ.get('CORE_WORKER')
+ if isinstance(worker, str) and worker.isdigit():
+ start_args.extend(['--worker', worker])
+
+ try:
+ management.call_command(action, *services, *start_args)
+ except KeyboardInterrupt:
+ logging.info('Cancel ...')
+ time.sleep(2)
+ except Exception as exc:
+ logging.error("Start service error {}: {}".format(services, exc))
+ time.sleep(2)
+
+
+def dev():
+    services = args.services if isinstance(args.services, list) else [args.services]
+ if services.__contains__('web'):
+ management.call_command('runserver', "0.0.0.0:8080")
+ elif services.__contains__('celery'):
+ management.call_command('celery', 'celery')
+ elif services.__contains__('local_model'):
+ os.environ.setdefault('SERVER_NAME', 'local_model')
+ from smartdoc.const import CONFIG
+ bind = f'{CONFIG.get("LOCAL_MODEL_HOST")}:{CONFIG.get("LOCAL_MODEL_PORT")}'
+ management.call_command('runserver', bind)
if __name__ == '__main__':
@@ -60,16 +93,31 @@ def start_services():
)
parser.add_argument(
'action', type=str,
- choices=("start", "upgrade_db", "collect_static"),
+ choices=("start", "dev", "upgrade_db", "collect_static"),
help="Action to run"
)
- args = parser.parse_args()
+ args, e = parser.parse_known_args()
+ parser.add_argument(
+ "services", type=str, default='all' if args.action == 'start' else 'web', nargs="*",
+ choices=("all", "web", "task") if args.action == 'start' else ("web", "celery", 'local_model'),
+ help="The service to start",
+ )
+ parser.add_argument('-d', '--daemon', nargs="?", const=True)
+ parser.add_argument('-w', '--worker', type=int, nargs="?")
+ parser.add_argument('-f', '--force', nargs="?", const=True)
+ args = parser.parse_args()
action = args.action
if action == "upgrade_db":
perform_db_migrate()
elif action == "collect_static":
collect_static()
+ elif action == 'dev':
+ collect_static()
+ perform_db_migrate()
+ dev()
else:
collect_static()
+ perform_db_migrate()
start_services()
+
diff --git a/package-lock.json b/package-lock.json
new file mode 100644
index 00000000000..b9a9b8c63c4
--- /dev/null
+++ b/package-lock.json
@@ -0,0 +1,7 @@
+{
+ "name": "MaxKB",
+ "lockfileVersion": 3,
+ "requires": true,
+ "packages": {}
+}
+
diff --git a/pyproject.toml b/pyproject.toml
index 3eddbace0c8..ea87b807d8a 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,44 +1,74 @@
[tool.poetry]
name = "maxkb"
version = "0.1.0"
-description = "智能知识库"
+description = "智能知识库问答系统"
authors = ["shaohuzhang1 "]
readme = "README.md"
+package-mode = false
[tool.poetry.dependencies]
-python = "^3.11"
-django = "4.1.13"
-djangorestframework = "3.14.0"
+python = ">=3.11,<3.12"
+django = "4.2.20"
+djangorestframework = "3.16.0"
drf-yasg = "1.21.7"
django-filter = "23.2"
-langchain = "^0.1.11"
-psycopg2-binary = "2.9.7"
-jieba = "^0.42.1"
-diskcache = "^5.6.3"
-pillow = "^10.2.0"
-filetype = "^1.2.0"
-torch = "^2.2.1"
-sentence-transformers = "^2.2.2"
-blinker = "^1.6.3"
-openai = "^1.13.3"
-tiktoken = "^0.5.1"
-qianfan = "^0.3.6.1"
-pycryptodome = "^3.19.0"
-beautifulsoup4 = "^4.12.2"
-html2text = "^2024.2.26"
-langchain-openai = "^0.0.8"
-django-ipware = "^6.0.4"
-django-apscheduler = "^0.6.2"
-pymupdf = "1.24.1"
-python-docx = "^1.1.0"
-xlwt = "^1.3.0"
-dashscope = "^1.17.0"
-zhipuai = "^2.0.1"
-httpx = "^0.27.0"
-httpx-sse = "^0.4.0"
-websocket-client = "^1.7.0"
-langchain-google-genai = "^1.0.3"
-
+langchain = "0.3.23"
+langchain-openai = "0.3.12"
+langchain-anthropic = "0.3.12"
+langchain-community = "0.3.21"
+langchain-deepseek = "0.1.3"
+langchain-google-genai = "2.1.2"
+langchain-mcp-adapters = "0.0.11"
+langchain-huggingface = "0.1.2"
+langchain-ollama = "0.3.2"
+langgraph = "0.3.27"
+mcp = "1.8.0"
+psycopg2-binary = "2.9.10"
+jieba = "0.42.1"
+diskcache = "5.6.3"
+pillow = "10.4.0"
+filetype = "1.2.0"
+torch = "2.6.0"
+sentence-transformers = "4.0.2"
+openai = "1.72.0"
+tiktoken = "0.7.0"
+qianfan = "0.3.18"
+pycryptodome = "3.22.0"
+beautifulsoup4 = "4.13.3"
+html2text = "2024.2.26"
+django-ipware = "6.0.5"
+django-apscheduler = "0.6.2"
+pymupdf = "1.24.9"
+pypdf = "4.3.1"
+rapidocr-onnxruntime = "1.3.24"
+python-docx = "1.1.2"
+xlwt = "1.3.0"
+dashscope = "1.23.1"
+zhipuai = "2.1.5.20250410"
+httpx = "0.27.2"
+httpx-sse = "0.4.0"
+websockets = "13.1"
+openpyxl = "3.1.5"
+xlrd = "2.0.1"
+gunicorn = "23.0.0"
+python-daemon = "3.0.1"
+boto3 = "1.37.31"
+tencentcloud-sdk-python = "3.0.1357"
+xinference-client = "1.4.1"
+psutil = "6.1.1"
+celery = { extras = ["sqlalchemy"], version = "5.5.1" }
+django-celery-beat = "2.7.0"
+celery-once = "3.0.1"
+anthropic = "0.49.0"
+pylint = "3.3.6"
+pydub = "0.25.1"
+cffi = "1.17.1"
+pysilk = "0.0.1"
+django-db-connection-pool = "1.2.5"
+opencv-python-headless = "4.11.0.86"
+pymysql = "1.1.1"
+accelerate = "1.6.0"
+captcha = "0.7.1"
[build-system]
requires = ["poetry-core"]
build-backend = "poetry.core.masonry.api"
diff --git a/ui/env.d.ts b/ui/env.d.ts
index 52f54527078..08bb5e826b0 100644
--- a/ui/env.d.ts
+++ b/ui/env.d.ts
@@ -1,5 +1,7 @@
///
declare module 'element-plus/dist/locale/zh-cn.mjs'
+declare module 'element-plus/dist/locale/en.mjs'
+declare module 'element-plus/dist/locale/zh-tw.mjs'
declare module 'markdown-it-task-lists'
declare module 'markdown-it-abbr'
declare module 'markdown-it-anchor'
@@ -8,7 +10,10 @@ declare module 'markdown-it-sub'
declare module 'markdown-it-sup'
declare module 'markdown-it-toc-done-right'
declare module 'katex'
+interface Window {
+  sendMessage?: (message: string, other_params_data: any) => void
+}
interface ImportMeta {
readonly env: ImportMetaEnv
}
-declare type Recordable = Record;
+declare type Recordable = Record
diff --git a/ui/index.html b/ui/index.html
index 1f9ca13a8b2..09bec9ae48c 100644
--- a/ui/index.html
+++ b/ui/index.html
@@ -3,8 +3,12 @@
-
-
+
+
%VITE_APP_TITLE%
diff --git a/ui/package.json b/ui/package.json
index 229b9d3a701..cee7a41c8fd 100644
--- a/ui/package.json
+++ b/ui/package.json
@@ -4,7 +4,7 @@
"private": true,
"scripts": {
"dev": "vite",
- "build": "run-p type-check build-only",
+ "build": "set NODE_OPTIONS=--max_old_space_size=4096 && run-p type-check build-only",
"preview": "vite preview",
"test:unit": "vitest",
"build-only": "vite build",
@@ -13,26 +13,28 @@
"format": "prettier --write src/"
},
"dependencies": {
+ "@antv/layout": "^0.3.1",
+ "@codemirror/theme-one-dark": "^6.1.2",
"@ctrl/tinycolor": "^4.1.0",
+ "@logicflow/core": "^1.2.27",
+ "@logicflow/extension": "^1.2.27",
+ "@types/sortablejs": "^1.15.8",
+ "@vavt/cm-extension": "^1.6.0",
"@vueuse/core": "^10.9.0",
- "axios": "^0.28.0",
+ "@wecom/jssdk": "^2.1.0",
+ "axios": "^1.8.3",
+ "codemirror": "^6.0.1",
"cropperjs": "^1.6.2",
+ "dingtalk-jsapi": "^2.15.6",
"echarts": "^5.5.0",
- "element-plus": "^2.5.6",
+ "element-plus": "^2.9.1",
+ "file-saver": "^2.0.5",
+ "highlight.js": "^11.9.0",
"install": "^0.13.0",
"katex": "^0.16.10",
"lodash": "^4.17.21",
- "markdown-it": "^13.0.2",
- "markdown-it-abbr": "^1.0.4",
- "markdown-it-anchor": "^8.6.7",
- "markdown-it-footnote": "^3.0.3",
- "markdown-it-highlightjs": "^4.0.1",
- "markdown-it-sub": "^1.0.0",
- "markdown-it-sup": "^1.0.0",
- "markdown-it-task-lists": "^2.1.1",
- "markdown-it-toc-done-right": "^4.2.0",
- "md-editor-v3": "4.12.1",
- "medium-zoom": "^1.1.0",
+ "marked": "^12.0.2",
+ "md-editor-v3": "^4.16.7",
"mermaid": "^10.9.0",
"mitt": "^3.0.0",
"moment": "^2.30.1",
@@ -40,18 +42,25 @@
"nprogress": "^0.2.0",
"pinia": "^2.1.6",
"pinyin-pro": "^3.18.2",
+ "recorder-core": "^1.3.24040900",
"screenfull": "^6.0.2",
+ "sortablejs": "^1.15.6",
+ "use-element-plus-theme": "^0.0.5",
"vue": "^3.3.4",
"vue-clipboard3": "^2.0.0",
+ "vue-codemirror": "^6.1.1",
+ "vue-demi": "latest",
+ "vue-draggable-plus": "^0.6.0",
"vue-i18n": "^9.13.1",
- "vue-router": "^4.2.4"
+ "vue-router": "^4.2.4",
+ "vue3-menus": "^1.1.2",
+ "vuedraggable": "^4.1.0"
},
"devDependencies": {
"@rushstack/eslint-patch": "^1.3.2",
"@tsconfig/node18": "^18.2.0",
+ "@types/file-saver": "^2.0.7",
"@types/jsdom": "^21.1.1",
- "@types/markdown-it": "^13.0.7",
- "@types/markdown-it-highlightjs": "^3.3.4",
"@types/node": "^18.17.5",
"@types/nprogress": "^0.2.0",
"@vitejs/plugin-vue": "^4.3.1",
@@ -64,7 +73,7 @@
"jsdom": "^22.1.0",
"npm-run-all": "^4.1.5",
"prettier": "^3.0.0",
- "sass": "^1.66.1",
+ "sass": "1.66.1",
"typescript": "~5.1.6",
"unplugin-vue-define-options": "^1.3.18",
"vite": "^4.4.9",
diff --git a/ui/public/MaxKB.gif b/ui/public/MaxKB.gif
new file mode 100644
index 00000000000..055d49a6a11
Binary files /dev/null and b/ui/public/MaxKB.gif differ
diff --git a/ui/public/embeb.js b/ui/public/embeb.js
deleted file mode 100644
index 23517cf59d4..00000000000
--- a/ui/public/embeb.js
+++ /dev/null
@@ -1,307 +0,0 @@
-function auth(token, protocol, host) {
- const XML = new XMLHttpRequest()
- XML.open('POST', `${protocol}//${host}/api/application/authentication`, false)
- XML.setRequestHeader('Content-Type', 'application/json')
- res = XML.send(JSON.stringify({ access_token: token }))
- return XML.status == 200
-}
-
-const guideHtml=`
-
-
-
-
-
🌟 遇见问题,不再有障碍!
-
你好,我是你的智能小助手。
- 点我,开启高效解答模式,让问题变成过去式。
-
- 我知道了
-
-
-
-`
-const chatButtonHtml=
-``
-
-
-
-const getChatContainerHtml=(protocol,host,token)=>{
- return `
-
-
-
-
`
-}
-/**
- * 初始化引导
- * @param {*} root
- */
-const initGuide=(root)=>{
- root.insertAdjacentHTML("beforeend",guideHtml)
- const button=root.querySelector(".maxkb-button")
- const close_icon=root.querySelector('.maxkb-close')
- const close_func=()=>{
- root.removeChild(root.querySelector('.maxkb-tips'))
- root.removeChild(root.querySelector('.maxkb-mask'))
- localStorage.setItem('maxkbMaskTip',true)
- }
- button.onclick=close_func
- close_icon.onclick=close_func
-}
-const initChat=(root)=>{
- // 添加对话icon
- root.insertAdjacentHTML("beforeend",chatButtonHtml)
- // 添加对话框
- root.insertAdjacentHTML('beforeend',getChatContainerHtml(window.maxkbChatConfig.protocol,window.maxkbChatConfig.host,window.maxkbChatConfig.token))
- // 按钮元素
- const chat_button=root.querySelector('.maxkb-chat-button')
- // 对话框元素
- const chat_container=root.querySelector('#maxkb-chat-container')
-
- const viewport=root.querySelector('.maxkb-openviewport')
- const closeviewport=root.querySelector('.maxkb-closeviewport')
- const close_func=()=>{
- chat_container.style['display']=chat_container.style['display']=='block'?'none':'block'
- }
- close_icon=chat_container.querySelector('.maxkb-close')
- chat_button.onclick = close_func
- close_icon.onclick=close_func
- const viewport_func=()=>{
- if(chat_container.classList.contains('maxkb-enlarge')){
- chat_container.classList.remove("maxkb-enlarge");
- viewport.classList.remove('maxkb-viewportnone')
- closeviewport.classList.add('maxkb-viewportnone')
- }else{
- chat_container.classList.add("maxkb-enlarge");
- viewport.classList.add('maxkb-viewportnone')
- closeviewport.classList.remove('maxkb-viewportnone')
- }
- }
- viewport.onclick=viewport_func
- closeviewport.onclick=viewport_func
-}
-/**
- * 第一次进来的引导提示
- */
-function initMaxkb(){
- const maxkb=document.createElement('div')
- const root=document.createElement('div')
- root.id="maxkb"
- initMaxkbStyle(maxkb)
- maxkb.appendChild(root)
- document.body.appendChild(maxkb)
- const maxkbMaskTip=localStorage.getItem('maxkbMaskTip')
- if(maxkbMaskTip==null){
- initGuide(root)
- }
- initChat(root)
-}
-
-
-// 初始化全局样式
-function initMaxkbStyle(root){
- style=document.createElement('style')
- style.type='text/css'
- style.innerText= `
- /* 放大 */
- #maxkb .maxkb-enlarge {
- width: 50%!important;
- height: 100%!important;
- bottom: 0!important;
- right: 0 !important;
- }
- @media only screen and (max-width: 768px){
- #maxkb .maxkb-enlarge {
- width: 100%!important;
- height: 100%!important;
- right: 0 !important;
- bottom: 0!important;
- }
- }
-
- /* 引导 */
-
- #maxkb .maxkb-mask {
- position: fixed;
- z-index: 999;
- background-color: transparent;
- height: 100%;
- width: 100%;
- top: 0;
- left: 0;
- }
- #maxkb .maxkb-mask .maxkb-content {
- width: 45px;
- height: 50px;
- box-shadow: 1px 1px 1px 2000px rgba(0,0,0,.6);
- border-radius: 50% 0 0 50%;
- position: absolute;
- right: 0;
- bottom: 42px;
- z-index: 1000;
- }
- #maxkb .maxkb-tips {
- position: fixed;
- bottom: 30px;
- right: 60px;
- padding: 22px 24px 24px;
- border-radius: 6px;
- color: #ffffff;
- font-size: 14px;
- background: #3370FF;
- z-index: 1000;
- }
- #maxkb .maxkb-tips .maxkb-arrow {
- position: absolute;
- background: #3370FF;
- width: 10px;
- height: 10px;
- pointer-events: none;
- transform: rotate(45deg);
- box-sizing: border-box;
- /* left */
- right: -5px;
- bottom: 33px;
- border-left-color: transparent;
- border-bottom-color: transparent
- }
- #maxkb .maxkb-tips .maxkb-title {
- font-size: 20px;
- font-weight: 500;
- margin-bottom: 8px;
- }
- #maxkb .maxkb-tips .maxkb-button {
- text-align: right;
- margin-top: 24px;
- }
- #maxkb .maxkb-tips .maxkb-button button {
- border-radius: 4px;
- background: #FFF;
- padding: 3px 12px;
- color: #3370FF;
- cursor: pointer;
- outline: none;
- border: none;
- }
- #maxkb .maxkb-tips .maxkb-button button::after{
- border: none;
- }
- #maxkb .maxkb-tips .maxkb-close {
- position: absolute;
- right: 20px;
- top: 20px;
- cursor: pointer;
-
- }
- #maxkb-chat-container {
- width: 420px;
- height: 600px;
- display:none;
- }
- @media only screen and (max-width: 768px) {
- #maxkb-chat-container {
- width: 100%;
- height: 70%;
- right: 0 !important;
- }
- }
-
- #maxkb .maxkb-chat-button{
- position: fixed;
- bottom: 30px;
- right: 0;
- cursor: pointer;
- }
- #maxkb #maxkb-chat-container{
- z-index:10000;position: relative;
- border-radius: 8px;
- border: 1px solid var(--N300, #DEE0E3);
- background: linear-gradient(188deg, rgba(235, 241, 255, 0.20) 39.6%, rgba(231, 249, 255, 0.20) 94.3%), #EFF0F1;
- box-shadow: 0px 4px 8px 0px rgba(31, 35, 41, 0.10);
- position: fixed;bottom: 20px;right: 45px;overflow: hidden;
- }
- #maxkb #maxkb-chat-container .maxkb-chat-close{
- position: absolute;
- top: 15px;
- right: 10px;
- cursor: pointer;
- }
- #maxkb #maxkb-chat-container .maxkb-openviewport{
- position: absolute;
- top: 15px;
- right: 50px;
- cursor: pointer;
- }
- #maxkb #maxkb-chat-container .maxkb-closeviewport{
- position: absolute;
- top: 15px;
- right: 50px;
- cursor: pointer;
- }
- #maxkb #maxkb-chat-container .maxkb-viewportnone{
- display:none;
- }
- #maxkb #maxkb-chat-container #maxkb-chat{
- height:100%;
- width:100%;
- border: none;
-}
- #maxkb #maxkb-chat-container {
- animation: appear .4s ease-in-out;
- }
- @keyframes appear {
- from {
- height: 0;;
- }
-
- to {
- height: 600px;
- }
- }`
- root.appendChild(style)
-}
-
-function embedChatbot() {
- const t = window.maxkbChatConfig
- check = auth(t.token, t.protocol, t.host)
- if (t && t.token && t.protocol && t.host && check) {
- // 初始化maxkb智能小助手
- initMaxkb()
- } else console.error('invalid parameter')
-}
-window.onload = embedChatbot
diff --git a/ui/public/fx/bochaai/detail.md b/ui/public/fx/bochaai/detail.md
new file mode 100644
index 00000000000..35481d2658c
--- /dev/null
+++ b/ui/public/fx/bochaai/detail.md
@@ -0,0 +1,16 @@
+## 概述
+
+博查工具是一个支持自然语言搜索的 Web Search API,从近百亿网页和生态内容源中搜索高质量世界知识,包括新闻、图片、视频、百科、机酒、学术等。
+
+
+## 配置
+
+1. 获取API Key
+在[博查开放平台](https://open.bochaai.com/overview) 上申请 API 密钥。
+
+2. 在函数库中配置
+在函数库的博查函数面板中,点击 … > 启用参数,填写 API 密钥,并启用该函数。
+
+3. 在应用中使用
+在高级编排应用中,点击添加组件->函数库->博查,设置使用参数。
+
diff --git a/ui/public/fx/bochaai/icon.png b/ui/public/fx/bochaai/icon.png
new file mode 100644
index 00000000000..530a086ed20
Binary files /dev/null and b/ui/public/fx/bochaai/icon.png differ
diff --git a/ui/public/fx/google_search/detail.md b/ui/public/fx/google_search/detail.md
new file mode 100644
index 00000000000..ab68b6ab588
--- /dev/null
+++ b/ui/public/fx/google_search/detail.md
@@ -0,0 +1,21 @@
+## 概述
+
+Google 搜索工具是一个实时 API,可提取搜索引擎结果,提供来自 Google 的结构化数据。它支持各种搜索类型,包括 Web、图像、新闻和地图。
+
+## 配置
+
+1. 创建 Google Custom Search Engine
+在[Programmable Search Engine](https://programmablesearchengine.google.com/)中 添加 Search Engine
+
+2. 获取cx参数
+进入添加的引擎详情中,在【基本】菜单中获取搜索引擎的ID,即cx。
+
+3. 获取 API Key
+打开 https://developers.google.com/custom-search/v1/overview?hl=zh-cn 获取API Key。
+
+4. 配置启动参数
+在Google 搜索函数的启动参数中填写配置以上参数,并启用该函数。
+
+5. 在应用中使用
+在高级编排应用中,点击添加组件->函数库->Google搜索,设置使用参数。
+
diff --git a/ui/public/fx/google_search/icon.png b/ui/public/fx/google_search/icon.png
new file mode 100644
index 00000000000..7b903159b0c
Binary files /dev/null and b/ui/public/fx/google_search/icon.png differ
diff --git a/ui/public/fx/img/MySQL_app_used.jpg b/ui/public/fx/img/MySQL_app_used.jpg
new file mode 100644
index 00000000000..42db47f9c4c
Binary files /dev/null and b/ui/public/fx/img/MySQL_app_used.jpg differ
diff --git a/ui/public/fx/img/MySQL_setting.jpg b/ui/public/fx/img/MySQL_setting.jpg
new file mode 100644
index 00000000000..206c35a8b23
Binary files /dev/null and b/ui/public/fx/img/MySQL_setting.jpg differ
diff --git a/ui/public/fx/img/PostgreSQL_app_used.jpg b/ui/public/fx/img/PostgreSQL_app_used.jpg
new file mode 100644
index 00000000000..7fee014da03
Binary files /dev/null and b/ui/public/fx/img/PostgreSQL_app_used.jpg differ
diff --git a/ui/public/fx/img/PostgreSQL_setting.jpg b/ui/public/fx/img/PostgreSQL_setting.jpg
new file mode 100644
index 00000000000..e279a26c907
Binary files /dev/null and b/ui/public/fx/img/PostgreSQL_setting.jpg differ
diff --git a/ui/public/fx/img/bocha_APIKey.jpg b/ui/public/fx/img/bocha_APIKey.jpg
new file mode 100644
index 00000000000..998aa06f0f3
Binary files /dev/null and b/ui/public/fx/img/bocha_APIKey.jpg differ
diff --git a/ui/public/fx/img/bocha_app_used.jpg b/ui/public/fx/img/bocha_app_used.jpg
new file mode 100644
index 00000000000..71fece501d0
Binary files /dev/null and b/ui/public/fx/img/bocha_app_used.jpg differ
diff --git a/ui/public/fx/img/bocha_setting.jpg b/ui/public/fx/img/bocha_setting.jpg
new file mode 100644
index 00000000000..86daddd31d3
Binary files /dev/null and b/ui/public/fx/img/bocha_setting.jpg differ
diff --git a/ui/public/fx/img/google_APIKey.jpg b/ui/public/fx/img/google_APIKey.jpg
new file mode 100644
index 00000000000..4b6e069e0e6
Binary files /dev/null and b/ui/public/fx/img/google_APIKey.jpg differ
diff --git a/ui/public/fx/img/google_AddSearchEngine.jpg b/ui/public/fx/img/google_AddSearchEngine.jpg
new file mode 100644
index 00000000000..c0182b406d1
Binary files /dev/null and b/ui/public/fx/img/google_AddSearchEngine.jpg differ
diff --git a/ui/public/fx/img/google_app_used.jpg b/ui/public/fx/img/google_app_used.jpg
new file mode 100644
index 00000000000..7b4b492db90
Binary files /dev/null and b/ui/public/fx/img/google_app_used.jpg differ
diff --git a/ui/public/fx/img/google_cx.jpg b/ui/public/fx/img/google_cx.jpg
new file mode 100644
index 00000000000..ce13b5c4a28
Binary files /dev/null and b/ui/public/fx/img/google_cx.jpg differ
diff --git a/ui/public/fx/img/google_setting.jpg b/ui/public/fx/img/google_setting.jpg
new file mode 100644
index 00000000000..501c580f6ba
Binary files /dev/null and b/ui/public/fx/img/google_setting.jpg differ
diff --git a/ui/public/fx/img/langsearch_APIKey.jpg b/ui/public/fx/img/langsearch_APIKey.jpg
new file mode 100644
index 00000000000..0705d54a309
Binary files /dev/null and b/ui/public/fx/img/langsearch_APIKey.jpg differ
diff --git a/ui/public/fx/img/langsearch_app_used.jpg b/ui/public/fx/img/langsearch_app_used.jpg
new file mode 100644
index 00000000000..85db6755f17
Binary files /dev/null and b/ui/public/fx/img/langsearch_app_used.jpg differ
diff --git a/ui/public/fx/img/langsearch_setting.jpg b/ui/public/fx/img/langsearch_setting.jpg
new file mode 100644
index 00000000000..967cd304084
Binary files /dev/null and b/ui/public/fx/img/langsearch_setting.jpg differ
diff --git a/ui/public/fx/langsearch/detail.md b/ui/public/fx/langsearch/detail.md
new file mode 100644
index 00000000000..49f7fa79168
--- /dev/null
+++ b/ui/public/fx/langsearch/detail.md
@@ -0,0 +1,17 @@
+## 概述
+
+LangSearch 是一个提供免费Web Search API和Rerank API的服务,支持新闻、图像、视频等内容。它结合了关键词和向量进行混合搜索,以提高准确性。
+
+
+## 配置
+
+1. 获取API Key
+在[LangSearch](https://langsearch.com/overview) 上申请 API 密钥。
+
+2. 在函数库中配置
+在函数库的LangSearch函数面板中,点击 … > 启用参数,填写 API 密钥,并启用该函数。
+
+3. 在应用中使用
+在高级编排应用中,点击添加组件->函数库->LangSearch,设置使用参数。
+
+
\ No newline at end of file
diff --git a/ui/public/fx/langsearch/icon.png b/ui/public/fx/langsearch/icon.png
new file mode 100644
index 00000000000..72ca125f366
Binary files /dev/null and b/ui/public/fx/langsearch/icon.png differ
diff --git a/ui/public/fx/mysql/detail.md b/ui/public/fx/mysql/detail.md
new file mode 100644
index 00000000000..6900b32ad81
--- /dev/null
+++ b/ui/public/fx/mysql/detail.md
@@ -0,0 +1,14 @@
+## 概述
+
+MySQL查询是一个连接MySQL数据库执行SQL查询的工具。
+
+
+## 配置
+
+1. 在函数库中配置启动参数
+在函数库的MySQL函数面板中,点击 … > 启用参数,填写数据库连接参数,并启用该函数。
+
+2. 在应用中使用
+在高级编排应用中,点击添加组件->函数库->MySQL查询,设置查询内容。
+
+
\ No newline at end of file
diff --git a/ui/public/fx/mysql/icon.png b/ui/public/fx/mysql/icon.png
new file mode 100644
index 00000000000..7367c5e2e02
Binary files /dev/null and b/ui/public/fx/mysql/icon.png differ
diff --git a/ui/public/fx/postgresql/detail.md b/ui/public/fx/postgresql/detail.md
new file mode 100644
index 00000000000..f11a4201131
--- /dev/null
+++ b/ui/public/fx/postgresql/detail.md
@@ -0,0 +1,14 @@
+## 概述
+
+PostgreSQL查询是一个连接PostgreSQL数据库执行SQL查询的工具。
+
+
+## 配置
+
+1. 在函数库中配置启动参数
+在函数库的PostgreSQL函数面板中,点击 … > 启用参数,填写数据库连接参数,并启用该函数。
+
+2. 在应用中使用
+在高级编排应用中,点击添加组件->函数库->PostgreSQL查询,设置查询内容。
+
+
\ No newline at end of file
diff --git a/ui/public/fx/postgresql/icon.png b/ui/public/fx/postgresql/icon.png
new file mode 100644
index 00000000000..7893e74eaf9
Binary files /dev/null and b/ui/public/fx/postgresql/icon.png differ
diff --git a/ui/src/App.vue b/ui/src/App.vue
index d59d59725b4..86643068a13 100644
--- a/ui/src/App.vue
+++ b/ui/src/App.vue
@@ -1,9 +1,7 @@
-
+
-
+
diff --git a/ui/src/api/application-xpack.ts b/ui/src/api/application-xpack.ts
new file mode 100644
index 00000000000..25e973f5717
--- /dev/null
+++ b/ui/src/api/application-xpack.ts
@@ -0,0 +1,41 @@
+import { Result } from '@/request/Result'
+import { get, put } from '@/request/index'
+import { type Ref } from 'vue'
+
+const prefix = '/application'
+
+/**
+ * 替换社区版-获取AccessToken
+ * @param 参数 application_id
+ */
+const getAccessToken: (application_id: string, loading?: Ref
) => Promise> = (
+ application_id,
+ loading
+) => {
+ return get(`${prefix}/${application_id}/setting`, undefined, loading)
+}
+
+/**
+ * 替换社区版-修改AccessToken
+ * @param 参数 application_id
+ * data {
+ * "show_source": boolean,
+ * "show_history": boolean,
+ * "draggable": boolean,
+ * "show_guide": boolean,
+ * "avatar": file,
+ * "float_icon": file,
+ * }
+ */
+const putAccessToken: (
+ application_id: string,
+ data: any,
+ loading?: Ref
+) => Promise> = (application_id, data, loading) => {
+ return put(`${prefix}/${application_id}/setting`, data, undefined, loading)
+}
+
+export default {
+ getAccessToken,
+ putAccessToken
+}
diff --git a/ui/src/api/application.ts b/ui/src/api/application.ts
index c83ff3c8e26..efd4a4985a8 100644
--- a/ui/src/api/application.ts
+++ b/ui/src/api/application.ts
@@ -1,8 +1,9 @@
import { Result } from '@/request/Result'
-import { get, post, postStream, del, put } from '@/request/index'
+import { get, post, postStream, del, put, request, download, exportFile } from '@/request/index'
import type { pageRequest } from '@/api/type/common'
import type { ApplicationFormType } from '@/api/type/application'
import { type Ref } from 'vue'
+import type { FormField } from '@/components/dynamics-form/type'
const prefix = '/application'
@@ -17,12 +18,12 @@ const getAllAppilcation: () => Promise> = () => {
/**
* 获取分页应用
* page {
- "current_page": "string",
- "page_size": "string",
- }
+ "current_page": "string",
+ "page_size": "string",
+ }
* param {
- "name": "string",
- }
+ "name": "string",
+ }
*/
const getApplication: (
page: pageRequest,
@@ -45,8 +46,7 @@ const postApplication: (
/**
* 修改应用
- * @param 参数
-
+ * @param 参数
*/
const putApplication: (
application_id: String,
@@ -117,49 +117,65 @@ const putAccessToken: (
/**
* 应用认证
- * @param 参数
+ * @param 参数
{
- "access_token": "string"
-}
+ "access_token": "string"
+ }
*/
-const postAppAuthentication: (access_token: string, loading?: Ref) => Promise = (
- access_token,
- loading
-) => {
- return post(`${prefix}/authentication`, { access_token }, undefined, loading)
+const postAppAuthentication: (
+ access_token: string,
+ loading?: Ref,
+ authentication_value?: any
+) => Promise = (access_token, loading, authentication_value) => {
+ return post(
+ `${prefix}/authentication`,
+ { access_token: access_token, authentication_value },
+ undefined,
+ loading
+ )
}
/**
* 对话获取应用相关信息
- * @param 参数
+ * @param 参数
{
- "access_token": "string"
-}
+ "access_token": "string"
+ }
*/
-const getProfile: (loading?: Ref) => Promise = (loading) => {
+const getAppProfile: (loading?: Ref) => Promise = (loading) => {
return get(`${prefix}/profile`, undefined, loading)
}
/**
* 获得临时回话Id
- * @param 参数
+ * @param 参数
-}
+ }
*/
const postChatOpen: (data: ApplicationFormType) => Promise> = (data) => {
return post(`${prefix}/chat/open`, data)
}
+/**
+ * 获得工作流临时回话Id
+ * @param 参数
+
+ }
+ */
+const postWorkflowChatOpen: (data: ApplicationFormType) => Promise> = (data) => {
+ return post(`${prefix}/chat_workflow/open`, data)
+}
+
/**
* 正式回话Id
- * @param 参数
+ * @param 参数
* {
- "model_id": "string",
- "multiple_rounds_dialogue": true,
- "dataset_id_list": [
- "string"
- ]
-}
+ "model_id": "string",
+ "multiple_rounds_dialogue": true,
+ "dataset_id_list": [
+ "string"
+ ]
+ }
*/
const getChatOpen: (application_id: String) => Promise> = (application_id) => {
return get(`${prefix}/${application_id}/chat/open`)
@@ -176,11 +192,11 @@ const postChatMessage: (chat_id: string, data: any) => Promise = (chat_id,
/**
* 点赞、点踩
- * @param 参数
+ * @param 参数
* application_id : string; chat_id : string; chat_record_id : string
* {
- "vote_status": "string", // -1 0 1
- }
+ "vote_status": "string", // -1 0 1
+ }
*/
const putChatVote: (
application_id: string,
@@ -228,6 +244,301 @@ const getApplicationModel: (
return get(`${prefix}/${application_id}/model`, loading)
}
+/**
+ * 获取当前用户可使用的模型列表
+ * @param application_id
+ * @param loading
+ * @query { query_text: string, top_number: number, similarity: number }
+ * @returns
+ */
+const getApplicationRerankerModel: (
+ application_id: string,
+ loading?: Ref
+) => Promise>> = (application_id, loading) => {
+ return get(`${prefix}/${application_id}/model`, { model_type: 'RERANKER' }, loading)
+}
+
+/**
+ * 获取当前用户可使用的模型列表
+ * @param application_id
+ * @param loading
+ * @query { query_text: string, top_number: number, similarity: number }
+ * @returns
+ */
+const getApplicationSTTModel: (
+ application_id: string,
+ loading?: Ref
+) => Promise>> = (application_id, loading) => {
+ return get(`${prefix}/${application_id}/model`, { model_type: 'STT' }, loading)
+}
+
+/**
+ * 获取当前用户可使用的模型列表
+ * @param application_id
+ * @param loading
+ * @query { query_text: string, top_number: number, similarity: number }
+ * @returns
+ */
+const getApplicationTTSModel: (
+ application_id: string,
+ loading?: Ref
+) => Promise>> = (application_id, loading) => {
+ return get(`${prefix}/${application_id}/model`, { model_type: 'TTS' }, loading)
+}
+
+const getApplicationImageModel: (
+ application_id: string,
+ loading?: Ref
+) => Promise>> = (application_id, loading) => {
+ return get(`${prefix}/${application_id}/model`, { model_type: 'IMAGE' }, loading)
+}
+
+const getApplicationTTIModel: (
+ application_id: string,
+ loading?: Ref
+) => Promise>> = (application_id, loading) => {
+ return get(`${prefix}/${application_id}/model`, { model_type: 'TTI' }, loading)
+}
+
+/**
+ * 发布应用
+ * @param 参数
+ */
+const putPublishApplication: (
+ application_id: String,
+ data: ApplicationFormType,
+ loading?: Ref
+) => Promise> = (application_id, data, loading) => {
+ return put(`${prefix}/${application_id}/publish`, data, undefined, loading)
+}
+/**
+ * 获取应用所属的函数库列表
+ * @param application_id 应用id
+ * @param loading
+ * @returns
+ */
+const listFunctionLib: (application_id: String, loading?: Ref) => Promise> = (
+ application_id,
+ loading
+) => {
+ return get(`${prefix}/${application_id}/function_lib`, undefined, loading)
+}
+/**
+ * 获取当前人的所有应用列表
+ * @param application_id 应用id
+ * @param loading
+ * @returns
+ */
+export const getApplicationList: (
+ application_id: string,
+ loading?: Ref
+) => Promise> = (application_id, loading) => {
+ return get(`${prefix}/${application_id}/application`, undefined, loading)
+}
+/**
+ * 获取应用所属的函数库
+ * @param application_id
+ * @param function_lib_id
+ * @param loading
+ * @returns
+ */
+const getFunctionLib: (
+ application_id: String,
+ function_lib_id: String,
+ loading?: Ref
+) => Promise> = (application_id, function_lib_id, loading) => {
+ return get(`${prefix}/${application_id}/function_lib/${function_lib_id}`, undefined, loading)
+}
+
+const getMcpTools: (
+ data: any,
+ loading?: Ref
+) => Promise> = (data, loading) => {
+ return get(`${prefix}/mcp_servers`, data, loading)
+}
+
+const getApplicationById: (
+ application_id: String,
+ app_id: String,
+ loading?: Ref
+) => Promise> = (application_id, app_id, loading) => {
+ return get(`${prefix}/${application_id}/application/${app_id}`, undefined, loading)
+}
+/**
+ * 获取模型参数表单
+ * @param application_id 应用id
+ * @param model_id 模型id
+ * @param loading
+ * @returns
+ */
+const getModelParamsForm: (
+ application_id: String,
+ model_id: String,
+ loading?: Ref
+) => Promise>> = (application_id, model_id, loading) => {
+ return get(`${prefix}/${application_id}/model_params_form/${model_id}`, undefined, loading)
+}
+
+/**
+ * 上传文档图片附件
+ */
+const uploadFile: (
+ application_id: String,
+ chat_id: String,
+ data: any,
+ loading?: Ref
+) => Promise> = (application_id, chat_id, data, loading) => {
+ return post(`${prefix}/${application_id}/chat/${chat_id}/upload_file`, data, undefined, loading)
+}
+
+/**
+ * 语音转文本
+ */
+const postSpeechToText: (
+ application_id: String,
+ data: any,
+ loading?: Ref
+) => Promise> = (application_id, data, loading) => {
+ return post(`${prefix}/${application_id}/speech_to_text`, data, undefined, loading)
+}
+
+/**
+ * 文本转语音
+ */
+const postTextToSpeech: (
+ application_id: String,
+ data: any,
+ loading?: Ref
+) => Promise> = (application_id, data, loading) => {
+ return download(`${prefix}/${application_id}/text_to_speech`, 'post', data, undefined, loading)
+}
+
+/**
+ * 播放测试文本
+ */
+const playDemoText: (
+ application_id: String,
+ data: any,
+ loading?: Ref
+) => Promise> = (application_id, data, loading) => {
+ return download(`${prefix}/${application_id}/play_demo_text`, 'post', data, undefined, loading)
+}
+/**
+ * 获取平台状态
+ */
+const getPlatformStatus: (application_id: string) => Promise> = (application_id) => {
+ return get(`/platform/${application_id}/status`)
+}
+/**
+ * 获取平台配置
+ */
+const getPlatformConfig: (application_id: string, type: string) => Promise> = (
+ application_id,
+ type
+) => {
+ return get(`/platform/${application_id}/${type}`)
+}
+/**
+ * 更新平台配置
+ */
+const updatePlatformConfig: (
+ application_id: string,
+ type: string,
+ data: any,
+ loading?: Ref
+) => Promise> = (application_id, type, data, loading) => {
+ return post(`/platform/${application_id}/${type}`, data, undefined, loading)
+}
+/**
+ * 更新平台状态
+ */
+const updatePlatformStatus: (application_id: string, data: any) => Promise> = (
+ application_id,
+ data
+) => {
+ return post(`/platform/${application_id}/status`, data)
+}
+/**
+ * 验证密码
+ */
+const validatePassword: (
+ application_id: string,
+ password: string,
+ loading?: Ref
+) => Promise> = (application_id, password, loading) => {
+ return get(`/application/${application_id}/auth/${password}`, undefined, loading)
+}
+
+/**
+ * workflow历史版本
+ */
+const getWorkFlowVersion: (
+ application_id: string,
+ loading?: Ref
+) => Promise> = (application_id, loading) => {
+ return get(`/application/${application_id}/work_flow_version`, undefined, loading)
+}
+
+/**
+ * workflow历史版本详情
+ */
+const getWorkFlowVersionDetail: (
+ application_id: string,
+ application_version_id: string,
+ loading?: Ref
+) => Promise> = (application_id, application_version_id, loading) => {
+ return get(
+ `/application/${application_id}/work_flow_version/${application_version_id}`,
+ undefined,
+ loading
+ )
+}
+/**
+ * 修改workflow历史版本
+ */
+const putWorkFlowVersion: (
+ application_id: string,
+ application_version_id: string,
+ data: any,
+ loading?: Ref
+) => Promise> = (application_id, application_version_id, data, loading) => {
+ return put(
+ `/application/${application_id}/work_flow_version/${application_version_id}`,
+ data,
+ undefined,
+ loading
+ )
+}
+
+const getUserList: (type: string, loading?: Ref) => Promise> = (
+ type,
+ loading
+) => {
+ return get(`/user/list/${type}`, undefined, loading)
+}
+
+const exportApplication = (
+ application_id: string,
+ application_name: string,
+ loading?: Ref
+) => {
+ return exportFile(
+ application_name + '.mk',
+ `/application/${application_id}/export`,
+ undefined,
+ loading
+ )
+}
+
+/**
+ * 导入应用
+ */
+const importApplication: (data: any, loading?: Ref) => Promise> = (
+ data,
+ loading
+) => {
+ return post(`${prefix}/import`, data, undefined, loading)
+}
export default {
getAllAppilcation,
getApplication,
@@ -242,8 +553,36 @@ export default {
getAccessToken,
putAccessToken,
postAppAuthentication,
- getProfile,
+ getAppProfile,
putChatVote,
getApplicationHitTest,
- getApplicationModel
+ getApplicationModel,
+ putPublishApplication,
+ postWorkflowChatOpen,
+ listFunctionLib,
+ getFunctionLib,
+ getModelParamsForm,
+ getApplicationRerankerModel,
+ getApplicationSTTModel,
+ getApplicationTTSModel,
+ getApplicationImageModel,
+ getApplicationTTIModel,
+ postSpeechToText,
+ postTextToSpeech,
+ getPlatformStatus,
+ getPlatformConfig,
+ updatePlatformConfig,
+ updatePlatformStatus,
+ validatePassword,
+ getWorkFlowVersion,
+ getWorkFlowVersionDetail,
+ putWorkFlowVersion,
+ playDemoText,
+ getUserList,
+ getApplicationList,
+ uploadFile,
+ exportApplication,
+ importApplication,
+ getApplicationById,
+ getMcpTools
}
diff --git a/ui/src/api/auth-setting.ts b/ui/src/api/auth-setting.ts
new file mode 100644
index 00000000000..e1d239ba2e7
--- /dev/null
+++ b/ui/src/api/auth-setting.ts
@@ -0,0 +1,39 @@
+import {Result} from '@/request/Result'
+import {get, post, del, put} from '@/request/index'
+import type {pageRequest} from '@/api/type/common'
+import {type Ref} from 'vue'
+
+const prefix = '/auth'
+/**
+ * 获取认证设置
+ */
+const getAuthSetting: (auth_type: string, loading?: Ref) => Promise> = (auth_type, loading) => {
+ return get(`${prefix}/${auth_type}/detail`, undefined, loading)
+}
+
+/**
+ * 邮箱测试
+ */
+const postAuthSetting: (data: any, loading?: Ref) => Promise> = (
+ data,
+ loading
+) => {
+ return post(`${prefix}/connection`, data, undefined, loading)
+}
+
+/**
+ * 修改邮箱设置
+ */
+const putAuthSetting: (auth_type: string, data: any, loading?: Ref) => Promise> = (
+ auth_type,
+ data,
+ loading
+) => {
+ return put(`${prefix}/${auth_type}/info`, data, undefined, loading)
+}
+
+export default {
+ getAuthSetting,
+ postAuthSetting,
+ putAuthSetting
+}
diff --git a/ui/src/api/dataset.ts b/ui/src/api/dataset.ts
index 702731a267b..a5a663b03c7 100644
--- a/ui/src/api/dataset.ts
+++ b/ui/src/api/dataset.ts
@@ -1,21 +1,22 @@
import { Result } from '@/request/Result'
-import { get, post, del, put } from '@/request/index'
+import { get, post, del, put, exportExcel, exportFile } from '@/request/index'
import type { datasetData } from '@/api/type/dataset'
import type { pageRequest } from '@/api/type/common'
import type { ApplicationFormType } from '@/api/type/application'
import { type Ref } from 'vue'
+
const prefix = '/dataset'
/**
* 获取分页知识库
- * @param 参数
+ * @param 参数
* page {
- "current_page": "string",
- "page_size": "string",
- }
+ "current_page": "string",
+ "page_size": "string",
+ }
* param {
- "name": "string",
- }
+ "name": "string",
+ }
*/
const getDataset: (
page: pageRequest,
@@ -46,28 +47,28 @@ const delDataset: (dataset_id: String, loading?: Ref) => Promise) => Promise> = (
data,
@@ -78,13 +79,13 @@ const postDataset: (data: datasetData, loading?: Ref) => Promise) => Promise> = (
data,
@@ -92,6 +93,39 @@ const postWebDataset: (data: any, loading?: Ref) => Promise
) => {
return post(`${prefix}/web`, data, undefined, loading)
}
+/**
+ * 创建Lark知识库
+ * @param 参数
+ * {
+ "name": "string",
+ "desc": "string",
+ "app_id": "string",
+ "app_secret": "string",
+ "folder_token": "string",
+ }
+ */
+const postLarkDataset: (data: any, loading?: Ref) => Promise> = (
+ data,
+ loading
+) => {
+ return post(`${prefix}/lark/save`, data, undefined, loading)
+}
+
+/**
+ * 创建QA知识库
+ * @param 参数 formData
+ * {
+ "file": "file",
+ "name": "string",
+ "desc": "string",
+ }
+ */
+const postQADataset: (data: any, loading?: Ref) => Promise> = (
+ data,
+ loading
+) => {
+ return post(`${prefix}/qa`, data, undefined, loading)
+}
/**
* 知识库详情
@@ -106,18 +140,26 @@ const getDatasetDetail: (dataset_id: string, loading?: Ref) => Promise<
/**
* 修改知识库信息
- * @param 参数
+ * @param 参数
* dataset_id
* {
- "name": "string",
- "desc": true
- }
+ "name": "string",
+ "desc": true
+ }
*/
-const putDataset: (dataset_id: string, data: any) => Promise> = (
- dataset_id,
- data: any
-) => {
- return put(`${prefix}/${dataset_id}`, data)
+const putDataset: (
+ dataset_id: string,
+ data: any,
+ loading?: Ref
+) => Promise> = (dataset_id, data, loading) => {
+ return put(`${prefix}/${dataset_id}`, data, undefined, loading)
+}
+const putLarkDataset: (
+ dataset_id: string,
+ data: any,
+ loading?: Ref
+) => Promise> = (dataset_id, data, loading) => {
+ return put(`${prefix}/lark/${dataset_id}`, data, undefined, loading)
}
/**
* 获取知识库 可关联的应用列表
@@ -160,6 +202,96 @@ const putSyncWebDataset: (
return put(`${prefix}/${dataset_id}/sync_web`, undefined, { sync_type }, loading)
}
+/**
+ * 向量化知识库
+ * @param 参数 dataset_id
+ */
+const putReEmbeddingDataset: (
+ dataset_id: string,
+ loading?: Ref
+) => Promise> = (dataset_id, loading) => {
+ return put(`${prefix}/${dataset_id}/re_embedding`, undefined, undefined, loading)
+}
+
+/**
+ * 导出知识库
+ * @param dataset_name 知识库名称
+ * @param dataset_id 知识库id
+ * @returns
+ */
+const exportDataset: (
+ dataset_name: string,
+ dataset_id: string,
+ loading?: Ref
+) => Promise = (dataset_name, dataset_id, loading) => {
+ return exportExcel(dataset_name + '.xlsx', `dataset/${dataset_id}/export`, undefined, loading)
+}
+/**
+ *导出Zip知识库
+ * @param dataset_name 知识库名称
+ * @param dataset_id 知识库id
+ * @param loading 加载器
+ * @returns
+ */
+const exportZipDataset: (
+ dataset_name: string,
+ dataset_id: string,
+ loading?: Ref
+) => Promise = (dataset_name, dataset_id, loading) => {
+ return exportFile(dataset_name + '.zip', `dataset/${dataset_id}/export_zip`, undefined, loading)
+}
+
+/**
+ * 获取当前用户可使用的模型列表
+ * @param application_id
+ * @param loading
+ * @query { query_text: string, top_number: number, similarity: number }
+ * @returns
+ */
+const getDatasetModel: (
+ dataset_id: string,
+ loading?: Ref
+) => Promise>> = (dataset_id, loading) => {
+ return get(`${prefix}/${dataset_id}/model`, loading)
+}
+/**
+ * 获取飞书文档列表
+ * @param dataset_id
+ * @param folder_token
+ * @param loading
+ * @returns
+ */
+const getLarkDocumentList: (
+ dataset_id: string,
+ folder_token: string,
+ data: any,
+ loading?: Ref
+) => Promise>> = (dataset_id, folder_token, data, loading) => {
+ return post(`${prefix}/lark/${dataset_id}/${folder_token}/doc_list`, data, null, loading)
+}
+
+const importLarkDocument: (
+ dataset_id: string,
+ data: any,
+ loading?: Ref
+) => Promise>> = (dataset_id, data, loading) => {
+ return post(`${prefix}/lark/${dataset_id}/import`, data, null, loading)
+}
+/**
+ * 生成关联问题
+ * @param dataset_id 知识库id
+ * @param data
+ * @param loading
+ * @returns
+ */
+const generateRelated: (
+ dataset_id: string,
+ data: any,
+ loading?: Ref
+) => Promise>> = (dataset_id, data, loading) => {
+ return put(`${prefix}/${dataset_id}/generate_related`, data, null, loading)
+}
+
export default {
getDataset,
getAllDataset,
@@ -170,5 +302,15 @@ export default {
listUsableApplication,
getDatasetHitTest,
postWebDataset,
- putSyncWebDataset
+ putSyncWebDataset,
+ putReEmbeddingDataset,
+ postQADataset,
+ exportDataset,
+ getDatasetModel,
+ exportZipDataset,
+ postLarkDataset,
+ getLarkDocumentList,
+ importLarkDocument,
+ putLarkDataset,
+ generateRelated
}
diff --git a/ui/src/api/document.ts b/ui/src/api/document.ts
index 2d2fc1f65f1..ad4792af7f4 100644
--- a/ui/src/api/document.ts
+++ b/ui/src/api/document.ts
@@ -1,8 +1,9 @@
import { Result } from '@/request/Result'
-import { get, post, del, put } from '@/request/index'
+import { get, post, del, put, exportExcel, exportFile } from '@/request/index'
import type { Ref } from 'vue'
import type { KeyValue } from '@/api/type/common'
import type { pageRequest } from '@/api/type/common'
+
const prefix = '/dataset'
/**
@@ -26,14 +27,14 @@ const listSplitPattern: (
/**
* 文档分页列表
- * @param 参数 dataset_id,
+ * @param 参数 dataset_id,
* page {
- "current_page": "string",
- "page_size": "string",
- }
-* param {
- "name": "string",
- }
+ "current_page": "string",
+ "page_size": "string",
+ }
+ * param {
+ "name": "string",
+ }
*/
const getDocument: (
@@ -58,22 +59,22 @@ const getAllDocument: (dataset_id: string, loading?: Ref) => Promise Promise> = (dataset_id, data, loading) => {
return del(`${prefix}/${dataset_id}/document/_bach`, undefined, { id_list: data }, loading)
}
+
+const batchRefresh: (
+ dataset_id: string,
+ data: any,
+ stateList: Array,
+ loading?: Ref
+) => Promise> = (dataset_id, data, stateList, loading) => {
+ return put(
+ `${prefix}/${dataset_id}/document/batch_refresh`,
+ { id_list: data, state_list: stateList },
+ undefined,
+ loading
+ )
+}
/**
* 文档详情
* @param 参数 dataset_id
@@ -137,20 +152,42 @@ const getDocumentDetail: (dataset_id: string, document_id: string) => Promise,
loading?: Ref
-) => Promise> = (dataset_id, document_id, loading) => {
+) => Promise> = (dataset_id, document_id, state_list, loading) => {
return put(
`${prefix}/${dataset_id}/document/${document_id}/refresh`,
+ { state_list },
+ undefined,
+ loading
+ )
+}
+
+/**
+ * 同步web站点类型
+ * @param 参数
+ * dataset_id, document_id,
+ */
+const putDocumentSync: (
+ dataset_id: string,
+ document_id: string,
+ loading?: Ref
+) => Promise> = (dataset_id, document_id, loading) => {
+ return put(`${prefix}/${dataset_id}/document/${document_id}/sync`, undefined, undefined, loading)
+}
+const putLarkDocumentSync: (
+ dataset_id: string,
+ document_id: string,
+ loading?: Ref
+) => Promise> = (dataset_id, document_id, loading) => {
+ return put(
+ `${prefix}/lark/${dataset_id}/document/${document_id}/sync`,
undefined,
undefined,
loading
@@ -168,17 +205,24 @@ const delMulSyncDocument: (
) => Promise> = (dataset_id, data, loading) => {
return put(`${prefix}/${dataset_id}/document/_bach`, { id_list: data }, undefined, loading)
}
+const delMulLarkSyncDocument: (
+ dataset_id: string,
+ data: any,
+ loading?: Ref
+) => Promise> = (dataset_id, data, loading) => {
+ return put(`${prefix}/lark/${dataset_id}/_batch`, { id_list: data }, undefined, loading)
+}
/**
* 创建Web站点文档
- * @param 参数
+ * @param 参数
* {
- "source_url_list": [
- "string"
- ],
- "selector": "string"
+ "source_url_list": [
+ "string"
+ ],
+ "selector": "string"
+ }
}
-}
*/
const postWebDocument: (
dataset_id: string,
@@ -188,6 +232,33 @@ const postWebDocument: (
return post(`${prefix}/${dataset_id}/document/web`, data, undefined, loading)
}
+/**
+ * 导入QA文档
+ * @param 参数
+ * file
+ }
+ */
+const postQADocument: (
+ dataset_id: string,
+ data: any,
+ loading?: Ref
+) => Promise> = (dataset_id, data, loading) => {
+ return post(`${prefix}/${dataset_id}/document/qa`, data, undefined, loading)
+}
+
+/**
+ * 导入表格
+ * @param 参数
+ * file
+ */
+const postTableDocument: (
+ dataset_id: string,
+ data: any,
+ loading?: Ref
+) => Promise> = (dataset_id, data, loading) => {
+ return post(`${prefix}/${dataset_id}/document/table`, data, undefined, loading)
+}
+
/**
* 批量迁移文档
* @param 参数 dataset_id,target_dataset_id,
@@ -220,6 +291,103 @@ const batchEditHitHandling: (
) => Promise> = (dataset_id, data, loading) => {
return put(`${prefix}/${dataset_id}/document/batch_hit_handling`, data, undefined, loading)
}
+
+/**
+ * 获得QA模版
+ * @param 参数 fileName,type,
+ */
+const exportQATemplate: (fileName: string, type: string, loading?: Ref