{
  "canonical_name": "open-webui/open-webui",
  "compilation_id": "pack_5eb7901d33764c009d1d6ec4f42ff0cb",
  "created_at": "2026-05-16T19:36:42.760717+00:00",
  "created_by": "project-pack-compiler",
  "feedback": {
    "carrier_selection_notes": [
      "viable_asset_types=mcp_config, recipe, host_instruction, eval, preflight",
      "recommended_asset_types=mcp_config, recipe, host_instruction, eval, preflight"
    ],
    "evidence_delta": {
      "confirmed_claims": [
        "identity_anchor_present",
        "capability_and_host_targets_present",
        "install_path_declared_or_better"
      ],
      "missing_required_fields": [],
      "must_verify_forwarded": [
        "Run or inspect `pip install open-webui` in an isolated environment.",
        "Confirm the project exposes the claimed capability to at least one target host."
      ],
      "quickstart_execution_scope": "allowlisted_sandbox_smoke",
      "sandbox_command": "pip install open-webui",
      "sandbox_container_image": "python:3.12-slim",
      "sandbox_execution_backend": "docker",
      "sandbox_planner_decision": "deterministic_isolated_install",
      "sandbox_validation_id": "sbx_3d92df2ee3954299aa37f167b336a655"
    },
    "feedback_event_type": "project_pack_compilation_feedback",
    "learning_candidate_reasons": [],
    "template_gaps": []
  },
  "identity": {
    "canonical_id": "project_a0af59ccc17b5415b1863b9767e3cb81",
    "canonical_name": "open-webui/open-webui",
    "homepage_url": null,
    "license": "unknown",
    "repo_url": "https://github.com/open-webui/open-webui",
    "slug": "open-webui",
    "source_packet_id": "phit_80754f987a8943d8b8eb7e3ea37fa9e0",
    "source_validation_id": "dval_e2f0e782f2844612b65d669f75051cd6"
  },
  "merchandising": {
    "best_for": "需要个人工作台能力，并使用 mcp_host的用户",
    "github_forks": 19412,
    "github_stars": 136324,
    "one_liner_en": "User-friendly AI Interface (Supports Ollama, OpenAI API, ...)",
    "one_liner_zh": "User-friendly AI Interface (Supports Ollama, OpenAI API, ...)",
    "primary_category": {
      "category_id": "personal-workspace",
      "confidence": "high",
      "name_en": "Personal Workspace",
      "name_zh": "个人工作台",
      "reason": "curated popular coverage category matched project identity"
    },
    "target_user": "使用 mcp_host, chatgpt 等宿主 AI 的用户",
    "title_en": "open-webui",
    "title_zh": "open-webui 能力包",
    "visible_tags": [
      {
        "label_en": "Browser Agents",
        "label_zh": "浏览器 Agent",
        "source": "repo_evidence_project_characteristics",
        "tag_id": "product_domain-browser-agents",
        "type": "product_domain"
      },
      {
        "label_en": "Web Task Automation",
        "label_zh": "网页任务自动化",
        "source": "repo_evidence_project_characteristics",
        "tag_id": "user_job-web-task-automation",
        "type": "user_job"
      },
      {
        "label_en": "Browser Automation",
        "label_zh": "浏览器自动化",
        "source": "repo_evidence_project_characteristics",
        "tag_id": "core_capability-browser-automation",
        "type": "core_capability"
      },
      {
        "label_en": "Node-based Workflow",
        "label_zh": "节点式流程编排",
        "source": "repo_evidence_project_characteristics",
        "tag_id": "workflow_pattern-node-based-workflow",
        "type": "workflow_pattern"
      },
      {
        "label_en": "Evaluation Suite",
        "label_zh": "评测体系",
        "source": "repo_evidence_project_characteristics",
        "tag_id": "selection_signal-evaluation-suite",
        "type": "selection_signal"
      }
    ]
  },
  "packet_id": "phit_80754f987a8943d8b8eb7e3ea37fa9e0",
  "page_model": {
    "artifacts": {
      "artifact_slug": "open-webui",
      "files": [
        "PROJECT_PACK.json",
        "QUICK_START.md",
        "PROMPT_PREVIEW.md",
        "HUMAN_MANUAL.md",
        "AI_CONTEXT_PACK.md",
        "BOUNDARY_RISK_CARD.md",
        "PITFALL_LOG.md",
        "REPO_INSPECTION.json",
        "REPO_INSPECTION.md",
        "CAPABILITY_CONTRACT.json",
        "EVIDENCE_INDEX.json",
        "CLAIM_GRAPH.json"
      ],
      "required_files": [
        "PROJECT_PACK.json",
        "QUICK_START.md",
        "PROMPT_PREVIEW.md",
        "HUMAN_MANUAL.md",
        "AI_CONTEXT_PACK.md",
        "BOUNDARY_RISK_CARD.md",
        "PITFALL_LOG.md",
        "REPO_INSPECTION.json"
      ]
    },
    "detail": {
      "capability_source": "Project Hit Packet + DownstreamValidationResult",
      "commands": [
        {
          "command": "pip install open-webui",
          "label": "Python / pip · 官方安装入口",
          "source": "https://github.com/open-webui/open-webui#readme",
          "verified": true
        }
      ],
      "display_tags": [
        "浏览器 Agent",
        "网页任务自动化",
        "浏览器自动化",
        "节点式流程编排",
        "评测体系"
      ],
      "eyebrow": "个人工作台",
      "glance": [
        {
          "body": "判断自己是不是目标用户。",
          "label": "最适合谁",
          "value": "需要个人工作台能力，并使用 mcp_host的用户"
        },
        {
          "body": "先理解能力边界，再决定是否继续。",
          "label": "核心价值",
          "value": "User-friendly AI Interface (Supports Ollama, OpenAI API, ...)"
        },
        {
          "body": "未完成验证前保持审慎。",
          "label": "继续前",
          "value": "publish to Doramagic.ai project surfaces"
        }
      ],
      "guardrail_source": "Boundary & Risk Card",
      "guardrails": [
        {
          "body": "Prompt Preview 只展示流程，不证明项目已安装或运行。",
          "label": "Check 1",
          "value": "不要把试用当真实运行"
        },
        {
          "body": "mcp_host, chatgpt",
          "label": "Check 2",
          "value": "确认宿主兼容"
        },
        {
          "body": "publish to Doramagic.ai project surfaces",
          "label": "Check 3",
          "value": "先隔离验证"
        }
      ],
      "mode": "mcp_config, recipe, host_instruction, eval, preflight",
      "pitfall_log": {
        "items": [
          {
            "body": "README/documentation is current enough for a first validation pass.",
            "category": "能力坑",
            "evidence": [
              "capability.assumptions | github_repo:701547123 | https://github.com/open-webui/open-webui | README/documentation is current enough for a first validation pass."
            ],
            "severity": "medium",
            "suggested_check": "将假设转成下游验证清单。",
            "title": "能力判断依赖假设",
            "user_impact": "假设不成立时，用户拿不到承诺的能力。"
          },
          {
            "body": "未记录 last_activity_observed。",
            "category": "维护坑",
            "evidence": [
              "evidence.maintainer_signals | github_repo:701547123 | https://github.com/open-webui/open-webui | last_activity_observed missing"
            ],
            "severity": "medium",
            "suggested_check": "补 GitHub 最近 commit、release、issue/PR 响应信号。",
            "title": "维护活跃度未知",
            "user_impact": "新项目、停更项目和活跃项目会被混在一起，推荐信任度下降。"
          },
          {
            "body": "no_demo",
            "category": "安全/权限坑",
            "evidence": [
              "downstream_validation.risk_items | github_repo:701547123 | https://github.com/open-webui/open-webui | no_demo; severity=medium"
            ],
            "severity": "medium",
            "suggested_check": "进入安全/权限治理复核队列。",
            "title": "下游验证发现风险项",
            "user_impact": "下游已经要求复核，不能在页面中弱化。"
          },
          {
            "body": "no_demo",
            "category": "安全/权限坑",
            "evidence": [
              "risks.scoring_risks | github_repo:701547123 | https://github.com/open-webui/open-webui | no_demo; severity=medium"
            ],
            "severity": "medium",
            "suggested_check": "把风险写入边界卡，并确认是否需要人工复核。",
            "title": "存在评分风险",
            "user_impact": "风险会影响是否适合普通用户安装。"
          },
          {
            "body": "issue_or_pr_quality=unknown。",
            "category": "维护坑",
            "evidence": [
              "evidence.maintainer_signals | github_repo:701547123 | https://github.com/open-webui/open-webui | issue_or_pr_quality=unknown"
            ],
            "severity": "low",
            "suggested_check": "抽样最近 issue/PR，判断是否长期无人处理。",
            "title": "issue/PR 响应质量未知",
            "user_impact": "用户无法判断遇到问题后是否有人维护。"
          },
          {
            "body": "release_recency=unknown。",
            "category": "维护坑",
            "evidence": [
              "evidence.maintainer_signals | github_repo:701547123 | https://github.com/open-webui/open-webui | release_recency=unknown"
            ],
            "severity": "low",
            "suggested_check": "确认最近 release/tag 和 README 安装命令是否一致。",
            "title": "发布节奏不明确",
            "user_impact": "安装命令和文档可能落后于代码，用户踩坑概率升高。"
          }
        ],
        "source": "ProjectPitfallLog + ProjectHitPacket + validation + community signals",
        "summary": "发现 6 个潜在踩坑项，其中 0 个为 high/blocking；最高优先级：能力坑 - 能力判断依赖假设。",
        "title": "踩坑日志"
      },
      "snapshot": {
        "contributors": 826,
        "forks": 19412,
        "license": "unknown",
        "note": "站点快照，非实时质量证明；用于开工前背景判断。",
        "stars": 136324
      },
      "source_url": "https://github.com/open-webui/open-webui",
      "steps": [
        {
          "body": "不安装项目，先体验能力节奏。",
          "code": "preview",
          "title": "先试 Prompt"
        },
        {
          "body": "理解输入、输出、失败模式和边界。",
          "code": "manual",
          "title": "读说明书"
        },
        {
          "body": "把上下文交给宿主 AI 继续工作。",
          "code": "context",
          "title": "带给 AI"
        },
        {
          "body": "进入主力环境前先完成安装入口与风险边界验证。",
          "code": "verify",
          "title": "沙箱验证"
        }
      ],
      "subtitle": "User-friendly AI Interface (Supports Ollama, OpenAI API, ...)",
      "title": "open-webui 能力包",
      "trial_prompt": "# open-webui - Prompt Preview\n\n> Copy the prompt below into your AI host before installing anything.\n> Its purpose is to let you safely feel the project's workflow, not to claim the project has already run.\n\n## Copy this prompt\n\n```text\nYou are using an independent Doramagic capability pack for open-webui/open-webui.\n\nProject:\n- Name: open-webui\n- Repository: https://github.com/open-webui/open-webui\n- Summary: User-friendly AI Interface (Supports Ollama, OpenAI API, ...)\n- Host target: mcp_host, chatgpt\n\nGoal:\nHelp me evaluate this project for the following task without installing it yet: User-friendly AI Interface (Supports Ollama, OpenAI API, ...)\n\nBefore taking action:\n1. Restate my task, success standard, and boundary.\n2. Identify whether the next step requires tools, browser access, network access, filesystem access, credentials, package installation, or host configuration.\n3. Use only the Doramagic Project Pack, the upstream repository, and the source-linked evidence listed below.\n4. If a real command, install step, API call, file write, or host integration is required, mark it as \"requires post-install verification\" and ask for approval first.\n5. If evidence is missing, say \"evidence is missing\" instead of filling the gap.\n\nPreviewable capabilities:\n- Capability 1: User-friendly AI Interface (Supports Ollama, OpenAI API, ...)\n\nCapabilities that require post-install verification:\n- Capability 1: Use the source-backed project context to guide one small, checkable workflow step.\n\nCore service flow:\n1. project-introduction: Project Introduction. Produce one small intermediate artifact and wait for confirmation.\n2. installation-guide: Installation Guide. Produce one small intermediate artifact and wait for confirmation.\n3. architecture-overview: Architecture Overview. Produce one small intermediate artifact and wait for confirmation.\n4. data-models: Data Models. 
Produce one small intermediate artifact and wait for confirmation.\n5. api-routers: API Routers. Produce one small intermediate artifact and wait for confirmation.\n\nSource-backed evidence to keep in mind:\n- https://github.com/open-webui/open-webui\n- https://github.com/open-webui/open-webui#readme\n- README.md\n- pyproject.toml\n- backend/open_webui/__init__.py\n- Dockerfile\n- docker-compose.yaml\n- .env.example\n- backend/start.sh\n- run.sh\n\nFirst response rules:\n1. Start Step 1 only.\n2. Explain the one service action you will perform first.\n3. Ask exactly three questions about my target workflow, success standard, and sandbox boundary.\n4. Stop and wait for my answers.\n\nStep 1 follow-up protocol:\n- After I answer the first three questions, stay in Step 1.\n- Produce six parts only: clarified task, success standard, boundary conditions, two or three options, tradeoffs for each option, and one recommendation.\n- End by asking whether I confirm the recommendation.\n- Do not move to Step 2 until I explicitly confirm.\n\nConversation rules:\n- Advance one step at a time and wait for confirmation after each small artifact.\n- Write outputs as recommendations or planned checks, not as completed execution.\n- Do not claim tests passed, files changed, commands ran, APIs were called, or the project was installed.\n- If the user asks for execution, first provide the sandbox setup, expected output, rollback, and approval checkpoint.\n```\n",
      "voices": [
        {
          "body": "来源平台：github。github/github_issue: [BUG] v0.9.3 - Notes completely broken: cannot open or create notes (Typ（https://github.com/open-webui/open-webui/issues/24484）；github/github_issue: issue: llamacpp load/unload indicator doesn't work（https://github.com/open-webui/open-webui/issues/24544）；github/github_issue: issue: When continuing a conversation in the new version using a chat cr（https://github.com/open-webui/open-webui/issues/24522）；github/github_issue: issue: image_gen is exposed to the model even when image generation is d（https://github.com/open-webui/open-webui/issues/24532）；github/github_issue: feat: Add file types per MCP Integration（https://github.com/open-webui/open-webui/issues/24496）；github/github_issue: feat: apply filter in tool call loop（https://github.com/open-webui/open-webui/issues/24500）；github/github_issue: issue: Cmd+r on Mac (refresh page) causes chat to generate a new respons（https://github.com/open-webui/open-webui/issues/24530）；github/github_release: v0.9.5（https://github.com/open-webui/open-webui/releases/tag/v0.9.5）；github/github_release: v0.9.4（https://github.com/open-webui/open-webui/releases/tag/v0.9.4）；github/github_release: v0.9.3（https://github.com/open-webui/open-webui/releases/tag/v0.9.3）；github/github_release: v0.9.2（https://github.com/open-webui/open-webui/releases/tag/v0.9.2）；github/github_release: v0.9.1（https://github.com/open-webui/open-webui/releases/tag/v0.9.1）。这些是项目级外部声音，不作为单独质量证明。",
          "items": [
            {
              "kind": "github_issue",
              "source": "github",
              "title": "[BUG] v0.9.3 - Notes completely broken: cannot open or create notes (Typ",
              "url": "https://github.com/open-webui/open-webui/issues/24484"
            },
            {
              "kind": "github_issue",
              "source": "github",
              "title": "issue: llamacpp load/unload indicator doesn't work",
              "url": "https://github.com/open-webui/open-webui/issues/24544"
            },
            {
              "kind": "github_issue",
              "source": "github",
              "title": "issue: When continuing a conversation in the new version using a chat cr",
              "url": "https://github.com/open-webui/open-webui/issues/24522"
            },
            {
              "kind": "github_issue",
              "source": "github",
              "title": "issue: image_gen is exposed to the model even when image generation is d",
              "url": "https://github.com/open-webui/open-webui/issues/24532"
            },
            {
              "kind": "github_issue",
              "source": "github",
              "title": "feat: Add file types per MCP Integration",
              "url": "https://github.com/open-webui/open-webui/issues/24496"
            },
            {
              "kind": "github_issue",
              "source": "github",
              "title": "feat: apply filter in tool call loop",
              "url": "https://github.com/open-webui/open-webui/issues/24500"
            },
            {
              "kind": "github_issue",
              "source": "github",
              "title": "issue: Cmd+r on Mac (refresh page) causes chat to generate a new respons",
              "url": "https://github.com/open-webui/open-webui/issues/24530"
            },
            {
              "kind": "github_release",
              "source": "github",
              "title": "v0.9.5",
              "url": "https://github.com/open-webui/open-webui/releases/tag/v0.9.5"
            },
            {
              "kind": "github_release",
              "source": "github",
              "title": "v0.9.4",
              "url": "https://github.com/open-webui/open-webui/releases/tag/v0.9.4"
            },
            {
              "kind": "github_release",
              "source": "github",
              "title": "v0.9.3",
              "url": "https://github.com/open-webui/open-webui/releases/tag/v0.9.3"
            },
            {
              "kind": "github_release",
              "source": "github",
              "title": "v0.9.2",
              "url": "https://github.com/open-webui/open-webui/releases/tag/v0.9.2"
            },
            {
              "kind": "github_release",
              "source": "github",
              "title": "v0.9.1",
              "url": "https://github.com/open-webui/open-webui/releases/tag/v0.9.1"
            }
          ],
          "status": "已收录 13 条来源",
          "title": "社区讨论"
        }
      ]
    },
    "homepage_card": {
      "category": "个人工作台",
      "desc": "User-friendly AI Interface (Supports Ollama, OpenAI API, ...)",
      "effort": "安装已验证",
      "forks": 19412,
      "icon": "notebook",
      "name": "open-webui 能力包",
      "risk": "可发布",
      "slug": "open-webui",
      "stars": 136324,
      "tags": [
        "浏览器 Agent",
        "网页任务自动化",
        "浏览器自动化",
        "节点式流程编排",
        "评测体系"
      ],
      "thumb": "blue",
      "type": "MCP 配置"
    },
    "manual": {
      "markdown": "# https://github.com/open-webui/open-webui 项目说明书\n\n生成时间：2026-05-16 19:17:04 UTC\n\n## 目录\n\n- [Project Introduction](#project-introduction)\n- [Installation Guide](#installation-guide)\n- [Architecture Overview](#architecture-overview)\n- [Data Models](#data-models)\n- [API Routers](#api-routers)\n- [Retrieval System](#retrieval-system)\n- [Frontend Structure](#frontend-structure)\n- [Chat Interface](#chat-interface)\n- [Ollama Integration](#ollama-integration)\n- [RAG Pipeline](#rag-pipeline)\n\n<a id='project-introduction'></a>\n\n## Project Introduction\n\n### 相关页面\n\n相关主题：[Installation Guide](#installation-guide), [Architecture Overview](#architecture-overview)\n\n<details>\n<summary>相关源码文件</summary>\n\n以下源码文件用于生成本页说明：\n\n- [README.md](https://github.com/open-webui/open-webui/blob/main/README.md)\n- [src/lib/constants.ts](https://github.com/open-webui/open-webui/blob/main/src/lib/constants.ts)\n- [backend/open_webui/config.py](https://github.com/open-webui/open-webui/blob/main/backend/open_webui/config.py)\n- [backend/requirements-min.txt](https://github.com/open-webui/open-webui/blob/main/backend/requirements-min.txt)\n- [src/lib/utils/index.ts](https://github.com/open-webui/open-webui/blob/main/src/lib/utils/index.ts)\n- [backend/open_webui/utils/middleware.py](https://github.com/open-webui/open-webui/blob/main/backend/open_webui/utils/middleware.py)\n</details>\n\n# Project Introduction\n\nOpen WebUI is an extensible, self-hosted AI interface designed to provide a powerful and user-friendly chat experience for Large Language Models (LLMs). It serves as a comprehensive web-based frontend that seamlessly integrates with various LLM backends, enabling users to interact with AI models through a modern, feature-rich interface.\n\n## Overview\n\nOpen WebUI is an open-source project that prioritizes offline functionality and user privacy. 
The platform is built with extensibility in mind, allowing users to customize and extend its capabilities through a modular architecture. The project supports multiple installation methods and integrates with popular LLM providers like Ollama, OpenAI, and various other AI services.\n\nThe system operates as a full-stack application with a Svelte-based frontend and a Python FastAPI backend, communicating through RESTful APIs and WebSocket connections for real-time interactions.\n\n## Architecture\n\nOpen WebUI follows a client-server architecture with clear separation between the frontend presentation layer and the backend API layer.\n\n```mermaid\ngraph TD\n    subgraph Frontend[\"Frontend (Svelte)\"]\n        UI[User Interface]\n        State[State Management]\n        API[API Client]\n    end\n    \n    subgraph Backend[\"Backend (Python/FastAPI)\"]\n        Routes[API Routes]\n        Services[Business Logic]\n        DB[(Database)]\n        Auth[Authentication]\n    end\n    \n    subgraph External[\"External Services\"]\n        Ollama[Ollama]\n        OpenAI[OpenAI API]\n        RAG[RAG Providers]\n    end\n    \n    UI --> State\n    State --> API\n    API --> Routes\n    Routes --> Services\n    Services --> DB\n    Routes --> Auth\n    Services --> Ollama\n    Services --> OpenAI\n    Services --> RAG\n```\n\n### Frontend Layer\n\nThe frontend is built using Svelte and SvelteKit, providing a reactive and performant user interface. 
Key components include:\n\n| Component | Location | Purpose |\n|-----------|----------|---------|\n| Constants | `src/lib/constants.ts` | Application-wide configuration values |\n| Utilities | `src/lib/utils/index.ts` | Content processing and sanitization |\n| API Clients | `src/lib/apis/` | Communication with backend services |\n\n资料来源：[src/lib/constants.ts:1-20]()\n\nThe frontend defines API base URLs for various services:\n\n```typescript\nexport const WEBUI_API_BASE_URL = `${WEBUI_BASE_URL}/api/v1`;\nexport const OLLAMA_API_BASE_URL = `${WEBUI_BASE_URL}/ollama`;\nexport const OPENAI_API_BASE_URL = `${WEBUI_BASE_URL}/openai`;\nexport const AUDIO_API_BASE_URL = `${WEBUI_BASE_URL}/api/v1/audio`;\nexport const IMAGES_API_BASE_URL = `${WEBUI_BASE_URL}/api/v1/images`;\nexport const RETRIEVAL_API_BASE_URL = `${WEBUI_BASE_URL}/api/v1/retrieval`;\n```\n\n资料来源：[src/lib/constants.ts:8-15]()\n\n### Backend Layer\n\nThe backend is built with Python using FastAPI, providing a robust and scalable API layer. 
The backend handles authentication, data management, and communication with external AI services.\n\n#### Core Dependencies\n\n| Package | Version | Purpose |\n|---------|---------|---------|\n| fastapi | 0.135.1 | Web framework |\n| uvicorn | 0.41.0 | ASGI server |\n| pydantic | 2.12.5 | Data validation |\n| sqlalchemy | 2.0.48 | ORM framework |\n| python-socketio | 5.16.1 | WebSocket support |\n| pycrdt | 0.12.47 | CRDT for real-time collaboration |\n\n资料来源：[backend/requirements-min.txt:1-35]()\n\n## Features\n\nOpen WebUI provides a comprehensive set of features designed to enhance the AI chat experience:\n\n### Supported File Types\n\nThe system supports various document formats for upload and processing:\n\n| Category | File Types |\n|----------|------------|\n| Documents | PDF, EPUB, DOCX, TXT |\n| Code | Python, JavaScript, CSS, XML |\n| Data | CSV, Markdown |\n| Media | MP3, WAV (audio) |\n| Other | HTML, Octet-stream |\n\n资料来源：[src/lib/constants.ts:18-32]()\n\n### Key Capabilities\n\n1. **Multi-Model Support**: Engage with multiple AI models simultaneously through the MOA (Mixture of Agents) architecture\n2. **Code Interpreter**: Execute Python code in sandboxed environments using Pyodide or Jupyter\n3. **Voice Mode**: Voice-activated interactions with customizable prompts\n4. **RAG Integration**: Retrieval-augmented generation with support for 15+ search providers\n5. **Web Browsing**: Extract and integrate web content directly into conversations\n6. **Image Generation**: Integration with DALL-E, Gemini, ComfyUI, and AUTOMATIC1111\n7. **Role-Based Access Control (RBAC)**: Granular permission management\n\n## Configuration System\n\nOpen WebUI uses a persistent configuration system to manage application settings. 
Configuration values are stored in the database and can be overridden by environment variables.\n\n### Code Execution Configuration\n\n| Setting | Environment Variable | Default | Description |\n|---------|---------------------|---------|-------------|\n| ENABLE_CODE_EXECUTION | ENABLE_CODE_EXECUTION | True | Enable code execution feature |\n| CODE_EXECUTION_ENGINE | CODE_EXECUTION_ENGINE | pyodide | Execution engine (pyodide/jupyter) |\n| JUPYTER_URL | CODE_EXECUTION_JUPYTER_URL | - | Jupyter server URL |\n| JUPYTER_AUTH | CODE_EXECUTION_JUPYTER_AUTH | - | Jupyter authentication |\n\n资料来源：[backend/open_webui/config.py:1-50]()\n\n### Voice Mode Configuration\n\n| Parameter | Description |\n|-----------|-------------|\n| VOICE_MODE_PROMPT_TEMPLATE | Template for voice interaction prompts |\n| ENABLE_VOICE_MODE_PROMPT | Enable voice-specific prompt handling |\n\n## Security Features\n\n### Authentication System\n\nThe backend implements comprehensive authentication using:\n- JWT tokens via PyJWT\n- Argon2 password hashing\n- Session management with Redis support\n- Role-based access control (RBAC)\n\n### Content Processing\n\nThe system includes middleware for processing and sanitizing AI responses:\n\n```mermaid\ngraph LR\n    Response[AI Response] --> Middleware[Middleware Layer]\n    Middleware --> Sanitize[Content Sanitization]\n    Middleware --> CodeBlock[Code Block Processing]\n    Middleware --> Reasoning[Reasoning Display]\n    Sanitize --> Render[Rendered Response]\n    CodeBlock --> Render\n    Reasoning --> Render\n```\n\nThe middleware handles special content types including:\n- Code interpreter blocks\n- Reasoning/thinking blocks\n- HTML content rendering\n\n资料来源：[backend/open_webui/utils/middleware.py:1-40]()\n\n## Installation Methods\n\n### Python pip Installation\n\n```bash\npip install open-webui\nopen-webui serve\n```\n\nThe server runs on `http://localhost:8080` by default.\n\n### Docker Installation\n\n```bash\ndocker run -d -p 3000:8080 \\\n  
-v open-webui:/app/backend/data \\\n  --name open-webui \\\n  --add-host=host.docker.internal:host-gateway \\\n  --restart always \\\n  ghcr.io/open-webui/open-webui:latest\n```\n\n> [!IMPORTANT]\n> The volume mount `-v open-webui:/app/backend/data` is crucial for database persistence.\n\n### Development Branch\n\nFor testing unstable features:\n```bash\ndocker run -d -p 3000:8080 -v open-webui:/app/backend/data --name open-webui --add-host=host.docker.internal:host-gateway --restart always ghcr.io/open-webui/open-webui:dev\n```\n\n资料来源：[README.md:1-80]()\n\n## Technology Stack Summary\n\n| Layer | Technology | Key Libraries |\n|-------|------------|---------------|\n| Frontend Framework | Svelte/SvelteKit | - |\n| Backend Framework | Python/FastAPI | Pydantic, SQLAlchemy |\n| Database | SQLite/PostgreSQL | aiosqlite, psycopg |\n| Real-time | WebSocket | python-socketio, pycrdt |\n| Caching | Redis | starsessions |\n| Authentication | JWT/Argon2 | PyJWT, argon2-cffi |\n| HTTP Client | httpx | With SOCKS, HTTP/2 support |\n| Task Scheduling | APScheduler | - |\n\n## System Requirements\n\n- **Python Version**: 3.11+ (required for compatibility)\n- **Node.js**: For frontend development\n- **Database**: SQLite (default), PostgreSQL (production)\n- **Memory**: Minimum 4GB RAM recommended\n- **Storage**: Depends on models and data usage\n\n## Related Documentation\n\n- [Open WebUI Documentation](https://docs.openwebui.com/)\n- [Roadmap](https://docs.openwebui.com/roadmap/)\n- [Getting Started Guide](https://docs.openwebui.com/getting-started/)\n- [Updating Instructions](https://docs.openwebui.com/getting-started/updating)\n\n---\n\n<a id='installation-guide'></a>\n\n## Installation Guide\n\n<details>\n<summary>相关源码文件</summary>\n\n以下源码文件用于生成本页说明：\n\n- [README.md](https://github.com/open-webui/open-webui/blob/main/README.md)\n- [backend/requirements-min.txt](https://github.com/open-webui/open-webui/blob/main/backend/requirements-min.txt)\n- 
[backend/requirements.txt](https://github.com/open-webui/open-webui/blob/main/backend/requirements.txt)\n- [backend/open_webui/config.py](https://github.com/open-webui/open-webui/blob/main/backend/open_webui/config.py)\n- [backend/open_webui/main.py](https://github.com/open-webui/open-webui/blob/main/backend/open_webui/main.py)\n- [backend/open_webui/env.py](https://github.com/open-webui/open-webui/blob/main/backend/open_webui/env.py)\n</details>\n\n# Installation Guide\n\nOpen WebUI provides multiple installation methods to accommodate different use cases, from simple Docker deployments to development environments. This guide covers all supported installation approaches, configuration options, and environment variables required for a successful setup.\n\n## Prerequisites\n\n### System Requirements\n\n| Component | Minimum | Recommended |\n|-----------|---------|-------------|\n| Python | 3.11 | 3.11+ |\n| RAM | 4 GB | 8 GB+ |\n| Disk | 10 GB | 20 GB+ |\n| Docker | 20.10+ | Latest |\n| GPU | Optional | NVIDIA GPU with CUDA |\n\n### Required Dependencies\n\nThe backend requires the following core packages for basic operation:\n\n```\nfastapi==0.135.1\nuvicorn[standard]==0.41.0\npydantic==2.12.5\npython-multipart==0.0.22\nitsdangerous==2.2.0\npython-socketio==5.16.1\npython-jose==3.5.0\ncryptography\nsqlalchemy==2.0.48\naiosqlite==0.21.0\n```\n\n资料来源：[backend/requirements-min.txt:1-15](https://github.com/open-webui/open-webui/blob/main/backend/requirements-min.txt)\n\n## Installation Methods\n\n### Docker Installation (Recommended)\n\nDocker is the recommended installation method for production use. 
Open WebUI provides multiple official images with different configurations.\n\n#### Docker Image Variants\n\n| Tag | Description | Use Case |\n|-----|-------------|----------|\n| `main` | Base Open WebUI | Standard deployment |\n| `cuda` | With CUDA support | NVIDIA GPU acceleration |\n| `ollama` | Bundled with Ollama | Local model inference |\n| `dev` | Development build | Testing latest features |\n\n#### Basic Docker Installation\n\nFor connecting to Ollama on localhost:\n\n```bash\ndocker run -d -p 3000:8080 \\\n  --add-host=host.docker.internal:host-gateway \\\n  -v open-webui:/app/backend/data \\\n  --name open-webui \\\n  --restart always \\\n  ghcr.io/open-webui/open-webui:main\n```\n\n资料来源：[README.md:42-47](https://github.com/open-webui/open-webui/blob/main/README.md)\n\n#### NVIDIA GPU Support\n\nTo enable GPU acceleration:\n\n```bash\ndocker run -d -p 3000:8080 \\\n  --gpus all \\\n  --add-host=host.docker.internal:host-gateway \\\n  -v open-webui:/app/backend/data \\\n  --name open-webui \\\n  --restart always \\\n  ghcr.io/open-webui/open-webui:cuda\n```\n\n资料来源：[README.md:53-59](https://github.com/open-webui/open-webui/blob/main/README.md)\n\n#### Bundled Ollama Installation\n\nFor a streamlined setup with both Open WebUI and Ollama in a single container:\n\n**With GPU Support:**\n```bash\ndocker run -d -p 3000:8080 --gpus=all \\\n  -v ollama:/root/.ollama \\\n  -v open-webui:/app/backend/data \\\n  --name open-webui \\\n  --restart always \\\n  ghcr.io/open-webui/open-webui:ollama\n```\n\n**CPU Only:**\n```bash\ndocker run -d -p 3000:8080 \\\n  -v ollama:/root/.ollama \\\n  -v open-webui:/app/backend/data \\\n  --name open-webui \\\n  --restart always \\\n  ghcr.io/open-webui/open-webui:ollama\n```\n\n资料来源：[README.md:64-79](https://github.com/open-webui/open-webui/blob/main/README.md)\n\n#### OpenAI API Only\n\nFor environments using only the OpenAI API:\n\n```bash\ndocker run -d -p 3000:8080 \\\n  -e OPENAI_API_KEY=your_secret_key \\\n  -v 
open-webui:/app/backend/data \\\n  --name open-webui \\\n  --restart always \\\n  ghcr.io/open-webui/open-webui:main\n```\n\n资料来源：[README.md:50-56](https://github.com/open-webui/open-webui/blob/main/README.md)\n\n#### Remote Ollama Server\n\nTo connect to Ollama on a different server:\n\n```bash\ndocker run -d -p 3000:8080 \\\n  -e OLLAMA_BASE_URL=https://example.com \\\n  -v open-webui:/app/backend/data \\\n  --name open-webui \\\n  --restart always \\\n  ghcr.io/open-webui/open-webui:main\n```\n\n资料来源：[README.md:40-46](https://github.com/open-webui/open-webui/blob/main/README.md)\n\n### Python pip Installation\n\nOpen WebUI can be installed directly via pip for environments without Docker.\n\n#### Requirements\n\n- Python 3.11 or higher\n- pip package manager\n\n#### Installation Steps\n\n1. Install Open WebUI package:\n```bash\npip install open-webui\n```\n\n2. Start the server:\n```bash\nopen-webui serve\n```\n\nThe server will be accessible at `http://localhost:8080`.\n\n资料来源：[README.md:12-25](https://github.com/open-webui/open-webui/blob/main/README.md)\n\n### Development Installation\n\n#### Using the Dev Branch\n\n> [!WARNING]\n> The `:dev` branch contains unstable features. 
Use at your own risk.\n\n```bash\ndocker run -d -p 3000:8080 \\\n  -v open-webui:/app/backend/data \\\n  --name open-webui \\\n  --add-host=host.docker.internal:host-gateway \\\n  --restart always \\\n  ghcr.io/open-webui/open-webui:dev\n```\n\n资料来源：[README.md:27-34](https://github.com/open-webui/open-webui/blob/main/README.md)\n\n## Environment Configuration\n\n### Core Environment Variables\n\n| Variable | Description | Default |\n|----------|-------------|---------|\n| `OLLAMA_BASE_URL` | Ollama server URL | `http://localhost:11434` |\n| `OPENAI_API_KEY` | OpenAI API key | - |\n| `WEBUI_SECRET_KEY` | Session encryption key | Auto-generated |\n| `WEBUI_SESSION_COOKIE_SECURE` | Secure cookie flag | `True` |\n| `WEBUI_SESSION_COOKIE_SAME_SITE` | Cookie SameSite policy | `Lax` |\n\n资料来源：[backend/open_webui/main.py:18-35](https://github.com/open-webui/open-webui/blob/main/backend/open_webui/main.py)\n\n### Database Configuration\n\n| Variable | Description | Default |\n|----------|-------------|---------|\n| `DATABASE_URL` | Database connection string | SQLite |\n| `ENABLE_DATABASE_ENCRYPTION` | Enable SQLite encryption | `False` |\n\n#### Supported Databases\n\n- **SQLite**: Default, requires no configuration\n- **PostgreSQL**: Set `DATABASE_URL` to PostgreSQL connection string\n- **Redis**: For session management and caching\n\n资料来源：[backend/open_webui/env.py](https://github.com/open-webui/open-webui/blob/main/backend/open_webui/env.py)\n\n### Redis Configuration\n\n```bash\nREDIS_URL=redis://localhost:6379\nREDIS_KEY_PREFIX=open-webui\nREDIS_SENTINEL_HOSTS=host1:26379,host2:26379\nREDIS_SENTINEL_PORT=26379\n```\n\n资料来源：[backend/open_webui/main.py:15-18](https://github.com/open-webui/open-webui/blob/main/backend/open_webui/main.py)\n\n### Security Configuration\n\n| Variable | Description | Default |\n|----------|-------------|---------|\n| `ENABLE_SIGNUP_PASSWORD_CONFIRMATION` | Require password confirmation | `True` |\n| `WEBUI_AUTH_TRUSTED_EMAIL_HEADER` | 
Trusted email header for SSO | - |\n| `WEBUI_AUTH_SIGNOUT_REDIRECT_URL` | Signout redirect URL | - |\n\n资料来源：[backend/open_webui/main.py:36-38](https://github.com/open-webui/open-webui/blob/main/backend/open_webui/main.py)\n\n### Audit Logging\n\n| Variable | Description | Default |\n|----------|-------------|---------|\n| `ENABLE_AUDIT_GET_REQUESTS` | Log GET requests | `False` |\n| `AUDIT_INCLUDED_PATHS` | Paths to include | - |\n| `AUDIT_EXCLUDED_PATHS` | Paths to exclude | - |\n| `AUDIT_LOG_LEVEL` | Logging verbosity | `INFO` |\n\n资料来源：[backend/open_webui/env.py:12-15](https://github.com/open-webui/open-webui/blob/main/backend/open_webui/env.py)\n\n### Observability\n\n| Variable | Description | Default |\n|----------|-------------|---------|\n| `ENABLE_OTEL` | Enable OpenTelemetry | `False` |\n| `ENABLE_VERSION_UPDATE_CHECK` | Check for updates | `True` |\n\n资料来源：[backend/open_webui/main.py:48-51](https://github.com/open-webui/open-webui/blob/main/backend/open_webui/main.py)\n\n### Data Persistence\n\n> [!IMPORTANT]\n> Always mount the volume `-v open-webui:/app/backend/data` to prevent database loss.\n\nThe data directory contains:\n- SQLite database file\n- Uploaded files\n- Configuration cache\n- User sessions (if Redis not used)\n\n```bash\n-v open-webui:/app/backend/data\n```\n\n资料来源：[README.md:19-22](https://github.com/open-webui/open-webui/blob/main/README.md)\n\n## Offline Installation\n\nFor air-gapped environments, set the Hugging Face offline mode:\n\n```bash\nexport HF_HUB_OFFLINE=1\n```\n\n资料来源：[README.md:36-38](https://github.com/open-webui/open-webui/blob/main/README.md)\n\n## Installation Architecture\n\n```mermaid\ngraph TD\n    A[User Request] --> B{Installation Method}\n    B -->|Docker| C[Official Docker Image]\n    B -->|pip| D[PyPI Package]\n    \n    C --> E{Configuration}\n    D --> E\n    \n    E -->|OLLAMA_BASE_URL| F[Ollama Server]\n    E -->|OPENAI_API_KEY| G[OpenAI API]\n    E -->|Database Config| H[(Database)]\n    \n    F --> 
I[Model Inference]\n    G --> J[API Processing]\n    \n    H --> K[Application State]\n    I --> L[Response]\n    J --> L\n    K --> L\n```\n\n## Docker Compose Installation\n\nFor production deployments, use Docker Compose with persistent storage:\n\n```yaml\nservices:\n  open-webui:\n    image: ghcr.io/open-webui/open-webui:main\n    ports:\n      - \"3000:8080\"\n    volumes:\n      - open-webui:/app/backend/data\n    environment:\n      - OLLAMA_BASE_URL=http://host.docker.internal:11434\n    extra_hosts:\n      - \"host.docker.internal:host-gateway\"\n    restart: unless-stopped\n\nvolumes:\n  open-webui:\n```\n\n## Troubleshooting\n\n### Common Issues\n\n| Issue | Solution |\n|-------|----------|\n| Connection refused to Ollama | Check `OLLAMA_BASE_URL` and ensure Ollama is running |\n| Database errors | Verify volume mount is correct |\n| GPU not detected | Ensure NVIDIA Container Toolkit is installed |\n| Port conflicts | Change host port mapping |\n\n### Verification\n\nAfter installation, verify the service is running:\n\n```bash\ncurl http://localhost:3000/api/v1/models\n```\n\nThe server should respond with available models from the configured backend.\n\n资料来源：[README.md:40-60](https://github.com/open-webui/open-webui/blob/main/README.md)\n\n## Next Steps\n\nAfter successful installation:\n\n1. Access the web interface at `http://localhost:3000`\n2. Configure additional models and backends\n3. Set up user authentication and RBAC\n4. Configure retrieval and RAG pipelines\n5. 
Integrate additional tools and extensions\n\n---\n\n<a id='architecture-overview'></a>\n\n## Architecture Overview\n\n### 相关页面\n\n相关主题：[Data Models](#data-models), [API Routers](#api-routers), [Frontend Structure](#frontend-structure)\n\n<details>\n<summary>相关源码文件</summary>\n\n以下源码文件用于生成本页说明：\n\n- [backend/open_webui/main.py](https://github.com/open-webui/open-webui/blob/main/backend/open_webui/main.py)\n- [backend/open_webui/socket/main.py](https://github.com/open-webui/open-webui/blob/main/backend/open_webui/socket/main.py)\n- [src/routes/+layout.svelte](https://github.com/open-webui/open-webui/blob/main/src/routes/+layout.svelte)\n- [src/lib/apis/index.ts](https://github.com/open-webui/open-webui/blob/main/src/lib/apis/index.ts)\n- [backend/open_webui/config.py](https://github.com/open-webui/open-webui/blob/main/backend/open_webui/config.py)\n- [backend/open_webui/env.py](https://github.com/open-webui/open-webui/blob/main/backend/open_webui/env.py)\n- [src/lib/constants.ts](https://github.com/open-webui/open-webui/blob/main/src/lib/constants.ts)\n- [backend/open_webui/utils/middleware.py](https://github.com/open-webui/open-webui/blob/main/backend/open_webui/utils/middleware.py)\n</details>\n\n# Architecture Overview\n\nOpen WebUI is a self-hosted, extensible AI interface designed to provide a unified chat experience with various LLM backends. 
The architecture follows a modern full-stack pattern with a Python-based backend and a Svelte-based frontend, communicating via REST APIs and WebSocket connections.\n\n## System Architecture\n\nOpen WebUI employs a layered architecture that separates concerns between presentation, business logic, and data access:\n\n```mermaid\ngraph TD\n    subgraph Frontend[\"Frontend (Svelte/SvelteKit)\"]\n        UI[\"UI Components<br/>(+layout.svelte)\"]\n        Utils[\"Utilities<br/>(src/lib/utils)\"]\n        APIs[\"API Client<br/>(src/lib/apis)\"]\n        Const[\"Constants<br/>(src/lib/constants)\"]\n    end\n\n    subgraph Backend[\"Backend (Python/FastAPI)\"]\n        Main[\"Main Application<br/>(main.py)\"]\n        Socket[\"WebSocket Server<br/>(socket/main.py)\"]\n        Config[\"Configuration<br/>(config.py)\"]\n        Env[\"Environment<br/>(env.py)\"]\n        Middleware[\"Middleware<br/>(middleware.py)\"]\n        Retrieval[\"Retrieval System<br/>(retrieval/)\"]\n    end\n\n    subgraph External[\"External Services\"]\n        Ollama[\"Ollama API\"]\n        OpenAI[\"OpenAI API\"]\n        VectorDB[\"Vector Databases\"]\n        Redis[\"Redis Session Store\"]\n        DB[\"SQLite/PostgreSQL\"]\n    end\n\n    UI --> Utils\n    UI --> APIs\n    Utils --> Const\n    APIs --> Const\n    APIs --> Main\n    UI --> Socket\n    \n    Main --> Config\n    Main --> Env\n    Main --> Middleware\n    Main --> Retrieval\n    Main --> Socket\n    \n    Main --> Ollama\n    Main --> OpenAI\n    Main --> VectorDB\n    Main --> Redis\n    Main --> DB\n```\n\n## Directory Structure\n\nThe repository is organized into two main components:\n\n| Directory | Purpose |\n|-----------|---------|\n| `backend/` | Python/FastAPI backend application |\n| `src/` | Svelte/SvelteKit frontend application |\n\n### Backend Structure\n\n| Path | Description |\n|------|-------------|\n| `backend/open_webui/` | Main application package |\n| `backend/open_webui/main.py` | FastAPI application entry 
point |\n| `backend/open_webui/socket/main.py` | Socket.IO WebSocket handler |\n| `backend/open_webui/config.py` | Persistent configuration system |\n| `backend/open_webui/env.py` | Environment variable loading |\n| `backend/open_webui/utils/middleware.py` | Response processing middleware |\n| `backend/open_webui/retrieval/` | RAG and document retrieval system |\n\n### Frontend Structure\n\n| Path | Description |\n|------|-------------|\n| `src/routes/` | SvelteKit routes and page components |\n| `src/lib/` | Shared libraries and utilities |\n| `src/lib/apis/` | API client implementations |\n| `src/lib/utils/` | Utility functions |\n| `src/lib/constants.ts` | Application constants and configuration |\n\n## API Architecture\n\n### API Endpoint Structure\n\nOpen WebUI exposes multiple API bases for different services:\n\n```mermaid\ngraph LR\n    subgraph Gateway[\"API Gateway\"]\n        Base[\"/\"]\n    end\n    \n    subgraph Services[\"Service Endpoints\"]\n        API[\"/api/v1<br/>REST API\"]\n        Ollama[\"/ollama<br/>Ollama Proxy\"]\n        OpenAI[\"/openai<br/>OpenAI Proxy\"]\n        Audio[\"/api/v1/audio<br/>Audio Processing\"]\n        Images[\"/api/v1/images<br/>Image Processing\"]\n        Retrieval[\"/api/v1/retrieval<br/>RAG Retrieval\"]\n    end\n    \n    Base --> API\n    Base --> Ollama\n    Base --> OpenAI\n    Base --> Audio\n    Base --> Images\n    Base --> Retrieval\n```\n\n### API Constants Configuration\n\nAPI base URLs are defined in `src/lib/constants.ts`:\n\n| Constant | Default Value | Purpose |\n|----------|--------------|---------|\n| `WEBUI_BASE_URL` | Dynamic (dev/prod) | Base application URL |\n| `WEBUI_API_BASE_URL` | `${WEBUI_BASE_URL}/api/v1` | Main REST API |\n| `OLLAMA_API_BASE_URL` | `${WEBUI_BASE_URL}/ollama` | Ollama API proxy |\n| `OPENAI_API_BASE_URL` | `${WEBUI_BASE_URL}/openai` | OpenAI API proxy |\n| `AUDIO_API_BASE_URL` | `${WEBUI_BASE_URL}/api/v1/audio` | Audio processing |\n| `IMAGES_API_BASE_URL` | 
`${WEBUI_BASE_URL}/api/v1/images` | Image generation |\n| `RETRIEVAL_API_BASE_URL` | `${WEBUI_BASE_URL}/api/v1/retrieval` | RAG retrieval |\n\n资料来源：[src/lib/constants.ts:1-15]()\n\n### API Client Pattern\n\nThe frontend uses a consistent API client pattern implemented in `src/lib/apis/`:\n\n```typescript\n// Pattern used across all API clients\nconst res = await fetch(`${WEBUI_API_BASE_URL}/endpoint`, {\n    method: 'METHOD',\n    headers: {\n        Accept: 'application/json',\n        'Content-Type': 'application/json',\n        authorization: `Bearer ${token}`\n    },\n    body: JSON.stringify({ /* payload */ })\n})\n    .then(async (res) => {\n        if (!res.ok) throw await res.json();\n        return res.json();\n    });\n```\n\n资料来源：[src/lib/apis/knowledge/index.ts:1-35]()\n\n## Configuration System\n\n### Environment Setup\n\nThe backend loads configuration from environment variables and `.env` files using the following hierarchy defined in `backend/open_webui/env.py`:\n\n| Variable | Description |\n|----------|-------------|\n| `OPEN_WEBUI_DIR` | Application directory (location of `env.py`) |\n| `BACKEND_DIR` | Parent of `open_webui/` |\n| `BASE_DIR` | Repository root |\n| `DOCKER` | Docker environment flag |\n| `USE_CUDA_DOCKER` | CUDA/GPU acceleration flag |\n\n资料来源：[backend/open_webui/env.py:1-45]()\n\n### Persistent Configuration\n\nConfiguration values are stored persistently using the `PersistentConfig` system:\n\n```python\nENABLE_CODE_EXECUTION = PersistentConfig(\n    'ENABLE_CODE_EXECUTION',\n    'code_execution.enable',\n    os.environ.get('ENABLE_CODE_EXECUTION', 'True').lower() == 'true',\n)\n\nCODE_EXECUTION_ENGINE = PersistentConfig(\n    'CODE_EXECUTION_ENGINE',\n    'code_execution.engine',\n    os.environ.get('CODE_EXECUTION_ENGINE', 'pyodide'),\n)\n```\n\n资料来源：[backend/open_webui/config.py:1-50]()\n\n### Supported File Types\n\nThe application supports various file upload types:\n\n| Category | MIME Types |\n|----------|-----------|\n| 
Documents | `application/pdf`, `application/epub+zip`, `application/vnd.openxmlformats-officedocument.wordprocessingml.document` |\n| Text | `text/plain`, `text/csv`, `text/xml`, `text/html`, `text/x-python`, `text/css`, `text/markdown` |\n| Code | `text/x-python`, `text/css`, `application/x-javascript` |\n| Media | `audio/mpeg`, `audio/wav` |\n| Other | `application/octet-stream` |\n\n资料来源：[src/lib/constants.ts:20-35]()\n\n## WebSocket Communication\n\nReal-time communication uses Socket.IO for bidirectional messaging:\n\n```mermaid\nsequenceDiagram\n    participant Client as Frontend\n    participant Socket as Socket.IO Server\n    participant Main as Main Application\n    \n    Client->>Socket: Connect with auth token\n    Socket->>Main: Validate session\n    Main->>Socket: Session valid\n    Socket->>Client: Connection established\n    \n    Client->>Socket: Send message event\n    Socket->>Main: Forward message\n    Main->>Main: Process with LLM\n    Main->>Socket: Stream response\n    Socket->>Client: Stream chunks\n    \n    Client->>Socket: Disconnect\n    Socket->>Client: Connection closed\n```\n\n资料来源：[backend/open_webui/socket/main.py]()\n\n## Middleware Pipeline\n\nThe middleware system processes responses and transforms content for the frontend. The `build_output()` function in `backend/open_webui/utils/middleware.py` handles special content types:\n\n### Content Type Processing\n\n| Content Type | Rendering | Description |\n|-------------|-----------|-------------|\n| `reasoning` | `<details>` with thought process | Model reasoning display |\n| `code_interpreter` | `<details>` with code block | Code execution results |\n\n```python\nif item_type == 'open_webui:code_interpreter':\n    # Code interpreter needs to inspect/mutate prior accumulated content\n    content = '\\n'.join(parts)\n    content_stripped, original_whitespace = split_content_and_whitespace(content)\n    # ... 
processing logic\n    parts.append(\n        f'<details type=\"code_interpreter\" done=\"true\" duration=\"{duration or 0}\">\\n<summary>Analyzed</summary>\\n{display}\\n</details>'\n    )\n```\n\n资料来源：[backend/open_webui/utils/middleware.py:1-80]()\n\n### Deep Merge Utility\n\nThe middleware also provides a `deep_merge()` function for combining configuration:\n\n| Behavior | Description |\n|----------|-------------|\n| Dicts | Recursive merge |\n| Strings | Concatenation |\n| Others | Overwrite |\n\n资料来源：[backend/open_webui/utils/middleware.py:75-85]()\n\n## Frontend Application Structure\n\n### Layout System\n\nThe main layout is defined in `src/routes/+layout.svelte` which serves as the root component:\n\n```mermaid\ngraph TD\n    Layout[\"+layout.svelte<br/>Root Layout\"]\n    Splash[\"Splash Screen<br/>(#splash-screen)\"]\n    Progress[\"Progress Bar<br/>(#progress-bar)\"]\n    Logo[\"Logo Elements<br/>(#logo, #logo-her)\"]\n    Theme[\"Theme Detection<br/>(.dark, .her)\"]\n    \n    Layout --> Splash\n    Layout --> Progress\n    Layout --> Logo\n    Layout --> Theme\n```\n\n资料来源：[src/app.html:1-60]()\n\n### Utility Libraries\n\n| Library | Purpose |\n|---------|---------|\n| `src/lib/utils/index.ts` | Content processing, sanitization, Chinese language handling |\n| `src/lib/utils/codeHighlight.ts` | Code syntax highlighting with Shiki |\n| `src/lib/apis/index.ts` | API client exports |\n\n### Content Processing Pipeline\n\nThe `processResponseContent()` function handles special content transformations:\n\n```typescript\nexport const processResponseContent = (content: string) => {\n    content = processChineseContent(content);\n    return content.trim();\n};\n\nexport const sanitizeResponseContent = (content: string) => {\n    return content\n        .replace(/<\\|[a-z]*$/, '')\n        .replace(/<\\|[a-z]+\\|$/, '')\n        .replace(/<$/, '')\n        .replaceAll('<', '&lt;')\n        .replaceAll('>', '&gt;')\n        .replaceAll(/<\\|[a-z]+\\|>/g, ' ')\n   
     .trim();\n};\n```\n\n资料来源：[src/lib/utils/index.ts:1-50]()\n\n## Retrieval System\n\nThe RAG (Retrieval-Augmented Generation) system supports multiple document loaders and search engines:\n\n### Supported Document Sources\n\n| Source | Configuration |\n|--------|--------------|\n| External Document Loader | `EXTERNAL_DOCUMENT_LOADER_URL`, `EXTERNAL_DOCUMENT_LOADER_API_KEY` |\n| Apache TIKA | `TIKA_SERVER_URL` |\n| Docling | `DOCLING_SERVER_URL`, `DOCLING_API_KEY`, `DOCLING_PARAMS` |\n| Mistral OCR | `MISTRAL_OCR_API_BASE_URL`, `MISTRAL_OCR_API_KEY` |\n| PaddleOCR VL | `PADDLEOCR_VL_BASE_URL`, `PADDLEOCR_VL_TOKEN` |\n| MinerU | `MINERU_API_URL`, `MINERU_API_KEY`, `MINERU_PARAMS` |\n\n### Supported Search Providers\n\n| Provider | Notes |\n|----------|-------|\n| SearXNG | Self-hosted metasearch |\n| Google PSE | Programmable Search Engine |\n| Brave Search | Privacy-focused search |\n| Ollama Cloud | LLM provider search |\n| Azure AI Search | Enterprise search |\n\n资料来源：[backend/open_webui/retrieval/utils.py:1-60]()\n\n## Code Execution Engine\n\nOpen WebUI supports code execution with configurable backends:\n\n### Configuration Options\n\n| Setting | Default | Description |\n|---------|---------|-------------|\n| `ENABLE_CODE_EXECUTION` | `True` | Enable/disable code execution |\n| `CODE_EXECUTION_ENGINE` | `pyodide` | Execution engine (pyodide/jupyter) |\n| `CODE_EXECUTION_JUPYTER_URL` | `''` | Jupyter server URL |\n| `CODE_EXECUTION_JUPYTER_AUTH` | `''` | Jupyter authentication |\n| `CODE_EXECUTION_JUPYTER_AUTH_TOKEN` | `''` | Jupyter auth token |\n\n### Execution Environments\n\n| Engine | Environment | Constraints |\n|--------|-------------|-------------|\n| Pyodide | Browser-based | Cannot install packages, `pip install` unavailable |\n| Jupyter | External server | Requires URL and optional authentication |\n\n资料来源：[backend/open_webui/config.py:50-100]()\n\n## Technology Stack\n\n### Backend Dependencies\n\nKey packages from 
`backend/requirements-min.txt`:\n\n| Package | Version | Purpose |\n|---------|---------|---------|\n| `fastapi` | 0.135.1 | Web framework |\n| `uvicorn[standard]` | 0.41.0 | ASGI server |\n| `pydantic` | 2.12.5 | Data validation |\n| `python-multipart` | 0.0.22 | Form parsing |\n| `python-socketio` | 5.16.1 | WebSocket support |\n| `sqlalchemy` | 2.0.48 | ORM |\n| `aiosqlite` | 0.21.0 | Async SQLite |\n| `psycopg[binary]` | 3.2.9 | PostgreSQL driver |\n| `httpx[socks,http2,zstd,cli,brotli]` | 0.28.1 | HTTP client |\n| `redis` | latest | Session storage |\n| `pycrdt` | 0.12.47 | CRDT for collaboration |\n| `RestrictedPython` | 8.1 | Safe Python execution |\n\n资料来源：[backend/requirements-min.txt:1-40]()\n\n### Frontend Architecture\n\n| Technology | Purpose |\n|------------|---------|\n| SvelteKit | Frontend framework |\n| TypeScript | Type safety |\n| Shiki | Code syntax highlighting |\n\n## Security Considerations\n\n### Authentication Flow\n\nThe system uses Bearer token authentication for API requests:\n\n```typescript\nheaders: {\n    authorization: `Bearer ${token}`\n}\n```\n\n### Role-Based Access Control (RBAC)\n\nOpen WebUI implements RBAC for:\n- Ollama endpoint access\n- Model creation/pulling rights\n- Knowledge base permissions\n\n资料来源：[README.md]()\n\n## Deployment Modes\n\n### Docker Deployment\n\n```bash\ndocker run -d -p 3000:8080 \\\n    -v open-webui:/app/backend/data \\\n    --name open-webui \\\n    --add-host=host.docker.internal:host-gateway \\\n    --restart always \\\n    ghcr.io/open-webui/open-webui:latest\n```\n\n### Python pip Installation\n\n```bash\npip install open-webui\nopen-webui serve\n```\n\n### Environment Variables\n\n| Variable | Values | Description |\n|----------|--------|-------------|\n| `DOCKER` | `True`/`False` | Docker environment detection |\n| `USE_CUDA_DOCKER` | `true`/`false` | GPU acceleration |\n| `HF_HUB_OFFLINE` | `1` | Offline mode (prevent downloads) |\n\n资料来源：[README.md](), 
[backend/open_webui/env.py:30-40]()\n\n---\n\n<a id='data-models'></a>\n\n## Data Models\n\n### 相关页面\n\n相关主题：[Architecture Overview](#architecture-overview), [API Routers](#api-routers)\n\n<details>\n<summary>相关源码文件</summary>\n\n以下源码文件用于生成本页说明：\n\n- [backend/open_webui/models/users.py](https://github.com/open-webui/open-webui/blob/main/backend/open_webui/models/users.py)\n- [backend/open_webui/models/chats.py](https://github.com/open-webui/open-webui/blob/main/backend/open_webui/models/chats.py)\n- [backend/open_webui/models/messages.py](https://github.com/open-webui/open-webui/blob/main/backend/open_webui/models/messages.py)\n- [backend/open_webui/models/knowledge.py](https://github.com/open-webui/open-webui/blob/main/backend/open_webui/models/knowledge.py)\n- [backend/open_webui/models/files.py](https://github.com/open-webui/open-webui/blob/main/backend/open_webui/models/files.py)\n- [backend/open_webui/internal/db.py](https://github.com/open-webui/open-webui/blob/main/backend/open_webui/internal/db.py)\n- [backend/open_webui/migrations/versions/7e5b5dc7342b_init.py](https://github.com/open-webui/open-webui/blob/main/backend/open_webui/migrations/versions/7e5b5dc7342b_init.py)\n</details>\n\n# Data Models\n\n## Overview\n\nThe Open WebUI project implements a comprehensive data modeling layer that manages persistent storage for all core application entities. 
The data models are built using SQLAlchemy ORM and follow a structured approach to storing user interactions, configurations, and content within the application.\n\nThe data model architecture serves as the foundation for:\n\n- **User Management**: Authentication, authorization, and user preferences\n- **Chat Persistence**: Message history and conversation state\n- **Knowledge Bases**: RAG (Retrieval-Augmented Generation) document storage\n- **File Management**: Document uploads and attachments\n- **Access Control**: Permission management through groups and grants\n\n资料来源：[backend/open_webui/internal/db.py:1-50]()\n\n## Architecture Overview\n\nOpen WebUI uses a layered data access architecture where models are defined as SQLAlchemy ORM classes and accessed through service layers.\n\n```mermaid\ngraph TD\n    A[API Routers] --> B[Service Layer]\n    B --> C[Data Models]\n    C --> D[SQLAlchemy ORM]\n    D --> E[(SQLite Database)]\n    \n    F[ChatMessages Table] --> C\n    G[Chats Table] --> C\n    H[Users Table] --> C\n    I[Knowledge Table] --> C\n    J[Files Table] --> C\n```\n\n## Core Data Models\n\n### User Model\n\nThe User model manages user accounts, authentication, and preferences.\n\n```python\nclass UserModel(BaseModel):\n    id: str\n    name: str\n    email: Optional[str]\n    role: str  # admin, user, guest\n    email_verified: bool\n    created_at: datetime\n    updated_at: datetime\n    settings: dict\n    keys: list\n```\n\n| Field | Type | Description |\n|-------|------|-------------|\n| `id` | String | Unique user identifier (UUID) |\n| `name` | String | Display name |\n| `email` | String (nullable) | User email address |\n| `role` | Enum | User role: `admin`, `user`, `guest` |\n| `email_verified` | Boolean | Email verification status |\n| `created_at` | DateTime | Account creation timestamp |\n| `updated_at` | DateTime | Last modification timestamp |\n| `settings` | JSON | User preferences and configurations 
|\n\n资料来源：[backend/open_webui/models/users.py:1-100]()\n\n### Chat Model\n\nThe Chat model stores conversation sessions and their associated metadata.\n\n```mermaid\ngraph LR\n    A[User] -->|has many| B[Chats]\n    B -->|contains| C[Messages]\n    B -->|references| D[ChatMessages Table]\n    D -->|links to| E[Messages JSON]\n```\n\nThe Chat model structure:\n\n```python\nclass ChatModel(BaseModel):\n    id: str\n    user_id: str\n    title: str\n    chat: dict  # Contains history, messages, metadata\n    created_at: datetime\n    updated_at: datetime\n    share_id: Optional[str]\n    archived: bool\n```\n\n| Field | Type | Description |\n|-------|------|-------------|\n| `id` | String | Unique chat identifier |\n| `user_id` | String | Owner user ID |\n| `title` | String | Chat title |\n| `chat` | JSON | Full chat history and state |\n| `share_id` | String (nullable) | Public sharing identifier |\n| `archived` | Boolean | Archive status |\n\nThe `chat` field contains a nested JSON structure:\n\n```json\n{\n  \"history\": {\n    \"messages\": {\n      \"message_id\": {\n        \"id\": \"...\",\n        \"type\": \"human|ai|system\",\n        \"content\": \"...\",\n        \"created_at\": \"...\"\n      }\n    }\n  },\n  \"metadata\": {}\n}\n```\n\n资料来源：[backend/open_webui/models/chats.py:1-150]()\n\n### Message Model\n\nThe Message model represents individual messages within a chat conversation.\n\n```mermaid\ngraph TD\n    A[Message] --> B[type]\n    A --> C[content]\n    A --> D[role]\n    A --> E[timestamp]\n    \n    B --> F[human|ai|system|tool]\n    C --> G[text|images|files]\n```\n\n```python\nclass MessageModel(BaseModel):\n    id: str\n    chat_id: str\n    message_id: str\n    type: str  # human, ai, system, tool\n    role: str\n    content: str\n    files: list\n    images: list\n    created_at: datetime\n```\n\n| Field | Type | Description |\n|-------|------|-------------|\n| `id` | String | Unique message ID |\n| `chat_id` | String | Parent chat ID 
|\n| `message_id` | String | Message identifier within chat |\n| `type` | Enum | Message type |\n| `role` | String | Role: `user`, `assistant`, `system`, `tool` |\n| `content` | String | Message content |\n| `files` | List | Attached file references |\n| `images` | List | Embedded image data |\n\n资料来源：[backend/open_webui/models/messages.py:1-100]()\n\n### Knowledge Model\n\nThe Knowledge model manages RAG knowledge bases for document retrieval.\n\n```python\nclass KnowledgeModel(BaseModel):\n    id: str\n    user_id: str\n    name: str\n    description: str\n    created_at: datetime\n    updated_at: datetime\n    data: dict  # Contains documents and vectors\n```\n\n| Field | Type | Description |\n|-------|------|-------------|\n| `id` | String | Knowledge base ID |\n| `user_id` | String | Owner user ID |\n| `name` | String | Knowledge base name |\n| `description` | String | Knowledge base description |\n| `data` | JSON | Documents and vector embeddings |\n\n资料来源：[backend/open_webui/models/knowledge.py:1-100]()\n\n### File Model\n\nThe File model handles file uploads and attachments.\n\n```python\nclass FileModel(BaseModel):\n    id: str\n    user_id: str\n    filename: str\n    path: str\n    type: str\n    size: int\n    created_at: datetime\n    data: dict  # Metadata\n```\n\n| Field | Type | Description |\n|-------|------|-------------|\n| `id` | String | File identifier |\n| `user_id` | String | Owner user ID |\n| `filename` | String | Original filename |\n| `path` | String | Storage path |\n| `type` | String | MIME type |\n| `size` | Integer | File size in bytes |\n| `data` | JSON | Additional metadata |\n\n资料来源：[backend/open_webui/models/files.py:1-100]()\n\n## Database Schema\n\n### Entity Relationship Diagram\n\n```mermaid\nerDiagram\n    USERS ||--o{ CHATS : \"owns\"\n    USERS ||--o{ FILES : \"owns\"\n    USERS ||--o{ KNOWLEDGE : \"owns\"\n    USERS ||--o{ MESSAGES : \"sends\"\n    \n    CHATS ||--o{ CHAT_MESSAGES : \"contains\"\n    CHAT_MESSAGES ||--|| 
MESSAGES : \"references\"\n    \n    KNOWLEDGE ||--o{ DOCUMENTS : \"contains\"\n    \n    USERS ||--o{ GROUPS : \"belongs to\"\n    GROUPS ||--o{ ACCESS_GRANTS : \"grants\"\n    \n    CHATS ||--o| SHARES : \"can be shared\"\n```\n\n### Database Tables\n\n| Table Name | Primary Key | Description |\n|------------|-------------|-------------|\n| `users` | `id` | User accounts and settings |\n| `chats` | `id` | Chat session storage |\n| `chat_messages` | `id, chat_id, message_id` | Normalized message storage |\n| `messages` | `id` | Message content (embedded in chats) |\n| `knowledge` | `id` | Knowledge base definitions |\n| `documents` | `id` | Knowledge base documents |\n| `files` | `id` | File metadata |\n| `folders` | `id` | Folder organization |\n| `groups` | `id` | User groups |\n| `access_grants` | `id` | Permission grants |\n| `memories` | `id` | User memory storage |\n| `channels` | `id` | Communication channels |\n| `notes` | `id` | User notes |\n\n资料来源：[backend/open_webui/migrations/versions/7e5b5dc7342b_init.py:1-500]()\n\n## Access Control Models\n\n### User Groups\n\n```python\nclass GroupModel(BaseModel):\n    id: str\n    name: str\n    description: str\n    created_at: datetime\n    user_id: str  # Creator/owner\n```\n\n### Access Grants\n\n```mermaid\ngraph TD\n    A[User] -->|belongs to| B[Groups]\n    B -->|grants| C[Access Grants]\n    C -->|applies to| D[Resource]\n    \n    D --> E[Model]\n    D --> F[Knowledge]\n    D --> G[Tool]\n    D --> H[Function]\n```\n\n| Field | Type | Description |\n|-------|------|-------------|\n| `id` | String | Grant identifier |\n| `user_id` | String | User receiving access |\n| `group_id` | String | Group granting access |\n| `resource_type` | Enum | Type: `model`, `knowledge`, `tool`, `function` |\n| `resource_id` | String | Target resource ID |\n| `permission` | String | Permission level: `read`, `write`, `admin` |\n\n资料来源：[backend/open_webui/utils/access_control/__init__.py:1-50]()\n\n## Service Layer 
Integration\n\n### Chat Service Pattern\n\nThe Chat model provides methods for message management:\n\n```python\nasync def get_messages_map_by_chat_id(id: str) -> dict:\n    \"\"\"Get message map for walking history.\"\"\"\n    \nasync def get_message_by_id_and_message_id(\n    id: str, \n    message_id: str\n) -> Optional[dict]:\n    \"\"\"Retrieve specific message from chat.\"\"\"\n    \nasync def upsert_message_to_chat_by_id_and_message_id(\n    id: str, \n    message_id: str, \n    message: dict\n) -> Optional[ChatModel]:\n    \"\"\"Update or insert message in chat.\"\"\"\n```\n\n### Message Sanitization\n\nBefore database operations, message content is sanitized to prevent issues:\n\n```python\ndef sanitize_text_for_db(text: str) -> str:\n    \"\"\"Remove null characters and invalid sequences.\"\"\"\n```\n\nThis ensures database compatibility and prevents JSON parsing errors when loading chat history.\n\n资料来源：[backend/open_webui/models/chats.py:100-180]()\n\n## Model Operations\n\n### CRUD Operations\n\n| Operation | Method | Description |\n|-----------|--------|-------------|\n| Create | `Model.create()` | Insert new record |\n| Read | `Model.get()` | Retrieve by ID |\n| Update | `Model.update()` | Modify existing record |\n| Delete | `Model.delete()` | Remove record |\n| List | `Model.get_all()` | Retrieve all records |\n| Filter | `Model.filter_by()` | Query with conditions |\n\n### Async Database Access\n\nOpen WebUI uses async database operations for improved performance:\n\n```python\nasync def get_chat_by_id(id: str) -> Optional[ChatModel]:\n    \"\"\"Async retrieval of chat by ID.\"\"\"\n    \nasync def upsert_message_to_chat_by_id_and_message_id(\n    id: str, \n    message_id: str, \n    message: dict\n) -> Optional[ChatModel]:\n    \"\"\"Async upsert operation.\"\"\"\n```\n\n## Data Storage Locations\n\n### Database File\n\nBy default, Open WebUI uses SQLite stored at:\n\n```\nbackend/data/webui.db\n```\n\n### File Storage\n\nUploaded files are 
stored in:\n\n```\nbackend/data/uploads/\n```\n\n### Configuration\n\nDatabase and storage paths are configured via environment variables:\n\n| Variable | Default | Description |\n|----------|---------|-------------|\n| `DATA_DIR` | `backend/data` | Base data directory |\n| `DATABASE_URL` | `sqlite:///data/webui.db` | Database connection string |\n\n资料来源：[backend/open_webui/env.py:1-80]()\n\n## Migration System\n\nOpen WebUI uses Alembic for database migrations:\n\n```mermaid\ngraph LR\n    A[Migration Scripts] --> B[Alembic]\n    B --> C[Database Schema]\n    C --> D[Model Definitions]\n    D --> E[Application]\n```\n\nMigration files are located in:\n\n```\nbackend/open_webui/migrations/versions/\n```\n\n资料来源：[backend/open_webui/migrations/versions/7e5b5dc7342b_init.py:1-500]()\n\n## Summary\n\nThe Open WebUI data model layer provides a robust foundation for:\n\n1. **User Management**: Complete user lifecycle including authentication and authorization\n2. **Chat Persistence**: Flexible JSON-based chat storage with normalized message tables\n3. **Knowledge Management**: RAG-capable knowledge bases for document retrieval\n4. **File Handling**: Secure file upload and storage with metadata tracking\n5. 
**Access Control**: Fine-grained permissions through groups and resource grants\n\nThe architecture prioritizes:\n- **Performance**: Async database operations and message normalization\n- **Flexibility**: JSON-based storage for variable content structures\n- **Security**: Text sanitization and access control enforcement\n- **Extensibility**: Modular model design for future features\n\n---\n\n<a id='api-routers'></a>\n\n## API Routers\n\n### 相关页面\n\n相关主题：[Architecture Overview](#architecture-overview), [Data Models](#data-models)\n\n<details>\n<summary>相关源码文件</summary>\n\n以下源码文件用于生成本页说明：\n\n- [backend/open_webui/main.py](https://github.com/open-webui/open-webui/blob/main/backend/open_webui/main.py)\n- [backend/open_webui/routers/pipelines.py](https://github.com/open-webui/open-webui/blob/main/backend/open_webui/routers/pipelines.py)\n- [backend/open_webui/routers/tasks.py](https://github.com/open-webui/open-webui/blob/main/backend/open_webui/routers/tasks.py)\n- [backend/open_webui/routers/prompts.py](https://github.com/open-webui/open-webui/blob/main/backend/open_webui/routers/prompts.py)\n- [backend/open_webui/utils/middleware.py](https://github.com/open-webui/open-webui/blob/main/backend/open_webui/utils/middleware.py)\n- [backend/open_webui/utils/asgi_middleware.py](https://github.com/open-webui/open-webui/blob/main/backend/open_webui/utils/asgi_middleware.py)\n</details>\n\n# API Routers\n\n## Overview\n\nThe Open WebUI project implements a comprehensive API routing architecture built on **FastAPI**. API Routers serve as the primary mechanism for organizing and exposing RESTful endpoints across the application. 
Each router encapsulates a specific functional domain (e.g., authentication, chat management, file handling, knowledge bases) and is mounted at a defined prefix under the `/api/v1/` base path.\n\nThe router architecture follows a modular design pattern where related endpoints are grouped into dedicated router modules located in `backend/open_webui/routers/`. This separation of concerns enables maintainability, testability, and clear API boundaries.\n\n资料来源：[backend/open_webui/main.py:1-60]()\n\n## Router Registration Architecture\n\n### Central Router Assembly\n\nAll routers are registered in `backend/open_webui/main.py` using FastAPI's `include_router()` method. Each router receives a unique URL prefix and OpenAPI tag for documentation and routing purposes.\n\n```python\napp.include_router(auths.router, prefix='/api/v1/auths', tags=['auths'])\napp.include_router(users.router, prefix='/api/v1/users', tags=['users'])\napp.include_router(chats.router, prefix='/api/v1/chats', tags=['chats'])\napp.include_router(models.router, prefix='/api/v1/models', tags=['models'])\napp.include_router(knowledge.router, prefix='/api/v1/knowledge', tags=['knowledge'])\napp.include_router(files.router, prefix='/api/v1/files', tags=['files'])\n```\n\n资料来源：[backend/open_webui/main.py:35-55]()\n\n### Router Prefix Mapping\n\n| Functional Domain | Router Module | API Prefix | OpenAPI Tag |\n|-------------------|---------------|------------|-------------|\n| Authentication | `auths` | `/api/v1/auths` | `auths` |\n| User Management | `users` | `/api/v1/users` | `users` |\n| Chat Operations | `chats` | `/api/v1/chats` | `chats` |\n| Model Management | `models` | `/api/v1/models` | `models` |\n| Knowledge Bases | `knowledge` | `/api/v1/knowledge` | `knowledge` |\n| File Handling | `files` | `/api/v1/files` | `files` |\n| Prompts | `prompts` | `/api/v1/prompts` | `prompts` |\n| Tools | `tools` | `/api/v1/tools` | `tools` |\n| Skills | `skills` | `/api/v1/skills` | `skills` |\n| Memories | 
`memories` | `/api/v1/memories` | `memories` |\n| Folders | `folders` | `/api/v1/folders` | `folders` |\n| Groups | `groups` | `/api/v1/groups` | `groups` |\n| Functions | `functions` | `/api/v1/functions` | `functions` |\n| Evaluations | `evaluations` | `/api/v1/evaluations` | `evaluations` |\n| Audio Processing | `audio` | `/api/v1/audio` | `audio` |\n| Image Processing | `images` | `/api/v1/images` | `images` |\n| Retrieval | `retrieval` | `/api/v1/retrieval` | `retrieval` |\n| Configurations | `configs` | `/api/v1/configs` | `configs` |\n| Channels | `channels` | `/api/v1/channels` | `channels` |\n| Notes | `notes` | `/api/v1/notes` | `notes` |\n| Tasks | `tasks` | `/api/v1/tasks` | `tasks` |\n| Utils | `utils` | `/api/v1/utils` | `utils` |\n| Terminals | `terminals` | `/api/v1/terminals` | `terminals` |\n| Automations | `automations` | `/api/v1/automations` | `automations` |\n| Calendars | `calendar` | `/api/v1/calendars` | `calendars` |\n| SCIM Identity | `scim` | `/api/v1/scim/v2` | `scim` |\n| Analytics | `analytics` | `/api/v1/analytics` | `analytics` |\n\n资料来源：[backend/open_webui/main.py:35-65]()\n\n## Request Flow and Middleware Pipeline\n\n### Middleware Stack\n\nThe API request lifecycle involves multiple middleware layers that process requests before they reach individual route handlers.\n\n```mermaid\ngraph TD\n    A[HTTP Request] --> B[ASGI Middleware]\n    B --> C[Authentication Middleware]\n    C --> D[Token Extraction<br/>API Key/Cookie/Bearer]\n    D --> E[Audit Logging Middleware<br/>Conditional]\n    E --> F[Pipeline Inlet Filter]\n    F --> G[Route Handler]\n    G --> H[Pipeline Outlet Filter]\n    H --> I[Response]\n```\n\n资料来源：[backend/open_webui/utils/asgi_middleware.py:1-30]()\n\n### Authentication Middleware\n\nThe ASGI middleware (`asgi_middleware.py`) handles credential extraction from multiple sources:\n\n1. **Bearer Token**: Extracted from `Authorization` header\n2. **Cookie Token**: Retrieved from `token` cookie\n3. 
**API Key**: Retrieved from custom header specified by `CUSTOM_API_KEY_HEADER` environment variable\n\nThe extracted credentials are stored in `request.state.token` for downstream route handlers.\n\n资料来源：[backend/open_webui/utils/asgi_middleware.py:12-40]()\n\n### Pipeline Filter System\n\nThe `pipelines.py` module implements a filter system that allows middleware-like processing at the inlet and outlet of request handling. This enables transformation and validation of payloads through user-defined pipeline stages.\n\n```python\ndef get_sorted_filters(model_id, models):\n    filters = [\n        model\n        for model in models.values()\n        if 'pipeline' in model\n        and 'type' in model['pipeline']\n        and model['pipeline']['type'] == 'filter'\n        and (\n            model['pipeline']['pipelines'] == ['*']\n            or any(model_id == target_model_id for target_model_id in model['pipeline']['pipelines'])\n        )\n    ]\n    sorted_filters = sorted(filters, key=lambda x: x['pipeline']['priority'])\n    return sorted_filters\n```\n\n资料来源：[backend/open_webui/routers/pipelines.py:30-45]()\n\n## Router Module Structure\n\n### Standard Router Pattern\n\nEach router module follows a consistent pattern:\n\n```python\nfrom fastapi import APIRouter, Depends, HTTPException, Request, status\nfrom pydantic import BaseModel\nfrom typing import Optional\nfrom open_webui.utils.auth import get_verified_user, get_admin_user\n\nrouter = APIRouter()\n\nclass EndpointForm(BaseModel):\n    # Request payload schema\n\n@router.post('/endpoint')\nasync def endpoint_handler(\n    request: Request,\n    form_data: EndpointForm,\n    user=Depends(get_verified_user)\n):\n    # Handler implementation\n```\n\n资料来源：[backend/open_webui/routers/prompts.py:1-30]()\n\n### Authentication Dependencies\n\n| Dependency | Purpose | Access Level |\n|------------|---------|--------------|\n| `get_verified_user` | Validates authenticated user | Authenticated users |\n| 
`get_admin_user` | Validates admin privileges | Admin only |\n\n资料来源：[backend/open_webui/routers/prompts.py:25-30]()\n\n## Core Router Modules\n\n### Tasks Router\n\nThe tasks router (`tasks.py`) handles asynchronous operations for chat-related tasks including title generation, follow-up generation, query generation, and image prompt generation.\n\n**Task Types Available:**\n\n| Task | Purpose | Template Function |\n|------|---------|-------------------|\n| Title Generation | Create chat titles | `title_generation_template()` |\n| Follow-up Generation | Generate follow-up questions | `follow_up_generation_template()` |\n| Query Generation | Create search queries | `query_generation_template()` |\n| Image Prompt Generation | Generate image prompts | `image_prompt_generation_template()` |\n| Autocomplete | Autocomplete suggestions | `autocomplete_generation_template()` |\n| Tags Generation | Generate content tags | `tags_generation_template()` |\n| Emoji Generation | Generate emoji suggestions | `emoji_generation_template()` |\n| MoA Response | Mixture of Agents response | `moa_response_generation_template()` |\n\n资料来源：[backend/open_webui/routers/tasks.py:1-40]()\n\n### Prompts Router\n\nThe prompts router manages user-defined prompt templates with command-based activation. 
It implements access control based on user roles and resource grants.\n\n**Access Control Logic:**\n\n```python\nwrite_access=(\n    (user.role == 'admin' and BYPASS_ADMIN_ACCESS_CONTROL)\n    or user.id == prompt.user_id\n    or await AccessGrants.has_access(\n        user_id=user.id,\n        resource_type='prompt',\n        resource_id=prompt.id,\n        permission='write',\n        db=db,\n    )\n)\n```\n\n资料来源：[backend/open_webui/routers/prompts.py:50-70]()\n\n## Conditional Router Loading\n\nSome routers are conditionally loaded based on configuration flags:\n\n### SCIM Router\n\nThe SCIM 2.0 router for identity management is enabled via the `ENABLE_SCIM` environment variable:\n\n```python\nif ENABLE_SCIM:\n    app.include_router(scim.router, prefix='/api/v1/scim/v2', tags=['scim'])\n```\n\n### Analytics Router\n\nThe analytics router is loaded when admin analytics are enabled:\n\n```python\nif ENABLE_ADMIN_ANALYTICS:\n    app.include_router(analytics.router, prefix='/api/v1/analytics', tags=['analytics'])\n```\n\n### Audit Logging Middleware\n\nAudit logging is conditionally applied based on the `AUDIT_LOG_LEVEL` configuration:\n\n```python\ntry:\n    audit_level = AuditLevel(AUDIT_LOG_LEVEL)\nexcept ValueError as e:\n    logger.error(f'Invalid audit level: {AUDIT_LOG_LEVEL}. 
Error: {e}')\n    audit_level = AuditLevel.NONE\n\nif audit_level != AuditLevel.NONE:\n    app.add_middleware(\n        AuditLoggingMiddleware,\n        audit_level=audit_level,\n        excluded_paths=AUDIT_EXCLUDED_PATHS,\n    )\n```\n\n资料来源：[backend/open_webui/main.py:55-70]()\n\n## Utility Functions and Helpers\n\n### Middleware Utility Imports\n\nThe `middleware.py` module aggregates utility functions from multiple sources for use by route handlers:\n\n```python\nfrom open_webui.utils.chat import generate_chat_completion\nfrom open_webui.utils.task import get_task_model_id, rag_template\nfrom open_webui.utils.tools import get_tools, get_terminal_tools\nfrom open_webui.utils.misc import (\n    deep_update, extract_urls, get_message_list,\n    add_or_update_system_message, merge_system_messages\n)\nfrom open_webui.utils.files import (\n    convert_markdown_base64_images,\n    get_file_url_from_base64,\n    get_image_base64_from_url,\n)\n```\n\n资料来源：[backend/open_webui/utils/middleware.py:1-35]()\n\n## Security Architecture\n\n### Token-Based Authentication\n\n```mermaid\nsequenceDiagram\n    participant C as Client\n    participant M as ASGI Middleware\n    participant R as Route Handler\n    \n    C->>M: Request + Credentials\n    M->>M: Extract Bearer/Cookie/API-Key\n    M->>R: Set request.state.token\n    R->>R: Verify with get_verified_user\n    alt Invalid Token\n        R-->>C: 401 Unauthorized\n    else Valid Token\n        R->>R: Process Request\n        R-->>C: Response\n    end\n```\n\n资料来源：[backend/open_webui/utils/asgi_middleware.py:20-50]()\n\n## Frontend API Integration\n\nThe frontend TypeScript codebase in `src/lib/apis/` provides typed interfaces for all major routers:\n\n| Router Domain | Frontend Module |\n|--------------|-----------------|\n| Knowledge Bases | `src/lib/apis/knowledge/index.ts` |\n| Skills | `src/lib/apis/skills/index.ts` |\n| OpenAI Config | `src/lib/apis/openai/index.ts` |\n| Tool Servers | `src/lib/apis/index.ts` |\n\nThe 
frontend uses `WEBUI_API_BASE_URL` constant (`${WEBUI_BASE_URL}/api/v1`) as the base for all API calls.\n\n资料来源：[src/lib/constants.ts:1-20]()\n\n## Summary\n\nThe API Routers system in Open WebUI implements a well-organized, FastAPI-based architecture with:\n\n- **Modular Design**: 26+ functional router modules organized by domain\n- **Consistent Patterns**: Standardized router structure with Pydantic models and authentication dependencies\n- **Middleware Pipeline**: Request processing through ASGI middleware, authentication, audit logging, and pipeline filters\n- **Conditional Loading**: Feature flags for SCIM, analytics, and audit logging\n- **Access Control**: Role-based and grant-based authorization at the router and endpoint levels\n- **Frontend Integration**: TypeScript API clients aligned with backend router structure\n\n---\n\n<a id='retrieval-system'></a>\n\n## Retrieval System\n\n### 相关页面\n\n相关主题：[Ollama Integration](#ollama-integration), [RAG Pipeline](#rag-pipeline)\n\n<details>\n<summary>相关源码文件</summary>\n\n以下源码文件用于生成本页说明：\n\n- [backend/open_webui/retrieval/vector/main.py](https://github.com/open-webui/open-webui/blob/main/backend/open_webui/retrieval/vector/main.py)\n- [backend/open_webui/retrieval/vector/factory.py](https://github.com/open-webui/open-webui/blob/main/backend/open_webui/retrieval/vector/factory.py)\n- [backend/open_webui/retrieval/web/main.py](https://github.com/open-webui/open-webui/blob/main/backend/open_webui/retrieval/web/main.py)\n- [backend/open_webui/retrieval/loaders/main.py](https://github.com/open-webui/open-webui/blob/main/backend/open_webui/retrieval/loaders/main.py)\n- [backend/open_webui/retrieval/vector/dbs/pgvector.py](https://github.com/open-webui/open-webui/blob/main/backend/open_webui/retrieval/vector/dbs/pgvector.py)\n- [backend/open_webui/retrieval/web/duckduckgo.py](https://github.com/open-webui/open-webui/blob/main/backend/open_webui/retrieval/web/duckduckgo.py)\n</details>\n\n# Retrieval System\n\nThe Retrieval 
System in Open WebUI is a comprehensive framework for document loading, web searching, and vector-based information retrieval. It enables users to ingest documents, perform web searches, and leverage retrieval-augmented generation (RAG) capabilities to enhance LLM responses with contextual information.\n\n## Architecture Overview\n\nThe retrieval system is composed of three primary subsystems:\n\n```mermaid\ngraph TD\n    subgraph Retrieval[\"Retrieval System\"]\n        subgraph Loaders[\"Document Loaders\"]\n            PDF[PDF Loader]\n            OCR[OCR Loaders]\n            WebLoader[Web Loader]\n        end\n        \n        subgraph WebSearch[\"Web Search Providers\"]\n            SearXNG[SearXNG]\n            DuckDuckGo[DuckDuckGo]\n            GooglePSE[Google PSE]\n            Brave[Brave Search]\n            YouDC[You.com]\n        end\n        \n        subgraph VectorDB[\"Vector Stores\"]\n            Chroma[Chroma]\n            FAISS[FAISS]\n            Milvus[Milvus]\n            Qdrant[Qdrant]\n            PGVector[pgvector]\n        end\n    end\n    \n    API[API Router] --> Loaders\n    API --> WebSearch\n    API --> VectorDB\n```\n\n### Core Components\n\n| Component | Purpose | Location |\n|-----------|---------|----------|\n| **Document Loaders** | Ingest various file formats into the system | `backend/open_webui/retrieval/loaders/` |\n| **Web Search** | Query external search engines for information | `backend/open_webui/retrieval/web/` |\n| **Vector Database** | Store and query embeddings for semantic search | `backend/open_webui/retrieval/vector/` |\n| **API Router** | Expose retrieval endpoints to the frontend | `backend/open_webui/routers/retrieval.py` |\n\n## Document Loaders\n\nThe document loader subsystem handles ingestion of various file formats into the retrieval pipeline.\n\n### Supported File Types\n\nThe system supports the following file types for upload and processing:\n\n| Category | MIME Types |\n|----------|------------|\n| 
Documents | PDF, EPUB, DOCX, TXT, CSV, XML, HTML, Markdown |\n| Code | Python, JavaScript, CSS |\n| Audio | MP3, WAV |\n| Images | PNG, JPG (with OCR) |\n\n资料来源：[src/lib/constants.ts](https://github.com/open-webui/open-webui/blob/main/src/lib/constants.ts)\n\n### OCR Processing\n\nFor scanned documents and images, Open WebUI supports multiple OCR engines:\n\n**PaddleOCR VL** is one of the supported OCR backends. It processes documents page-by-page, extracting text and returning structured `Document` objects with metadata.\n\n```python\n# Processing flow in paddleocr_vl.py\nfor i, page in enumerate(doc):\n    markdown_text = run_paddle_ocr(page)\n    cleaned_content = clean_markdown(markdown_text)\n    \n    documents.append(\n        Document(\n            page_content=cleaned_content,\n            metadata={\n                'page': i,\n                'page_label': i + 1,\n                'total_pages': total_pages,\n                'file_name': self.file_name,\n                'processing_engine': 'paddleocr-vl',\n            }\n        )\n    )\n```\n\n资料来源：[backend/open_webui/retrieval/loaders/paddleocr_vl.py](https://github.com/open-webui/open-webui/blob/main/backend/open_webui/retrieval/loaders/paddleocr_vl.py)\n\n### Configuration Options\n\nThe retrieval loaders are configured through the following environment variables:\n\n| Variable | Description |\n|----------|-------------|\n| `EXTERNAL_DOCUMENT_LOADER_URL` | URL for external document loader service |\n| `EXTERNAL_DOCUMENT_LOADER_API_KEY` | API key for external loader |\n| `TIKA_SERVER_URL` | Apache Tika server endpoint |\n| `DOCLING_SERVER_URL` | Docling OCR server endpoint |\n| `DOCLING_API_KEY` | API key for Docling service |\n| `DOCLING_PARAMS` | Additional Docling parameters |\n| `PDF_EXTRACT_IMAGES` | Enable image extraction from PDFs |\n| `PDF_LOADER_MODE` | PDF loading mode configuration |\n| `DOCUMENT_INTELLIGENCE_ENDPOINT` | Azure Document Intelligence endpoint |\n| 
`DOCUMENT_INTELLIGENCE_KEY` | Azure Document Intelligence API key |\n| `DOCUMENT_INTELLIGENCE_MODEL` | Model identifier for document processing |\n| `MISTRAL_OCR_API_BASE_URL` | Mistral OCR API base URL |\n| `MISTRAL_OCR_API_KEY` | Mistral OCR API key |\n| `PADDLEOCR_VL_BASE_URL` | PaddleOCR VL server URL |\n| `PADDLEOCR_VL_TOKEN` | Authentication token for PaddleOCR VL |\n| `MINERU_API_MODE` | MinerU API mode |\n| `MINERU_API_URL` | MinerU API endpoint |\n| `MINERU_API_KEY` | MinerU API key |\n| `MINERU_API_TIMEOUT` | MinerU API timeout in seconds |\n\n资料来源：[backend/open_webui/retrieval/utils.py](https://github.com/open-webui/open-webui/blob/main/backend/open_webui/retrieval/utils.py)\n\n## Web Search\n\nThe web search subsystem provides integration with multiple search providers for retrieving up-to-date information from the internet.\n\n### Supported Providers\n\n| Provider | Implementation | Features |\n|----------|----------------|----------|\n| **SearXNG** | Self-hosted meta-search engine | Privacy-focused, aggregated results |\n| **DuckDuckGo** | Public search API | No API key required |\n| **Google PSE** | Google Programmable Search | Requires API key |\n| **Brave Search** | Privacy-focused search | API-based |\n| **You.com** | AI-enhanced search | Rich snippets and descriptions |\n| **Tavily** | AI-optimized search | Structured outputs |\n| **Perplexity** | LLM-optimized search | Citations included |\n\n### Search Result Structure\n\nSearch results are normalized into a common `SearchResult` format:\n\n```python\n@dataclass\nclass SearchResult:\n    link: str           # URL of the result\n    title: str          # Title of the page\n    snippet: str        # Text snippet/summary\n```\n\n#### You.com Implementation\n\nThe You.com provider demonstrates the search result normalization:\n\n```python\ndef _build_snippet(result: dict) -> str:\n    \"\"\"Combine the description and snippets list into a single string.\"\"\"\n    parts: list[str] = []\n    \n    
description = result.get('description')\n    if description:\n        parts.append(description)\n    \n    snippets = result.get('snippets')\n    if snippets and isinstance(snippets, list):\n        parts.extend(snippets)\n    \n    return '\\n\\n'.join(parts)\n```\n\n资料来源：[backend/open_webui/retrieval/web/ydc.py](https://github.com/open-webui/open-webui/blob/main/backend/open_webui/retrieval/web/ydc.py)\n\n### Web Loader Configuration\n\nThe web loader for content extraction supports the following configuration:\n\n| Setting | Description |\n|---------|-------------|\n| `ENABLE_WEB_LOADER_SSL_VERIFICATION` | Enable SSL certificate verification |\n| `WEB_LOADER_CONCURRENT_REQUESTS` | Rate limiting for concurrent requests |\n| `WEB_SEARCH_TRUST_ENV` | Trust environment variables for requests |\n| `BYPASS_WEB_SEARCH_WEB_LOADER` | Skip content extraction, use snippets only |\n| `BYPASS_WEB_SEARCH_EMBEDDING_AND_RETRIEVAL` | Skip embedding and retrieval stages |\n\n资料来源：[backend/open_webui/routers/retrieval.py](https://github.com/open-webui/open-webui/blob/main/backend/open_webui/routers/retrieval.py)\n\n### Web Search Flow\n\n```mermaid\nsequenceDiagram\n    participant Client\n    participant API as /api/v1/retrieval/web/search\n    participant SearchProvider as Search Provider\n    participant WebLoader as Web Loader\n    participant VectorDB as Vector Store\n    \n    Client->>API: POST /search {query, urls}\n    API->>SearchProvider: Execute search queries\n    SearchProvider-->>API: Raw search results\n    API->>WebLoader: Extract content from URLs\n    WebLoader-->>API: Document objects\n    API->>VectorDB: Store documents\n    VectorDB-->>API: Collection confirmation\n    API-->>Client: {status, collection_name, files}\n```\n\n## Vector Database Integration\n\nThe vector database subsystem handles storage and retrieval of document embeddings for semantic search.\n\n### Supported Vector Stores\n\n| Database | Implementation | Use Case 
|\n|----------|----------------|----------|\n| **Chroma** | `chromadb` | Lightweight, local-first |\n| **FAISS** | `faiss-cpu`/`faiss-gpu` | Large-scale similarity search |\n| **Milvus** | `pymilvus` | Cloud-native, scalable |\n| **Qdrant** | `qdrant-client` | High-performance, hybrid search |\n| **pgvector** | `psycopg2` | PostgreSQL extension for vectors |\n\n### Vector Factory Pattern\n\nThe system uses a factory pattern to instantiate vector databases:\n\n```python\nclass VectorStoreFactory:\n    @staticmethod\n    def get_vector_store(config: Config) -> VectorStore:\n        provider = config.VECTOR_DB\n        if provider == \"chromadb\":\n            return ChromaDBStore()\n        elif provider == \"pgvector\":\n            return PGVectorStore()\n        # ... other providers\n```\n\n资料来源：[backend/open_webui/retrieval/vector/factory.py](https://github.com/open-webui/open-webui/blob/main/backend/open_webui/retrieval/vector/factory.py)\n\n### pgvector Implementation\n\nFor PostgreSQL-based vector storage:\n\n```python\nclass PGVectorStore:\n    def __init__(self, connection_string: str, embedding_dim: int = 1536):\n        self.conn = psycopg2.connect(connection_string)\n        self.embedding_dim = embedding_dim\n    \n    def insert(self, collection: str, documents: list[Document]):\n        # Insert vectors with pgvector extension\n```\n\n资料来源：[backend/open_webui/retrieval/vector/dbs/pgvector.py](https://github.com/open-webui/open-webui/blob/main/backend/open_webui/retrieval/vector/dbs/pgvector.py)\n\n## API Endpoints\n\nThe retrieval system exposes REST API endpoints through the router.\n\n### Web Search Endpoint\n\n```\nPOST /api/v1/retrieval/web/search\n```\n\n**Request Body:**\n```json\n{\n  \"query\": \"search query string\",\n  \"collection_name\": \"optional_collection\",\n  \"retrieval_enabled\": true,\n  \"k\": 5\n}\n```\n\n**Response:**\n```json\n{\n  \"status\": true,\n  \"collection_name\": \"web_20240115_abc123\",\n  \"filenames\": 
[\"python.org\", \"wikipedia.org\"],\n  \"content\": \"extracted content...\",\n  \"sources\": [\n    {\"url\": \"https://python.org\", \"content\": \"...\"}\n  ]\n}\n```\n\n### File Upload and Processing\n\n```\nPOST /api/v1/retrieval/upload\n```\n\nHandles file uploads, runs document loaders, and stores in the configured vector database.\n\n资料来源：[backend/open_webui/routers/retrieval.py](https://github.com/open-webui/open-webui/blob/main/backend/open_webui/routers/retrieval.py)\n\n## Configuration Reference\n\n### Environment Variables\n\n| Variable | Default | Description |\n|----------|---------|-------------|\n| `VECTOR_DB` | `chroma` | Vector database provider |\n| `RAG_TOP_K` | `5` | Number of top results to retrieve |\n| `RAG_RELEVANCE_THRESHOLD` | `0.0` | Minimum relevance score threshold |\n| `WEB_SEARCH_ENABLED` | `True` | Enable web search functionality |\n\n### Frontend API URLs\n\nThe frontend communicates with these API base URLs:\n\n```typescript\nexport const RETRIEVAL_API_BASE_URL = `${WEBUI_BASE_URL}/api/v1/retrieval`;\n```\n\n资料来源：[src/lib/constants.ts](https://github.com/open-webui/open-webui/blob/main/src/lib/constants.ts)\n\n## Data Flow\n\n```mermaid\ngraph LR\n    subgraph Input[\"Input Sources\"]\n        Files[Uploaded Files]\n        WebSearch[Web Search]\n        URLs[Direct URLs]\n    end\n    \n    subgraph Processing[\"Processing Pipeline\"]\n        Loaders[Document Loaders]\n        Chunks[Text Chunking]\n        Embed[Embedding Model]\n    end\n    \n    subgraph Storage[\"Storage\"]\n        Vector[Vector Store]\n        Meta[Metadata Store]\n    end\n    \n    subgraph Query[\"Query Processing\"]\n        QueryEmb[Query Embedding]\n        Similarity[Similarity Search]\n        Rerank[Reranking]\n    end\n    \n    Files --> Loaders\n    WebSearch --> Loaders\n    URLs --> Loaders\n    Loaders --> Chunks\n    Chunks --> Embed\n    Embed --> Vector\n    \n    Query --> QueryEmb\n    QueryEmb --> Similarity\n    Similarity --> 
Rerank\n    Rerank --> Context[LLM Context]\n```\n\n## Error Handling\n\nThe retrieval system implements comprehensive error handling:\n\n| Error Type | HTTP Code | Message |\n|------------|-----------|---------|\n| Web search failure | 400 | `WEB_SEARCH_ERROR` with exception details |\n| No results found | 404 | `No results found from web search` |\n| Loader failure | 500 | Loader-specific error message |\n| Vector store error | 500 | Database connection or query errors |\n\n资料来源：[backend/open_webui/routers/retrieval.py:1-50](https://github.com/open-webui/open-webui/blob/main/backend/open_webui/routers/retrieval.py)\n\n## Extension Points\n\nThe retrieval system is designed for extensibility:\n\n1. **Custom Document Loaders**: Implement the `DocumentLoader` interface in `loaders/`\n2. **New Search Providers**: Add provider class in `web/` following the `SearchProvider` protocol\n3. **Vector Store Adapters**: Implement `VectorStore` abstract class in `vector/dbs/`\n4. **Embedding Models**: Configure through `EMBEDDING_MODEL` setting\n\n---\n\n<a id='frontend-structure'></a>\n\n## Frontend Structure\n\n### 相关页面\n\n相关主题：[Chat Interface](#chat-interface), [Architecture Overview](#architecture-overview)\n\n<details>\n<summary>相关源码文件</summary>\n\n以下源码文件用于生成本页说明：\n\n- [src/lib/constants.ts](https://github.com/open-webui/open-webui/blob/main/src/lib/constants.ts)\n- [src/lib/utils/index.ts](https://github.com/open-webui/open-webui/blob/main/src/lib/utils/index.ts)\n- [src/lib/utils/codeHighlight.ts](https://github.com/open-webui/open-webui/blob/main/src/lib/utils/codeHighlight.ts)\n- [src/lib/apis/knowledge/index.ts](https://github.com/open-webui/open-webui/blob/main/src/lib/apis/knowledge/index.ts)\n- [src/lib/apis/skills/index.ts](https://github.com/open-webui/open-webui/blob/main/src/lib/apis/skills/index.ts)\n- [src/app.html](https://github.com/open-webui/open-webui/blob/main/src/app.html)\n</details>\n\n# Frontend Structure\n\n## Overview\n\nOpen WebUI uses a modern 
SvelteKit-based frontend architecture built with TypeScript. The frontend is responsible for the user interface, real-time chat interactions, multimedia handling, and communication with the backend API. The application runs as a Single Page Application (SPA) with server-side rendering capabilities provided by SvelteKit.\n\n**技术栈**\n\n| Layer | Technology |\n|-------|------------|\n| Framework | SvelteKit |\n| Language | TypeScript |\n| Styling | CSS (with custom properties) |\n| State Management | Svelte Stores |\n| API Communication | Fetch API |\n| Internationalization | i18n module |\n| Code Highlighting | Shiki |\n| Build Tool | Vite (via SvelteKit) |\n\n资料来源：[src/lib/constants.ts:1]()\n\n---\n\n## Directory Structure\n\n```\nsrc/\n├── lib/\n│   ├── components/       # Reusable UI components\n│   ├── apis/              # API client modules\n│   ├── stores/            # Svelte stores for state management\n│   ├── utils/             # Utility functions\n│   ├── i18n/              # Internationalization\n│   └── constants.ts       # Application constants\n├── routes/                # SvelteKit page routes\n└── app.html               # HTML template\n```\n\n资料来源：[src/lib/index.ts]()\n\n---\n\n## Core Modules\n\n### Constants Module\n\nThe `constants.ts` file centralizes all application-wide configuration values and constants.\n\n```typescript\nexport const APP_NAME = 'Open WebUI';\nexport const WEBUI_HOSTNAME = browser ? (dev ? `${location.hostname}:8080` : ``) : '';\nexport const WEBUI_BASE_URL = browser ? (dev ? 
`http://${WEBUI_HOSTNAME}` : ``) : ``;\nexport const WEBUI_API_BASE_URL = `${WEBUI_BASE_URL}/api/v1`;\n```\n\n**Key Constants**\n\n| Constant | Purpose |\n|----------|---------|\n| `APP_NAME` | Application display name |\n| `WEBUI_HOSTNAME` | Dynamic hostname resolution |\n| `WEBUI_BASE_URL` | Base URL for the application |\n| `WEBUI_API_BASE_URL` | Backend API endpoint |\n| `OLLAMA_API_BASE_URL` | Ollama integration endpoint |\n| `OPENAI_API_BASE_URL` | OpenAI-compatible API endpoint |\n| `AUDIO_API_BASE_URL` | Audio processing endpoint |\n| `RETRIEVAL_API_BASE_URL` | RAG retrieval endpoint |\n| `SUPPORTED_FILE_TYPE` | List of accepted file types |\n\n资料来源：[src/lib/constants.ts:1-20]()\n\n**Supported File Types**\n\nThe application supports the following file types for uploads:\n\n| Category | Types |\n|----------|-------|\n| Documents | PDF, EPUB, DOCX, plain text |\n| Code | Python, CSS, JavaScript, Markdown |\n| Data | CSV, XML, JSON |\n| Media | MP3, WAV (audio) |\n\n### API Client Architecture\n\nThe frontend uses modular API clients located in `src/lib/apis/`. 
Each domain has its own dedicated API module.\n\n#### Knowledge API Client\n\nThe Knowledge API client handles knowledge base operations:\n\n```typescript\nexport const createNewKnowledge = async (\n    token: string,\n    name: string,\n    description: string,\n    accessGrants: object[]\n) => {\n    const res = await fetch(`${WEBUI_API_BASE_URL}/knowledge/create`, {\n        method: 'POST',\n        headers: {\n            Accept: 'application/json',\n            'Content-Type': 'application/json',\n            authorization: `Bearer ${token}`\n        },\n        body: JSON.stringify({\n            name: name,\n            description: description,\n            access_grants: accessGrants\n        })\n    });\n    return res.json();\n};\n```\n\n#### Skills API Client\n\nThe Skills API client manages skill-related operations with consistent error handling:\n\n```typescript\nexport const deleteSkillById = async (token: string, id: string) => {\n    const res = await fetch(`${WEBUI_API_BASE_URL}/skills/id/${id}/delete`, {\n        method: 'DELETE',\n        headers: {\n            Accept: 'application/json',\n            'Content-Type': 'application/json',\n            authorization: `Bearer ${token}`\n        }\n    });\n    return res.json();\n};\n```\n\n资料来源：[src/lib/apis/knowledge/index.ts:1-45]()\n资料来源：[src/lib/apis/skills/index.ts]()\n\n#### API Communication Pattern\n\nAll API clients follow a consistent pattern:\n\n1. Construct URL with query parameters\n2. Set required headers (Content-Type, Authorization)\n3. Handle responses with proper error handling\n4. 
Return JSON data or throw detailed errors\n\n---\n\n## Content Processing Pipeline\n\n### Response Content Utilities\n\nThe frontend includes sophisticated text processing utilities for handling various content types:\n\n```typescript\nexport const sanitizeResponseContent = (content: string) => {\n    return content\n        .replace(/<\|[a-z]*$/, '')\n        .replace(/<\|[a-z]+\|$/, '')\n        .replace(/<$/, '')\n        .replaceAll(/<\|[a-z]+\|>/g, ' ')\n        .replaceAll('<', '&lt;')\n        .replaceAll('>', '&gt;')\n        .trim();\n};\n```\n\n### Text Extraction Utilities\n\nThe frontend provides text extraction for audio processing:\n\n| Function | Purpose |\n|----------|---------|\n| `extractSentencesForAudio()` | Splits text into audio-ready sentences |\n| `extractParagraphsForAudio()` | Extracts paragraphs for audio playback |\n\n资料来源：[src/lib/utils/index.ts:150-200]()\n\n---\n\n## Code Highlighting System\n\n### Shiki Integration\n\nOpen WebUI uses Shiki for syntax highlighting with a bundled language configuration:\n\n```typescript\nconst KNOWN_LANG_IDS = new Set([\n    'ada', 'awk', 'bat', 'c', 'cmake', 'clojure', 'cpp', 'crystal',\n    'css', 'd', 'dart', 'diff', 'elixir', 'elm', 'erlang', 'fish',\n    'gleam', 'glsl', 'go', 'groovy', 'haml', 'haskell', 'hlsl',\n    'html', 'ini', 'java', 'javascript', 'json', 'json5', 'jsonc',\n    'jsx', 'julia', 'kotlin', 'latex', 'less', 'lisp', 'log', 'lua',\n    'make', 'markdown', 'matlab', 'mdx', 'mojo', 'nim', 'nix',\n    'nushell', 'ocaml', 'pascal', 'perl', 'php', 'postcss',\n    'powershell', 'prisma', 'python', 'r', 'ruby', 'rust', 'scala',\n    'scheme', 'scss', 'shellscript', 'sql', 'swift', 'toml',\n    'tsx', 'typescript', 'vb', 'xml', 'yaml', 'zig'\n]);\n```\n\n资料来源：[src/lib/utils/codeHighlight.ts]()\n\n**Supported Languages (Partial List)**\n\n| Category | Languages |\n|----------|-----------|\n| Systems | C, C++, Rust, Go, Zig |\n| Web | JavaScript, TypeScript, HTML, CSS, JSX, TSX |\n| 
Scripting | Python, Ruby, PHP, Perl, Lua |\n| Data | SQL, JSON, YAML, TOML |\n| Functional | Haskell, Elixir, Clojure, Scheme |\n| Markup | Markdown, LaTeX, XML, HTML |\n\n---\n\n## HTML Template Structure\n\n### Application Shell\n\nThe `app.html` file provides the base HTML structure with splash screen support:\n\n```html\n<!doctype html>\n<html lang=\"en\">\n    <head>\n        <meta charset=\"utf-8\" />\n        <link rel=\"icon\" type=\"image/png\" href=\"/static/favicon.png\" />\n        <link rel=\"manifest\" href=\"/manifest.json\" />\n        <meta name=\"viewport\" content=\"width=device-width, initial-scale=1...\" />\n        <meta name=\"theme-color\" content=\"#171717\" />\n        <script src=\"/static/loader.js\" defer></script>\n        <link rel=\"stylesheet\" href=\"/static/custom.css\" />\n    </head>\n</html>\n```\n\n### Theme Support\n\nThe application supports multiple themes including a special \"her\" theme:\n\n```css\nhtml.dark #splash-screen {\n    background: #000;\n}\n\nhtml.her #splash-screen {\n    background: #983724;\n}\n\nhtml.her #logo {\n    display: none;\n}\n\nhtml.her #logo-her {\n    display: block;\n    filter: invert(1);\n}\n```\n\n资料来源：[src/app.html]()\n\n---\n\n## State Management Architecture\n\n### Store-Based State Management\n\nThe frontend uses Svelte's reactive stores for global state management. 
Stores are defined in `src/lib/stores/index.ts` and provide reactive access to:\n\n- User authentication state\n- Chat history and messages\n- Model configurations\n- UI preferences\n- Application settings\n\n### Store Access Pattern\n\n```typescript\nimport { storeName } from '$lib/stores';\n\n// Manual subscription (subscribe returns an unsubscribe function)\nconst unsubscribe = storeName.subscribe(value => {\n    // Handle value changes\n});\n\n// Or using Svelte's auto-subscription inside a component\n$: value = $storeName;\n```\n\n---\n\n## Internationalization\n\n### i18n Module\n\nInternationalization is handled through `src/lib/i18n/index.ts` which provides:\n\n- Language detection and switching\n- Translation loading\n- Locale-specific formatting\n- Fallback language support\n\n---\n\n## Frontend Architecture Diagram\n\n```mermaid\ngraph TB\n    subgraph \"Presentation Layer\"\n        UI[UI Components<br/>src/lib/components]\n        Routes[SvelteKit Routes<br/>src/routes]\n    end\n\n    subgraph \"Logic Layer\"\n        Stores[State Management<br/>src/lib/stores]\n        Utils[Utilities<br/>src/lib/utils]\n        i18n[Internationalization<br/>src/lib/i18n]\n    end\n\n    subgraph \"Integration Layer\"\n        APIs[API Clients<br/>src/lib/apis]\n        Constants[Constants<br/>src/lib/constants]\n    end\n\n    subgraph \"External Services\"\n        Backend[Backend API<br/>/api/v1]\n        Ollama[Ollama API<br/>/ollama]\n    end\n\n    UI --> Routes\n    Routes --> Stores\n    UI --> Stores\n    Routes --> APIs\n    APIs --> Constants\n    Stores --> Utils\n    APIs --> Backend\n    Constants --> Ollama\n```\n\n---\n\n## Build Configuration\n\n### SvelteKit Configuration\n\nThe SvelteKit configuration (`svelte.config.js`) defines:\n\n- Adapter settings for deployment\n- Preprocessing options\n- Build optimizations\n- SSR configuration\n\nThe frontend is built with Vite under the hood, providing:\n\n- Hot Module Replacement (HMR)\n- Code splitting\n- Tree shaking\n- TypeScript support\n\n---\n\n## Request/Response 
Flow\n\n```mermaid\nsequenceDiagram\n    participant User as User Interface\n    participant Store as Svelte Store\n    participant API as API Client\n    participant Backend as Backend API\n\n    User->>Store: Trigger action\n    Store->>API: Call API function\n    API->>API: Set headers & body\n    API->>Backend: HTTP Request\n    Backend-->>API: JSON Response\n    API-->>Store: Processed data\n    Store-->>User: Update UI\n```\n\n---\n\n## Summary\n\nThe Open WebUI frontend is built on a clean, modular architecture that separates concerns through dedicated directories for components, API clients, stores, utilities, and internationalization. The TypeScript-based codebase provides type safety while SvelteKit enables both server-side rendering and client-side hydration. The constant module centralizes configuration, making environment-specific settings easy to manage across different deployment scenarios.\n\n---\n\n<a id='chat-interface'></a>\n\n## Chat Interface\n\n### 相关页面\n\n相关主题：[Ollama Integration](#ollama-integration), [Frontend Structure](#frontend-structure)\n\n<details>\n<summary>相关源码文件</summary>\n\n以下源码文件用于生成本页说明：\n\n- [src/lib/stores/index.ts](https://github.com/open-webui/open-webui/blob/main/src/lib/stores/index.ts)\n- [src/lib/utils/index.ts](https://github.com/open-webui/open-webui/blob/main/src/lib/utils/index.ts)\n- [src/lib/constants.ts](https://github.com/open-webui/open-webui/blob/main/src/lib/constants.ts)\n- [backend/open_webui/models/chats.py](https://github.com/open-webui/open-webui/blob/main/backend/open_webui/models/chats.py)\n- [backend/open_webui/config.py](https://github.com/open-webui/open-webui/blob/main/backend/open_webui/config.py)\n- [backend/open_webui/utils/middleware.py](https://github.com/open-webui/open-webui/blob/main/backend/open_webui/utils/middleware.py)\n</details>\n\n# Chat Interface\n\n## Overview\n\nThe Chat Interface is the core user-facing component of Open WebUI, providing an interactive environment for 
conversations with AI models. It handles message composition, response rendering, conversation state management, and integration with various backend services including Ollama, OpenAI-compatible APIs, and code execution engines.\n\nThe interface is built with SvelteKit on the frontend and Python/FastAPI on the backend, enabling real-time streaming responses, multi-model conversations, and rich content rendering including markdown, code blocks, and embedded media.\n\n## Architecture Overview\n\n```mermaid\ngraph TD\n    subgraph Frontend[\"Frontend (Svelte)\"]\n        Chat[Chat.svelte]\n        MessageInput[MessageInput.svelte]\n        Message[Message.svelte]\n        Markdown[Markdown.svelte]\n        ModelSelector[ModelSelector.svelte]\n        Navbar[Navbar.svelte]\n    end\n    \n    subgraph StateManagement[\"State Management\"]\n        Stores[index.ts - Svelte Stores]\n    end\n    \n    subgraph Backend[\"Backend (Python/FastAPI)\"]\n        ChatModel[models/chats.py]\n        Config[config.py]\n        Middleware[middleware.py]\n    end\n    \n    Chat --> Stores\n    MessageInput --> Stores\n    Message --> Stores\n    Chat --> MessageInput\n    Chat --> Message\n    Message --> Markdown\n    Stores --> ChatModel\n    Stores --> Middleware\n```\n\n## State Management\n\nThe chat interface relies heavily on Svelte stores for reactive state management. 
These stores maintain the current conversation state, UI visibility flags, and application-wide settings.\n\n### Core Chat Stores\n\nAll chat-related state is managed through Svelte writable stores defined in `src/lib/stores/index.ts`:\n\n| Store | Type | Purpose |\n|-------|------|---------|\n| `chatId` | `Writable<string>` | Current active chat identifier |\n| `chatTitle` | `Writable<string>` | Title of the current chat |\n| `chats` | `Writable<null>` | Cached chat objects |\n| `pinnedChats` | `Writable<Chat[]>` | Pinned conversations |\n| `models` | `Writable<Model[]>` | Available AI models |\n| `chatRequestQueues` | `Writable<Record<string, QueueItem[]>>` | Request queue management |\n\n资料来源：[src/lib/stores/index.ts:53-58]()\n\n### UI Visibility Stores\n\nThe interface uses boolean stores to control component visibility:\n\n| Store | Type | Purpose |\n|-------|------|---------|\n| `showSidebar` | `Writable<boolean>` | Sidebar visibility |\n| `showSettings` | `Writable<boolean>` | Settings panel visibility |\n| `showShortcuts` | `Writable<boolean>` | Keyboard shortcuts overlay |\n| `showControls` | `Writable<boolean>` | Chat controls visibility |\n| `showEmbeds` | `Writable<boolean>` | Embedded content display |\n| `showArtifacts` | `Writable<boolean>` | Code artifacts panel |\n\n资料来源：[src/lib/stores/index.ts:22-30]()\n\n### Audio and Transcription Stores\n\n| Store | Type | Purpose |\n|-------|------|---------|\n| `audioQueue` | `Writable<AudioQueue \\| null>` | TTS audio queue |\n| `TTSWorker` | `Writable<Worker \\| null>` | Text-to-speech web worker |\n\n## Message Processing Pipeline\n\n### Content Sanitization\n\nBefore rendering, message content undergoes sanitization to prevent XSS attacks and normalize special tokens:\n\n```typescript\nexport const sanitizeResponseContent = (content: string) => {\n    return content\n        .replace(/<\\|[a-z]*$/, '')\n        .replace(/<\\|[a-z]+\\|$/, '')\n        .replace(/<$/, '')\n        .replaceAll('<', '&lt;')\n 
       .replaceAll('>', '&gt;')\n        .replaceAll(/<\\|[a-z]+\\|>/g, ' ')\n        .trim();\n};\n```\n\n资料来源：[src/lib/utils/index.ts:180-189]()\n\n### Content Processing for Chinese Text\n\nThe system includes special handling for Chinese content to address markdown and LaTeX formatting issues:\n\n```typescript\nfunction processChineseContent(content: string): string {\n    if (!/[\\u4e00-\\u9fa5]/.test(content)) return content;\n    const lines = content.split('\\n');\n    const processedLines = lines.map((line) => {\n        // Chinese-specific processing logic\n    });\n    return processedLines.join('\\n');\n}\n```\n\n资料来源：[src/lib/utils/index.ts:195-208]()\n\n### Sentence and Paragraph Extraction\n\nFor audio processing (text-to-speech), messages are split into appropriate segments:\n\n```typescript\nexport const extractSentencesForAudio = (text: string) => {\n    return extractSentences(text).reduce((mergedTexts, currentText) => {\n        const lastIndex = mergedTexts.length - 1;\n        if (lastIndex >= 0) {\n            const previousText = mergedTexts[lastIndex];\n            const wordCount = previousText.split(/\\s+/).length;\n            const charCount = previousText.length;\n            if (wordCount < 4 || charCount < 50) {\n                mergedTexts[lastIndex] = previousText + ' ' + currentText;\n            } else {\n                mergedTexts.push(currentText);\n            }\n        }\n        return mergedTexts;\n    }, []);\n};\n```\n\n资料来源：[src/lib/utils/index.ts:300-319]()\n\n## Chat Data Models\n\n### Backend Chat Model\n\nThe backend defines chat structures in `backend/open_webui/models/chats.py`:\n\n```python\nclass ChatModel:\n    async def get_message_list(self, id: str) -> Optional[dict]:\n        \"\"\"Message map for walking history.\n        \n        Prefer chat_message rows to avoid loading the large chat\n        JSON blob; fall back to embedded history when no rows exist\n        (legacy chats).\n        \"\"\"\n        
messages_map = await ChatMessages.get_messages_map_by_chat_id(id)\n        if messages_map is not None:\n            return messages_map\n        \n        # Fall back to embedded JSON blob for legacy chats\n        chat = await self.get_chat_by_id(id)\n        if chat is None:\n            return None\n        \n        return chat.chat.get('history', {}).get('messages', {}) or {}\n```\n\n资料来源：[backend/open_webui/models/chats.py:1-25]()\n\n### Message Structure\n\nMessages support both normalized storage (via `chat_message` rows) and legacy embedded JSON format:\n\n| Field | Type | Description |\n|-------|------|-------------|\n| `id` | `string` | Unique message identifier |\n| `parentId` | `string \\| null` | Parent message ID for threading |\n| `childrenIds` | `string[]` | Child message IDs |\n| `role` | `user \\| assistant` | Message author role |\n| `content` | `string` | Message content |\n| `model` | `string` | Model used for assistant responses |\n| `timestamp` | `number` | Unix timestamp of creation |\n| `done` | `boolean` | Whether response is complete |\n\n## Configuration and Prompt Templates\n\n### Voice Mode Configuration\n\nVoice mode settings are configurable via environment variables:\n\n| Config Key | Environment Variable | Default | Description |\n|------------|---------------------|---------|-------------|\n| `ENABLE_VOICE_MODE_PROMPT` | `ENABLE_VOICE_MODE_PROMPT` | `True` | Enable voice mode prompt |\n| `VOICE_MODE_PROMPT_TEMPLATE` | `VOICE_MODE_PROMPT_TEMPLATE` | `''` | Custom voice prompt template |\n\n资料来源：[backend/open_webui/config.py:1-20]()\n\n### Code Interpreter Configuration\n\nThe chat interface integrates code execution capabilities:\n\n| Config Key | Environment Variable | Default | Description |\n|------------|---------------------|---------|-------------|\n| `ENABLE_CODE_EXECUTION` | `ENABLE_CODE_EXECUTION` | `True` | Enable code execution |\n| `CODE_EXECUTION_ENGINE` | `CODE_EXECUTION_ENGINE` | `pyodide` | Execution engine 
(pyodide/jupyter) |\n| `CODE_EXECUTION_JUPYTER_URL` | `CODE_EXECUTION_JUPYTER_URL` | `''` | Jupyter server URL |\n| `CODE_EXECUTION_JUPYTER_AUTH` | `CODE_EXECUTION_JUPYTER_AUTH` | `''` | Jupyter authentication |\n\n资料来源：[backend/open_webui/config.py:35-60]()\n\n### Prompt Generation Templates\n\nThe system uses configurable prompt templates for various tasks:\n\n| Template | Purpose |\n|----------|---------|\n| `DEFAULT_MOA_GENERATION_PROMPT_TEMPLATE` | Multi-model answer synthesis |\n| `IMAGE_PROMPT_GENERATION_PROMPT_TEMPLATE` | Image generation prompt creation |\n| `FOLLOW_UP_GENERATION_PROMPT_TEMPLATE` | Suggesting follow-up questions |\n\n## Code Interpreter Integration\n\n### Backend Middleware Rendering\n\nThe backend middleware handles code interpreter rendering in the streaming response pipeline:\n\n```python\nelif item_type == 'open_webui:code_interpreter':\n    # Code interpreter needs to inspect/mutate prior accumulated content\n    # to strip trailing unclosed code fences\n    content = '\\n'.join(parts)\n    content_stripped, original_whitespace = split_content_and_whitespace(content)\n    if is_opening_code_block(content_stripped):\n        content = content_stripped.rstrip('`').rstrip() + original_whitespace\n    else:\n        content = content_stripped + original_whitespace\n    \n    # Render as <details> block for frontend Collapsible\n    code = item.get('code', '').strip()\n    lang = item.get('lang', 'python')\n    status = item.get('status', 'in_progress')\n```\n\n资料来源：[backend/open_webui/utils/middleware.py:1-30]()\n\n### Code Block Rendering\n\nMessages with code content are wrapped in collapsible elements:\n\n```mermaid\ngraph LR\n    A[Message Content] --> B{Contains Code?}\n    B -->|Yes| C[Wrap in Details Block]\n    B -->|No| D[Render as Markdown]\n    C --> E[\"<details type='code'>\"]\n    E --> F[\"<summary>Code Block</summary>\"]\n    E --> G[\"```python<br>code```\"]\n```\n\n### Pyodide Environment\n\nFor browser-based code 
execution:\n\n- Runs via Pyodide in the browser for fast execution\n- No package installation available (`pip install`, `subprocess`, `micropip.install()` disabled)\n- Uses web worker for background processing\n\n## Chat History and Import\n\n### OpenAI Chat Import\n\nThe system supports importing chats from OpenAI's export format:\n\n```typescript\nexport const convertOpenAIMessages = (convo) => {\n    const messages = [];\n    const uniqueModels = new Set();\n    let lastId = null;\n    \n    for (const message_id in mapping) {\n        const message = mapping[message_id];\n        // Skip system and tool messages\n        if (role === 'system' || role === 'tool') {\n            continue;\n        }\n        \n        const new_chat = {\n            id: message_id,\n            parentId: lastId,\n            role: role !== 'user' ? 'assistant' : 'user',\n            content: extractOpenAIMessageContent(message['message']),\n            model,\n            done: true\n        };\n    }\n    return { messages, id, title, timestamp };\n};\n```\n\n资料来源：[src/lib/utils/index.ts:220-260]()\n\n### Validation\n\nImported chats undergo validation:\n\n```typescript\nconst validateChat = (chat) => {\n    const messages = chat.messages;\n    if (messages.length === 0) return false;\n    \n    for (const message of messages) {\n        if (typeof message.content !== 'string') {\n            return false;\n        }\n    }\n    return true;\n};\n```\n\n## API Integration\n\n### Base URLs Configuration\n\nThe frontend configures API endpoints in `src/lib/constants.ts`:\n\n| Constant | Value | Purpose |\n|----------|-------|---------|\n| `OLLAMA_API_BASE_URL` | `/ollama` | Ollama model API |\n| `OPENAI_API_BASE_URL` | `/openai` | OpenAI-compatible API |\n| `AUDIO_API_BASE_URL` | `/api/v1/audio` | Audio processing |\n| `IMAGES_API_BASE_URL` | `/api/v1/images` | Image generation |\n| `RETRIEVAL_API_BASE_URL` | `/api/v1/retrieval` | RAG/retrieval 
|\n\n资料来源：[src/lib/constants.ts:7-15]()\n\n### WebUI API Configuration\n\n```typescript\nexport const WEBUI_API_BASE_URL = `${WEBUI_BASE_URL}/api/v1`;\n```\n\n## Message Queue Management\n\nThe system handles concurrent chat requests through queue management:\n\n```typescript\nexport const chatRequestQueues: Writable<\n    Record<string, { id: string; prompt: string; files: any[] }[]>\n> = writable({});\n```\n\nThis allows multiple concurrent conversations with queued message processing per chat ID.\n\n## Supported File Types\n\nThe chat interface supports file attachments for context:\n\n| Category | MIME Types |\n|----------|------------|\n| Documents | PDF, EPUB, DOCX, TXT, CSV, XML |\n| Code | Python, CSS, JavaScript, HTML |\n| Audio | MPEG, WAV, OGG |\n| Images | PNG, JPEG, GIF, WebP |\n| Archives | ZIP |\n\n资料来源：[src/lib/constants.ts:20-30]()\n\n## User Interface Components\n\n### Chat Component Hierarchy\n\n```mermaid\ngraph TD\n    Chat[Chat.svelte] --> Navbar[Navbar.svelte]\n    Chat --> MessageInput[MessageInput.svelte]\n    Chat --> Messages[Messages Container]\n    Messages --> Message[Message.svelte]\n    Message --> Markdown[Markdown.svelte]\n    Message --> ModelSelector[ModelSelector.svelte]\n```\n\n### Sidebar and Navigation\n\n| Component | Store | Purpose |\n|-----------|-------|---------|\n| Sidebar | `showSidebar` | Navigation and chat list |\n| Search | `showSearch` | Search across chats |\n| Archived Chats | `showArchivedChats` | View archived conversations |\n| Folders | `folders`, `selectedFolder` | Organize chats |\n\n## Theming and Styling\n\nThe chat interface supports dynamic theming:\n\n```typescript\nexport const theme = writable('system');\n```\n\nTheme application is handled via CSS variables and the `html.dark` class:\n\n```css\nhtml.dark #splash-screen {\n    background: #000;\n}\n\nhtml.her #splash-screen {\n    background: #983724;\n}\n```\n\n资料来源：[src/app.html:1-30]()\n\n## Summary\n\nThe Chat Interface in Open WebUI represents 
a sophisticated integration of frontend Svelte components and Python backend services. Key architectural elements include:\n\n1. **Reactive State Management** - Svelte stores maintain real-time UI and chat state\n2. **Message Processing Pipeline** - Content sanitization, language-specific processing, and format conversion\n3. **Multi-Model Support** - Configurable model selection and parallel conversation capabilities\n4. **Code Execution** - Integrated code interpreter with Pyodide and Jupyter support\n5. **Rich Content Rendering** - Markdown, code blocks, images, and audio\n6. **Import/Export** - OpenAI chat format compatibility\n7. **Voice Integration** - TTS and voice input capabilities\n\nThe modular design allows each component to be independently configured while maintaining a cohesive user experience across different interaction modes.\n\n---\n\n<a id='ollama-integration'></a>\n\n## Ollama Integration\n\n### 相关页面\n\n相关主题：[RAG Pipeline](#rag-pipeline), [Chat Interface](#chat-interface)\n\n<details>\n<summary>相关源码文件</summary>\n\n以下源码文件用于生成本页说明：\n\n- [backend/open_webui/routers/ollama.py](https://github.com/open-webui/open-webui/blob/main/backend/open_webui/routers/ollama.py)\n- [backend/open_webui/routers/openai.py](https://github.com/open-webui/open-webui/blob/main/backend/open_webui/routers/openai.py)\n- [backend/open_webui/utils/models.py](https://github.com/open-webui/open-webui/blob/main/backend/open_webui/utils/models.py)\n- [src/lib/apis/ollama/index.ts](https://github.com/open-webui/open-webui/blob/main/src/lib/apis/ollama/index.ts)\n- [backend/open_webui/config.py](https://github.com/open-webui/open-webui/blob/main/backend/open_webui/config.py)\n- [src/lib/constants.ts](https://github.com/open-webui/open-webui/blob/main/src/lib/constants.ts)\n</details>\n\n# Ollama Integration\n\n## Overview\n\nThe Ollama Integration is a core component of Open WebUI that enables seamless communication between the frontend application and local Ollama instances. 
This integration provides a unified interface for managing, accessing, and interacting with LLM models hosted locally through Ollama, supporting both native Ollama API calls and OpenAI-compatible endpoints.\n\nOllama serves as the primary backend inference engine for Open WebUI, allowing users to run large language models entirely on their local hardware without relying on cloud-based services.\n\n## Architecture Overview\n\nThe Ollama Integration follows a proxy pattern where the backend server acts as an intermediary, forwarding requests from the frontend to Ollama instances while applying access controls, model routing, and API transformations.\n\n```mermaid\ngraph TD\n    subgraph Frontend[\"Frontend (Svelte)\"]\n        UI[User Interface]\n        API_CLIENT[API Client<br/>src/lib/apis/ollama/index.ts]\n    end\n\n    subgraph Backend[\"Backend Server (Python/FastAPI)\"]\n        OLLAMA_ROUTER[Ollama Router<br/>routers/ollama.py]\n        OPENAI_ROUTER[OpenAI Router<br/>routers/openai.py]\n        MODEL_UTILS[Model Utilities<br/>utils/models.py]\n        CONFIG[Configuration<br/>config.py]\n    end\n\n    subgraph OllamaInstances[\"Ollama Instances\"]\n        OLLAMA_LOCAL[Local Ollama<br/>localhost:11434]\n        OLLAMA_CUSTOM[Custom Ollama<br/>Configured URLs]\n    end\n\n    UI --> API_CLIENT\n    API_CLIENT -->|HTTP Requests| OLLAMA_ROUTER\n    API_CLIENT -->|OpenAI-compatible| OPENAI_ROUTER\n    OLLAMA_ROUTER --> MODEL_UTILS\n    OLLAMA_ROUTER --> CONFIG\n    OLLAMA_ROUTER -->|Native API| OLLAMA_LOCAL\n    OLLAMA_ROUTER -->|Native API| OLLAMA_CUSTOM\n    OPENAI_ROUTER -->|v1/chat/completions| OLLAMA_LOCAL\n    OPENAI_ROUTER -->|v1/chat/completions| OLLAMA_CUSTOM\n\n    style Frontend fill:#e1f5fe\n    style Backend fill:#f3e5f5\n    style OllamaInstances fill:#fff3e0\n```\n\n## Core Components\n\n### Backend Router (routers/ollama.py)\n\nThe Ollama router (`backend/open_webui/routers/ollama.py`) handles all native Ollama API operations. 
It provides endpoints for model management, chat completions, and model operations.\n\n**Primary Endpoints:**\n\n| Endpoint | Method | Description |\n|----------|--------|-------------|\n| `/api/chat` | POST | Send chat completion requests |\n| `/api/generate` | POST | Generate text with model |\n| `/api/tags` | GET | List available models |\n| `/api/pull` | POST | Pull a new model |\n| `/api/push` | POST | Push a model to registry |\n| `/api/delete` | DELETE | Delete a model |\n| `/api/create` | POST | Create a new model |\n| `/config` | GET/POST | Get/update Ollama configuration |\n| `/verify` | POST | Verify connection to Ollama |\n| `/v1/chat/completions` | POST | OpenAI-compatible chat endpoint |\n| `/v1/models` | GET | OpenAI-compatible models list |\n| `/v1/messages` | POST | Anthropic-compatible messages endpoint |\n| `/v1/responses` | POST | Ollama Responses API endpoint |\n\n资料来源：[backend/open_webui/routers/ollama.py:1-500](https://github.com/open-webui/open-webui/blob/main/backend/open_webui/routers/ollama.py)\n\n### Model Resolution and URL Selection\n\nThe system supports multiple Ollama instances through a URL index system. When a request is made, the router resolves the appropriate Ollama instance based on model configuration.\n\n```mermaid\nsequenceDiagram\n    participant Client\n    participant Router\n    participant Config\n    participant Ollama\n    \n    Client->>Router: POST /api/chat {model: \"llama2\"}\n    Router->>Config: get_ollama_url(model, url_idx)\n    Config->>Config: Check model-to-URL mapping\n    Config->>Config: Check url_idx or default\n    Config-->>Router: (url, url_idx)\n    Router->>Ollama: Forward request to url\n    Ollama-->>Router: Response\n    Router-->>Client: Forwarded response\n```\n\nThe `get_ollama_url` function performs the following resolution logic:\n\n1. If `url_idx` is provided, use the corresponding URL from `OLLAMA_BASE_URLS`\n2. Check model-specific URL mappings stored in `OLLAMA_MODELS`\n3. 
Fall back to the primary `OLLAMA_BASE_URL`\n\n资料来源：[backend/open_webui/routers/ollama.py:100-200](https://github.com/open-webui/open-webui/blob/main/backend/open_webui/routers/ollama.py)\n\n### Prefix ID Handling\n\nFor multi-tenant deployments, the system supports `prefix_id` configuration. When a prefix is configured, model names are automatically transformed:\n\n```python\nprefix_id = api_config.get('prefix_id', None)\nif prefix_id:\n    payload['model'] = payload['model'].replace(f'{prefix_id}.', '')\n```\n\nThis allows users to address models by prefixed names (e.g., `tenant1.llama2`) in Open WebUI while the backend automatically strips the prefix, so the Ollama API receives the short model name (e.g., `llama2`).\n\n资料来源：[backend/open_webui/routers/ollama.py:200-220](https://github.com/open-webui/open-webui/blob/main/backend/open_webui/routers/ollama.py)\n\n## Configuration\n\n### Environment Variables\n\nThe Ollama integration is configured through environment variables in `backend/open_webui/config.py`:\n\n| Variable | Default | Description |\n|----------|---------|-------------|\n| `ENABLE_OLLAMA_API` | `True` | Enable/disable Ollama API |\n| `OLLAMA_API_BASE_URL` | `http://localhost:11434/api` | Primary Ollama API URL |\n| `OLLAMA_BASE_URL` | Auto-derived | Base URL for Ollama connections |\n| `USE_OLLAMA_DOCKER` | `false` | Use all-in-one Docker container |\n| `K8S_FLAG` | Empty | Kubernetes deployment flag |\n\n```python\nENABLE_OLLAMA_API = PersistentConfig(\n    'ENABLE_OLLAMA_API',\n    'ollama.enable',\n    os.environ.get('ENABLE_OLLAMA_API', 'True').lower() == 'true',\n)\n\nOLLAMA_API_BASE_URL = os.environ.get('OLLAMA_API_BASE_URL', 'http://localhost:11434/api')\n```\n\n资料来源：[backend/open_webui/config.py:1-100](https://github.com/open-webui/open-webui/blob/main/backend/open_webui/config.py)\n\n### Port Fallback Resolution\n\nThe configuration includes automatic port fallback logic for environments where the default Ollama port (11434) might be blocked:\n\n```python\ndef 
_resolve_ollama_base_url(url: str) -> str:\n    \"\"\"If the default Ollama port (11434) is unreachable, try the fallback port (12434).\"\"\"\n    # Checks port 11434 first, then falls back to 12434 if unreachable\n```\n\nThis enables seamless operation in environments like certain corporate networks or containerized setups where only specific ports are accessible.\n\n资料来源：[backend/open_webui/config.py:50-80](https://github.com/open-webui/open-webui/blob/main/backend/open_webui/config.py)\n\n### Docker and Kubernetes Handling\n\nThe configuration adapts to different deployment scenarios:\n\n```python\nif OLLAMA_BASE_URL == '/ollama' and not K8S_FLAG:\n    if USE_OLLAMA_DOCKER.lower() == 'true':\n        OLLAMA_BASE_URL = 'http://localhost:11434'\n    else:\n        OLLAMA_BASE_URL = 'http://host.docker.internal:11434'\nelif K8S_FLAG:\n    OLLAMA_BASE_URL = 'http://ollama-service.open-webui.svc.cluster.local:11434'\n```\n\n资料来源：[backend/open_webui/config.py:40-50](https://github.com/open-webui/open-webui/blob/main/backend/open_webui/config.py)\n\n## API Compatibility Layers\n\n### OpenAI-Compatible API\n\nThe Ollama router provides OpenAI-compatible endpoints that translate requests to Ollama's API format:\n\n**Endpoint:** `POST /ollama/v1/chat/completions`\n\nThe system transforms OpenAI-format requests into Ollama-native format:\n\n```python\npayload = apply_model_params_to_body_openai(params, payload)\npayload = await apply_system_prompt_to_body(system, payload, metadata, user)\n```\n\nThis transformation includes:\n- Converting OpenAI parameter names to Ollama format\n- Applying model-specific parameter modifications\n- Injecting system prompts from user metadata\n\n资料来源：[backend/open_webui/routers/ollama.py:150-180](https://github.com/open-webui/open-webui/blob/main/backend/open_webui/routers/ollama.py)\n\n### Anthropic-Compatible API\n\nSupport for Anthropic's `/v1/messages` endpoint is provided through the Responses API:\n\n**Endpoint:** `POST 
/ollama/v1/messages`\n\n```python\n@router.post('/v1/messages')\nasync def generate_anthropic_messages(\n    request: Request,\n    form_data: dict,\n    url_idx: Optional[int] = None,\n    user=Depends(get_verified_user),\n):\n    \"\"\"\n    Proxy for Ollama's Anthropic-compatible /v1/messages endpoint.\n    Forwards the request as-is to the Ollama backend.\n    \"\"\"\n```\n\nThe request is forwarded to Ollama's `/v1/responses` endpoint with appropriate streaming headers.\n\n资料来源：[backend/open_webui/routers/ollama.py:250-280](https://github.com/open-webui/open-webui/blob/main/backend/open_webui/routers/ollama.py)\n\n## Frontend Integration\n\n### API Client (src/lib/apis/ollama/index.ts)\n\nThe frontend provides a TypeScript API client for communicating with the backend Ollama proxy:\n\n**Key Functions:**\n\n| Function | Purpose |\n|----------|---------|\n| `deleteModel()` | Delete a model from Ollama |\n| `pullModel()` | Pull a new model with progress tracking |\n| `verifyOllamaConnection()` | Test connectivity to Ollama instance |\n| `getOllamaConfig()` | Retrieve current Ollama configuration |\n\n```typescript\nexport const pullModel = async (token: string, tagName: string, urlIdx: number | null = null) => {\n    const controller = new AbortController();\n    const res = await fetch(\n        `${OLLAMA_API_BASE_URL}/api/pull${urlIdx !== null ? 
`/${urlIdx}` : ''}`,\n        {\n            signal: controller.signal,\n            method: 'POST',\n            headers: {\n                'Content-Type': 'application/json',\n                Authorization: `Bearer ${token}`\n            },\n            body: JSON.stringify({ name: tagName })\n        }\n    );\n    return res;\n};\n```\n\n资料来源：[src/lib/apis/ollama/index.ts:1-150](https://github.com/open-webui/open-webui/blob/main/src/lib/apis/ollama/index.ts)\n\n### API Base URL Configuration\n\nFrontend constants define the base URLs for API communication:\n\n```typescript\nexport const OLLAMA_API_BASE_URL = `${WEBUI_BASE_URL}/ollama`;\nexport const WEBUI_API_BASE_URL = `${WEBUI_BASE_URL}/api/v1`;\n```\n\nThe system automatically configures the base URL based on environment:\n- **Development:** `http://hostname:8080`\n- **Production:** Uses the configured domain\n\n资料来源：[src/lib/constants.ts:1-30](https://github.com/open-webui/open-webui/blob/main/src/lib/constants.ts)\n\n## Request Flow\n\n```mermaid\ngraph LR\n    A[User Request] --> B[Frontend API Client]\n    B --> C[Backend Router]\n    \n    C --> D{Request Type?}\n    \n    D -->|Native Ollama| E[Native API Handler]\n    D -->|OpenAI Format| F[OpenAI-Compatible Handler]\n    D -->|Anthropic Format| G[Anthropic-Compatible Handler]\n    \n    E --> H[Model Resolution]\n    F --> H\n    G --> H\n    \n    H --> I[Access Control Check]\n    I --> J{Model Access Allowed?}\n    \n    J -->|Yes| K[Forward to Ollama]\n    J -->|No| L[HTTP 403 Forbidden]\n    \n    K --> M[Ollama Instance]\n    M --> N[Response]\n    N --> O[Stream/Return to Client]\n```\n\n## Model Management\n\n### Model Registration\n\nModels discovered from Ollama instances are registered in the application state:\n\n```python\napp.state.OLLAMA_MODELS = {}\n```\n\nEach model entry contains:\n- `urls`: Array of Ollama instance URLs where the model is available\n- `details`: Model metadata (size, capabilities, 
etc.)\n\n资料来源：[backend/open_webui/main.py:100-120](https://github.com/open-webui/open-webui/blob/main/backend/open_webui/main.py)\n\n### Model Access Control\n\nBefore forwarding requests, the system checks user access permissions:\n\n```python\nmodel_info = await Models.get_model_by_id(model_id)\nif model_info:\n    if model_info.base_model_id:\n        payload['model'] = model_info.base_model_id\n    await check_model_access(user, model_info)\nelse:\n    await check_model_access(user, None)\n```\n\n资料来源：[backend/open_webui/routers/ollama.py:130-150](https://github.com/open-webui/open-webui/blob/main/backend/open_webui/routers/ollama.py)\n\n## Error Handling\n\n### Connection Verification\n\nThe system provides a connection verification endpoint for testing Ollama connectivity:\n\n```typescript\nexport const verifyOllamaConnection = async (token: string = '', connection: dict = {}) => {\n    const res = await fetch(`${OLLAMA_API_BASE_URL}/verify`, {\n        method: 'POST',\n        headers: {\n            Authorization: `Bearer ${token}`,\n            'Content-Type': 'application/json'\n        },\n        body: JSON.stringify({ ...connection })\n    });\n    return res;\n};\n```\n\n资料来源：[src/lib/apis/ollama/index.ts:150-180](https://github.com/open-webui/open-webui/blob/main/src/lib/apis/ollama/index.ts)\n\n### Error Messages\n\nCommon error scenarios include:\n\n| Scenario | HTTP Status | Error Message |\n|----------|-------------|---------------|\n| Ollama API Disabled | 503 | `OLLAMA_API_DISABLED` |\n| Model Not Found | 400 | `MODEL_NOT_FOUND` |\n| Network Problem | Various | `Ollama: Network Problem` |\n| Invalid Config | 500 | `DEFAULT(e)` |\n\n资料来源：[backend/open_webui/routers/ollama.py:50-80](https://github.com/open-webui/open-webui/blob/main/backend/open_webui/routers/ollama.py)\n\n## Summary\n\nThe Ollama Integration provides a robust, flexible bridge between Open WebUI and local Ollama instances. 
Key features include:\n\n- **Multi-instance support** through URL indexing\n- **API compatibility layers** for OpenAI and Anthropic formats\n- **Automatic port fallback** for network flexibility\n- **Access control** integration for model permissions\n- **Prefix-based multi-tenancy** support\n- **Streaming support** for real-time responses\n- **Docker and Kubernetes** deployment optimizations\n\nThis integration enables users to run powerful LLM models entirely locally while maintaining a modern, feature-rich web interface for interaction.\n\n---\n\n<a id='rag-pipeline'></a>\n\n## RAG Pipeline\n\n### 相关页面\n\n相关主题：[Ollama Integration](#ollama-integration), [Retrieval System](#retrieval-system)\n\n<details>\n<summary>相关源码文件</summary>\n\n以下源码文件用于生成本页说明：\n\n- [backend/open_webui/routers/knowledge.py](https://github.com/open-webui/open-webui/blob/main/backend/open_webui/routers/knowledge.py)\n- [backend/open_webui/retrieval/utils.py](https://github.com/open-webui/open-webui/blob/main/backend/open_webui/retrieval/utils.py)\n- [backend/open_webui/main.py](https://github.com/open-webui/open-webui/blob/main/backend/open_webui/main.py)\n- [src/lib/constants.ts](https://github.com/open-webui/open-webui/blob/main/src/lib/constants.ts)\n- [backend/open_webui/config.py](https://github.com/open-webui/open-webui/blob/main/backend/open_webui/config.py)\n</details>\n\n# RAG Pipeline\n\nRetrieval-Augmented Generation (RAG) Pipeline in Open WebUI enables users to upload documents, process them into searchable vector embeddings, and augment LLM responses with relevant context from user knowledge bases.\n\n## Architecture Overview\n\nThe RAG Pipeline consists of multiple integrated components that work together to provide document retrieval and context augmentation capabilities.\n\n```mermaid\ngraph TD\n    A[User Upload] --> B[Document Processing]\n    B --> C[Text Extraction]\n    C --> D[Chunking]\n    D --> E[Embedding Generation]\n    E --> F[Vector Storage]\n    F --> 
G[Retrieval]\n    G --> H[Context Injection]\n    H --> I[LLM Response]\n    \n    J[Knowledge Management] --> K[Access Control]\n    K --> F\n```\n\n### Core Components\n\n| Component | Location | Purpose |\n|-----------|----------|---------|\n| Knowledge Router | `backend/open_webui/routers/knowledge.py` | REST API endpoints for knowledge management |\n| Retrieval Utils | `backend/open_webui/retrieval/utils.py` | Document loading and text extraction |\n| API Router | `backend/open_webui/main.py` | Registers retrieval endpoints |\n| Frontend | `src/lib/components/workspace/Knowledge.svelte` | UI for knowledge management |\n\n资料来源：[backend/open_webui/main.py:17-30]()\n\n## Supported Document Types\n\nOpen WebUI supports a wide range of document formats through configurable document loaders.\n\n```typescript\n// src/lib/constants.ts\nexport const SUPPORTED_FILE_TYPE = [\n    'application/epub+zip',\n    'application/pdf',\n    'text/plain',\n    'text/csv',\n    'text/xml',\n    'text/html',\n    'text/x-python',\n    'text/css',\n    'application/vnd.openxmlformats-officedocument.wordprocessingml.document',\n    'application/octet-stream',\n    'application/x-javascript',\n    'text/markdown',\n    'audio/mpeg',\n    'audio/wav',\n    'video/mp4',\n    'video/mpeg'\n];\n```\n\n资料来源：[src/lib/constants.ts:16-30]()\n\n## Document Processing Pipeline\n\n### Text Extraction\n\nThe retrieval utility module handles document parsing through multiple backends:\n\n```python\n# backend/open_webui/retrieval/utils.py\ndef _extract_text_from_binary_response(request, response, url):\n    \"\"\"Download response body to a temp file and extract text using the Loader pipeline.\"\"\"\n    import mimetypes\n    import tempfile\n    import urllib.parse\n```\n\n### Supported Document Loaders\n\n| Loader | Purpose | Configuration |\n|--------|---------|---------------|\n| TIKA Server | Apache Tika for generic document parsing | `TIKA_SERVER_URL` |\n| DOCLING | Advanced PDF and document 
processing | `DOCLING_SERVER_URL`, `DOCLING_API_KEY` |\n| PDF Loader | Configurable PDF extraction | `PDF_LOADER_MODE`, `PDF_EXTRACT_IMAGES` |\n| Document Intelligence | Azure AI document analysis | `DOCUMENT_INTELLIGENCE_ENDPOINT` |\n| Mistral OCR | OCR for scanned documents | `MISTRAL_OCR_API_BASE_URL` |\n| PaddleOCR VL | Visual language OCR | `PADDLEOCR_VL_BASE_URL` |\n| MinerU | Chinese document processing | `MINERU_API_MODE`, `MINERU_API_URL` |\n\n资料来源：[backend/open_webui/retrieval/utils.py:1-25]()\n\n## Knowledge Management API\n\n### Endpoints Overview\n\nThe knowledge router provides CRUD operations for managing user knowledge bases.\n\n```\n/api/v1/knowledge          - List and create knowledge bases\n/api/v1/knowledge/{id}    - Get, update, delete specific knowledge\n/api/v1/knowledge/{id}/file/add     - Add file to knowledge base\n/api/v1/knowledge/{id}/search       - Search within knowledge base\n```\n\n### Access Control\n\nKnowledge resources are protected by role-based access control:\n\n```python\n# backend/open_webui/routers/knowledge.py\nif not (\n    user.role == 'admin'\n    or knowledge.user_id == user.id\n    or await AccessGrants.has_access(\n        user_id=user.id,\n        resource_type='knowledge',\n        resource_id=knowledge.id,\n        permission='read',\n        db=db,\n    )\n):\n    raise HTTPException(\n        status_code=status.HTTP_400_BAD_REQUEST,\n        detail=ERROR_MESSAGES.ACCESS_PROHIBITED,\n    )\n```\n\n资料来源：[backend/open_webui/routers/knowledge.py:40-55]()\n\n### Search Functionality\n\nThe search endpoint supports pagination and filtering:\n\n| Parameter | Type | Default | Description |\n|-----------|------|---------|-------------|\n| `page` | int | 1 | Page number (minimum 1) |\n| `query` | string | - | Search query text |\n| `view_option` | string | - | Filter option |\n| `order_by` | string | - | Sort field |\n| `direction` | string | - | Sort direction |\n\n```python\npage = max(page, 1)\nlimit = 30\nskip = 
(page - 1) * limit\n\nfilter = {}\nif query:\n    filter['query'] = query\n```\n\n资料来源：[backend/open_webui/routers/knowledge.py:57-70]()\n\n## Configuration Options\n\n### Environment Variables\n\n| Variable | Default | Description |\n|----------|---------|-------------|\n| `EXTERNAL_DOCUMENT_LOADER_URL` | - | External document loader endpoint |\n| `EXTERNAL_DOCUMENT_LOADER_API_KEY` | - | API key for external loader |\n| `TIKA_SERVER_URL` | - | Apache Tika server URL |\n| `DOCLING_SERVER_URL` | - | Docling server endpoint |\n| `DOCLING_API_KEY` | - | Docling API authentication |\n| `PDF_LOADER_MODE` | - | PDF extraction mode |\n| `PDF_EXTRACT_IMAGES` | - | Enable image extraction from PDFs |\n| `DOCUMENT_INTELLIGENCE_ENDPOINT` | - | Azure AI endpoint |\n| `DOCUMENT_INTELLIGENCE_KEY` | - | Azure AI API key |\n| `MISTRAL_OCR_API_BASE_URL` | - | Mistral OCR service URL |\n| `MISTRAL_OCR_API_KEY` | - | Mistral OCR authentication |\n| `PADDLEOCR_VL_BASE_URL` | - | PaddleOCR endpoint |\n| `PADDLEOCR_VL_TOKEN` | - | PaddleOCR token |\n| `MINERU_API_MODE` | - | MinerU processing mode |\n| `MINERU_API_URL` | - | MinerU API endpoint |\n| `MINERU_API_KEY` | - | MinerU API key |\n| `MINERU_API_TIMEOUT` | - | MinerU request timeout |\n\n资料来源：[backend/open_webui/config.py:1-25]()\n\n## Data Flow\n\n```mermaid\nsequenceDiagram\n    participant U as User\n    participant F as Frontend\n    participant API as Knowledge API\n    participant DL as Document Loader\n    participant VC as Vector Cache\n    participant LLM as LLM\n    \n    U->>F: Upload Document\n    F->>API: POST /api/v1/knowledge/{id}/file/add\n    API->>DL: Extract Text\n    DL-->>API: Raw Text Content\n    API->>VC: Generate Embeddings\n    VC-->>API: Vector Embeddings\n    API-->>F: Success Response\n    \n    U->>F: Query with RAG\n    F->>API: POST /api/v1/retrieval\n    API->>VC: Search Vectors\n    VC-->>API: Relevant Chunks\n    API-->>F: Augmented Context\n    F->>LLM: Prompt + Context\n    LLM-->>U: 
Generated Response\n```\n\n## Frontend Integration\n\nThe knowledge management interface is implemented as a Svelte component:\n\n- Location: `src/lib/components/workspace/Knowledge.svelte`\n- Provides file upload, management, and search UI\n- Communicates with backend via REST API\n\n### API Base URLs\n\n```typescript\n// src/lib/constants.ts\nexport const RETRIEVAL_API_BASE_URL = `${WEBUI_BASE_URL}/api/v1/retrieval`;\nexport const AUDIO_API_BASE_URL = `${WEBUI_BASE_URL}/api/v1/audio`;\nexport const IMAGES_API_BASE_URL = `${WEBUI_BASE_URL}/api/v1/images`;\n```\n\n资料来源：[src/lib/constants.ts:9-13]()\n\n## Dependencies\n\nKey Python packages for RAG functionality:\n\n| Package | Version | Purpose |\n|---------|---------|---------|\n| `sqlalchemy` | 2.0.48 | Database ORM |\n| `requests` | 2.33.1 | HTTP client |\n| `httpx` | 0.28.1 | Async HTTP with HTTP/2 support |\n| `aiofiles` | - | Async file operations |\n| `redis` | - | Vector caching |\n| `pycrdt` | 0.12.47 | CRDT operations |\n\n资料来源：[backend/requirements-min.txt:1-35]()\n\n## Error Handling\n\nThe system uses centralized error messages:\n\n```python\nERROR_MESSAGES.NOT_FOUND = \"Knowledge base not found\"\nERROR_MESSAGES.ACCESS_PROHIBITED = \"Access denied to this knowledge base\"\n```\n\n## Best Practices\n\n1. **Document Preparation**: Use supported formats for optimal extraction quality\n2. **Chunking Strategy**: Configure appropriate chunk sizes based on use case\n3. **Access Control**: Leverage RBAC to protect sensitive knowledge bases\n4. **Loader Selection**: Choose appropriate document loader based on document complexity\n5. **Resource Management**: Monitor vector storage size for large knowledge bases\n\n---\n\n---\n\n## Doramagic 踩坑日志\n\n项目：open-webui/open-webui\n\n摘要：发现 6 个潜在踩坑项，其中 0 个为 high/blocking；最高优先级：能力坑 - 能力判断依赖假设。\n\n## 1. 
能力坑 · 能力判断依赖假设\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：README/documentation is current enough for a first validation pass.\n- 对用户的影响：假设不成立时，用户拿不到承诺的能力。\n- 建议检查：将假设转成下游验证清单。\n- 防护动作：假设必须转成验证项；没有验证结果前不能写成事实。\n- 证据：capability.assumptions | github_repo:701547123 | https://github.com/open-webui/open-webui | README/documentation is current enough for a first validation pass.\n\n## 2. 维护坑 · 维护活跃度未知\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：未记录 last_activity_observed。\n- 对用户的影响：新项目、停更项目和活跃项目会被混在一起，推荐信任度下降。\n- 建议检查：补 GitHub 最近 commit、release、issue/PR 响应信号。\n- 防护动作：维护活跃度未知时，推荐强度不能标为高信任。\n- 证据：evidence.maintainer_signals | github_repo:701547123 | https://github.com/open-webui/open-webui | last_activity_observed missing\n\n## 3. 安全/权限坑 · 下游验证发现风险项\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：no_demo\n- 对用户的影响：下游已经要求复核，不能在页面中弱化。\n- 建议检查：进入安全/权限治理复核队列。\n- 防护动作：下游风险存在时必须保持 review/recommendation 降级。\n- 证据：downstream_validation.risk_items | github_repo:701547123 | https://github.com/open-webui/open-webui | no_demo; severity=medium\n\n## 4. 安全/权限坑 · 存在评分风险\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：no_demo\n- 对用户的影响：风险会影响是否适合普通用户安装。\n- 建议检查：把风险写入边界卡，并确认是否需要人工复核。\n- 防护动作：评分风险必须进入边界卡，不能只作为内部分数。\n- 证据：risks.scoring_risks | github_repo:701547123 | https://github.com/open-webui/open-webui | no_demo; severity=medium\n\n## 5. 维护坑 · issue/PR 响应质量未知\n\n- 严重度：low\n- 证据强度：source_linked\n- 发现：issue_or_pr_quality=unknown。\n- 对用户的影响：用户无法判断遇到问题后是否有人维护。\n- 建议检查：抽样最近 issue/PR，判断是否长期无人处理。\n- 防护动作：issue/PR 响应未知时，必须提示维护风险。\n- 证据：evidence.maintainer_signals | github_repo:701547123 | https://github.com/open-webui/open-webui | issue_or_pr_quality=unknown\n\n## 6. 
维护坑 · 发布节奏不明确\n\n- 严重度：low\n- 证据强度：source_linked\n- 发现：release_recency=unknown。\n- 对用户的影响：安装命令和文档可能落后于代码，用户踩坑概率升高。\n- 建议检查：确认最近 release/tag 和 README 安装命令是否一致。\n- 防护动作：发布节奏未知或过期时，安装说明必须标注可能漂移。\n- 证据：evidence.maintainer_signals | github_repo:701547123 | https://github.com/open-webui/open-webui | release_recency=unknown\n\n<!-- canonical_name: open-webui/open-webui; human_manual_source: deepwiki_human_wiki -->\n",
      "markdown_key": "open-webui",
      "pages": "draft",
      "source_refs": [
        {
          "evidence_id": "github_repo:701547123",
          "kind": "repo",
          "supports_claim_ids": [
            "claim_identity",
            "claim_distribution",
            "claim_capability"
          ],
          "url": "https://github.com/open-webui/open-webui"
        },
        {
          "evidence_id": "art_24eeae53acef4d4d93d4a4da03164209",
          "kind": "docs",
          "supports_claim_ids": [
            "claim_identity",
            "claim_distribution",
            "claim_capability"
          ],
          "url": "https://github.com/open-webui/open-webui#readme"
        }
      ],
      "summary": "DeepWiki/Human Wiki 完整输出，末尾追加 Discovery Agent 踩坑日志。",
      "title": "open-webui 说明书",
      "toc": [
        "https://github.com/open-webui/open-webui 项目说明书",
        "目录",
        "Project Introduction",
        "Overview",
        "Architecture",
        "Features",
        "Configuration System",
        "Security Features",
        "Doramagic 踩坑日志"
      ]
    }
  },
  "quality_gate": {
    "blocking_gaps": [],
    "category_confidence": "medium",
    "compile_status": "ready_for_review",
    "five_assets_present": true,
    "install_sandbox_verified": true,
    "missing_evidence": [],
    "next_action": "publish to Doramagic.ai project surfaces",
    "prompt_preview_boundary_ok": true,
    "publish_status": "publishable",
    "quick_start_verified": true,
    "repo_clone_verified": true,
    "repo_commit": "3660bc00fd807deced3400a63bfa6db47811a3bb",
    "repo_inspection_error": null,
    "repo_inspection_files": [
      "pyproject.toml",
      "Dockerfile",
      "package.json",
      "README.md",
      "uv.lock",
      "docs/SECURITY.md",
      "src/app.d.ts",
      "src/lib/shortcuts.ts",
      "src/lib/dayjs.js",
      "src/lib/constants.ts",
      "src/lib/index.ts",
      "src/lib/emoji-shortcodes.json",
      "src/lib/emoji-groups.json",
      "src/routes/+layout.js",
      "src/lib/workers/kokoro.worker.ts",
      "src/lib/workers/pyodide.worker.ts",
      "src/lib/workers/KokoroWorker.ts",
      "src/lib/stores/index.ts",
      "src/lib/utils/safeImageUrl.ts",
      "src/lib/utils/connections.ts",
      "src/lib/utils/audio.ts",
      "src/lib/utils/codeHighlight.ts",
      "src/lib/utils/_template_old.ts",
      "src/lib/utils/index.ts",
      "src/lib/utils/codemirror.ts",
      "src/lib/utils/onedrive-file-picker.ts",
      "src/lib/utils/text-scale.ts",
      "src/lib/utils/google-drive-picker.ts",
      "src/lib/utils/excelToTable.ts",
      "src/lib/utils/csp.ts",
      "src/lib/utils/pptxToHtml.ts",
      "src/lib/constants/permissions.ts",
      "src/lib/i18n/index.ts",
      "src/lib/apis/index.ts",
      "src/lib/types/index.ts",
      "src/lib/pyodide/pyodideKernel.worker.ts",
      "src/lib/pyodide/pyodideKernel.ts",
      "src/lib/components/notes/utils.ts",
      "src/lib/components/common/RichTextInput/listDragHandlePlugin.js",
      "src/lib/components/common/RichTextInput/Collaboration.ts"
    ],
    "repo_inspection_verified": true,
    "review_reasons": [],
    "tag_count_ok": true,
    "unsupported_claims": []
  },
  "schema_version": "0.1",
  "user_assets": {
    "ai_context_pack": {
      "asset_id": "ai_context_pack",
      "filename": "AI_CONTEXT_PACK.md",
      "markdown": "# open-webui - Doramagic AI Context Pack\n\n> 定位：安装前体验与判断资产。它帮助宿主 AI 有一个好的开始，但不代表已经安装、执行或验证目标项目。\n\n## 充分原则\n\n- **充分原则，不是压缩原则**：AI Context Pack 应该充分到让宿主 AI 在开工前理解项目价值、能力边界、使用入口、风险和证据来源；它可以分层组织，但不以最短摘要为目标。\n- **压缩策略**：只压缩噪声和重复内容，不压缩会影响判断和开工质量的上下文。\n\n## 给宿主 AI 的使用方式\n\n你正在读取 Doramagic 为 open-webui 编译的 AI Context Pack。请把它当作开工前上下文：帮助用户理解适合谁、能做什么、如何开始、哪些必须安装后验证、风险在哪里。不要声称你已经安装、运行或执行了目标项目。\n\n## Claim 消费规则\n\n- **事实来源**：Repo Evidence + Claim/Evidence Graph；Human Wiki 只提供显著性、术语和叙事结构。\n- **事实最低状态**：`supported`\n- `supported`：可以作为项目事实使用，但回答中必须引用 claim_id 和证据路径。\n- `weak`：只能作为低置信度线索，必须要求用户继续核实。\n- `inferred`：只能用于风险提示或待确认问题，不能包装成项目事实。\n- `unverified`：不得作为事实使用，应明确说证据不足。\n- `contradicted`：必须展示冲突来源，不得替用户强行选择一个版本。\n\n## 它最适合谁\n\n- **正在使用 Claude/Codex/Cursor/Gemini 等宿主 AI 的开发者**：README 或插件配置提到多个宿主 AI。 证据：`README.md` Claim：`clm_0002` supported 0.86\n\n## 它能做什么\n\n- **命令行启动或安装流程**（需要安装后验证）：项目文档中存在可执行命令，真实使用需要在本地或宿主环境中运行这些命令。 证据：`README.md` Claim：`clm_0001` supported 0.86\n\n## 怎么开始\n\n- `pip install open-webui` 证据：`README.md` Claim：`clm_0003` supported 0.86\n\n## 继续前判断卡\n\n- **当前建议**：仅建议沙盒试装\n- **为什么**：项目存在安装命令、宿主配置或本地写入线索，不建议直接进入主力环境，应先在隔离环境试装。\n\n### 30 秒判断\n\n- **现在怎么做**：仅建议沙盒试装\n- **最小安全下一步**：先跑 Prompt Preview；若仍要安装，只在隔离环境试装\n- **先别相信**：真实输出质量不能在安装前相信。\n- **继续会触碰**：命令执行、本地环境或项目文件、宿主 AI 上下文\n\n### 现在可以相信\n\n- **适合人群线索：正在使用 Claude/Codex/Cursor/Gemini 等宿主 AI 的开发者**（supported）：有 supported claim 或项目证据支撑，但仍不等于真实安装效果。 证据：`README.md` Claim：`clm_0002` supported 0.86\n- **能力存在：命令行启动或安装流程**（supported）：可以相信项目包含这类能力线索；是否适合你的具体任务仍要试用或安装后验证。 证据：`README.md` Claim：`clm_0001` supported 0.86\n- **存在 Quick Start / 安装命令线索**（supported）：可以相信项目文档出现过启动或安装入口；不要因此直接在主力环境运行。 证据：`README.md` Claim：`clm_0003` supported 0.86\n\n### 现在还不能相信\n\n- **真实输出质量不能在安装前相信。**（unverified）：Prompt Preview 只能展示引导方式，不能证明真实项目中的结果质量。\n- **宿主 AI 版本兼容性不能在安装前相信。**（unverified）：Claude、Cursor、Codex、Gemini 等宿主加载规则和版本差异必须在真实环境验证。\n- **不会污染现有宿主 AI 行为，不能直接相信。**（inferred）：Skill、plugin、AGENTS/CLAUDE/GEMINI 指令可能改变宿主 AI 
的默认行为。\n- **可安全回滚不能默认相信。**（unverified）：除非项目明确提供卸载和恢复说明，否则必须先在隔离环境验证。\n- **真实安装后是否与用户当前宿主 AI 版本兼容？**（unverified）：兼容性只能通过实际宿主环境验证。\n- **项目输出质量是否满足用户具体任务？**（unverified）：安装前预览只能展示流程和边界，不能替代真实评测。\n- **安装命令是否需要网络、权限或全局写入？**（unverified）：这影响企业环境和个人环境的安装风险。 证据：`README.md`\n\n### 继续会触碰什么\n\n- **命令执行**：包管理器、网络下载、本地插件目录、项目配置或用户主目录。 原因：运行第一条命令就可能产生环境改动；必须先判断是否值得跑。 证据：`README.md`\n- **本地环境或项目文件**：安装结果、插件缓存、项目配置或本地依赖目录。 原因：安装前无法证明写入范围和回滚方式，需要隔离验证。 证据：`README.md`\n- **宿主 AI 上下文**：AI Context Pack、Prompt Preview、Skill 路由、风险规则和项目事实。 原因：导入上下文会影响宿主 AI 后续判断，必须避免把未验证项包装成事实。\n\n### 最小安全下一步\n\n- **先跑 Prompt Preview**：用安装前交互式试用判断工作方式是否匹配，不需要授权或改环境。（适用：任何项目都适用，尤其是输出质量未知时。）\n- **只在隔离目录或测试账号试装**：避免安装命令污染主力宿主 AI、真实项目或用户主目录。（适用：存在命令执行、插件配置或本地写入线索时。）\n- **安装后只验证一个最小任务**：先验证加载、兼容、输出质量和回滚，再决定是否深用。（适用：准备从试用进入真实工作流时。）\n\n### 退出方式\n\n- **保留安装前状态**：记录原始宿主配置和项目状态，后续才能判断是否可恢复。\n- **记录安装命令和写入路径**：没有明确卸载说明时，至少要知道哪些目录或配置需要手动清理。\n- **如果没有回滚路径，不进入主力环境**：不可回滚是继续前阻断项，不应靠信任或运气继续。\n\n## 哪些只能预览\n\n- 解释项目适合谁和能做什么\n- 基于项目文档演示典型对话流程\n- 帮助用户判断是否值得安装或继续研究\n\n## 哪些必须安装后验证\n\n- 真实安装 Skill、插件或 CLI\n- 执行脚本、修改本地文件或访问外部服务\n- 验证真实输出质量、性能和兼容性\n\n## 边界与风险判断卡\n\n- **把安装前预览误认为真实运行**：用户可能高估项目已经完成的配置、权限和兼容性验证。 处理方式：明确区分 prompt_preview_can_do 与 runtime_required。 Claim：`clm_0004` inferred 0.45\n- **命令执行会修改本地环境**：安装命令可能写入用户主目录、宿主插件目录或项目配置。 处理方式：先在隔离环境或测试账号中运行。 证据：`README.md` Claim：`clm_0005` supported 0.86\n- **待确认**：真实安装后是否与用户当前宿主 AI 版本兼容？。原因：兼容性只能通过实际宿主环境验证。\n- **待确认**：项目输出质量是否满足用户具体任务？。原因：安装前预览只能展示流程和边界，不能替代真实评测。\n- **待确认**：安装命令是否需要网络、权限或全局写入？。原因：这影响企业环境和个人环境的安装风险。\n\n## 开工前工作上下文\n\n### 加载顺序\n\n- 先读取 how_to_use.host_ai_instruction，建立安装前判断资产的边界。\n- 读取 claim_graph_summary，确认事实来自 Claim/Evidence Graph，而不是 Human Wiki 叙事。\n- 再读取 intended_users、capabilities 和 quick_start_candidates，判断用户是否匹配。\n- 需要执行具体任务时，优先查 role_skill_index，再查 evidence_index。\n- 遇到真实安装、文件修改、网络访问、性能或兼容性问题时，转入 risk_card 和 boundaries.runtime_required。\n\n### 任务路由\n\n- **命令行启动或安装流程**：先说明这是安装后验证能力，再给出安装前检查清单。 边界：必须真实安装或运行后验证。 证据：`README.md` Claim：`clm_0001` supported 
0.86\n\n### 上下文规模\n\n- 文件总数：4906\n- 重要文件覆盖：40/4906\n- 证据索引条目：80\n- 角色 / Skill 条目：6\n\n### 证据不足时的处理\n\n- **missing_evidence**：说明证据不足，要求用户提供目标文件、README 段落或安装后验证记录；不要补全事实。\n- **out_of_scope_request**：说明该任务超出当前 AI Context Pack 证据范围，并建议用户先查看 Human Manual 或真实安装后验证。\n- **runtime_request**：给出安装前检查清单和命令来源，但不要替用户执行命令或声称已执行。\n- **source_conflict**：同时展示冲突来源，标记为待核实，不要强行选择一个版本。\n\n## Prompt Recipes\n\n### 适配判断\n\n- 目标：判断这个项目是否适合用户当前任务。\n- 预期输出：适配结论、关键理由、证据引用、安装前可预览内容、必须安装后验证内容、下一步建议。\n\n```text\n请基于 open-webui 的 AI Context Pack，先问我 3 个必要问题，然后判断它是否适合我的任务。回答必须包含：适合谁、能做什么、不能做什么、是否值得安装、证据来自哪里。所有项目事实必须引用 evidence_refs、source_paths 或 claim_id。\n```\n\n### 安装前体验\n\n- 目标：让用户在安装前感受核心工作流，同时避免把预览包装成真实能力或营销承诺。\n- 预期输出：一段带边界标签的体验剧本、安装后验证清单和谨慎建议；不含真实运行承诺或强营销表述。\n\n```text\n请把 open-webui 当作安装前体验资产，而不是已安装工具或真实运行环境。\n\n请严格输出四段：\n1. 先问我 3 个必要问题。\n2. 给出一段“体验剧本”：用 [安装前可预览]、[必须安装后验证]、[证据不足] 三种标签展示它可能如何引导工作流。\n3. 给出安装后验证清单：列出哪些能力只有真实安装、真实宿主加载、真实项目运行后才能确认。\n4. 给出谨慎建议：只能说“值得继续研究/试装”“先补充信息后再判断”或“不建议继续”，不得替项目背书。\n\n硬性边界：\n- 不要声称已经安装、运行、执行测试、修改文件或产生真实结果。\n- 不要写“自动适配”“确保通过”“完美适配”“强烈建议安装”等承诺性表达。\n- 如果描述安装后的工作方式，必须使用“如果安装成功且宿主正确加载 Skill，它可能会……”这种条件句。\n- 体验剧本只能写成“示例台词/假设流程”：使用“可能会询问/可能会建议/可能会展示”，不要写“已写入、已生成、已通过、正在运行、正在生成”。\n- Prompt Preview 不负责给安装命令；如用户准备试装，只能提示先阅读 Quick Start 和 Risk Card，并在隔离环境验证。\n- 所有项目事实必须来自 supported claim、evidence_refs 或 source_paths；inferred/unverified 只能作风险或待确认项。\n\n```\n\n### 角色 / Skill 选择\n\n- 目标：从项目里的角色或 Skill 中挑选最匹配的资产。\n- 预期输出：候选角色或 Skill 列表，每项包含适用场景、证据路径、风险边界和是否需要安装后验证。\n\n```text\n请读取 role_skill_index，根据我的目标任务推荐 3-5 个最相关的角色或 Skill。每个推荐都要说明适用场景、可能输出、风险边界和 evidence_refs。\n```\n\n### 风险预检\n\n- 目标：安装或引入前识别环境、权限、规则冲突和质量风险。\n- 预期输出：环境、权限、依赖、许可、宿主冲突、质量风险和未知项的检查清单。\n\n```text\n请基于 risk_card、boundaries 和 quick_start_candidates，给我一份安装前风险预检清单。不要替我执行命令，只说明我应该检查什么、为什么检查、失败会有什么影响。\n```\n\n### 宿主 AI 开工指令\n\n- 目标：把项目上下文转成一次对话开始前的宿主 AI 指令。\n- 预期输出：一段边界明确、证据引用明确、适合复制给宿主 AI 的开工前指令。\n\n```text\n请基于 open-webui 的 AI Context Pack，生成一段我可以粘贴给宿主 AI 的开工前指令。这段指令必须遵守 
not_runtime=true，不能声称项目已经安装、运行或产生真实结果。\n```\n\n\n## 角色 / Skill 索引\n\n- 共索引 6 个角色 / Skill / 项目文档条目。\n\n- **Open WebUI 👋**（project_doc）：! GitHub stars https://img.shields.io/github/stars/open-webui/open-webui?style=social ! GitHub forks https://img.shields.io/github/forks/open-webui/open-webui?style=social ! GitHub watchers https://img.shields.io/github/watchers/open-webui/open-webui?style=social ! GitHub repo size https://img.shields.io/github/repo-size/open-webui/open-webui ! GitHub language count https://img.shields.io/github/languages/count/open… 激活提示：当用户需要理解项目结构、安装方式或边界时参考。 证据：`README.md`\n- **Security Policy**（project_doc）：Our primary goal is to ensure the protection and confidentiality of sensitive data stored by users on open-webui. 激活提示：当用户需要理解项目结构、安装方式或边界时参考。 证据：`docs/SECURITY.md`\n- **Pull Request Checklist**（project_doc）：Note to first-time contributors: Please open a discussion post in Discussions https://github.com/open-webui/open-webui/discussions to discuss your idea/fix with the community before creating a pull request, and describe your changes before submitting a pull request. 激活提示：当用户需要理解项目结构、安装方式或边界时参考。 证据：`.github/pull_request_template.md`\n- **Changelog**（project_doc）：All notable changes to this project will be documented in this file. 激活提示：当用户需要理解项目结构、安装方式或边界时参考。 证据：`CHANGELOG.md`\n- **Contributor Covenant Code of Conduct**（project_doc）：Contributor Covenant Code of Conduct 激活提示：当用户需要理解项目结构、安装方式或边界时参考。 证据：`CODE_OF_CONDUCT.md`\n- **Open WebUI Troubleshooting Guide**（project_doc）：Understanding the Open WebUI Architecture 激活提示：当用户需要理解项目结构、安装方式或边界时参考。 证据：`TROUBLESHOOTING.md`\n\n## 证据索引\n\n- 共索引 80 条证据。\n\n- **Open WebUI 👋**（documentation）：! GitHub stars https://img.shields.io/github/stars/open-webui/open-webui?style=social ! GitHub forks https://img.shields.io/github/forks/open-webui/open-webui?style=social ! GitHub watchers https://img.shields.io/github/watchers/open-webui/open-webui?style=social ! 
GitHub repo size https://img.shields.io/github/repo-size/open-webui/open-webui ! GitHub language count https://img.shields.io/github/languages/count/open-webui/open-webui ! GitHub top language https://img.shields.io/github/languages/top/open-webui/open-webui ! GitHub last commit https://img.shields.io/github/last-commit/open-webui/open-webui?color=red ! Discord https://img.shields.io/badge/Discord-Open WebUI-blue?logo=discord&log… 证据：`README.md`\n- **Package**（package_manifest）：{ \"name\": \"open-webui\", \"version\": \"0.9.5\", \"private\": true, \"scripts\": { \"dev\": \"npm run pyodide:fetch && vite dev --host\", \"dev:5050\": \"npm run pyodide:fetch && vite dev --port 5050\", \"build\": \"npm run pyodide:fetch && vite build\", \"build:watch\": \"npm run pyodide:fetch && vite build --watch\", \"preview\": \"vite preview\", \"check\": \"svelte-kit sync && svelte-check --tsconfig ./tsconfig.json\", \"check:watch\": \"svelte-kit sync && svelte-check --tsconfig ./tsconfig.json --watch\", \"lint\": \"npm run lint:frontend ; npm run lint:types ; npm run lint:backend\", \"lint:frontend\": \"eslint . --fix\", \"lint:types\": \"npm run check\", \"lint:backend\": \"pylint backend/\", \"format\": \"prettier --plugin-search-dir --… 证据：`package.json`\n- **License**（source_file）：Copyright c 2023- Open WebUI Inc. Created by Timothy Jaeryang Baek All rights reserved. 证据：`LICENSE`\n- **Security Policy**（documentation）：Our primary goal is to ensure the protection and confidentiality of sensitive data stored by users on open-webui. 证据：`docs/SECURITY.md`\n- **Pull Request Checklist**（documentation）：Note to first-time contributors: Please open a discussion post in Discussions https://github.com/open-webui/open-webui/discussions to discuss your idea/fix with the community before creating a pull request, and describe your changes before submitting a pull request. 
证据：`.github/pull_request_template.md`\n- **Changelog**（documentation）：All notable changes to this project will be documented in this file. 证据：`CHANGELOG.md`\n- **Contributor Covenant Code of Conduct**（documentation）：Contributor Covenant Code of Conduct 证据：`CODE_OF_CONDUCT.md`\n- **Open WebUI Troubleshooting Guide**（documentation）：Understanding the Open WebUI Architecture 证据：`TROUBLESHOOTING.md`\n- **Tsconfig**（structured_config）：{ \"extends\": \"../tsconfig.json\", \"compilerOptions\": { \"inlineSourceMap\": true, \"sourceMap\": false } } 证据：`cypress/tsconfig.json`\n- **Manifest**（structured_config）：{} 证据：`static/manifest.json`\n- **Tsconfig**（structured_config）：{ \"extends\": \"./.svelte-kit/tsconfig.json\", \"compilerOptions\": { \"allowJs\": true, \"checkJs\": true, \"esModuleInterop\": true, \"forceConsistentCasingInFileNames\": true, \"resolveJsonModule\": true, \"skipLibCheck\": true, \"sourceMap\": true, \"strict\": true } // Path aliases are handled by https://kit.svelte.dev/docs/configuration alias // // If you want to overwrite includes/excludes, make sure to copy over the relevant includes/excludes // from the referenced tsconfig.json - TypeScript does not merge them in } 证据：`tsconfig.json`\n- **Emoji Groups**（structured_config）：{ \"Smileys & Emotion\": \"1F600\", \"1F603\", \"1F604\", \"1F601\", \"1F606\", \"1F605\", \"1F923\", \"1F602\", \"1F642\", \"1F643\", \"1FAE0\", \"1F609\", \"1F60A\", \"1F607\", \"1F970\", \"1F60D\", \"1F929\", \"1F618\", \"1F617\", \"263A-FE0F\", \"263A\", \"1F61A\", \"1F619\", \"1F972\", \"1F60B\", \"1F61B\", \"1F61C\", \"1F92A\", \"1F61D\", \"1F911\", \"1F917\", \"1F92D\", \"1FAE2\", \"1FAE3\", \"1F92B\", \"1F914\", \"1FAE1\", \"1F910\", \"1F928\", \"1F610\", \"1F611\", \"1F636\", \"1FAE5\", \"1F636-200D-1F32B-FE0F\", \"1F636-200D-1F32B\", \"1F60F\", \"1F612\", \"1F644\", \"1F62C\", \"1F62E-200D-1F4A8\", \"1F925\", \"1FAE8\", \"1F642-200D-2194-FE0F\", \"1F642-200D-2194\", \"1F642-200D-2195-FE0F\", \"1F642-200D-2195\", 
\"1F60C\", \"1F614\", \"1F62A\", \"1F924\", \"1F634\", \"1FAE9\", \"1F637\", \"1F912\", \"1F915\",… 证据：`src/lib/emoji-groups.json`\n- **Emoji Shortcodes**（structured_config）：{ \"2049\": \"interrobang\", \"2122\": \"tm\", \"2139\": \"information source\", \"2194\": \"left right arrow\", \"2195\": \"arrow up down\", \"2196\": \"arrow upper left\", \"2197\": \"arrow upper right\", \"2198\": \"arrow lower right\", \"2199\": \"arrow lower left\", \"2328\": \"keyboard\", \"2600\": \"sunny\", \"2601\": \"cloud\", \"2602\": \"umbrella\", \"2603\": \"snowman\", \"2604\": \"comet\", \"2611\": \"ballot box with check\", \"2614\": \"umbrella with rain drops\", \"2615\": \"coffee\", \"2618\": \"shamrock\", \"2620\": \"skull and crossbones\", \"2622\": \"radioactive sign\", \"2623\": \"biohazard sign\", \"2626\": \"orthodox cross\", \"2638\": \"wheel of dharma\", \"2639\": \"white frowning face\", \"2640\": \"female sign\", \"2642\": \"male sign\", \"2648\": \"aries\", \"2649\": \"taurus… 证据：`src/lib/emoji-shortcodes.json`\n- **Translation**（structured_config）：{ \"-1 for no limit, or a positive integer for a specific limit\": \"\", \"'s', 'm', 'h', 'd', 'w' or '-1' for no expiration.\": \"'s', 'm', 'h', 'd', 'w' أو '-1' لا توجد انتهاء\", \" e.g. sh webui.sh --api --api-auth username password \": \"\", \" e.g. 
sh webui.sh --api \": \" sh webui.sh --api مثال \", \" latest \": \" الأخير \", \" leave blank for to use commercial endpoint \": \"\", \" Last dddd at h:mm A\": \"\", \" Today at h:mm A\": \"\", \" Yesterday at h:mm A\": \"\", \"{{ models }}\": \"{{ نماذج }}\", \"{{COUNT}} Available Tools\": \"\", \"{{COUNT}} characters\": \"\", \"{{COUNT}} extracted lines\": \"\", \"{{COUNT}} files\": \"\", \"{{COUNT}} hidden lines\": \"\", \"{{COUNT}} members\": \"\", \"{{COUNT}} Replies\": \"\", \"{{COUNT}} Rows\": \"\", \"{{… 证据：`src/lib/i18n/locales/ar-BH/translation.json`\n- **Translation**（structured_config）：{ \"-1 for no limit, or a positive integer for a specific limit\": \"-1 لعدم وجود حد، أو عدد صحيح موجب لحد معين\", \"'s', 'm', 'h', 'd', 'w' or '-1' for no expiration.\": \"الحروف 's'، 'm'، 'h'، 'd'، 'w' أو '-1' لعدم انتهاء الصلاحية.\", \" e.g. sh webui.sh --api --api-auth username password \": \" مثال: sh webui.sh --api --api-auth اسم المستخدم كلمة المرور \", \" e.g. sh webui.sh --api \": \" مثال: تشغيل الأمر: sh webui.sh --api \", \" latest \": \" أحدث \", \" leave blank for to use commercial endpoint \": \"\", \" Last dddd at h:mm A\": \"\", \" Today at h:mm A\": \"\", \" Yesterday at h:mm A\": \"\", \"{{ models }}\": \"النماذج: {{ models }}\", \"{{COUNT}} Available Tools\": \"\", \"{{COUNT}} characters\": \"\", \"{{COUNT}} extracted l… 证据：`src/lib/i18n/locales/ar/translation.json`\n- **Translation**（structured_config）：{ \"-1 for no limit, or a positive integer for a specific limit\": \"Limitsiz üçün -1, yaxud konkret limit üçün müsbət tam ədəd daxil edin\", \"'s', 'm', 'h', 'd', 'w' or '-1' for no expiration.\": \"Vaxt məhdudiyyəti olmaması üçün 's', 'm', 'h', 'd', 'w' və ya '-1'.\", \" e.g. sh webui.sh --api --api-auth username password \": \" məs. sh webui.sh --api --api-auth istifadəçiadı şifrə \", \" e.g. sh webui.sh --api \": \" məs. 
sh webui.sh --api \", \" latest \": \" ən sonuncu \", \" leave blank for to use commercial endpoint \": \" kommersiya son nöqtəsindən istifadə etmək üçün boş buraxın \", \" Last dddd at h:mm A\": \" Sonuncu dddd saat h:mm A\", \" Today at h:mm A\": \" Bu gün saat h:mm A\", \" Yesterday at h:mm A\": \" Dü… 证据：`src/lib/i18n/locales/az-AZ/translation.json`\n- **Translation**（structured_config）：{ \"-1 for no limit, or a positive integer for a specific limit\": \"-1 за липса на ограничение или положително цяло число за определено ограничение.\", \"'s', 'm', 'h', 'd', 'w' or '-1' for no expiration.\": \"'s', 'm', 'h', 'd', 'w' или '-1' за неограничен срок.\", \" e.g. sh webui.sh --api --api-auth username password \": \" напр. sh webui.sh --api --api-auth username password \", \" e.g. sh webui.sh --api \": \" напр. sh webui.sh --api \", \" latest \": \" последна \", \" leave blank for to use commercial endpoint \": \"\", \" Last dddd at h:mm A\": \"\", \" Today at h:mm A\": \"\", \" Yesterday at h:mm A\": \"\", \"{{ models }}\": \"{{ models }}\", \"{{COUNT}} Available Tools\": \"\", \"{{COUNT}} characters\": \"\", \"{{COUNT}} extra… 证据：`src/lib/i18n/locales/bg-BG/translation.json`\n- **Translation**（structured_config）：{ \"-1 for no limit, or a positive integer for a specific limit\": \"\", \"'s', 'm', 'h', 'd', 'w' or '-1' for no expiration.\": \"'s', 'm', 'h', 'd', 'w' অথবা অনির্দিষ্টকাল মেয়াদের জন্য '-1' \", \" e.g. sh webui.sh --api --api-auth username password \": \"\", \" e.g. 
sh webui.sh --api \": \" যেমন sh webui.sh --api \", \" latest \": \" সর্বশেষ \", \" leave blank for to use commercial endpoint \": \"\", \" Last dddd at h:mm A\": \"\", \" Today at h:mm A\": \"\", \" Yesterday at h:mm A\": \"\", \"{{ models }}\": \"{{ মডেল}}\", \"{{COUNT}} Available Tools\": \"\", \"{{COUNT}} characters\": \"\", \"{{COUNT}} extracted lines\": \"\", \"{{COUNT}} files\": \"\", \"{{COUNT}} hidden lines\": \"\", \"{{COUNT}} members\": \"\", \"{{COUNT}} Replies\": \"\", \"{{COUNT}}… 证据：`src/lib/i18n/locales/bn-BD/translation.json`\n- **Translation**（structured_config）：{ \"-1 for no limit, or a positive integer for a specific limit\": \"-1 ནི་ཚད་མེད་པའི་ཆེད་དམ། ཡང་ན་ཧྲིལ་གྲངས་དགོས་ངེས་ཤིག་ཚད་བཀག་ངེས་ཅན་ཞིག་གི་ཆེད་དུ།\", \"'s', 'm', 'h', 'd', 'w' or '-1' for no expiration.\": \"'s', 'm', 'h', 'd', 'w' ཡང་ན་ '-1' དུས་ཚོད་རྫོགས་མི་དགོས་པའི་ཆེད་དུ།\", \" e.g. sh webui.sh --api --api-auth username password \": \" དཔེར་ན། sh webui.sh --api --api-auth username password \", \" e.g. sh webui.sh --api \": \" དཔེར་ན། sh webui.sh --api \", \" latest \": \" ཆེས་གསར། \", \" leave blank for to use commercial endpoint \": \"\", \" Last dddd at h:mm A\": \"\", \" Today at h:mm A\": \"\", \" Yesterday at h:mm A\": \"\", \"{{ models }}\": \"{{ models }}\", \"{{COUNT}} Available Tools\": \"\", \"{{COUNT}} characters\":… 证据：`src/lib/i18n/locales/bo-TB/translation.json`\n- **Translation**（structured_config）：{ \"-1 for no limit, or a positive integer for a specific limit\": \"\", \"'s', 'm', 'h', 'd', 'w' or '-1' for no expiration.\": \"'s', 'm', 'h', 'd', 'w' ili '-1' za bez isteka.\", \" e.g. sh webui.sh --api --api-auth username password \": \"\", \" e.g. sh webui.sh --api \": \" npr. 
sh webui.sh --api \", \" latest \": \" najnovije \", \" leave blank for to use commercial endpoint \": \"\", \" Last dddd at h:mm A\": \"\", \" Today at h:mm A\": \"\", \" Yesterday at h:mm A\": \"\", \"{{ models }}\": \"{{ modeli }}\", \"{{COUNT}} Available Tools\": \"\", \"{{COUNT}} characters\": \"\", \"{{COUNT}} extracted lines\": \"\", \"{{COUNT}} files\": \"\", \"{{COUNT}} hidden lines\": \"\", \"{{COUNT}} members\": \"\", \"{{COUNT}} Replies\": \"\", \"{{COUNT}} Rows\": \"\"… 证据：`src/lib/i18n/locales/bs-BA/translation.json`\n- **Translation**（structured_config）：{ \"-1 for no limit, or a positive integer for a specific limit\": \"-1 per a cap límit, o un nombre positiu per a un límit específic\", \"'s', 'm', 'h', 'd', 'w' or '-1' for no expiration.\": \"'s', 'm', 'h', 'd', 'w' o '-1' perquè no caduqui mai.\", \" e.g. sh webui.sh --api --api-auth username password \": \" p. ex. sh webui.sh --api --api-auth username password \", \" e.g. sh webui.sh --api \": \" p. ex. sh webui.sh --api \", \" latest \": \" últim \", \" leave blank for to use commercial endpoint \": \" deixa-ho buit per utilitzar un punt d'accés comercial \", \" Last dddd at h:mm A\": \" Darrer dddd a h:mm A\", \" Today at h:mm A\": \" Avui a les h:mm A\", \" Yesterday at h:mm A\": \" Ahir a les h:mm A\", \"{{ models }}\"… 证据：`src/lib/i18n/locales/ca-ES/translation.json`\n- **Translation**（structured_config）：{ \"-1 for no limit, or a positive integer for a specific limit\": \"\", \"'s', 'm', 'h', 'd', 'w' or '-1' for no expiration.\": \"'s', 'm', 'h', 'd', 'w' o '-1' para walay expiration.\", \" e.g. sh webui.sh --api --api-auth username password \": \"\", \" e.g. 
sh webui.sh --api \": \" pananglitan sh webui.sh --api \", \" latest \": \"\", \" leave blank for to use commercial endpoint \": \"\", \" Last dddd at h:mm A\": \"\", \" Today at h:mm A\": \"\", \" Yesterday at h:mm A\": \"\", \"{{ models }}\": \"\", \"{{COUNT}} Available Tools\": \"\", \"{{COUNT}} characters\": \"\", \"{{COUNT}} extracted lines\": \"\", \"{{COUNT}} files\": \"\", \"{{COUNT}} hidden lines\": \"\", \"{{COUNT}} members\": \"\", \"{{COUNT}} Replies\": \"\", \"{{COUNT}} Rows\": \"\", \"{{count… 证据：`src/lib/i18n/locales/ceb-PH/translation.json`\n- **Translation**（structured_config）：{ \"-1 for no limit, or a positive integer for a specific limit\": \"-1 pro žádný limit, nebo kladné celé číslo pro specifický limit\", \"'s', 'm', 'h', 'd', 'w' or '-1' for no expiration.\": \"'s', 'm', 'h', 'd', 'w' nebo '-1' pro žádné vypršení.\", \" e.g. sh webui.sh --api --api-auth username password \": \" např. sh webui.sh --api --api-auth username password \", \" e.g. sh webui.sh --api \": \" např. sh webui.sh --api \", \" latest \": \" nejnovější \", \" leave blank for to use commercial endpoint \": \" ponechte prázdné pro použití komerčního koncového bodu \", \" Last dddd at h:mm A\": \" Naposledy dddd v h:mm A\", \" Today at h:mm A\": \" Dnes v h:mm A\", \" Yesterday at h:mm A\": \" Včera v h:mm A\", \"{{ models }}\":… 证据：`src/lib/i18n/locales/cs-CZ/translation.json`\n- **Translation**（structured_config）：{ \"-1 for no limit, or a positive integer for a specific limit\": \"-1 for ingen grænse, eller et positivt heltal for en specifik grænse\", \"'s', 'm', 'h', 'd', 'w' or '-1' for no expiration.\": \"'s', 'm', 'h', 'd', 'w' eller '-1' for ingen udløb\", \" e.g. sh webui.sh --api --api-auth username password \": \" f.eks. sh webui.sh --api --api-auth username password \", \" e.g. sh webui.sh --api \": \" f.eks. 
sh webui.sh --api \", \" latest \": \" seneste \", \" leave blank for to use commercial endpoint \": \" lad stå tom for at bruge kommercielt endpoint \", \" Last dddd at h:mm A\": \" Sidste dddd kl. h:mm A\", \" Today at h:mm A\": \" I dag kl. h:mm A\", \" Yesterday at h:mm A\": \" I går kl. h:mm A\", \"{{ models }}\": \"{{… 证据：`src/lib/i18n/locales/da-DK/translation.json`\n- **Translation**（structured_config）：{ \"-1 for no limit, or a positive integer for a specific limit\": \"-1 für kein Limit oder eine positive ganze Zahl für ein spezifisches Limit\", \"'s', 'm', 'h', 'd', 'w' or '-1' for no expiration.\": \"'s', 'm', 'h', 'd', 'w' oder '-1' für kein Ablaufdatum.\", \" e.g. sh webui.sh --api --api-auth username password \": \" z. B. sh webui.sh --api --api-auth username password \", \" e.g. sh webui.sh --api \": \" z. B. sh webui.sh --api \", \" latest \": \" neueste \", \" leave blank for to use commercial endpoint \": \" leer lassen, um kommerziellen Endpunkt zu verwenden \", \" Last dddd at h:mm A\": \" Letzten dddd um h:mm A\", \" Today at h:mm A\": \" Heute um h:mm A\", \" Yesterday at h:mm A\": \" Gestern um h:mm A\", \"{{… 证据：`src/lib/i18n/locales/de-DE/translation.json`\n- **Translation**（structured_config）：{ \"-1 for no limit, or a positive integer for a specific limit\": \"\", \"'s', 'm', 'h', 'd', 'w' or '-1' for no expiration.\": \"'s', 'm', 'h', 'd', 'w' or '-1' for no expire. Much permanent, very wow.\", \" e.g. sh webui.sh --api --api-auth username password \": \"\", \" e.g. sh webui.sh --api \": \" such e.g. 
sh webui.sh --api \", \" latest \": \" much latest \", \" leave blank for to use commercial endpoint \": \"\", \" Last dddd at h:mm A\": \"\", \" Today at h:mm A\": \"\", \" Yesterday at h:mm A\": \"\", \"{{ models }}\": \"\", \"{{COUNT}} Available Tools\": \"\", \"{{COUNT}} characters\": \"\", \"{{COUNT}} extracted lines\": \"\", \"{{COUNT}} files\": \"\", \"{{COUNT}} hidden lines\": \"\", \"{{COUNT}} members\": \"\", \"{{COUNT}} Replies\": \"\",… 证据：`src/lib/i18n/locales/dg-DG/translation.json`\n- **Translation**（structured_config）：{ \"-1 for no limit, or a positive integer for a specific limit\": \"-1 για απεριόριστο, ή έναν θετικό ακέραιο για ένα συγκεκριμένο όριο\", \"'s', 'm', 'h', 'd', 'w' or '-1' for no expiration.\": \"'s', 'm', 'h', 'd', 'w' ή '-1' για απεριόριστο.\", \" e.g. sh webui.sh --api --api-auth username password \": \" π.χ. sh webui.sh --api --api-auth username password \", \" e.g. sh webui.sh --api \": \" π.χ. sh webui.sh --api \", \" latest \": \" τελευταία \", \" leave blank for to use commercial endpoint \": \" αφήστε κενό για να χρησιμοποιήσετε εμπορικό endpoint \", \" Last dddd at h:mm A\": \"\", \" Today at h:mm A\": \"\", \" Yesterday at h:mm A\": \"\", \"{{ models }}\": \"{{ models }}\", \"{{COUNT}} Available Tools\": \"\", \"{{COUNT}}… 证据：`src/lib/i18n/locales/el-GR/translation.json`\n- **Translation**（structured_config）：{ \"-1 for no limit, or a positive integer for a specific limit\": \"\", \"'s', 'm', 'h', 'd', 'w' or '-1' for no expiration.\": \"\", \" e.g. sh webui.sh --api --api-auth username password \": \"\", \" e.g. 
sh webui.sh --api \": \"\", \" latest \": \"\", \" leave blank for to use commercial endpoint \": \"\", \" Last dddd at h:mm A\": \"\", \" Today at h:mm A\": \"\", \" Yesterday at h:mm A\": \"\", \"{{ models }}\": \"\", \"{{COUNT}} Available Tools\": \"\", \"{{COUNT}} characters\": \"\", \"{{COUNT}} extracted lines\": \"\", \"{{COUNT}} files\": \"\", \"{{COUNT}} hidden lines\": \"\", \"{{COUNT}} members\": \"\", \"{{COUNT}} Replies\": \"\", \"{{COUNT}} Rows\": \"\", \"{{count}} selected one\": \"\", \"{{count}} selected other\": \"\", \"{{COUNT}} Sources\": \"\", \"{{CO… 证据：`src/lib/i18n/locales/en-GB/translation.json`\n- **Translation**（structured_config）：{ \"-1 for no limit, or a positive integer for a specific limit\": \"\", \"'s', 'm', 'h', 'd', 'w' or '-1' for no expiration.\": \"\", \" e.g. sh webui.sh --api --api-auth username password \": \"\", \" e.g. sh webui.sh --api \": \"\", \" latest \": \"\", \" leave blank for to use commercial endpoint \": \"\", \" Last dddd at h:mm A\": \"\", \" Today at h:mm A\": \"\", \" Yesterday at h:mm A\": \"\", \"{{ models }}\": \"\", \"{{COUNT}} Available Tools\": \"\", \"{{COUNT}} characters\": \"\", \"{{COUNT}} extracted lines\": \"\", \"{{COUNT}} files\": \"\", \"{{COUNT}} hidden lines\": \"\", \"{{COUNT}} members\": \"\", \"{{COUNT}} Replies\": \"\", \"{{COUNT}} Rows\": \"\", \"{{count}} selected one\": \"\", \"{{count}} selected other\": \"\", \"{{COUNT}} Sources\": \"\", \"{{CO… 证据：`src/lib/i18n/locales/en-US/translation.json`\n- **Translation**（structured_config）：{ \"-1 for no limit, or a positive integer for a specific limit\": \"-1 para ilimitado, o un número entero positivo para un límite específico.\", \"'s', 'm', 'h', 'd', 'w' or '-1' for no expiration.\": \"'s', 'm', 'h', 'd', 'w' o '-1' para evitar expiración.\", \" e.g. sh webui.sh --api --api-auth username password \": \" p.ej. sh webui.sh --api --api-auth username password \", \" e.g. sh webui.sh --api \": \" p.ej. 
sh webui.sh --api \", \" latest \": \" la última \", \" leave blank for to use commercial endpoint \": \" dejar vacío para usar el endpoint comercial \", \" Last dddd at h:mm A\": \" Último dddd a las h:mm A\", \" Today at h:mm A\": \" Hoy a las h:mm A\", \" Yesterday at h:mm A\": \" Ayer a las h:mm A\", \"{{ model… 证据：`src/lib/i18n/locales/es-ES/translation.json`\n- **Translation**（structured_config）：{ \"-1 for no limit, or a positive integer for a specific limit\": \"-1 piirangu puudumisel või positiivne täisarv konkreetse piirangu jaoks\", \"'s', 'm', 'h', 'd', 'w' or '-1' for no expiration.\": \"'s', 'm', 'h', 'd', 'w' või '-1' aegumiseta.\", \" e.g. sh webui.sh --api --api-auth username password \": \" nt sh webui.sh --api --api-auth kasutajanimi parool \", \" e.g. sh webui.sh --api \": \" nt sh webui.sh --api \", \" latest \": \" uusim \", \" leave blank for to use commercial endpoint \": \" jäta tühjaks, et kasutada kommertslõpp-punkti \", \" Last dddd at h:mm A\": \" Eelmisel dddd kell h:mm A\", \" Today at h:mm A\": \" Täna kell h:mm A\", \" Yesterday at h:mm A\": \" Eile kell h:mm A\", \"{{ models }}\": \"{{ mudelid… 证据：`src/lib/i18n/locales/et-EE/translation.json`\n- **Translation**（structured_config）：{ \"-1 for no limit, or a positive integer for a specific limit\": \"\", \"'s', 'm', 'h', 'd', 'w' or '-1' for no expiration.\": \"'s', 'm', 'h', 'd', 'w' edo '-1' iraungitzerik ez izateko.\", \" e.g. sh webui.sh --api --api-auth username password \": \" adib. sh webui.sh --api --api-auth erabiltzaile pasahitza \", \" e.g. sh webui.sh --api \": \" adib. 
sh webui.sh --api \", \" latest \": \" azkena \", \" leave blank for to use commercial endpoint \": \"\", \" Last dddd at h:mm A\": \"\", \" Today at h:mm A\": \"\", \" Yesterday at h:mm A\": \"\", \"{{ models }}\": \"{{ models }}\", \"{{COUNT}} Available Tools\": \"\", \"{{COUNT}} characters\": \"\", \"{{COUNT}} extracted lines\": \"\", \"{{COUNT}} files\": \"\", \"{{COUNT}} hidden lines\": \"\", \"{… 证据：`src/lib/i18n/locales/eu-ES/translation.json`\n- **Translation**（structured_config）：{ \"-1 for no limit, or a positive integer for a specific limit\": \"-1 برای بدون محدودیت، یا یک عدد مثبت برای محدودیت مشخص\", \"'s', 'm', 'h', 'd', 'w' or '-1' for no expiration.\": \"'s', 'm', 'h', 'd', 'w' یا '-1' برای غیر فعال کردن انقضا.\", \" e.g. sh webui.sh --api --api-auth username password \": \" مثال: sh webui.sh --api --api-auth username password \", \" e.g. sh webui.sh --api \": \" e.g. sh webui.sh --api \", \" latest \": \" آخرین \", \" leave blank for to use commercial endpoint \": \" برای استفاده از نقطه پایانی تجاری خالی بگذارید \", \" Last dddd at h:mm A\": \" آخرین dddd در h:mm A\", \" Today at h:mm A\": \" امروز در h:mm A\", \" Yesterday at h:mm A\": \" دیروز در h:mm A\", \"{{ models }}\": \"{{ models }}\", \"{… 证据：`src/lib/i18n/locales/fa-IR/translation.json`\n- **Translation**（structured_config）：{ \"-1 for no limit, or a positive integer for a specific limit\": \"-1 rajoituksetta tai positiivinen kokonaisluku enimmäismääräksi\", \"'s', 'm', 'h', 'd', 'w' or '-1' for no expiration.\": \"'s', 'm', 'h', 'd', 'w' tai '-1' jottei vanhene.\", \" e.g. sh webui.sh --api --api-auth username password \": \" esim. sh webui.sh --api --api-auth username password \", \" e.g. sh webui.sh --api \": \" esim. 
sh webui.sh --api \", \" latest \": \" uusin \", \" leave blank for to use commercial endpoint \": \" Jätä tyhjäksi, jos haluat käyttää kaupallista päätettä \", \" Last dddd at h:mm A\": \" Viimeisin dddd h:mm A\", \" Today at h:mm A\": \" Tänään h:mm A\", \" Yesterday at h:mm A\": \" Eilen h:mm A\", \"{{ models }}\": \"{{ mallit }}… 证据：`src/lib/i18n/locales/fi-FI/translation.json`\n- **Translation**（structured_config）：{ \"-1 for no limit, or a positive integer for a specific limit\": \"\", \"'s', 'm', 'h', 'd', 'w' or '-1' for no expiration.\": \"\", \" e.g. sh webui.sh --api --api-auth username password \": \"\", \" e.g. sh webui.sh --api \": \"\", \" latest \": \"\", \" leave blank for to use commercial endpoint \": \"\", \" Last dddd at h:mm A\": \"\", \" Today at h:mm A\": \"\", \" Yesterday at h:mm A\": \"\", \"{{ models }}\": \"\", \"{{COUNT}} Available Tools\": \"\", \"{{COUNT}} characters\": \"\", \"{{COUNT}} extracted lines\": \"\", \"{{COUNT}} files\": \"\", \"{{COUNT}} hidden lines\": \"\", \"{{COUNT}} members\": \"\", \"{{COUNT}} Replies\": \"\", \"{{COUNT}} Rows\": \"\", \"{{count}} selected one\": \"\", \"{{count}} selected many\": \"\", \"{{count}} selected other\": \"\",… 证据：`src/lib/i18n/locales/fil-PH/translation.json`\n- **Translation**（structured_config）：{ \"-1 for no limit, or a positive integer for a specific limit\": \"-1 pour aucune limite, ou un entier positif pour une limite spécifique\", \"'s', 'm', 'h', 'd', 'w' or '-1' for no expiration.\": \" 's', 'm', 'h', 'd', 'w' ou '-1' pour une durée illimitée.\", \" e.g. sh webui.sh --api --api-auth username password \": \" par ex. sh webui.sh --api --api-auth username password \", \" e.g. 
sh webui.sh --api \": \" par exemple sh webui.sh --api \", \" latest \": \" dernière version \", \" leave blank for to use commercial endpoint \": \"laisser vide pour l'utilisation d'un point d'extension commercial\", \" Last dddd at h:mm A\": \"\", \" Today at h:mm A\": \"\", \" Yesterday at h:mm A\": \"\", \"{{ models }}\": \"{{ models }}\", \"… 证据：`src/lib/i18n/locales/fr-CA/translation.json`\n- **Translation**（structured_config）：{ \"-1 for no limit, or a positive integer for a specific limit\": \"-1 pour aucune limite, ou un entier positif pour une limite spécifique\", \"'s', 'm', 'h', 'd', 'w' or '-1' for no expiration.\": \" 's', 'm', 'h', 'd', 'w' ou '-1' pour une durée illimitée.\", \" e.g. sh webui.sh --api --api-auth username password \": \" par ex. sh webui.sh --api --api-auth username password \", \" e.g. sh webui.sh --api \": \" par exemple sh webui.sh --api \", \" latest \": \" dernière version \", \" leave blank for to use commercial endpoint \": \"laisser vide pour l'utilisation d'un point d'extension commercial\", \" Last dddd at h:mm A\": \"dddd dernier à H h mm\", \" Today at h:mm A\": \" Aujourd'hui à H h mm\", \" Yesterday at h:mm… 证据：`src/lib/i18n/locales/fr-FR/translation.json`\n- **Translation**（structured_config）：{ \"-1 for no limit, or a positive integer for a specific limit\": \"-1 para ilimitado, ou un número enteiro positivo para un límite específico.\", \"'s', 'm', 'h', 'd', 'w' or '-1' for no expiration.\": \"'s', 'm', 'h', 'd', 'w' o '-1' para evitar expiración.\", \" e.g. sh webui.sh --api --api-auth username password \": \" p.ej. sh webui.sh --api --api-auth username password \", \" e.g. sh webui.sh --api \": \" p.ej. 
sh webui.sh --api \", \" latest \": \" O mais recente \", \" leave blank for to use commercial endpoint \": \"\", \" Last dddd at h:mm A\": \"\", \" Today at h:mm A\": \"\", \" Yesterday at h:mm A\": \"\", \"{{ models }}\": \"{{ models }}\", \"{{COUNT}} Available Tools\": \"\", \"{{COUNT}} characters\": \"\", \"{{COUNT}} ext… 证据：`src/lib/i18n/locales/gl-ES/translation.json`\n- **Translation**（structured_config）：{ \"-1 for no limit, or a positive integer for a specific limit\": \"\", \"'s', 'm', 'h', 'd', 'w' or '-1' for no expiration.\": \"'s', 'm', 'h', 'd', 'w' או '-1' ללא תפוגה.\", \" e.g. sh webui.sh --api --api-auth username password \": \"\", \" e.g. sh webui.sh --api \": \" למשל sh webui.sh --api \", \" latest \": \" האחרון \", \" leave blank for to use commercial endpoint \": \"\", \" Last dddd at h:mm A\": \"\", \" Today at h:mm A\": \"\", \" Yesterday at h:mm A\": \"\", \"{{ models }}\": \"{{ מודלים }}\", \"{{COUNT}} Available Tools\": \"\", \"{{COUNT}} characters\": \"\", \"{{COUNT}} extracted lines\": \"\", \"{{COUNT}} files\": \"\", \"{{COUNT}} hidden lines\": \"\", \"{{COUNT}} members\": \"\", \"{{COUNT}} Replies\": \"\", \"{{COUNT}} Rows\": \"\", \"{{cou… 证据：`src/lib/i18n/locales/he-IL/translation.json`\n- **Translation**（structured_config）：{ \"-1 for no limit, or a positive integer for a specific limit\": \"\", \"'s', 'm', 'h', 'd', 'w' or '-1' for no expiration.\": \"'s', 'm', 'h', 'd', 'w' or '-1' बिना किसी समाप्ति के\", \" e.g. sh webui.sh --api --api-auth username password \": \"\", \" e.g. sh webui.sh --api \": \" e.g. 
sh webui.sh --api \", \" latest \": \" latest \", \" leave blank for to use commercial endpoint \": \"\", \" Last dddd at h:mm A\": \"\", \" Today at h:mm A\": \"\", \" Yesterday at h:mm A\": \"\", \"{{ models }}\": \"{{ मॉडल }}\", \"{{COUNT}} Available Tools\": \"\", \"{{COUNT}} characters\": \"\", \"{{COUNT}} extracted lines\": \"\", \"{{COUNT}} files\": \"\", \"{{COUNT}} hidden lines\": \"\", \"{{COUNT}} members\": \"\", \"{{COUNT}} Replies\": \"\", \"{{COUNT}} Rows\": \"\"… 证据：`src/lib/i18n/locales/hi-IN/translation.json`\n- **Translation**（structured_config）：{ \"-1 for no limit, or a positive integer for a specific limit\": \"\", \"'s', 'm', 'h', 'd', 'w' or '-1' for no expiration.\": \"'s', 'm', 'h', 'd', 'w' ili '-1' za bez isteka.\", \" e.g. sh webui.sh --api --api-auth username password \": \"\", \" e.g. sh webui.sh --api \": \" npr. sh webui.sh --api \", \" latest \": \" najnovije \", \" leave blank for to use commercial endpoint \": \"\", \" Last dddd at h:mm A\": \"\", \" Today at h:mm A\": \"\", \" Yesterday at h:mm A\": \"\", \"{{ models }}\": \"{{ modeli }}\", \"{{COUNT}} Available Tools\": \"\", \"{{COUNT}} characters\": \"\", \"{{COUNT}} extracted lines\": \"\", \"{{COUNT}} files\": \"\", \"{{COUNT}} hidden lines\": \"\", \"{{COUNT}} members\": \"\", \"{{COUNT}} Replies\": \"\", \"{{COUNT}} Rows\": \"\"… 证据：`src/lib/i18n/locales/hr-HR/translation.json`\n- **Translation**（structured_config）：{ \"-1 for no limit, or a positive integer for a specific limit\": \"-1 a korlátlanhoz, vagy pozitív egész szám egy konkrét limithoz\", \"'s', 'm', 'h', 'd', 'w' or '-1' for no expiration.\": \"'s', 'm', 'h', 'd', 'w' vagy '-1' ha nincs lejárat.\", \" e.g. sh webui.sh --api --api-auth username password \": \" pl. sh webui.sh --api --api-auth username password \", \" e.g. sh webui.sh --api \": \" pl. 
sh webui.sh --api \", \" latest \": \" legújabb \", \" leave blank for to use commercial endpoint \": \"\", \" Last dddd at h:mm A\": \"\", \" Today at h:mm A\": \"\", \" Yesterday at h:mm A\": \"\", \"{{ models }}\": \"{{ modellek }}\", \"{{COUNT}} Available Tools\": \"{{COUNT}} Elérhető eszköz\", \"{{COUNT}} characters\": \"\", \"{{COUNT}} e… 证据：`src/lib/i18n/locales/hu-HU/translation.json`\n- **Translation**（structured_config）：{ \"-1 for no limit, or a positive integer for a specific limit\": \"\", \"'s', 'm', 'h', 'd', 'w' or '-1' for no expiration.\": \"'s', 'm', 'h', 'd', 'w' atau '-1' untuk tidak ada kedaluwarsa.\", \" e.g. sh webui.sh --api --api-auth username password \": \" contoh: sh webui.sh --api --api-auth username password \", \" e.g. sh webui.sh --api \": \" contoh: sh webui.sh --api \", \" latest \": \" terbaru \", \" leave blank for to use commercial endpoint \": \"\", \" Last dddd at h:mm A\": \"\", \" Today at h:mm A\": \"\", \" Yesterday at h:mm A\": \"\", \"{{ models }}\": \"{{ models }}\", \"{{COUNT}} Available Tools\": \"\", \"{{COUNT}} characters\": \"\", \"{{COUNT}} extracted lines\": \"\", \"{{COUNT}} files\": \"\", \"{{COUNT}} hidden lines\": \"\"… 证据：`src/lib/i18n/locales/id-ID/translation.json`\n- **Translation**（structured_config）：{ \"-1 for no limit, or a positive integer for a specific limit\": \"-1 gan teorainn, nó slánuimhir dheimhneach le haghaidh teorann sonrach\", \"'s', 'm', 'h', 'd', 'w' or '-1' for no expiration.\": \"'s', 'm', 'h', 'd', 'w' nó '-1' gan aon éag.\", \" e.g. sh webui.sh --api --api-auth username password \": \" m.sh. sh webui.sh --api --api-auth username password \", \" e.g. sh webui.sh --api \": \" m.sh. 
sh webui.sh --api \", \" latest \": \" is déanaí \", \" leave blank for to use commercial endpoint \": \" fág bán le haghaidh críochphointe tráchtála a úsáid \", \" Last dddd at h:mm A\": \" Deireanach dddd ag h:mm A\", \" Today at h:mm A\": \" Inniu ag h:mm A\", \" Yesterday at h:mm A\": \" Inné ag h:mm A\", \"{{ models }}\": \"… 证据：`src/lib/i18n/locales/ie-GA/translation.json`\n- **Translation**（structured_config）：{ \"-1 for no limit, or a positive integer for a specific limit\": \"-1 per nessun limite, o un numero intero positivo per un limite specifico\", \"'s', 'm', 'h', 'd', 'w' or '-1' for no expiration.\": \"'s', 'm', 'h', 'd', 'w' o '-1' per nessuna scadenza.\", \" e.g. sh webui.sh --api --api-auth username password \": \" p.e. sh webui.sh --api --api-auth username password \", \" e.g. sh webui.sh --api \": \" p.e. sh webui.sh --api \", \" latest \": \" ultima \", \" leave blank for to use commercial endpoint \": \" lascia vuoto per utilizzare l'endpoint commerciale \", \" Last dddd at h:mm A\": \"\", \" Today at h:mm A\": \"\", \" Yesterday at h:mm A\": \"\", \"{{ models }}\": \"{{ modelli }}\", \"{{COUNT}} Available Tools\": \"{{COUN… 证据：`src/lib/i18n/locales/it-IT/translation.json`\n- **Translation**（structured_config）：{ \"-1 for no limit, or a positive integer for a specific limit\": \"-1で無制限、または正の整数で制限を指定\", \"'s', 'm', 'h', 'd', 'w' or '-1' for no expiration.\": \"'s', 'm', 'h', 'd', 'w' または '-1' で無期限。\", \" e.g. sh webui.sh --api --api-auth username password \": \" 例: sh webui.sh --api --api-auth username password \", \" e.g. 
sh webui.sh --api \": \" 例: sh webui.sh --api \", \" latest \": \" 最新 \", \" leave blank for to use commercial endpoint \": \" 商用エンドポイントを使用する場合は空欄のままにしてください \", \" Last dddd at h:mm A\": \"dddd h:mm A\", \" Today at h:mm A\": \" 今日の h:mm A\", \" Yesterday at h:mm A\": \" 昨日の h:mm A\", \"{{ models }}\": \"{{ モデル }}\", \"{{COUNT}} Available Tools\": \"{{COUNT}} 個の有効なツール\", \"{{COUNT}} characters\": \"{{COUNT}} 文字\", \"{{COUNT}} e… 证据：`src/lib/i18n/locales/ja-JP/translation.json`\n- **Translation**（structured_config）：{ \"-1 for no limit, or a positive integer for a specific limit\": \"-1 ლიმიტის გამოსართავად, ან დადებითი მთელი რიცხვი კონკრეტული ლიმიტისთვის\", \"'s', 'm', 'h', 'd', 'w' or '-1' for no expiration.\": \"'s', 'm', 'h', 'd', 'w' ან '-1' - უვადოსთვის.\", \" e.g. sh webui.sh --api --api-auth username password \": \" მაგ: sh webui.sh --api --api-auth username password \", \" e.g. sh webui.sh --api \": \" მაგ: sh webui.sh --api \", \" latest \": \" უახლესი \", \" leave blank for to use commercial endpoint \": \" დატოვეთ ცარიელი ფასიანი ბოლოწერტილის გამოსაყენებლად \", \" Last dddd at h:mm A\": \" ბოლო dddd დრო h:mm A\", \" Today at h:mm A\": \" დღეს, h:mm A\", \" Yesterday at h:mm A\": \" გუშინ, h:mm A\", \"{{ models }}\": \"{{ მოდელებ… 证据：`src/lib/i18n/locales/ka-GE/translation.json`\n- **Translation**（structured_config）：{ \"-1 for no limit, or a positive integer for a specific limit\": \"-1 war talast, neɣ aɣerwaḍ ilaw i talast tusdidt\", \"'s', 'm', 'h', 'd', 'w' or '-1' for no expiration.\": \"'s', 'm', 'h', 'd', 'w' neɣ '-1' i wakud war tilas.\", \" e.g. sh webui.sh --api --api-auth username password \": \" amedya. sh webui.sh --api --api-auth username password \", \" e.g. 
sh webui.sh --api \": \" amedya sh webui.sh --api \", \" latest \": \" Lqem aneggaru \", \" leave blank for to use commercial endpoint \": \"\", \" Last dddd at h:mm A\": \" Aneggaru dddd ɣef h:mm A\", \" Today at h:mm A\": \" Ass-a ɣef h:mm A\", \" Yesterday at h:mm A\": \" Iḍelli ɣef h:mm A\", \"{{ models }}\": \"{{ models }}\", \"{{COUNT}} Available Tools\": \"Amḍan n yifec… 证据：`src/lib/i18n/locales/kab-DZ/translation.json`\n- **Translation**（structured_config）：{ \"-1 for no limit, or a positive integer for a specific limit\": \"\", \"'s', 'm', 'h', 'd', 'w' or '-1' for no expiration.\": \"\", \" e.g. sh webui.sh --api --api-auth username password \": \"\", \" e.g. sh webui.sh --api \": \"\", \" latest \": \"\", \" leave blank for to use commercial endpoint \": \"\", \" Last dddd at h:mm A\": \"\", \" Today at h:mm A\": \"\", \" Yesterday at h:mm A\": \"\", \"{{ models }}\": \"\", \"{{COUNT}} Available Tools\": \"\", \"{{COUNT}} characters\": \"\", \"{{COUNT}} extracted lines\": \"\", \"{{COUNT}} files\": \"\", \"{{COUNT}} hidden lines\": \"\", \"{{COUNT}} members\": \"\", \"{{COUNT}} Replies\": \"\", \"{{COUNT}} Rows\": \"\", \"{{count}} selected other\": \"\", \"{{COUNT}} Sources\": \"\", \"{{COUNT}} words\": \"\", \"{{COUNT}}d… 证据：`src/lib/i18n/locales/ko-KR/translation.json`\n- **Languages**（structured_config）：{ \"code\": \"en-US\", \"title\": \"English US \" }, { \"code\": \"en-GB\", \"title\": \"English GB \" }, { \"code\": \"ar\", \"title\": \"Arabic العربية \" }, { \"code\": \"ar-BH\", \"title\": \"Arabic Bahrain \" }, { \"code\": \"az-AZ\", \"title\": \"Azərbaycanca\" }, { \"code\": \"eu-ES\", \"title\": \"Basque Euskara \" }, { \"code\": \"bn-BD\", \"title\": \"Bengali বাংলা \" }, { \"code\": \"bs-BA\", \"title\": \"Bosanski Latinica\" }, { \"code\": \"bo-TB\", \"title\": \"Tibetan བོད \" }, { \"code\": \"bg-BG\", \"title\": \"Bulgarian български \" }, { \"code\": \"ca-ES\", \"title\": \"Catalan català \" }, { \"code\": \"ceb-PH\", \"title\": \"Cebuano 
Filipino \" }, { \"code\": \"hr-HR\", \"title\": \"Croatian Hrvatski \" }, { \"code\": \"cs-CZ\", \"title\": \"Czech čeština \" }, { \"code\": \"da-DK\",… 证据：`src/lib/i18n/locales/languages.json`\n- **Translation**（structured_config）：{ \"-1 for no limit, or a positive integer for a specific limit\": \"\", \"'s', 'm', 'h', 'd', 'w' or '-1' for no expiration.\": \"'s', 'm', 'h', 'd', 'w' arba '-1' kad neišteitų iš galiojimo.\", \" e.g. sh webui.sh --api --api-auth username password \": \" pvz. sh webui.sh --api --api-auth username password \", \" e.g. sh webui.sh --api \": \" pvz. sh webui.sh --api \", \" latest \": \" naujausias \", \" leave blank for to use commercial endpoint \": \"\", \" Last dddd at h:mm A\": \"\", \" Today at h:mm A\": \"\", \" Yesterday at h:mm A\": \"\", \"{{ models }}\": \"{{ models }}\", \"{{COUNT}} Available Tools\": \"\", \"{{COUNT}} characters\": \"\", \"{{COUNT}} extracted lines\": \"\", \"{{COUNT}} files\": \"\", \"{{COUNT}} hidden lines\": \"\", \"{… 证据：`src/lib/i18n/locales/lt-LT/translation.json`\n- **Translation**（structured_config）：{ \"-1 for no limit, or a positive integer for a specific limit\": \"-1 bez ierobežojuma vai pozitīvs skaitlis konkrētam ierobežojumam\", \"'s', 'm', 'h', 'd', 'w' or '-1' for no expiration.\": \"'s', 'm', 'h', 'd', 'w' vai '-1' bez derīguma termiņa.\", \" e.g. sh webui.sh --api --api-auth username password \": \" piem., sh webui.sh --api --api-auth lietotājvārds parole \", \" e.g. sh webui.sh --api \": \" piem., sh webui.sh --api \", \" latest \": \" jaunākā \", \" leave blank for to use commercial endpoint \": \" atstājiet tukšu, lai izmantotu komerciālo galapunktu \", \" Last dddd at h:mm A\": \" Pagājušā dddd plkst. H:mm\", \" Today at h:mm A\": \" Šodien plkst. H:mm\", \" Yesterday at h:mm A\": \" Vakar plkst. 
H:mm\", \"{… 证据：`src/lib/i18n/locales/lv-LV/translation.json`\n- **Translation**（structured_config）：{ \"-1 for no limit, or a positive integer for a specific limit\": \"-1 untuk tanpa had, atau integer positif untuk had tertentu\", \"'s', 'm', 'h', 'd', 'w' or '-1' for no expiration.\": \"'s', 'm', 'h', 'd', 'w' or '-1' untuk tiada tempoh luput.\", \" e.g. sh webui.sh --api --api-auth username password \": \" contoh sh webui.sh --api --api-auth username password \", \" e.g. sh webui.sh --api \": \" contoh sh webui.sh --api \", \" latest \": \" terkini \", \" leave blank for to use commercial endpoint \": \" biarkan kosong untuk menggunakan titik akhir komersial \", \" Last dddd at h:mm A\": \" Terakhir dddd pada h:mm A\", \" Today at h:mm A\": \" Hari ini pada h:mm A\", \" Yesterday at h:mm A\": \" Semalam pada h:mm A\", \"{… 证据：`src/lib/i18n/locales/ms-MY/translation.json`\n- **Translation**（structured_config）：{ \"-1 for no limit, or a positive integer for a specific limit\": \"-1 angir ingen grense, eller angi et positivt heltall for en bestemt grense\", \"'s', 'm', 'h', 'd', 'w' or '-1' for no expiration.\": \"'s', 'm', 't', 'd', 'u' eller '-1' for ingen utløp.\", \" e.g. sh webui.sh --api --api-auth username password \": \" f.eks. sh webui.sh --api --api-auth brukernavn passord \", \" e.g. sh webui.sh --api \": \" f.eks. 
sh webui.sh --api \", \" latest \": \" siste \", \" leave blank for to use commercial endpoint \": \"\", \" Last dddd at h:mm A\": \"\", \" Today at h:mm A\": \"\", \" Yesterday at h:mm A\": \"\", \"{{ models }}\": \"{{ modeller }}\", \"{{COUNT}} Available Tools\": \"\", \"{{COUNT}} characters\": \"\", \"{{COUNT}} extracted… 证据：`src/lib/i18n/locales/nb-NO/translation.json`\n- **Translation**（structured_config）：{ \"-1 for no limit, or a positive integer for a specific limit\": \"-1 voor geen limiet, of een positief getal voor een specifieke limiet\", \"'s', 'm', 'h', 'd', 'w' or '-1' for no expiration.\": \"'s', 'm', 'h', 'd', 'w', of '-1' voor geen vervaldatum.\", \" e.g. sh webui.sh --api --api-auth username password \": \" bv. sh webui.sh --api --api-auth gebruikersnaam wachtwoord \", \" e.g. sh webui.sh --api \": \" bv. sh webui.sh --api \", \" latest \": \" nieuwste \", \" leave blank for to use commercial endpoint \": \" laat leeg om een commercieel endpoint te gebruiken \", \" Last dddd at h:mm A\": \" Vorige dddd om h:mm A\", \" Today at h:mm A\": \" Vandaag om h:mm A\", \" Yesterday at h:mm A\": \" Gisteren om h:mm A\", \"{{… 证据：`src/lib/i18n/locales/nl-NL/translation.json`\n- **Translation**（structured_config）：{ \"-1 for no limit, or a positive integer for a specific limit\": \"\", \"'s', 'm', 'h', 'd', 'w' or '-1' for no expiration.\": \"'ਸ', 'ਮ', 'ਘੰ', 'ਦ', 'ਹਫ਼ਤਾ' ਜਾਂ '-1' ਬਿਨਾ ਮਿਆਦ ਦੇ।\", \" e.g. sh webui.sh --api --api-auth username password \": \"\", \" e.g. 
sh webui.sh --api \": \" ਉਦਾਹਰਣ ਦੇ ਤੌਰ ਤੇ sh webui.sh --api \", \" latest \": \" ਤਾਜ਼ਾ \", \" leave blank for to use commercial endpoint \": \"\", \" Last dddd at h:mm A\": \"\", \" Today at h:mm A\": \"\", \" Yesterday at h:mm A\": \"\", \"{{ models }}\": \"{{ ਮਾਡਲ }}\", \"{{COUNT}} Available Tools\": \"\", \"{{COUNT}} characters\": \"\", \"{{COUNT}} extracted lines\": \"\", \"{{COUNT}} files\": \"\", \"{{COUNT}} hidden lines\": \"\", \"{{COUNT}} members\": \"\", \"{{COUNT}} Replies\": \"\", \"{{COUNT}}… 证据：`src/lib/i18n/locales/pa-IN/translation.json`\n- **Translation**（structured_config）：{ \"-1 for no limit, or a positive integer for a specific limit\": \"-1 oznacza brak limitu, liczba dodatnia oznacza konkretny limit\", \"'s', 'm', 'h', 'd', 'w' or '-1' for no expiration.\": \"'s', 'm', 'h', 'd', 'w' lub '-1' dla braku wygaśnięcia.\", \" e.g. sh webui.sh --api --api-auth username password \": \" np. sh webui.sh --api --api-auth username password \", \" e.g. sh webui.sh --api \": \" np. sh webui.sh --api \", \" latest \": \" najnowszy \", \" leave blank for to use commercial endpoint \": \" pozostaw puste, aby użyć komercyjnego punktu końcowego \", \" Last dddd at h:mm A\": \" Ostatnio dddd o H:mm\", \" Today at h:mm A\": \" Dziś o H:mm\", \" Yesterday at h:mm A\": \" Wczoraj o H:mm\", \"{{ models }}\": \"{{ mod… 证据：`src/lib/i18n/locales/pl-PL/translation.json`\n- **Translation**（structured_config）：{ \"-1 for no limit, or a positive integer for a specific limit\": \"-1 para nenhum limite ou um inteiro positivo para um limite específico\", \"'s', 'm', 'h', 'd', 'w' or '-1' for no expiration.\": \"'s', 'm', 'h', 'd', 'w' ou '-1' para sem expiração.\", \" e.g. sh webui.sh --api --api-auth username password \": \" por exemplo, sh webui.sh --api --api-auth username password \", \" e.g. 
sh webui.sh --api \": \" por exemplo, sh webui.sh --api \", \" latest \": \" mais recente \", \" leave blank for to use commercial endpoint \": \" deixe em branco para usar o endpoint comercial \", \" Last dddd at h:mm A\": \" Último dddd em h:mm A\", \" Today at h:mm A\": \" Hoje às h:mm A\", \" Yesterday at h:mm A\": \" Ontem às h:mm A\", \"{… 证据：`src/lib/i18n/locales/pt-BR/translation.json`\n- **Translation**（structured_config）：{ \"-1 for no limit, or a positive integer for a specific limit\": \"-1 para sem limite, ou um inteiro positivo para um limite específico\", \"'s', 'm', 'h', 'd', 'w' or '-1' for no expiration.\": \"'s', 'm', 'h', 'd', 'w' ou '-1' para nenhuma expiração.\", \" e.g. sh webui.sh --api --api-auth username password \": \" por exemplo, sh webui.sh --api --api-auth username password \", \" e.g. sh webui.sh --api \": \" por exemplo, sh webui.sh --api \", \" latest \": \" mais recente \", \" leave blank for to use commercial endpoint \": \"deixe em branco para utilizar o endpoint comercial\", \" Last dddd at h:mm A\": \"\", \" Today at h:mm A\": \"\", \" Yesterday at h:mm A\": \"\", \"{{ models }}\": \"{{ modelos }}\", \"{{COUNT}} Availab… 证据：`src/lib/i18n/locales/pt-PT/translation.json`\n- **Translation**（structured_config）：{ \"-1 for no limit, or a positive integer for a specific limit\": \"-1 pentru nelimitat sau un număr întreg pozitiv pentru o limită specifică\", \"'s', 'm', 'h', 'd', 'w' or '-1' for no expiration.\": \"'s', 'm', 'h', 'd', 'w' sau '-1' fără expirare.\", \" e.g. sh webui.sh --api --api-auth username password \": \" de ex. sh webui.sh --api --api-auth username password \", \" e.g. sh webui.sh --api \": \" de ex. 
sh webui.sh --api \", \" latest \": \" ultimul \", \" leave blank for to use commercial endpoint \": \" lăsați necompletat pentru a folosi endpoint-ul comercial \", \" Last dddd at h:mm A\": \"\", \" Today at h:mm A\": \"\", \" Yesterday at h:mm A\": \"\", \"{{ models }}\": \"{{ modele }}\", \"{{COUNT}} Available Tools\": \"\"… 证据：`src/lib/i18n/locales/ro-RO/translation.json`\n- 其余 20 条证据见 `AI_CONTEXT_PACK.json` 或 `EVIDENCE_INDEX.json`。\n\n## 宿主 AI 必须遵守的规则\n\n- **把本资产当作开工前上下文，而不是运行环境。**：AI Context Pack 只包含证据化项目理解，不包含目标项目的可执行状态。 证据：`README.md`, `package.json`, `LICENSE`\n- **回答用户时区分可预览内容与必须安装后才能验证的内容。**：安装前体验的消费者价值来自降低误装和误判，而不是伪装成真实运行。 证据：`README.md`, `package.json`, `LICENSE`\n\n## 用户开工前应该回答的问题\n\n- 你准备在哪个宿主 AI 或本地环境中使用它？\n- 你只是想先体验工作流，还是准备真实安装？\n- 你最在意的是安装成本、输出质量、还是和现有规则的冲突？\n\n## 验收标准\n\n- 所有能力声明都能回指到 evidence_refs 中的文件路径。\n- AI_CONTEXT_PACK.md 没有把预览包装成真实运行。\n- 用户能在 3 分钟内看懂适合谁、能做什么、如何开始和风险边界。\n\n---\n\n## Doramagic Context Augmentation\n\n下面内容用于强化 Repomix/AI Context Pack 主体。Human Manual 只提供阅读骨架；踩坑日志会被转成宿主 AI 必须遵守的工作约束。\n\n## Human Manual 骨架\n\n使用规则：这里只是项目阅读路线和显著性信号，不是事实权威。具体事实仍必须回到 repo evidence / Claim Graph。\n\n宿主 AI 硬性规则：\n- 不得把页标题、章节顺序、摘要或 importance 当作项目事实证据。\n- 解释 Human Manual 骨架时，必须明确说它只是阅读路线/显著性信号。\n- 能力、安装、兼容性、运行状态和风险判断必须引用 repo evidence、source path 或 Claim Graph。\n\n- **Project Introduction**：importance `high`\n  - source_paths: README.md, pyproject.toml, backend/open_webui/__init__.py\n- **Installation Guide**：importance `high`\n  - source_paths: Dockerfile, docker-compose.yaml, .env.example, backend/start.sh, run.sh\n- **Architecture Overview**：importance `high`\n  - source_paths: backend/open_webui/main.py, backend/open_webui/socket/main.py, src/routes/+layout.svelte, src/lib/apis/index.ts\n- **Data Models**：importance `high`\n  - source_paths: backend/open_webui/models/users.py, backend/open_webui/models/chats.py, backend/open_webui/models/messages.py, backend/open_webui/models/knowledge.py, backend/open_webui/models/files.py\n- **API 
Routers**：importance `high`\n  - source_paths: backend/open_webui/routers/auths.py, backend/open_webui/routers/chats.py, backend/open_webui/routers/models.py, backend/open_webui/routers/retrieval.py, backend/open_webui/routers/files.py\n- **Retrieval System**：importance `high`\n  - source_paths: backend/open_webui/retrieval/vector/main.py, backend/open_webui/retrieval/vector/factory.py, backend/open_webui/retrieval/web/main.py, backend/open_webui/retrieval/loaders/main.py, backend/open_webui/retrieval/vector/dbs/pgvector.py\n- **Frontend Structure**：importance `medium`\n  - source_paths: src/lib/components, src/routes, src/lib/apis, src/lib/i18n/index.ts, src/lib/stores/index.ts\n- **Chat Interface**：importance `high`\n  - source_paths: src/lib/components/chat/Chat.svelte, src/lib/components/chat/MessageInput.svelte, src/lib/components/chat/Messages/Message.svelte, src/lib/components/chat/Messages/Markdown.svelte, src/lib/components/chat/ModelSelector.svelte\n\n## Repo Inspection Evidence / 源码检查证据\n\n- repo_clone_verified: true\n- repo_inspection_verified: true\n- repo_commit: `3660bc00fd807deced3400a63bfa6db47811a3bb`\n- inspected_files: `pyproject.toml`, `Dockerfile`, `package.json`, `README.md`, `uv.lock`, `docs/SECURITY.md`, `src/app.d.ts`, `src/lib/shortcuts.ts`, `src/lib/dayjs.js`, `src/lib/constants.ts`, `src/lib/index.ts`, `src/lib/emoji-shortcodes.json`, `src/lib/emoji-groups.json`, `src/routes/+layout.js`, `src/lib/workers/kokoro.worker.ts`, `src/lib/workers/pyodide.worker.ts`, `src/lib/workers/KokoroWorker.ts`, `src/lib/stores/index.ts`, `src/lib/utils/safeImageUrl.ts`, `src/lib/utils/connections.ts`\n\n宿主 AI 硬性规则：\n- 没有 repo_clone_verified=true 时，不得声称已经读过源码。\n- 没有 repo_inspection_verified=true 时，不得把 README/docs/package 文件判断写成事实。\n- 没有 quick_start_verified=true 时，不得声称 Quick Start 已跑通。\n\n## Doramagic Pitfall Constraints / 踩坑约束\n\n这些规则来自 Doramagic 发现、验证或编译过程中的项目专属坑点。宿主 AI 必须把它们当作工作约束，而不是普通说明文字。\n\n### Constraint 1: 能力判断依赖假设\n\n- Trigger: 
README/documentation is current enough for a first validation pass.\n- Host AI rule: 将假设转成下游验证清单。\n- Why it matters: 假设不成立时，用户拿不到承诺的能力。\n- Evidence: capability.assumptions | github_repo:701547123 | https://github.com/open-webui/open-webui | README/documentation is current enough for a first validation pass.\n- Hard boundary: 不要把这个坑点包装成已解决、已验证或可忽略，除非后续验证证据明确证明它已经关闭。\n\n### Constraint 2: 维护活跃度未知\n\n- Trigger: 未记录 last_activity_observed。\n- Host AI rule: 补 GitHub 最近 commit、release、issue/PR 响应信号。\n- Why it matters: 新项目、停更项目和活跃项目会被混在一起，推荐信任度下降。\n- Evidence: evidence.maintainer_signals | github_repo:701547123 | https://github.com/open-webui/open-webui | last_activity_observed missing\n- Hard boundary: 不要把这个坑点包装成已解决、已验证或可忽略，除非后续验证证据明确证明它已经关闭。\n\n### Constraint 3: 下游验证发现风险项\n\n- Trigger: no_demo\n- Host AI rule: 进入安全/权限治理复核队列。\n- Why it matters: 下游已经要求复核，不能在页面中弱化。\n- Evidence: downstream_validation.risk_items | github_repo:701547123 | https://github.com/open-webui/open-webui | no_demo; severity=medium\n- Hard boundary: 不要把这个坑点包装成已解决、已验证或可忽略，除非后续验证证据明确证明它已经关闭。\n\n### Constraint 4: 存在评分风险\n\n- Trigger: no_demo\n- Host AI rule: 把风险写入边界卡，并确认是否需要人工复核。\n- Why it matters: 风险会影响是否适合普通用户安装。\n- Evidence: risks.scoring_risks | github_repo:701547123 | https://github.com/open-webui/open-webui | no_demo; severity=medium\n- Hard boundary: 不要把这个坑点包装成已解决、已验证或可忽略，除非后续验证证据明确证明它已经关闭。\n\n### Constraint 5: issue/PR 响应质量未知\n\n- Trigger: issue_or_pr_quality=unknown。\n- Host AI rule: 抽样最近 issue/PR，判断是否长期无人处理。\n- Why it matters: 用户无法判断遇到问题后是否有人维护。\n- Evidence: evidence.maintainer_signals | github_repo:701547123 | https://github.com/open-webui/open-webui | issue_or_pr_quality=unknown\n- Hard boundary: 不要把这个坑点包装成已解决、已验证或可忽略，除非后续验证证据明确证明它已经关闭。\n\n### Constraint 6: 发布节奏不明确\n\n- Trigger: release_recency=unknown。\n- Host AI rule: 确认最近 release/tag 和 README 安装命令是否一致。\n- Why it matters: 安装命令和文档可能落后于代码，用户踩坑概率升高。\n- Evidence: evidence.maintainer_signals | github_repo:701547123 | 
https://github.com/open-webui/open-webui | release_recency=unknown\n- Hard boundary: 不要把这个坑点包装成已解决、已验证或可忽略，除非后续验证证据明确证明它已经关闭。\n",
      "summary": "给宿主 AI 的上下文和工作边界。",
      "title": "AI Context Pack / 带给我的 AI"
    },
    "boundary_risk_card": {
      "asset_id": "boundary_risk_card",
      "filename": "BOUNDARY_RISK_CARD.md",
      "markdown": "# Boundary & Risk Card / 安装前决策卡\n\n项目：open-webui/open-webui\n\n## Doramagic 试用结论\n\n当前结论：可以进入发布前推荐检查；首次使用仍应从最小权限、临时目录和可回滚配置开始。\n\n## 用户现在可以做\n\n- 可以先阅读 Human Manual，理解项目目的和主要工作流。\n- 可以复制 Prompt Preview 做安装前体验；这只验证交互感，不代表真实运行。\n- 可以把官方 Quick Start 命令放到隔离环境中验证，不要直接进主力环境。\n\n## 现在不要做\n\n- 不要把 Prompt Preview 当成项目实际运行结果。\n- 不要把 metadata-only validation 当成沙箱安装验证。\n- 不要把未验证能力写成“已支持、已跑通、可放心安装”。\n- 不要在首次试用时交出生产数据、私人文件、真实密钥或主力配置目录。\n\n## 安装前检查\n\n- 宿主 AI 是否匹配：mcp_host, chatgpt\n- 官方安装入口状态：已发现官方入口\n- 是否在临时目录、临时宿主或容器中验证：必须是\n- 是否能回滚配置改动：必须能\n- 是否需要 API Key、网络访问、读写文件或修改宿主配置：未确认前按高风险处理\n- 是否记录了安装命令、实际输出和失败日志：必须记录\n\n## 当前阻塞项\n\n- 无阻塞项。\n\n## 项目专属踩坑\n\n- 能力判断依赖假设（medium）：假设不成立时，用户拿不到承诺的能力。 建议检查：将假设转成下游验证清单。\n- 维护活跃度未知（medium）：新项目、停更项目和活跃项目会被混在一起，推荐信任度下降。 建议检查：补 GitHub 最近 commit、release、issue/PR 响应信号。\n- 下游验证发现风险项（medium）：下游已经要求复核，不能在页面中弱化。 建议检查：进入安全/权限治理复核队列。\n- 存在评分风险（medium）：风险会影响是否适合普通用户安装。 建议检查：把风险写入边界卡，并确认是否需要人工复核。\n- issue/PR 响应质量未知（low）：用户无法判断遇到问题后是否有人维护。 建议检查：抽样最近 issue/PR，判断是否长期无人处理。\n- 发布节奏不明确：安装命令和文档可能落后于代码，用户踩坑概率升高。 建议检查：确认最近 release/tag 和 README 安装命令是否一致。\n\n## 风险与权限提示\n\n- no_demo: medium\n\n## 证据缺口\n\n- 暂未发现结构化证据缺口。\n",
      "summary": "安装、权限、验证和推荐前风险。",
      "title": "Boundary & Risk Card / 边界与风险卡"
    },
    "human_manual": {
      "asset_id": "human_manual",
      "filename": "HUMAN_MANUAL.md",
      "markdown": "# https://github.com/open-webui/open-webui 项目说明书\n\n生成时间：2026-05-16 19:17:04 UTC\n\n## 目录\n\n- [Project Introduction](#project-introduction)\n- [Installation Guide](#installation-guide)\n- [Architecture Overview](#architecture-overview)\n- [Data Models](#data-models)\n- [API Routers](#api-routers)\n- [Retrieval System](#retrieval-system)\n- [Frontend Structure](#frontend-structure)\n- [Chat Interface](#chat-interface)\n- [Ollama Integration](#ollama-integration)\n- [RAG Pipeline](#rag-pipeline)\n\n<a id='project-introduction'></a>\n\n## Project Introduction\n\n### 相关页面\n\n相关主题：[Installation Guide](#installation-guide), [Architecture Overview](#architecture-overview)\n\n<details>\n<summary>相关源码文件</summary>\n\n以下源码文件用于生成本页说明：\n\n- [README.md](https://github.com/open-webui/open-webui/blob/main/README.md)\n- [src/lib/constants.ts](https://github.com/open-webui/open-webui/blob/main/src/lib/constants.ts)\n- [backend/open_webui/config.py](https://github.com/open-webui/open-webui/blob/main/backend/open_webui/config.py)\n- [backend/requirements-min.txt](https://github.com/open-webui/open-webui/blob/main/backend/requirements-min.txt)\n- [src/lib/utils/index.ts](https://github.com/open-webui/open-webui/blob/main/src/lib/utils/index.ts)\n- [backend/open_webui/utils/middleware.py](https://github.com/open-webui/open-webui/blob/main/backend/open_webui/utils/middleware.py)\n</details>\n\n# Project Introduction\n\nOpen WebUI is an extensible, self-hosted AI interface designed to provide a powerful and user-friendly chat experience for Large Language Models (LLMs). It serves as a comprehensive web-based frontend that seamlessly integrates with various LLM backends, enabling users to interact with AI models through a modern, feature-rich interface.\n\n## Overview\n\nOpen WebUI is an open-source project that prioritizes offline functionality and user privacy. 
The platform is built with extensibility in mind, allowing users to customize and extend its capabilities through a modular architecture. The project supports multiple installation methods and integrates with popular LLM providers like Ollama, OpenAI, and various other AI services.\n\nThe system operates as a full-stack application with a Svelte-based frontend and a Python FastAPI backend, communicating through RESTful APIs and WebSocket connections for real-time interactions.\n\n## Architecture\n\nOpen WebUI follows a client-server architecture with clear separation between the frontend presentation layer and the backend API layer.\n\n```mermaid\ngraph TD\n    subgraph Frontend[\"Frontend (Svelte)\"]\n        UI[User Interface]\n        State[State Management]\n        API[API Client]\n    end\n    \n    subgraph Backend[\"Backend (Python/FastAPI)\"]\n        Routes[API Routes]\n        Services[Business Logic]\n        DB[(Database)]\n        Auth[Authentication]\n    end\n    \n    subgraph External[\"External Services\"]\n        Ollama[Ollama]\n        OpenAI[OpenAI API]\n        RAG[RAG Providers]\n    end\n    \n    UI --> State\n    State --> API\n    API --> Routes\n    Routes --> Services\n    Services --> DB\n    Routes --> Auth\n    Services --> Ollama\n    Services --> OpenAI\n    Services --> RAG\n```\n\n### Frontend Layer\n\nThe frontend is built using Svelte and SvelteKit, providing a reactive and performant user interface. 
Key components include:\n\n| Component | Location | Purpose |\n|-----------|----------|---------|\n| Constants | `src/lib/constants.ts` | Application-wide configuration values |\n| Utilities | `src/lib/utils/index.ts` | Content processing and sanitization |\n| API Clients | `src/lib/apis/` | Communication with backend services |\n\n资料来源：[src/lib/constants.ts:1-20]()\n\nThe frontend defines API base URLs for various services:\n\n```typescript\nexport const WEBUI_API_BASE_URL = `${WEBUI_BASE_URL}/api/v1`;\nexport const OLLAMA_API_BASE_URL = `${WEBUI_BASE_URL}/ollama`;\nexport const OPENAI_API_BASE_URL = `${WEBUI_BASE_URL}/openai`;\nexport const AUDIO_API_BASE_URL = `${WEBUI_BASE_URL}/api/v1/audio`;\nexport const IMAGES_API_BASE_URL = `${WEBUI_BASE_URL}/api/v1/images`;\nexport const RETRIEVAL_API_BASE_URL = `${WEBUI_BASE_URL}/api/v1/retrieval`;\n```\n\n资料来源：[src/lib/constants.ts:8-15]()\n\n### Backend Layer\n\nThe backend is built with Python using FastAPI, providing a robust and scalable API layer. 
The backend handles authentication, data management, and communication with external AI services.\n\n#### Core Dependencies\n\n| Package | Version | Purpose |\n|---------|---------|---------|\n| fastapi | 0.135.1 | Web framework |\n| uvicorn | 0.41.0 | ASGI server |\n| pydantic | 2.12.5 | Data validation |\n| sqlalchemy | 2.0.48 | ORM framework |\n| python-socketio | 5.16.1 | WebSocket support |\n| pycrdt | 0.12.47 | CRDT for real-time collaboration |\n\n资料来源：[backend/requirements-min.txt:1-35]()\n\n## Features\n\nOpen WebUI provides a comprehensive set of features designed to enhance the AI chat experience:\n\n### Supported File Types\n\nThe system supports various document formats for upload and processing:\n\n| Category | File Types |\n|----------|------------|\n| Documents | PDF, EPUB, DOCX, TXT |\n| Code | Python, JavaScript, CSS, XML |\n| Data | CSV, Markdown |\n| Media | MP3, WAV (audio) |\n| Other | HTML, Octet-stream |\n\n资料来源：[src/lib/constants.ts:18-32]()\n\n### Key Capabilities\n\n1. **Multi-Model Support**: Engage with multiple AI models simultaneously through the MOA (Mixture of Agents) architecture\n2. **Code Interpreter**: Execute Python code in sandboxed environments using Pyodide or Jupyter\n3. **Voice Mode**: Voice-activated interactions with customizable prompts\n4. **RAG Integration**: Retrieval-augmented generation with support for 15+ search providers\n5. **Web Browsing**: Extract and integrate web content directly into conversations\n6. **Image Generation**: Integration with DALL-E, Gemini, ComfyUI, and AUTOMATIC1111\n7. **Role-Based Access Control (RBAC)**: Granular permission management\n\n## Configuration System\n\nOpen WebUI uses a persistent configuration system to manage application settings. 
Configuration values are stored in the database and can be overridden by environment variables.\n\n### Code Execution Configuration\n\n| Setting | Environment Variable | Default | Description |\n|---------|---------------------|---------|-------------|\n| ENABLE_CODE_EXECUTION | ENABLE_CODE_EXECUTION | True | Enable code execution feature |\n| CODE_EXECUTION_ENGINE | CODE_EXECUTION_ENGINE | pyodide | Execution engine (pyodide/jupyter) |\n| JUPYTER_URL | CODE_EXECUTION_JUPYTER_URL | - | Jupyter server URL |\n| JUPYTER_AUTH | CODE_EXECUTION_JUPYTER_AUTH | - | Jupyter authentication |\n\n资料来源：[backend/open_webui/config.py:1-50]()\n\n### Voice Mode Configuration\n\n| Parameter | Description |\n|-----------|-------------|\n| VOICE_MODE_PROMPT_TEMPLATE | Template for voice interaction prompts |\n| ENABLE_VOICE_MODE_PROMPT | Enable voice-specific prompt handling |\n\n## Security Features\n\n### Authentication System\n\nThe backend implements comprehensive authentication using:\n- JWT tokens via PyJWT\n- Argon2 password hashing\n- Session management with Redis support\n- Role-based access control (RBAC)\n\n### Content Processing\n\nThe system includes middleware for processing and sanitizing AI responses:\n\n```mermaid\ngraph LR\n    Response[AI Response] --> Middleware[Middleware Layer]\n    Middleware --> Sanitize[Content Sanitization]\n    Middleware --> CodeBlock[Code Block Processing]\n    Middleware --> Reasoning[Reasoning Display]\n    Sanitize --> Render[Rendered Response]\n    CodeBlock --> Render\n    Reasoning --> Render\n```\n\nThe middleware handles special content types including:\n- Code interpreter blocks\n- Reasoning/thinking blocks\n- HTML content rendering\n\n资料来源：[backend/open_webui/utils/middleware.py:1-40]()\n\n## Installation Methods\n\n### Python pip Installation\n\n```bash\npip install open-webui\nopen-webui serve\n```\n\nThe server runs on `http://localhost:8080` by default.\n\n### Docker Installation\n\n```bash\ndocker run -d -p 3000:8080 \\\n  
-v open-webui:/app/backend/data \\\n  --name open-webui \\\n  --add-host=host.docker.internal:host-gateway \\\n  --restart always \\\n  ghcr.io/open-webui/open-webui:latest\n```\n\n> [!IMPORTANT]\n> The volume mount `-v open-webui:/app/backend/data` is crucial for database persistence.\n\n### Development Branch\n\nFor testing unstable features:\n```bash\ndocker run -d -p 3000:8080 -v open-webui:/app/backend/data --name open-webui --add-host=host.docker.internal:host-gateway --restart always ghcr.io/open-webui/open-webui:dev\n```\n\n资料来源：[README.md:1-80]()\n\n## Technology Stack Summary\n\n| Layer | Technology | Key Libraries |\n|-------|------------|---------------|\n| Frontend Framework | Svelte/SvelteKit | - |\n| Backend Framework | Python/FastAPI | Pydantic, SQLAlchemy |\n| Database | SQLite/PostgreSQL | aiosqlite, psycopg |\n| Real-time | WebSocket | python-socketio, pycrdt |\n| Caching | Redis | starsessions |\n| Authentication | JWT/Argon2 | PyJWT, argon2-cffi |\n| HTTP Client | httpx | With SOCKS, HTTP/2 support |\n| Task Scheduling | APScheduler | - |\n\n## System Requirements\n\n- **Python Version**: 3.11+ (required for compatibility)\n- **Node.js**: For frontend development\n- **Database**: SQLite (default), PostgreSQL (production)\n- **Memory**: Minimum 4GB RAM recommended\n- **Storage**: Depends on models and data usage\n\n## Related Documentation\n\n- [Open WebUI Documentation](https://docs.openwebui.com/)\n- [Roadmap](https://docs.openwebui.com/roadmap/)\n- [Getting Started Guide](https://docs.openwebui.com/getting-started/)\n- [Updating Instructions](https://docs.openwebui.com/getting-started/updating)\n\n---\n\n<a id='installation-guide'></a>\n\n## Installation Guide\n\n<details>\n<summary>相关源码文件</summary>\n\n以下源码文件用于生成本页说明：\n\n- [README.md](https://github.com/open-webui/open-webui/blob/main/README.md)\n- [backend/requirements-min.txt](https://github.com/open-webui/open-webui/blob/main/backend/requirements-min.txt)\n- 
[backend/requirements.txt](https://github.com/open-webui/open-webui/blob/main/backend/requirements.txt)\n- [backend/open_webui/config.py](https://github.com/open-webui/open-webui/blob/main/backend/open_webui/config.py)\n- [backend/open_webui/main.py](https://github.com/open-webui/open-webui/blob/main/backend/open_webui/main.py)\n- [backend/open_webui/env.py](https://github.com/open-webui/open-webui/blob/main/backend/open_webui/env.py)\n</details>\n\n# Installation Guide\n\nOpen WebUI provides multiple installation methods to accommodate different use cases, from simple Docker deployments to development environments. This guide covers all supported installation approaches, configuration options, and environment variables required for a successful setup.\n\n## Prerequisites\n\n### System Requirements\n\n| Component | Minimum | Recommended |\n|-----------|---------|-------------|\n| Python | 3.11 | 3.11+ |\n| RAM | 4 GB | 8 GB+ |\n| Disk | 10 GB | 20 GB+ |\n| Docker | 20.10+ | Latest |\n| GPU | Optional | NVIDIA GPU with CUDA |\n\n### Required Dependencies\n\nThe backend requires the following core packages for basic operation:\n\n```\nfastapi==0.135.1\nuvicorn[standard]==0.41.0\npydantic==2.12.5\npython-multipart==0.0.22\nitsdangerous==2.2.0\npython-socketio==5.16.1\npython-jose==3.5.0\ncryptography\nsqlalchemy==2.0.48\naiosqlite==0.21.0\n```\n\n资料来源：[backend/requirements-min.txt:1-15](https://github.com/open-webui/open-webui/blob/main/backend/requirements-min.txt)\n\n## Installation Methods\n\n### Docker Installation (Recommended)\n\nDocker is the recommended installation method for production use. 
Open WebUI provides multiple official images with different configurations.\n\n#### Docker Image Variants\n\n| Tag | Description | Use Case |\n|-----|-------------|----------|\n| `main` | Base Open WebUI | Standard deployment |\n| `cuda` | With CUDA support | NVIDIA GPU acceleration |\n| `ollama` | Bundled with Ollama | Local model inference |\n| `dev` | Development build | Testing latest features |\n\n#### Basic Docker Installation\n\nFor connecting to Ollama on localhost:\n\n```bash\ndocker run -d -p 3000:8080 \\\n  --add-host=host.docker.internal:host-gateway \\\n  -v open-webui:/app/backend/data \\\n  --name open-webui \\\n  --restart always \\\n  ghcr.io/open-webui/open-webui:main\n```\n\n资料来源：[README.md:42-47](https://github.com/open-webui/open-webui/blob/main/README.md)\n\n#### NVIDIA GPU Support\n\nTo enable GPU acceleration:\n\n```bash\ndocker run -d -p 3000:8080 \\\n  --gpus all \\\n  --add-host=host.docker.internal:host-gateway \\\n  -v open-webui:/app/backend/data \\\n  --name open-webui \\\n  --restart always \\\n  ghcr.io/open-webui/open-webui:cuda\n```\n\n资料来源：[README.md:53-59](https://github.com/open-webui/open-webui/blob/main/README.md)\n\n#### Bundled Ollama Installation\n\nFor a streamlined setup with both Open WebUI and Ollama in a single container:\n\n**With GPU Support:**\n```bash\ndocker run -d -p 3000:8080 --gpus=all \\\n  -v ollama:/root/.ollama \\\n  -v open-webui:/app/backend/data \\\n  --name open-webui \\\n  --restart always \\\n  ghcr.io/open-webui/open-webui:ollama\n```\n\n**CPU Only:**\n```bash\ndocker run -d -p 3000:8080 \\\n  -v ollama:/root/.ollama \\\n  -v open-webui:/app/backend/data \\\n  --name open-webui \\\n  --restart always \\\n  ghcr.io/open-webui/open-webui:ollama\n```\n\n资料来源：[README.md:64-79](https://github.com/open-webui/open-webui/blob/main/README.md)\n\n#### OpenAI API Only\n\nFor environments using only the OpenAI API:\n\n```bash\ndocker run -d -p 3000:8080 \\\n  -e OPENAI_API_KEY=your_secret_key \\\n  -v 
open-webui:/app/backend/data \\\n  --name open-webui \\\n  --restart always \\\n  ghcr.io/open-webui/open-webui:main\n```\n\n资料来源：[README.md:50-56](https://github.com/open-webui/open-webui/blob/main/README.md)\n\n#### Remote Ollama Server\n\nTo connect to Ollama on a different server:\n\n```bash\ndocker run -d -p 3000:8080 \\\n  -e OLLAMA_BASE_URL=https://example.com \\\n  -v open-webui:/app/backend/data \\\n  --name open-webui \\\n  --restart always \\\n  ghcr.io/open-webui/open-webui:main\n```\n\n资料来源：[README.md:40-46](https://github.com/open-webui/open-webui/blob/main/README.md)\n\n### Python pip Installation\n\nOpen WebUI can be installed directly via pip for environments without Docker.\n\n#### Requirements\n\n- Python 3.11 or higher\n- pip package manager\n\n#### Installation Steps\n\n1. Install Open WebUI package:\n```bash\npip install open-webui\n```\n\n2. Start the server:\n```bash\nopen-webui serve\n```\n\nThe server will be accessible at `http://localhost:8080`.\n\n资料来源：[README.md:12-25](https://github.com/open-webui/open-webui/blob/main/README.md)\n\n### Development Installation\n\n#### Using the Dev Branch\n\n> [!WARNING]\n> The `:dev` branch contains unstable features. 
Use at your own risk.\n\n```bash\ndocker run -d -p 3000:8080 \\\n  -v open-webui:/app/backend/data \\\n  --name open-webui \\\n  --add-host=host.docker.internal:host-gateway \\\n  --restart always \\\n  ghcr.io/open-webui/open-webui:dev\n```\n\n资料来源：[README.md:27-34](https://github.com/open-webui/open-webui/blob/main/README.md)\n\n## Environment Configuration\n\n### Core Environment Variables\n\n| Variable | Description | Default |\n|----------|-------------|---------|\n| `OLLAMA_BASE_URL` | Ollama server URL | `http://localhost:11434` |\n| `OPENAI_API_KEY` | OpenAI API key | - |\n| `WEBUI_SECRET_KEY` | Session encryption key | Auto-generated |\n| `WEBUI_SESSION_COOKIE_SECURE` | Secure cookie flag | `True` |\n| `WEBUI_SESSION_COOKIE_SAME_SITE` | Cookie SameSite policy | `Lax` |\n\n资料来源：[backend/open_webui/main.py:18-35](https://github.com/open-webui/open-webui/blob/main/backend/open_webui/main.py)\n\n### Database Configuration\n\n| Variable | Description | Default |\n|----------|-------------|---------|\n| `DATABASE_URL` | Database connection string | SQLite |\n| `ENABLE_DATABASE_ENCRYPTION` | Enable SQLite encryption | `False` |\n\n#### Supported Databases\n\n- **SQLite**: Default, requires no configuration\n- **PostgreSQL**: Set `DATABASE_URL` to PostgreSQL connection string\n- **Redis**: For session management and caching\n\n资料来源：[backend/open_webui/env.py](https://github.com/open-webui/open-webui/blob/main/backend/open_webui/env.py)\n\n### Redis Configuration\n\n```bash\nREDIS_URL=redis://localhost:6379\nREDIS_KEY_PREFIX=open-webui\nREDIS_SENTINEL_HOSTS=host1:26379,host2:26379\nREDIS_SENTINEL_PORT=26379\n```\n\n资料来源：[backend/open_webui/main.py:15-18](https://github.com/open-webui/open-webui/blob/main/backend/open_webui/main.py)\n\n### Security Configuration\n\n| Variable | Description | Default |\n|----------|-------------|---------|\n| `ENABLE_SIGNUP_PASSWORD_CONFIRMATION` | Require password confirmation | `True` |\n| `WEBUI_AUTH_TRUSTED_EMAIL_HEADER` | 
Trusted email header for SSO | - |\n| `WEBUI_AUTH_SIGNOUT_REDIRECT_URL` | Signout redirect URL | - |\n\n资料来源：[backend/open_webui/main.py:36-38](https://github.com/open-webui/open-webui/blob/main/backend/open_webui/main.py)\n\n### Audit Logging\n\n| Variable | Description | Default |\n|----------|-------------|---------|\n| `ENABLE_AUDIT_GET_REQUESTS` | Log GET requests | `False` |\n| `AUDIT_INCLUDED_PATHS` | Paths to include | - |\n| `AUDIT_EXCLUDED_PATHS` | Paths to exclude | - |\n| `AUDIT_LOG_LEVEL` | Logging verbosity | `INFO` |\n\n资料来源：[backend/open_webui/env.py:12-15](https://github.com/open-webui/open-webui/blob/main/backend/open_webui/env.py)\n\n### Observability\n\n| Variable | Description | Default |\n|----------|-------------|---------|\n| `ENABLE_OTEL` | Enable OpenTelemetry | `False` |\n| `ENABLE_VERSION_UPDATE_CHECK` | Check for updates | `True` |\n\n资料来源：[backend/open_webui/main.py:48-51](https://github.com/open-webui/open-webui/blob/main/backend/open_webui/main.py)\n\n### Data Persistence\n\n> [!IMPORTANT]\n> Always mount the volume `-v open-webui:/app/backend/data` to prevent database loss.\n\nThe data directory contains:\n- SQLite database file\n- Uploaded files\n- Configuration cache\n- User sessions (if Redis not used)\n\n```bash\n-v open-webui:/app/backend/data\n```\n\n资料来源：[README.md:19-22](https://github.com/open-webui/open-webui/blob/main/README.md)\n\n## Offline Installation\n\nFor air-gapped environments, set the Hugging Face offline mode:\n\n```bash\nexport HF_HUB_OFFLINE=1\n```\n\n资料来源：[README.md:36-38](https://github.com/open-webui/open-webui/blob/main/README.md)\n\n## Installation Architecture\n\n```mermaid\ngraph TD\n    A[User Request] --> B{Installation Method}\n    B -->|Docker| C[Official Docker Image]\n    B -->|pip| D[PyPI Package]\n    \n    C --> E{Configuration}\n    D --> E\n    \n    E -->|OLLAMA_BASE_URL| F[Ollama Server]\n    E -->|OPENAI_API_KEY| G[OpenAI API]\n    E -->|Database Config| H[(Database)]\n    \n    F --> 
I[Model Inference]\n    G --> J[API Processing]\n    \n    H --> K[Application State]\n    I --> L[Response]\n    J --> L\n    K --> L\n```\n\n## Docker Compose Installation\n\nFor production deployments, use Docker Compose with persistent storage:\n\n```yaml\nservices:\n  open-webui:\n    image: ghcr.io/open-webui/open-webui:main\n    ports:\n      - \"3000:8080\"\n    volumes:\n      - open-webui:/app/backend/data\n    environment:\n      - OLLAMA_BASE_URL=http://host.docker.internal:11434\n    extra_hosts:\n      - \"host.docker.internal:host-gateway\"\n    restart: unless-stopped\n\nvolumes:\n  open-webui:\n```\n\n## Troubleshooting\n\n### Common Issues\n\n| Issue | Solution |\n|-------|----------|\n| Connection refused to Ollama | Check `OLLAMA_BASE_URL` and ensure Ollama is running |\n| Database errors | Verify volume mount is correct |\n| GPU not detected | Ensure NVIDIA Container Toolkit is installed |\n| Port conflicts | Change host port mapping |\n\n### Verification\n\nAfter installation, verify the service is running:\n\n```bash\ncurl http://localhost:3000/api/v1/models\n```\n\nThe server should respond with available models from the configured backend.\n\n资料来源：[README.md:40-60](https://github.com/open-webui/open-webui/blob/main/README.md)\n\n## Next Steps\n\nAfter successful installation:\n\n1. Access the web interface at `http://localhost:3000`\n2. Configure additional models and backends\n3. Set up user authentication and RBAC\n4. Configure retrieval and RAG pipelines\n5. 
Integrate additional tools and extensions\n\n---\n\n<a id='architecture-overview'></a>\n\n## Architecture Overview\n\n### 相关页面\n\n相关主题：[Data Models](#data-models), [API Routers](#api-routers), [Frontend Structure](#frontend-structure)\n\n<details>\n<summary>相关源码文件</summary>\n\n以下源码文件用于生成本页说明：\n\n- [backend/open_webui/main.py](https://github.com/open-webui/open-webui/blob/main/backend/open_webui/main.py)\n- [backend/open_webui/socket/main.py](https://github.com/open-webui/open-webui/blob/main/backend/open_webui/socket/main.py)\n- [src/routes/+layout.svelte](https://github.com/open-webui/open-webui/blob/main/src/routes/+layout.svelte)\n- [src/lib/apis/index.ts](https://github.com/open-webui/open-webui/blob/main/src/lib/apis/index.ts)\n- [backend/open_webui/config.py](https://github.com/open-webui/open-webui/blob/main/backend/open_webui/config.py)\n- [backend/open_webui/env.py](https://github.com/open-webui/open-webui/blob/main/backend/open_webui/env.py)\n- [src/lib/constants.ts](https://github.com/open-webui/open-webui/blob/main/src/lib/constants.ts)\n- [backend/open_webui/utils/middleware.py](https://github.com/open-webui/open-webui/blob/main/backend/open_webui/utils/middleware.py)\n</details>\n\n# Architecture Overview\n\nOpen WebUI is a self-hosted, extensible AI interface designed to provide a unified chat experience with various LLM backends. 
The architecture follows a modern full-stack pattern with a Python-based backend and a Svelte-based frontend, communicating via REST APIs and WebSocket connections.\n\n## System Architecture\n\nOpen WebUI employs a layered architecture that separates concerns between presentation, business logic, and data access:\n\n```mermaid\ngraph TD\n    subgraph Frontend[\"Frontend (Svelte/SvelteKit)\"]\n        UI[\"UI Components<br/>(+layout.svelte)\"]\n        Utils[\"Utilities<br/>(src/lib/utils)\"]\n        APIs[\"API Client<br/>(src/lib/apis)\"]\n        Const[\"Constants<br/>(src/lib/constants)\"]\n    end\n\n    subgraph Backend[\"Backend (Python/FastAPI)\"]\n        Main[\"Main Application<br/>(main.py)\"]\n        Socket[\"WebSocket Server<br/>(socket/main.py)\"]\n        Config[\"Configuration<br/>(config.py)\"]\n        Env[\"Environment<br/>(env.py)\"]\n        Middleware[\"Middleware<br/>(middleware.py)\"]\n        Retrieval[\"Retrieval System<br/>(retrieval/)\"]\n    end\n\n    subgraph External[\"External Services\"]\n        Ollama[\"Ollama API\"]\n        OpenAI[\"OpenAI API\"]\n        VectorDB[\"Vector Databases\"]\n        Redis[\"Redis Session Store\"]\n        DB[\"SQLite/PostgreSQL\"]\n    end\n\n    UI --> Utils\n    UI --> APIs\n    Utils --> Const\n    APIs --> Const\n    APIs --> Main\n    UI --> Socket\n    \n    Main --> Config\n    Main --> Env\n    Main --> Middleware\n    Main --> Retrieval\n    Main --> Socket\n    \n    Main --> Ollama\n    Main --> OpenAI\n    Main --> VectorDB\n    Main --> Redis\n    Main --> DB\n```\n\n## Directory Structure\n\nThe repository is organized into two main components:\n\n| Directory | Purpose |\n|-----------|---------|\n| `backend/` | Python/FastAPI backend application |\n| `src/` | Svelte/SvelteKit frontend application |\n\n### Backend Structure\n\n| Path | Description |\n|------|-------------|\n| `backend/open_webui/` | Main application package |\n| `backend/open_webui/main.py` | FastAPI application entry 
point |\n| `backend/open_webui/socket/main.py` | Socket.IO WebSocket handler |\n| `backend/open_webui/config.py` | Persistent configuration system |\n| `backend/open_webui/env.py` | Environment variable loading |\n| `backend/open_webui/utils/middleware.py` | Response processing middleware |\n| `backend/open_webui/retrieval/` | RAG and document retrieval system |\n\n### Frontend Structure\n\n| Path | Description |\n|------|-------------|\n| `src/routes/` | SvelteKit routes and page components |\n| `src/lib/` | Shared libraries and utilities |\n| `src/lib/apis/` | API client implementations |\n| `src/lib/utils/` | Utility functions |\n| `src/lib/constants.ts` | Application constants and configuration |\n\n## API Architecture\n\n### API Endpoint Structure\n\nOpen WebUI exposes multiple API bases for different services:\n\n```mermaid\ngraph LR\n    subgraph Gateway[\"API Gateway\"]\n        Base[\"/\"]\n    end\n    \n    subgraph Services[\"Service Endpoints\"]\n        API[\"/api/v1<br/>REST API\"]\n        Ollama[\"/ollama<br/>Ollama Proxy\"]\n        OpenAI[\"/openai<br/>OpenAI Proxy\"]\n        Audio[\"/api/v1/audio<br/>Audio Processing\"]\n        Images[\"/api/v1/images<br/>Image Processing\"]\n        Retrieval[\"/api/v1/retrieval<br/>RAG Retrieval\"]\n    end\n    \n    Base --> API\n    Base --> Ollama\n    Base --> OpenAI\n    Base --> Audio\n    Base --> Images\n    Base --> Retrieval\n```\n\n### API Constants Configuration\n\nAPI base URLs are defined in `src/lib/constants.ts`:\n\n| Constant | Default Value | Purpose |\n|----------|--------------|---------|\n| `WEBUI_BASE_URL` | Dynamic (dev/prod) | Base application URL |\n| `WEBUI_API_BASE_URL` | `${WEBUI_BASE_URL}/api/v1` | Main REST API |\n| `OLLAMA_API_BASE_URL` | `${WEBUI_BASE_URL}/ollama` | Ollama API proxy |\n| `OPENAI_API_BASE_URL` | `${WEBUI_BASE_URL}/openai` | OpenAI API proxy |\n| `AUDIO_API_BASE_URL` | `${WEBUI_BASE_URL}/api/v1/audio` | Audio processing |\n| `IMAGES_API_BASE_URL` | 
`${WEBUI_BASE_URL}/api/v1/images` | Image generation |\n| `RETRIEVAL_API_BASE_URL` | `${WEBUI_BASE_URL}/api/v1/retrieval` | RAG retrieval |\n\n资料来源：[src/lib/constants.ts:1-15]()\n\n### API Client Pattern\n\nThe frontend uses a consistent API client pattern implemented in `src/lib/apis/`:\n\n```typescript\n// Pattern used across all API clients\nconst res = await fetch(`${WEBUI_API_BASE_URL}/endpoint`, {\n    method: 'METHOD',\n    headers: {\n        Accept: 'application/json',\n        'Content-Type': 'application/json',\n        authorization: `Bearer ${token}`\n    },\n    body: JSON.stringify({ /* payload */ })\n})\n    .then(async (res) => {\n        if (!res.ok) throw await res.json();\n        return res.json();\n    });\n```\n\n资料来源：[src/lib/apis/knowledge/index.ts:1-35]()\n\n## Configuration System\n\n### Environment Setup\n\nThe backend loads configuration from environment variables and `.env` files using the following hierarchy defined in `backend/open_webui/env.py`:\n\n| Variable | Description |\n|----------|-------------|\n| `OPEN_WEBUI_DIR` | Application directory (location of `env.py`) |\n| `BACKEND_DIR` | Parent of `open_webui/` |\n| `BASE_DIR` | Repository root |\n| `DOCKER` | Docker environment flag |\n| `USE_CUDA_DOCKER` | CUDA/GPU acceleration flag |\n\n资料来源：[backend/open_webui/env.py:1-45]()\n\n### Persistent Configuration\n\nConfiguration values are stored persistently using the `PersistentConfig` system:\n\n```python\nENABLE_CODE_EXECUTION = PersistentConfig(\n    'ENABLE_CODE_EXECUTION',\n    'code_execution.enable',\n    os.environ.get('ENABLE_CODE_EXECUTION', 'True').lower() == 'true',\n)\n\nCODE_EXECUTION_ENGINE = PersistentConfig(\n    'CODE_EXECUTION_ENGINE',\n    'code_execution.engine',\n    os.environ.get('CODE_EXECUTION_ENGINE', 'pyodide'),\n)\n```\n\n资料来源：[backend/open_webui/config.py:1-50]()\n\n### Supported File Types\n\nThe application supports various file upload types:\n\n| Category | MIME Types |\n|----------|-----------|\n| 
Documents | `application/pdf`, `application/epub+zip`, `application/vnd.openxmlformats-officedocument.wordprocessingml.document` |\n| Text | `text/plain`, `text/csv`, `text/xml`, `text/html`, `text/x-python`, `text/css`, `text/markdown` |\n| Code | `text/x-python`, `text/css`, `application/x-javascript` |\n| Media | `audio/mpeg`, `audio/wav` |\n| Other | `application/octet-stream` |\n\n资料来源：[src/lib/constants.ts:20-35]()\n\n## WebSocket Communication\n\nReal-time communication uses Socket.IO for bidirectional messaging:\n\n```mermaid\nsequenceDiagram\n    participant Client as Frontend\n    participant Socket as Socket.IO Server\n    participant Main as Main Application\n    \n    Client->>Socket: Connect with auth token\n    Socket->>Main: Validate session\n    Main->>Socket: Session valid\n    Socket->>Client: Connection established\n    \n    Client->>Socket: Send message event\n    Socket->>Main: Forward message\n    Main->>Main: Process with LLM\n    Main->>Socket: Stream response\n    Socket->>Client: Stream chunks\n    \n    Client->>Socket: Disconnect\n    Socket->>Client: Connection closed\n```\n\n资料来源：[backend/open_webui/socket/main.py]()\n\n## Middleware Pipeline\n\nThe middleware system processes responses and transforms content for the frontend. The `build_output()` function in `backend/open_webui/utils/middleware.py` handles special content types:\n\n### Content Type Processing\n\n| Content Type | Rendering | Description |\n|-------------|-----------|-------------|\n| `reasoning` | `<details>` with thought process | Model reasoning display |\n| `code_interpreter` | `<details>` with code block | Code execution results |\n\n```python\nif item_type == 'open_webui:code_interpreter':\n    # Code interpreter needs to inspect/mutate prior accumulated content\n    content = '\\n'.join(parts)\n    content_stripped, original_whitespace = split_content_and_whitespace(content)\n    # ... 
processing logic\n    parts.append(\n        f'<details type=\"code_interpreter\" done=\"true\" duration=\"{duration or 0}\">\\n<summary>Analyzed</summary>\\n{display}\\n</details>'\n    )\n```\n\n资料来源：[backend/open_webui/utils/middleware.py:1-80]()\n\n### Deep Merge Utility\n\nThe middleware also provides a `deep_merge()` function for combining configuration:\n\n| Behavior | Description |\n|----------|-------------|\n| Dicts | Recursive merge |\n| Strings | Concatenation |\n| Others | Overwrite |\n\n资料来源：[backend/open_webui/utils/middleware.py:75-85]()\n\n## Frontend Application Structure\n\n### Layout System\n\nThe main layout is defined in `src/routes/+layout.svelte` which serves as the root component:\n\n```mermaid\ngraph TD\n    Layout[\"+layout.svelte<br/>Root Layout\"]\n    Splash[\"Splash Screen<br/>(#splash-screen)\"]\n    Progress[\"Progress Bar<br/>(#progress-bar)\"]\n    Logo[\"Logo Elements<br/>(#logo, #logo-her)\"]\n    Theme[\"Theme Detection<br/>(.dark, .her)\"]\n    \n    Layout --> Splash\n    Layout --> Progress\n    Layout --> Logo\n    Layout --> Theme\n```\n\n资料来源：[src/app.html:1-60]()\n\n### Utility Libraries\n\n| Library | Purpose |\n|---------|---------|\n| `src/lib/utils/index.ts` | Content processing, sanitization, Chinese language handling |\n| `src/lib/utils/codeHighlight.ts` | Code syntax highlighting with Shiki |\n| `src/lib/apis/index.ts` | API client exports |\n\n### Content Processing Pipeline\n\nThe `processResponseContent()` function handles special content transformations:\n\n```typescript\nexport const processResponseContent = (content: string) => {\n    content = processChineseContent(content);\n    return content.trim();\n};\n\nexport const sanitizeResponseContent = (content: string) => {\n    return content\n        .replace(/<\\|[a-z]*$/, '')\n        .replace(/<\\|[a-z]+\\|$/, '')\n        .replace(/<$/, '')\n        .replaceAll('<', '&lt;')\n        .replaceAll('>', '&gt;')\n        .replaceAll(/<\\|[a-z]+\\|>/g, ' ')\n   
     .trim();\n};\n```\n\n资料来源：[src/lib/utils/index.ts:1-50]()\n\n## Retrieval System\n\nThe RAG (Retrieval-Augmented Generation) system supports multiple document loaders and search engines:\n\n### Supported Document Sources\n\n| Source | Configuration |\n|--------|--------------|\n| External Document Loader | `EXTERNAL_DOCUMENT_LOADER_URL`, `EXTERNAL_DOCUMENT_LOADER_API_KEY` |\n| Apache TIKA | `TIKA_SERVER_URL` |\n| Docling | `DOCLING_SERVER_URL`, `DOCLING_API_KEY`, `DOCLING_PARAMS` |\n| Mistral OCR | `MISTRAL_OCR_API_BASE_URL`, `MISTRAL_OCR_API_KEY` |\n| PaddleOCR VL | `PADDLEOCR_VL_BASE_URL`, `PADDLEOCR_VL_TOKEN` |\n| MinerU | `MINERU_API_URL`, `MINERU_API_KEY`, `MINERU_PARAMS` |\n\n### Supported Search Providers\n\n| Provider | Notes |\n|----------|-------|\n| SearXNG | Self-hosted metasearch |\n| Google PSE | Programmable Search Engine |\n| Brave Search | Privacy-focused search |\n| Ollama Cloud | LLM provider search |\n| Azure AI Search | Enterprise search |\n\n资料来源：[backend/open_webui/retrieval/utils.py:1-60]()\n\n## Code Execution Engine\n\nOpen WebUI supports code execution with configurable backends:\n\n### Configuration Options\n\n| Setting | Default | Description |\n|---------|---------|-------------|\n| `ENABLE_CODE_EXECUTION` | `True` | Enable/disable code execution |\n| `CODE_EXECUTION_ENGINE` | `pyodide` | Execution engine (pyodide/jupyter) |\n| `CODE_EXECUTION_JUPYTER_URL` | `''` | Jupyter server URL |\n| `CODE_EXECUTION_JUPYTER_AUTH` | `''` | Jupyter authentication |\n| `CODE_EXECUTION_JUPYTER_AUTH_TOKEN` | `''` | Jupyter auth token |\n\n### Execution Environments\n\n| Engine | Environment | Constraints |\n|--------|-------------|-------------|\n| Pyodide | Browser-based | Cannot install packages, `pip install` unavailable |\n| Jupyter | External server | Requires URL and optional authentication |\n\n资料来源：[backend/open_webui/config.py:50-100]()\n\n## Technology Stack\n\n### Backend Dependencies\n\nKey packages from 
`backend/requirements-min.txt`:\n\n| Package | Version | Purpose |\n|---------|---------|---------|\n| `fastapi` | 0.135.1 | Web framework |\n| `uvicorn[standard]` | 0.41.0 | ASGI server |\n| `pydantic` | 2.12.5 | Data validation |\n| `python-multipart` | 0.0.22 | Form parsing |\n| `python-socketio` | 5.16.1 | WebSocket support |\n| `sqlalchemy` | 2.0.48 | ORM |\n| `aiosqlite` | 0.21.0 | Async SQLite |\n| `psycopg[binary]` | 3.2.9 | PostgreSQL driver |\n| `httpx[socks,http2,zstd,cli,brotli]` | 0.28.1 | HTTP client |\n| `redis` | latest | Session storage |\n| `pycrdt` | 0.12.47 | CRDT for collaboration |\n| `RestrictedPython` | 8.1 | Safe Python execution |\n\n资料来源：[backend/requirements-min.txt:1-40]()\n\n### Frontend Architecture\n\n| Technology | Purpose |\n|------------|---------|\n| SvelteKit | Frontend framework |\n| TypeScript | Type safety |\n| Shiki | Code syntax highlighting |\n\n## Security Considerations\n\n### Authentication Flow\n\nThe system uses Bearer token authentication for API requests:\n\n```typescript\nheaders: {\n    authorization: `Bearer ${token}`\n}\n```\n\n### Role-Based Access Control (RBAC)\n\nOpen WebUI implements RBAC for:\n- Ollama endpoint access\n- Model creation/pulling rights\n- Knowledge base permissions\n\n资料来源：[README.md]()\n\n## Deployment Modes\n\n### Docker Deployment\n\n```bash\ndocker run -d -p 3000:8080 \\\n    -v open-webui:/app/backend/data \\\n    --name open-webui \\\n    --add-host=host.docker.internal:host-gateway \\\n    --restart always \\\n    ghcr.io/open-webui/open-webui:latest\n```\n\n### Python pip Installation\n\n```bash\npip install open-webui\nopen-webui serve\n```\n\n### Environment Variables\n\n| Variable | Values | Description |\n|----------|--------|-------------|\n| `DOCKER` | `True`/`False` | Docker environment detection |\n| `USE_CUDA_DOCKER` | `true`/`false` | GPU acceleration |\n| `HF_HUB_OFFLINE` | `1` | Offline mode (prevent downloads) |\n\n资料来源：[README.md](), 
[backend/open_webui/env.py:30-40]()\n\n---\n\n<a id='data-models'></a>\n\n## Data Models\n\n### 相关页面\n\n相关主题：[Architecture Overview](#architecture-overview), [API Routers](#api-routers)\n\n<details>\n<summary>相关源码文件</summary>\n\n以下源码文件用于生成本页说明：\n\n- [backend/open_webui/models/users.py](https://github.com/open-webui/open-webui/blob/main/backend/open_webui/models/users.py)\n- [backend/open_webui/models/chats.py](https://github.com/open-webui/open-webui/blob/main/backend/open_webui/models/chats.py)\n- [backend/open_webui/models/messages.py](https://github.com/open-webui/open-webui/blob/main/backend/open_webui/models/messages.py)\n- [backend/open_webui/models/knowledge.py](https://github.com/open-webui/open-webui/blob/main/backend/open_webui/models/knowledge.py)\n- [backend/open_webui/models/files.py](https://github.com/open-webui/open-webui/blob/main/backend/open_webui/models/files.py)\n- [backend/open_webui/internal/db.py](https://github.com/open-webui/open-webui/blob/main/backend/open_webui/internal/db.py)\n- [backend/open_webui/migrations/versions/7e5b5dc7342b_init.py](https://github.com/open-webui/open-webui/blob/main/backend/open_webui/migrations/versions/7e5b5dc7342b_init.py)\n</details>\n\n# Data Models\n\n## Overview\n\nThe Open WebUI project implements a comprehensive data modeling layer that manages persistent storage for all core application entities. 
The data models are built using SQLAlchemy ORM and follow a structured approach to storing user interactions, configurations, and content within the application.\n\nThe data model architecture serves as the foundation for:\n\n- **User Management**: Authentication, authorization, and user preferences\n- **Chat Persistence**: Message history and conversation state\n- **Knowledge Bases**: RAG (Retrieval-Augmented Generation) document storage\n- **File Management**: Document uploads and attachments\n- **Access Control**: Permission management through groups and grants\n\n资料来源：[backend/open_webui/internal/db.py:1-50]()\n\n## Architecture Overview\n\nOpen WebUI uses a layered data access architecture where models are defined as SQLAlchemy ORM classes and accessed through service layers.\n\n```mermaid\ngraph TD\n    A[API Routers] --> B[Service Layer]\n    B --> C[Data Models]\n    C --> D[SQLAlchemy ORM]\n    D --> E[(SQLite Database)]\n    \n    F[ChatMessages Table] --> C\n    G[Chats Table] --> C\n    H[Users Table] --> C\n    I[Knowledge Table] --> C\n    J[Files Table] --> C\n```\n\n## Core Data Models\n\n### User Model\n\nThe User model manages user accounts, authentication, and preferences.\n\n```python\nclass UserModel(BaseModel):\n    id: str\n    name: str\n    email: Optional[str]\n    role: str  # admin, user, guest\n    email_verified: bool\n    created_at: datetime\n    updated_at: datetime\n    settings: dict\n    keys: list\n```\n\n| Field | Type | Description |\n|-------|------|-------------|\n| `id` | String | Unique user identifier (UUID) |\n| `name` | String | Display name |\n| `email` | String (nullable) | User email address |\n| `role` | Enum | User role: `admin`, `user`, `guest` |\n| `email_verified` | Boolean | Email verification status |\n| `created_at` | DateTime | Account creation timestamp |\n| `updated_at` | DateTime | Last modification timestamp |\n| `settings` | JSON | User preferences and configurations 
|\n\n资料来源：[backend/open_webui/models/users.py:1-100]()\n\n### Chat Model\n\nThe Chat model stores conversation sessions and their associated metadata.\n\n```mermaid\ngraph LR\n    A[User] -->|has many| B[Chats]\n    B -->|contains| C[Messages]\n    B -->|references| D[ChatMessages Table]\n    D -->|links to| E[Messages JSON]\n```\n\nThe Chat model structure:\n\n```python\nclass ChatModel(BaseModel):\n    id: str\n    user_id: str\n    title: str\n    chat: dict  # Contains history, messages, metadata\n    created_at: datetime\n    updated_at: datetime\n    share_id: Optional[str]\n    archived: bool\n```\n\n| Field | Type | Description |\n|-------|------|-------------|\n| `id` | String | Unique chat identifier |\n| `user_id` | String | Owner user ID |\n| `title` | String | Chat title |\n| `chat` | JSON | Full chat history and state |\n| `share_id` | String (nullable) | Public sharing identifier |\n| `archived` | Boolean | Archive status |\n\nThe `chat` field contains a nested JSON structure:\n\n```json\n{\n  \"history\": {\n    \"messages\": {\n      \"message_id\": {\n        \"id\": \"...\",\n        \"type\": \"human|ai|system\",\n        \"content\": \"...\",\n        \"created_at\": \"...\"\n      }\n    }\n  },\n  \"metadata\": {}\n}\n```\n\n资料来源：[backend/open_webui/models/chats.py:1-150]()\n\n### Message Model\n\nThe Message model represents individual messages within a chat conversation.\n\n```mermaid\ngraph TD\n    A[Message] --> B[type]\n    A --> C[content]\n    A --> D[role]\n    A --> E[timestamp]\n    \n    B --> F[human|ai|system|tool]\n    C --> G[text|images|files]\n```\n\n```python\nclass MessageModel(BaseModel):\n    id: str\n    chat_id: str\n    message_id: str\n    type: str  # human, ai, system, tool\n    role: str\n    content: str\n    files: list\n    images: list\n    created_at: datetime\n```\n\n| Field | Type | Description |\n|-------|------|-------------|\n| `id` | String | Unique message ID |\n| `chat_id` | String | Parent chat ID 
|\n| `message_id` | String | Message identifier within chat |\n| `type` | Enum | Message type |\n| `role` | String | Role: `user`, `assistant`, `system`, `tool` |\n| `content` | String | Message content |\n| `files` | List | Attached file references |\n| `images` | List | Embedded image data |\n\n资料来源：[backend/open_webui/models/messages.py:1-100]()\n\n### Knowledge Model\n\nThe Knowledge model manages RAG knowledge bases for document retrieval.\n\n```python\nclass KnowledgeModel(BaseModel):\n    id: str\n    user_id: str\n    name: str\n    description: str\n    created_at: datetime\n    updated_at: datetime\n    data: dict  # Contains documents and vectors\n```\n\n| Field | Type | Description |\n|-------|------|-------------|\n| `id` | String | Knowledge base ID |\n| `user_id` | String | Owner user ID |\n| `name` | String | Knowledge base name |\n| `description` | String | Knowledge base description |\n| `data` | JSON | Documents and vector embeddings |\n\n资料来源：[backend/open_webui/models/knowledge.py:1-100]()\n\n### File Model\n\nThe File model handles file uploads and attachments.\n\n```python\nclass FileModel(BaseModel):\n    id: str\n    user_id: str\n    filename: str\n    path: str\n    type: str\n    size: int\n    created_at: datetime\n    data: dict  # Metadata\n```\n\n| Field | Type | Description |\n|-------|------|-------------|\n| `id` | String | File identifier |\n| `user_id` | String | Owner user ID |\n| `filename` | String | Original filename |\n| `path` | String | Storage path |\n| `type` | String | MIME type |\n| `size` | Integer | File size in bytes |\n| `data` | JSON | Additional metadata |\n\n资料来源：[backend/open_webui/models/files.py:1-100]()\n\n## Database Schema\n\n### Entity Relationship Diagram\n\n```mermaid\nerDiagram\n    USERS ||--o{ CHATS : \"owns\"\n    USERS ||--o{ FILES : \"owns\"\n    USERS ||--o{ KNOWLEDGE : \"owns\"\n    USERS ||--o{ MESSAGES : \"sends\"\n    \n    CHATS ||--o{ CHAT_MESSAGES : \"contains\"\n    CHAT_MESSAGES ||--|| 
MESSAGES : \"references\"\n    \n    KNOWLEDGE ||--o{ DOCUMENTS : \"contains\"\n    \n    USERS ||--o{ GROUPS : \"belongs to\"\n    GROUPS ||--o{ ACCESS_GRANTS : \"grants\"\n    \n    CHATS ||--o| SHARES : \"can be shared\"\n```\n\n### Database Tables\n\n| Table Name | Primary Key | Description |\n|------------|-------------|-------------|\n| `users` | `id` | User accounts and settings |\n| `chats` | `id` | Chat session storage |\n| `chat_messages` | `id, chat_id, message_id` | Normalized message storage |\n| `messages` | `id` | Message content (embedded in chats) |\n| `knowledge` | `id` | Knowledge base definitions |\n| `documents` | `id` | Knowledge base documents |\n| `files` | `id` | File metadata |\n| `folders` | `id` | Folder organization |\n| `groups` | `id` | User groups |\n| `access_grants` | `id` | Permission grants |\n| `memories` | `id` | User memory storage |\n| `channels` | `id` | Communication channels |\n| `notes` | `id` | User notes |\n\n资料来源：[backend/open_webui/migrations/versions/7e5b5dc7342b_init.py:1-500]()\n\n## Access Control Models\n\n### User Groups\n\n```python\nclass GroupModel(BaseModel):\n    id: str\n    name: str\n    description: str\n    created_at: datetime\n    user_id: str  # Creator/owner\n```\n\n### Access Grants\n\n```mermaid\ngraph TD\n    A[User] -->|belongs to| B[Groups]\n    B -->|grants| C[Access Grants]\n    C -->|applies to| D[Resource]\n    \n    D --> E[Model]\n    D --> F[Knowledge]\n    D --> G[Tool]\n    D --> H[Function]\n```\n\n| Field | Type | Description |\n|-------|------|-------------|\n| `id` | String | Grant identifier |\n| `user_id` | String | User receiving access |\n| `group_id` | String | Group granting access |\n| `resource_type` | Enum | Type: `model`, `knowledge`, `tool`, `function` |\n| `resource_id` | String | Target resource ID |\n| `permission` | String | Permission level: `read`, `write`, `admin` |\n\n资料来源：[backend/open_webui/utils/access_control/__init__.py:1-50]()\n\n## Service Layer 
Integration\n\n### Chat Service Pattern\n\nThe Chat model provides methods for message management:\n\n```python\nasync def get_messages_map_by_chat_id(id: str) -> dict:\n    \"\"\"Get message map for walking history.\"\"\"\n    \nasync def get_message_by_id_and_message_id(\n    id: str, \n    message_id: str\n) -> Optional[dict]:\n    \"\"\"Retrieve specific message from chat.\"\"\"\n    \nasync def upsert_message_to_chat_by_id_and_message_id(\n    id: str, \n    message_id: str, \n    message: dict\n) -> Optional[ChatModel]:\n    \"\"\"Update or insert message in chat.\"\"\"\n```\n\n### Message Sanitization\n\nBefore database operations, message content is sanitized to prevent issues:\n\n```python\ndef sanitize_text_for_db(text: str) -> str:\n    \"\"\"Remove null characters and invalid sequences.\"\"\"\n```\n\nThis ensures database compatibility and prevents JSON parsing errors when loading chat history.\n\n资料来源：[backend/open_webui/models/chats.py:100-180]()\n\n## Model Operations\n\n### CRUD Operations\n\n| Operation | Method | Description |\n|-----------|--------|-------------|\n| Create | `Model.create()` | Insert new record |\n| Read | `Model.get()` | Retrieve by ID |\n| Update | `Model.update()` | Modify existing record |\n| Delete | `Model.delete()` | Remove record |\n| List | `Model.get_all()` | Retrieve all records |\n| Filter | `Model.filter_by()` | Query with conditions |\n\n### Async Database Access\n\nOpen WebUI uses async database operations for improved performance:\n\n```python\nasync def get_chat_by_id(id: str) -> Optional[ChatModel]:\n    \"\"\"Async retrieval of chat by ID.\"\"\"\n    \nasync def upsert_message_to_chat_by_id_and_message_id(\n    id: str, \n    message_id: str, \n    message: dict\n) -> Optional[ChatModel]:\n    \"\"\"Async upsert operation.\"\"\"\n```\n\n## Data Storage Locations\n\n### Database File\n\nBy default, Open WebUI uses SQLite stored at:\n\n```\nbackend/data/webui.db\n```\n\n### File Storage\n\nUploaded files are 
stored in:\n\n```\nbackend/data/uploads/\n```\n\n### Configuration\n\nDatabase and storage paths are configured via environment variables:\n\n| Variable | Default | Description |\n|----------|---------|-------------|\n| `DATA_DIR` | `backend/data` | Base data directory |\n| `DATABASE_URL` | `sqlite:///data/webui.db` | Database connection string |\n\n资料来源：[backend/open_webui/env.py:1-80]()\n\n## Migration System\n\nOpen WebUI uses Alembic for database migrations:\n\n```mermaid\ngraph LR\n    A[Migration Scripts] --> B[Alembic]\n    B --> C[Database Schema]\n    C --> D[Model Definitions]\n    D --> E[Application]\n```\n\nMigration files are located in:\n\n```\nbackend/open_webui/migrations/versions/\n```\n\n资料来源：[backend/open_webui/migrations/versions/7e5b5dc7342b_init.py:1-500]()\n\n## Summary\n\nThe Open WebUI data model layer provides a robust foundation for:\n\n1. **User Management**: Complete user lifecycle including authentication and authorization\n2. **Chat Persistence**: Flexible JSON-based chat storage with normalized message tables\n3. **Knowledge Management**: RAG-capable knowledge bases for document retrieval\n4. **File Handling**: Secure file upload and storage with metadata tracking\n5. 
**Access Control**: Fine-grained permissions through groups and resource grants\n\nThe architecture prioritizes:\n- **Performance**: Async database operations and message normalization\n- **Flexibility**: JSON-based storage for variable content structures\n- **Security**: Text sanitization and access control enforcement\n- **Extensibility**: Modular model design for future features\n\n---\n\n<a id='api-routers'></a>\n\n## API Routers\n\n### 相关页面\n\n相关主题：[Architecture Overview](#architecture-overview), [Data Models](#data-models)\n\n<details>\n<summary>相关源码文件</summary>\n\n以下源码文件用于生成本页说明：\n\n- [backend/open_webui/main.py](https://github.com/open-webui/open-webui/blob/main/backend/open_webui/main.py)\n- [backend/open_webui/routers/pipelines.py](https://github.com/open-webui/open-webui/blob/main/backend/open_webui/routers/pipelines.py)\n- [backend/open_webui/routers/tasks.py](https://github.com/open-webui/open-webui/blob/main/backend/open_webui/routers/tasks.py)\n- [backend/open_webui/routers/prompts.py](https://github.com/open-webui/open-webui/blob/main/backend/open_webui/routers/prompts.py)\n- [backend/open_webui/utils/middleware.py](https://github.com/open-webui/open-webui/blob/main/backend/open_webui/utils/middleware.py)\n- [backend/open_webui/utils/asgi_middleware.py](https://github.com/open-webui/open-webui/blob/main/backend/open_webui/utils/asgi_middleware.py)\n</details>\n\n# API Routers\n\n## Overview\n\nThe Open WebUI project implements a comprehensive API routing architecture built on **FastAPI**. API Routers serve as the primary mechanism for organizing and exposing RESTful endpoints across the application. 
Each router encapsulates a specific functional domain (e.g., authentication, chat management, file handling, knowledge bases) and is mounted at a defined prefix under the `/api/v1/` base path.\n\nThe router architecture follows a modular design pattern where related endpoints are grouped into dedicated router modules located in `backend/open_webui/routers/`. This separation of concerns enables maintainability, testability, and clear API boundaries.\n\n资料来源：[backend/open_webui/main.py:1-60]()\n\n## Router Registration Architecture\n\n### Central Router Assembly\n\nAll routers are registered in `backend/open_webui/main.py` using FastAPI's `include_router()` method. Each router receives a unique URL prefix and OpenAPI tag for documentation and routing purposes.\n\n```python\napp.include_router(auths.router, prefix='/api/v1/auths', tags=['auths'])\napp.include_router(users.router, prefix='/api/v1/users', tags=['users'])\napp.include_router(chats.router, prefix='/api/v1/chats', tags=['chats'])\napp.include_router(models.router, prefix='/api/v1/models', tags=['models'])\napp.include_router(knowledge.router, prefix='/api/v1/knowledge', tags=['knowledge'])\napp.include_router(files.router, prefix='/api/v1/files', tags=['files'])\n```\n\n资料来源：[backend/open_webui/main.py:35-55]()\n\n### Router Prefix Mapping\n\n| Functional Domain | Router Module | API Prefix | OpenAPI Tag |\n|-------------------|---------------|------------|-------------|\n| Authentication | `auths` | `/api/v1/auths` | `auths` |\n| User Management | `users` | `/api/v1/users` | `users` |\n| Chat Operations | `chats` | `/api/v1/chats` | `chats` |\n| Model Management | `models` | `/api/v1/models` | `models` |\n| Knowledge Bases | `knowledge` | `/api/v1/knowledge` | `knowledge` |\n| File Handling | `files` | `/api/v1/files` | `files` |\n| Prompts | `prompts` | `/api/v1/prompts` | `prompts` |\n| Tools | `tools` | `/api/v1/tools` | `tools` |\n| Skills | `skills` | `/api/v1/skills` | `skills` |\n| Memories | 
`memories` | `/api/v1/memories` | `memories` |\n| Folders | `folders` | `/api/v1/folders` | `folders` |\n| Groups | `groups` | `/api/v1/groups` | `groups` |\n| Functions | `functions` | `/api/v1/functions` | `functions` |\n| Evaluations | `evaluations` | `/api/v1/evaluations` | `evaluations` |\n| Audio Processing | `audio` | `/api/v1/audio` | `audio` |\n| Image Processing | `images` | `/api/v1/images` | `images` |\n| Retrieval | `retrieval` | `/api/v1/retrieval` | `retrieval` |\n| Configurations | `configs` | `/api/v1/configs` | `configs` |\n| Channels | `channels` | `/api/v1/channels` | `channels` |\n| Notes | `notes` | `/api/v1/notes` | `notes` |\n| Tasks | `tasks` | `/api/v1/tasks` | `tasks` |\n| Utils | `utils` | `/api/v1/utils` | `utils` |\n| Terminals | `terminals` | `/api/v1/terminals` | `terminals` |\n| Automations | `automations` | `/api/v1/automations` | `automations` |\n| Calendars | `calendar` | `/api/v1/calendars` | `calendars` |\n| SCIM Identity | `scim` | `/api/v1/scim/v2` | `scim` |\n| Analytics | `analytics` | `/api/v1/analytics` | `analytics` |\n\n资料来源：[backend/open_webui/main.py:35-65]()\n\n## Request Flow and Middleware Pipeline\n\n### Middleware Stack\n\nThe API request lifecycle involves multiple middleware layers that process requests before they reach individual route handlers.\n\n```mermaid\ngraph TD\n    A[HTTP Request] --> B[ASGI Middleware]\n    B --> C[Authentication Middleware]\n    C --> D[Token Extraction<br/>API Key/Cookie/Bearer]\n    D --> E[Audit Logging Middleware<br/>Conditional]\n    E --> F[Pipeline Inlet Filter]\n    F --> G[Route Handler]\n    G --> H[Pipeline Outlet Filter]\n    H --> I[Response]\n```\n\n资料来源：[backend/open_webui/utils/asgi_middleware.py:1-30]()\n\n### Authentication Middleware\n\nThe ASGI middleware (`asgi_middleware.py`) handles credential extraction from multiple sources:\n\n1. **Bearer Token**: Extracted from `Authorization` header\n2. **Cookie Token**: Retrieved from `token` cookie\n3. 
**API Key**: Retrieved from custom header specified by `CUSTOM_API_KEY_HEADER` environment variable\n\nThe extracted credentials are stored in `request.state.token` for downstream route handlers.\n\n资料来源：[backend/open_webui/utils/asgi_middleware.py:12-40]()\n\n### Pipeline Filter System\n\nThe `pipelines.py` module implements a filter system that allows middleware-like processing at the inlet and outlet of request handling. This enables transformation and validation of payloads through user-defined pipeline stages.\n\n```python\ndef get_sorted_filters(model_id, models):\n    filters = [\n        model\n        for model in models.values()\n        if 'pipeline' in model\n        and 'type' in model['pipeline']\n        and model['pipeline']['type'] == 'filter'\n        and (\n            model['pipeline']['pipelines'] == ['*']\n            or any(model_id == target_model_id for target_model_id in model['pipeline']['pipelines'])\n        )\n    ]\n    sorted_filters = sorted(filters, key=lambda x: x['pipeline']['priority'])\n    return sorted_filters\n```\n\n资料来源：[backend/open_webui/routers/pipelines.py:30-45]()\n\n## Router Module Structure\n\n### Standard Router Pattern\n\nEach router module follows a consistent pattern:\n\n```python\nfrom fastapi import APIRouter, Depends, HTTPException, Request, status\nfrom pydantic import BaseModel\nfrom typing import Optional\nfrom open_webui.utils.auth import get_verified_user, get_admin_user\n\nrouter = APIRouter()\n\nclass EndpointForm(BaseModel):\n    # Request payload schema\n\n@router.post('/endpoint')\nasync def endpoint_handler(\n    request: Request,\n    form_data: EndpointForm,\n    user=Depends(get_verified_user)\n):\n    # Handler implementation\n```\n\n资料来源：[backend/open_webui/routers/prompts.py:1-30]()\n\n### Authentication Dependencies\n\n| Dependency | Purpose | Access Level |\n|------------|---------|--------------|\n| `get_verified_user` | Validates authenticated user | Authenticated users |\n| 
`get_admin_user` | Validates admin privileges | Admin only |\n\n资料来源：[backend/open_webui/routers/prompts.py:25-30]()\n\n## Core Router Modules\n\n### Tasks Router\n\nThe tasks router (`tasks.py`) handles asynchronous operations for chat-related tasks including title generation, follow-up generation, query generation, and image prompt generation.\n\n**Task Types Available:**\n\n| Task | Purpose | Template Function |\n|------|---------|-------------------|\n| Title Generation | Create chat titles | `title_generation_template()` |\n| Follow-up Generation | Generate follow-up questions | `follow_up_generation_template()` |\n| Query Generation | Create search queries | `query_generation_template()` |\n| Image Prompt Generation | Generate image prompts | `image_prompt_generation_template()` |\n| Autocomplete | Autocomplete suggestions | `autocomplete_generation_template()` |\n| Tags Generation | Generate content tags | `tags_generation_template()` |\n| Emoji Generation | Generate emoji suggestions | `emoji_generation_template()` |\n| MoA Response | Mixture of Agents response | `moa_response_generation_template()` |\n\n资料来源：[backend/open_webui/routers/tasks.py:1-40]()\n\n### Prompts Router\n\nThe prompts router manages user-defined prompt templates with command-based activation. 
It implements access control based on user roles and resource grants.\n\n**Access Control Logic:**\n\n```python\nwrite_access=(\n    (user.role == 'admin' and BYPASS_ADMIN_ACCESS_CONTROL)\n    or user.id == prompt.user_id\n    or await AccessGrants.has_access(\n        user_id=user.id,\n        resource_type='prompt',\n        resource_id=prompt.id,\n        permission='write',\n        db=db,\n    )\n)\n```\n\n资料来源：[backend/open_webui/routers/prompts.py:50-70]()\n\n## Conditional Router Loading\n\nSome routers are conditionally loaded based on configuration flags:\n\n### SCIM Router\n\nThe SCIM 2.0 router for identity management is enabled via the `ENABLE_SCIM` environment variable:\n\n```python\nif ENABLE_SCIM:\n    app.include_router(scim.router, prefix='/api/v1/scim/v2', tags=['scim'])\n```\n\n### Analytics Router\n\nThe analytics router is loaded when admin analytics are enabled:\n\n```python\nif ENABLE_ADMIN_ANALYTICS:\n    app.include_router(analytics.router, prefix='/api/v1/analytics', tags=['analytics'])\n```\n\n### Audit Logging Middleware\n\nAudit logging is conditionally applied based on the `AUDIT_LOG_LEVEL` configuration:\n\n```python\ntry:\n    audit_level = AuditLevel(AUDIT_LOG_LEVEL)\nexcept ValueError as e:\n    logger.error(f'Invalid audit level: {AUDIT_LOG_LEVEL}. 
Error: {e}')\n    audit_level = AuditLevel.NONE\n\nif audit_level != AuditLevel.NONE:\n    app.add_middleware(\n        AuditLoggingMiddleware,\n        audit_level=audit_level,\n        excluded_paths=AUDIT_EXCLUDED_PATHS,\n    )\n```\n\n资料来源：[backend/open_webui/main.py:55-70]()\n\n## Utility Functions and Helpers\n\n### Middleware Utility Imports\n\nThe `middleware.py` module aggregates utility functions from multiple sources for use by route handlers:\n\n```python\nfrom open_webui.utils.chat import generate_chat_completion\nfrom open_webui.utils.task import get_task_model_id, rag_template\nfrom open_webui.utils.tools import get_tools, get_terminal_tools\nfrom open_webui.utils.misc import (\n    deep_update, extract_urls, get_message_list,\n    add_or_update_system_message, merge_system_messages\n)\nfrom open_webui.utils.files import (\n    convert_markdown_base64_images,\n    get_file_url_from_base64,\n    get_image_base64_from_url,\n)\n```\n\n资料来源：[backend/open_webui/utils/middleware.py:1-35]()\n\n## Security Architecture\n\n### Token-Based Authentication\n\n```mermaid\nsequenceDiagram\n    participant C as Client\n    participant M as ASGI Middleware\n    participant R as Route Handler\n    \n    C->>M: Request + Credentials\n    M->>M: Extract Bearer/Cookie/API-Key\n    M->>R: Set request.state.token\n    R->>R: Verify with get_verified_user\n    alt Invalid Token\n        R-->>C: 401 Unauthorized\n    else Valid Token\n        R->>R: Process Request\n        R-->>C: Response\n    end\n```\n\n资料来源：[backend/open_webui/utils/asgi_middleware.py:20-50]()\n\n## Frontend API Integration\n\nThe frontend TypeScript codebase in `src/lib/apis/` provides typed interfaces for all major routers:\n\n| Router Domain | Frontend Module |\n|--------------|-----------------|\n| Knowledge Bases | `src/lib/apis/knowledge/index.ts` |\n| Skills | `src/lib/apis/skills/index.ts` |\n| OpenAI Config | `src/lib/apis/openai/index.ts` |\n| Tool Servers | `src/lib/apis/index.ts` |\n\nThe 
frontend uses `WEBUI_API_BASE_URL` constant (`${WEBUI_BASE_URL}/api/v1`) as the base for all API calls.\n\n资料来源：[src/lib/constants.ts:1-20]()\n\n## Summary\n\nThe API Routers system in Open WebUI implements a well-organized, FastAPI-based architecture with:\n\n- **Modular Design**: 26+ functional router modules organized by domain\n- **Consistent Patterns**: Standardized router structure with Pydantic models and authentication dependencies\n- **Middleware Pipeline**: Request processing through ASGI middleware, authentication, audit logging, and pipeline filters\n- **Conditional Loading**: Feature flags for SCIM, analytics, and audit logging\n- **Access Control**: Role-based and grant-based authorization at the router and endpoint levels\n- **Frontend Integration**: TypeScript API clients aligned with backend router structure\n\n---\n\n<a id='retrieval-system'></a>\n\n## Retrieval System\n\n### 相关页面\n\n相关主题：[Ollama Integration](#ollama-integration), [RAG Pipeline](#rag-pipeline)\n\n<details>\n<summary>相关源码文件</summary>\n\n以下源码文件用于生成本页说明：\n\n- [backend/open_webui/retrieval/vector/main.py](https://github.com/open-webui/open-webui/blob/main/backend/open_webui/retrieval/vector/main.py)\n- [backend/open_webui/retrieval/vector/factory.py](https://github.com/open-webui/open-webui/blob/main/backend/open_webui/retrieval/vector/factory.py)\n- [backend/open_webui/retrieval/web/main.py](https://github.com/open-webui/open-webui/blob/main/backend/open_webui/retrieval/web/main.py)\n- [backend/open_webui/retrieval/loaders/main.py](https://github.com/open-webui/open-webui/blob/main/backend/open_webui/retrieval/loaders/main.py)\n- [backend/open_webui/retrieval/vector/dbs/pgvector.py](https://github.com/open-webui/open-webui/blob/main/backend/open_webui/retrieval/vector/dbs/pgvector.py)\n- [backend/open_webui/retrieval/web/duckduckgo.py](https://github.com/open-webui/open-webui/blob/main/backend/open_webui/retrieval/web/duckduckgo.py)\n</details>\n\n# Retrieval System\n\nThe Retrieval 
System in Open WebUI is a comprehensive framework for document loading, web searching, and vector-based information retrieval. It enables users to ingest documents, perform web searches, and leverage retrieval-augmented generation (RAG) capabilities to enhance LLM responses with contextual information.\n\n## Architecture Overview\n\nThe retrieval system is composed of three primary subsystems:\n\n```mermaid\ngraph TD\n    subgraph Retrieval[\"Retrieval System\"]\n        subgraph Loaders[\"Document Loaders\"]\n            PDF[PDF Loader]\n            OCR[OCR Loaders]\n            WebLoader[Web Loader]\n        end\n        \n        subgraph WebSearch[\"Web Search Providers\"]\n            SearXNG[SearXNG]\n            DuckDuckGo[DuckDuckGo]\n            GooglePSE[Google PSE]\n            Brave[Brave Search]\n            YouDC[You.com]\n        end\n        \n        subgraph VectorDB[\"Vector Stores\"]\n            Chroma[Chroma]\n            FAISS[FAISS]\n            Milvus[Milvus]\n            Qdrant[Qdrant]\n            PGVector[pgvector]\n        end\n    end\n    \n    API[API Router] --> Loaders\n    API --> WebSearch\n    API --> VectorDB\n```\n\n### Core Components\n\n| Component | Purpose | Location |\n|-----------|---------|----------|\n| **Document Loaders** | Ingest various file formats into the system | `backend/open_webui/retrieval/loaders/` |\n| **Web Search** | Query external search engines for information | `backend/open_webui/retrieval/web/` |\n| **Vector Database** | Store and query embeddings for semantic search | `backend/open_webui/retrieval/vector/` |\n| **API Router** | Expose retrieval endpoints to the frontend | `backend/open_webui/routers/retrieval.py` |\n\n## Document Loaders\n\nThe document loader subsystem handles ingestion of various file formats into the retrieval pipeline.\n\n### Supported File Types\n\nThe system supports the following file types for upload and processing:\n\n| Category | MIME Types |\n|----------|------------|\n| 
Documents | PDF, EPUB, DOCX, TXT, CSV, XML, HTML, Markdown |\n| Code | Python, JavaScript, CSS |\n| Audio | MP3, WAV |\n| Images | PNG, JPG (with OCR) |\n\n资料来源：[src/lib/constants.ts](https://github.com/open-webui/open-webui/blob/main/src/lib/constants.ts)\n\n### OCR Processing\n\nFor scanned documents and images, Open WebUI supports multiple OCR engines:\n\n**PaddleOCR VL** is one of the supported OCR backends. It processes documents page-by-page, extracting text and returning structured `Document` objects with metadata.\n\n```python\n# Processing flow in paddleocr_vl.py\nfor i, page in enumerate(doc):\n    markdown_text = run_paddle_ocr(page)\n    cleaned_content = clean_markdown(markdown_text)\n    \n    documents.append(\n        Document(\n            page_content=cleaned_content,\n            metadata={\n                'page': i,\n                'page_label': i + 1,\n                'total_pages': total_pages,\n                'file_name': self.file_name,\n                'processing_engine': 'paddleocr-vl',\n            }\n        )\n    )\n```\n\n资料来源：[backend/open_webui/retrieval/loaders/paddleocr_vl.py](https://github.com/open-webui/open-webui/blob/main/backend/open_webui/retrieval/loaders/paddleocr_vl.py)\n\n### Configuration Options\n\nThe retrieval loaders are configured through the following environment variables:\n\n| Variable | Description |\n|----------|-------------|\n| `EXTERNAL_DOCUMENT_LOADER_URL` | URL for external document loader service |\n| `EXTERNAL_DOCUMENT_LOADER_API_KEY` | API key for external loader |\n| `TIKA_SERVER_URL` | Apache Tika server endpoint |\n| `DOCLING_SERVER_URL` | Docling OCR server endpoint |\n| `DOCLING_API_KEY` | API key for Docling service |\n| `DOCLING_PARAMS` | Additional Docling parameters |\n| `PDF_EXTRACT_IMAGES` | Enable image extraction from PDFs |\n| `PDF_LOADER_MODE` | PDF loading mode configuration |\n| `DOCUMENT_INTELLIGENCE_ENDPOINT` | Azure Document Intelligence endpoint |\n| 
`DOCUMENT_INTELLIGENCE_KEY` | Azure Document Intelligence API key |\n| `DOCUMENT_INTELLIGENCE_MODEL` | Model identifier for document processing |\n| `MISTRAL_OCR_API_BASE_URL` | Mistral OCR API base URL |\n| `MISTRAL_OCR_API_KEY` | Mistral OCR API key |\n| `PADDLEOCR_VL_BASE_URL` | PaddleOCR VL server URL |\n| `PADDLEOCR_VL_TOKEN` | Authentication token for PaddleOCR VL |\n| `MINERU_API_MODE` | MinerU API mode |\n| `MINERU_API_URL` | MinerU API endpoint |\n| `MINERU_API_KEY` | MinerU API key |\n| `MINERU_API_TIMEOUT` | MinerU API timeout in seconds |\n\n资料来源：[backend/open_webui/retrieval/utils.py](https://github.com/open-webui/open-webui/blob/main/backend/open_webui/retrieval/utils.py)\n\n## Web Search\n\nThe web search subsystem provides integration with multiple search providers for retrieving up-to-date information from the internet.\n\n### Supported Providers\n\n| Provider | Implementation | Features |\n|----------|----------------|----------|\n| **SearXNG** | Self-hosted meta-search engine | Privacy-focused, aggregated results |\n| **DuckDuckGo** | Public search API | No API key required |\n| **Google PSE** | Google Programmable Search | Requires API key |\n| **Brave Search** | Privacy-focused search | API-based |\n| **You.com** | AI-enhanced search | Rich snippets and descriptions |\n| **Tavily** | AI-optimized search | Structured outputs |\n| **Perplexity** | LLM-optimized search | Citations included |\n\n### Search Result Structure\n\nSearch results are normalized into a common `SearchResult` format:\n\n```python\n@dataclass\nclass SearchResult:\n    link: str           # URL of the result\n    title: str          # Title of the page\n    snippet: str        # Text snippet/summary\n```\n\n#### You.com Implementation\n\nThe You.com provider demonstrates the search result normalization:\n\n```python\ndef _build_snippet(result: dict) -> str:\n    \"\"\"Combine the description and snippets list into a single string.\"\"\"\n    parts: list[str] = []\n    \n    
description = result.get('description')\n    if description:\n        parts.append(description)\n    \n    snippets = result.get('snippets')\n    if snippets and isinstance(snippets, list):\n        parts.extend(snippets)\n    \n    return '\\n\\n'.join(parts)\n```\n\n资料来源：[backend/open_webui/retrieval/web/ydc.py](https://github.com/open-webui/open-webui/blob/main/backend/open_webui/retrieval/web/ydc.py)\n\n### Web Loader Configuration\n\nThe web loader for content extraction supports the following configuration:\n\n| Setting | Description |\n|---------|-------------|\n| `ENABLE_WEB_LOADER_SSL_VERIFICATION` | Enable SSL certificate verification |\n| `WEB_LOADER_CONCURRENT_REQUESTS` | Rate limiting for concurrent requests |\n| `WEB_SEARCH_TRUST_ENV` | Trust environment variables for requests |\n| `BYPASS_WEB_SEARCH_WEB_LOADER` | Skip content extraction, use snippets only |\n| `BYPASS_WEB_SEARCH_EMBEDDING_AND_RETRIEVAL` | Skip embedding and retrieval stages |\n\n资料来源：[backend/open_webui/routers/retrieval.py](https://github.com/open-webui/open-webui/blob/main/backend/open_webui/routers/retrieval.py)\n\n### Web Search Flow\n\n```mermaid\nsequenceDiagram\n    participant Client\n    participant API as /api/v1/retrieval/web/search\n    participant SearchProvider as Search Provider\n    participant WebLoader as Web Loader\n    participant VectorDB as Vector Store\n    \n    Client->>API: POST /search {query, urls}\n    API->>SearchProvider: Execute search queries\n    SearchProvider-->>API: Raw search results\n    API->>WebLoader: Extract content from URLs\n    WebLoader-->>API: Document objects\n    API->>VectorDB: Store documents\n    VectorDB-->>API: Collection confirmation\n    API-->>Client: {status, collection_name, files}\n```\n\n## Vector Database Integration\n\nThe vector database subsystem handles storage and retrieval of document embeddings for semantic search.\n\n### Supported Vector Stores\n\n| Database | Implementation | Use Case 
|\n|----------|----------------|----------|\n| **Chroma** | `chromadb` | Lightweight, local-first |\n| **FAISS** | `faiss-cpu`/`faiss-gpu` | Large-scale similarity search |\n| **Milvus** | `pymilvus` | Cloud-native, scalable |\n| **Qdrant** | `qdrant-client` | High-performance, hybrid search |\n| **pgvector** | `psycopg2` | PostgreSQL extension for vectors |\n\n### Vector Factory Pattern\n\nThe system uses a factory pattern to instantiate vector databases:\n\n```python\nclass VectorStoreFactory:\n    @staticmethod\n    def get_vector_store(config: Config) -> VectorStore:\n        provider = config.VECTOR_DB\n        if provider == \"chromadb\":\n            return ChromaDBStore()\n        elif provider == \"pgvector\":\n            return PGVectorStore()\n        # ... other providers\n```\n\n资料来源：[backend/open_webui/retrieval/vector/factory.py](https://github.com/open-webui/open-webui/blob/main/backend/open_webui/retrieval/vector/factory.py)\n\n### pgvector Implementation\n\nFor PostgreSQL-based vector storage:\n\n```python\nclass PGVectorStore:\n    def __init__(self, connection_string: str, embedding_dim: int = 1536):\n        self.conn = psycopg2.connect(connection_string)\n        self.embedding_dim = embedding_dim\n    \n    def insert(self, collection: str, documents: list[Document]):\n        # Insert vectors with pgvector extension\n```\n\n资料来源：[backend/open_webui/retrieval/vector/dbs/pgvector.py](https://github.com/open-webui/open-webui/blob/main/backend/open_webui/retrieval/vector/dbs/pgvector.py)\n\n## API Endpoints\n\nThe retrieval system exposes REST API endpoints through the router.\n\n### Web Search Endpoint\n\n```\nPOST /api/v1/retrieval/web/search\n```\n\n**Request Body:**\n```json\n{\n  \"query\": \"search query string\",\n  \"collection_name\": \"optional_collection\",\n  \"retrieval_enabled\": true,\n  \"k\": 5\n}\n```\n\n**Response:**\n```json\n{\n  \"status\": true,\n  \"collection_name\": \"web_20240115_abc123\",\n  \"filenames\": 
[\"python.org\", \"wikipedia.org\"],\n  \"content\": \"extracted content...\",\n  \"sources\": [\n    {\"url\": \"https://python.org\", \"content\": \"...\"}\n  ]\n}\n```\n\n### File Upload and Processing\n\n```\nPOST /api/v1/retrieval/upload\n```\n\nHandles file uploads, runs document loaders, and stores in the configured vector database.\n\n资料来源：[backend/open_webui/routers/retrieval.py](https://github.com/open-webui/open-webui/blob/main/backend/open_webui/routers/retrieval.py)\n\n## Configuration Reference\n\n### Environment Variables\n\n| Variable | Default | Description |\n|----------|---------|-------------|\n| `VECTOR_DB` | `chroma` | Vector database provider |\n| `RAG_TOP_K` | `5` | Number of top results to retrieve |\n| `RAG_RELEVANCE_THRESHOLD` | `0.0` | Minimum relevance score threshold |\n| `WEB_SEARCH_ENABLED` | `True` | Enable web search functionality |\n\n### Frontend API URLs\n\nThe frontend communicates with these API base URLs:\n\n```typescript\nexport const RETRIEVAL_API_BASE_URL = `${WEBUI_BASE_URL}/api/v1/retrieval`;\n```\n\n资料来源：[src/lib/constants.ts](https://github.com/open-webui/open-webui/blob/main/src/lib/constants.ts)\n\n## Data Flow\n\n```mermaid\ngraph LR\n    subgraph Input[\"Input Sources\"]\n        Files[Uploaded Files]\n        WebSearch[Web Search]\n        URLs[Direct URLs]\n    end\n    \n    subgraph Processing[\"Processing Pipeline\"]\n        Loaders[Document Loaders]\n        Chunks[Text Chunking]\n        Embed[Embedding Model]\n    end\n    \n    subgraph Storage[\"Storage\"]\n        Vector[Vector Store]\n        Meta[Metadata Store]\n    end\n    \n    subgraph Query[\"Query Processing\"]\n        QueryEmb[Query Embedding]\n        Similarity[Similarity Search]\n        Rerank[Reranking]\n    end\n    \n    Files --> Loaders\n    WebSearch --> Loaders\n    URLs --> Loaders\n    Loaders --> Chunks\n    Chunks --> Embed\n    Embed --> Vector\n    \n    Query --> QueryEmb\n    QueryEmb --> Similarity\n    Similarity --> 
Rerank\n    Rerank --> Context[LLM Context]\n```\n\n## Error Handling\n\nThe retrieval system implements comprehensive error handling:\n\n| Error Type | HTTP Code | Message |\n|------------|-----------|---------|\n| Web search failure | 400 | `WEB_SEARCH_ERROR` with exception details |\n| No results found | 404 | `No results found from web search` |\n| Loader failure | 500 | Loader-specific error message |\n| Vector store error | 500 | Database connection or query errors |\n\n资料来源：[backend/open_webui/routers/retrieval.py:1-50](https://github.com/open-webui/open-webui/blob/main/backend/open_webui/routers/retrieval.py)\n\n## Extension Points\n\nThe retrieval system is designed for extensibility:\n\n1. **Custom Document Loaders**: Implement the `DocumentLoader` interface in `loaders/`\n2. **New Search Providers**: Add provider class in `web/` following the `SearchProvider` protocol\n3. **Vector Store Adapters**: Implement `VectorStore` abstract class in `vector/dbs/`\n4. **Embedding Models**: Configure through `EMBEDDING_MODEL` setting\n\n---\n\n<a id='frontend-structure'></a>\n\n## Frontend Structure\n\n### 相关页面\n\n相关主题：[Chat Interface](#chat-interface), [Architecture Overview](#architecture-overview)\n\n<details>\n<summary>相关源码文件</summary>\n\n以下源码文件用于生成本页说明：\n\n- [src/lib/constants.ts](https://github.com/open-webui/open-webui/blob/main/src/lib/constants.ts)\n- [src/lib/utils/index.ts](https://github.com/open-webui/open-webui/blob/main/src/lib/utils/index.ts)\n- [src/lib/utils/codeHighlight.ts](https://github.com/open-webui/open-webui/blob/main/src/lib/utils/codeHighlight.ts)\n- [src/lib/apis/knowledge/index.ts](https://github.com/open-webui/open-webui/blob/main/src/lib/apis/knowledge/index.ts)\n- [src/lib/apis/skills/index.ts](https://github.com/open-webui/open-webui/blob/main/src/lib/apis/skills/index.ts)\n- [src/app.html](https://github.com/open-webui/open-webui/blob/main/src/app.html)\n</details>\n\n# Frontend Structure\n\n## Overview\n\nOpen WebUI uses a modern 
SvelteKit-based frontend architecture built with TypeScript. The frontend is responsible for the user interface, real-time chat interactions, multimedia handling, and communication with the backend API. The application runs as a Single Page Application (SPA) with server-side rendering capabilities provided by SvelteKit.\n\n**技术栈**\n\n| Layer | Technology |\n|-------|------------|\n| Framework | SvelteKit |\n| Language | TypeScript |\n| Styling | CSS (with custom properties) |\n| State Management | Svelte Stores |\n| API Communication | Fetch API |\n| Internationalization | i18n module |\n| Code Highlighting | Shiki |\n| Build Tool | Vite (via SvelteKit) |\n\n资料来源：[src/lib/constants.ts:1]()\n\n---\n\n## Directory Structure\n\n```\nsrc/\n├── lib/\n│   ├── components/       # Reusable UI components\n│   ├── apis/              # API client modules\n│   ├── stores/            # Svelte stores for state management\n│   ├── utils/             # Utility functions\n│   ├── i18n/              # Internationalization\n│   └── constants.ts       # Application constants\n├── routes/                # SvelteKit page routes\n└── app.html               # HTML template\n```\n\n资料来源：[src/lib/index.ts]()\n\n---\n\n## Core Modules\n\n### Constants Module\n\nThe `constants.ts` file centralizes all application-wide configuration values and constants.\n\n```typescript\nexport const APP_NAME = 'Open WebUI';\nexport const WEBUI_HOSTNAME = browser ? (dev ? `${location.hostname}:8080` : ``) : '';\nexport const WEBUI_BASE_URL = browser ? (dev ? 
`http://${WEBUI_HOSTNAME}` : ``) : ``;\nexport const WEBUI_API_BASE_URL = `${WEBUI_BASE_URL}/api/v1`;\n```\n\n**Key Constants**\n\n| Constant | Purpose |\n|----------|---------|\n| `APP_NAME` | Application display name |\n| `WEBUI_HOSTNAME` | Dynamic hostname resolution |\n| `WEBUI_BASE_URL` | Base URL for the application |\n| `WEBUI_API_BASE_URL` | Backend API endpoint |\n| `OLLAMA_API_BASE_URL` | Ollama integration endpoint |\n| `OPENAI_API_BASE_URL` | OpenAI-compatible API endpoint |\n| `AUDIO_API_BASE_URL` | Audio processing endpoint |\n| `RETRIEVAL_API_BASE_URL` | RAG retrieval endpoint |\n| `SUPPORTED_FILE_TYPE` | List of accepted file types |\n\n资料来源：[src/lib/constants.ts:1-20]()\n\n**Supported File Types**\n\nThe application supports the following file types for uploads:\n\n| Category | Types |\n|----------|-------|\n| Documents | PDF, EPUB, DOCX, plain text |\n| Code | Python, CSS, JavaScript, Markdown |\n| Data | CSV, XML, JSON |\n| Media | MP3, WAV (audio) |\n\n### API Client Architecture\n\nThe frontend uses modular API clients located in `src/lib/apis/`. 
Each domain has its own dedicated API module.\n\n#### Knowledge API Client\n\nThe Knowledge API client handles knowledge base operations:\n\n```typescript\nexport const createNewKnowledge = async (\n    token: string,\n    name: string,\n    description: string,\n    accessGrants: object[]\n) => {\n    const res = await fetch(`${WEBUI_API_BASE_URL}/knowledge/create`, {\n        method: 'POST',\n        headers: {\n            Accept: 'application/json',\n            'Content-Type': 'application/json',\n            authorization: `Bearer ${token}`\n        },\n        body: JSON.stringify({\n            name: name,\n            description: description,\n            access_grants: accessGrants\n        })\n    });\n    return res.json();\n};\n```\n\n#### Skills API Client\n\nThe Skills API client manages skill-related operations with consistent error handling:\n\n```typescript\nexport const deleteSkillById = async (token: string, id: string) => {\n    const res = await fetch(`${WEBUI_API_BASE_URL}/skills/id/${id}/delete`, {\n        method: 'DELETE',\n        headers: {\n            Accept: 'application/json',\n            'Content-Type': 'application/json',\n            authorization: `Bearer ${token}`\n        }\n    });\n    return res.json();\n};\n```\n\n资料来源：[src/lib/apis/knowledge/index.ts:1-45]()\n资料来源：[src/lib/apis/skills/index.ts]()\n\n#### API Communication Pattern\n\nAll API clients follow a consistent pattern:\n\n1. Construct URL with query parameters\n2. Set required headers (Content-Type, Authorization)\n3. Handle responses with proper error handling\n4. 
Return JSON data or throw detailed errors\n\n---\n\n## Content Processing Pipeline\n\n### Response Content Utilities\n\nThe frontend includes sophisticated text processing utilities for handling various content types:\n\n```typescript\nexport const sanitizeResponseContent = (content: string) => {\n    return content\n        .replace(/<\\|[a-z]*$/, '')\n        .replace(/<\\|[a-z]+\\|$/, '')\n        .replace(/<$/, '')\n        .replaceAll(/<\\|[a-z]+\\|>/g, ' ')\n        .replaceAll('<', '&lt;')\n        .replaceAll('>', '&gt;')\n        .trim();\n};\n```\n\n### Text Extraction Utilities\n\nThe frontend provides text extraction for audio processing:\n\n| Function | Purpose |\n|----------|---------|\n| `extractSentencesForAudio()` | Splits text into audio-ready sentences |\n| `extractParagraphsForAudio()` | Extracts paragraphs for audio playback |\n\n资料来源：[src/lib/utils/index.ts:150-200]()\n\n---\n\n## Code Highlighting System\n\n### Shiki Integration\n\nOpen WebUI uses Shiki for syntax highlighting with a bundled language configuration:\n\n```typescript\nconst KNOWN_LANG_IDS = new Set([\n    'ada', 'awk', 'bat', 'c', 'cmake', 'clojure', 'cpp', 'crystal',\n    'css', 'd', 'dart', 'diff', 'elixir', 'elm', 'erlang', 'fish',\n    'gleam', 'glsl', 'go', 'groovy', 'haml', 'haskell', 'hlsl',\n    'html', 'ini', 'java', 'javascript', 'json', 'json5', 'jsonc',\n    'jsx', 'julia', 'kotlin', 'latex', 'less', 'lisp', 'log', 'lua',\n    'make', 'markdown', 'matlab', 'mdx', 'mojo', 'nim', 'nix',\n    'nushell', 'ocaml', 'pascal', 'perl', 'php', 'postcss',\n    'powershell', 'prisma', 'python', 'r', 'ruby', 'rust', 'scala',\n    'scheme', 'scss', 'shellscript', 'sql', 'swift', 'toml',\n    'tsx', 'typescript', 'vb', 'xml', 'yaml', 'zig'\n]);\n```\n\n资料来源：[src/lib/utils/codeHighlight.ts]()\n\n**Supported Languages (Partial List)**\n\n| Category | Languages |\n|----------|-----------|\n| Systems | C, C++, Rust, Go, Zig |\n| Web | JavaScript, TypeScript, HTML, CSS, JSX, TSX |\n| 
Scripting | Python, Ruby, PHP, Perl, Lua |\n| Data | SQL, JSON, YAML, TOML |\n| Functional | Haskell, Elixir, Clojure, Scheme |\n| Markup | Markdown, LaTeX, XML, HTML |\n\n---\n\n## HTML Template Structure\n\n### Application Shell\n\nThe `app.html` file provides the base HTML structure with splash screen support:\n\n```html\n<!doctype html>\n<html lang=\"en\">\n    <head>\n        <meta charset=\"utf-8\" />\n        <link rel=\"icon\" type=\"image/png\" href=\"/static/favicon.png\" />\n        <link rel=\"manifest\" href=\"/manifest.json\" />\n        <meta name=\"viewport\" content=\"width=device-width, initial-scale=1...\" />\n        <meta name=\"theme-color\" content=\"#171717\" />\n        <script src=\"/static/loader.js\" defer></script>\n        <link rel=\"stylesheet\" href=\"/static/custom.css\" />\n    </head>\n</html>\n```\n\n### Theme Support\n\nThe application supports multiple themes including a special \"her\" theme:\n\n```css\nhtml.dark #splash-screen {\n    background: #000;\n}\n\nhtml.her #splash-screen {\n    background: #983724;\n}\n\nhtml.her #logo {\n    display: none;\n}\n\nhtml.her #logo-her {\n    display: block;\n    filter: invert(1);\n}\n```\n\n资料来源：[src/app.html]()\n\n---\n\n## State Management Architecture\n\n### Store-Based State Management\n\nThe frontend uses Svelte's reactive stores for global state management. 
Stores are defined in `src/lib/stores/index.ts` and provide reactive access to:\n\n- User authentication state\n- Chat history and messages\n- Model configurations\n- UI preferences\n- Application settings\n\n### Store Access Pattern\n\n```typescript\nimport { storeName } from '$lib/stores';\n\n// Reactive subscription\n$: result = storeName.subscribe(value => {\n    // Handle value changes\n});\n\n// Or using Svelte's auto-subscription\n$: value = $storeName;\n```\n\n---\n\n## Internationalization\n\n### i18n Module\n\nInternationalization is handled through `src/lib/i18n/index.ts` which provides:\n\n- Language detection and switching\n- Translation loading\n- Locale-specific formatting\n- Fallback language support\n\n---\n\n## Frontend Architecture Diagram\n\n```mermaid\ngraph TB\n    subgraph \"Presentation Layer\"\n        UI[UI Components<br/>src/lib/components]\n        Routes[SvelteKit Routes<br/>src/routes]\n    end\n\n    subgraph \"Logic Layer\"\n        Stores[State Management<br/>src/lib/stores]\n        Utils[Utilities<br/>src/lib/utils]\n        i18n[Internationalization<br/>src/lib/i18n]\n    end\n\n    subgraph \"Integration Layer\"\n        APIs[API Clients<br/>src/lib/apis]\n        Constants[Constants<br/>src/lib/constants]\n    end\n\n    subgraph \"External Services\"\n        Backend[Backend API<br/>/api/v1]\n        Ollama[Ollama API<br/>/ollama]\n    end\n\n    UI --> Routes\n    Routes --> Stores\n    UI --> Stores\n    Routes --> APIs\n    APIs --> Constants\n    Stores --> Utils\n    APIs --> Backend\n    Constants --> Ollama\n```\n\n---\n\n## Build Configuration\n\n### SvelteKit Configuration\n\nThe SvelteKit configuration (`svelte.config.js`) defines:\n\n- Adapter settings for deployment\n- Preprocessing options\n- Build optimizations\n- SSR configuration\n\nThe frontend is built with Vite under the hood, providing:\n\n- Hot Module Replacement (HMR)\n- Code splitting\n- Tree shaking\n- TypeScript support\n\n---\n\n## Request/Response 
Flow\n\n```mermaid\nsequenceDiagram\n    participant User as User Interface\n    participant Store as Svelte Store\n    participant API as API Client\n    participant Backend as Backend API\n\n    User->>Store: Trigger action\n    Store->>API: Call API function\n    API->>API: Set headers & body\n    API->>Backend: HTTP Request\n    Backend-->>API: JSON Response\n    API-->>Store: Processed data\n    Store-->>User: Update UI\n```\n\n---\n\n## Summary\n\nThe Open WebUI frontend is built on a clean, modular architecture that separates concerns through dedicated directories for components, API clients, stores, utilities, and internationalization. The TypeScript-based codebase provides type safety while SvelteKit enables both server-side rendering and client-side hydration. The constant module centralizes configuration, making environment-specific settings easy to manage across different deployment scenarios.\n\n---\n\n<a id='chat-interface'></a>\n\n## Chat Interface\n\n### 相关页面\n\n相关主题：[Ollama Integration](#ollama-integration), [Frontend Structure](#frontend-structure)\n\n<details>\n<summary>相关源码文件</summary>\n\n以下源码文件用于生成本页说明：\n\n- [src/lib/stores/index.ts](https://github.com/open-webui/open-webui/blob/main/src/lib/stores/index.ts)\n- [src/lib/utils/index.ts](https://github.com/open-webui/open-webui/blob/main/src/lib/utils/index.ts)\n- [src/lib/constants.ts](https://github.com/open-webui/open-webui/blob/main/src/lib/constants.ts)\n- [backend/open_webui/models/chats.py](https://github.com/open-webui/open-webui/blob/main/backend/open_webui/models/chats.py)\n- [backend/open_webui/config.py](https://github.com/open-webui/open-webui/blob/main/backend/open_webui/config.py)\n- [backend/open_webui/utils/middleware.py](https://github.com/open-webui/open-webui/blob/main/backend/open_webui/utils/middleware.py)\n</details>\n\n# Chat Interface\n\n## Overview\n\nThe Chat Interface is the core user-facing component of Open WebUI, providing an interactive environment for 
conversations with AI models. It handles message composition, response rendering, conversation state management, and integration with various backend services including Ollama, OpenAI-compatible APIs, and code execution engines.\n\nThe interface is built with SvelteKit on the frontend and Python/FastAPI on the backend, enabling real-time streaming responses, multi-model conversations, and rich content rendering including markdown, code blocks, and embedded media.\n\n## Architecture Overview\n\n```mermaid\ngraph TD\n    subgraph Frontend[\"Frontend (Svelte)\"]\n        Chat[Chat.svelte]\n        MessageInput[MessageInput.svelte]\n        Message[Message.svelte]\n        Markdown[Markdown.svelte]\n        ModelSelector[ModelSelector.svelte]\n        Navbar[Navbar.svelte]\n    end\n    \n    subgraph StateManagement[\"State Management\"]\n        Stores[index.ts - Svelte Stores]\n    end\n    \n    subgraph Backend[\"Backend (Python/FastAPI)\"]\n        ChatModel[models/chats.py]\n        Config[config.py]\n        Middleware[middleware.py]\n    end\n    \n    Chat --> Stores\n    MessageInput --> Stores\n    Message --> Stores\n    Chat --> MessageInput\n    Chat --> Message\n    Message --> Markdown\n    Stores --> ChatModel\n    Stores --> Middleware\n```\n\n## State Management\n\nThe chat interface relies heavily on Svelte stores for reactive state management. 
These stores maintain the current conversation state, UI visibility flags, and application-wide settings.\n\n### Core Chat Stores\n\nAll chat-related state is managed through Svelte writable stores defined in `src/lib/stores/index.ts`:\n\n| Store | Type | Purpose |\n|-------|------|---------|\n| `chatId` | `Writable<string>` | Current active chat identifier |\n| `chatTitle` | `Writable<string>` | Title of the current chat |\n| `chats` | `Writable<null>` | Cached chat objects |\n| `pinnedChats` | `Writable<Chat[]>` | Pinned conversations |\n| `models` | `Writable<Model[]>` | Available AI models |\n| `chatRequestQueues` | `Writable<Record<string, QueueItem[]>>` | Request queue management |\n\n资料来源：[src/lib/stores/index.ts:53-58]()\n\n### UI Visibility Stores\n\nThe interface uses boolean stores to control component visibility:\n\n| Store | Type | Purpose |\n|-------|------|---------|\n| `showSidebar` | `Writable<boolean>` | Sidebar visibility |\n| `showSettings` | `Writable<boolean>` | Settings panel visibility |\n| `showShortcuts` | `Writable<boolean>` | Keyboard shortcuts overlay |\n| `showControls` | `Writable<boolean>` | Chat controls visibility |\n| `showEmbeds` | `Writable<boolean>` | Embedded content display |\n| `showArtifacts` | `Writable<boolean>` | Code artifacts panel |\n\n资料来源：[src/lib/stores/index.ts:22-30]()\n\n### Audio and Transcription Stores\n\n| Store | Type | Purpose |\n|-------|------|---------|\n| `audioQueue` | `Writable<AudioQueue \\| null>` | TTS audio queue |\n| `TTSWorker` | `Writable<Worker \\| null>` | Text-to-speech web worker |\n\n## Message Processing Pipeline\n\n### Content Sanitization\n\nBefore rendering, message content undergoes sanitization to prevent XSS attacks and normalize special tokens:\n\n```typescript\nexport const sanitizeResponseContent = (content: string) => {\n    return content\n        .replace(/<\\|[a-z]*$/, '')\n        .replace(/<\\|[a-z]+\\|$/, '')\n        .replace(/<$/, '')\n        .replaceAll('<', '&lt;')\n 
       .replaceAll('>', '&gt;')\n        .replaceAll(/<\|[a-z]+\|>/g, ' ')\n        .trim();\n};\n```\n\n资料来源：[src/lib/utils/index.ts:180-189]()\n\n### Content Processing for Chinese Text\n\nThe system includes special handling for Chinese content to address markdown and LaTeX formatting issues:\n\n```typescript\nfunction processChineseContent(content: string): string {\n    if (!/[\\u4e00-\\u9fa5]/.test(content)) return content;\n    const lines = content.split('\\n');\n    const processedLines = lines.map((line) => {\n        // Chinese-specific processing logic\n    });\n    return processedLines.join('\\n');\n}\n```\n\n资料来源：[src/lib/utils/index.ts:195-208]()\n\n### Sentence and Paragraph Extraction\n\nFor audio processing (text-to-speech), messages are split into appropriate segments; short sentences are merged with their predecessor until a minimum word/character length is reached:\n\n```typescript\nexport const extractSentencesForAudio = (text: string) => {\n    return extractSentences(text).reduce((mergedTexts, currentText) => {\n        const lastIndex = mergedTexts.length - 1;\n        if (lastIndex >= 0) {\n            const previousText = mergedTexts[lastIndex];\n            const wordCount = previousText.split(/\\s+/).length;\n            const charCount = previousText.length;\n            if (wordCount < 4 || charCount < 50) {\n                mergedTexts[lastIndex] = previousText + ' ' + currentText;\n            } else {\n                mergedTexts.push(currentText);\n            }\n        } else {\n            mergedTexts.push(currentText);\n        }\n        return mergedTexts;\n    }, []);\n};\n```\n\n资料来源：[src/lib/utils/index.ts:300-319]()\n\n## Chat Data Models\n\n### Backend Chat Model\n\nThe backend defines chat structures in `backend/open_webui/models/chats.py`:\n\n```python\nclass ChatModel:\n    async def get_message_list(self, id: str) -> Optional[dict]:\n        \"\"\"Message map for walking history.\n        \n        Prefer chat_message rows to avoid loading the large chat\n        JSON blob; fall back to embedded history when no rows exist\n        (legacy chats).\n        \"\"\"\n        
messages_map = await ChatMessages.get_messages_map_by_chat_id(id)\n        if messages_map is not None:\n            return messages_map\n        \n        # Fall back to embedded JSON blob for legacy chats\n        chat = await self.get_chat_by_id(id)\n        if chat is None:\n            return None\n        \n        return chat.chat.get('history', {}).get('messages', {}) or {}\n```\n\n资料来源：[backend/open_webui/models/chats.py:1-25]()\n\n### Message Structure\n\nMessages support both normalized storage (via `chat_message` rows) and legacy embedded JSON format:\n\n| Field | Type | Description |\n|-------|------|-------------|\n| `id` | `string` | Unique message identifier |\n| `parentId` | `string \\| null` | Parent message ID for threading |\n| `childrenIds` | `string[]` | Child message IDs |\n| `role` | `user \\| assistant` | Message author role |\n| `content` | `string` | Message content |\n| `model` | `string` | Model used for assistant responses |\n| `timestamp` | `number` | Unix timestamp of creation |\n| `done` | `boolean` | Whether response is complete |\n\n## Configuration and Prompt Templates\n\n### Voice Mode Configuration\n\nVoice mode settings are configurable via environment variables:\n\n| Config Key | Environment Variable | Default | Description |\n|------------|---------------------|---------|-------------|\n| `ENABLE_VOICE_MODE_PROMPT` | `ENABLE_VOICE_MODE_PROMPT` | `True` | Enable voice mode prompt |\n| `VOICE_MODE_PROMPT_TEMPLATE` | `VOICE_MODE_PROMPT_TEMPLATE` | `''` | Custom voice prompt template |\n\n资料来源：[backend/open_webui/config.py:1-20]()\n\n### Code Interpreter Configuration\n\nThe chat interface integrates code execution capabilities:\n\n| Config Key | Environment Variable | Default | Description |\n|------------|---------------------|---------|-------------|\n| `ENABLE_CODE_EXECUTION` | `ENABLE_CODE_EXECUTION` | `True` | Enable code execution |\n| `CODE_EXECUTION_ENGINE` | `CODE_EXECUTION_ENGINE` | `pyodide` | Execution engine 
(pyodide/jupyter) |\n| `CODE_EXECUTION_JUPYTER_URL` | `CODE_EXECUTION_JUPYTER_URL` | `''` | Jupyter server URL |\n| `CODE_EXECUTION_JUPYTER_AUTH` | `CODE_EXECUTION_JUPYTER_AUTH` | `''` | Jupyter authentication |\n\n资料来源：[backend/open_webui/config.py:35-60]()\n\n### Prompt Generation Templates\n\nThe system uses configurable prompt templates for various tasks:\n\n| Template | Purpose |\n|----------|---------|\n| `DEFAULT_MOA_GENERATION_PROMPT_TEMPLATE` | Multi-model answer synthesis |\n| `IMAGE_PROMPT_GENERATION_PROMPT_TEMPLATE` | Image generation prompt creation |\n| `FOLLOW_UP_GENERATION_PROMPT_TEMPLATE` | Suggesting follow-up questions |\n\n## Code Interpreter Integration\n\n### Backend Middleware Rendering\n\nThe backend middleware handles code interpreter rendering in the streaming response pipeline:\n\n```python\nelif item_type == 'open_webui:code_interpreter':\n    # Code interpreter needs to inspect/mutate prior accumulated content\n    # to strip trailing unclosed code fences\n    content = '\\n'.join(parts)\n    content_stripped, original_whitespace = split_content_and_whitespace(content)\n    if is_opening_code_block(content_stripped):\n        content = content_stripped.rstrip('`').rstrip() + original_whitespace\n    else:\n        content = content_stripped + original_whitespace\n    \n    # Render as <details> block for frontend Collapsible\n    code = item.get('code', '').strip()\n    lang = item.get('lang', 'python')\n    status = item.get('status', 'in_progress')\n```\n\n资料来源：[backend/open_webui/utils/middleware.py:1-30]()\n\n### Code Block Rendering\n\nMessages with code content are wrapped in collapsible elements:\n\n```mermaid\ngraph LR\n    A[Message Content] --> B{Contains Code?}\n    B -->|Yes| C[Wrap in Details Block]\n    B -->|No| D[Render as Markdown]\n    C --> E[\"<details type='code'>\"]\n    E --> F[\"<summary>Code Block</summary>\"]\n    E --> G[\"```python<br>code```\"]\n```\n\n### Pyodide Environment\n\nFor browser-based code 
execution:\n\n- Runs via Pyodide in the browser for fast execution\n- No package installation available (`pip install`, `subprocess`, `micropip.install()` disabled)\n- Uses web worker for background processing\n\n## Chat History and Import\n\n### OpenAI Chat Import\n\nThe system supports importing chats from OpenAI's export format:\n\n```typescript\nexport const convertOpenAIMessages = (convo) => {\n    const messages = [];\n    const uniqueModels = new Set();\n    let lastId = null;\n    \n    for (const message_id in mapping) {\n        const message = mapping[message_id];\n        // Skip system and tool messages\n        if (role === 'system' || role === 'tool') {\n            continue;\n        }\n        \n        const new_chat = {\n            id: message_id,\n            parentId: lastId,\n            role: role !== 'user' ? 'assistant' : 'user',\n            content: extractOpenAIMessageContent(message['message']),\n            model,\n            done: true\n        };\n    }\n    return { messages, id, title, timestamp };\n};\n```\n\n资料来源：[src/lib/utils/index.ts:220-260]()\n\n### Validation\n\nImported chats undergo validation:\n\n```typescript\nconst validateChat = (chat) => {\n    const messages = chat.messages;\n    if (messages.length === 0) return false;\n    \n    for (const message of messages) {\n        if (typeof message.content !== 'string') {\n            return false;\n        }\n    }\n    return true;\n};\n```\n\n## API Integration\n\n### Base URLs Configuration\n\nThe frontend configures API endpoints in `src/lib/constants.ts`:\n\n| Constant | Value | Purpose |\n|----------|-------|---------|\n| `OLLAMA_API_BASE_URL` | `/ollama` | Ollama model API |\n| `OPENAI_API_BASE_URL` | `/openai` | OpenAI-compatible API |\n| `AUDIO_API_BASE_URL` | `/api/v1/audio` | Audio processing |\n| `IMAGES_API_BASE_URL` | `/api/v1/images` | Image generation |\n| `RETRIEVAL_API_BASE_URL` | `/api/v1/retrieval` | RAG/retrieval 
|\n\n资料来源：[src/lib/constants.ts:7-15]()\n\n### WebUI API Configuration\n\n```typescript\nexport const WEBUI_API_BASE_URL = `${WEBUI_BASE_URL}/api/v1`;\n```\n\n## Message Queue Management\n\nThe system handles concurrent chat requests through queue management:\n\n```typescript\nexport const chatRequestQueues: Writable<\n    Record<string, { id: string; prompt: string; files: any[] }[]>\n> = writable({});\n```\n\nThis allows multiple concurrent conversations with queued message processing per chat ID.\n\n## Supported File Types\n\nThe chat interface supports file attachments for context:\n\n| Category | MIME Types |\n|----------|------------|\n| Documents | PDF, EPUB, DOCX, TXT, CSV, XML |\n| Code | Python, CSS, JavaScript, HTML |\n| Audio | MPEG, WAV, OGG |\n| Images | PNG, JPEG, GIF, WebP |\n| Archives | ZIP |\n\n资料来源：[src/lib/constants.ts:20-30]()\n\n## User Interface Components\n\n### Chat Component Hierarchy\n\n```mermaid\ngraph TD\n    Chat[Chat.svelte] --> Navbar[Navbar.svelte]\n    Chat --> MessageInput[MessageInput.svelte]\n    Chat --> Messages[Messages Container]\n    Messages --> Message[Message.svelte]\n    Message --> Markdown[Markdown.svelte]\n    Message --> ModelSelector[ModelSelector.svelte]\n```\n\n### Sidebar and Navigation\n\n| Component | Store | Purpose |\n|-----------|-------|---------|\n| Sidebar | `showSidebar` | Navigation and chat list |\n| Search | `showSearch` | Search across chats |\n| Archived Chats | `showArchivedChats` | View archived conversations |\n| Folders | `folders`, `selectedFolder` | Organize chats |\n\n## Theming and Styling\n\nThe chat interface supports dynamic theming:\n\n```typescript\nexport const theme = writable('system');\n```\n\nTheme application is handled via CSS variables and the `html.dark` class:\n\n```css\nhtml.dark #splash-screen {\n    background: #000;\n}\n\nhtml.her #splash-screen {\n    background: #983724;\n}\n```\n\n资料来源：[src/app.html:1-30]()\n\n## Summary\n\nThe Chat Interface in Open WebUI represents 
a sophisticated integration of frontend Svelte components and Python backend services. Key architectural elements include:\n\n1. **Reactive State Management** - Svelte stores maintain real-time UI and chat state\n2. **Message Processing Pipeline** - Content sanitization, language-specific processing, and format conversion\n3. **Multi-Model Support** - Configurable model selection and parallel conversation capabilities\n4. **Code Execution** - Integrated code interpreter with Pyodide and Jupyter support\n5. **Rich Content Rendering** - Markdown, code blocks, images, and audio\n6. **Import/Export** - OpenAI chat format compatibility\n7. **Voice Integration** - TTS and voice input capabilities\n\nThe modular design allows each component to be independently configured while maintaining a cohesive user experience across different interaction modes.\n\n---\n\n<a id='ollama-integration'></a>\n\n## Ollama Integration\n\n### 相关页面\n\n相关主题：[RAG Pipeline](#rag-pipeline), [Chat Interface](#chat-interface)\n\n<details>\n<summary>相关源码文件</summary>\n\n以下源码文件用于生成本页说明：\n\n- [backend/open_webui/routers/ollama.py](https://github.com/open-webui/open-webui/blob/main/backend/open_webui/routers/ollama.py)\n- [backend/open_webui/routers/openai.py](https://github.com/open-webui/open-webui/blob/main/backend/open_webui/routers/openai.py)\n- [backend/open_webui/utils/models.py](https://github.com/open-webui/open-webui/blob/main/backend/open_webui/utils/models.py)\n- [src/lib/apis/ollama/index.ts](https://github.com/open-webui/open-webui/blob/main/src/lib/apis/ollama/index.ts)\n- [backend/open_webui/config.py](https://github.com/open-webui/open-webui/blob/main/backend/open_webui/config.py)\n- [src/lib/constants.ts](https://github.com/open-webui/open-webui/blob/main/src/lib/constants.ts)\n</details>\n\n# Ollama Integration\n\n## Overview\n\nThe Ollama Integration is a core component of Open WebUI that enables seamless communication between the frontend application and local Ollama instances. 
This integration provides a unified interface for managing, accessing, and interacting with LLM models hosted locally through Ollama, supporting both native Ollama API calls and OpenAI-compatible endpoints.\n\nOllama serves as the primary backend inference engine for Open WebUI, allowing users to run large language models entirely on their local hardware without relying on cloud-based services.\n\n## Architecture Overview\n\nThe Ollama Integration follows a proxy pattern where the backend server acts as an intermediary, forwarding requests from the frontend to Ollama instances while applying access controls, model routing, and API transformations.\n\n```mermaid\ngraph TD\n    subgraph Frontend[\"Frontend (Svelte)\"]\n        UI[User Interface]\n        API_CLIENT[API Client<br/>src/lib/apis/ollama/index.ts]\n    end\n\n    subgraph Backend[\"Backend Server (Python/FastAPI)\"]\n        OLLAMA_ROUTER[Ollama Router<br/>routers/ollama.py]\n        OPENAI_ROUTER[OpenAI Router<br/>routers/openai.py]\n        MODEL_UTILS[Model Utilities<br/>utils/models.py]\n        CONFIG[Configuration<br/>config.py]\n    end\n\n    subgraph OllamaInstances[\"Ollama Instances\"]\n        OLLAMA_LOCAL[Local Ollama<br/>localhost:11434]\n        OLLAMA_CUSTOM[Custom Ollama<br/>Configured URLs]\n    end\n\n    UI --> API_CLIENT\n    API_CLIENT -->|HTTP Requests| OLLAMA_ROUTER\n    API_CLIENT -->|OpenAI-compatible| OPENAI_ROUTER\n    OLLAMA_ROUTER --> MODEL_UTILS\n    OLLAMA_ROUTER --> CONFIG\n    OLLAMA_ROUTER -->|Native API| OLLAMA_LOCAL\n    OLLAMA_ROUTER -->|Native API| OLLAMA_CUSTOM\n    OPENAI_ROUTER -->|v1/chat/completions| OLLAMA_LOCAL\n    OPENAI_ROUTER -->|v1/chat/completions| OLLAMA_CUSTOM\n\n    style Frontend fill:#e1f5fe\n    style Backend fill:#f3e5f5\n    style OllamaInstances fill:#fff3e0\n```\n\n## Core Components\n\n### Backend Router (routers/ollama.py)\n\nThe Ollama router (`backend/open_webui/routers/ollama.py`) handles all native Ollama API operations. 
It provides endpoints for model management, chat completions, and model operations.\n\n**Primary Endpoints:**\n\n| Endpoint | Method | Description |\n|----------|--------|-------------|\n| `/api/chat` | POST | Send chat completion requests |\n| `/api/generate` | POST | Generate text with model |\n| `/api/tags` | GET | List available models |\n| `/api/pull` | POST | Pull a new model |\n| `/api/push` | POST | Push a model to registry |\n| `/api/delete` | DELETE | Delete a model |\n| `/api/create` | POST | Create a new model |\n| `/config` | GET/POST | Get/update Ollama configuration |\n| `/verify` | POST | Verify connection to Ollama |\n| `/v1/chat/completions` | POST | OpenAI-compatible chat endpoint |\n| `/v1/models` | GET | OpenAI-compatible models list |\n| `/v1/messages` | POST | Anthropic-compatible messages endpoint |\n| `/v1/responses` | POST | Ollama Responses API endpoint |\n\n资料来源：[backend/open_webui/routers/ollama.py:1-500](https://github.com/open-webui/open-webui/blob/main/backend/open_webui/routers/ollama.py)\n\n### Model Resolution and URL Selection\n\nThe system supports multiple Ollama instances through a URL index system. When a request is made, the router resolves the appropriate Ollama instance based on model configuration.\n\n```mermaid\nsequenceDiagram\n    participant Client\n    participant Router\n    participant Config\n    participant Ollama\n    \n    Client->>Router: POST /api/chat {model: \"llama2\"}\n    Router->>Config: get_ollama_url(model, url_idx)\n    Config->>Config: Check model-to-URL mapping\n    Config->>Config: Check url_idx or default\n    Config-->>Router: (url, url_idx)\n    Router->>Ollama: Forward request to url\n    Ollama-->>Router: Response\n    Router-->>Client: Forwarded response\n```\n\nThe `get_ollama_url` function performs the following resolution logic:\n\n1. If `url_idx` is provided, use the corresponding URL from `OLLAMA_BASE_URLS`\n2. Check model-specific URL mappings stored in `OLLAMA_MODELS`\n3. 
Fall back to the primary `OLLAMA_BASE_URL`\n\n资料来源：[backend/open_webui/routers/ollama.py:100-200](https://github.com/open-webui/open-webui/blob/main/backend/open_webui/routers/ollama.py)\n\n### Prefix ID Handling\n\nFor multi-tenant deployments, the system supports `prefix_id` configuration. When a prefix is configured, model names are automatically transformed:\n\n```python\nprefix_id = api_config.get('prefix_id', None)\nif prefix_id:\n    payload['model'] = payload['model'].replace(f'{prefix_id}.', '')\n```\n\nThis allows prefixed model names (e.g., `tenant1.llama2`) to be exposed to users for disambiguation while the backend automatically strips the prefix and forwards the bare model name (e.g., `llama2`) to the Ollama API.\n\n资料来源：[backend/open_webui/routers/ollama.py:200-220](https://github.com/open-webui/open-webui/blob/main/backend/open_webui/routers/ollama.py)\n\n## Configuration\n\n### Environment Variables\n\nThe Ollama integration is configured through environment variables in `backend/open_webui/config.py`:\n\n| Variable | Default | Description |\n|----------|---------|-------------|\n| `ENABLE_OLLAMA_API` | `True` | Enable/disable Ollama API |\n| `OLLAMA_API_BASE_URL` | `http://localhost:11434/api` | Primary Ollama API URL |\n| `OLLAMA_BASE_URL` | Auto-derived | Base URL for Ollama connections |\n| `USE_OLLAMA_DOCKER` | `false` | Use all-in-one Docker container |\n| `K8S_FLAG` | Empty | Kubernetes deployment flag |\n\n```python\nENABLE_OLLAMA_API = PersistentConfig(\n    'ENABLE_OLLAMA_API',\n    'ollama.enable',\n    os.environ.get('ENABLE_OLLAMA_API', 'True').lower() == 'true',\n)\n\nOLLAMA_API_BASE_URL = os.environ.get('OLLAMA_API_BASE_URL', 'http://localhost:11434/api')\n```\n\n资料来源：[backend/open_webui/config.py:1-100](https://github.com/open-webui/open-webui/blob/main/backend/open_webui/config.py)\n\n### Port Fallback Resolution\n\nThe configuration includes automatic port fallback logic for environments where the default Ollama port (11434) might be blocked:\n\n```python\ndef 
_resolve_ollama_base_url(url: str) -> str:\n    \"\"\"If the default Ollama port (11434) is unreachable, try the fallback port (12434).\"\"\"\n    # Checks port 11434 first, then falls back to 12434 if unreachable\n```\n\nThis enables seamless operation in environments like certain corporate networks or containerized setups where only specific ports are accessible.\n\n资料来源：[backend/open_webui/config.py:50-80](https://github.com/open-webui/open-webui/blob/main/backend/open_webui/config.py)\n\n### Docker and Kubernetes Handling\n\nThe configuration adapts to different deployment scenarios:\n\n```python\nif OLLAMA_BASE_URL == '/ollama' and not K8S_FLAG:\n    if USE_OLLAMA_DOCKER.lower() == 'true':\n        OLLAMA_BASE_URL = 'http://localhost:11434'\n    else:\n        OLLAMA_BASE_URL = 'http://host.docker.internal:11434'\nelif K8S_FLAG:\n    OLLAMA_BASE_URL = 'http://ollama-service.open-webui.svc.cluster.local:11434'\n```\n\n资料来源：[backend/open_webui/config.py:40-50](https://github.com/open-webui/open-webui/blob/main/backend/open_webui/config.py)\n\n## API Compatibility Layers\n\n### OpenAI-Compatible API\n\nThe Ollama router provides OpenAI-compatible endpoints that translate requests to Ollama's API format:\n\n**Endpoint:** `POST /ollama/v1/chat/completions`\n\nThe system transforms OpenAI-format requests into Ollama-native format:\n\n```python\npayload = apply_model_params_to_body_openai(params, payload)\npayload = await apply_system_prompt_to_body(system, payload, metadata, user)\n```\n\nThis transformation includes:\n- Converting OpenAI parameter names to Ollama format\n- Applying model-specific parameter modifications\n- Injecting system prompts from user metadata\n\n资料来源：[backend/open_webui/routers/ollama.py:150-180](https://github.com/open-webui/open-webui/blob/main/backend/open_webui/routers/ollama.py)\n\n### Anthropic-Compatible API\n\nSupport for Anthropic's `/v1/messages` endpoint is provided through the Responses API:\n\n**Endpoint:** `POST 
/ollama/v1/messages`\n\n```python\n@router.post('/v1/messages')\nasync def generate_anthropic_messages(\n    request: Request,\n    form_data: dict,\n    url_idx: Optional[int] = None,\n    user=Depends(get_verified_user),\n):\n    \"\"\"\n    Proxy for Ollama's Anthropic-compatible /v1/messages endpoint.\n    Forwards the request as-is to the Ollama backend.\n    \"\"\"\n```\n\nThe request is forwarded to Ollama's `/v1/responses` endpoint with appropriate streaming headers.\n\n资料来源：[backend/open_webui/routers/ollama.py:250-280](https://github.com/open-webui/open-webui/blob/main/backend/open_webui/routers/ollama.py)\n\n## Frontend Integration\n\n### API Client (src/lib/apis/ollama/index.ts)\n\nThe frontend provides a TypeScript API client for communicating with the backend Ollama proxy:\n\n**Key Functions:**\n\n| Function | Purpose |\n|----------|---------|\n| `deleteModel()` | Delete a model from Ollama |\n| `pullModel()` | Pull a new model with progress tracking |\n| `verifyOllamaConnection()` | Test connectivity to Ollama instance |\n| `getOllamaConfig()` | Retrieve current Ollama configuration |\n\n```typescript\nexport const pullModel = async (token: string, tagName: string, urlIdx: number | null = null) => {\n    const controller = new AbortController();\n    const res = await fetch(\n        `${OLLAMA_API_BASE_URL}/api/pull${urlIdx !== null ? 
`/${urlIdx}` : ''}`,\n        {\n            signal: controller.signal,\n            method: 'POST',\n            headers: {\n                'Content-Type': 'application/json',\n                Authorization: `Bearer ${token}`\n            },\n            body: JSON.stringify({ name: tagName })\n        }\n    );\n    return res;\n};\n```\n\n资料来源：[src/lib/apis/ollama/index.ts:1-150](https://github.com/open-webui/open-webui/blob/main/src/lib/apis/ollama/index.ts)\n\n### API Base URL Configuration\n\nFrontend constants define the base URLs for API communication:\n\n```typescript\nexport const OLLAMA_API_BASE_URL = `${WEBUI_BASE_URL}/ollama`;\nexport const WEBUI_API_BASE_URL = `${WEBUI_BASE_URL}/api/v1`;\n```\n\nThe system automatically configures the base URL based on environment:\n- **Development:** `http://hostname:8080`\n- **Production:** Uses the configured domain\n\n资料来源：[src/lib/constants.ts:1-30](https://github.com/open-webui/open-webui/blob/main/src/lib/constants.ts)\n\n## Request Flow\n\n```mermaid\ngraph LR\n    A[User Request] --> B[Frontend API Client]\n    B --> C[Backend Router]\n    \n    C --> D{Request Type?}\n    \n    D -->|Native Ollama| E[Native API Handler]\n    D -->|OpenAI Format| F[OpenAI-Compatible Handler]\n    D -->|Anthropic Format| G[Anthropic-Compatible Handler]\n    \n    E --> H[Model Resolution]\n    F --> H\n    G --> H\n    \n    H --> I[Access Control Check]\n    I --> J{Model Access Allowed?}\n    \n    J -->|Yes| K[Forward to Ollama]\n    J -->|No| L[HTTP 403 Forbidden]\n    \n    K --> M[Ollama Instance]\n    M --> N[Response]\n    N --> O[Stream/Return to Client]\n```\n\n## Model Management\n\n### Model Registration\n\nModels discovered from Ollama instances are registered in the application state:\n\n```python\napp.state.OLLAMA_MODELS = {}\n```\n\nEach model entry contains:\n- `urls`: Array of Ollama instance URLs where the model is available\n- `details`: Model metadata (size, capabilities, 
etc.)\n\n资料来源：[backend/open_webui/main.py:100-120](https://github.com/open-webui/open-webui/blob/main/backend/open_webui/main.py)\n\n### Model Access Control\n\nBefore forwarding requests, the system checks user access permissions:\n\n```python\nmodel_info = await Models.get_model_by_id(model_id)\nif model_info:\n    if model_info.base_model_id:\n        payload['model'] = model_info.base_model_id\n    await check_model_access(user, model_info)\nelse:\n    await check_model_access(user, None)\n```\n\n资料来源：[backend/open_webui/routers/ollama.py:130-150](https://github.com/open-webui/open-webui/blob/main/backend/open_webui/routers/ollama.py)\n\n## Error Handling\n\n### Connection Verification\n\nThe system provides a connection verification endpoint for testing Ollama connectivity:\n\n```typescript\nexport const verifyOllamaConnection = async (token: string = '', connection: dict = {}) => {\n    const res = await fetch(`${OLLAMA_API_BASE_URL}/verify`, {\n        method: 'POST',\n        headers: {\n            Authorization: `Bearer ${token}`,\n            'Content-Type': 'application/json'\n        },\n        body: JSON.stringify({ ...connection })\n    });\n    return res;\n};\n```\n\n资料来源：[src/lib/apis/ollama/index.ts:150-180](https://github.com/open-webui/open-webui/blob/main/src/lib/apis/ollama/index.ts)\n\n### Error Messages\n\nCommon error scenarios include:\n\n| Scenario | HTTP Status | Error Message |\n|----------|-------------|---------------|\n| Ollama API Disabled | 503 | `OLLAMA_API_DISABLED` |\n| Model Not Found | 400 | `MODEL_NOT_FOUND` |\n| Network Problem | Various | `Ollama: Network Problem` |\n| Invalid Config | 500 | `DEFAULT(e)` |\n\n资料来源：[backend/open_webui/routers/ollama.py:50-80](https://github.com/open-webui/open-webui/blob/main/backend/open_webui/routers/ollama.py)\n\n## Summary\n\nThe Ollama Integration provides a robust, flexible bridge between Open WebUI and local Ollama instances. 
Key features include:\n\n- **Multi-instance support** through URL indexing\n- **API compatibility layers** for OpenAI and Anthropic formats\n- **Automatic port fallback** for network flexibility\n- **Access control** integration for model permissions\n- **Prefix-based multi-tenancy** support\n- **Streaming support** for real-time responses\n- **Docker and Kubernetes** deployment optimizations\n\nThis integration enables users to run powerful LLM models entirely locally while maintaining a modern, feature-rich web interface for interaction.\n\n---\n\n<a id='rag-pipeline'></a>\n\n## RAG Pipeline\n\n### 相关页面\n\n相关主题：[Ollama Integration](#ollama-integration), [Retrieval System](#retrieval-system)\n\n<details>\n<summary>相关源码文件</summary>\n\n以下源码文件用于生成本页说明：\n\n- [backend/open_webui/routers/knowledge.py](https://github.com/open-webui/open-webui/blob/main/backend/open_webui/routers/knowledge.py)\n- [backend/open_webui/retrieval/utils.py](https://github.com/open-webui/open-webui/blob/main/backend/open_webui/retrieval/utils.py)\n- [backend/open_webui/main.py](https://github.com/open-webui/open-webui/blob/main/backend/open_webui/main.py)\n- [src/lib/constants.ts](https://github.com/open-webui/open-webui/blob/main/src/lib/constants.ts)\n- [backend/open_webui/config.py](https://github.com/open-webui/open-webui/blob/main/backend/open_webui/config.py)\n</details>\n\n# RAG Pipeline\n\nRetrieval-Augmented Generation (RAG) Pipeline in Open WebUI enables users to upload documents, process them into searchable vector embeddings, and augment LLM responses with relevant context from user knowledge bases.\n\n## Architecture Overview\n\nThe RAG Pipeline consists of multiple integrated components that work together to provide document retrieval and context augmentation capabilities.\n\n```mermaid\ngraph TD\n    A[User Upload] --> B[Document Processing]\n    B --> C[Text Extraction]\n    C --> D[Chunking]\n    D --> E[Embedding Generation]\n    E --> F[Vector Storage]\n    F --> 
G[Retrieval]\n    G --> H[Context Injection]\n    H --> I[LLM Response]\n    \n    J[Knowledge Management] --> K[Access Control]\n    K --> F\n```\n\n### Core Components\n\n| Component | Location | Purpose |\n|-----------|----------|---------|\n| Knowledge Router | `backend/open_webui/routers/knowledge.py` | REST API endpoints for knowledge management |\n| Retrieval Utils | `backend/open_webui/retrieval/utils.py` | Document loading and text extraction |\n| API Router | `backend/open_webui/main.py` | Registers retrieval endpoints |\n| Frontend | `src/lib/components/workspace/Knowledge.svelte` | UI for knowledge management |\n\n资料来源：[backend/open_webui/main.py:17-30]()\n\n## Supported Document Types\n\nOpen WebUI supports a wide range of document formats through configurable document loaders.\n\n```typescript\n// src/lib/constants.ts\nexport const SUPPORTED_FILE_TYPE = [\n    'application/epub+zip',\n    'application/pdf',\n    'text/plain',\n    'text/csv',\n    'text/xml',\n    'text/html',\n    'text/x-python',\n    'text/css',\n    'application/vnd.openxmlformats-officedocument.wordprocessingml.document',\n    'application/octet-stream',\n    'application/x-javascript',\n    'text/markdown',\n    'audio/mpeg',\n    'audio/wav',\n    'video/mp4',\n    'video/mpeg'\n];\n```\n\n资料来源：[src/lib/constants.ts:16-30]()\n\n## Document Processing Pipeline\n\n### Text Extraction\n\nThe retrieval utility module handles document parsing through multiple backends:\n\n```python\n# backend/open_webui/retrieval/utils.py\ndef _extract_text_from_binary_response(request, response, url):\n    \"\"\"Download response body to a temp file and extract text using the Loader pipeline.\"\"\"\n    import mimetypes\n    import tempfile\n    import urllib.parse\n```\n\n### Supported Document Loaders\n\n| Loader | Purpose | Configuration |\n|--------|---------|---------------|\n| TIKA Server | Apache Tika for generic document parsing | `TIKA_SERVER_URL` |\n| DOCLING | Advanced PDF and document 
processing | `DOCLING_SERVER_URL`, `DOCLING_API_KEY` |\n| PDF Loader | Configurable PDF extraction | `PDF_LOADER_MODE`, `PDF_EXTRACT_IMAGES` |\n| Document Intelligence | Azure AI document analysis | `DOCUMENT_INTELLIGENCE_ENDPOINT` |\n| Mistral OCR | OCR for scanned documents | `MISTRAL_OCR_API_BASE_URL` |\n| PaddleOCR VL | Visual language OCR | `PADDLEOCR_VL_BASE_URL` |\n| MinerU | Chinese document processing | `MINERU_API_MODE`, `MINERU_API_URL` |\n\n资料来源：[backend/open_webui/retrieval/utils.py:1-25]()\n\n## Knowledge Management API\n\n### Endpoints Overview\n\nThe knowledge router provides CRUD operations for managing user knowledge bases.\n\n```\n/api/v1/knowledge          - List and create knowledge bases\n/api/v1/knowledge/{id}    - Get, update, delete specific knowledge\n/api/v1/knowledge/{id}/file/add     - Add file to knowledge base\n/api/v1/knowledge/{id}/search       - Search within knowledge base\n```\n\n### Access Control\n\nKnowledge resources are protected by role-based access control:\n\n```python\n# backend/open_webui/routers/knowledge.py\nif not (\n    user.role == 'admin'\n    or knowledge.user_id == user.id\n    or await AccessGrants.has_access(\n        user_id=user.id,\n        resource_type='knowledge',\n        resource_id=knowledge.id,\n        permission='read',\n        db=db,\n    )\n):\n    raise HTTPException(\n        status_code=status.HTTP_400_BAD_REQUEST,\n        detail=ERROR_MESSAGES.ACCESS_PROHIBITED,\n    )\n```\n\n资料来源：[backend/open_webui/routers/knowledge.py:40-55]()\n\n### Search Functionality\n\nThe search endpoint supports pagination and filtering:\n\n| Parameter | Type | Default | Description |\n|-----------|------|---------|-------------|\n| `page` | int | 1 | Page number (minimum 1) |\n| `query` | string | - | Search query text |\n| `view_option` | string | - | Filter option |\n| `order_by` | string | - | Sort field |\n| `direction` | string | - | Sort direction |\n\n```python\npage = max(page, 1)\nlimit = 30\nskip = 
(page - 1) * limit\n\nfilter = {}\nif query:\n    filter['query'] = query\n```\n\n资料来源：[backend/open_webui/routers/knowledge.py:57-70]()\n\n## Configuration Options\n\n### Environment Variables\n\n| Variable | Default | Description |\n|----------|---------|-------------|\n| `EXTERNAL_DOCUMENT_LOADER_URL` | - | External document loader endpoint |\n| `EXTERNAL_DOCUMENT_LOADER_API_KEY` | - | API key for external loader |\n| `TIKA_SERVER_URL` | - | Apache Tika server URL |\n| `DOCLING_SERVER_URL` | - | Docling server endpoint |\n| `DOCLING_API_KEY` | - | Docling API authentication |\n| `PDF_LOADER_MODE` | - | PDF extraction mode |\n| `PDF_EXTRACT_IMAGES` | - | Enable image extraction from PDFs |\n| `DOCUMENT_INTELLIGENCE_ENDPOINT` | - | Azure AI endpoint |\n| `DOCUMENT_INTELLIGENCE_KEY` | - | Azure AI API key |\n| `MISTRAL_OCR_API_BASE_URL` | - | Mistral OCR service URL |\n| `MISTRAL_OCR_API_KEY` | - | Mistral OCR authentication |\n| `PADDLEOCR_VL_BASE_URL` | - | PaddleOCR endpoint |\n| `PADDLEOCR_VL_TOKEN` | - | PaddleOCR token |\n| `MINERU_API_MODE` | - | MinerU processing mode |\n| `MINERU_API_URL` | - | MinerU API endpoint |\n| `MINERU_API_KEY` | - | MinerU API key |\n| `MINERU_API_TIMEOUT` | - | MinerU request timeout |\n\n资料来源：[backend/open_webui/config.py:1-25]()\n\n## Data Flow\n\n```mermaid\nsequenceDiagram\n    participant U as User\n    participant F as Frontend\n    participant API as Knowledge API\n    participant DL as Document Loader\n    participant VC as Vector Cache\n    participant LLM as LLM\n    \n    U->>F: Upload Document\n    F->>API: POST /api/v1/knowledge/{id}/file/add\n    API->>DL: Extract Text\n    DL-->>API: Raw Text Content\n    API->>VC: Generate Embeddings\n    VC-->>API: Vector Embeddings\n    API-->>F: Success Response\n    \n    U->>F: Query with RAG\n    F->>API: POST /api/v1/retrieval\n    API->>VC: Search Vectors\n    VC-->>API: Relevant Chunks\n    API-->>F: Augmented Context\n    F->>LLM: Prompt + Context\n    LLM-->>U: 
Generated Response\n```\n\n## Frontend Integration\n\nThe knowledge management interface is implemented as a Svelte component:\n\n- Location: `src/lib/components/workspace/Knowledge.svelte`\n- Provides file upload, management, and search UI\n- Communicates with backend via REST API\n\n### API Base URLs\n\n```typescript\n// src/lib/constants.ts\nexport const RETRIEVAL_API_BASE_URL = `${WEBUI_BASE_URL}/api/v1/retrieval`;\nexport const AUDIO_API_BASE_URL = `${WEBUI_BASE_URL}/api/v1/audio`;\nexport const IMAGES_API_BASE_URL = `${WEBUI_BASE_URL}/api/v1/images`;\n```\n\n资料来源：[src/lib/constants.ts:9-13]()\n\n## Dependencies\n\nKey Python packages for RAG functionality:\n\n| Package | Version | Purpose |\n|---------|---------|---------|\n| `sqlalchemy` | 2.0.48 | Database ORM |\n| `requests` | 2.33.1 | HTTP client |\n| `httpx` | 0.28.1 | Async HTTP with HTTP/2 support |\n| `aiofiles` | - | Async file operations |\n| `redis` | - | Vector caching |\n| `pycrdt` | 0.12.47 | CRDT operations |\n\n资料来源：[backend/requirements-min.txt:1-35]()\n\n## Error Handling\n\nThe system uses centralized error messages:\n\n```python\nERROR_MESSAGES.NOT_FOUND = \"Knowledge base not found\"\nERROR_MESSAGES.ACCESS_PROHIBITED = \"Access denied to this knowledge base\"\n```\n\n## Best Practices\n\n1. **Document Preparation**: Use supported formats for optimal extraction quality\n2. **Chunking Strategy**: Configure appropriate chunk sizes based on use case\n3. **Access Control**: Leverage RBAC to protect sensitive knowledge bases\n4. **Loader Selection**: Choose appropriate document loader based on document complexity\n5. **Resource Management**: Monitor vector storage size for large knowledge bases\n\n---\n\n---\n\n## Doramagic 踩坑日志\n\n项目：open-webui/open-webui\n\n摘要：发现 6 个潜在踩坑项，其中 0 个为 high/blocking；最高优先级：能力坑 - 能力判断依赖假设。\n\n## 1. 
能力坑 · 能力判断依赖假设\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：README/documentation is current enough for a first validation pass.\n- 对用户的影响：假设不成立时，用户拿不到承诺的能力。\n- 建议检查：将假设转成下游验证清单。\n- 防护动作：假设必须转成验证项；没有验证结果前不能写成事实。\n- 证据：capability.assumptions | github_repo:701547123 | https://github.com/open-webui/open-webui | README/documentation is current enough for a first validation pass.\n\n## 2. 维护坑 · 维护活跃度未知\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：未记录 last_activity_observed。\n- 对用户的影响：新项目、停更项目和活跃项目会被混在一起，推荐信任度下降。\n- 建议检查：补 GitHub 最近 commit、release、issue/PR 响应信号。\n- 防护动作：维护活跃度未知时，推荐强度不能标为高信任。\n- 证据：evidence.maintainer_signals | github_repo:701547123 | https://github.com/open-webui/open-webui | last_activity_observed missing\n\n## 3. 安全/权限坑 · 下游验证发现风险项\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：no_demo\n- 对用户的影响：下游已经要求复核，不能在页面中弱化。\n- 建议检查：进入安全/权限治理复核队列。\n- 防护动作：下游风险存在时必须保持 review/recommendation 降级。\n- 证据：downstream_validation.risk_items | github_repo:701547123 | https://github.com/open-webui/open-webui | no_demo; severity=medium\n\n## 4. 安全/权限坑 · 存在评分风险\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：no_demo\n- 对用户的影响：风险会影响是否适合普通用户安装。\n- 建议检查：把风险写入边界卡，并确认是否需要人工复核。\n- 防护动作：评分风险必须进入边界卡，不能只作为内部分数。\n- 证据：risks.scoring_risks | github_repo:701547123 | https://github.com/open-webui/open-webui | no_demo; severity=medium\n\n## 5. 维护坑 · issue/PR 响应质量未知\n\n- 严重度：low\n- 证据强度：source_linked\n- 发现：issue_or_pr_quality=unknown。\n- 对用户的影响：用户无法判断遇到问题后是否有人维护。\n- 建议检查：抽样最近 issue/PR，判断是否长期无人处理。\n- 防护动作：issue/PR 响应未知时，必须提示维护风险。\n- 证据：evidence.maintainer_signals | github_repo:701547123 | https://github.com/open-webui/open-webui | issue_or_pr_quality=unknown\n\n## 6. 
维护坑 · 发布节奏不明确\n\n- 严重度：low\n- 证据强度：source_linked\n- 发现：release_recency=unknown。\n- 对用户的影响：安装命令和文档可能落后于代码，用户踩坑概率升高。\n- 建议检查：确认最近 release/tag 和 README 安装命令是否一致。\n- 防护动作：发布节奏未知或过期时，安装说明必须标注可能漂移。\n- 证据：evidence.maintainer_signals | github_repo:701547123 | https://github.com/open-webui/open-webui | release_recency=unknown\n\n<!-- canonical_name: open-webui/open-webui; human_manual_source: deepwiki_human_wiki -->\n",
      "summary": "DeepWiki/Human Wiki 完整输出，末尾追加 Discovery Agent 踩坑日志。",
      "title": "Human Manual / 人类版说明书"
    },
    "pitfall_log": {
      "asset_id": "pitfall_log",
      "filename": "PITFALL_LOG.md",
      "markdown": "# Pitfall Log / 踩坑日志\n\n项目：open-webui/open-webui\n\n摘要：发现 6 个潜在踩坑项，其中 0 个为 high/blocking；最高优先级：能力坑 - 能力判断依赖假设。\n\n## 1. 能力坑 · 能力判断依赖假设\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：README/documentation is current enough for a first validation pass.\n- 对用户的影响：假设不成立时，用户拿不到承诺的能力。\n- 建议检查：将假设转成下游验证清单。\n- 防护动作：假设必须转成验证项；没有验证结果前不能写成事实。\n- 证据：capability.assumptions | github_repo:701547123 | https://github.com/open-webui/open-webui | README/documentation is current enough for a first validation pass.\n\n## 2. 维护坑 · 维护活跃度未知\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：未记录 last_activity_observed。\n- 对用户的影响：新项目、停更项目和活跃项目会被混在一起，推荐信任度下降。\n- 建议检查：补 GitHub 最近 commit、release、issue/PR 响应信号。\n- 防护动作：维护活跃度未知时，推荐强度不能标为高信任。\n- 证据：evidence.maintainer_signals | github_repo:701547123 | https://github.com/open-webui/open-webui | last_activity_observed missing\n\n## 3. 安全/权限坑 · 下游验证发现风险项\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：no_demo\n- 对用户的影响：下游已经要求复核，不能在页面中弱化。\n- 建议检查：进入安全/权限治理复核队列。\n- 防护动作：下游风险存在时必须保持 review/recommendation 降级。\n- 证据：downstream_validation.risk_items | github_repo:701547123 | https://github.com/open-webui/open-webui | no_demo; severity=medium\n\n## 4. 安全/权限坑 · 存在评分风险\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：no_demo\n- 对用户的影响：风险会影响是否适合普通用户安装。\n- 建议检查：把风险写入边界卡，并确认是否需要人工复核。\n- 防护动作：评分风险必须进入边界卡，不能只作为内部分数。\n- 证据：risks.scoring_risks | github_repo:701547123 | https://github.com/open-webui/open-webui | no_demo; severity=medium\n\n## 5. 维护坑 · issue/PR 响应质量未知\n\n- 严重度：low\n- 证据强度：source_linked\n- 发现：issue_or_pr_quality=unknown。\n- 对用户的影响：用户无法判断遇到问题后是否有人维护。\n- 建议检查：抽样最近 issue/PR，判断是否长期无人处理。\n- 防护动作：issue/PR 响应未知时，必须提示维护风险。\n- 证据：evidence.maintainer_signals | github_repo:701547123 | https://github.com/open-webui/open-webui | issue_or_pr_quality=unknown\n\n## 6. 
维护坑 · 发布节奏不明确\n\n- 严重度：low\n- 证据强度：source_linked\n- 发现：release_recency=unknown。\n- 对用户的影响：安装命令和文档可能落后于代码，用户踩坑概率升高。\n- 建议检查：确认最近 release/tag 和 README 安装命令是否一致。\n- 防护动作：发布节奏未知或过期时，安装说明必须标注可能漂移。\n- 证据：evidence.maintainer_signals | github_repo:701547123 | https://github.com/open-webui/open-webui | release_recency=unknown\n",
      "summary": "用户实践前最可能遇到的身份、安装、配置、运行和安全坑。",
      "title": "Pitfall Log / 踩坑日志"
    },
    "prompt_preview": {
      "asset_id": "prompt_preview",
      "filename": "PROMPT_PREVIEW.md",
      "markdown": "# open-webui - Prompt Preview\n\n> Copy the prompt below into your AI host before installing anything.\n> Its purpose is to let you safely feel the project's workflow, not to claim the project has already run.\n\n## Copy this prompt\n\n```text\nYou are using an independent Doramagic capability pack for open-webui/open-webui.\n\nProject:\n- Name: open-webui\n- Repository: https://github.com/open-webui/open-webui\n- Summary: User-friendly AI Interface (Supports Ollama, OpenAI API, ...)\n- Host target: mcp_host, chatgpt\n\nGoal:\nHelp me evaluate this project for the following task without installing it yet: User-friendly AI Interface (Supports Ollama, OpenAI API, ...)\n\nBefore taking action:\n1. Restate my task, success standard, and boundary.\n2. Identify whether the next step requires tools, browser access, network access, filesystem access, credentials, package installation, or host configuration.\n3. Use only the Doramagic Project Pack, the upstream repository, and the source-linked evidence listed below.\n4. If a real command, install step, API call, file write, or host integration is required, mark it as \"requires post-install verification\" and ask for approval first.\n5. If evidence is missing, say \"evidence is missing\" instead of filling the gap.\n\nPreviewable capabilities:\n- Capability 1: User-friendly AI Interface (Supports Ollama, OpenAI API, ...)\n\nCapabilities that require post-install verification:\n- Capability 1: Use the source-backed project context to guide one small, checkable workflow step.\n\nCore service flow:\n1. project-introduction: Project Introduction. Produce one small intermediate artifact and wait for confirmation.\n2. installation-guide: Installation Guide. Produce one small intermediate artifact and wait for confirmation.\n3. architecture-overview: Architecture Overview. Produce one small intermediate artifact and wait for confirmation.\n4. data-models: Data Models. 
Produce one small intermediate artifact and wait for confirmation.\n5. api-routers: API Routers. Produce one small intermediate artifact and wait for confirmation.\n\nSource-backed evidence to keep in mind:\n- https://github.com/open-webui/open-webui\n- https://github.com/open-webui/open-webui#readme\n- README.md\n- pyproject.toml\n- backend/open_webui/__init__.py\n- Dockerfile\n- docker-compose.yaml\n- .env.example\n- backend/start.sh\n- run.sh\n\nFirst response rules:\n1. Start Step 1 only.\n2. Explain the one service action you will perform first.\n3. Ask exactly three questions about my target workflow, success standard, and sandbox boundary.\n4. Stop and wait for my answers.\n\nStep 1 follow-up protocol:\n- After I answer the first three questions, stay in Step 1.\n- Produce six parts only: clarified task, success standard, boundary conditions, two or three options, tradeoffs for each option, and one recommendation.\n- End by asking whether I confirm the recommendation.\n- Do not move to Step 2 until I explicitly confirm.\n\nConversation rules:\n- Advance one step at a time and wait for confirmation after each small artifact.\n- Write outputs as recommendations or planned checks, not as completed execution.\n- Do not claim tests passed, files changed, commands ran, APIs were called, or the project was installed.\n- If the user asks for execution, first provide the sandbox setup, expected output, rollback, and approval checkpoint.\n```\n",
      "summary": "不安装项目也能感受能力节奏的安全试用 Prompt。",
      "title": "Prompt Preview / 安装前试用 Prompt"
    },
    "quick_start": {
      "asset_id": "quick_start",
      "filename": "QUICK_START.md",
      "markdown": "# Quick Start / 官方入口\n\n项目：open-webui/open-webui\n\n## 官方安装入口\n\n### Python / pip · 官方安装入口\n\n```bash\npip install open-webui\n```\n\n来源：https://github.com/open-webui/open-webui#readme\n\n## 来源\n\n- repo: https://github.com/open-webui/open-webui\n- docs: https://github.com/open-webui/open-webui#readme\n",
      "summary": "从项目官方 README 或安装文档提取的开工入口。",
      "title": "Quick Start / 官方入口"
    }
  },
  "validation_id": "dval_e2f0e782f2844612b65d669f75051cd6"
}
