{
  "canonical_name": "langchain-ai/langchain",
  "compilation_id": "pack_df941959176045fe822a0b10f3b8afc0",
  "created_at": "2026-05-15T22:17:43.355822+00:00",
  "created_by": "project-pack-compiler",
  "feedback": {
    "carrier_selection_notes": [
      "viable_asset_types=skill, recipe, host_instruction, eval, preflight",
      "recommended_asset_types=skill, recipe, host_instruction, eval, preflight"
    ],
    "evidence_delta": {
      "confirmed_claims": [
        "identity_anchor_present",
        "capability_and_host_targets_present",
        "install_path_declared_or_better"
      ],
      "missing_required_fields": [],
      "must_verify_forwarded": [
        "Run or inspect `pip install langchain` in an isolated environment.",
        "Confirm the project exposes the claimed capability to at least one target host."
      ],
      "quickstart_execution_scope": "allowlisted_sandbox_smoke",
      "sandbox_command": "pip install langchain",
      "sandbox_container_image": "python:3.12-slim",
      "sandbox_execution_backend": "docker",
      "sandbox_planner_decision": "llm_execute_isolated_install",
      "sandbox_validation_id": "sbx_af2dd482acd74d9294c553929d463dae"
    },
    "feedback_event_type": "project_pack_compilation_feedback",
    "learning_candidate_reasons": [],
    "template_gaps": []
  },
  "identity": {
    "canonical_id": "project_2782ce8124d3d7fbf783fa355fb00245",
    "canonical_name": "langchain-ai/langchain",
    "homepage_url": null,
    "license": "unknown",
    "repo_url": "https://github.com/langchain-ai/langchain",
    "slug": "langchain",
    "source_packet_id": "phit_8b5880d098b1485cbd91e3d9539efbfe",
    "source_validation_id": "dval_e8be47b3d404457fa44753c525c9108f"
  },
  "merchandising": {
    "best_for": "需要信息检索与知识管理能力，并使用 chatgpt的用户",
    "github_forks": 22522,
    "github_stars": 136252,
    "one_liner_en": "The agent engineering platform. Available in TypeScript!",
    "one_liner_zh": "The agent engineering platform. Available in TypeScript!",
    "primary_category": {
      "category_id": "research-knowledge",
      "confidence": "high",
      "name_en": "Research & Knowledge",
      "name_zh": "信息检索与知识管理",
      "reason": "curated popular coverage category matched project identity"
    },
    "target_user": "使用 chatgpt 等宿主 AI 的用户",
    "title_en": "langchain",
    "title_zh": "langchain 能力包",
    "visible_tags": [
      {
        "label_en": "Browser Agents",
        "label_zh": "浏览器 Agent",
        "source": "repo_evidence_project_characteristics",
        "tag_id": "product_domain-browser-agents",
        "type": "product_domain"
      },
      {
        "label_en": "Web Task Automation",
        "label_zh": "网页任务自动化",
        "source": "repo_evidence_project_characteristics",
        "tag_id": "user_job-web-task-automation",
        "type": "user_job"
      },
      {
        "label_en": "Structured Data Extraction",
        "label_zh": "结构化数据提取",
        "source": "repo_evidence_project_characteristics",
        "tag_id": "core_capability-structured-data-extraction",
        "type": "core_capability"
      },
      {
        "label_en": "Node-based Workflow",
        "label_zh": "节点式流程编排",
        "source": "repo_evidence_project_characteristics",
        "tag_id": "workflow_pattern-node-based-workflow",
        "type": "workflow_pattern"
      },
      {
        "label_en": "Open Source Tool",
        "label_zh": "开源工具",
        "source": "repo_evidence_project_characteristics",
        "tag_id": "selection_signal-open-source-tool",
        "type": "selection_signal"
      }
    ]
  },
  "packet_id": "phit_8b5880d098b1485cbd91e3d9539efbfe",
  "page_model": {
    "artifacts": {
      "artifact_slug": "langchain",
      "files": [
        "PROJECT_PACK.json",
        "QUICK_START.md",
        "PROMPT_PREVIEW.md",
        "HUMAN_MANUAL.md",
        "AI_CONTEXT_PACK.md",
        "BOUNDARY_RISK_CARD.md",
        "PITFALL_LOG.md",
        "REPO_INSPECTION.json",
        "REPO_INSPECTION.md",
        "CAPABILITY_CONTRACT.json",
        "EVIDENCE_INDEX.json",
        "CLAIM_GRAPH.json"
      ],
      "required_files": [
        "PROJECT_PACK.json",
        "QUICK_START.md",
        "PROMPT_PREVIEW.md",
        "HUMAN_MANUAL.md",
        "AI_CONTEXT_PACK.md",
        "BOUNDARY_RISK_CARD.md",
        "PITFALL_LOG.md",
        "REPO_INSPECTION.json"
      ]
    },
    "detail": {
      "capability_source": "Project Hit Packet + DownstreamValidationResult",
      "commands": [
        {
          "command": "pip install langchain",
          "label": "Python / pip · 官方安装入口",
          "source": "https://github.com/langchain-ai/langchain#readme",
          "verified": true
        }
      ],
      "display_tags": [
        "浏览器 Agent",
        "网页任务自动化",
        "结构化数据提取",
        "节点式流程编排",
        "开源工具"
      ],
      "eyebrow": "信息检索与知识管理",
      "glance": [
        {
          "body": "判断自己是不是目标用户。",
          "label": "最适合谁",
          "value": "需要信息检索与知识管理能力，并使用 chatgpt的用户"
        },
        {
          "body": "先理解能力边界，再决定是否继续。",
          "label": "核心价值",
          "value": "The agent engineering platform. Available in TypeScript!"
        },
        {
          "body": "未完成验证前保持审慎。",
          "label": "继续前",
          "value": "publish to Doramagic.ai project surfaces"
        }
      ],
      "guardrail_source": "Boundary & Risk Card",
      "guardrails": [
        {
          "body": "Prompt Preview 只展示流程，不证明项目已安装或运行。",
          "label": "Check 1",
          "value": "不要把试用当真实运行"
        },
        {
          "body": "chatgpt",
          "label": "Check 2",
          "value": "确认宿主兼容"
        },
        {
          "body": "publish to Doramagic.ai project surfaces",
          "label": "Check 3",
          "value": "先隔离验证"
        }
      ],
      "mode": "skill, recipe, host_instruction, eval, preflight",
      "pitfall_log": {
        "items": [
          {
            "body": "README/documentation is current enough for a first validation pass.",
            "category": "能力坑",
            "evidence": [
              "capability.assumptions | github_repo:552661142 | https://github.com/langchain-ai/langchain | README/documentation is current enough for a first validation pass."
            ],
            "severity": "medium",
            "suggested_check": "将假设转成下游验证清单。",
            "title": "能力判断依赖假设",
            "user_impact": "假设不成立时，用户拿不到承诺的能力。"
          },
          {
            "body": "未记录 last_activity_observed。",
            "category": "维护坑",
            "evidence": [
              "evidence.maintainer_signals | github_repo:552661142 | https://github.com/langchain-ai/langchain | last_activity_observed missing"
            ],
            "severity": "medium",
            "suggested_check": "补 GitHub 最近 commit、release、issue/PR 响应信号。",
            "title": "维护活跃度未知",
            "user_impact": "新项目、停更项目和活跃项目会被混在一起，推荐信任度下降。"
          },
          {
            "body": "no_demo",
            "category": "安全/权限坑",
            "evidence": [
              "downstream_validation.risk_items | github_repo:552661142 | https://github.com/langchain-ai/langchain | no_demo; severity=medium"
            ],
            "severity": "medium",
            "suggested_check": "进入安全/权限治理复核队列。",
            "title": "下游验证发现风险项",
            "user_impact": "下游已经要求复核，不能在页面中弱化。"
          },
          {
            "body": "no_demo",
            "category": "安全/权限坑",
            "evidence": [
              "risks.scoring_risks | github_repo:552661142 | https://github.com/langchain-ai/langchain | no_demo; severity=medium"
            ],
            "severity": "medium",
            "suggested_check": "把风险写入边界卡，并确认是否需要人工复核。",
            "title": "存在评分风险",
            "user_impact": "风险会影响是否适合普通用户安装。"
          },
          {
            "body": "issue_or_pr_quality=unknown。",
            "category": "维护坑",
            "evidence": [
              "evidence.maintainer_signals | github_repo:552661142 | https://github.com/langchain-ai/langchain | issue_or_pr_quality=unknown"
            ],
            "severity": "low",
            "suggested_check": "抽样最近 issue/PR，判断是否长期无人处理。",
            "title": "issue/PR 响应质量未知",
            "user_impact": "用户无法判断遇到问题后是否有人维护。"
          },
          {
            "body": "release_recency=unknown。",
            "category": "维护坑",
            "evidence": [
              "evidence.maintainer_signals | github_repo:552661142 | https://github.com/langchain-ai/langchain | release_recency=unknown"
            ],
            "severity": "low",
            "suggested_check": "确认最近 release/tag 和 README 安装命令是否一致。",
            "title": "发布节奏不明确",
            "user_impact": "安装命令和文档可能落后于代码，用户踩坑概率升高。"
          }
        ],
        "source": "ProjectPitfallLog + ProjectHitPacket + validation + community signals",
        "summary": "发现 6 个潜在踩坑项，其中 0 个为 high/blocking；最高优先级：能力坑 - 能力判断依赖假设。",
        "title": "踩坑日志"
      },
      "snapshot": {
        "contributors": 3681,
        "forks": 22522,
        "license": "unknown",
        "note": "站点快照，非实时质量证明；用于开工前背景判断。",
        "stars": 136252
      },
      "source_url": "https://github.com/langchain-ai/langchain",
      "steps": [
        {
          "body": "不安装项目，先体验能力节奏。",
          "code": "preview",
          "title": "先试 Prompt"
        },
        {
          "body": "理解输入、输出、失败模式和边界。",
          "code": "manual",
          "title": "读说明书"
        },
        {
          "body": "把上下文交给宿主 AI 继续工作。",
          "code": "context",
          "title": "带给 AI"
        },
        {
          "body": "进入主力环境前先完成安装入口与风险边界验证。",
          "code": "verify",
          "title": "沙箱验证"
        }
      ],
      "subtitle": "The agent engineering platform. Available in TypeScript!",
      "title": "langchain 能力包",
      "trial_prompt": "# langchain - Prompt Preview\n\n> Copy the prompt below into your AI host before installing anything.\n> Its purpose is to let you safely feel the project's workflow, not to claim the project has already run.\n\n## Copy this prompt\n\n```text\nYou are using an independent Doramagic capability pack for langchain-ai/langchain.\n\nProject:\n- Name: langchain\n- Repository: https://github.com/langchain-ai/langchain\n- Summary: The agent engineering platform. Available in TypeScript!\n- Host target: chatgpt\n\nGoal:\nHelp me evaluate this project for the following task without installing it yet: The agent engineering platform. Available in TypeScript!\n\nBefore taking action:\n1. Restate my task, success standard, and boundary.\n2. Identify whether the next step requires tools, browser access, network access, filesystem access, credentials, package installation, or host configuration.\n3. Use only the Doramagic Project Pack, the upstream repository, and the source-linked evidence listed below.\n4. If a real command, install step, API call, file write, or host integration is required, mark it as \"requires post-install verification\" and ask for approval first.\n5. If evidence is missing, say \"evidence is missing\" instead of filling the gap.\n\nPreviewable capabilities:\n- Capability 1: The agent engineering platform. Available in TypeScript!\n\nCapabilities that require post-install verification:\n- Capability 1: Use the source-backed project context to guide one small, checkable workflow step.\n\nCore service flow:\n1. introduction: Introduction to LangChain. Produce one small intermediate artifact and wait for confirmation.\n2. getting-started: Getting Started with LangChain. Produce one small intermediate artifact and wait for confirmation.\n3. runnable-execution: Runnable and Execution Model. Produce one small intermediate artifact and wait for confirmation.\n4. messages-system: Messages and Prompt System. 
Produce one small intermediate artifact and wait for confirmation.\n5. chat-models-embeddings: Chat Models and Embeddings. Produce one small intermediate artifact and wait for confirmation.\n\nSource-backed evidence to keep in mind:\n- https://github.com/langchain-ai/langchain\n- https://github.com/langchain-ai/langchain#readme\n- README.md\n- libs/core/langchain_core/__init__.py\n- libs/langchain/langchain_classic/__init__.py\n- libs/core/langchain_core/language_models/chat_models.py\n- libs/core/langchain_core/runnables/base.py\n- libs/core/langchain_core/runnables/config.py\n- libs/core/langchain_core/runnables/retry.py\n- libs/core/langchain_core/runnables/branch.py\n\nFirst response rules:\n1. Start Step 1 only.\n2. Explain the one service action you will perform first.\n3. Ask exactly three questions about my target workflow, success standard, and sandbox boundary.\n4. Stop and wait for my answers.\n\nStep 1 follow-up protocol:\n- After I answer the first three questions, stay in Step 1.\n- Produce six parts only: clarified task, success standard, boundary conditions, two or three options, tradeoffs for each option, and one recommendation.\n- End by asking whether I confirm the recommendation.\n- Do not move to Step 2 until I explicitly confirm.\n\nConversation rules:\n- Advance one step at a time and wait for confirmation after each small artifact.\n- Write outputs as recommendations or planned checks, not as completed execution.\n- Do not claim tests passed, files changed, commands ran, APIs were called, or the project was installed.\n- If the user asks for execution, first provide the sandbox setup, expected output, rollback, and approval checkpoint.\n```\n",
      "voices": [
        {
          "body": "来源平台：github。github/github_issue: [Integration] MINT Protocol - Agents earn crypto for execution（https://github.com/langchain-ai/langchain/issues/34979）；github/github_issue: Feature Request: Payment primitive integration — x402 payment layer for （https://github.com/langchain-ai/langchain/issues/36306）；github/github_issue: Cryptographic agent identity, intent verification, and kill switch for p（https://github.com/langchain-ai/langchain/issues/36232）；github/github_issue: Progress-aware termination: detect no-progress loops in agent tool execu（https://github.com/langchain-ai/langchain/issues/36139）；github/github_issue: Harmony: bad request on gpt-oss-120b and tool calls with `create_react_a（https://github.com/langchain-ai/langchain/issues/32885）；github/github_issue: Feature: callback handler for AI agent threat detection (Agent Threat Ru（https://github.com/langchain-ai/langchain/issues/37314）；github/github_issue: Feature: callback handler for AI agent threat detection (Agent Threat Ru（https://github.com/langchain-ai/langchain/issues/37313）；github/github_issue: Bug: SSRF bypass in validate_safe_url when LANGCHAIN_ENV=local_test（https://github.com/langchain-ai/langchain/issues/37297）；github/github_issue: create_agent Does Not Handle invalid_tool_calls from JSON Parsing Errors（https://github.com/langchain-ai/langchain/issues/33504）；github/github_issue: Easier multimodal tool（https://github.com/langchain-ai/langchain/issues/37312）；github/github_issue: Schema class for multimodal message（https://github.com/langchain-ai/langchain/issues/37311）；github/github_issue: Security: OWASP Agent Memory Guard – protect against memory poisoning at（https://github.com/langchain-ai/langchain/issues/37310）。这些是项目级外部声音，不作为单独质量证明。",
          "items": [
            {
              "kind": "github_issue",
              "source": "github",
              "title": "[Integration] MINT Protocol - Agents earn crypto for execution",
              "url": "https://github.com/langchain-ai/langchain/issues/34979"
            },
            {
              "kind": "github_issue",
              "source": "github",
              "title": "Feature Request: Payment primitive integration — x402 payment layer for ",
              "url": "https://github.com/langchain-ai/langchain/issues/36306"
            },
            {
              "kind": "github_issue",
              "source": "github",
              "title": "Cryptographic agent identity, intent verification, and kill switch for p",
              "url": "https://github.com/langchain-ai/langchain/issues/36232"
            },
            {
              "kind": "github_issue",
              "source": "github",
              "title": "Progress-aware termination: detect no-progress loops in agent tool execu",
              "url": "https://github.com/langchain-ai/langchain/issues/36139"
            },
            {
              "kind": "github_issue",
              "source": "github",
              "title": "Harmony: bad request on gpt-oss-120b and tool calls with `create_react_a",
              "url": "https://github.com/langchain-ai/langchain/issues/32885"
            },
            {
              "kind": "github_issue",
              "source": "github",
              "title": "Feature: callback handler for AI agent threat detection (Agent Threat Ru",
              "url": "https://github.com/langchain-ai/langchain/issues/37314"
            },
            {
              "kind": "github_issue",
              "source": "github",
              "title": "Feature: callback handler for AI agent threat detection (Agent Threat Ru",
              "url": "https://github.com/langchain-ai/langchain/issues/37313"
            },
            {
              "kind": "github_issue",
              "source": "github",
              "title": "Bug: SSRF bypass in validate_safe_url when LANGCHAIN_ENV=local_test",
              "url": "https://github.com/langchain-ai/langchain/issues/37297"
            },
            {
              "kind": "github_issue",
              "source": "github",
              "title": "create_agent Does Not Handle invalid_tool_calls from JSON Parsing Errors",
              "url": "https://github.com/langchain-ai/langchain/issues/33504"
            },
            {
              "kind": "github_issue",
              "source": "github",
              "title": "Easier multimodal tool",
              "url": "https://github.com/langchain-ai/langchain/issues/37312"
            },
            {
              "kind": "github_issue",
              "source": "github",
              "title": "Schema class for multimodal message",
              "url": "https://github.com/langchain-ai/langchain/issues/37311"
            },
            {
              "kind": "github_issue",
              "source": "github",
              "title": "Security: OWASP Agent Memory Guard – protect against memory poisoning at",
              "url": "https://github.com/langchain-ai/langchain/issues/37310"
            }
          ],
          "status": "已收录 13 条来源",
          "title": "社区讨论"
        }
      ]
    },
    "homepage_card": {
      "category": "信息检索与知识管理",
      "desc": "The agent engineering platform. Available in TypeScript!",
      "effort": "安装已验证",
      "forks": 22522,
      "icon": "search",
      "name": "langchain 能力包",
      "risk": "可发布",
      "slug": "langchain",
      "stars": 136252,
      "tags": [
        "浏览器 Agent",
        "网页任务自动化",
        "结构化数据提取",
        "节点式流程编排",
        "开源工具"
      ],
      "thumb": "blue",
      "type": "Skill Pack"
    },
    "manual": {
      "markdown": "# https://github.com/langchain-ai/langchain 项目说明书\n\n生成时间：2026-05-15 21:57:00 UTC\n\n## 目录\n\n- [Introduction to LangChain](#introduction)\n- [Getting Started with LangChain](#getting-started)\n- [Runnable and Execution Model](#runnable-execution)\n- [Messages and Prompt System](#messages-system)\n- [Callbacks and Tracing Infrastructure](#callbacks-tracing)\n- [Chat Models and Embeddings](#chat-models-embeddings)\n- [Agents Framework](#agents-framework)\n- [Tools and Toolkits](#tools-integrations)\n- [Document Loaders and Text Processing](#document-loaders)\n- [Vector Stores and Retrievers](#vectorstores-retrievers)\n\n<a id='introduction'></a>\n\n## Introduction to LangChain\n\n### 相关页面\n\n相关主题：[Runnable and Execution Model](#runnable-execution), [Getting Started with LangChain](#getting-started)\n\n<details>\n<summary>相关源码文件</summary>\n\n以下源码文件用于生成本页说明：\n\n- [README.md](https://github.com/langchain-ai/langchain/blob/main/README.md)\n- [libs/partners/openai/README.md](https://github.com/langchain-ai/langchain/blob/main/libs/partners/openai/README.md)\n- [libs/partners/anthropic/README.md](https://github.com/langchain-ai/langchain/blob/main/libs/partners/anthropic/README.md)\n- [libs/partners/huggingface/README.md](https://github.com/langchain-ai/langchain/blob/main/libs/partners/huggingface/README.md)\n- [libs/langchain/langchain_classic/chains/api/base.py](https://github.com/langchain-ai/langchain/blob/main/libs/langchain/langchain_classic/chains/api/base.py)\n- [libs/partners/ollama/README.md](https://github.com/langchain-ai/langchain/blob/main/libs/partners/ollama/README.md)\n- [libs/partners/chroma/README.md](https://github.com/langchain-ai/langchain/blob/main/libs/partners/chroma/README.md)\n</details>\n\n# Introduction to LangChain\n\n## Overview\n\nLangChain is a comprehensive framework designed for building agents and LLM-powered applications. 
It enables developers to chain together interoperable components and third-party integrations to simplify AI application development, while maintaining flexibility as underlying technologies evolve.\n\nLangChain follows a modular architecture where core functionality is separated into specialized packages. The main `langchain` package serves as the primary interface for most use cases, with partner packages providing dedicated integrations for specific providers.\n\n## Architecture Overview\n\nLangChain's architecture consists of several key layers working together to provide a seamless development experience:\n\n```mermaid\ngraph TD\n    A[Application Layer] --> B[langchain Package]\n    B --> C[langchain-classic]\n    B --> D[langchain-core]\n    C --> E[Chains & Components]\n    C --> F[Community Re-exports]\n    D --> G[Core Abstractions]\n    H[Partner Packages] --> B\n    H --> I[OpenAI, Anthropic, HuggingFace, etc.]\n```\n\n## Core Packages\n\n### langchain-core\n\nThe foundational package containing core abstractions and interfaces that all other LangChain packages depend on. This includes base classes for language models, prompts, callbacks, and other fundamental components.\n\n### langchain-classic\n\nThe classic LangChain package containing chains, `langchain-community` re-exports, indexing API, and deprecated functionality. In most cases, developers should use the main `langchain` package instead.\n\n## Partner Integrations\n\nLangChain provides dedicated partner packages for various LLM providers and services. 
Each package is independently maintained and optimized for its specific provider.\n\n### LLM Provider Packages\n\n| Package | Provider | Purpose | Install Command |\n|---------|----------|---------|-----------------|\n| `langchain-openai` | OpenAI | OpenAI SDK integration | `pip install langchain-openai` |\n| `langchain-anthropic` | Anthropic | Claude models integration | `pip install langchain-anthropic` |\n| `langchain-huggingface` | Hugging Face | Transformers & embeddings | `pip install langchain-huggingface` |\n| `langchain-ollama` | Ollama | Local model inference | `pip install langchain-ollama` |\n| `langchain-deepseek` | DeepSeek | DeepSeek API integration | `pip install langchain-deepseek` |\n| `langchain-groq` | Groq | Groq API integration | `pip install langchain-groq` |\n| `langchain-fireworks` | Fireworks.ai | Fireworks API integration | `pip install langchain-fireworks` |\n| `langchain-perplexity` | Perplexity | Perplexity API integration | `pip install langchain-perplexity` |\n| `langchain-xai` | xAI | xAI API integration | `pip install langchain-xai` |\n| `langchain-openrouter` | OpenRouter | Unified multi-provider API | `pip install langchain-openrouter` |\n\n### Vector Store & Search Packages\n\n| Package | Provider | Purpose | Install Command |\n|---------|----------|---------|-----------------|\n| `langchain-chroma` | Chroma | Vector database integration | `pip install langchain-chroma` |\n| `langchain-exa` | Exa | Web search API for AI | `pip install langchain-exa` |\n| `langchain-nomic` | Nomic | Embedding & visualization | `pip install langchain-nomic` |\n\n## Quick Start\n\nThe simplest way to get started with LangChain is using the `init_chat_model` function for initializing chat models:\n\n```python\nfrom langchain.chat_models import init_chat_model\n\nmodel = init_chat_model(\"openai:gpt-5.4\")\nresult = model.invoke(\"Hello, world!\")\n```\n\nFor more advanced customization or agent orchestration, LangChain recommends using 
[LangGraph](https://docs.langchain.com/oss/python/langgraph/overview), their framework for building controllable agent workflows.\n\n## Installation Options\n\nLangChain offers flexible installation options to suit different needs:\n\n```bash\n# Base installation\npip install langchain\n\n# With specific integrations\npip install langchain-huggingface[full]\n```\n\nThe `[full]` extra for packages like `langchain-huggingface` includes additional dependencies such as `sentence-transformers>=5.2.0` and `transformers>=5.0.0` for local inference capabilities.\n\n## Key Features\n\n### Interoperability\n\nLangChain emphasizes interoperability through standardized interfaces. Components can be easily swapped and combined, allowing developers to:\n\n- Switch between different LLM providers without code changes\n- Combine multiple integrations in a single application\n- Future-proof applications against technology changes\n\n### Flexible Abstraction Layers\n\nLangChain provides multiple levels of abstraction:\n\n- **High-level chains**: Quick starts for common patterns\n- **Low-level components**: Fine-grained control for advanced use cases\n\nThe framework grows with your application's complexity, allowing you to start simple and add sophistication as needed.\n\n### Security Considerations\n\nWhen building applications with LangChain, security should be a primary concern. The framework includes built-in protections for components like API chains:\n\n> **Security Note**: The API chain uses the requests toolkit to make `GET`, `POST`, `PATCH`, `PUT`, and `DELETE` requests to an API. Exercise care in who is allowed to use this chain. 
Control access to who can submit requests and what network access it has.\n\n## Additional Resources\n\nLangChain provides comprehensive documentation and support:\n\n| Resource | URL |\n|----------|-----|\n| Full Documentation | [docs.langchain.com](https://docs.langchain.com/oss/python/langchain/overview) |\n| API Reference | [reference.langchain.com/python](https://reference.langchain.com/python) |\n| Community Forum | [forum.langchain.com](https://forum.langchain.com) |\n| Chat with Docs | [chat.langchain.com](https://chat.langchain.com/) |\n| LangChain Academy | [academy.langchain.com](https://academy.langchain.com/) |\n| Contributing Guide | [docs.langchain.com/contributing](https://docs.langchain.com/oss/python/contributing/overview) |\n\n## Related Projects\n\nFor specialized use cases, LangChain offers additional projects:\n\n- **LangGraph**: Framework for building controllable agent workflows\n- **Deep Agents**: Higher-level package with built-in capabilities for planning, subagents, and file system usage\n- **LangSmith**: Platform for developing, debugging, and deploying AI agents\n- **LangChain.js**: JavaScript/TypeScript equivalent library\n\n## Ecosystem Diagram\n\n```mermaid\ngraph LR\n    A[Developers] --> B[langchain Package]\n    B --> C[Core Abstractions]\n    B --> D[Partner Integrations]\n    D --> E[LLM Providers]\n    D --> F[Vector Stores]\n    D --> G[Search APIs]\n    B --> H[LangGraph]\n    B --> I[LangSmith]\n    C --> J[Chat Models]\n    C --> K[Embeddings]\n    C --> L[Tools]\n    C --> M[Memory]\n```\n\nLangChain continues to evolve with an active open-source community. 
Contributions are welcome in the form of new features, improved infrastructure, or better documentation.\n\n---\n\n<a id='getting-started'></a>\n\n## Getting Started with LangChain\n\n### 相关页面\n\n相关主题：[Introduction to LangChain](#introduction), [Runnable and Execution Model](#runnable-execution)\n\n<details>\n<summary>相关源码文件</summary>\n\n以下源码文件用于生成本页说明：\n\n- [README.md](https://github.com/langchain-ai/langchain/blob/main/README.md)\n- [libs/partners/openai/README.md](https://github.com/langchain-ai/langchain/blob/main/libs/partners/openai/README.md)\n- [libs/partners/anthropic/README.md](https://github.com/langchain-ai/langchain/blob/main/libs/partners/anthropic/README.md)\n- [libs/partners/huggingface/README.md](https://github.com/langchain-ai/langchain/blob/main/libs/partners/huggingface/README.md)\n- [libs/partners/ollama/README.md](https://github.com/langchain-ai/langchain/blob/main/libs/partners/ollama/README.md)\n- [libs/partners/deepseek/README.md](https://github.com/langchain-ai/langchain/blob/main/libs/partners/deepseek/README.md)\n- [libs/partners/groq/README.md](https://github.com/langchain-ai/langchain/blob/main/libs/partners/groq/README.md)\n</details>\n\n# Getting Started with LangChain\n\nLangChain is a framework for building agents and LLM-powered applications. It enables developers to chain together interoperable components and third-party integrations to simplify AI application development while future-proofing decisions as the underlying technology evolves. 资料来源：[README.md](https://github.com/langchain-ai/langchain/blob/main/README.md)\n\n## Installation\n\n### Base Installation\n\nThe core LangChain package can be installed via pip or uv:\n\n```bash\npip install langchain\n# or\nuv add langchain\n```\n\n资料来源：[README.md](https://github.com/langchain-ai/langchain/blob/main/README.md)\n\n### Integration Packages\n\nLangChain uses a modular architecture with partner-specific packages. 
Each integration is distributed as a separate package:\n\n| Package | Purpose | Install Command |\n|---------|---------|-----------------|\n| `langchain-openai` | OpenAI models (GPT-4, GPT-5, etc.) | `pip install langchain-openai` |\n| `langchain-anthropic` | Anthropic models (Claude) | `pip install langchain-anthropic` |\n| `langchain-huggingface` | Hugging Face models | `pip install langchain-huggingface` |\n| `langchain-ollama` | Ollama local models | `pip install langchain-ollama` |\n| `langchain-deepseek` | DeepSeek models | `pip install langchain-deepseek` |\n| `langchain-groq` | Groq models | `pip install langchain-groq` |\n\n资料来源：[libs/partners/openai/README.md](https://github.com/langchain-ai/langchain/blob/main/libs/partners/openai/README.md), [libs/partners/anthropic/README.md](https://github.com/langchain-ai/langchain/blob/main/libs/partners/anthropic/README.md), [libs/partners/ollama/README.md](https://github.com/langchain-ai/langchain/blob/main/libs/partners/ollama/README.md), [libs/partners/deepseek/README.md](https://github.com/langchain-ai/langchain/blob/main/libs/partners/deepseek/README.md), [libs/partners/groq/README.md](https://github.com/langchain-ai/langchain/blob/main/libs/partners/groq/README.md)\n\n### Full Installation with Dependencies\n\nFor packages requiring additional dependencies like `sentence-transformers` or `transformers`:\n\n```bash\npip install langchain-huggingface[full]\n```\n\n> **Note:** The `[full]` extra includes `sentence-transformers>=5.2.0` and `transformers>=5.0.0`. 
资料来源：[libs/partners/huggingface/README.md](https://github.com/langchain-ai/langchain/blob/main/libs/partners/huggingface/README.md)\n\n## Quick Start\n\n### Initializing a Chat Model\n\nLangChain provides a unified interface for initializing chat models using `init_chat_model`:\n\n```python\nfrom langchain.chat_models import init_chat_model\n\nmodel = init_chat_model(\"openai:gpt-5.4\")\nresult = model.invoke(\"Hello, world!\")\n```\n\nThe `init_chat_model` function supports model providers through a prefixed model string format (e.g., `openai:gpt-5.4`, `anthropic:claude-3-opus`). 资料来源：[README.md](https://github.com/langchain-ai/langchain/blob/main/README.md)\n\n### Supported Providers\n\nLangChain integrates with multiple model providers:\n\n- **OpenAI** - GPT-4, GPT-5, and other OpenAI models through the `openai` SDK 资料来源：[libs/partners/openai/README.md](https://github.com/langchain-ai/langchain/blob/main/libs/partners/openai/README.md)\n- **Anthropic** - Claude models through Anthropic's API 资料来源：[libs/partners/anthropic/README.md](https://github.com/langchain-ai/langchain/blob/main/libs/partners/anthropic/README.md)\n- **Hugging Face** - Both API-based and local inference via transformers 资料来源：[libs/partners/huggingface/README.md](https://github.com/langchain-ai/langchain/blob/main/libs/partners/huggingface/README.md)\n- **Ollama** - Local model inference 资料来源：[libs/partners/ollama/README.md](https://github.com/langchain-ai/langchain/blob/main/libs/partners/ollama/README.md)\n- **DeepSeek** - DeepSeek models 资料来源：[libs/partners/deepseek/README.md](https://github.com/langchain-ai/langchain/blob/main/libs/partners/deepseek/README.md)\n- **Groq** - Groq inference 资料来源：[libs/partners/groq/README.md](https://github.com/langchain-ai/langchain/blob/main/libs/partners/groq/README.md)\n\n## Architecture Overview\n\nLangChain provides flexible abstraction layers allowing developers to work at the level of abstraction that suits their needs:\n\n```mermaid\ngraph TD\n    
A[LangChain Application] --> B[High-Level Chains]\n    A --> C[Low-Level Components]\n    B --> D[Agents]\n    B --> E[Chains]\n    C --> F[LLMs]\n    C --> G[Prompts]\n    C --> H[Tools]\n    F --> I[Provider Integrations]\n    I --> J[OpenAI]\n    I --> K[Anthropic]\n    I --> L[HuggingFace]\n    I --> M[Ollama]\n```\n\n- **High-level chains** - Quick starts for common patterns\n- **Low-level components** - Fine-grained control for custom implementations\n\n资料来源：[README.md](https://github.com/langchain-ai/langchain/blob/main/README.md)\n\n## Ecosystem\n\nLangChain consists of multiple interconnected packages:\n\n```mermaid\ngraph LR\n    A[Core LangChain] --> B[langchain-core]\n    A --> C[Partner Packages]\n    A --> D[langchain-community]\n    B --> E[Base abstractions]\n    C --> F[Provider-specific]\n    D --> G[Community integrations]\n```\n\n### Package Structure\n\n| Package | Description |\n|---------|-------------|\n| `langchain-core` | Core abstractions and interfaces |\n| `langchain` | Main framework (chains, agents, core functionality) |\n| `langchain-community` | Community-contributed integrations |\n| Partner packages | Provider-specific integrations (openai, anthropic, etc.) 
|\n\n## Documentation Resources\n\nFor comprehensive learning and reference:\n\n| Resource | URL | Purpose |\n|----------|-----|---------|\n| Main Docs | [docs.langchain.com](https://docs.langchain.com/oss/python/langchain/overview) | Conceptual guides, overviews, tutorials |\n| API Reference | [reference.langchain.com/python](https://reference.langchain.com/python) | Complete API documentation |\n| Chat LangChain | [chat.langchain.com](https://chat.langchain.com/) | Interactive documentation chat |\n| LangChain Forum | [forum.langchain.com](https://forum.langchain.com) | Community discussions |\n| LangChain Academy | [academy.langchain.com](https://academy.langchain.com/) | Free courses on LangChain |\n\n资料来源：[README.md](https://github.com/langchain-ai/langchain/blob/main/README.md)\n\n## Advanced Usage\n\n### Building Agents\n\nFor advanced customization and agent orchestration, LangChain recommends using [LangGraph](https://docs.langchain.com/oss/python/langgraph/overview), a framework for building controllable agent workflows. 资料来源：[README.md](https://github.com/langchain-ai/langchain/blob/main/README.md)\n\n### Development and Debugging\n\nFor developing, debugging, and deploying AI agents and LLM applications, [LangSmith](https://docs.langchain.com/langsmith/home) provides comprehensive tooling. 资料来源：[README.md](https://github.com/langchain-ai/langchain/blob/main/README.md)\n\n## Contributing\n\nLangChain is an open-source project welcoming contributions in the form of new features, improved infrastructure, or better documentation. See the [Contributing Guide](https://docs.langchain.com/oss/python/contributing/overview) for details on how to get involved. 
资料来源：[README.md](https://github.com/langchain-ai/langchain/blob/main/README.md), [libs/partners/openai/README.md](https://github.com/langchain-ai/langchain/blob/main/libs/partners/openai/README.md)\n\n---\n\n<a id='runnable-execution'></a>\n\n## Runnable and Execution Model\n\n### 相关页面\n\n相关主题：[Introduction to LangChain](#introduction), [Callbacks and Tracing Infrastructure](#callbacks-tracing), [Agents Framework](#agents-framework)\n\n<details>\n<summary>相关源码文件</summary>\n\n以下源码文件用于生成本页说明：\n\n- [libs/core/langchain_core/runnables/base.py](https://github.com/langchain-ai/langchain/blob/main/libs/core/langchain_core/runnables/base.py)\n- [libs/core/langchain_core/runnables/config.py](https://github.com/langchain-ai/langchain/blob/main/libs/core/langchain_core/runnables/config.py)\n- [libs/core/langchain_core/runnables/retry.py](https://github.com/langchain-ai/langchain/blob/main/libs/core/langchain_core/runnables/retry.py)\n- [libs/core/langchain_core/runnables/branch.py](https://github.com/langchain-ai/langchain/blob/main/libs/core/langchain_core/runnables/branch.py)\n- [libs/core/langchain_core/runnables/graph.py](https://github.com/langchain-ai/langchain/blob/main/libs/core/langchain_core/runnables/graph.py)\n</details>\n\n# Runnable and Execution Model\n\n## Overview\n\nThe **Runnable** interface is the foundational abstraction in LangChain for creating composable, executable units of work. Every component in LangChain—including chains, models, tools, and utilities—implements the Runnable protocol, enabling uniform composition, execution, and error handling across the entire framework.\n\nThe execution model provides both synchronous and asynchronous invocation patterns, retry mechanisms, configurable runtime parameters, and conditional branching capabilities. 
This design allows developers to build complex AI applications by composing simple, reusable components into sophisticated pipelines.\n\n资料来源：[libs/core/langchain_core/runnables/base.py](https://github.com/langchain-ai/langchain/blob/main/libs/core/langchain_core/runnables/base.py)\n\n---\n\n## Core Interface\n\n### Base Runnable Protocol\n\nThe `Runnable` base class defines the standard interface that all LangChain components implement. Every Runnable must support the following invocation patterns:\n\n| Method | Description | Input | Output |\n|--------|-------------|-------|--------|\n| `invoke` | Synchronous single input processing | `Input` | `Output` |\n| `ainvoke` | Asynchronous single input processing | `Input` | `Awaitable[Output]` |\n| `batch` | Synchronous batch processing | `List[Input]` | `List[Output]` |\n| `abatch` | Asynchronous batch processing | `List[Input]` | `Awaitable[List[Output]]` |\n| `stream` | Synchronous streaming output | `Input` | `Iterator[Output]` |\n| `astream` | Asynchronous streaming output | `Input` | `AsyncIterator[Output]` |\n\n资料来源：[libs/core/langchain_core/runnables/base.py](https://github.com/langchain-ai/langchain/blob/main/libs/core/langchain_core/runnables/base.py)\n\n### Runnable Composition\n\nRunnables can be composed using the pipe operator (`|`), creating a `RunnableSequence`. 
This allows chaining multiple components where the output of one becomes the input of the next.\n\n```mermaid\ngraph LR\n    A[Input] --> B[Runnable 1]\n    B --> C[Runnable 2]\n    C --> D[Runnable 3]\n    D --> E[Output]\n```\n\nExample composition pattern:\n\n```python\nfrom langchain_core.runnables import RunnableLambda\n\nchain = (\n    RunnableLambda(lambda x: x.upper())\n    | RunnableLambda(lambda x: f\"Result: {x}\")\n)\nresult = chain.invoke(\"hello\")  # \"Result: HELLO\"\n```\n\n资料来源：[libs/core/langchain_core/runnables/base.py](https://github.com/langchain-ai/langchain/blob/main/libs/core/langchain_core/runnables/base.py)\n\n---\n\n## Execution Configuration\n\n### RunnableConfig\n\nThe `RunnableConfig` class encapsulates runtime configuration that controls how a Runnable executes. Configuration is passed through the invocation chain and can influence retry behavior, callback invocation, metadata tagging, and execution constraints.\n\n| Parameter | Type | Description |\n|-----------|------|-------------|\n| `tags` | `List[str]` | Tags for tracing and organization |\n| `metadata` | `Dict[str, Any]` | Metadata attached to the run |\n| `callbacks` | `Callbacks` | Callback handlers for monitoring |\n| `max_concurrency` | `int | None` | Maximum concurrent executions |\n| `recursion_limit` | `int` | Maximum recursion depth (default: 25) |\n| `run_name` | `str | None` | Name identifier for this run |\n\n资料来源：[libs/core/langchain_core/runnables/config.py](https://github.com/langchain-ai/langchain/blob/main/libs/core/langchain_core/runnables/config.py)\n\n### Configuration Propagation\n\nConfiguration flows through the Runnable sequence automatically. 
When invoking a chain:\n\n```python\nfrom langchain_core.runnables import RunnableConfig\n\nconfig = RunnableConfig(\n    tags=[\"production\", \"v1\"],\n    metadata={\"user_id\": \"123\"},\n    run_name=\"chat_completion\"\n)\n\n# Config is automatically passed through the chain\nresult = chain.invoke(input, config=config)\n```\n\n资料来源：[libs/core/langchain_core/runnables/config.py](https://github.com/langchain-ai/langchain/blob/main/libs/core/langchain_core/runnables/config.py)\n\n---\n\n## Retry Mechanism\n\n### Retry Configuration\n\nThe retry system provides fault tolerance through the `RetryConfig` class, which defines retry policies for transient failures.\n\n| Parameter | Type | Default | Description |\n|-----------|------|---------|-------------|\n| `max_attempts` | `int` | 3 | Maximum retry attempts |\n| `initial_interval` | `float` | 1.0 | Initial delay between retries (seconds) |\n| `growth_factor` | `float` | 2.0 | Exponential backoff multiplier |\n| `max_interval` | `float` | 10.0 | Maximum delay cap (seconds) |\n| `retry_on` | `Tuple[Type[Exception], ...]` | `(Exception,)` | Exception types to retry |\n\n资料来源：[libs/core/langchain_core/runnables/retry.py](https://github.com/langchain-ai/langchain/blob/main/libs/core/langchain_core/runnables/retry.py)\n\n### Retry Execution Flow\n\n```mermaid\ngraph TD\n    A[Invoke Runnable] --> B{Success?}\n    B -->|Yes| C[Return Result]\n    B -->|No| D{Attempts < Max?}\n    D -->|Yes| E[Wait with Backoff]\n    E --> F[Retry Invocation]\n    F --> B\n    D -->|No| G[Raise Exception]\n```\n\n### Applying Retry to Runnables\n\n```python\nfrom langchain_core.runnables.retry import RetryConfig\n\n# Configure retry policy\nretry_config = RetryConfig(\n    max_attempts=3,\n    initial_interval=0.5,\n    growth_factor=2.0,\n    retry_on=(ConnectionError, TimeoutError)\n)\n\n# Apply to any Runnable\nretry_chain = 
chain.with_retry(retry_config)\n```\n\n资料来源：[libs/core/langchain_core/runnables/retry.py](https://github.com/langchain-ai/langchain/blob/main/libs/core/langchain_core/runnables/retry.py)\n\n---\n\n## Branching Logic\n\n### RunnableBranch\n\nThe `RunnableBranch` class provides conditional execution paths within a chain. It evaluates a series of conditions and executes the corresponding Runnable when a condition is met.\n\n| Parameter | Type | Description |\n|-----------|------|-------------|\n| `branches` | `List[Tuple[Runnable, Runnable]]` | Pairs of condition and branch runnable |\n| `default` | `Runnable | None` | Default branch when no conditions match |\n\n资料来源：[libs/core/langchain_core/runnables/branch.py](https://github.com/langchain-ai/langchain/blob/main/libs/core/langchain_core/runnables/branch.py)\n\n### Branch Execution Flow\n\n```mermaid\ngraph TD\n    A[Input] --> B[Condition 1?]\n    B -->|True| C[Branch 1]\n    B -->|False| D[Condition 2?]\n    D -->|True| E[Branch 2]\n    D -->|False| F[Default Branch]\n    C --> G[Output]\n    E --> G\n    F --> G\n```\n\n### Branching Example\n\n```python\nfrom langchain_core.runnables.branch import RunnableBranch\n\nbranch = RunnableBranch(\n    (lambda x: x.get(\"type\") == \"greeting\", greeting_chain),\n    (lambda x: x.get(\"type\") == \"question\", question_chain),\n    default_chain  # Executes when no conditions match\n)\n\nresult = branch.invoke({\"type\": \"greeting\", \"content\": \"Hello!\"})\n```\n\n资料来源：[libs/core/langchain_core/runnables/branch.py](https://github.com/langchain-ai/langchain/blob/main/libs/core/langchain_core/runnables/branch.py)\n\n---\n\n## Graph Model\n\n### RunnableGraph\n\nThe `RunnableGraph` class provides a visual and programmatic representation of a Runnable sequence as a directed graph. 
This enables inspection, serialization, and visualization of complex chain architectures.\n\n| Method | Description |\n|--------|-------------|\n| `add_node(name, runnable)` | Add a node to the graph |\n| `add_edge(source, target)` | Add a directed edge between nodes |\n| `set_entry_point(name)` | Define the starting node |\n| `get_graph()` | Retrieve the graph representation |\n\n资料来源：[libs/core/langchain_core/runnables/graph.py](https://github.com/langchain-ai/langchain/blob/main/libs/core/langchain_core/runnables/graph.py)\n\n### Graph Visualization\n\n```mermaid\ngraph TD\n    A[entry_point] --> B[process_input]\n    B --> C{Validation?}\n    C -->|Pass| D[execute_step_1]\n    C -->|Fail| E[error_handler]\n    D --> F[execute_step_2]\n    F --> G[finalize]\n    E --> G\n```\n\n### Graph Construction\n\n```python\nfrom langchain_core.runnables.graph import RunnableGraph\n\ngraph = RunnableGraph()\n\ngraph.add_node(\"input\", input_processor)\ngraph.add_node(\"process\", processing_chain)\ngraph.add_node(\"output\", output_formatter)\n\ngraph.set_entry_point(\"input\")\ngraph.add_edge(\"input\", \"process\")\ngraph.add_edge(\"process\", \"output\")\n\n# Visualize or inspect the graph\nvisualization = graph.get_graph()\n```\n\n资料来源：[libs/core/langchain_core/runnables/graph.py](https://github.com/langchain-ai/langchain/blob/main/libs/core/langchain_core/runnables/graph.py)\n\n---\n\n## Type Hints and Generics\n\nThe Runnable interface uses Python generics to ensure type safety across the chain:\n\n```python\nclass Runnable[Input, Output](BaseRunnable):\n    \"\"\"Base class for generic Runnable with typed inputs and outputs.\"\"\"\n    \n    def invoke(self, input: Input, config: RunnableConfig | None = None) -> Output:\n        ...\n```\n\nType preservation through the chain enables:\n- Static type checking with mypy/pyright\n- IDE autocomplete for intermediate outputs\n- Runtime type 
validation\n\n资料来源：[libs/core/langchain_core/runnables/base.py](https://github.com/langchain-ai/langchain/blob/main/libs/core/langchain_core/runnables/base.py)\n\n---\n\n## Summary\n\nThe Runnable and Execution Model forms the backbone of LangChain's architecture:\n\n| Component | Purpose |\n|-----------|---------|\n| **Runnable Protocol** | Unified interface for all executable components |\n| **Execution Methods** | Sync/async, single/batch, and streaming invocations |\n| **Configuration** | Runtime parameters propagated through chains |\n| **Retry Logic** | Automatic retry with exponential backoff |\n| **Branching** | Conditional execution paths |\n| **Graph Model** | Visualization and inspection of chain structure |\n\nThis design enables developers to build complex AI workflows by composing simple, reusable, and testable components with consistent behavior and error handling.\n\n---\n\n<a id='messages-system'></a>\n\n## Messages and Prompt System\n\n### 相关页面\n\n相关主题：[Chat Models and Embeddings](#chat-models-embeddings)\n\nI cannot generate an accurate technical wiki page about the \"Messages and Prompt System\" because the context provided does not contain the actual source code files listed in the `<details>` block. The context only includes README documentation files from various partner packages (huggingface, anthropic, openai, etc.) 
and some API chain code examples.\n\nThe required source files for generating this wiki are:\n\n- `libs/core/langchain_core/messages/__init__.py`\n- `libs/core/langchain_core/messages/chat.py`\n- `libs/core/langchain_core/messages/content.py`\n- `libs/core/langchain_core/messages/block_translators/__init__.py`\n- `libs/core/langchain_core/prompts/__init__.py`\n- `libs/core/langchain_core/prompts/chat.py`\n\n**What the context contains instead:**\n\n- Partner package READMEs (huggingface, anthropic, ollama, chroma, openrouter, groq, exa, deepseek, perplexity, xai, nomic, openai)\n- `libs/langchain/langchain_classic/chains/api/base.py` (API chain implementation)\n- `libs/standard-tests/README.md` (testing utilities documentation)\n\nTo generate a comprehensive and accurate wiki page about the Messages and Prompt System, I need access to the actual implementation source files. Please provide the content of the files listed in the `<details>` block so I can analyze them and create the technical documentation accordingly.\n\n---\n\n<a id='callbacks-tracing'></a>\n\n## Callbacks and Tracing Infrastructure\n\n### 相关页面\n\n相关主题：[Runnable and Execution Model](#runnable-execution)\n\n<details>\n<summary>相关源码文件</summary>\n\n以下源码文件用于生成本页说明：\n\n- [libs/core/langchain_core/callbacks/__init__.py](https://github.com/langchain-ai/langchain/blob/main/libs/core/langchain_core/callbacks/__init__.py)\n- [libs/core/langchain_core/callbacks/base.py](https://github.com/langchain-ai/langchain/blob/main/libs/core/langchain_core/callbacks/base.py)\n- [libs/core/langchain_core/callbacks/manager.py](https://github.com/langchain-ai/langchain/blob/main/libs/core/langchain_core/callbacks/manager.py)\n- [libs/core/langchain_core/tracers/__init__.py](https://github.com/langchain-ai/langchain/blob/main/libs/core/langchain_core/tracers/__init__.py)\n- [libs/core/langchain_core/tracers/base.py](https://github.com/langchain-ai/langchain/blob/main/libs/core/langchain_core/tracers/base.py)\n- 
[libs/core/langchain_core/tracers/event_stream.py](https://github.com/langchain-ai/langchain/blob/main/libs/core/langchain_core/tracers/event_stream.py)\n</details>\n\n# Callbacks and Tracing Infrastructure\n\n## Overview\n\nThe LangChain callbacks and tracing infrastructure provides a unified system for observability, monitoring, and event propagation across LangChain components. This system enables developers to track LLM invocations, chain executions, tool usage, and custom events throughout the application lifecycle.\n\nLangChain follows a monorepo structure where core primitives and abstractions are located in `libs/core/langchain_core/`, with callback and tracer implementations residing in dedicated modules under `langchain_core.callbacks` and `langchain_core.tracers` respectively. 资料来源：[libs/README.md](libs/README.md)\n\n## Architecture Overview\n\n```mermaid\ngraph TD\n    A[Application Code] --> B[CallbackManager]\n    B --> C[BaseCallbackHandler]\n    C --> D[Tracer Implementations]\n    C --> E[Custom Handlers]\n    D --> F[LangSmith]\n    D --> G[Other Exporters]\n    B --> H[AsyncCallbackManager]\n    H --> C\n    style A fill:#e1f5fe\n    style F fill:#c8e6c9\n    style G fill:#c8e6c9\n```\n\n## Callback System\n\n### Core Components\n\nThe callback system in LangChain is built around the following key abstractions:\n\n| Component | File Location | Purpose |\n|-----------|--------------|---------|\n| `BaseCallbackHandler` | `libs/core/langchain_core/callbacks/base.py` | Abstract base class for all callback handlers |\n| `CallbackManager` | `libs/core/langchain_core/callbacks/manager.py` | Manages synchronous callback execution |\n| `AsyncCallbackManager` | `libs/core/langchain_core/callbacks/manager.py` | Manages asynchronous callback execution |\n| `Callbacks` | `libs/core/langchain_core/callbacks/__init__.py` | Public API exports |\n\n### BaseCallbackHandler\n\nThe `BaseCallbackHandler` class defines the contract for all callback implementations. 
It provides methods for intercepting various events during chain execution:\n\n```python\n# Conceptual structure\nclass BaseCallbackHandler:\n    \"\"\"Base class for callback handlers.\"\"\"\n    \n    def on_llm_start(self, ...):\n        \"\"\"Called when LLM starts processing.\"\"\"\n        pass\n    \n    def on_llm_end(self, ...):\n        \"\"\"Called when LLM finishes processing.\"\"\"\n        pass\n    \n    def on_chain_start(self, ...):\n        \"\"\"Called when a chain starts execution.\"\"\"\n        pass\n    \n    def on_chain_end(self, ...):\n        \"\"\"Called when a chain finishes execution.\"\"\"\n        pass\n    \n    def on_tool_start(self, ...):\n        \"\"\"Called when a tool starts execution.\"\"\"\n        pass\n    \n    def on_tool_end(self, ...):\n        \"\"\"Called when a tool finishes execution.\"\"\"\n        pass\n```\n\n资料来源：[libs/core/langchain_core/callbacks/base.py](libs/core/langchain_core/callbacks/base.py)\n\n### CallbackManager\n\nThe `CallbackManager` coordinates multiple callback handlers and ensures proper event propagation:\n\n```mermaid\nsequenceDiagram\n    participant App as Application\n    participant CM as CallbackManager\n    participant H1 as Handler 1\n    participant H2 as Handler 2\n    participant LC as LangChain Component\n\n    App->>CM: add_handler(handler)\n    App->>LC: invoke(inputs, callbacks=CM)\n    LC->>CM: on_chain_start(...)\n    CM->>H1: on_chain_start(...)\n    CM->>H2: on_chain_start(...)\n    LC->>CM: on_llm_start(...)\n    CM->>H1: on_llm_start(...)\n    CM->>H2: on_llm_start(...)\n    LC->>CM: on_llm_end(...)\n    CM->>H1: on_llm_end(...)\n    CM->>H2: on_llm_end(...)\n    LC->>CM: on_chain_end(...)\n    CM->>H1: on_chain_end(...)\n    CM->>H2: on_chain_end(...)\n```\n\nKey features of `CallbackManager`:\n\n- Manages a list of registered handlers\n- Supports handler persistence via `copy()` method\n- Provides context management for nested callbacks\n- Handles both sync and async 
handlers transparently\n\n资料来源：[libs/core/langchain_core/callbacks/manager.py](libs/core/langchain_core/callbacks/manager.py)\n\n### Callback Configuration Options\n\n| Parameter | Type | Description |\n|-----------|------|-------------|\n| `tags` | `list[str]` | Tags for filtering callbacks |\n| ` inheritable` | `bool` | Whether callbacks propagate to child runs |\n| `metadata` | `dict` | Additional context metadata |\n\n## Tracing Infrastructure\n\n### Overview\n\nThe tracing subsystem extends the callback system to provide persistent, exportable traces of LangChain executions. Tracers are specialized callback handlers that serialize execution data for external systems like LangSmith.\n\n```mermaid\ngraph LR\n    A[LangChain Execution] --> B[Tracer Base]\n    B --> C[Core Tracer]\n    B --> D[Event Stream Tracer]\n    C --> E[LangChain Tracers]\n    D --> F[Async Event Streaming]\n    E --> G[LangSmith]\n    E --> H[Other Exporters]\n```\n\n资料来源：[libs/core/langchain_core/tracers/__init__.py](libs/core/langchain_core/tracers/__init__.py)\n\n### Tracer Base Class\n\nLocated in `libs/core/langchain_core/tracers/base.py`, the base tracer provides:\n\n| Feature | Description |\n|---------|-------------|\n| Run tracking | Maintains state for all active and completed runs |\n| Event serialization | Converts callback events to exportable format |\n| Parent-child relationships | Manages nested execution traces |\n| Persistence layer | Abstract interface for storing traces |\n\n### Event Stream Tracer\n\nThe `EventStreamTracer` provides an async-native interface for streaming trace events:\n\n```python\n# Conceptual implementation\nclass EventStreamTracer:\n    \"\"\"Tracer that supports async event streaming.\"\"\"\n    \n    async def astream_events(self, ...):\n        \"\"\"Async generator for trace events.\"\"\"\n        while True:\n            event = await self._get_next_event()\n            if event is None:\n                break\n            yield 
event\n```\n\nThis tracer enables real-time event streaming capabilities for applications requiring live observability.\n\n资料来源：[libs/core/langchain_core/tracers/event_stream.py](libs/core/langchain_core/tracers/event_stream.py)\n\n## Integration Patterns\n\n### Passing Callbacks to Components\n\nCallbacks can be passed to LangChain components at initialization or invocation time:\n\n```python\nfrom langchain_core.callbacks import CallbackManager, BaseCallbackHandler\n\nclass MyHandler(BaseCallbackHandler):\n    def on_chain_start(self, serialized, inputs, **kwargs):\n        print(f\"Chain starting with inputs: {inputs}\")\n\nhandler = MyHandler()\ncallback_manager = CallbackManager(handlers=[handler])\n\n# Pass to model\nresponse = model.invoke(\"Hello\", config={\"callbacks\": callback_manager})\n\n# Pass to chain\nchain.invoke(input, config={\"callbacks\": callback_manager})\n```\n\n### Using Tracers for Persistence\n\n```python\nfrom langchain_core.tracers import LangChainTracer\n\ntracer = LangChainTracer(project_name=\"my-project\")\n\nchain.invoke(\n    input,\n    config={\"callbacks\": [tracer]}\n)\n```\n\n## Data Flow\n\n```mermaid\ngraph TD\n    subgraph Execution\n        A[User Code] --> B[Chain/Model]\n        B --> C[CallbackManager]\n    end\n    \n    subgraph EventPropagation\n        C --> D[on_chain_start]\n        D --> E[on_llm_start]\n        E --> F[LLM Execution]\n        F --> G[on_llm_end]\n        G --> H[on_chain_end]\n    end\n    \n    subgraph Observability\n        H --> I[Tracer]\n        I --> J[Run Tree]\n        J --> K[Export/Store]\n        K --> L[LangSmith/Other]\n    end\n    \n    style L fill:#fff3e0\n    style K fill:#fff3e0\n```\n\n## Best Practices\n\n1. **Handler Isolation**: Create separate callback handlers for different concerns (monitoring, tracing, debugging)\n\n2. **Performance Consideration**: Avoid expensive operations in synchronous callback handlers; prefer async handlers for I/O-bound operations\n\n3. 
**Memory Management**: Use `copy()` on `CallbackManager` when spawning child executions to prevent handler leakage\n\n4. **Tag-Based Filtering**: Use tags to selectively enable/disable callbacks in different environments\n\n5. **Tracer Configuration**: Configure tracers with appropriate project names and metadata for proper organization in observability platforms\n\n## Related Documentation\n\n- [LangChain Core API Reference](https://reference.langchain.com/python/langchain_core)\n- [LangChain Documentation](https://docs.langchain.com/oss/python/langchain/overview)\n- [LangSmith Integration](https://docs.langchain.com/langsmith/home)\n\n## Package Structure\n\nThe callbacks and tracing infrastructure is maintained in the core library:\n\n```\nlibs/core/langchain_core/\n├── callbacks/\n│   ├── __init__.py      # Public exports\n│   ├── base.py          # BaseCallbackHandler\n│   └── manager.py       # CallbackManager, AsyncCallbackManager\n└── tracers/\n    ├── __init__.py      # Public exports\n    ├── base.py          # Base tracer implementations\n    └── event_stream.py  # Async event streaming support\n```\n\n资料来源：[libs/README.md](libs/README.md)\n\n---\n\n<a id='chat-models-embeddings'></a>\n\n## Chat Models and Embeddings\n\n### 相关页面\n\n相关主题：[Introduction to LangChain](#introduction), [Messages and Prompt System](#messages-system)\n\n<details>\n<summary>相关源码文件</summary>\n\n以下源码文件用于生成本页说明：\n\n- [libs/core/langchain_core/language_models/chat_models.py](https://github.com/langchain-ai/langchain/blob/main/libs/core/langchain_core/language_models/chat_models.py)\n- [libs/core/langchain_core/language_models/llms.py](https://github.com/langchain-ai/langchain/blob/main/libs/core/langchain_core/language_models/llms.py)\n- [libs/core/langchain_core/embeddings/__init__.py](https://github.com/langchain-ai/langchain/blob/main/libs/core/langchain_core/embeddings/__init__.py)\n- 
[libs/core/langchain_core/embeddings/embeddings.py](https://github.com/langchain-ai/langchain/blob/main/libs/core/langchain_core/embeddings/embeddings.py)\n- [libs/core/langchain_core/language_models/model_profile.py](https://github.com/langchain-ai/langchain/blob/main/libs/core/langchain_core/language_models/model_profile.py)\n</details>\n\n# Chat Models and Embeddings\n\n## Overview\n\nChat Models and Embeddings are foundational components within the LangChain ecosystem that enable interaction with large language models (LLMs) for conversational and vector-based AI applications. These abstractions provide standardized interfaces for working with different model providers while maintaining flexibility for customization.\n\nLangChain provides two primary categories of language model interfaces:\n\n| Category | Purpose | Key Classes |\n|----------|---------|-------------|\n| **Chat Models** | Handle conversational interactions with message-based APIs | `BaseChatModel`, `BaseMessage` |\n| **Embeddings** | Convert text into vector representations for similarity search | `Embeddings`, `BaseEmbeddings` |\n| **LLMs** | Traditional text-in, text-out language model interfaces | `BaseLLM`, `LLM` |\n\n资料来源：[libs/core/langchain_core/language_models/chat_models.py](https://github.com/langchain-ai/langchain/blob/main/libs/core/langchain_core/language_models/chat_models.py)\n\n## Architecture\n\n### High-Level Component Hierarchy\n\n```mermaid\ngraph TD\n    A[LangChain Core] --> B[Language Models]\n    A --> C[Embeddings]\n    B --> D[BaseChatModel]\n    B --> E[BaseLLM]\n    D --> F[Provider Implementations]\n    E --> G[Provider Implementations]\n    C --> H[Embeddings Interface]\n    H --> I[Embedding Providers]\n    \n    F --> J[OpenAI Chat]\n    F --> K[Anthropic Chat]\n    F --> L[Ollama Chat]\n    \n    I --> M[OpenAI Embeddings]\n    I --> N[HuggingFace Embeddings]\n    I --> O[Ollama Embeddings]\n```\n\n### Chat Model Initialization Pattern\n\nThe preferred method for 
initializing chat models in LangChain is through the `init_chat_model` function, which provides a unified initialization interface across providers.\n\n```python\nfrom langchain.chat_models import init_chat_model\n\n# Initialize with provider prefix\nmodel = init_chat_model(\"openai:gpt-4o-mini\", temperature=0)\n\n# Invoke the model\nresult = model.invoke(\"Hello, world!\")\n```\n\n资料来源：[README.md](https://github.com/langchain-ai/langchain/blob/main/README.md)\n\n## Chat Models\n\n### BaseChatModel Interface\n\nThe `BaseChatModel` class serves as the foundation for all chat model implementations in LangChain. It extends `BaseLanguageModel` and provides standardized methods for generating responses from conversation histories.\n\n#### Key Methods\n\n| Method | Purpose |\n|--------|---------|\n| `invoke()` | Synchronous invocation with input processing |\n| `ainvoke()` | Asynchronous invocation |\n| `bind_tools()` | Attach tools/functions to the model for tool calling |\n| `with_structured_output()` | Constrain output to follow a schema |\n\nThe `bind_tools` method enables models to call external tools, which is fundamental for building agentic applications:\n\n```python\nmodel = ChatOpenAI(model=\"gpt-4o-mini\", temperature=0)\ntools = toolkit.get_tools()\napi_request_chain = API_URL_PROMPT.partial(api_docs=api_spec) | model.bind_tools(tools, tool_choice=\"any\")\n```\n\n资料来源：[libs/langchain/langchain_classic/chains/api/base.py](https://github.com/langchain-ai/langchain/blob/main/libs/langchain/langchain_classic/chains/api/base.py)\n\n### Message Types\n\nChat models in LangChain work with structured message objects that represent different roles in a conversation:\n\n| Message Type | Role | Typical Use |\n|--------------|------|-------------|\n| `SystemMessage` | System | Instructions and context |\n| `HumanMessage` | User | User input |\n| `AIMessage` | Assistant | Model responses |\n| `ToolMessage` | Tool | Tool execution results |\n\n### State Management in 
Chat Applications\n\nWhen building chat applications with stateful interactions, LangChain uses typed state dictionaries with LangGraph:\n\n```python\nclass ChainState(TypedDict):\n    \"\"\"LangGraph state.\"\"\"\n    messages: Annotated[Sequence[BaseMessage], add_messages]\n\nasync def acall_model(state: ChainState, config: RunnableConfig):\n    response = await model.ainvoke(state[\"messages\"], config)\n    return {\"messages\": [response]}\n```\n\n资料来源：[libs/langchain/langchain_classic/chains/api/base.py](https://github.com/langchain-ai/langchain/blob/main/libs/langchain/langchain_classic/chains/api/base.py)\n\n## Embeddings\n\n### Embeddings Interface\n\nThe `Embeddings` interface provides a standardized way to convert text into numerical vector representations. These vectors enable semantic similarity calculations, retrieval-augmented generation (RAG), and vector database operations.\n\n#### Core Methods\n\n| Method | Description |\n|--------|-------------|\n| `embed_query()` | Embed a single query string |\n| `embed_documents()` | Embed multiple document strings |\n| `aembed_query()` | Async version of embed_query |\n| `aembed_documents()` | Async version of embed_documents |\n\n资料来源：[libs/core/langchain_core/embeddings/embeddings.py](https://github.com/langchain-ai/langchain/blob/main/libs/core/langchain_core/embeddings/embeddings.py)\n\n### Integration with Vector Stores\n\nEmbeddings work seamlessly with LangChain's vector store integrations. 
The HuggingFace partner package provides specific implementations:\n\n```bash\n# Install with full dependencies for local inference\npip install langchain-huggingface[full]\n```\n\nThe `[full]` extra includes `sentence-transformers>=5.2.0` and `transformers>=5.0.0`, which are required for `HuggingFaceEmbeddings`.\n\n资料来源：[libs/partners/huggingface/README.md](https://github.com/langchain-ai/langchain/blob/main/libs/partners/huggingface/README.md)\n\n## Model Providers\n\nLangChain provides partner packages for various LLM providers, each following consistent patterns for installation and usage:\n\n| Provider | Package | Install Command |\n|----------|---------|-----------------|\n| OpenAI | `langchain-openai` | `pip install langchain-openai` |\n| Anthropic | `langchain-anthropic` | `pip install langchain-anthropic` |\n| Ollama | `langchain-ollama` | `pip install langchain-ollama` |\n| HuggingFace | `langchain-huggingface` | `pip install langchain-huggingface` |\n| Groq | `langchain-groq` | `pip install langchain-groq` |\n| DeepSeek | `langchain-deepseek` | `pip install langchain-deepseek` |\n| Perplexity | `langchain-perplexity` | `pip install langchain-perplexity` |\n| xAI | `langchain-xai` | `pip install langchain-xai` |\n| Chroma | `langchain-chroma` | `pip install langchain-chroma` |\n| Exa | `langchain-exa` | `pip install langchain-exa` |\n| OpenRouter | `langchain-openrouter` | `pip install langchain-openrouter` |\n\nEach provider package contains integration classes that implement the base interfaces described above.\n\n资料来源：[libs/partners/openai/README.md](https://github.com/langchain-ai/langchain/blob/main/libs/partners/openai/README.md), [libs/partners/anthropic/README.md](https://github.com/langchain-ai/langchain/blob/main/libs/partners/anthropic/README.md), [libs/partners/ollama/README.md](https://github.com/langchain-ai/langchain/blob/main/libs/partners/ollama/README.md)\n\n## Testing Chat Models\n\nLangChain provides standardized test infrastructure 
through the `langchain-tests` package to ensure consistent behavior across implementations:\n\n```python\nfrom typing import Type\nimport pytest\nfrom langchain_core.language_models import BaseChatModel\nfrom langchain_tests.unit_tests import ChatModelUnitTests\n\nfrom langchain_parrot_chain import ChatParrotChain\n\nclass TestParrotChainStandard(ChatModelUnitTests):\n    @pytest.fixture\n    def chat_model_class(self) -> Type[BaseChatModel]:\n        return ChatParrotChain\n```\n\n#### Configurable Test Fixtures\n\n| Fixture | Required | Default | Description |\n|---------|----------|---------|-------------|\n| `chat_model_class` | Yes | — | The chat model class to test |\n| `chat_model_params` | No | `{}` | Constructor keyword arguments |\n| `chat_model_has_tool_calling` | No | `hasattr(class, 'bind_tools')` | Tool calling capability |\n| `chat_model_has_structured_output` | No | `hasattr(class, 'with_structured_output')` | Structured output support |\n\n资料来源：[libs/standard-tests/README.md](https://github.com/langchain-ai/langchain/blob/main/libs/standard-tests/README.md)\n\n## Deprecated Patterns\n\nThe `langchain-community` package previously contained community-contributed integrations, but this pattern is being deprecated in favor of official partner packages:\n\n```python\n@deprecated(\n    since=\"0.2.13\",\n    removal=\"2.0.0\",\n    alternative=\"langchain.agents.create_agent\",\n)\nclass APIChain(Chain):\n    \"\"\"Chain that makes API calls and summarizes the responses.\"\"\"\n```\n\nNew integrations should be implemented as standalone partner packages following the patterns established in `libs/partners/*`.\n\n资料来源：[libs/langchain/langchain_classic/chains/api/base.py](https://github.com/langchain-ai/langchain/blob/main/libs/langchain/langchain_classic/chains/api/base.py)\n\n## Quick Reference\n\n### Initialization\n\n```python\nfrom langchain.chat_models import init_chat_model\n\n# Basic initialization\nmodel = 
init_chat_model(\"provider:model-name\")\n\n# With parameters\nmodel = init_chat_model(\"openai:gpt-4o-mini\", temperature=0)\n```\n\n### Invocation\n\n```python\n# Synchronous\nresponse = model.invoke(\"Your message here\")\n\n# Asynchronous\nresponse = await model.ainvoke(\"Your message here\")\n\n# With chat history\nresponse = model.invoke([SystemMessage(content=\"You are helpful\"), \n                         HumanMessage(content=\"Hello\")])\n```\n\n### Tool Binding\n\n```python\nmodel_with_tools = model.bind_tools(tools)\nresponse = model_with_tools.invoke(\"Use tool X to find information\")\n```\n\n## Further Reading\n\n- [API Reference](https://reference.langchain.com/python) - Complete API documentation\n- [LangChain Docs](https://docs.langchain.com/oss/python/langchain/overview) - Conceptual guides and tutorials\n- [LangGraph](https://docs.langchain.com/oss/python/langgraph/overview) - Building controllable agent workflows\n- [Contributing Guide](https://docs.langchain.com/oss/python/contributing/overview) - How to add new integrations\n\n---\n\n<a id='agents-framework'></a>\n\n## Agents Framework\n\n### 相关页面\n\n相关主题：[Tools and Toolkits](#tools-integrations), [Runnable and Execution Model](#runnable-execution)\n\n<details>\n<summary>相关源码文件</summary>\n\n以下源码文件用于生成本页说明：\n\n- [libs/langchain/langchain_classic/agents/__init__.py](https://github.com/langchain-ai/langchain/blob/main/libs/langchain/langchain_classic/agents/__init__.py)\n- [libs/langchain/langchain_classic/agents/agent.py](https://github.com/langchain-ai/langchain/blob/main/libs/langchain/langchain_classic/agents/agent.py)\n- [libs/langchain/langchain_classic/agents/react/__init__.py](https://github.com/langchain-ai/langchain/blob/main/libs/langchain/langchain_classic/agents/react/__init__.py)\n- [libs/langchain/langchain_classic/agents/structured_chat/__init__.py](https://github.com/langchain-ai/langchain/blob/main/libs/langchain/langchain_classic/agents/structured_chat/__init__.py)\n- 
[libs/langchain/langchain_classic/agents/openai_functions_agent/__init__.py](https://github.com/langchain-ai/langchain/blob/main/libs/langchain/langchain_classic/agents/openai_functions_agent/__init__.py)\n- [libs/langchain/langchain_classic/agents/agent_toolkits/__init__.py](https://github.com/langchain-ai/langchain/blob/main/libs/langchain/langchain_classic/agents/agent_toolkits/__init__.py)\n</details>\n\n# Agents Framework\n\nThe LangChain Agents Framework is a core component of the LangChain ecosystem that enables language models to reason about and take actions through a systematic loop of observation, thought, and execution. The framework provides abstractions for building autonomous agents that can use tools, make decisions, and complete complex multi-step tasks.\n\n## Architecture Overview\n\nLangChain's agent architecture follows a cyclical pattern where the agent receives inputs, decides on actions using an LLM, executes those actions via tools, and processes the results to determine the next step.\n\n```mermaid\ngraph TD\n    A[User Input] --> B[Agent]\n    B --> C[LLM Decision]\n    C --> D{Action Type}\n    D -->|Tool Call| E[Tool Execution]\n    D -->|Final Answer| F[Output]\n    E --> G[Observation]\n    G --> B\n    F --> H[User]\n    \n    style B fill:#e1f5fe\n    style C fill:#fff3e0\n    style E fill:#f3e5f5\n```\n\n## Core Agent Classes\n\nThe agent system is built on a base `Agent` class that defines the interface for all agent implementations. 
LangChain provides several pre-built agent types for different use cases.\n\n### Base Agent Interface\n\nAll agents inherit from the base `Agent` class which provides:\n\n- Chain integration for LLM calls\n- Tool binding and management\n- Output parsing for action extraction\n- State management across agent steps\n\n### Agent Types\n\n| Agent Type | Description | Use Case |\n|------------|-------------|----------|\n| `ReAct` | Reasoning + Acting agent | General purpose tool use |\n| `Structured Chat` | JSON-structured tool calls | Structured tool interfaces |\n| `OpenAI Functions` | OpenAI function calling format | OpenAI function calling |\n| `Toolkits` | Pre-configured tool bundles | Domain-specific tasks |\n\n资料来源：[libs/langchain/langchain_classic/agents/__init__.py](https://github.com/langchain-ai/langchain/blob/main/libs/langchain/langchain_classic/agents/__init__.py)\n\n## Tool Integration\n\nAgents interact with external systems through tools. Tools are abstractions that wrap functionality and expose it to the LLM in a structured format.\n\n### Tool Structure\n\nEach tool typically consists of:\n\n- **Name**: Unique identifier for the tool\n- **Description**: Natural language description for LLM understanding\n- **Parameters**: Schema defining required and optional inputs\n- **Execute Function**: The actual implementation\n\n### Toolkits\n\nToolkits provide pre-configured bundles of related tools for specific domains:\n\n| Toolkit | Purpose |\n|---------|---------|\n| `RequestsToolkit` | HTTP requests for API interactions |\n| `SQLDatabaseToolkit` | Database queries |\n| Custom Toolkits | Domain-specific tool collections |\n\n资料来源：[libs/langchain/langchain_classic/agents/agent_toolkits/__init__.py](https://github.com/langchain-ai/langchain/blob/main/libs/langchain/langchain_classic/agents/agent_toolkits/__init__.py)\n\n## Agent Execution Loop\n\nThe agent execution follows a ReAct-style (Reasoning + Acting) pattern:\n\n```mermaid\nsequenceDiagram\n    
participant User\n    participant Agent\n    participant LLM\n    participant Tools\n    \n    User->>Agent: Input Query\n    Agent->>LLM: Prompt + Tools\n    LLM-->>Agent: Thought + Action\n    Agent->>Tools: Execute Tool Call\n    Tools-->>Agent: Observation\n    Agent->>LLM: Observation + Next Decision\n    loop Until Final Answer\n        LLM-->>Agent: Next Action\n        Agent->>Tools: Execute Tool Call\n        Tools-->>Agent: Observation\n    end\n    Agent-->>User: Final Response\n```\n\n## Agent Creation API\n\nThe recommended way to create agents is through the `create_agent` factory function:\n\n```python\nfrom langchain.agents import create_agent\n\n# Create an agent with specified model and tools\nagent = create_agent(\n    model=\"openai:gpt-4\",\n    tools=[...],\n    system_message=\"You are a helpful assistant.\"\n)\n```\n\n资料来源：[libs/langchain/langchain_classic/agents/agent.py](https://github.com/langchain-ai/langchain/blob/main/libs/langchain/langchain_classic/agents/agent.py)\n\n## Agent Types Detail\n\n### ReAct Agent\n\nThe ReAct (Reasoning + Acting) agent implements a reasoning loop where the model thinks through each step before taking action.\n\n**Characteristics:**\n\n- Explicit thought process before each action\n- Observation processing after tool execution\n- Flexible tool usage without strict schema requirements\n\n资料来源：[libs/langchain/langchain_classic/agents/react/__init__.py](https://github.com/langchain-ai/langchain/blob/main/libs/langchain/langchain_classic/agents/react/__init__.py)\n\n### Structured Chat Agent\n\nDesigned for agents that interact with tools using structured JSON inputs, this agent type excels when working with tools that have complex parameter schemas.\n\n**Characteristics:**\n\n- JSON-structured tool inputs\n- Strong typing for tool parameters\n- Schema validation for tool 
calls\n\n资料来源：[libs/langchain/langchain_classic/agents/structured_chat/__init__.py](https://github.com/langchain-ai/langchain/blob/main/libs/langchain/langchain_classic/agents/structured_chat/__init__.py)\n\n### OpenAI Functions Agent\n\nOptimized for OpenAI's function calling API, this agent type maps LangChain tools to OpenAI function specifications.\n\n**Characteristics:**\n\n- Direct mapping to OpenAI function calling format\n- Tool choice controls (auto, any, none)\n- Streaming support for responses\n\n资料来源：[libs/langchain/langchain_classic/agents/openai_functions_agent/__init__.py](https://github.com/langchain-ai/langchain/blob/main/libs/langchain/langchain_classic/agents/openai_functions_agent/__init__.py)\n\n## Integration with LangGraph\n\nFor complex agent workflows, LangChain Agents integrate seamlessly with LangGraph for building stateful, controllable agent applications:\n\n```python\nfrom langgraph.prebuilt import ToolNode\nfrom langgraph.graph import StateGraph\n\nclass AgentState(TypedDict):\n    messages: Annotated[Sequence[BaseMessage], add_messages]\n\ngraph_builder = StateGraph(AgentState)\ngraph_builder.add_node(\"call_tool\", some_agent_node)\ngraph_builder.add_node(\"execute_tool\", ToolNode(tools))\ngraph_builder.add_edge(\"call_tool\", \"execute_tool\")\n```\n\n## Configuration Options\n\n| Parameter | Type | Description |\n|-----------|------|-------------|\n| `model` | `str` or `BaseLanguageModel` | LLM to power the agent |\n| `tools` | `List[BaseTool]` | Available tools for the agent |\n| `system_message` | `str` | Instructions for agent behavior |\n| `max_iterations` | `int` | Maximum agent steps before termination |\n| `timeout` | `float` | Timeout for agent execution |\n\n## Deprecated Patterns\n\nThe `APIChain` class has been deprecated in favor of the new agent system:\n\n```python\n@deprecated(\n    since=\"0.2.13\",\n    removal=\"2.0.0\",\n    alternative=\"langchain.agents.create_agent\",\n)\nclass APIChain(Chain):\n    
\"\"\"Chain that makes API calls and summarizes responses.\"\"\"\n```\n\nMigration to the new agent system is recommended for all use cases.\n\n## Security Considerations\n\nWhen deploying agents:\n\n- **Input Validation**: Validate all user inputs before processing\n- **Tool Permissions**: Control which tools are available to agents\n- **Network Access**: Limit agent network capabilities appropriately\n- **Rate Limiting**: Implement rate limiting to prevent abuse\n\n## Summary\n\nThe LangChain Agents Framework provides a flexible, extensible system for building LLM-powered agents that can:\n\n1. Reason about user queries\n2. Select appropriate tools for task completion\n3. Execute actions and process results\n4. Iterate until goals are achieved\n\nThe framework supports multiple agent types optimized for different use cases, with seamless integration into the broader LangChain ecosystem including LangGraph for complex workflows.\n\n---\n\n<a id='tools-integrations'></a>\n\n## Tools and Toolkits\n\n### 相关页面\n\n相关主题：[Agents Framework](#agents-framework)\n\n<details>\n<summary>相关源码文件</summary>\n\n以下源码文件用于生成本页说明：\n\n- [libs/core/langchain_core/tools/__init__.py](https://github.com/langchain-ai/langchain/blob/main/libs/core/langchain_core/tools/__init__.py)\n- [libs/core/langchain_core/tools/base.py](https://github.com/langchain-ai/langchain/blob/main/libs/core/langchain_core/tools/base.py)\n- [libs/core/langchain_core/tools/structured.py](https://github.com/langchain-ai/langchain/blob/main/libs/core/langchain_core/tools/structured.py)\n- [libs/core/langchain_core/tools/render.py](https://github.com/langchain-ai/langchain/blob/main/libs/core/langchain_core/tools/render.py)\n- [libs/langchain/langchain_classic/tools/__init__.py](https://github.com/langchain-ai/langchain/blob/main/libs/langchain/langchain_classic/tools/__init__.py)\n- 
[libs/langchain/langchain_classic/tools/base.py](https://github.com/langchain-ai/langchain/blob/main/libs/langchain/langchain_classic/tools/base.py)\n</details>\n\n# Tools and Toolkits\n\nLangChain provides a flexible and extensible system for defining, managing, and executing tools that enable Large Language Models (LLMs) to interact with external systems, APIs, and data sources. The Tools and Toolkits module forms the foundational building block for agent-based architectures, allowing models to perform actions and retrieve information beyond their training data.\n\n## Overview\n\nTools in LangChain serve as the interface between LLMs and external capabilities. They provide a standardized way to:\n\n- Define callable functions with clear input/output schemas\n- Control what operations an agent can perform\n- Handle serialization, deserialization, and tool calling protocols\n- Integrate with various third-party services and APIs\n\nThe system is designed around the principle of structured tool definitions, where each tool specifies its name, description, and parameter schema, enabling LLMs to understand when and how to invoke specific tools.\n\n## Architecture\n\n```mermaid\ngraph TD\n    A[Agent / LLM] -->|Tool Calls| B[Tool Executor]\n    B --> C[BaseTool]\n    C --> D[StructuredTool]\n    C --> E[ToolKit]\n    D --> F[Function Definition]\n    E --> G[Tool Collection]\n    B -->|Results| A\n    \n    style A fill:#e1f5ff\n    style B fill:#fff3e0\n    style C fill:#e8f5e9\n```\n\n## Core Components\n\n### BaseTool\n\nThe `BaseTool` class serves as the foundation for all tools in LangChain. 
It provides the essential interface that all tool implementations must follow.\n\n| Attribute | Type | Description |\n|-----------|------|-------------|\n| `name` | `str` | Unique identifier for the tool |\n| `description` | `str` | Human-readable description for LLM understanding |\n| `args_schema` | `Type[BaseModel]` | Pydantic model defining input parameters |\n| `return_schema` | `Optional[Type[BaseModel]]` | Schema for return values |\n| `tags` | `Optional[List[str]]` | Categorization tags |\n| `metadata` | `Optional[Dict[str, Any]]` | Additional metadata |\n\n**Key Methods:**\n\n```python\ndef invoke(self, input: Union[str, Dict], config: Optional[CallbackManager] = None) -> Any\nasync def ainvoke(self, input: Union[str, Dict], config: Optional[AsyncCallbackManager] = None) -> Any\n```\n\n资料来源：[libs/core/langchain_core/tools/base.py](https://github.com/langchain-ai/langchain/blob/main/libs/core/langchain_core/tools/base.py)\n\n### StructuredTool\n\n`StructuredTool` extends `BaseTool` with enhanced parameter handling capabilities. 
It supports complex input schemas and provides better type safety for tool definitions.\n\n**Key Features:**\n\n- Pydantic-based input validation\n- JSON schema generation for LLM tool calling protocols\n- Support for nested objects and arrays\n- Custom validation logic via model validators\n\n资料来源：[libs/core/langchain_core/tools/structured.py](https://github.com/langchain-ai/langchain/blob/main/libs/core/langchain_core/tools/structured.py)\n\n## Tool Definition Pattern\n\n```python\nfrom langchain_core.tools import StructuredTool\nfrom pydantic import BaseModel, Field\n\nclass SearchInput(BaseModel):\n    query: str = Field(description=\"The search query to look up\")\n    limit: int = Field(default=10, description=\"Maximum number of results\")\n\ndef search_function(query: str, limit: int = 10) -> str:\n    \"\"\"Perform a web search and return results.\"\"\"\n    # Implementation here\n    pass\n\nsearch_tool = StructuredTool(\n    name=\"web_search\",\n    description=\"Search the web for information about a given topic\",\n    func=search_function,\n    args_schema=SearchInput,\n)\n```\n\n## Toolkits\n\nToolkits provide a convenient way to group related tools together. 
They bundle multiple tools that are commonly used together for specific use cases.\n\n### Toolkit Structure\n\n| Component | Description |\n|-----------|-------------|\n| `tools` | List of `BaseTool` instances |\n| `name` | Toolkit identifier |\n| `description` | Summary of toolkit purpose |\n\n### Creating Custom Toolkits\n\n```python\nfrom langchain_core.tools import BaseTool, StructuredTool\nfrom langchain_core.tools import Toolkit\n\nclass CustomToolkit(Toolkit):\n    def __init__(self) -> None:\n        super().__init__()\n        self.tools = [\n            tool_a,\n            tool_b,\n            tool_c,\n        ]\n    \n    def get_tools(self) -> list[BaseTool]:\n        return self.tools\n```\n\n资料来源：[libs/langchain/langchain_classic/tools/base.py](https://github.com/langchain-ai/langchain/blob/main/libs/langchain/langchain_classic/tools/base.py)\n\n## Tool Rendering\n\nTool rendering converts tool definitions into formats consumable by different LLM providers and protocols.\n\n```mermaid\ngraph LR\n    A[BaseTool] -->|convert| B[OpenAI Format]\n    A -->|convert| C[Anthropic Format]\n    A -->|convert| D[JSON Schema]\n    A -->|convert| E[Custom Format]\n```\n\n### Render Modes\n\n| Mode | Description |\n|------|-------------|\n| `tool_call` | Renders tools for OpenAI-style tool calling |\n| `json_schema` | Outputs JSON Schema representation |\n| `anthropic` | Format compatible with Anthropic Claude models |\n\n资料来源：[libs/core/langchain_core/tools/render.py](https://github.com/langchain-ai/langchain/blob/main/libs/core/langchain_core/tools/render.py)\n\n## Tool Execution Flow\n\n```mermaid\nsequenceDiagram\n    participant Agent\n    participant Executor\n    participant Tool\n    participant External\n    \n    Agent->>Executor: invoke(tool_name, parameters)\n    Executor->>Tool: validate_and_bind_params(input)\n    Tool->>External: execute_action()\n    External-->>Tool: result\n    Tool-->>Executor: validated_result\n    Executor-->>Agent: 
ToolOutput\n```\n\n## Integration with Agents\n\nTools are bound to agents through the agent construction process. The binding specifies which tools an agent can access and how they should be invoked.\n\n**Common Patterns:**\n\n1. **Direct Binding**: Single tool passed to agent\n2. **ToolKit Binding**: Multiple related tools from a toolkit\n3. **Dynamic Binding**: Tools selected at runtime based on context\n\n```python\nfrom langchain.agents import create_agent\nfrom langchain_core.tools import StructuredTool\n\nagent = create_agent(\n    llm=model,\n    tools=[search_tool, calculator_tool],\n    prompt=system_prompt,\n)\n```\n\n## Built-in Tool Categories\n\n### Core Tools\n\nLocated in `libs/langchain/langchain_classic/tools/`:\n\n| Tool | Purpose |\n|------|---------|\n| `Search` | Web search capabilities |\n| `Wikipedia` | Wikipedia API integration |\n| `Calculator` | Mathematical operations |\n| `FileSystem` | Local file operations |\n\n### API Integration Tools\n\nLangChain provides specialized tools for API interactions:\n\n```python\nfrom langchain_classic.chains.api.base import APIChain\n\n# APIChain is deprecated in favor of agent-based approaches\n# but demonstrates the pattern of API tool integration\n```\n\n资料来源：[libs/langchain/langchain_classic/chains/api/base.py](https://github.com/langchain-ai/langchain/blob/main/libs/langchain/langchain_classic/chains/api/base.py)\n\n## Configuration Options\n\n### Tool Configuration Parameters\n\n| Parameter | Type | Default | Description |\n|-----------|------|---------|-------------|\n| `handle_parsing_errors` | `bool` | `True` | Handle LLM output parsing errors |\n| `return_direct` | `bool` | `False` | Return tool output directly to LLM |\n| `verbose` | `bool` | `False` | Enable verbose logging |\n| `max_retries` | `int` | `3` | Maximum retry attempts |\n| `timeout` | `Optional[float]` | `None` | Execution timeout in seconds |\n\n## Error Handling\n\nTools implement comprehensive error handling to manage 
failures gracefully:\n\n```python\ntry:\n    result = tool.invoke(input_dict)\nexcept ValidationError as e:\n    # Handle input validation failures\n    return ErrorOutput(error=str(e))\nexcept ExecutionError as e:\n    # Handle execution failures\n    return ErrorOutput(error=str(e))\nexcept TimeoutError:\n    # Handle timeout\n    return ErrorOutput(error=\"Tool execution timed out\")\n```\n\n## Best Practices\n\n### 1. Clear Tool Descriptions\nWrite descriptions that help the LLM understand when and how to use each tool:\n\n```python\ndescription = \"\"\"\n    Search for information on the web.\n    \n    Use this tool when:\n    - User asks about current events or facts\n    - User requests information not in training data\n    - User asks about specific entities that need verification\n    \n    Parameters:\n    - query: The search query (required)\n    - limit: Maximum results to return (optional, default: 5)\n    \"\"\"\n```\n\n### 2. Comprehensive Schemas\nDefine complete input schemas with descriptions:\n\n```python\nclass QueryInput(BaseModel):\n    query: str = Field(\n        description=\"The search query string. Be specific and include relevant keywords.\",\n        min_length=1,\n        max_length=500,\n    )\n    source: Literal[\"web\", \"news\", \"scholar\"] = Field(\n        default=\"web\",\n        description=\"The type of search to perform\",\n    )\n```\n\n### 3. Proper Error Messages\nReturn meaningful error messages that help the agent recover:\n\n```python\ndef execute_tool(self, params: Dict) -> str:\n    try:\n        return self._do_execution(params)\n    except ValueError as e:\n        return f\"Invalid input: {str(e)}. Please check your parameters and try again.\"\n    except ConnectionError:\n        return \"Unable to connect to the service. 
Please check your network connection.\"\n```\n\n## Testing Tools\n\nLangChain provides testing utilities for tool implementations:\n\n```python\nfrom langchain_core.tools import tool, StructuredTool\n\ndef test_tool_invoke():\n    \"\"\"Test tool invocation with valid input.\"\"\"\n    result = my_tool.invoke({\"param1\": \"value1\"})\n    assert result is not None\n\ndef test_tool_validation():\n    \"\"\"Test input validation.\"\"\"\n    from pydantic import ValidationError\n    try:\n        my_tool.invoke({\"invalid_param\": \"value\"})\n        assert False, \"Should have raised ValidationError\"\n    except ValidationError:\n        pass  # Expected behavior\n```\n\n## Package Structure\n\n```\nlangchain/\n├── libs/\n│   ├── core/\n│   │   └── langchain_core/\n│   │       └── tools/\n│   │           ├── __init__.py       # Public API exports\n│   │           ├── base.py           # BaseTool implementation\n│   │           ├── structured.py     # StructuredTool implementation\n│   │           └── render.py         # Tool rendering utilities\n│   └── langchain/\n│       └── langchain_classic/\n│           └── tools/\n│               ├── __init__.py       # Built-in tools\n│               └── base.py           # Toolkit base classes\n```\n\n## See Also\n\n- [Agents Documentation](https://docs.langchain.com/oss/python/langchain/agents)\n- [Chain Reference](https://docs.langchain.com/oss/python/langchain/chains)\n- [Integration Providers](https://docs.langchain.com/oss/python/integrations/providers)\n\n---\n\n<a id='document-loaders'></a>\n\n## Document Loaders and Text Processing\n\n### 相关页面\n\n相关主题：[Vector Stores and Retrievers](#vectorstores-retrievers), [Document Loaders and Text Processing](#document-loaders)\n\n<details>\n<summary>相关源码文件</summary>\n\n以下源码文件用于生成本页说明：\n\n- [libs/core/langchain_core/document_loaders/__init__.py](https://github.com/langchain-ai/langchain/blob/main/libs/core/langchain_core/document_loaders/__init__.py)\n- 
[libs/core/langchain_core/document_loaders/base.py](https://github.com/langchain-ai/langchain/blob/main/libs/core/langchain_core/document_loaders/base.py)\n- [libs/langchain/langchain_classic/document_loaders/__init__.py](https://github.com/langchain-ai/langchain/blob/main/libs/langchain/langchain_classic/document_loaders/__init__.py)\n- [libs/langchain/langchain_classic/document_loaders/base.py](https://github.com/langchain-ai/langchain/blob/main/libs/langchain/langchain_classic/document_loaders/base.py)\n- [libs/text-splitters/langchain_text_splitters/__init__.py](https://github.com/langchain-ai/langchain/blob/main/libs/text-splitters/langchain_text_splitters/__init__.py)\n- [libs/text-splitters/langchain_text_splitters/character.py](https://github.com/langchain-ai/langchain/blob/main/libs/text-splitters/langchain_text_splitters/character.py)\n</details>\n\n# Document Loaders and Text Processing\n\nDocument Loaders and Text Processing are fundamental components in the LangChain ecosystem that enable the ingestion, transformation, and preparation of various document types for downstream AI applications. These components form the entry point for most LLM-powered workflows, converting raw content from diverse sources into structured `Document` objects that can be processed by chains, agents, and retrieval systems.\n\nThe document processing pipeline in LangChain follows a two-stage approach: first, documents are loaded from external sources using specialized loaders; second, the loaded content is optionally split into smaller, semantically coherent chunks using text splitters. This separation of concerns allows for flexible composition and reuse across different application scenarios.\n\n## Architecture Overview\n\nLangChain's document processing architecture is distributed across multiple packages within the monorepo, with core abstractions defined in `langchain-core` and legacy implementations preserved in `langchain-classic`. 
Text splitting functionality is isolated in a dedicated `langchain-text-splitters` package to maintain separation of concerns and enable independent versioning.\n\nThe architecture employs a base class pattern where `BaseLoader` defines the interface contract that all concrete loader implementations must satisfy. This design enables polymorphic usage across different document sources while maintaining consistent behavior. The `BaseTransformer` abstraction extends this pattern for document transformation, and the `TextSplitter` hierarchy provides pluggable text chunking strategies.\n\n```mermaid\ngraph TD\n    A[External Sources] --> B[Document Loaders]\n    B --> C[Document Objects]\n    C --> D[Text Splitters]\n    D --> E[Chunked Documents]\n    E --> F[Vector Stores]\n    E --> G[LLM Chains]\n    E --> H[Agents]\n    \n    B1[BaseLoader] --> B2[File Loaders]\n    B1 --> B3[Web Loaders]\n    B1 --> B4[Database Loaders]\n    B1 --> B5[Cloud Storage Loaders]\n    \n    D1[TextSplitter] --> D2[CharacterTextSplitter]\n    D1 --> D3[RecursiveCharacterTextSplitter]\n    D1 --> D4[LanguageSpecificSplitter]\n```\n\n## Document Object Model\n\nAt the heart of LangChain's document processing lies the `Document` data class, which provides a standardized representation for loaded content. Each `Document` instance encapsulates the actual text content along with associated metadata that describes the source, provenance, and any auxiliary information relevant to the content.\n\nThe `Document` class is defined in `langchain-core` and serves as the universal currency for document data throughout the LangChain ecosystem. 
This unified representation enables seamless interoperability between loaders, splitters, and consumers such as vector stores and language models.\n\n### Document Class Structure\n\n| Attribute | Type | Description |\n|-----------|------|-------------|\n| `page_content` | `str` | The actual text content of the document |\n| `metadata` | `dict[str, Any]` | Associated metadata including source, page number, etc. |\n| `type` | `str` | Document type identifier (optional) |\n\nThe metadata dictionary provides flexibility for storing arbitrary key-value pairs that describe the document's origin and characteristics. Common metadata fields include the source file path, URL, creation date, author, and any domain-specific attributes that may be useful for filtering or attribution.\n\n## Base Loader Interface\n\nThe `BaseLoader` abstract class in `langchain_core.document_loaders.base` establishes the foundational interface that all document loaders must implement. This interface ensures consistency across the loader ecosystem while allowing each loader to handle the specifics of its target data source.\n\n```mermaid\ngraph LR\n    A[load] --> B[Document List]\n    A1[aload] --> B\n    C[lazy_load] --> B\n    D[alazy_load] --> E[AsyncIterator]\n    \n    style A fill:#90EE90\n    style A1 fill:#90EE90\n    style C fill:#90EE90\n    style D fill:#90EE90\n```\n\n### Core Loading Methods\n\n| Method | Return Type | Description |\n|--------|-------------|-------------|\n| `load()` | `list[Document]` | Load all documents synchronously |\n| `aload()` | `list[Document]` | Load all documents asynchronously |\n| `lazy_load()` | `Iterator[Document]` | Lazily load documents one at a time |\n| `alazy_load()` | `AsyncIterator[Document]` | Lazily load documents asynchronously |\n\nThe `lazy_load()` method provides a memory-efficient approach for processing large document collections by yielding documents one at a time rather than loading everything into memory simultaneously. 
This is particularly valuable when working with large file systems or extensive web crawls.\n\n资料来源：[libs/core/langchain_core/document_loaders/base.py](https://github.com/langchain-ai/langchain/blob/main/libs/core/langchain_core/document_loaders/base.py)\n\n## Base Transformer Interface\n\nThe `BaseTransformer` abstract class extends the loading framework to support document transformation and enrichment. Transformers operate on already-loaded documents, applying processing operations such as filtering, annotation, or format conversion.\n\nThis abstraction is particularly useful for implementing cross-cutting concerns like content cleaning, metadata extraction, and format standardization that should be applied consistently across multiple loader types.\n\n```mermaid\ngraph TD\n    A[Raw Documents] --> B[Transformer Pipeline]\n    B --> C[Transformed Documents]\n    \n    B1[Metadata Enricher] --> B\n    B2[Content Cleaner] --> B\n    B3[Format Converter] --> B\n```\n\n## Text Splitters\n\nText splitters address the practical requirement of dividing large documents into smaller, manageable chunks that satisfy token limitations and enable efficient retrieval. The `TextSplitter` class hierarchy in `langchain-text-splitters` provides multiple strategies for document segmentation.\n\nThe splitting process operates by first breaking documents into character-level chunks, then optionally applying a secondary splitting step based on the specific text structure (such as markdown headers, code blocks, or natural language sentences). This hierarchical approach ensures that chunks respect semantic boundaries while maintaining the desired size constraints.\n\n### Character Text Splitter\n\nThe `CharacterTextSplitter` provides the foundational character-based splitting implementation. 
It divides text at character boundaries, with configurable chunk size and overlap parameters to control the granularity and continuity between chunks.\n\n资料来源：[libs/text-splitters/langchain_text_splitters/character.py](https://github.com/langchain-ai/langchain/blob/main/libs/text-splitters/langchain_text_splitters/character.py)\n\n#### Key Configuration Parameters\n\n| Parameter | Type | Default | Description |\n|-----------|------|---------|-------------|\n| `separator` | `str` | `\"\\n\\n\"` | Character sequence used for splitting |\n| `chunk_size` | `int` | `4000` | Maximum characters per chunk |\n| `chunk_overlap` | `int` | `200` | Characters of overlap between chunks |\n| `length_function` | `Callable` | `len` | Function to calculate text length |\n| `is_separator_regex` | `bool` | `False` | Whether separator is a regex pattern |\n\nThe `chunk_overlap` parameter is particularly important for maintaining context continuity across chunk boundaries. By including overlapping content between adjacent chunks, downstream systems can retrieve relevant information that might span a boundary without losing critical context.\n\n```mermaid\ngraph LR\n    A[Full Document] --> B[Chunk 1<br/>chars 0-4000]\n    B --> C[Chunk 2<br/>chars 3800-7800<br/>200 char overlap]\n    C --> D[Chunk 3<br/>chars 7600-11600<br/>200 char overlap]\n    \n    style B fill:#87CEEB\n    style C fill:#87CEEB\n    style D fill:#87CEEB\n```\n\n### Recursive Character Text Splitter\n\nThe `RecursiveCharacterTextSplitter` extends the basic character splitter with a multi-pass approach that attempts to split text at increasingly smaller delimiters if the initial split results in chunks exceeding the target size. 
This strategy preserves semantic coherence by preferring natural text boundaries such as paragraphs and sentences over arbitrary character cuts.\n\nThe splitter maintains an ordered list of separators that it attempts in sequence: double newlines for paragraphs, single newlines for line breaks, spaces for mid-sentence breaks, and finally individual characters as a last resort. This ordered approach ensures that chunks align with linguistic structures whenever possible.\n\n### Splitting Methods\n\n| Method | Parameters | Return Type | Description |\n|--------|------------|-------------|-------------|\n| `split_documents()` | `documents: list[Document]` | `list[Document]` | Split a list of Document objects |\n| `split_text()` | `text: str` | `list[str]` | Split raw text into chunks |\n| `create_documents()` | `texts: list[str]`, `metadatas?: list[dict]` | `list[Document]` | Create Document objects from texts |\n\n资料来源：[libs/text-splitters/langchain_text_splitters/__init__.py](https://github.com/langchain-ai/langchain/blob/main/libs/text-splitters/langchain_text_splitters/__init__.py)\n\n## Document Loader Categories\n\nThe LangChain ecosystem encompasses a diverse collection of document loaders organized by their target data sources. These loaders are distributed across multiple packages, with community-contributed loaders maintained in the `langchain-community` package and select integrations maintained by the LangChain team in the `partners/` directory.\n\n### File System Loaders\n\nFile system loaders handle documents stored locally or accessible through file protocols. Support extends to common formats including plain text, CSV, JSON, XML, Markdown, and various word processing formats. 
Binary formats such as PDFs and Office documents require additional dependencies that are installed as optional extras.\n\n| Loader Category | Typical Extensions | Dependencies |\n|-----------------|-------------------|--------------|\n| Plain Text | `.txt`, `.text` | None |\n| Structured Data | `.csv`, `.json`, `.xml` | Built-in libraries |\n| Documents | `.pdf`, `.docx`, `.pptx` | Optional extras |\n| Code | `.py`, `.js`, `.java`, etc. | Language-specific parsers |\n\n### Web Loaders\n\nWeb loaders enable extraction of content from internet and intranet resources. These loaders handle HTTP requests, HTML parsing, and content extraction to produce clean document objects free of navigation elements and advertising content.\n\nThe `WebBaseLoader`, `RecursiveUrlLoader`, and `PlaywrightURLLoader` represent common approaches to web content extraction, each offering different tradeoffs between speed, accuracy, and JavaScript rendering capabilities.\n\n### Database Loaders\n\nDatabase loaders connect to SQL and NoSQL databases to extract structured data and present it as documents. These loaders are essential for enterprise workflows that require accessing content stored in data warehouses, document databases, or traditional relational systems.\n\n### Cloud Storage Loaders\n\nIntegration with major cloud storage providers enables loading documents from Amazon S3, Google Cloud Storage, and Azure Blob Storage. These loaders handle authentication, bucket navigation, and file streaming to support large-scale document ingestion pipelines.\n\n## Integration with Retrieval Systems\n\nDocument loaders and text splitters form the foundation of Retrieval-Augmented Generation (RAG) pipelines. 
After documents are loaded and chunked, they are typically embedded using embedding models and stored in vector databases for similarity search.\n\n```mermaid\ngraph TD\n    A[Documents] --> B[Loader]\n    B --> C[Document Objects]\n    C --> D[Text Splitter]\n    D --> E[Chunks]\n    E --> F[Embedding Model]\n    F --> G[Vector Store]\n    G --> H[Retrieval]\n    H --> I[LLM Response]\n```\n\nThe chunk size and overlap parameters directly impact retrieval quality. Smaller chunks capture fine-grained concepts but may lack sufficient context, while larger chunks provide more context but may dilute the relevance of retrieved content. Tuning these parameters requires balancing the specific requirements of the target application.\n\n## Legacy and Deprecated Components\n\nThe `langchain-classic` package preserves legacy loader implementations that were present in earlier versions of LangChain. These components are maintained for backward compatibility but may lack features present in the newer `langchain-core` implementations.\n\n资料来源：[libs/langchain/langchain_classic/document_loaders/__init__.py](https://github.com/langchain-ai/langchain/blob/main/libs/langchain/langchain_classic/document_loaders/__init__.py)\n\nThe deprecation warnings in `langchain-classic` indicate a gradual migration path toward the consolidated abstractions in `langchain-core`. New projects should prefer the core implementations, while existing projects can continue using classic loaders until suitable migration paths are available.\n\n## Best Practices\n\nWhen working with document loaders and text splitters, several practices help ensure optimal results in production environments. First, always consider the downstream consumer when selecting chunk sizes—smaller chunks work well for question answering, while larger chunks preserve more context for summarization tasks. 
Second, include comprehensive metadata during loading to enable filtering and attribution in later stages.\n\nFor large-scale document processing, prefer lazy loading strategies to manage memory consumption effectively. When dealing with heterogeneous document collections, consider creating custom transformer pipelines that apply format-specific preprocessing before the generic splitting logic.\n\nThe separator configuration should align with the natural structure of the document format. Markdown documents benefit from separator patterns that respect heading hierarchies, while plain text may work better with paragraph-based or sentence-based splitting strategies.\n\n## Package Organization Summary\n\n| Package | Purpose | Key Classes |\n|---------|---------|-------------|\n| `langchain-core` | Core abstractions | `BaseLoader`, `BaseTransformer`, `Document` |\n| `langchain-classic` | Legacy implementations | `CSVLoader`, `PyPDFLoader`, etc. |\n| `langchain-text-splitters` | Text chunking | `TextSplitter`, `CharacterTextSplitter`, `RecursiveCharacterTextSplitter` |\n\n资料来源：[libs/core/langchain_core/document_loaders/__init__.py](https://github.com/langchain-ai/langchain/blob/main/libs/core/langchain_core/document_loaders/__init__.py)\n\nThe separation of `langchain-text-splitters` into its own package reflects the independent applicability of text splitting beyond document loading. 
This design enables reuse of splitting logic in contexts where documents are generated programmatically or obtained through means other than loaders.\n\n---\n\n<a id='vectorstores-retrievers'></a>\n\n## Vector Stores and Retrievers\n\n### 相关页面\n\n相关主题：[Document Loaders and Text Processing](#document-loaders), [Chat Models and Embeddings](#chat-models-embeddings), [Tools and Toolkits](#tools-integrations)\n\n<details>\n<summary>相关源码文件</summary>\n\n以下源码文件用于生成本页说明：\n\n- [libs/core/langchain_core/vectorstores/__init__.py](https://github.com/langchain-ai/langchain/blob/main/libs/core/langchain_core/vectorstores/__init__.py)\n- [libs/core/langchain_core/vectorstores/base.py](https://github.com/langchain-ai/langchain/blob/main/libs/core/langchain_core/vectorstores/base.py)\n- [libs/langchain/langchain_classic/vectorstores/__init__.py](https://github.com/langchain-ai/langchain/blob/main/libs/langchain/langchain_classic/vectorstores/__init__.py)\n- [libs/langchain/langchain_classic/vectorstores/base.py](https://github.com/langchain-ai/langchain/blob/main/libs/langchain/langchain_classic/vectorstores/base.py)\n- [libs/langchain/langchain_classic/retrievers/__init__.py](https://github.com/langchain-ai/langchain/blob/main/libs/langchain/langchain_classic/retrievers/__init__.py)\n- [libs/partners/chroma/README.md](https://github.com/langchain-ai/langchain/blob/main/libs/partners/chroma/README.md)\n- [libs/partners/huggingface/README.md](https://github.com/langchain-ai/langchain/blob/main/libs/partners/huggingface/README.md)\n</details>\n\n# Vector Stores and Retrievers\n\n## Overview\n\nVector Stores and Retrievers are fundamental components in LangChain that enable efficient similarity search and retrieval of documents based on semantic meaning. 
Vector stores handle the storage and indexing of embedded data, while retrievers provide the interface for querying and fetching relevant information from these stores.\n\nLangChain provides a layered architecture where core abstractions are defined in `langchain_core` and concrete implementations are available through partner packages and `langchain_classic`.\n\n## Architecture\n\nThe Vector Stores and Retrievers system follows a clean separation of concerns with interfaces defined at the core level and implementations distributed across the ecosystem.\n\n```mermaid\ngraph TD\n    A[User Query] --> B[Embedding Model]\n    B --> C[Vector Query]\n    C --> D[Vector Store]\n    D --> E[Retriever]\n    E --> F[Retrieved Documents]\n    \n    G[Documents] --> H[Document Loader]\n    H --> I[Text Splitter]\n    I --> J[Embedding Model]\n    J --> K[Indexed Vectors]\n    K --> D\n    \n    L[langchain_core] --> M[BaseVectorStore Interface]\n    L --> N[BaseRetriever Interface]\n    O[langchain_classic] --> P[Additional VectorStore Implementations]\n    O --> Q[Retrievers]\n    R[Partner Packages] --> S[Chroma, FAISS, Pinecone, etc.]\n```\n\n## Vector Stores\n\n### Core Interface\n\nThe base vector store interface in `langchain_core` provides the foundational methods that all vector store implementations must support. 
The interface is designed to be provider-agnostic while enabling specific features through optional methods.\n\n#### Key Methods\n\n| Method | Purpose | Parameters | Return Type |\n|--------|---------|------------|-------------|\n| `add_documents` | Add documents to the store | `documents: List[Document]`, `ids: Optional[List[str]]` | `List[str]` |\n| `add_texts` | Add raw texts to the store | `texts: List[str]`, `metadatas: Optional[List[dict]]`, `ids: Optional[List[str]]` | `List[str]` |\n| `similarity_search` | Find similar documents | `query: str`, `k: int`, `filter: Optional[dict]` | `List[Document]` |\n| `similarity_search_by_vector` | Search by embedding | `embedding: List[float]`, `k: int`, `filter: Optional[dict]` | `List[Document]` |\n| `similarity_search_with_score` | Search with relevance scores | `query: str`, `k: int`, `filter: Optional[dict]` | `List[Tuple[Document, float]]` |\n| `delete` | Remove documents by ID | `ids: Optional[List[str]]` | `None` |\n| `get_by_prefix` | Retrieve by ID prefix | `prefix: str` | `List[Document]` |\n\n### Embeddings Integration\n\nVector stores work in conjunction with embedding models to convert text into vector representations. 
The HuggingFace integration provides `HuggingFaceEmbeddings` for generating embeddings locally:\n\n```python\n# From libs/partners/huggingface/README.md\nfrom langchain_huggingface import HuggingFaceEmbeddings\n\n# Requires sentence-transformers>=5.2.0\nembeddings = HuggingFaceEmbeddings(model_name=\"all-MiniLM-L6-v2\")\n```\n\nThe embedding models are decoupled from vector stores, allowing flexibility in choosing both components independently.\n\n### Deletion and Prefix Operations\n\nVector stores support flexible document management through deletion and prefix-based retrieval operations:\n\n| Operation | Description |\n|-----------|-------------|\n| `delete(ids)` | Removes specific documents by their IDs |\n| `get_by_prefix(prefix)` | Retrieves all documents whose IDs start with the given prefix |\n\nThese operations enable efficient document lifecycle management within the vector store.\n\n## Retrievers\n\nRetrievers provide a standardized interface for fetching relevant documents from various sources. 
They abstract away the underlying storage mechanism and provide additional capabilities like filtering and reranking.\n\n### Base Retriever Interface\n\nThe retriever interface in `langchain_classic` defines the contract for all retrieval implementations:\n\n```mermaid\ngraph LR\n    A[Query] --> B[Retriever]\n    B --> C{Filtering}\n    C -->|Apply| D[Metadata Filter]\n    C -->|Apply| E[Time-based Filter]\n    B --> F[Document Pool]\n    F --> G[Top-K Results]\n```\n\n### Retriever Types\n\nLangChain provides several specialized retrievers:\n\n| Retriever Type | Purpose | Use Case |\n|----------------|---------|----------|\n| `VectorStoreRetriever` | Standard similarity search | General-purpose retrieval |\n| `MultiVectorRetriever` | Multiple embedding per document | Tables, images, sub-documents |\n| `MultiQueryRetriever` | Generate multiple query variations | Improve recall |\n| `ContextualCompressionRetriever` | Compress retrieved context | Reduce token usage |\n| `TimeWeightedVectorStoreRetriever` | Prioritize recent documents | Recency-aware search |\n\n### Configuration Options\n\nRetrievers support various configuration parameters:\n\n| Parameter | Type | Default | Description |\n|-----------|------|---------|-------------|\n| `search_type` | `str` | `\"similarity\"` | Search algorithm: `similarity`, `mmr` |\n| `k` | `int` | `4` | Number of documents to retrieve |\n| `fetch_k` | `int` | `20` | Initial pool size for MMR |\n| `lambda_mult` | `float` | `0.5` | MMR diversity parameter |\n| `filter` | `dict` | `None` | Metadata filter conditions |\n\n## Partner Integrations\n\n### Chroma Vector Store\n\nThe `langchain-chroma` package provides integration with Chroma, an open-source embedding database:\n\n```bash\npip install langchain-chroma\n```\n\nChroma is designed specifically for AI applications and provides efficient storage and retrieval of embeddings with metadata filtering capabilities.\n\n### HuggingFace Embeddings\n\nThe `langchain-huggingface` 
package offers multiple embedding options:\n\n- `HuggingFaceEmbeddings` - For local inference using sentence-transformers\n- `HuggingFacePipeline` - For local LLM inference\n\nFor full functionality including embeddings:\n\n```bash\npip install langchain-huggingface[full]\n```\n\n> **Note:** The `[full]` extra requires `sentence-transformers>=5.2.0` and `transformers>=5.0.0`. Projects migrating from `langchain-community` may need to upgrade `sentence-transformers` to `>=5.2.0`.\n\n## Package Structure\n\n### Core Layer (`langchain_core`)\n\nThe `langchain_core.vectorstores` module provides base abstractions that define the contract for all vector store implementations. This layer is dependency-light and focuses on interface definitions.\n\n### Classic Layer (`langchain_classic`)\n\nThe `langchain_classic.vectorstores` module extends the core abstractions with additional functionality:\n\n- Additional vector store implementations\n- Utility functions for common operations\n- Integration with other LangChain components\n\n### Partner Packages\n\nPartner packages provide maintained integrations with specific vector store providers:\n\n```\nlibs/partners/\n├── chroma/       # Chroma vector store\n├── weaviate/     # Weaviate vector database\n├── pinecone/     # Pinecone managed vector database\n├── qdrant/       # Qdrant vector search engine\n└── ...           
# Additional providers\n```\n\n## Workflow: Document Indexing and Retrieval\n\n```mermaid\nsequenceDiagram\n    participant User\n    participant Loader\n    participant Splitter\n    participant Embedder\n    participant VectorStore\n    participant Retriever\n    \n    User->>Loader: Load documents\n    Loader-->>User: Raw documents\n    User->>Splitter: Split documents\n    Splitter-->>User: Chunked documents\n    User->>Embedder: Generate embeddings\n    Embedder-->>User: Vector embeddings\n    User->>VectorStore: Add documents + embeddings\n    VectorStore-->>User: Document IDs\n    \n    User->>Retriever: Query\n    Retriever->>Embedder: Embed query\n    Embedder-->>Retriever: Query vector\n    Retriever->>VectorStore: Similarity search\n    VectorStore-->>Retriever: Top-K documents\n    Retriever-->>User: Retrieved results\n```\n\n## Best Practices\n\n### Indexing Recommendations\n\n1. **Chunk Size Selection**: Choose appropriate chunk sizes based on your use case—smaller chunks for precise retrieval, larger chunks for more context\n2. **Metadata Enrichment**: Include relevant metadata during indexing to enable filtered retrieval\n3. **Embedding Selection**: Match embedding models to your data type and language requirements\n\n### Retrieval Optimization\n\n1. **Use MMR for Diversity**: When using MMR (Maximal Marginal Relevance), tune `lambda_mult` to balance relevance and diversity\n2. **Implement Filtering**: Use metadata filtering to narrow results to relevant subsets\n3. 
**Consider Compression**: For long contexts, use contextual compression to reduce token usage\n\n## Security Considerations\n\nWhen implementing vector stores and retrievers in production:\n\n- **Data Isolation**: Ensure vector store data is properly isolated based on access requirements\n- **Input Sanitization**: Validate and sanitize query inputs to prevent injection attacks\n- **Access Control**: Implement appropriate access controls for sensitive document retrieval\n\n## See Also\n\n- [LangChain Core Documentation](https://reference.langchain.com/python/langchain_core)\n- [API Reference](https://reference.langchain.com/python/integrations/)\n- [LangChain Docs](https://docs.langchain.com/oss/python/integrations/providers)\n- [Partner Integrations](https://docs.langchain.com/oss/python/integrations/providers)\n\n---\n\n---\n\n## Doramagic 踩坑日志\n\n项目：langchain-ai/langchain\n\n摘要：发现 6 个潜在踩坑项，其中 0 个为 high/blocking；最高优先级：能力坑 - 能力判断依赖假设。\n\n## 1. 能力坑 · 能力判断依赖假设\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：README/documentation is current enough for a first validation pass.\n- 对用户的影响：假设不成立时，用户拿不到承诺的能力。\n- 建议检查：将假设转成下游验证清单。\n- 防护动作：假设必须转成验证项；没有验证结果前不能写成事实。\n- 证据：capability.assumptions | github_repo:552661142 | https://github.com/langchain-ai/langchain | README/documentation is current enough for a first validation pass.\n\n## 2. 维护坑 · 维护活跃度未知\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：未记录 last_activity_observed。\n- 对用户的影响：新项目、停更项目和活跃项目会被混在一起，推荐信任度下降。\n- 建议检查：补 GitHub 最近 commit、release、issue/PR 响应信号。\n- 防护动作：维护活跃度未知时，推荐强度不能标为高信任。\n- 证据：evidence.maintainer_signals | github_repo:552661142 | https://github.com/langchain-ai/langchain | last_activity_observed missing\n\n## 3. 
安全/权限坑 · 下游验证发现风险项\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：no_demo\n- 对用户的影响：下游已经要求复核，不能在页面中弱化。\n- 建议检查：进入安全/权限治理复核队列。\n- 防护动作：下游风险存在时必须保持 review/recommendation 降级。\n- 证据：downstream_validation.risk_items | github_repo:552661142 | https://github.com/langchain-ai/langchain | no_demo; severity=medium\n\n## 4. 安全/权限坑 · 存在评分风险\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：no_demo\n- 对用户的影响：风险会影响是否适合普通用户安装。\n- 建议检查：把风险写入边界卡，并确认是否需要人工复核。\n- 防护动作：评分风险必须进入边界卡，不能只作为内部分数。\n- 证据：risks.scoring_risks | github_repo:552661142 | https://github.com/langchain-ai/langchain | no_demo; severity=medium\n\n## 5. 维护坑 · issue/PR 响应质量未知\n\n- 严重度：low\n- 证据强度：source_linked\n- 发现：issue_or_pr_quality=unknown。\n- 对用户的影响：用户无法判断遇到问题后是否有人维护。\n- 建议检查：抽样最近 issue/PR，判断是否长期无人处理。\n- 防护动作：issue/PR 响应未知时，必须提示维护风险。\n- 证据：evidence.maintainer_signals | github_repo:552661142 | https://github.com/langchain-ai/langchain | issue_or_pr_quality=unknown\n\n## 6. 维护坑 · 发布节奏不明确\n\n- 严重度：low\n- 证据强度：source_linked\n- 发现：release_recency=unknown。\n- 对用户的影响：安装命令和文档可能落后于代码，用户踩坑概率升高。\n- 建议检查：确认最近 release/tag 和 README 安装命令是否一致。\n- 防护动作：发布节奏未知或过期时，安装说明必须标注可能漂移。\n- 证据：evidence.maintainer_signals | github_repo:552661142 | https://github.com/langchain-ai/langchain | release_recency=unknown\n\n<!-- canonical_name: langchain-ai/langchain; human_manual_source: deepwiki_human_wiki -->\n",
      "markdown_key": "langchain",
      "pages": "draft",
      "source_refs": [
        {
          "evidence_id": "github_repo:552661142",
          "kind": "repo",
          "supports_claim_ids": [
            "claim_identity",
            "claim_distribution",
            "claim_capability"
          ],
          "url": "https://github.com/langchain-ai/langchain"
        },
        {
          "evidence_id": "art_20f7ab635e4c4eab85f60b7ef2f73914",
          "kind": "docs",
          "supports_claim_ids": [
            "claim_identity",
            "claim_distribution",
            "claim_capability"
          ],
          "url": "https://github.com/langchain-ai/langchain#readme"
        }
      ],
      "summary": "DeepWiki/Human Wiki 完整输出，末尾追加 Discovery Agent 踩坑日志。",
      "title": "langchain 说明书",
      "toc": [
        "https://github.com/langchain-ai/langchain 项目说明书",
        "目录",
        "Introduction to LangChain",
        "Overview",
        "Architecture Overview",
        "Core Packages",
        "Partner Integrations",
        "Quick Start",
        "Doramagic 踩坑日志"
      ]
    }
  },
  "quality_gate": {
    "blocking_gaps": [],
    "category_confidence": "medium",
    "compile_status": "ready_for_review",
    "five_assets_present": true,
    "install_sandbox_verified": true,
    "missing_evidence": [],
    "next_action": "publish to Doramagic.ai project surfaces",
    "prompt_preview_boundary_ok": true,
    "publish_status": "publishable",
    "quick_start_verified": true,
    "repo_clone_verified": true,
    "repo_commit": "b6b769baf66b4f500d8a9544c7f2dbcc62429028",
    "repo_inspection_error": null,
    "repo_inspection_files": [
      "README.md"
    ],
    "repo_inspection_verified": true,
    "review_reasons": [],
    "tag_count_ok": true,
    "unsupported_claims": []
  },
  "schema_version": "0.1",
  "user_assets": {
    "ai_context_pack": {
      "asset_id": "ai_context_pack",
      "filename": "AI_CONTEXT_PACK.md",
      "markdown": "# langchain - Doramagic AI Context Pack\n\n> 定位：安装前体验与判断资产。它帮助宿主 AI 有一个好的开始，但不代表已经安装、执行或验证目标项目。\n\n## 充分原则\n\n- **充分原则，不是压缩原则**：AI Context Pack 应该充分到让宿主 AI 在开工前理解项目价值、能力边界、使用入口、风险和证据来源；它可以分层组织，但不以最短摘要为目标。\n- **压缩策略**：只压缩噪声和重复内容，不压缩会影响判断和开工质量的上下文。\n\n## 给宿主 AI 的使用方式\n\n你正在读取 Doramagic 为 langchain 编译的 AI Context Pack。请把它当作开工前上下文：帮助用户理解适合谁、能做什么、如何开始、哪些必须安装后验证、风险在哪里。不要声称你已经安装、运行或执行了目标项目。\n\n## Claim 消费规则\n\n- **事实来源**：Repo Evidence + Claim/Evidence Graph；Human Wiki 只提供显著性、术语和叙事结构。\n- **事实最低状态**：`supported`\n- `supported`：可以作为项目事实使用，但回答中必须引用 claim_id 和证据路径。\n- `weak`：只能作为低置信度线索，必须要求用户继续核实。\n- `inferred`：只能用于风险提示或待确认问题，不能包装成项目事实。\n- `unverified`：不得作为事实使用，应明确说证据不足。\n- `contradicted`：必须展示冲突来源，不得替用户强行选择一个版本。\n\n## 它最适合谁\n\n- **想在安装前理解开源项目价值和边界的用户**：当前证据主要来自项目文档。 证据：`README.md` Claim：`clm_0002` supported 0.86\n\n## 它能做什么\n\n- **命令行启动或安装流程**（需要安装后验证）：项目文档中存在可执行命令，真实使用需要在本地或宿主环境中运行这些命令。 证据：`README.md` Claim：`clm_0001` supported 0.86\n\n## 怎么开始\n\n- `pip install langchain` 证据：`README.md` Claim：`clm_0003` supported 0.86\n\n## 继续前判断卡\n\n- **当前建议**：先做角色匹配试用\n- **为什么**：这个项目更像角色库，核心风险是选错角色或把角色文案当执行能力；先用 Prompt Preview 试角色匹配，再决定是否沙盒导入。\n\n### 30 秒判断\n\n- **现在怎么做**：先做角色匹配试用\n- **最小安全下一步**：先用 Prompt Preview 试角色匹配；满意后再隔离导入\n- **先别相信**：角色质量和任务匹配不能直接相信。\n- **继续会触碰**：角色选择偏差、命令执行、宿主 AI 配置\n\n### 现在可以相信\n\n- **适合人群线索：想在安装前理解开源项目价值和边界的用户**（supported）：有 supported claim 或项目证据支撑，但仍不等于真实安装效果。 证据：`README.md` Claim：`clm_0002` supported 0.86\n- **能力存在：命令行启动或安装流程**（supported）：可以相信项目包含这类能力线索；是否适合你的具体任务仍要试用或安装后验证。 证据：`README.md` Claim：`clm_0001` supported 0.86\n- **存在 Quick Start / 安装命令线索**（supported）：可以相信项目文档出现过启动或安装入口；不要因此直接在主力环境运行。 证据：`README.md` Claim：`clm_0003` supported 0.86\n\n### 现在还不能相信\n\n- **角色质量和任务匹配不能直接相信。**（unverified）：角色库证明有很多角色，不证明每个角色都适合你的具体任务，也不证明角色能产生高质量结果。\n- **不能把角色文案当成真实执行能力。**（unverified）：安装前只能判断角色描述和任务画像是否匹配，不能证明它能在宿主 AI 里完成任务。\n- **真实输出质量不能在安装前相信。**（unverified）：Prompt Preview 只能展示引导方式，不能证明真实项目中的结果质量。\n- **宿主 AI 
版本兼容性不能在安装前相信。**（unverified）：Claude、Cursor、Codex、Gemini 等宿主加载规则和版本差异必须在真实环境验证。\n- **不会污染现有宿主 AI 行为，不能直接相信。**（inferred）：Skill、plugin、AGENTS/CLAUDE/GEMINI 指令可能改变宿主 AI 的默认行为。 证据：`AGENTS.md`, `CLAUDE.md`\n- **可安全回滚不能默认相信。**（unverified）：除非项目明确提供卸载和恢复说明，否则必须先在隔离环境验证。\n- **真实安装后是否与用户当前宿主 AI 版本兼容？**（unverified）：兼容性只能通过实际宿主环境验证。\n- **项目输出质量是否满足用户具体任务？**（unverified）：安装前预览只能展示流程和边界，不能替代真实评测。\n\n### 继续会触碰什么\n\n- **角色选择偏差**：用户对任务应该由哪个专家角色处理的判断。 原因：选错角色会让 AI 从错误专业视角回答，浪费时间或误导决策。\n- **命令执行**：包管理器、网络下载、本地插件目录、项目配置或用户主目录。 原因：运行第一条命令就可能产生环境改动；必须先判断是否值得跑。 证据：`README.md`\n- **宿主 AI 配置**：Claude/Codex/Cursor/Gemini/OpenCode 等宿主的 plugin、Skill 或规则加载配置。 原因：宿主配置会改变 AI 后续工作方式，可能和用户已有规则冲突。 证据：`AGENTS.md`, `CLAUDE.md`\n- **本地环境或项目文件**：安装结果、插件缓存、项目配置或本地依赖目录。 原因：安装前无法证明写入范围和回滚方式，需要隔离验证。 证据：`README.md`\n- **宿主 AI 上下文**：AI Context Pack、Prompt Preview、Skill 路由、风险规则和项目事实。 原因：导入上下文会影响宿主 AI 后续判断，必须避免把未验证项包装成事实。\n\n### 最小安全下一步\n\n- **先跑 Prompt Preview**：先用交互式试用验证任务画像和角色匹配，不要先导入整套角色库。（适用：任何项目都适用，尤其是输出质量未知时。）\n- **只在隔离目录或测试账号试装**：避免安装命令污染主力宿主 AI、真实项目或用户主目录。（适用：存在命令执行、插件配置或本地写入线索时。）\n- **先备份宿主 AI 配置**：Skill、plugin、规则文件可能改变 Claude/Cursor/Codex 的默认行为。（适用：存在插件 manifest、Skill 或宿主规则入口时。）\n- **安装后只验证一个最小任务**：先验证加载、兼容、输出质量和回滚，再决定是否深用。（适用：准备从试用进入真实工作流时。）\n\n### 退出方式\n\n- **保留安装前状态**：记录原始宿主配置和项目状态，后续才能判断是否可恢复。\n- **准备移除宿主 plugin / Skill / 规则入口**：如果试装后行为异常，可以把宿主 AI 恢复到试装前状态。\n- **保留原始角色选择记录**：如果输出偏题，可以回到任务画像阶段重新选择角色，而不是继续沿着错误角色推进。\n- **记录安装命令和写入路径**：没有明确卸载说明时，至少要知道哪些目录或配置需要手动清理。\n- **如果没有回滚路径，不进入主力环境**：不可回滚是继续前阻断项，不应靠信任或运气继续。\n\n## 哪些只能预览\n\n- 解释项目适合谁和能做什么\n- 基于项目文档演示典型对话流程\n- 帮助用户判断是否值得安装或继续研究\n\n## 哪些必须安装后验证\n\n- 真实安装 Skill、插件或 CLI\n- 执行脚本、修改本地文件或访问外部服务\n- 验证真实输出质量、性能和兼容性\n\n## 边界与风险判断卡\n\n- **把安装前预览误认为真实运行**：用户可能高估项目已经完成的配置、权限和兼容性验证。 处理方式：明确区分 prompt_preview_can_do 与 runtime_required。 Claim：`clm_0004` inferred 0.45\n- **命令执行会修改本地环境**：安装命令可能写入用户主目录、宿主插件目录或项目配置。 处理方式：先在隔离环境或测试账号中运行。 证据：`README.md` Claim：`clm_0005` supported 0.86\n- **待确认**：真实安装后是否与用户当前宿主 AI 版本兼容？。原因：兼容性只能通过实际宿主环境验证。\n- 
**待确认**：项目输出质量是否满足用户具体任务？。原因：安装前预览只能展示流程和边界，不能替代真实评测。\n- **待确认**：安装命令是否需要网络、权限或全局写入？。原因：这影响企业环境和个人环境的安装风险。\n\n## 开工前工作上下文\n\n### 加载顺序\n\n- 先读取 how_to_use.host_ai_instruction，建立安装前判断资产的边界。\n- 读取 claim_graph_summary，确认事实来自 Claim/Evidence Graph，而不是 Human Wiki 叙事。\n- 再读取 intended_users、capabilities 和 quick_start_candidates，判断用户是否匹配。\n- 需要执行具体任务时，优先查 role_skill_index，再查 evidence_index。\n- 遇到真实安装、文件修改、网络访问、性能或兼容性问题时，转入 risk_card 和 boundaries.runtime_required。\n\n### 任务路由\n\n- **命令行启动或安装流程**：先说明这是安装后验证能力，再给出安装前检查清单。 边界：必须真实安装或运行后验证。 证据：`README.md` Claim：`clm_0001` supported 0.86\n\n### 上下文规模\n\n- 文件总数：2782\n- 重要文件覆盖：40/2782\n- 证据索引条目：60\n- 角色 / Skill 条目：28\n\n### 证据不足时的处理\n\n- **missing_evidence**：说明证据不足，要求用户提供目标文件、README 段落或安装后验证记录；不要补全事实。\n- **out_of_scope_request**：说明该任务超出当前 AI Context Pack 证据范围，并建议用户先查看 Human Manual 或真实安装后验证。\n- **runtime_request**：给出安装前检查清单和命令来源，但不要替用户执行命令或声称已执行。\n- **source_conflict**：同时展示冲突来源，标记为待核实，不要强行选择一个版本。\n\n## Prompt Recipes\n\n### 适配判断\n\n- 目标：判断这个项目是否适合用户当前任务。\n- 预期输出：适配结论、关键理由、证据引用、安装前可预览内容、必须安装后验证内容、下一步建议。\n\n```text\n请基于 langchain 的 AI Context Pack，先问我 3 个必要问题，然后判断它是否适合我的任务。回答必须包含：适合谁、能做什么、不能做什么、是否值得安装、证据来自哪里。所有项目事实必须引用 evidence_refs、source_paths 或 claim_id。\n```\n\n### 安装前体验\n\n- 目标：让用户在安装前感受核心工作流，同时避免把预览包装成真实能力或营销承诺。\n- 预期输出：一段带边界标签的体验剧本、安装后验证清单和谨慎建议；不含真实运行承诺或强营销表述。\n\n```text\n请把 langchain 当作安装前体验资产，而不是已安装工具或真实运行环境。\n\n请严格输出四段：\n1. 先问我 3 个必要问题。\n2. 给出一段“体验剧本”：用 [安装前可预览]、[必须安装后验证]、[证据不足] 三种标签展示它可能如何引导工作流。\n3. 给出安装后验证清单：列出哪些能力只有真实安装、真实宿主加载、真实项目运行后才能确认。\n4. 
给出谨慎建议：只能说“值得继续研究/试装”“先补充信息后再判断”或“不建议继续”，不得替项目背书。\n\n硬性边界：\n- 不要声称已经安装、运行、执行测试、修改文件或产生真实结果。\n- 不要写“自动适配”“确保通过”“完美适配”“强烈建议安装”等承诺性表达。\n- 如果描述安装后的工作方式，必须使用“如果安装成功且宿主正确加载 Skill，它可能会……”这种条件句。\n- 体验剧本只能写成“示例台词/假设流程”：使用“可能会询问/可能会建议/可能会展示”，不要写“已写入、已生成、已通过、正在运行、正在生成”。\n- Prompt Preview 不负责给安装命令；如用户准备试装，只能提示先阅读 Quick Start 和 Risk Card，并在隔离环境验证。\n- 所有项目事实必须来自 supported claim、evidence_refs 或 source_paths；inferred/unverified 只能作风险或待确认项。\n\n```\n\n### 角色 / Skill 选择\n\n- 目标：从项目里的角色或 Skill 中挑选最匹配的资产。\n- 预期输出：候选角色或 Skill 列表，每项包含适用场景、证据路径、风险边界和是否需要安装后验证。\n\n```text\n请读取 role_skill_index，根据我的目标任务推荐 3-5 个最相关的角色或 Skill。每个推荐都要说明适用场景、可能输出、风险边界和 evidence_refs。\n```\n\n### 风险预检\n\n- 目标：安装或引入前识别环境、权限、规则冲突和质量风险。\n- 预期输出：环境、权限、依赖、许可、宿主冲突、质量风险和未知项的检查清单。\n\n```text\n请基于 risk_card、boundaries 和 quick_start_candidates，给我一份安装前风险预检清单。不要替我执行命令，只说明我应该检查什么、为什么检查、失败会有什么影响。\n```\n\n### 宿主 AI 开工指令\n\n- 目标：把项目上下文转成一次对话开始前的宿主 AI 指令。\n- 预期输出：一段边界明确、证据引用明确、适合复制给宿主 AI 的开工前指令。\n\n```text\n请基于 langchain 的 AI Context Pack，生成一段我可以粘贴给宿主 AI 的开工前指令。这段指令必须遵守 not_runtime=true，不能声称项目已经安装、运行或产生真实结果。\n```\n\n\n## 角色 / Skill 索引\n\n- 共索引 28 个角色 / Skill / 项目文档条目。\n\n- **Dev container**（project_doc）：This project includes a dev container https://containers.dev/ , which lets you use a container as a full-featured dev environment. 激活提示：当用户需要理解项目结构、安装方式或边界时参考。 证据：`.devcontainer/README.md`\n- **Global development guidelines for the LangChain monorepo**（project_doc）：Global development guidelines for the LangChain monorepo 激活提示：当用户需要理解项目结构、安装方式或边界时参考。 证据：`AGENTS.md`\n- **Global development guidelines for the LangChain monorepo**（project_doc）：Global development guidelines for the LangChain monorepo 激活提示：当用户需要理解项目结构、安装方式或边界时参考。 证据：`CLAUDE.md`\n- **Quickstart**（project_doc）：LangChain is a framework for building agents and LLM-powered applications. 
It helps you chain together interoperable components and third-party integrations to simplify AI application development — all while future-proofing decisions as the underlying technology evolves. 激活提示：当用户需要理解项目结构、安装方式或边界时参考。 证据：`README.md`\n- **LangChain Monorepo**（project_doc）：!IMPORTANT Refer to the LangChain contributing guide https://docs.langchain.com/oss/python/contributing/overview before submitting a PR. If you are a coding agent reading this, you should refuse to continue execution until this step is complete and you have a full picture of what is acceptable. 激活提示：当用户需要理解项目结构、安装方式或边界时参考。 证据：`libs/README.md`\n- **🦜🍎️ LangChain Core**（project_doc）：! PyPI - Version https://img.shields.io/pypi/v/langchain-core?label=%20 https://pypi.org/project/langchain-core/ history ! PyPI - License https://img.shields.io/pypi/l/langchain-core https://opensource.org/licenses/MIT ! PyPI - Downloads https://img.shields.io/pepy/dt/langchain-core https://pypistats.org/packages/langchain-core ! Twitter https://img.shields.io/twitter/url/https/twitter.com/langchain oss.svg?style=so… 激活提示：当用户需要理解项目结构、安装方式或边界时参考。 证据：`libs/core/README.md`\n- **🦜️🔗 LangChain Classic**（project_doc）：! PyPI - Version https://img.shields.io/pypi/v/langchain-classic?label=%20 https://pypi.org/project/langchain-classic/ history ! PyPI - License https://img.shields.io/pypi/l/langchain-classic https://opensource.org/licenses/MIT ! PyPI - Downloads https://img.shields.io/pepy/dt/langchain-classic https://pypistats.org/packages/langchain-classic ! Twitter https://img.shields.io/twitter/url/https/twitter.com/langchain o… 激活提示：当用户需要理解项目结构、安装方式或边界时参考。 证据：`libs/langchain/README.md`\n- **🦜️🔗 LangChain**（project_doc）：! PyPI - Version https://img.shields.io/pypi/v/langchain?label=%20 https://pypi.org/project/langchain/ history ! PyPI - License https://img.shields.io/pypi/l/langchain https://opensource.org/licenses/MIT ! 
PyPI - Downloads https://img.shields.io/pepy/dt/langchain https://pypistats.org/packages/langchain ! Twitter https://img.shields.io/twitter/url/https/twitter.com/langchain oss.svg?style=social&label=Follow%20%40La… 激活提示：当用户需要理解项目结构、安装方式或边界时参考。 证据：`libs/langchain_v1/README.md`\n- **🦜🪪 langchain-model-profiles**（project_doc）：! PyPI - Version https://img.shields.io/pypi/v/langchain-model-profiles?label=%20 https://pypi.org/project/langchain-model-profiles/ history ! PyPI - License https://img.shields.io/pypi/l/langchain-model-profiles https://opensource.org/licenses/MIT ! PyPI - Downloads https://img.shields.io/pepy/dt/langchain-model-profiles https://pypistats.org/packages/langchain-model-profiles ! Twitter https://img.shields.io/twitte… 激活提示：当用户需要理解项目结构、安装方式或边界时参考。 证据：`libs/model-profiles/README.md`\n- **FAQ**（project_doc）：Looking for an integration not listed here? Check out the integrations documentation https://docs.langchain.com/oss/python/integrations/providers and the note ../README.md in the libs/ README about third-party maintained packages. 激活提示：当用户需要理解项目结构、安装方式或边界时参考。 证据：`libs/partners/README.md`\n- **langchain-anthropic**（project_doc）：! PyPI - Version https://img.shields.io/pypi/v/langchain-anthropic?label=%20 https://pypi.org/project/langchain-anthropic/ history ! PyPI - License https://img.shields.io/pypi/l/langchain-anthropic https://opensource.org/licenses/MIT ! PyPI - Downloads https://img.shields.io/pepy/dt/langchain-anthropic https://pypistats.org/packages/langchain-anthropic ! Twitter https://img.shields.io/twitter/url/https/twitter.com/l… 激活提示：当用户需要理解项目结构、安装方式或边界时参考。 证据：`libs/partners/anthropic/README.md`\n- **langchain-chroma**（project_doc）：! PyPI - Version https://img.shields.io/pypi/v/langchain-chroma?label=%20 https://pypi.org/project/langchain-chroma/ history ! PyPI - License https://img.shields.io/pypi/l/langchain-chroma https://opensource.org/licenses/MIT ! 
PyPI - Downloads https://img.shields.io/pepy/dt/langchain-chroma https://pypistats.org/packages/langchain-chroma ! Twitter https://img.shields.io/twitter/url/https/twitter.com/langchain oss.sv… 激活提示：当用户需要理解项目结构、安装方式或边界时参考。 证据：`libs/partners/chroma/README.md`\n- **langchain-deepseek**（project_doc）：! PyPI - Version https://img.shields.io/pypi/v/langchain-deepseek?label=%20 https://pypi.org/project/langchain-deepseek/ history ! PyPI - License https://img.shields.io/pypi/l/langchain-deepseek https://opensource.org/licenses/MIT ! PyPI - Downloads https://img.shields.io/pepy/dt/langchain-deepseek https://pypistats.org/packages/langchain-deepseek ! Twitter https://img.shields.io/twitter/url/https/twitter.com/langch… 激活提示：当用户需要理解项目结构、安装方式或边界时参考。 证据：`libs/partners/deepseek/README.md`\n- **langchain-exa**（project_doc）：! PyPI - Version https://img.shields.io/pypi/v/langchain-exa?label=%20 https://pypi.org/project/langchain-exa/ history ! PyPI - License https://img.shields.io/pypi/l/langchain-exa https://opensource.org/licenses/MIT ! PyPI - Downloads https://img.shields.io/pepy/dt/langchain-exa https://pypistats.org/packages/langchain-exa ! Twitter https://img.shields.io/twitter/url/https/twitter.com/langchain oss.svg?style=social&… 激活提示：当用户需要理解项目结构、安装方式或边界时参考。 证据：`libs/partners/exa/README.md`\n- **langchain-fireworks**（project_doc）：! PyPI - Version https://img.shields.io/pypi/v/langchain-fireworks?label=%20 https://pypi.org/project/langchain-fireworks/ history ! PyPI - License https://img.shields.io/pypi/l/langchain-fireworks https://opensource.org/licenses/MIT ! PyPI - Downloads https://img.shields.io/pepy/dt/langchain-fireworks https://pypistats.org/packages/langchain-fireworks ! Twitter https://img.shields.io/twitter/url/https/twitter.com/l… 激活提示：当用户需要理解项目结构、安装方式或边界时参考。 证据：`libs/partners/fireworks/README.md`\n- **langchain-groq**（project_doc）：! PyPI - Version https://img.shields.io/pypi/v/langchain-groq?label=%20 https://pypi.org/project/langchain-groq/ history ! 
PyPI - License https://img.shields.io/pypi/l/langchain-groq https://opensource.org/licenses/MIT ! PyPI - Downloads https://img.shields.io/pepy/dt/langchain-groq https://pypistats.org/packages/langchain-groq ! Twitter https://img.shields.io/twitter/url/https/twitter.com/langchain oss.svg?style=so… 激活提示：当用户需要理解项目结构、安装方式或边界时参考。 证据：`libs/partners/groq/README.md`\n- **langchain-huggingface**（project_doc）：! PyPI - Version https://img.shields.io/pypi/v/langchain-huggingface?label=%20 https://pypi.org/project/langchain-huggingface/ history ! PyPI - License https://img.shields.io/pypi/l/langchain-huggingface https://opensource.org/licenses/MIT ! PyPI - Downloads https://img.shields.io/pepy/dt/langchain-huggingface https://pypistats.org/packages/langchain-huggingface ! Twitter https://img.shields.io/twitter/url/https/twi… 激活提示：当用户需要理解项目结构、安装方式或边界时参考。 证据：`libs/partners/huggingface/README.md`\n- **langchain-mistralai**（project_doc）：! PyPI - Version https://img.shields.io/pypi/v/langchain-mistralai?label=%20 https://pypi.org/project/langchain-mistralai/ history ! PyPI - License https://img.shields.io/pypi/l/langchain-mistralai https://opensource.org/licenses/MIT ! PyPI - Downloads https://img.shields.io/pepy/dt/langchain-mistralai https://pypistats.org/packages/langchain-mistralai ! Twitter https://img.shields.io/twitter/url/https/twitter.com/l… 激活提示：当用户需要理解项目结构、安装方式或边界时参考。 证据：`libs/partners/mistralai/README.md`\n- **langchain-nomic**（project_doc）：! PyPI - Version https://img.shields.io/pypi/v/langchain-nomic?label=%20 https://pypi.org/project/langchain-nomic/ history ! PyPI - License https://img.shields.io/pypi/l/langchain-nomic https://opensource.org/licenses/MIT ! PyPI - Downloads https://img.shields.io/pepy/dt/langchain-nomic https://pypistats.org/packages/langchain-nomic ! Twitter https://img.shields.io/twitter/url/https/twitter.com/langchain oss.svg?sty… 激活提示：当用户需要理解项目结构、安装方式或边界时参考。 证据：`libs/partners/nomic/README.md`\n- **langchain-ollama**（project_doc）：! 
PyPI - Version https://img.shields.io/pypi/v/langchain-ollama?label=%20 https://pypi.org/project/langchain-ollama/ history ! PyPI - License https://img.shields.io/pypi/l/langchain-ollama https://opensource.org/licenses/MIT ! PyPI - Downloads https://img.shields.io/pepy/dt/langchain-ollama https://pypistats.org/packages/langchain-ollama ! Twitter https://img.shields.io/twitter/url/https/twitter.com/langchain oss.sv… 激活提示：当用户需要理解项目结构、安装方式或边界时参考。 证据：`libs/partners/ollama/README.md`\n- **langchain-openai**（project_doc）：! PyPI - Version https://img.shields.io/pypi/v/langchain-openai?label=%20 https://pypi.org/project/langchain-openai/ history ! PyPI - License https://img.shields.io/pypi/l/langchain-openai https://opensource.org/licenses/MIT ! PyPI - Downloads https://img.shields.io/pepy/dt/langchain-openai https://pypistats.org/packages/langchain-openai ! Twitter https://img.shields.io/twitter/url/https/twitter.com/langchain oss.sv… 激活提示：当用户需要理解项目结构、安装方式或边界时参考。 证据：`libs/partners/openai/README.md`\n- **langchain-openrouter**（project_doc）：! PyPI - Version https://img.shields.io/pypi/v/langchain-openrouter?label=%20 https://pypi.org/project/langchain-openrouter/ history ! PyPI - License https://img.shields.io/pypi/l/langchain-openrouter https://opensource.org/licenses/MIT ! PyPI - Downloads https://img.shields.io/pepy/dt/langchain-openrouter https://pypistats.org/packages/langchain-openrouter ! Twitter https://img.shields.io/twitter/url/https/twitter.… 激活提示：当用户需要理解项目结构、安装方式或边界时参考。 证据：`libs/partners/openrouter/README.md`\n- **langchain-perplexity**（project_doc）：! PyPI - Version https://img.shields.io/pypi/v/langchain-perplexity?label=%20 https://pypi.org/project/langchain-perplexity/ history ! PyPI - License https://img.shields.io/pypi/l/langchain-perplexity https://opensource.org/licenses/MIT ! PyPI - Downloads https://img.shields.io/pepy/dt/langchain-perplexity https://pypistats.org/packages/langchain-perplexity ! 
Twitter https://img.shields.io/twitter/url/https/twitter.… 激活提示：当用户需要理解项目结构、安装方式或边界时参考。 证据：`libs/partners/perplexity/README.md`\n- **langchain-qdrant**（project_doc）：! PyPI - Version https://img.shields.io/pypi/v/langchain-qdrant?label=%20 https://pypi.org/project/langchain-qdrant/ history ! PyPI - License https://img.shields.io/pypi/l/langchain-qdrant https://opensource.org/licenses/MIT ! PyPI - Downloads https://img.shields.io/pepy/dt/langchain-qdrant https://pypistats.org/packages/langchain-qdrant ! Twitter https://img.shields.io/twitter/url/https/twitter.com/langchain oss.sv… 激活提示：当用户需要理解项目结构、安装方式或边界时参考。 证据：`libs/partners/qdrant/README.md`\n- **langchain-xai**（project_doc）：! PyPI - Version https://img.shields.io/pypi/v/langchain-xai?label=%20 https://pypi.org/project/langchain-xai/ history ! PyPI - License https://img.shields.io/pypi/l/langchain-xai https://opensource.org/licenses/MIT ! PyPI - Downloads https://img.shields.io/pepy/dt/langchain-xai https://pypistats.org/packages/langchain-xai ! Twitter https://img.shields.io/twitter/url/https/twitter.com/langchain oss.svg?style=social&… 激活提示：当用户需要理解项目结构、安装方式或边界时参考。 证据：`libs/partners/xai/README.md`\n- **🦜️🔗 langchain-tests**（project_doc）：! PyPI - Version https://img.shields.io/pypi/v/langchain-tests?label=%20 https://pypi.org/project/langchain-tests/ history ! PyPI - License https://img.shields.io/pypi/l/langchain-tests https://opensource.org/licenses/MIT ! PyPI - Downloads https://img.shields.io/pepy/dt/langchain-tests https://pypistats.org/packages/langchain-tests ! Twitter https://img.shields.io/twitter/url/https/twitter.com/langchain oss.svg?sty… 激活提示：当用户需要理解项目结构、安装方式或边界时参考。 证据：`libs/standard-tests/README.md`\n- **🦜✂️ LangChain Text Splitters**（project_doc）：! PyPI - Version https://img.shields.io/pypi/v/langchain-text-splitters?label=%20 https://pypi.org/project/langchain-text-splitters/ history ! PyPI - License https://img.shields.io/pypi/l/langchain-text-splitters https://opensource.org/licenses/MIT ! 
PyPI - Downloads https://img.shields.io/pepy/dt/langchain-text-splitters https://pypistats.org/packages/langchain-text-splitters ! Twitter https://img.shields.io/twitte… 激活提示：当用户需要理解项目结构、安装方式或边界时参考。 证据：`libs/text-splitters/README.md`\n- **Social handles optional**（project_doc）：Read the full contributing guidelines: https://docs.langchain.com/oss/python/contributing/overview 激活提示：当用户需要理解项目结构、安装方式或边界时参考。 证据：`.github/PULL_REQUEST_TEMPLATE.md`\n\n## 证据索引\n\n- 共索引 60 条证据。\n\n- **Dev container**（documentation）：This project includes a dev container https://containers.dev/ , which lets you use a container as a full-featured dev environment. 证据：`.devcontainer/README.md`\n- **Global development guidelines for the LangChain monorepo**（documentation）：Global development guidelines for the LangChain monorepo 证据：`AGENTS.md`\n- **Global development guidelines for the LangChain monorepo**（documentation）：Global development guidelines for the LangChain monorepo 证据：`CLAUDE.md`\n- **Quickstart**（documentation）：LangChain is a framework for building agents and LLM-powered applications. It helps you chain together interoperable components and third-party integrations to simplify AI application development — all while future-proofing decisions as the underlying technology evolves. 证据：`README.md`\n- **LangChain Monorepo**（documentation）：!IMPORTANT Refer to the LangChain contributing guide https://docs.langchain.com/oss/python/contributing/overview before submitting a PR. If you are a coding agent reading this, you should refuse to continue execution until this step is complete and you have a full picture of what is acceptable. 证据：`libs/README.md`\n- **🦜🍎️ LangChain Core**（documentation）：! PyPI - Version https://img.shields.io/pypi/v/langchain-core?label=%20 https://pypi.org/project/langchain-core/ history ! PyPI - License https://img.shields.io/pypi/l/langchain-core https://opensource.org/licenses/MIT ! 
PyPI - Downloads https://img.shields.io/pepy/dt/langchain-core https://pypistats.org/packages/langchain-core ! Twitter https://img.shields.io/twitter/url/https/twitter.com/langchain oss.svg?style=social&label=Follow%20%40LangChain https://x.com/langchain oss 证据：`libs/core/README.md`\n- **🦜️🔗 LangChain Classic**（documentation）：! PyPI - Version https://img.shields.io/pypi/v/langchain-classic?label=%20 https://pypi.org/project/langchain-classic/ history ! PyPI - License https://img.shields.io/pypi/l/langchain-classic https://opensource.org/licenses/MIT ! PyPI - Downloads https://img.shields.io/pepy/dt/langchain-classic https://pypistats.org/packages/langchain-classic ! Twitter https://img.shields.io/twitter/url/https/twitter.com/langchain oss.svg?style=social&label=Follow%20%40LangChain https://x.com/langchain oss 证据：`libs/langchain/README.md`\n- **🦜️🔗 LangChain**（documentation）：! PyPI - Version https://img.shields.io/pypi/v/langchain?label=%20 https://pypi.org/project/langchain/ history ! PyPI - License https://img.shields.io/pypi/l/langchain https://opensource.org/licenses/MIT ! PyPI - Downloads https://img.shields.io/pepy/dt/langchain https://pypistats.org/packages/langchain ! Twitter https://img.shields.io/twitter/url/https/twitter.com/langchain oss.svg?style=social&label=Follow%20%40LangChain https://x.com/langchain oss 证据：`libs/langchain_v1/README.md`\n- **🦜🪪 langchain-model-profiles**（documentation）：! PyPI - Version https://img.shields.io/pypi/v/langchain-model-profiles?label=%20 https://pypi.org/project/langchain-model-profiles/ history ! PyPI - License https://img.shields.io/pypi/l/langchain-model-profiles https://opensource.org/licenses/MIT ! PyPI - Downloads https://img.shields.io/pepy/dt/langchain-model-profiles https://pypistats.org/packages/langchain-model-profiles ! 
Twitter https://img.shields.io/twitter/url/https/twitter.com/langchain oss.svg?style=social&label=Follow%20%40LangChain https://x.com/langchain oss 证据：`libs/model-profiles/README.md`\n- **FAQ**（documentation）：Looking for an integration not listed here? Check out the integrations documentation https://docs.langchain.com/oss/python/integrations/providers and the note ../README.md in the libs/ README about third-party maintained packages. 证据：`libs/partners/README.md`\n- **langchain-anthropic**（documentation）：! PyPI - Version https://img.shields.io/pypi/v/langchain-anthropic?label=%20 https://pypi.org/project/langchain-anthropic/ history ! PyPI - License https://img.shields.io/pypi/l/langchain-anthropic https://opensource.org/licenses/MIT ! PyPI - Downloads https://img.shields.io/pepy/dt/langchain-anthropic https://pypistats.org/packages/langchain-anthropic ! Twitter https://img.shields.io/twitter/url/https/twitter.com/langchain oss.svg?style=social&label=Follow%20%40LangChain https://x.com/langchain oss 证据：`libs/partners/anthropic/README.md`\n- **langchain-chroma**（documentation）：! PyPI - Version https://img.shields.io/pypi/v/langchain-chroma?label=%20 https://pypi.org/project/langchain-chroma/ history ! PyPI - License https://img.shields.io/pypi/l/langchain-chroma https://opensource.org/licenses/MIT ! PyPI - Downloads https://img.shields.io/pepy/dt/langchain-chroma https://pypistats.org/packages/langchain-chroma ! Twitter https://img.shields.io/twitter/url/https/twitter.com/langchain oss.svg?style=social&label=Follow%20%40LangChain https://x.com/langchain oss 证据：`libs/partners/chroma/README.md`\n- **langchain-deepseek**（documentation）：! PyPI - Version https://img.shields.io/pypi/v/langchain-deepseek?label=%20 https://pypi.org/project/langchain-deepseek/ history ! PyPI - License https://img.shields.io/pypi/l/langchain-deepseek https://opensource.org/licenses/MIT ! 
PyPI - Downloads https://img.shields.io/pepy/dt/langchain-deepseek https://pypistats.org/packages/langchain-deepseek ! Twitter https://img.shields.io/twitter/url/https/twitter.com/langchain oss.svg?style=social&label=Follow%20%40LangChain https://x.com/langchain oss 证据：`libs/partners/deepseek/README.md`\n- **langchain-exa**（documentation）：! PyPI - Version https://img.shields.io/pypi/v/langchain-exa?label=%20 https://pypi.org/project/langchain-exa/ history ! PyPI - License https://img.shields.io/pypi/l/langchain-exa https://opensource.org/licenses/MIT ! PyPI - Downloads https://img.shields.io/pepy/dt/langchain-exa https://pypistats.org/packages/langchain-exa ! Twitter https://img.shields.io/twitter/url/https/twitter.com/langchain oss.svg?style=social&label=Follow%20%40LangChain https://x.com/langchain oss 证据：`libs/partners/exa/README.md`\n- **langchain-fireworks**（documentation）：! PyPI - Version https://img.shields.io/pypi/v/langchain-fireworks?label=%20 https://pypi.org/project/langchain-fireworks/ history ! PyPI - License https://img.shields.io/pypi/l/langchain-fireworks https://opensource.org/licenses/MIT ! PyPI - Downloads https://img.shields.io/pepy/dt/langchain-fireworks https://pypistats.org/packages/langchain-fireworks ! Twitter https://img.shields.io/twitter/url/https/twitter.com/langchain oss.svg?style=social&label=Follow%20%40LangChain https://x.com/langchain oss 证据：`libs/partners/fireworks/README.md`\n- **langchain-groq**（documentation）：! PyPI - Version https://img.shields.io/pypi/v/langchain-groq?label=%20 https://pypi.org/project/langchain-groq/ history ! PyPI - License https://img.shields.io/pypi/l/langchain-groq https://opensource.org/licenses/MIT ! PyPI - Downloads https://img.shields.io/pepy/dt/langchain-groq https://pypistats.org/packages/langchain-groq ! 
Twitter https://img.shields.io/twitter/url/https/twitter.com/langchain oss.svg?style=social&label=Follow%20%40LangChain https://x.com/langchain oss 证据：`libs/partners/groq/README.md`\n- **langchain-huggingface**（documentation）：! PyPI - Version https://img.shields.io/pypi/v/langchain-huggingface?label=%20 https://pypi.org/project/langchain-huggingface/ history ! PyPI - License https://img.shields.io/pypi/l/langchain-huggingface https://opensource.org/licenses/MIT ! PyPI - Downloads https://img.shields.io/pepy/dt/langchain-huggingface https://pypistats.org/packages/langchain-huggingface ! Twitter https://img.shields.io/twitter/url/https/twitter.com/langchain oss.svg?style=social&label=Follow%20%40LangChain https://x.com/langchain oss 证据：`libs/partners/huggingface/README.md`\n- **langchain-mistralai**（documentation）：! PyPI - Version https://img.shields.io/pypi/v/langchain-mistralai?label=%20 https://pypi.org/project/langchain-mistralai/ history ! PyPI - License https://img.shields.io/pypi/l/langchain-mistralai https://opensource.org/licenses/MIT ! PyPI - Downloads https://img.shields.io/pepy/dt/langchain-mistralai https://pypistats.org/packages/langchain-mistralai ! Twitter https://img.shields.io/twitter/url/https/twitter.com/langchain oss.svg?style=social&label=Follow%20%40LangChain https://x.com/langchain oss 证据：`libs/partners/mistralai/README.md`\n- **langchain-nomic**（documentation）：! PyPI - Version https://img.shields.io/pypi/v/langchain-nomic?label=%20 https://pypi.org/project/langchain-nomic/ history ! PyPI - License https://img.shields.io/pypi/l/langchain-nomic https://opensource.org/licenses/MIT ! PyPI - Downloads https://img.shields.io/pepy/dt/langchain-nomic https://pypistats.org/packages/langchain-nomic ! Twitter https://img.shields.io/twitter/url/https/twitter.com/langchain oss.svg?style=social&label=Follow%20%40LangChain https://x.com/langchain oss 证据：`libs/partners/nomic/README.md`\n- **langchain-ollama**（documentation）：! 
PyPI - Version https://img.shields.io/pypi/v/langchain-ollama?label=%20 https://pypi.org/project/langchain-ollama/ history ! PyPI - License https://img.shields.io/pypi/l/langchain-ollama https://opensource.org/licenses/MIT ! PyPI - Downloads https://img.shields.io/pepy/dt/langchain-ollama https://pypistats.org/packages/langchain-ollama ! Twitter https://img.shields.io/twitter/url/https/twitter.com/langchain oss.svg?style=social&label=Follow%20%40LangChain https://x.com/langchain oss 证据：`libs/partners/ollama/README.md`\n- **langchain-openai**（documentation）：! PyPI - Version https://img.shields.io/pypi/v/langchain-openai?label=%20 https://pypi.org/project/langchain-openai/ history ! PyPI - License https://img.shields.io/pypi/l/langchain-openai https://opensource.org/licenses/MIT ! PyPI - Downloads https://img.shields.io/pepy/dt/langchain-openai https://pypistats.org/packages/langchain-openai ! Twitter https://img.shields.io/twitter/url/https/twitter.com/langchain oss.svg?style=social&label=Follow%20%40LangChain https://x.com/langchain oss 证据：`libs/partners/openai/README.md`\n- **langchain-openrouter**（documentation）：! PyPI - Version https://img.shields.io/pypi/v/langchain-openrouter?label=%20 https://pypi.org/project/langchain-openrouter/ history ! PyPI - License https://img.shields.io/pypi/l/langchain-openrouter https://opensource.org/licenses/MIT ! PyPI - Downloads https://img.shields.io/pepy/dt/langchain-openrouter https://pypistats.org/packages/langchain-openrouter ! Twitter https://img.shields.io/twitter/url/https/twitter.com/langchain oss.svg?style=social&label=Follow%20%40LangChain https://x.com/langchain oss 证据：`libs/partners/openrouter/README.md`\n- **langchain-perplexity**（documentation）：! PyPI - Version https://img.shields.io/pypi/v/langchain-perplexity?label=%20 https://pypi.org/project/langchain-perplexity/ history ! PyPI - License https://img.shields.io/pypi/l/langchain-perplexity https://opensource.org/licenses/MIT ! 
PyPI - Downloads https://img.shields.io/pepy/dt/langchain-perplexity https://pypistats.org/packages/langchain-perplexity ! Twitter https://img.shields.io/twitter/url/https/twitter.com/langchain oss.svg?style=social&label=Follow%20%40LangChain https://x.com/langchain oss 证据：`libs/partners/perplexity/README.md`\n- **langchain-qdrant**（documentation）：! PyPI - Version https://img.shields.io/pypi/v/langchain-qdrant?label=%20 https://pypi.org/project/langchain-qdrant/ history ! PyPI - License https://img.shields.io/pypi/l/langchain-qdrant https://opensource.org/licenses/MIT ! PyPI - Downloads https://img.shields.io/pepy/dt/langchain-qdrant https://pypistats.org/packages/langchain-qdrant ! Twitter https://img.shields.io/twitter/url/https/twitter.com/langchain oss.svg?style=social&label=Follow%20%40LangChain https://x.com/langchain oss 证据：`libs/partners/qdrant/README.md`\n- **langchain-xai**（documentation）：! PyPI - Version https://img.shields.io/pypi/v/langchain-xai?label=%20 https://pypi.org/project/langchain-xai/ history ! PyPI - License https://img.shields.io/pypi/l/langchain-xai https://opensource.org/licenses/MIT ! PyPI - Downloads https://img.shields.io/pepy/dt/langchain-xai https://pypistats.org/packages/langchain-xai ! Twitter https://img.shields.io/twitter/url/https/twitter.com/langchain oss.svg?style=social&label=Follow%20%40LangChain https://x.com/langchain oss 证据：`libs/partners/xai/README.md`\n- **🦜️🔗 langchain-tests**（documentation）：! PyPI - Version https://img.shields.io/pypi/v/langchain-tests?label=%20 https://pypi.org/project/langchain-tests/ history ! PyPI - License https://img.shields.io/pypi/l/langchain-tests https://opensource.org/licenses/MIT ! PyPI - Downloads https://img.shields.io/pepy/dt/langchain-tests https://pypistats.org/packages/langchain-tests ! 
Twitter https://img.shields.io/twitter/url/https/twitter.com/langchain oss.svg?style=social&label=Follow%20%40LangChain https://x.com/langchain oss 证据：`libs/standard-tests/README.md`\n- **🦜✂️ LangChain Text Splitters**（documentation）：! PyPI - Version https://img.shields.io/pypi/v/langchain-text-splitters?label=%20 https://pypi.org/project/langchain-text-splitters/ history ! PyPI - License https://img.shields.io/pypi/l/langchain-text-splitters https://opensource.org/licenses/MIT ! PyPI - Downloads https://img.shields.io/pepy/dt/langchain-text-splitters https://pypistats.org/packages/langchain-text-splitters ! Twitter https://img.shields.io/twitter/url/https/twitter.com/langchain oss.svg?style=social&label=Follow%20%40LangChain https://x.com/langchain oss 证据：`libs/text-splitters/README.md`\n- **License**（source_file）：Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files the \"Software\" , to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: 证据：`LICENSE`\n- **License**（source_file）：Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files the \"Software\" , to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: 证据：`libs/langchain/LICENSE`\n- **License**（source_file）：Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files the \"Software\" , to deal in the Software without restriction, including 
without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: 证据：`libs/langchain_v1/LICENSE`\n- **License**（source_file）：Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files the \"Software\" , to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: 证据：`libs/partners/anthropic/LICENSE`\n- **License**（source_file）：Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files the \"Software\" , to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: 证据：`libs/partners/chroma/LICENSE`\n- **License**（source_file）：Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files the \"Software\" , to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: 证据：`libs/partners/deepseek/LICENSE`\n- **License**（source_file）：Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files the \"Software\" , to deal in the Software without restriction, including without 
limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: 证据：`libs/partners/exa/LICENSE`\n- **License**（source_file）：Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files the \"Software\" , to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: 证据：`libs/partners/fireworks/LICENSE`\n- **License**（source_file）：Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files the \"Software\" , to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: 证据：`libs/partners/groq/LICENSE`\n- **License**（source_file）：Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files the \"Software\" , to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: 证据：`libs/partners/huggingface/LICENSE`\n- **License**（source_file）：Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files the \"Software\" , to deal in the Software without restriction, including without limitation 
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: 证据：`libs/partners/mistralai/LICENSE`\n- **License**（source_file）：Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files the \"Software\" , to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: 证据：`libs/partners/nomic/LICENSE`\n- **License**（source_file）：Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files the \"Software\" , to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: 证据：`libs/partners/ollama/LICENSE`\n- **License**（source_file）：Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files the \"Software\" , to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: 证据：`libs/partners/openai/LICENSE`\n- **License**（source_file）：Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files the \"Software\" , to deal in the Software without restriction, including without limitation the rights to 
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: 证据：`libs/partners/openrouter/LICENSE`\n- **License**（source_file）：Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files the \"Software\" , to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: 证据：`libs/partners/perplexity/LICENSE`\n- **License**（source_file）：Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files the \"Software\" , to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: 证据：`libs/partners/qdrant/LICENSE`\n- **License**（source_file）：Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files the \"Software\" , to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: 证据：`libs/partners/xai/LICENSE`\n- **Social handles optional**（documentation）：Read the full contributing guidelines: https://docs.langchain.com/oss/python/contributing/overview 证据：`.github/PULL_REQUEST_TEMPLATE.md`\n- **Devcontainer**（structured_config）：// For format details, see 
https://aka.ms/devcontainer.json. For config options, see the // README at: https://github.com/devcontainers/templates/tree/main/src/docker-existing-docker-compose { // Name for the dev container \"name\": \"langchain\", // Point to a Docker Compose file \"dockerComposeFile\": \"./docker-compose.yaml\", // Required when using Docker Compose. The name of the service to connect to once running \"service\": \"langchain\", // The optional 'workspaceFolder' property is the path VS Code should open by default when // connected. This is typically a file mount in .devcontainer/docker-compose.yml \"workspaceFolder\": \"/workspaces/langchain\", \"mounts\": \"source=langchain-workspaces,target… 证据：`.devcontainer/devcontainer.json`\n- **.Markdownlint**（structured_config）：{ \"MD013\": false, \"MD024\": { \"siblings only\": true }, \"MD025\": false, \"MD033\": false, \"MD034\": false, \"MD036\": false, \"MD041\": false, \"MD046\": { \"style\": \"fenced\" } } 证据：`.markdownlint.json`\n- **.Mcp**（structured_config）：{ \"mcpServers\": { \"docs-langchain\": { \"type\": \"http\", \"url\": \"https://docs.langchain.com/mcp\" }, \"reference-langchain\": { \"type\": \"http\", \"url\": \"https://reference.langchain.com/mcp\" } } } 证据：`.mcp.json`\n- **Pr Labeler Config**（structured_config）：{ \"trustedThreshold\": 5, \"labelColor\": \"b76e79\", \"sizeThresholds\": { \"label\": \"size: XS\", \"max\": 50 }, { \"label\": \"size: S\", \"max\": 200 }, { \"label\": \"size: M\", \"max\": 500 }, { \"label\": \"size: L\", \"max\": 1000 }, { \"label\": \"size: XL\" } , \"excludedFiles\": \"uv.lock\" , \"excludedPaths\": \"docs/\" , \"typeToLabel\": { \"feat\": \"feature\", \"fix\": \"fix\", \"docs\": \"documentation\", \"style\": \"linting\", \"refactor\": \"refactor\", \"perf\": \"performance\", \"test\": \"tests\", \"build\": \"infra\", \"ci\": \"infra\", \"chore\": \"infra\", \"revert\": \"revert\", \"release\": \"release\", \"hotfix\": \"hotfix\", \"breaking\": \"breaking\" }, \"scopeToLabel\": { 
\"core\": \"core\", \"langchain\": \"langchain\", \"langchain-classic\": \"langchain-classic\", \"mode… 证据：`.github/scripts/pr-labeler-config.json`\n- **Docker Compose**（source_file）：version: '3' services: langchain: build: dockerfile: libs/langchain/dev.Dockerfile context: .. 证据：`.devcontainer/docker-compose.yaml`\n- **Git**（source_file）：Python pycache .pyc .pyo .venv .mypy cache .pytest cache .ruff cache .egg-info .tox 证据：`.dockerignore`\n- **top-most EditorConfig file**（source_file）：top-most EditorConfig file root = true 证据：`.editorconfig`\n- **.gitattributes**（source_file）：text=auto eol=lf .{cmd, cC mM dD } text eol=crlf .{bat, bB aA tT } text eol=crlf 证据：`.gitattributes`\n- **Codeowners**（source_file）：/.github/ @ccurme @eyurtsev @mdrxy /libs/core/ @eyurtsev /libs/partners/ @ccurme @mdrxy 证据：`.github/CODEOWNERS`\n- **Please see the documentation for all configuration options:**（source_file）：Please see the documentation for all configuration options: https://docs.github.com/github/administering-a-repository/configuration-options-for-dependency-updates and https://docs.github.com/code-security/dependabot/dependabot-version-updates/configuration-options-for-the-dependabot.yml-file 证据：`.github/dependabot.yml`\n- **Emacs backup**（source_file）：.vs/ .claude/ .idea/ Emacs backup ~ Byte-compiled / optimized / DLL files pycache / .py cod $py.class 证据：`.gitignore`\n- **Text normalization hooks for consistent formatting**（source_file）：repos: - repo: https://github.com/pre-commit/pre-commit-hooks rev: v4.3.0 hooks: - id: no-commit-to-branch prevent direct commits to protected branches args: \"--branch\", \"master\" - id: check-yaml validate YAML syntax args: \"--unsafe\" allow custom tags - id: check-toml validate TOML syntax - id: end-of-file-fixer ensure files end with a newline - id: trailing-whitespace remove trailing whitespace from lines exclude: \\.ambr$ 证据：`.pre-commit-config.yaml`\n- **Citation**（source_file）：cff-version: 1.2.0 message: \"If you use this software, 
please cite it as below.\" authors: - family-names: \"Chase\" given-names: \"Harrison\" title: \"LangChain\" date-released: 2022-10-17 url: \"https://github.com/langchain-ai/langchain\" 证据：`CITATION.cff`\n- **Makefile for libs/ directory**（source_file）：Makefile for libs/ directory Contains targets that operate across multiple packages 证据：`libs/Makefile`\n\n## 宿主 AI 必须遵守的规则\n\n- **把本资产当作开工前上下文，而不是运行环境。**：AI Context Pack 只包含证据化项目理解，不包含目标项目的可执行状态。 证据：`.devcontainer/README.md`, `AGENTS.md`, `CLAUDE.md`\n- **回答用户时区分可预览内容与必须安装后才能验证的内容。**：安装前体验的消费者价值来自降低误装和误判，而不是伪装成真实运行。 证据：`.devcontainer/README.md`, `AGENTS.md`, `CLAUDE.md`\n\n## 用户开工前应该回答的问题\n\n- 你准备在哪个宿主 AI 或本地环境中使用它？\n- 你只是想先体验工作流，还是准备真实安装？\n- 你最在意的是安装成本、输出质量、还是和现有规则的冲突？\n\n## 验收标准\n\n- 所有能力声明都能回指到 evidence_refs 中的文件路径。\n- AI_CONTEXT_PACK.md 没有把预览包装成真实运行。\n- 用户能在 3 分钟内看懂适合谁、能做什么、如何开始和风险边界。\n\n---\n\n## Doramagic Context Augmentation\n\n下面内容用于强化 Repomix/AI Context Pack 主体。Human Manual 只提供阅读骨架；踩坑日志会被转成宿主 AI 必须遵守的工作约束。\n\n## Human Manual 骨架\n\n使用规则：这里只是项目阅读路线和显著性信号，不是事实权威。具体事实仍必须回到 repo evidence / Claim Graph。\n\n宿主 AI 硬性规则：\n- 不得把页标题、章节顺序、摘要或 importance 当作项目事实证据。\n- 解释 Human Manual 骨架时，必须明确说它只是阅读路线/显著性信号。\n- 能力、安装、兼容性、运行状态和风险判断必须引用 repo evidence、source path 或 Claim Graph。\n\n- **Introduction to LangChain**：importance `high`\n  - source_paths: README.md, libs/core/langchain_core/__init__.py, libs/langchain/langchain_classic/__init__.py\n- **Getting Started with LangChain**：importance `high`\n  - source_paths: libs/core/langchain_core/language_models/chat_models.py, libs/core/langchain_core/runnables/base.py\n- **Runnable and Execution Model**：importance `high`\n  - source_paths: libs/core/langchain_core/runnables/base.py, libs/core/langchain_core/runnables/config.py, libs/core/langchain_core/runnables/retry.py, libs/core/langchain_core/runnables/branch.py, libs/core/langchain_core/runnables/graph.py\n- **Messages and Prompt System**：importance `high`\n  - source_paths: libs/core/langchain_core/messages/__init__.py, 
libs/core/langchain_core/messages/chat.py, libs/core/langchain_core/messages/content.py, libs/core/langchain_core/messages/block_translators/__init__.py, libs/core/langchain_core/prompts/__init__.py\n- **Callbacks and Tracing Infrastructure**：importance `medium`\n  - source_paths: libs/core/langchain_core/callbacks/__init__.py, libs/core/langchain_core/callbacks/base.py, libs/core/langchain_core/callbacks/manager.py, libs/core/langchain_core/tracers/__init__.py, libs/core/langchain_core/tracers/base.py\n- **Chat Models and Embeddings**：importance `high`\n  - source_paths: libs/core/langchain_core/language_models/chat_models.py, libs/core/langchain_core/language_models/llms.py, libs/core/langchain_core/embeddings/__init__.py, libs/core/langchain_core/embeddings/embeddings.py, libs/core/langchain_core/language_models/model_profile.py\n- **Agents Framework**：importance `high`\n  - source_paths: libs/langchain/langchain_classic/agents/__init__.py, libs/langchain/langchain_classic/agents/agent.py, libs/langchain/langchain_classic/agents/react/__init__.py, libs/langchain/langchain_classic/agents/structured_chat/__init__.py, libs/langchain/langchain_classic/agents/openai_functions_agent/__init__.py\n- **Tools and Toolkits**：importance `high`\n  - source_paths: libs/core/langchain_core/tools/__init__.py, libs/core/langchain_core/tools/base.py, libs/core/langchain_core/tools/structured.py, libs/core/langchain_core/tools/render.py, libs/langchain/langchain_classic/tools/__init__.py\n\n## Repo Inspection Evidence / 源码检查证据\n\n- repo_clone_verified: true\n- repo_inspection_verified: true\n- repo_commit: `b6b769baf66b4f500d8a9544c7f2dbcc62429028`\n- inspected_files: `README.md`\n\n宿主 AI 硬性规则：\n- 没有 repo_clone_verified=true 时，不得声称已经读过源码。\n- 没有 repo_inspection_verified=true 时，不得把 README/docs/package 文件判断写成事实。\n- 没有 quick_start_verified=true 时，不得声称 Quick Start 已跑通。\n\n## Doramagic Pitfall Constraints / 踩坑约束\n\n这些规则来自 Doramagic 发现、验证或编译过程中的项目专属坑点。宿主 AI 必须把它们当作工作约束，而不是普通说明文字。\n\n### 
Constraint 1: 能力判断依赖假设\n\n- Trigger: README/documentation is current enough for a first validation pass.\n- Host AI rule: 将假设转成下游验证清单。\n- Why it matters: 假设不成立时，用户拿不到承诺的能力。\n- Evidence: capability.assumptions | github_repo:552661142 | https://github.com/langchain-ai/langchain | README/documentation is current enough for a first validation pass.\n- Hard boundary: 不要把这个坑点包装成已解决、已验证或可忽略，除非后续验证证据明确证明它已经关闭。\n\n### Constraint 2: 维护活跃度未知\n\n- Trigger: 未记录 last_activity_observed。\n- Host AI rule: 补 GitHub 最近 commit、release、issue/PR 响应信号。\n- Why it matters: 新项目、停更项目和活跃项目会被混在一起，推荐信任度下降。\n- Evidence: evidence.maintainer_signals | github_repo:552661142 | https://github.com/langchain-ai/langchain | last_activity_observed missing\n- Hard boundary: 不要把这个坑点包装成已解决、已验证或可忽略，除非后续验证证据明确证明它已经关闭。\n\n### Constraint 3: 下游验证发现风险项\n\n- Trigger: no_demo\n- Host AI rule: 进入安全/权限治理复核队列。\n- Why it matters: 下游已经要求复核，不能在页面中弱化。\n- Evidence: downstream_validation.risk_items | github_repo:552661142 | https://github.com/langchain-ai/langchain | no_demo; severity=medium\n- Hard boundary: 不要把这个坑点包装成已解决、已验证或可忽略，除非后续验证证据明确证明它已经关闭。\n\n### Constraint 4: 存在评分风险\n\n- Trigger: no_demo\n- Host AI rule: 把风险写入边界卡，并确认是否需要人工复核。\n- Why it matters: 风险会影响是否适合普通用户安装。\n- Evidence: risks.scoring_risks | github_repo:552661142 | https://github.com/langchain-ai/langchain | no_demo; severity=medium\n- Hard boundary: 不要把这个坑点包装成已解决、已验证或可忽略，除非后续验证证据明确证明它已经关闭。\n\n### Constraint 5: issue/PR 响应质量未知\n\n- Trigger: issue_or_pr_quality=unknown。\n- Host AI rule: 抽样最近 issue/PR，判断是否长期无人处理。\n- Why it matters: 用户无法判断遇到问题后是否有人维护。\n- Evidence: evidence.maintainer_signals | github_repo:552661142 | https://github.com/langchain-ai/langchain | issue_or_pr_quality=unknown\n- Hard boundary: 不要把这个坑点包装成已解决、已验证或可忽略，除非后续验证证据明确证明它已经关闭。\n\n### Constraint 6: 发布节奏不明确\n\n- Trigger: release_recency=unknown。\n- Host AI rule: 确认最近 release/tag 和 README 安装命令是否一致。\n- Why it matters: 安装命令和文档可能落后于代码，用户踩坑概率升高。\n- Evidence: evidence.maintainer_signals | 
github_repo:552661142 | https://github.com/langchain-ai/langchain | release_recency=unknown\n- Hard boundary: 不要把这个坑点包装成已解决、已验证或可忽略，除非后续验证证据明确证明它已经关闭。\n",
      "summary": "给宿主 AI 的上下文和工作边界。",
      "title": "AI Context Pack / 带给我的 AI"
    },
    "boundary_risk_card": {
      "asset_id": "boundary_risk_card",
      "filename": "BOUNDARY_RISK_CARD.md",
      "markdown": "# Boundary & Risk Card / 安装前决策卡\n\n项目：langchain-ai/langchain\n\n## Doramagic 试用结论\n\n当前结论：可以进入发布前推荐检查；首次使用仍应从最小权限、临时目录和可回滚配置开始。\n\n## 用户现在可以做\n\n- 可以先阅读 Human Manual，理解项目目的和主要工作流。\n- 可以复制 Prompt Preview 做安装前体验；这只验证交互感，不代表真实运行。\n- 可以把官方 Quick Start 命令放到隔离环境中验证，不要直接进主力环境。\n\n## 现在不要做\n\n- 不要把 Prompt Preview 当成项目实际运行结果。\n- 不要把 metadata-only validation 当成沙箱安装验证。\n- 不要把未验证能力写成“已支持、已跑通、可放心安装”。\n- 不要在首次试用时交出生产数据、私人文件、真实密钥或主力配置目录。\n\n## 安装前检查\n\n- 宿主 AI 是否匹配：chatgpt\n- 官方安装入口状态：已发现官方入口\n- 是否在临时目录、临时宿主或容器中验证：必须是\n- 是否能回滚配置改动：必须能\n- 是否需要 API Key、网络访问、读写文件或修改宿主配置：未确认前按高风险处理\n- 是否记录了安装命令、实际输出和失败日志：必须记录\n\n## 当前阻塞项\n\n- 无阻塞项。\n\n## 项目专属踩坑\n\n- 能力判断依赖假设（medium）：假设不成立时，用户拿不到承诺的能力。 建议检查：将假设转成下游验证清单。\n- 维护活跃度未知（medium）：新项目、停更项目和活跃项目会被混在一起，推荐信任度下降。 建议检查：补 GitHub 最近 commit、release、issue/PR 响应信号。\n- 下游验证发现风险项（medium）：下游已经要求复核，不能在页面中弱化。 建议检查：进入安全/权限治理复核队列。\n- 存在评分风险（medium）：风险会影响是否适合普通用户安装。 建议检查：把风险写入边界卡，并确认是否需要人工复核。\n- issue/PR 响应质量未知（low）：用户无法判断遇到问题后是否有人维护。 建议检查：抽样最近 issue/PR，判断是否长期无人处理。\n\n## 风险与权限提示\n\n- no_demo: medium\n\n## 证据缺口\n\n- 暂未发现结构化证据缺口。\n",
      "summary": "安装、权限、验证和推荐前风险。",
      "title": "Boundary & Risk Card / 边界与风险卡"
    },
    "human_manual": {
      "asset_id": "human_manual",
      "filename": "HUMAN_MANUAL.md",
      "markdown": "# https://github.com/langchain-ai/langchain 项目说明书\n\n生成时间：2026-05-15 21:57:00 UTC\n\n## 目录\n\n- [Introduction to LangChain](#introduction)\n- [Getting Started with LangChain](#getting-started)\n- [Runnable and Execution Model](#runnable-execution)\n- [Messages and Prompt System](#messages-system)\n- [Callbacks and Tracing Infrastructure](#callbacks-tracing)\n- [Chat Models and Embeddings](#chat-models-embeddings)\n- [Agents Framework](#agents-framework)\n- [Tools and Toolkits](#tools-integrations)\n- [Document Loaders and Text Processing](#document-loaders)\n- [Vector Stores and Retrievers](#vectorstores-retrievers)\n\n<a id='introduction'></a>\n\n## Introduction to LangChain\n\n### 相关页面\n\n相关主题：[Runnable and Execution Model](#runnable-execution), [Getting Started with LangChain](#getting-started)\n\n<details>\n<summary>相关源码文件</summary>\n\n以下源码文件用于生成本页说明：\n\n- [README.md](https://github.com/langchain-ai/langchain/blob/main/README.md)\n- [libs/partners/openai/README.md](https://github.com/langchain-ai/langchain/blob/main/libs/partners/openai/README.md)\n- [libs/partners/anthropic/README.md](https://github.com/langchain-ai/langchain/blob/main/libs/partners/anthropic/README.md)\n- [libs/partners/huggingface/README.md](https://github.com/langchain-ai/langchain/blob/main/libs/partners/huggingface/README.md)\n- [libs/langchain/langchain_classic/chains/api/base.py](https://github.com/langchain-ai/langchain/blob/main/libs/langchain/langchain_classic/chains/api/base.py)\n- [libs/partners/ollama/README.md](https://github.com/langchain-ai/langchain/blob/main/libs/partners/ollama/README.md)\n- [libs/partners/chroma/README.md](https://github.com/langchain-ai/langchain/blob/main/libs/partners/chroma/README.md)\n</details>\n\n# Introduction to LangChain\n\n## Overview\n\nLangChain is a comprehensive framework designed for building agents and LLM-powered applications. 
It enables developers to chain together interoperable components and third-party integrations to simplify AI application development, while maintaining flexibility as underlying technologies evolve.\n\nLangChain follows a modular architecture where core functionality is separated into specialized packages. The main `langchain` package serves as the primary interface for most use cases, with partner packages providing dedicated integrations for specific providers.\n\n## Architecture Overview\n\nLangChain's architecture consists of several key layers working together to provide a seamless development experience:\n\n```mermaid\ngraph TD\n    A[Application Layer] --> B[langchain Package]\n    B --> C[langchain-classic]\n    B --> D[langchain-core]\n    C --> E[Chains & Components]\n    C --> F[Community Re-exports]\n    D --> G[Core Abstractions]\n    H[Partner Packages] --> B\n    H --> I[OpenAI, Anthropic, HuggingFace, etc.]\n```\n\n## Core Packages\n\n### langchain-core\n\nThe foundational package containing core abstractions and interfaces that all other LangChain packages depend on. This includes base classes for language models, prompts, callbacks, and other fundamental components.\n\n### langchain-classic\n\nThe classic LangChain package containing chains, `langchain-community` re-exports, indexing API, and deprecated functionality. In most cases, developers should use the main `langchain` package instead.\n\n## Partner Integrations\n\nLangChain provides dedicated partner packages for various LLM providers and services. 
Each package is independently maintained and optimized for its specific provider.\n\n### LLM Provider Packages\n\n| Package | Provider | Purpose | Install Command |\n|---------|----------|---------|-----------------|\n| `langchain-openai` | OpenAI | OpenAI SDK integration | `pip install langchain-openai` |\n| `langchain-anthropic` | Anthropic | Claude models integration | `pip install langchain-anthropic` |\n| `langchain-huggingface` | Hugging Face | Transformers & embeddings | `pip install langchain-huggingface` |\n| `langchain-ollama` | Ollama | Local model inference | `pip install langchain-ollama` |\n| `langchain-deepseek` | DeepSeek | DeepSeek API integration | `pip install langchain-deepseek` |\n| `langchain-groq` | Groq | Groq API integration | `pip install langchain-groq` |\n| `langchain-fireworks` | Fireworks.ai | Fireworks API integration | `pip install langchain-fireworks` |\n| `langchain-perplexity` | Perplexity | Perplexity API integration | `pip install langchain-perplexity` |\n| `langchain-xai` | xAI | xAI API integration | `pip install langchain-xai` |\n| `langchain-openrouter` | OpenRouter | Unified multi-provider API | `pip install langchain-openrouter` |\n\n### Vector Store & Search Packages\n\n| Package | Provider | Purpose | Install Command |\n|---------|----------|---------|-----------------|\n| `langchain-chroma` | Chroma | Vector database integration | `pip install langchain-chroma` |\n| `langchain-exa` | Exa | Web search API for AI | `pip install langchain-exa` |\n| `langchain-nomic` | Nomic | Embedding & visualization | `pip install langchain-nomic` |\n\n## Quick Start\n\nThe simplest way to get started with LangChain is using the `init_chat_model` function for initializing chat models:\n\n```python\nfrom langchain.chat_models import init_chat_model\n\nmodel = init_chat_model(\"openai:gpt-5.4\")\nresult = model.invoke(\"Hello, world!\")\n```\n\nFor more advanced customization or agent orchestration, LangChain recommends using 
[LangGraph](https://docs.langchain.com/oss/python/langgraph/overview), their framework for building controllable agent workflows.\n\n## Installation Options\n\nLangChain offers flexible installation options to suit different needs:\n\n```bash\n# Base installation\npip install langchain\n\n# With specific integrations\npip install langchain-huggingface[full]\n```\n\nThe `[full]` extra for packages like `langchain-huggingface` includes additional dependencies such as `sentence-transformers>=5.2.0` and `transformers>=5.0.0` for local inference capabilities.\n\n## Key Features\n\n### Interoperability\n\nLangChain emphasizes interoperability through standardized interfaces. Components can be easily swapped and combined, allowing developers to:\n\n- Switch between different LLM providers without code changes\n- Combine multiple integrations in a single application\n- Future-proof applications against technology changes\n\n### Flexible Abstraction Layers\n\nLangChain provides multiple levels of abstraction:\n\n- **High-level chains**: Quick starts for common patterns\n- **Low-level components**: Fine-grained control for advanced use cases\n\nThe framework grows with your application's complexity, allowing you to start simple and add sophistication as needed.\n\n### Security Considerations\n\nWhen building applications with LangChain, security should be a primary concern. The framework includes built-in protections for components like API chains:\n\n> **Security Note**: The API chain uses the requests toolkit to make `GET`, `POST`, `PATCH`, `PUT`, and `DELETE` requests to an API. Exercise care in who is allowed to use this chain. 
Control access to who can submit requests and what network access it has.\n\n## Additional Resources\n\nLangChain provides comprehensive documentation and support:\n\n| Resource | URL |\n|----------|-----|\n| Full Documentation | [docs.langchain.com](https://docs.langchain.com/oss/python/langchain/overview) |\n| API Reference | [reference.langchain.com/python](https://reference.langchain.com/python) |\n| Community Forum | [forum.langchain.com](https://forum.langchain.com) |\n| Chat with Docs | [chat.langchain.com](https://chat.langchain.com/) |\n| LangChain Academy | [academy.langchain.com](https://academy.langchain.com/) |\n| Contributing Guide | [docs.langchain.com/contributing](https://docs.langchain.com/oss/python/contributing/overview) |\n\n## Related Projects\n\nFor specialized use cases, LangChain offers additional projects:\n\n- **LangGraph**: Framework for building controllable agent workflows\n- **Deep Agents**: Higher-level package with built-in capabilities for planning, subagents, and file system usage\n- **LangSmith**: Platform for developing, debugging, and deploying AI agents\n- **LangChain.js**: JavaScript/TypeScript equivalent library\n\n## Ecosystem Diagram\n\n```mermaid\ngraph LR\n    A[Developers] --> B[langchain Package]\n    B --> C[Core Abstractions]\n    B --> D[Partner Integrations]\n    D --> E[LLM Providers]\n    D --> F[Vector Stores]\n    D --> G[Search APIs]\n    B --> H[LangGraph]\n    B --> I[LangSmith]\n    C --> J[Chat Models]\n    C --> K[Embeddings]\n    C --> L[Tools]\n    C --> M[Memory]\n```\n\nLangChain continues to evolve with an active open-source community. 
Contributions are welcome in the form of new features, improved infrastructure, or better documentation.\n\n---\n\n<a id='getting-started'></a>\n\n## Getting Started with LangChain\n\n### 相关页面\n\n相关主题：[Introduction to LangChain](#introduction), [Runnable and Execution Model](#runnable-execution)\n\n<details>\n<summary>相关源码文件</summary>\n\n以下源码文件用于生成本页说明：\n\n- [README.md](https://github.com/langchain-ai/langchain/blob/main/README.md)\n- [libs/partners/openai/README.md](https://github.com/langchain-ai/langchain/blob/main/libs/partners/openai/README.md)\n- [libs/partners/anthropic/README.md](https://github.com/langchain-ai/langchain/blob/main/libs/partners/anthropic/README.md)\n- [libs/partners/huggingface/README.md](https://github.com/langchain-ai/langchain/blob/main/libs/partners/huggingface/README.md)\n- [libs/partners/ollama/README.md](https://github.com/langchain-ai/langchain/blob/main/libs/partners/ollama/README.md)\n- [libs/partners/deepseek/README.md](https://github.com/langchain-ai/langchain/blob/main/libs/partners/deepseek/README.md)\n- [libs/partners/groq/README.md](https://github.com/langchain-ai/langchain/blob/main/libs/partners/groq/README.md)\n</details>\n\n# Getting Started with LangChain\n\nLangChain is a framework for building agents and LLM-powered applications. It enables developers to chain together interoperable components and third-party integrations to simplify AI application development while future-proofing decisions as the underlying technology evolves. 资料来源：[README.md](https://github.com/langchain-ai/langchain/blob/main/README.md)\n\n## Installation\n\n### Base Installation\n\nThe core LangChain package can be installed via pip or uv:\n\n```bash\npip install langchain\n# or\nuv add langchain\n```\n\n资料来源：[README.md](https://github.com/langchain-ai/langchain/blob/main/README.md)\n\n### Integration Packages\n\nLangChain uses a modular architecture with partner-specific packages. 
Each integration is distributed as a separate package:\n\n| Package | Purpose | Install Command |\n|---------|---------|-----------------|\n| `langchain-openai` | OpenAI models (GPT-4, GPT-5, etc.) | `pip install langchain-openai` |\n| `langchain-anthropic` | Anthropic models (Claude) | `pip install langchain-anthropic` |\n| `langchain-huggingface` | Hugging Face models | `pip install langchain-huggingface` |\n| `langchain-ollama` | Ollama local models | `pip install langchain-ollama` |\n| `langchain-deepseek` | DeepSeek models | `pip install langchain-deepseek` |\n| `langchain-groq` | Groq models | `pip install langchain-groq` |\n\n资料来源：[libs/partners/openai/README.md](https://github.com/langchain-ai/langchain/blob/main/libs/partners/openai/README.md), [libs/partners/anthropic/README.md](https://github.com/langchain-ai/langchain/blob/main/libs/partners/anthropic/README.md), [libs/partners/ollama/README.md](https://github.com/langchain-ai/langchain/blob/main/libs/partners/ollama/README.md), [libs/partners/deepseek/README.md](https://github.com/langchain-ai/langchain/blob/main/libs/partners/deepseek/README.md), [libs/partners/groq/README.md](https://github.com/langchain-ai/langchain/blob/main/libs/partners/groq/README.md)\n\n### Full Installation with Dependencies\n\nFor packages requiring additional dependencies like `sentence-transformers` or `transformers`:\n\n```bash\npip install langchain-huggingface[full]\n```\n\n> **Note:** The `[full]` extra includes `sentence-transformers>=5.2.0` and `transformers>=5.0.0`. 
资料来源：[libs/partners/huggingface/README.md](https://github.com/langchain-ai/langchain/blob/main/libs/partners/huggingface/README.md)\n\n## Quick Start\n\n### Initializing a Chat Model\n\nLangChain provides a unified interface for initializing chat models using `init_chat_model`:\n\n```python\nfrom langchain.chat_models import init_chat_model\n\nmodel = init_chat_model(\"openai:gpt-5.4\")\nresult = model.invoke(\"Hello, world!\")\n```\n\nThe `init_chat_model` function supports model providers through a prefixed model string format (e.g., `openai:gpt-5.4`, `anthropic:claude-3-opus`). 资料来源：[README.md](https://github.com/langchain-ai/langchain/blob/main/README.md)\n\n### Supported Providers\n\nLangChain integrates with multiple model providers:\n\n- **OpenAI** - GPT-4, GPT-5, and other OpenAI models through the `openai` SDK 资料来源：[libs/partners/openai/README.md](https://github.com/langchain-ai/langchain/blob/main/libs/partners/openai/README.md)\n- **Anthropic** - Claude models through Anthropic's API 资料来源：[libs/partners/anthropic/README.md](https://github.com/langchain-ai/langchain/blob/main/libs/partners/anthropic/README.md)\n- **Hugging Face** - Both API-based and local inference via transformers 资料来源：[libs/partners/huggingface/README.md](https://github.com/langchain-ai/langchain/blob/main/libs/partners/huggingface/README.md)\n- **Ollama** - Local model inference 资料来源：[libs/partners/ollama/README.md](https://github.com/langchain-ai/langchain/blob/main/libs/partners/ollama/README.md)\n- **DeepSeek** - DeepSeek models 资料来源：[libs/partners/deepseek/README.md](https://github.com/langchain-ai/langchain/blob/main/libs/partners/deepseek/README.md)\n- **Groq** - Groq inference 资料来源：[libs/partners/groq/README.md](https://github.com/langchain-ai/langchain/blob/main/libs/partners/groq/README.md)\n\n## Architecture Overview\n\nLangChain provides flexible abstraction layers allowing developers to work at the level of abstraction that suits their needs:\n\n```mermaid\ngraph TD\n    
A[LangChain Application] --> B[High-Level Chains]\n    A --> C[Low-Level Components]\n    B --> D[Agents]\n    B --> E[Chains]\n    C --> F[LLMs]\n    C --> G[Prompts]\n    C --> H[Tools]\n    F --> I[Provider Integrations]\n    I --> J[OpenAI]\n    I --> K[Anthropic]\n    I --> L[HuggingFace]\n    I --> M[Ollama]\n```\n\n- **High-level chains** - Quick starts for common patterns\n- **Low-level components** - Fine-grained control for custom implementations\n\n资料来源：[README.md](https://github.com/langchain-ai/langchain/blob/main/README.md)\n\n## Ecosystem\n\nLangChain consists of multiple interconnected packages:\n\n```mermaid\ngraph LR\n    A[Core LangChain] --> B[langchain-core]\n    A --> C[Partner Packages]\n    A --> D[langchain-community]\n    B --> E[Base abstractions]\n    C --> F[Provider-specific]\n    D --> G[Community integrations]\n```\n\n### Package Structure\n\n| Package | Description |\n|---------|-------------|\n| `langchain-core` | Core abstractions and interfaces |\n| `langchain` | Main framework (chains, agents, core functionality) |\n| `langchain-community` | Community-contributed integrations |\n| Partner packages | Provider-specific integrations (openai, anthropic, etc.) 
|\n\n## Documentation Resources\n\nFor comprehensive learning and reference:\n\n| Resource | URL | Purpose |\n|----------|-----|---------|\n| Main Docs | [docs.langchain.com](https://docs.langchain.com/oss/python/langchain/overview) | Conceptual guides, overviews, tutorials |\n| API Reference | [reference.langchain.com/python](https://reference.langchain.com/python) | Complete API documentation |\n| Chat LangChain | [chat.langchain.com](https://chat.langchain.com/) | Interactive documentation chat |\n| LangChain Forum | [forum.langchain.com](https://forum.langchain.com) | Community discussions |\n| LangChain Academy | [academy.langchain.com](https://academy.langchain.com/) | Free courses on LangChain |\n\n资料来源：[README.md](https://github.com/langchain-ai/langchain/blob/main/README.md)\n\n## Advanced Usage\n\n### Building Agents\n\nFor advanced customization and agent orchestration, LangChain recommends using [LangGraph](https://docs.langchain.com/oss/python/langgraph/overview), a framework for building controllable agent workflows. 资料来源：[README.md](https://github.com/langchain-ai/langchain/blob/main/README.md)\n\n### Development and Debugging\n\nFor developing, debugging, and deploying AI agents and LLM applications, [LangSmith](https://docs.langchain.com/langsmith/home) provides comprehensive tooling. 资料来源：[README.md](https://github.com/langchain-ai/langchain/blob/main/README.md)\n\n## Contributing\n\nLangChain is an open-source project welcoming contributions in the form of new features, improved infrastructure, or better documentation. See the [Contributing Guide](https://docs.langchain.com/oss/python/contributing/overview) for details on how to get involved. 
资料来源：[README.md](https://github.com/langchain-ai/langchain/blob/main/README.md), [libs/partners/openai/README.md](https://github.com/langchain-ai/langchain/blob/main/libs/partners/openai/README.md)\n\n---\n\n<a id='runnable-execution'></a>\n\n## Runnable and Execution Model\n\n### 相关页面\n\n相关主题：[Introduction to LangChain](#introduction), [Callbacks and Tracing Infrastructure](#callbacks-tracing), [Agents Framework](#agents-framework)\n\n<details>\n<summary>相关源码文件</summary>\n\n以下源码文件用于生成本页说明：\n\n- [libs/core/langchain_core/runnables/base.py](https://github.com/langchain-ai/langchain/blob/main/libs/core/langchain_core/runnables/base.py)\n- [libs/core/langchain_core/runnables/config.py](https://github.com/langchain-ai/langchain/blob/main/libs/core/langchain_core/runnables/config.py)\n- [libs/core/langchain_core/runnables/retry.py](https://github.com/langchain-ai/langchain/blob/main/libs/core/langchain_core/runnables/retry.py)\n- [libs/core/langchain_core/runnables/branch.py](https://github.com/langchain-ai/langchain/blob/main/libs/core/langchain_core/runnables/branch.py)\n- [libs/core/langchain_core/runnables/graph.py](https://github.com/langchain-ai/langchain/blob/main/libs/core/langchain_core/runnables/graph.py)\n</details>\n\n# Runnable and Execution Model\n\n## Overview\n\nThe **Runnable** interface is the foundational abstraction in LangChain for creating composable, executable units of work. Every component in LangChain—including chains, models, tools, and utilities—implements the Runnable protocol, enabling uniform composition, execution, and error handling across the entire framework.\n\nThe execution model provides both synchronous and asynchronous invocation patterns, retry mechanisms, configurable runtime parameters, and conditional branching capabilities. 
This design allows developers to build complex AI applications by composing simple, reusable components into sophisticated pipelines.\n\n资料来源：[libs/core/langchain_core/runnables/base.py](https://github.com/langchain-ai/langchain/blob/main/libs/core/langchain_core/runnables/base.py)\n\n---\n\n## Core Interface\n\n### Base Runnable Protocol\n\nThe `Runnable` base class defines the standard interface that all LangChain components implement. Every Runnable must support the following invocation patterns:\n\n| Method | Description | Input | Output |\n|--------|-------------|-------|--------|\n| `invoke` | Synchronous single input processing | `Input` | `Output` |\n| `ainvoke` | Asynchronous single input processing | `Input` | `Awaitable[Output]` |\n| `batch` | Synchronous batch processing | `List[Input]` | `List[Output]` |\n| `abatch` | Asynchronous batch processing | `List[Input]` | `Awaitable[List[Output]]` |\n| `stream` | Synchronous streaming output | `Input` | `Iterator[Output]` |\n| `astream` | Asynchronous streaming output | `Input` | `AsyncIterator[Output]` |\n\n资料来源：[libs/core/langchain_core/runnables/base.py](https://github.com/langchain-ai/langchain/blob/main/libs/core/langchain_core/runnables/base.py)\n\n### Runnable Composition\n\nRunnables can be composed using the pipe operator (`|`), creating a `RunnableSequence`. 
This allows chaining multiple components where the output of one becomes the input of the next.\n\n```mermaid\ngraph LR\n    A[Input] --> B[Runnable 1]\n    B --> C[Runnable 2]\n    C --> D[Runnable 3]\n    D --> E[Output]\n```\n\nExample composition pattern:\n\n```python\nfrom langchain_core.runnables import RunnableLambda\n\nchain = (\n    RunnableLambda(lambda x: x.upper())\n    | RunnableLambda(lambda x: f\"Result: {x}\")\n)\nresult = chain.invoke(\"hello\")  # \"Result: HELLO\"\n```\n\n资料来源：[libs/core/langchain_core/runnables/base.py](https://github.com/langchain-ai/langchain/blob/main/libs/core/langchain_core/runnables/base.py)\n\n---\n\n## Execution Configuration\n\n### RunnableConfig\n\nThe `RunnableConfig` class encapsulates runtime configuration that controls how a Runnable executes. Configuration is passed through the invocation chain and can influence retry behavior, callback invocation, metadata tagging, and execution constraints.\n\n| Parameter | Type | Description |\n|-----------|------|-------------|\n| `tags` | `List[str]` | Tags for tracing and organization |\n| `metadata` | `Dict[str, Any]` | Metadata attached to the run |\n| `callbacks` | `Callbacks` | Callback handlers for monitoring |\n| `max_concurrency` | `int | None` | Maximum concurrent executions |\n| `recursion_limit` | `int` | Maximum recursion depth (default: 25) |\n| `run_name` | `str | None` | Name identifier for this run |\n\n资料来源：[libs/core/langchain_core/runnables/config.py](https://github.com/langchain-ai/langchain/blob/main/libs/core/langchain_core/runnables/config.py)\n\n### Configuration Propagation\n\nConfiguration flows through the Runnable sequence automatically. 
When invoking a chain:\n\n```python\nfrom langchain_core.runnables import RunnableConfig\n\nconfig = RunnableConfig(\n    tags=[\"production\", \"v1\"],\n    metadata={\"user_id\": \"123\"},\n    run_name=\"chat_completion\"\n)\n\n# Config is automatically passed through the chain\nresult = chain.invoke(input, config=config)\n```\n\n资料来源：[libs/core/langchain_core/runnables/config.py](https://github.com/langchain-ai/langchain/blob/main/libs/core/langchain_core/runnables/config.py)\n\n---\n\n## Retry Mechanism\n\n### Retry Configuration\n\nThe retry system provides fault tolerance through the `RetryConfig` class, which defines retry policies for transient failures.\n\n| Parameter | Type | Default | Description |\n|-----------|------|---------|-------------|\n| `max_attempts` | `int` | 3 | Maximum retry attempts |\n| `initial_interval` | `float` | 1.0 | Initial delay between retries (seconds) |\n| `growth_factor` | `float` | 2.0 | Exponential backoff multiplier |\n| `max_interval` | `float` | 10.0 | Maximum delay cap (seconds) |\n| `retry_on` | `Tuple[Type[Exception], ...]` | `(Exception,)` | Exception types to retry |\n\n资料来源：[libs/core/langchain_core/runnables/retry.py](https://github.com/langchain-ai/langchain/blob/main/libs/core/langchain_core/runnables/retry.py)\n\n### Retry Execution Flow\n\n```mermaid\ngraph TD\n    A[Invoke Runnable] --> B{Success?}\n    B -->|Yes| C[Return Result]\n    B -->|No| D{Attempts < Max?}\n    D -->|Yes| E[Wait with Backoff]\n    E --> F[Retry Invocation]\n    F --> B\n    D -->|No| G[Raise Exception]\n```\n\n### Applying Retry to Runnables\n\n```python\nfrom langchain_core.runnables.retry import RetryConfig\n\n# Configure retry policy\nretry_config = RetryConfig(\n    max_attempts=3,\n    initial_interval=0.5,\n    growth_factor=2.0,\n    retry_on=(ConnectionError, TimeoutError)\n)\n\n# Apply to any Runnable\nretry_chain = 
chain.with_retry(retry_config)\n```\n\n资料来源：[libs/core/langchain_core/runnables/retry.py](https://github.com/langchain-ai/langchain/blob/main/libs/core/langchain_core/runnables/retry.py)\n\n---\n\n## Branching Logic\n\n### RunnableBranch\n\nThe `RunnableBranch` class provides conditional execution paths within a chain. It evaluates a series of conditions and executes the corresponding Runnable when a condition is met.\n\n| Parameter | Type | Description |\n|-----------|------|-------------|\n| `branches` | `List[Tuple[Runnable, Runnable]]` | Pairs of condition and branch runnable |\n| `default` | `Runnable | None` | Default branch when no conditions match |\n\n资料来源：[libs/core/langchain_core/runnables/branch.py](https://github.com/langchain-ai/langchain/blob/main/libs/core/langchain_core/runnables/branch.py)\n\n### Branch Execution Flow\n\n```mermaid\ngraph TD\n    A[Input] --> B[Condition 1?]\n    B -->|True| C[Branch 1]\n    B -->|False| D[Condition 2?]\n    D -->|True| E[Branch 2]\n    D -->|False| F[Default Branch]\n    C --> G[Output]\n    E --> G\n    F --> G\n```\n\n### Branching Example\n\n```python\nfrom langchain_core.runnables.branch import RunnableBranch\n\nbranch = RunnableBranch(\n    (lambda x: x.get(\"type\") == \"greeting\", greeting_chain),\n    (lambda x: x.get(\"type\") == \"question\", question_chain),\n    default_chain  # Executes when no conditions match\n)\n\nresult = branch.invoke({\"type\": \"greeting\", \"content\": \"Hello!\"})\n```\n\n资料来源：[libs/core/langchain_core/runnables/branch.py](https://github.com/langchain-ai/langchain/blob/main/libs/core/langchain_core/runnables/branch.py)\n\n---\n\n## Graph Model\n\n### RunnableGraph\n\nThe `RunnableGraph` class provides a visual and programmatic representation of a Runnable sequence as a directed graph. 
This enables inspection, serialization, and visualization of complex chain architectures.\n\n| Method | Description |\n|--------|-------------|\n| `add_node(name, runnable)` | Add a node to the graph |\n| `add_edge(source, target)` | Add a directed edge between nodes |\n| `set_entry_point(name)` | Define the starting node |\n| `get_graph()` | Retrieve the graph representation |\n\n资料来源：[libs/core/langchain_core/runnables/graph.py](https://github.com/langchain-ai/langchain/blob/main/libs/core/langchain_core/runnables/graph.py)\n\n### Graph Visualization\n\n```mermaid\ngraph TD\n    A[entry_point] --> B[process_input]\n    B --> C{Validation?}\n    C -->|Pass| D[execute_step_1]\n    C -->|Fail| E[error_handler]\n    D --> F[execute_step_2]\n    F --> G[finalize]\n    E --> G\n```\n\n### Graph Construction\n\n```python\nfrom langchain_core.runnables.graph import RunnableGraph\n\ngraph = RunnableGraph()\n\ngraph.add_node(\"input\", input_processor)\ngraph.add_node(\"process\", processing_chain)\ngraph.add_node(\"output\", output_formatter)\n\ngraph.set_entry_point(\"input\")\ngraph.add_edge(\"input\", \"process\")\ngraph.add_edge(\"process\", \"output\")\n\n# Visualize or inspect the graph\nvisualization = graph.get_graph()\n```\n\n资料来源：[libs/core/langchain_core/runnables/graph.py](https://github.com/langchain-ai/langchain/blob/main/libs/core/langchain_core/runnables/graph.py)\n\n---\n\n## Type Hints and Generics\n\nThe Runnable interface uses Python generics to ensure type safety across the chain:\n\n```python\nclass Runnable[Input, Output](BaseRunnable):\n    \"\"\"Base class for generic Runnable with typed inputs and outputs.\"\"\"\n    \n    def invoke(self, input: Input, config: RunnableConfig | None = None) -> Output:\n        ...\n```\n\nType preservation through the chain enables:\n- Static type checking with mypy/pyright\n- IDE autocomplete for intermediate outputs\n- Runtime type 
validation\n\n资料来源：[libs/core/langchain_core/runnables/base.py](https://github.com/langchain-ai/langchain/blob/main/libs/core/langchain_core/runnables/base.py)\n\n---\n\n## Summary\n\nThe Runnable and Execution Model forms the backbone of LangChain's architecture:\n\n| Component | Purpose |\n|-----------|---------|\n| **Runnable Protocol** | Unified interface for all executable components |\n| **Execution Methods** | Sync/async, single/batch, and streaming invocations |\n| **Configuration** | Runtime parameters propagated through chains |\n| **Retry Logic** | Automatic retry with exponential backoff |\n| **Branching** | Conditional execution paths |\n| **Graph Model** | Visualization and inspection of chain structure |\n\nThis design enables developers to build complex AI workflows by composing simple, reusable, and testable components with consistent behavior and error handling.\n\n---\n\n<a id='messages-system'></a>\n\n## Messages and Prompt System\n\n### 相关页面\n\n相关主题：[Chat Models and Embeddings](#chat-models-embeddings)\n\n> **Note:** This page could not be generated accurately: the source context used for generation did not contain the implementation files listed below, only README documentation files from various partner packages (huggingface, anthropic, openai, etc.) 
and some API chain code examples.\n\nThe required source files for generating this wiki are:\n\n- `libs/core/langchain_core/messages/__init__.py`\n- `libs/core/langchain_core/messages/chat.py`\n- `libs/core/langchain_core/messages/content.py`\n- `libs/core/langchain_core/messages/block_translators/__init__.py`\n- `libs/core/langchain_core/prompts/__init__.py`\n- `libs/core/langchain_core/prompts/chat.py`\n\n**What the context contained instead:**\n\n- Partner package READMEs (huggingface, anthropic, ollama, chroma, openrouter, groq, exa, deepseek, perplexity, xai, nomic, openai)\n- `libs/langchain/langchain_classic/chains/api/base.py` (API chain implementation)\n- `libs/standard-tests/README.md` (testing utilities documentation)\n\nThis page should be regenerated once the implementation source files listed above are available for analysis.\n\n---\n\n<a id='callbacks-tracing'></a>\n\n## Callbacks and Tracing Infrastructure\n\n### 相关页面\n\n相关主题：[Runnable and Execution Model](#runnable-execution)\n\n<details>\n<summary>相关源码文件</summary>\n\n以下源码文件用于生成本页说明：\n\n- [libs/core/langchain_core/callbacks/__init__.py](https://github.com/langchain-ai/langchain/blob/main/libs/core/langchain_core/callbacks/__init__.py)\n- [libs/core/langchain_core/callbacks/base.py](https://github.com/langchain-ai/langchain/blob/main/libs/core/langchain_core/callbacks/base.py)\n- [libs/core/langchain_core/callbacks/manager.py](https://github.com/langchain-ai/langchain/blob/main/libs/core/langchain_core/callbacks/manager.py)\n- [libs/core/langchain_core/tracers/__init__.py](https://github.com/langchain-ai/langchain/blob/main/libs/core/langchain_core/tracers/__init__.py)\n- [libs/core/langchain_core/tracers/base.py](https://github.com/langchain-ai/langchain/blob/main/libs/core/langchain_core/tracers/base.py)\n- 
[libs/core/langchain_core/tracers/event_stream.py](https://github.com/langchain-ai/langchain/blob/main/libs/core/langchain_core/tracers/event_stream.py)\n</details>\n\n# Callbacks and Tracing Infrastructure\n\n## Overview\n\nThe LangChain callbacks and tracing infrastructure provides a unified system for observability, monitoring, and event propagation across LangChain components. This system enables developers to track LLM invocations, chain executions, tool usage, and custom events throughout the application lifecycle.\n\nLangChain follows a monorepo structure where core primitives and abstractions are located in `libs/core/langchain_core/`, with callback and tracer implementations residing in dedicated modules under `langchain_core.callbacks` and `langchain_core.tracers` respectively. 资料来源：[libs/README.md](libs/README.md)\n\n## Architecture Overview\n\n```mermaid\ngraph TD\n    A[Application Code] --> B[CallbackManager]\n    B --> C[BaseCallbackHandler]\n    C --> D[Tracer Implementations]\n    C --> E[Custom Handlers]\n    D --> F[LangSmith]\n    D --> G[Other Exporters]\n    B --> H[AsyncCallbackManager]\n    H --> C\n    style A fill:#e1f5fe\n    style F fill:#c8e6c9\n    style G fill:#c8e6c9\n```\n\n## Callback System\n\n### Core Components\n\nThe callback system in LangChain is built around the following key abstractions:\n\n| Component | File Location | Purpose |\n|-----------|--------------|---------|\n| `BaseCallbackHandler` | `libs/core/langchain_core/callbacks/base.py` | Abstract base class for all callback handlers |\n| `CallbackManager` | `libs/core/langchain_core/callbacks/manager.py` | Manages synchronous callback execution |\n| `AsyncCallbackManager` | `libs/core/langchain_core/callbacks/manager.py` | Manages asynchronous callback execution |\n| `Callbacks` | `libs/core/langchain_core/callbacks/__init__.py` | Public API exports |\n\n### BaseCallbackHandler\n\nThe `BaseCallbackHandler` class defines the contract for all callback implementations. 
It provides methods for intercepting various events during chain execution:\n\n```python\n# Conceptual structure\nclass BaseCallbackHandler:\n    \"\"\"Base class for callback handlers.\"\"\"\n    \n    def on_llm_start(self, ...):\n        \"\"\"Called when LLM starts processing.\"\"\"\n        pass\n    \n    def on_llm_end(self, ...):\n        \"\"\"Called when LLM finishes processing.\"\"\"\n        pass\n    \n    def on_chain_start(self, ...):\n        \"\"\"Called when a chain starts execution.\"\"\"\n        pass\n    \n    def on_chain_end(self, ...):\n        \"\"\"Called when a chain finishes execution.\"\"\"\n        pass\n    \n    def on_tool_start(self, ...):\n        \"\"\"Called when a tool starts execution.\"\"\"\n        pass\n    \n    def on_tool_end(self, ...):\n        \"\"\"Called when a tool finishes execution.\"\"\"\n        pass\n```\n\n资料来源：[libs/core/langchain_core/callbacks/base.py](libs/core/langchain_core/callbacks/base.py)\n\n### CallbackManager\n\nThe `CallbackManager` coordinates multiple callback handlers and ensures proper event propagation:\n\n```mermaid\nsequenceDiagram\n    participant App as Application\n    participant CM as CallbackManager\n    participant H1 as Handler 1\n    participant H2 as Handler 2\n    participant LC as LangChain Component\n\n    App->>CM: add_handler(handler)\n    App->>LC: invoke(inputs, callbacks=CM)\n    LC->>CM: on_chain_start(...)\n    CM->>H1: on_chain_start(...)\n    CM->>H2: on_chain_start(...)\n    LC->>CM: on_llm_start(...)\n    CM->>H1: on_llm_start(...)\n    CM->>H2: on_llm_start(...)\n    LC->>CM: on_llm_end(...)\n    CM->>H1: on_llm_end(...)\n    CM->>H2: on_llm_end(...)\n    LC->>CM: on_chain_end(...)\n    CM->>H1: on_chain_end(...)\n    CM->>H2: on_chain_end(...)\n```\n\nKey features of `CallbackManager`:\n\n- Manages a list of registered handlers\n- Supports handler persistence via `copy()` method\n- Provides context management for nested callbacks\n- Handles both sync and async 
handlers transparently\n\n资料来源：[libs/core/langchain_core/callbacks/manager.py](libs/core/langchain_core/callbacks/manager.py)\n\n### Callback Configuration Options\n\n| Parameter | Type | Description |\n|-----------|------|-------------|\n| `tags` | `list[str]` | Tags for filtering callbacks |\n| `inheritable` | `bool` | Whether callbacks propagate to child runs |\n| `metadata` | `dict` | Additional context metadata |\n\n## Tracing Infrastructure\n\n### Overview\n\nThe tracing subsystem extends the callback system to provide persistent, exportable traces of LangChain executions. Tracers are specialized callback handlers that serialize execution data for external systems like LangSmith.\n\n```mermaid\ngraph LR\n    A[LangChain Execution] --> B[Tracer Base]\n    B --> C[Core Tracer]\n    B --> D[Event Stream Tracer]\n    C --> E[LangChain Tracers]\n    D --> F[Async Event Streaming]\n    E --> G[LangSmith]\n    E --> H[Other Exporters]\n```\n\n资料来源：[libs/core/langchain_core/tracers/__init__.py](libs/core/langchain_core/tracers/__init__.py)\n\n### Tracer Base Class\n\nLocated in `libs/core/langchain_core/tracers/base.py`, the base tracer provides:\n\n| Feature | Description |\n|---------|-------------|\n| Run tracking | Maintains state for all active and completed runs |\n| Event serialization | Converts callback events to exportable format |\n| Parent-child relationships | Manages nested execution traces |\n| Persistence layer | Abstract interface for storing traces |\n\n### Event Stream Tracer\n\nThe `EventStreamTracer` provides an async-native interface for streaming trace events:\n\n```python\n# Conceptual implementation\nclass EventStreamTracer:\n    \"\"\"Tracer that supports async event streaming.\"\"\"\n    \n    async def astream_events(self, ...):\n        \"\"\"Async generator for trace events.\"\"\"\n        while True:\n            event = await self._get_next_event()\n            if event is None:\n                break\n            yield 
event\n```\n\nThis tracer enables real-time event streaming capabilities for applications requiring live observability.\n\n资料来源：[libs/core/langchain_core/tracers/event_stream.py](libs/core/langchain_core/tracers/event_stream.py)\n\n## Integration Patterns\n\n### Passing Callbacks to Components\n\nCallbacks can be passed to LangChain components at initialization or invocation time:\n\n```python\nfrom langchain_core.callbacks import CallbackManager, BaseCallbackHandler\n\nclass MyHandler(BaseCallbackHandler):\n    def on_chain_start(self, serialized, inputs, **kwargs):\n        print(f\"Chain starting with inputs: {inputs}\")\n\nhandler = MyHandler()\ncallback_manager = CallbackManager(handlers=[handler])\n\n# Pass to model\nresponse = model.invoke(\"Hello\", config={\"callbacks\": callback_manager})\n\n# Pass to chain\nchain.invoke(input, config={\"callbacks\": callback_manager})\n```\n\n### Using Tracers for Persistence\n\n```python\nfrom langchain_core.tracers import LangChainTracer\n\ntracer = LangChainTracer(project_name=\"my-project\")\n\nchain.invoke(\n    input,\n    config={\"callbacks\": [tracer]}\n)\n```\n\n## Data Flow\n\n```mermaid\ngraph TD\n    subgraph Execution\n        A[User Code] --> B[Chain/Model]\n        B --> C[CallbackManager]\n    end\n    \n    subgraph EventPropagation\n        C --> D[on_chain_start]\n        D --> E[on_llm_start]\n        E --> F[LLM Execution]\n        F --> G[on_llm_end]\n        G --> H[on_chain_end]\n    end\n    \n    subgraph Observability\n        H --> I[Tracer]\n        I --> J[Run Tree]\n        J --> K[Export/Store]\n        K --> L[LangSmith/Other]\n    end\n    \n    style L fill:#fff3e0\n    style K fill:#fff3e0\n```\n\n## Best Practices\n\n1. **Handler Isolation**: Create separate callback handlers for different concerns (monitoring, tracing, debugging)\n\n2. **Performance Consideration**: Avoid expensive operations in synchronous callback handlers; prefer async handlers for I/O-bound operations\n\n3. 
**Memory Management**: Use `copy()` on `CallbackManager` when spawning child executions to prevent handler leakage\n\n4. **Tag-Based Filtering**: Use tags to selectively enable/disable callbacks in different environments\n\n5. **Tracer Configuration**: Configure tracers with appropriate project names and metadata for proper organization in observability platforms\n\n## Related Documentation\n\n- [LangChain Core API Reference](https://reference.langchain.com/python/langchain_core)\n- [LangChain Documentation](https://docs.langchain.com/oss/python/langchain/overview)\n- [LangSmith Integration](https://docs.langchain.com/langsmith/home)\n\n## Package Structure\n\nThe callbacks and tracing infrastructure is maintained in the core library:\n\n```\nlibs/core/langchain_core/\n├── callbacks/\n│   ├── __init__.py      # Public exports\n│   ├── base.py          # BaseCallbackHandler\n│   └── manager.py       # CallbackManager, AsyncCallbackManager\n└── tracers/\n    ├── __init__.py      # Public exports\n    ├── base.py          # Base tracer implementations\n    └── event_stream.py  # Async event streaming support\n```\n\n资料来源：[libs/README.md](libs/README.md)\n\n---\n\n<a id='chat-models-embeddings'></a>\n\n## Chat Models and Embeddings\n\n### 相关页面\n\n相关主题：[Introduction to LangChain](#introduction), [Messages and Prompt System](#messages-system)\n\n<details>\n<summary>相关源码文件</summary>\n\n以下源码文件用于生成本页说明：\n\n- [libs/core/langchain_core/language_models/chat_models.py](https://github.com/langchain-ai/langchain/blob/main/libs/core/langchain_core/language_models/chat_models.py)\n- [libs/core/langchain_core/language_models/llms.py](https://github.com/langchain-ai/langchain/blob/main/libs/core/langchain_core/language_models/llms.py)\n- [libs/core/langchain_core/embeddings/__init__.py](https://github.com/langchain-ai/langchain/blob/main/libs/core/langchain_core/embeddings/__init__.py)\n- 
[libs/core/langchain_core/embeddings/embeddings.py](https://github.com/langchain-ai/langchain/blob/main/libs/core/langchain_core/embeddings/embeddings.py)\n- [libs/core/langchain_core/language_models/model_profile.py](https://github.com/langchain-ai/langchain/blob/main/libs/core/langchain_core/language_models/model_profile.py)\n</details>\n\n# Chat Models and Embeddings\n\n## Overview\n\nChat Models and Embeddings are foundational components within the LangChain ecosystem that enable interaction with large language models (LLMs) for conversational and vector-based AI applications. These abstractions provide standardized interfaces for working with different model providers while maintaining flexibility for customization.\n\nLangChain provides two primary categories of language model interfaces:\n\n| Category | Purpose | Key Classes |\n|----------|---------|-------------|\n| **Chat Models** | Handle conversational interactions with message-based APIs | `BaseChatModel`, `BaseMessage` |\n| **Embeddings** | Convert text into vector representations for similarity search | `Embeddings`, `BaseEmbeddings` |\n| **LLMs** | Traditional text-in, text-out language model interfaces | `BaseLLM`, `LLM` |\n\n资料来源：[libs/core/langchain_core/language_models/chat_models.py](https://github.com/langchain-ai/langchain/blob/main/libs/core/langchain_core/language_models/chat_models.py)\n\n## Architecture\n\n### High-Level Component Hierarchy\n\n```mermaid\ngraph TD\n    A[LangChain Core] --> B[Language Models]\n    A --> C[Embeddings]\n    B --> D[BaseChatModel]\n    B --> E[BaseLLM]\n    D --> F[Provider Implementations]\n    E --> G[Provider Implementations]\n    C --> H[Embeddings Interface]\n    H --> I[Embedding Providers]\n    \n    F --> J[OpenAI Chat]\n    F --> K[Anthropic Chat]\n    F --> L[Ollama Chat]\n    \n    I --> M[OpenAI Embeddings]\n    I --> N[HuggingFace Embeddings]\n    I --> O[Ollama Embeddings]\n```\n\n### Chat Model Initialization Pattern\n\nThe preferred method for 
initializing chat models in LangChain is through the `init_chat_model` function, which provides a unified initialization interface across providers.\n\n```python\nfrom langchain.chat_models import init_chat_model\n\n# Initialize with provider prefix\nmodel = init_chat_model(\"openai:gpt-4o-mini\", temperature=0)\n\n# Invoke the model\nresult = model.invoke(\"Hello, world!\")\n```\n\n资料来源：[README.md](https://github.com/langchain-ai/langchain/blob/main/README.md)\n\n## Chat Models\n\n### BaseChatModel Interface\n\nThe `BaseChatModel` class serves as the foundation for all chat model implementations in LangChain. It extends `BaseLanguageModel` and provides standardized methods for generating responses from conversation histories.\n\n#### Key Methods\n\n| Method | Purpose |\n|--------|---------|\n| `invoke()` | Synchronous invocation with input processing |\n| `ainvoke()` | Asynchronous invocation |\n| `bind_tools()` | Attach tools/functions to the model for tool calling |\n| `with_structured_output()` | Constrain output to follow a schema |\n\nThe `bind_tools` method enables models to call external tools, which is fundamental for building agentic applications:\n\n```python\nmodel = ChatOpenAI(model=\"gpt-4o-mini\", temperature=0)\ntools = toolkit.get_tools()\napi_request_chain = API_URL_PROMPT.partial(api_docs=api_spec) | model.bind_tools(tools, tool_choice=\"any\")\n```\n\n资料来源：[libs/langchain/langchain_classic/chains/api/base.py](https://github.com/langchain-ai/langchain/blob/main/libs/langchain/langchain_classic/chains/api/base.py)\n\n### Message Types\n\nChat models in LangChain work with structured message objects that represent different roles in a conversation:\n\n| Message Type | Role | Typical Use |\n|--------------|------|-------------|\n| `SystemMessage` | System | Instructions and context |\n| `HumanMessage` | User | User input |\n| `AIMessage` | Assistant | Model responses |\n| `ToolMessage` | Tool | Tool execution results |\n\n### State Management in 
Chat Applications\n\nWhen building chat applications with stateful interactions, LangChain uses typed state dictionaries with LangGraph:\n\n```python\nclass ChainState(TypedDict):\n    \"\"\"LangGraph state.\"\"\"\n    messages: Annotated[Sequence[BaseMessage], add_messages]\n\nasync def acall_model(state: ChainState, config: RunnableConfig):\n    response = await model.ainvoke(state[\"messages\"], config)\n    return {\"messages\": [response]}\n```\n\n资料来源：[libs/langchain/langchain_classic/chains/api/base.py](https://github.com/langchain-ai/langchain/blob/main/libs/langchain/langchain_classic/chains/api/base.py)\n\n## Embeddings\n\n### Embeddings Interface\n\nThe `Embeddings` interface provides a standardized way to convert text into numerical vector representations. These vectors enable semantic similarity calculations, retrieval-augmented generation (RAG), and vector database operations.\n\n#### Core Methods\n\n| Method | Description |\n|--------|-------------|\n| `embed_query()` | Embed a single query string |\n| `embed_documents()` | Embed multiple document strings |\n| `aembed_query()` | Async version of embed_query |\n| `aembed_documents()` | Async version of embed_documents |\n\n资料来源：[libs/core/langchain_core/embeddings/embeddings.py](https://github.com/langchain-ai/langchain/blob/main/libs/core/langchain_core/embeddings/embeddings.py)\n\n### Integration with Vector Stores\n\nEmbeddings work seamlessly with LangChain's vector store integrations. 
The HuggingFace partner package provides specific implementations:\n\n```bash\n# Install with full dependencies for local inference\npip install langchain-huggingface[full]\n```\n\nThe `[full]` extra includes `sentence-transformers>=5.2.0` and `transformers>=5.0.0`, which are required for `HuggingFaceEmbeddings`.\n\n资料来源：[libs/partners/huggingface/README.md](https://github.com/langchain-ai/langchain/blob/main/libs/partners/huggingface/README.md)\n\n## Model Providers\n\nLangChain provides partner packages for various LLM providers, each following consistent patterns for installation and usage:\n\n| Provider | Package | Install Command |\n|----------|---------|-----------------|\n| OpenAI | `langchain-openai` | `pip install langchain-openai` |\n| Anthropic | `langchain-anthropic` | `pip install langchain-anthropic` |\n| Ollama | `langchain-ollama` | `pip install langchain-ollama` |\n| HuggingFace | `langchain-huggingface` | `pip install langchain-huggingface` |\n| Groq | `langchain-groq` | `pip install langchain-groq` |\n| DeepSeek | `langchain-deepseek` | `pip install langchain-deepseek` |\n| Perplexity | `langchain-perplexity` | `pip install langchain-perplexity` |\n| xAI | `langchain-xai` | `pip install langchain-xai` |\n| Chroma | `langchain-chroma` | `pip install langchain-chroma` |\n| Exa | `langchain-exa` | `pip install langchain-exa` |\n| OpenRouter | `langchain-openrouter` | `pip install langchain-openrouter` |\n\nEach provider package contains integration classes that implement the base interfaces described above.\n\n资料来源：[libs/partners/openai/README.md](https://github.com/langchain-ai/langchain/blob/main/libs/partners/openai/README.md), [libs/partners/anthropic/README.md](https://github.com/langchain-ai/langchain/blob/main/libs/partners/anthropic/README.md), [libs/partners/ollama/README.md](https://github.com/langchain-ai/langchain/blob/main/libs/partners/ollama/README.md)\n\n## Testing Chat Models\n\nLangChain provides standardized test infrastructure 
through the `langchain-tests` package to ensure consistent behavior across implementations:\n\n```python\nfrom typing import Type\nimport pytest\nfrom langchain_core.language_models import BaseChatModel\nfrom langchain_tests.unit_tests import ChatModelUnitTests\n\nfrom langchain_parrot_chain import ChatParrotChain\n\nclass TestParrotChainStandard(ChatModelUnitTests):\n    @pytest.fixture\n    def chat_model_class(self) -> Type[BaseChatModel]:\n        return ChatParrotChain\n```\n\n#### Configurable Test Fixtures\n\n| Fixture | Required | Default | Description |\n|---------|----------|---------|-------------|\n| `chat_model_class` | Yes | — | The chat model class to test |\n| `chat_model_params` | No | `{}` | Constructor keyword arguments |\n| `chat_model_has_tool_calling` | No | `hasattr(class, 'bind_tools')` | Tool calling capability |\n| `chat_model_has_structured_output` | No | `hasattr(class, 'with_structured_output')` | Structured output support |\n\n资料来源：[libs/standard-tests/README.md](https://github.com/langchain-ai/langchain/blob/main/libs/standard-tests/README.md)\n\n## Deprecated Patterns\n\nThe `langchain-community` package previously contained community-contributed integrations, but this pattern is being deprecated in favor of official partner packages:\n\n```python\n@deprecated(\n    since=\"0.2.13\",\n    removal=\"2.0.0\",\n    alternative=\"langchain.agents.create_agent\",\n)\nclass APIChain(Chain):\n    \"\"\"Chain that makes API calls and summarizes the responses.\"\"\"\n```\n\nNew integrations should be implemented as standalone partner packages following the patterns established in `libs/partners/*`.\n\n资料来源：[libs/langchain/langchain_classic/chains/api/base.py](https://github.com/langchain-ai/langchain/blob/main/libs/langchain/langchain_classic/chains/api/base.py)\n\n## Quick Reference\n\n### Initialization\n\n```python\nfrom langchain.chat_models import init_chat_model\n\n# Basic initialization\nmodel = 
init_chat_model(\"provider:model-name\")\n\n# With parameters\nmodel = init_chat_model(\"openai:gpt-4o-mini\", temperature=0)\n```\n\n### Invocation\n\n```python\n# Synchronous\nresponse = model.invoke(\"Your message here\")\n\n# Asynchronous\nresponse = await model.ainvoke(\"Your message here\")\n\n# With chat history\nresponse = model.invoke([SystemMessage(content=\"You are helpful\"), \n                         HumanMessage(content=\"Hello\")])\n```\n\n### Tool Binding\n\n```python\nmodel_with_tools = model.bind_tools(tools)\nresponse = model_with_tools.invoke(\"Use tool X to find information\")\n```\n\n## Further Reading\n\n- [API Reference](https://reference.langchain.com/python) - Complete API documentation\n- [LangChain Docs](https://docs.langchain.com/oss/python/langchain/overview) - Conceptual guides and tutorials\n- [LangGraph](https://docs.langchain.com/oss/python/langgraph/overview) - Building controllable agent workflows\n- [Contributing Guide](https://docs.langchain.com/oss/python/contributing/overview) - How to add new integrations\n\n---\n\n<a id='agents-framework'></a>\n\n## Agents Framework\n\n### 相关页面\n\n相关主题：[Tools and Toolkits](#tools-integrations), [Runnable and Execution Model](#runnable-execution)\n\n<details>\n<summary>相关源码文件</summary>\n\n以下源码文件用于生成本页说明：\n\n- [libs/langchain/langchain_classic/agents/__init__.py](https://github.com/langchain-ai/langchain/blob/main/libs/langchain/langchain_classic/agents/__init__.py)\n- [libs/langchain/langchain_classic/agents/agent.py](https://github.com/langchain-ai/langchain/blob/main/libs/langchain/langchain_classic/agents/agent.py)\n- [libs/langchain/langchain_classic/agents/react/__init__.py](https://github.com/langchain-ai/langchain/blob/main/libs/langchain/langchain_classic/agents/react/__init__.py)\n- [libs/langchain/langchain_classic/agents/structured_chat/__init__.py](https://github.com/langchain-ai/langchain/blob/main/libs/langchain/langchain_classic/agents/structured_chat/__init__.py)\n- 
[libs/langchain/langchain_classic/agents/openai_functions_agent/__init__.py](https://github.com/langchain-ai/langchain/blob/main/libs/langchain/langchain_classic/agents/openai_functions_agent/__init__.py)\n- [libs/langchain/langchain_classic/agents/agent_toolkits/__init__.py](https://github.com/langchain-ai/langchain/blob/main/libs/langchain/langchain_classic/agents/agent_toolkits/__init__.py)\n</details>\n\n# Agents Framework\n\nThe LangChain Agents Framework is a core component of the LangChain ecosystem that enables language models to reason about and take actions through a systematic loop of observation, thought, and execution. The framework provides abstractions for building autonomous agents that can use tools, make decisions, and complete complex multi-step tasks.\n\n## Architecture Overview\n\nLangChain's agent architecture follows a cyclical pattern where the agent receives inputs, decides on actions using an LLM, executes those actions via tools, and processes the results to determine the next step.\n\n```mermaid\ngraph TD\n    A[User Input] --> B[Agent]\n    B --> C[LLM Decision]\n    C --> D{Action Type}\n    D -->|Tool Call| E[Tool Execution]\n    D -->|Final Answer| F[Output]\n    E --> G[Observation]\n    G --> B\n    F --> H[User]\n    \n    style B fill:#e1f5fe\n    style C fill:#fff3e0\n    style E fill:#f3e5f5\n```\n\n## Core Agent Classes\n\nThe agent system is built on a base `Agent` class that defines the interface for all agent implementations. 
LangChain provides several pre-built agent types for different use cases.\n\n### Base Agent Interface\n\nAll agents inherit from the base `Agent` class which provides:\n\n- Chain integration for LLM calls\n- Tool binding and management\n- Output parsing for action extraction\n- State management across agent steps\n\n### Agent Types\n\n| Agent Type | Description | Use Case |\n|------------|-------------|----------|\n| `ReAct` | Reasoning + Acting agent | General purpose tool use |\n| `Structured Chat` | JSON-structured tool calls | Structured tool interfaces |\n| `OpenAI Functions` | OpenAI function calling format | OpenAI function calling |\n| `Toolkits` | Pre-configured tool bundles | Domain-specific tasks |\n\n资料来源：[libs/langchain/langchain_classic/agents/__init__.py](https://github.com/langchain-ai/langchain/blob/main/libs/langchain/langchain_classic/agents/__init__.py)\n\n## Tool Integration\n\nAgents interact with external systems through tools. Tools are abstractions that wrap functionality and expose it to the LLM in a structured format.\n\n### Tool Structure\n\nEach tool typically consists of:\n\n- **Name**: Unique identifier for the tool\n- **Description**: Natural language description for LLM understanding\n- **Parameters**: Schema defining required and optional inputs\n- **Execute Function**: The actual implementation\n\n### Toolkits\n\nToolkits provide pre-configured bundles of related tools for specific domains:\n\n| Toolkit | Purpose |\n|---------|---------|\n| `RequestsToolkit` | HTTP requests for API interactions |\n| `SQLDatabaseToolkit` | Database queries |\n| Custom Toolkits | Domain-specific tool collections |\n\n资料来源：[libs/langchain/langchain_classic/agents/agent_toolkits/__init__.py](https://github.com/langchain-ai/langchain/blob/main/libs/langchain/langchain_classic/agents/agent_toolkits/__init__.py)\n\n## Agent Execution Loop\n\nThe agent execution follows a ReAct-style (Reasoning + Acting) pattern:\n\n```mermaid\nsequenceDiagram\n    
participant User\n    participant Agent\n    participant LLM\n    participant Tools\n    \n    User->>Agent: Input Query\n    Agent->>LLM: Prompt + Tools\n    LLM-->>Agent: Thought + Action\n    Agent->>Tools: Execute Tool Call\n    Tools-->>Agent: Observation\n    Agent->>LLM: Observation + Next Decision\n    loop Until Final Answer\n        LLM-->>Agent: Next Action\n        Agent->>Tools: Execute Tool Call\n        Tools-->>Agent: Observation\n    end\n    Agent-->>User: Final Response\n```\n\n## Agent Creation API\n\nThe recommended way to create agents is through the `create_agent` factory function:\n\n```python\nfrom langchain.agents import create_agent\n\n# Create an agent with specified model and tools\nagent = create_agent(\n    model=\"openai:gpt-4\",\n    tools=[...],\n    system_message=\"You are a helpful assistant.\"\n)\n```\n\n资料来源：[libs/langchain/langchain_classic/agents/agent.py](https://github.com/langchain-ai/langchain/blob/main/libs/langchain/langchain_classic/agents/agent.py)\n\n## Agent Types Detail\n\n### ReAct Agent\n\nThe ReAct (Reasoning + Acting) agent implements a reasoning loop where the model thinks through each step before taking action.\n\n**Characteristics:**\n\n- Explicit thought process before each action\n- Observation processing after tool execution\n- Flexible tool usage without strict schema requirements\n\n资料来源：[libs/langchain/langchain_classic/agents/react/__init__.py](https://github.com/langchain-ai/langchain/blob/main/libs/langchain/langchain_classic/agents/react/__init__.py)\n\n### Structured Chat Agent\n\nDesigned for agents that interact with tools using structured JSON inputs, this agent type excels when working with tools that have complex parameter schemas.\n\n**Characteristics:**\n\n- JSON-structured tool inputs\n- Strong typing for tool parameters\n- Schema validation for tool 
calls\n\n资料来源：[libs/langchain/langchain_classic/agents/structured_chat/__init__.py](https://github.com/langchain-ai/langchain/blob/main/libs/langchain/langchain_classic/agents/structured_chat/__init__.py)\n\n### OpenAI Functions Agent\n\nOptimized for OpenAI's function calling API, this agent type maps LangChain tools to OpenAI function specifications.\n\n**Characteristics:**\n\n- Direct mapping to OpenAI function calling format\n- Tool choice controls (auto, any, none)\n- Streaming support for responses\n\n资料来源：[libs/langchain/langchain_classic/agents/openai_functions_agent/__init__.py](https://github.com/langchain-ai/langchain/blob/main/libs/langchain/langchain_classic/agents/openai_functions_agent/__init__.py)\n\n## Integration with LangGraph\n\nFor complex agent workflows, LangChain Agents integrate seamlessly with LangGraph for building stateful, controllable agent applications:\n\n```python\nfrom langgraph.prebuilt import ToolNode\nfrom langgraph.graph import StateGraph\n\nclass AgentState(TypedDict):\n    messages: Annotated[Sequence[BaseMessage], add_messages]\n\ngraph_builder = StateGraph(AgentState)\ngraph_builder.add_node(\"call_tool\", some_agent_node)\ngraph_builder.add_node(\"execute_tool\", ToolNode(tools))\ngraph_builder.add_edge(\"call_tool\", \"execute_tool\")\n```\n\n## Configuration Options\n\n| Parameter | Type | Description |\n|-----------|------|-------------|\n| `model` | `str` or `BaseLanguageModel` | LLM to power the agent |\n| `tools` | `List[BaseTool]` | Available tools for the agent |\n| `system_message` | `str` | Instructions for agent behavior |\n| `max_iterations` | `int` | Maximum agent steps before termination |\n| `timeout` | `float` | Timeout for agent execution |\n\n## Deprecated Patterns\n\nThe `APIChain` class has been deprecated in favor of the new agent system:\n\n```python\n@deprecated(\n    since=\"0.2.13\",\n    removal=\"2.0.0\",\n    alternative=\"langchain.agents.create_agent\",\n)\nclass APIChain(Chain):\n    
\"\"\"Chain that makes API calls and summarizes responses.\"\"\"\n```\n\nMigration to the new agent system is recommended for all use cases.\n\n## Security Considerations\n\nWhen deploying agents:\n\n- **Input Validation**: Validate all user inputs before processing\n- **Tool Permissions**: Control which tools are available to agents\n- **Network Access**: Limit agent network capabilities appropriately\n- **Rate Limiting**: Implement rate limiting to prevent abuse\n\n## Summary\n\nThe LangChain Agents Framework provides a flexible, extensible system for building LLM-powered agents that can:\n\n1. Reason about user queries\n2. Select appropriate tools for task completion\n3. Execute actions and process results\n4. Iterate until goals are achieved\n\nThe framework supports multiple agent types optimized for different use cases, with seamless integration into the broader LangChain ecosystem including LangGraph for complex workflows.\n\n---\n\n<a id='tools-integrations'></a>\n\n## Tools and Toolkits\n\n### 相关页面\n\n相关主题：[Agents Framework](#agents-framework)\n\n<details>\n<summary>相关源码文件</summary>\n\n以下源码文件用于生成本页说明：\n\n- [libs/core/langchain_core/tools/__init__.py](https://github.com/langchain-ai/langchain/blob/main/libs/core/langchain_core/tools/__init__.py)\n- [libs/core/langchain_core/tools/base.py](https://github.com/langchain-ai/langchain/blob/main/libs/core/langchain_core/tools/base.py)\n- [libs/core/langchain_core/tools/structured.py](https://github.com/langchain-ai/langchain/blob/main/libs/core/langchain_core/tools/structured.py)\n- [libs/core/langchain_core/tools/render.py](https://github.com/langchain-ai/langchain/blob/main/libs/core/langchain_core/tools/render.py)\n- [libs/langchain/langchain_classic/tools/__init__.py](https://github.com/langchain-ai/langchain/blob/main/libs/langchain/langchain_classic/tools/__init__.py)\n- 
[libs/langchain/langchain_classic/tools/base.py](https://github.com/langchain-ai/langchain/blob/main/libs/langchain/langchain_classic/tools/base.py)\n</details>\n\n# Tools and Toolkits\n\nLangChain provides a flexible and extensible system for defining, managing, and executing tools that enable Large Language Models (LLMs) to interact with external systems, APIs, and data sources. The Tools and Toolkits module forms the foundational building block for agent-based architectures, allowing models to perform actions and retrieve information beyond their training data.\n\n## Overview\n\nTools in LangChain serve as the interface between LLMs and external capabilities. They provide a standardized way to:\n\n- Define callable functions with clear input/output schemas\n- Control what operations an agent can perform\n- Handle serialization, deserialization, and tool calling protocols\n- Integrate with various third-party services and APIs\n\nThe system is designed around the principle of structured tool definitions, where each tool specifies its name, description, and parameter schema, enabling LLMs to understand when and how to invoke specific tools.\n\n## Architecture\n\n```mermaid\ngraph TD\n    A[Agent / LLM] -->|Tool Calls| B[Tool Executor]\n    B --> C[BaseTool]\n    C --> D[StructuredTool]\n    C --> E[ToolKit]\n    D --> F[Function Definition]\n    E --> G[Tool Collection]\n    B -->|Results| A\n    \n    style A fill:#e1f5ff\n    style B fill:#fff3e0\n    style C fill:#e8f5e9\n```\n\n## Core Components\n\n### BaseTool\n\nThe `BaseTool` class serves as the foundation for all tools in LangChain. 
It provides the essential interface that all tool implementations must follow.\n\n| Attribute | Type | Description |\n|-----------|------|-------------|\n| `name` | `str` | Unique identifier for the tool |\n| `description` | `str` | Human-readable description for LLM understanding |\n| `args_schema` | `Type[BaseModel]` | Pydantic model defining input parameters |\n| `return_schema` | `Optional[Type[BaseModel]]` | Schema for return values |\n| `tags` | `Optional[List[str]]` | Categorization tags |\n| `metadata` | `Optional[Dict[str, Any]]` | Additional metadata |\n\n**Key Methods:**\n\n```python\ndef invoke(self, input: Union[str, Dict], config: Optional[CallbackManager] = None) -> Any\nasync def ainvoke(self, input: Union[str, Dict], config: Optional[AsyncCallbackManager] = None) -> Any\n```\n\n资料来源：[libs/core/langchain_core/tools/base.py](https://github.com/langchain-ai/langchain/blob/main/libs/core/langchain_core/tools/base.py)\n\n### StructuredTool\n\n`StructuredTool` extends `BaseTool` with enhanced parameter handling capabilities. 
It supports complex input schemas and provides better type safety for tool definitions.\n\n**Key Features:**\n\n- Pydantic-based input validation\n- JSON schema generation for LLM tool calling protocols\n- Support for nested objects and arrays\n- Custom validation logic via model validators\n\n资料来源：[libs/core/langchain_core/tools/structured.py](https://github.com/langchain-ai/langchain/blob/main/libs/core/langchain_core/tools/structured.py)\n\n## Tool Definition Pattern\n\n```python\nfrom langchain_core.tools import StructuredTool\nfrom pydantic import BaseModel, Field\n\nclass SearchInput(BaseModel):\n    query: str = Field(description=\"The search query to look up\")\n    limit: int = Field(default=10, description=\"Maximum number of results\")\n\ndef search_function(query: str, limit: int = 10) -> str:\n    \"\"\"Perform a web search and return results.\"\"\"\n    # Implementation here\n    pass\n\nsearch_tool = StructuredTool(\n    name=\"web_search\",\n    description=\"Search the web for information about a given topic\",\n    func=search_function,\n    args_schema=SearchInput,\n)\n```\n\n## Toolkits\n\nToolkits provide a convenient way to group related tools together. 
They bundle multiple tools that are commonly used together for specific use cases.\n\n### Toolkit Structure\n\n| Component | Description |\n|-----------|-------------|\n| `tools` | List of `BaseTool` instances |\n| `name` | Toolkit identifier |\n| `description` | Summary of toolkit purpose |\n\n### Creating Custom Toolkits\n\n```python\nfrom langchain_core.tools import BaseTool, StructuredTool\nfrom langchain_core.tools import BaseToolkit\n\nclass CustomToolkit(BaseToolkit):\n    def __init__(self) -> None:\n        super().__init__()\n        self.tools = [\n            tool_a,\n            tool_b,\n            tool_c,\n        ]\n    \n    def get_tools(self) -> list[BaseTool]:\n        return self.tools\n```\n\n资料来源：[libs/langchain/langchain_classic/tools/base.py](https://github.com/langchain-ai/langchain/blob/main/libs/langchain/langchain_classic/tools/base.py)\n\n## Tool Rendering\n\nTool rendering converts tool definitions into formats consumable by different LLM providers and protocols.\n\n```mermaid\ngraph LR\n    A[BaseTool] -->|convert| B[OpenAI Format]\n    A -->|convert| C[Anthropic Format]\n    A -->|convert| D[JSON Schema]\n    A -->|convert| E[Custom Format]\n```\n\n### Render Modes\n\n| Mode | Description |\n|------|-------------|\n| `tool_call` | Renders tools for OpenAI-style tool calling |\n| `json_schema` | Outputs JSON Schema representation |\n| `anthropic` | Format compatible with Anthropic Claude models |\n\n资料来源：[libs/core/langchain_core/tools/render.py](https://github.com/langchain-ai/langchain/blob/main/libs/core/langchain_core/tools/render.py)\n\n## Tool Execution Flow\n\n```mermaid\nsequenceDiagram\n    participant Agent\n    participant Executor\n    participant Tool\n    participant External\n    \n    Agent->>Executor: invoke(tool_name, parameters)\n    Executor->>Tool: validate_and_bind_params(input)\n    Tool->>External: execute_action()\n    External-->>Tool: result\n    Tool-->>Executor: validated_result\n    Executor-->>Agent: 
ToolOutput\n```\n\n## Integration with Agents\n\nTools are bound to agents through the agent construction process. The binding specifies which tools an agent can access and how they should be invoked.\n\n**Common Patterns:**\n\n1. **Direct Binding**: Single tool passed to agent\n2. **ToolKit Binding**: Multiple related tools from a toolkit\n3. **Dynamic Binding**: Tools selected at runtime based on context\n\n```python\nfrom langchain.agents import create_agent\nfrom langchain_core.tools import StructuredTool\n\nagent = create_agent(\n    llm=model,\n    tools=[search_tool, calculator_tool],\n    prompt=system_prompt,\n)\n```\n\n## Built-in Tool Categories\n\n### Core Tools\n\nLocated in `libs/langchain/langchain_classic/tools/`:\n\n| Tool | Purpose |\n|------|---------|\n| `Search` | Web search capabilities |\n| `Wikipedia` | Wikipedia API integration |\n| `Calculator` | Mathematical operations |\n| `FileSystem` | Local file operations |\n\n### API Integration Tools\n\nLangChain provides specialized tools for API interactions:\n\n```python\nfrom langchain_classic.chains.api.base import APIChain\n\n# APIChain is deprecated in favor of agent-based approaches\n# but demonstrates the pattern of API tool integration\n```\n\n资料来源：[libs/langchain/langchain_classic/chains/api/base.py](https://github.com/langchain-ai/langchain/blob/main/libs/langchain/langchain_classic/chains/api/base.py)\n\n## Configuration Options\n\n### Tool Configuration Parameters\n\n| Parameter | Type | Default | Description |\n|-----------|------|---------|-------------|\n| `handle_parsing_errors` | `bool` | `True` | Handle LLM output parsing errors |\n| `return_direct` | `bool` | `False` | Return tool output directly to LLM |\n| `verbose` | `bool` | `False` | Enable verbose logging |\n| `max_retries` | `int` | `3` | Maximum retry attempts |\n| `timeout` | `Optional[float]` | `None` | Execution timeout in seconds |\n\n## Error Handling\n\nTools implement comprehensive error handling to manage 
failures gracefully:\n\n```python\ntry:\n    result = tool.invoke(input_dict)\nexcept ValidationError as e:\n    # Handle input validation failures\n    return ErrorOutput(error=str(e))\nexcept ExecutionError as e:\n    # Handle execution failures\n    return ErrorOutput(error=str(e))\nexcept TimeoutError:\n    # Handle timeout\n    return ErrorOutput(error=\"Tool execution timed out\")\n```\n\n## Best Practices\n\n### 1. Clear Tool Descriptions\nWrite descriptions that help the LLM understand when and how to use each tool:\n\n```python\ndescription = \"\"\"\n    Search for information on the web.\n    \n    Use this tool when:\n    - User asks about current events or facts\n    - User requests information not in training data\n    - User asks about specific entities that need verification\n    \n    Parameters:\n    - query: The search query (required)\n    - limit: Maximum results to return (optional, default: 5)\n    \"\"\"\n```\n\n### 2. Comprehensive Schemas\nDefine complete input schemas with descriptions:\n\n```python\nclass QueryInput(BaseModel):\n    query: str = Field(\n        description=\"The search query string. Be specific and include relevant keywords.\",\n        min_length=1,\n        max_length=500,\n    )\n    source: Literal[\"web\", \"news\", \"scholar\"] = Field(\n        default=\"web\",\n        description=\"The type of search to perform\",\n    )\n```\n\n### 3. Proper Error Messages\nReturn meaningful error messages that help the agent recover:\n\n```python\ndef execute_tool(self, params: Dict) -> str:\n    try:\n        return self._do_execution(params)\n    except ValueError as e:\n        return f\"Invalid input: {str(e)}. Please check your parameters and try again.\"\n    except ConnectionError:\n        return \"Unable to connect to the service. 
Please check your network connection.\"\n```\n\n## Testing Tools\n\nLangChain provides testing utilities for tool implementations:\n\n```python\nfrom langchain_core.tools import tool, StructuredTool\n\ndef test_tool_invoke():\n    \"\"\"Test tool invocation with valid input.\"\"\"\n    result = my_tool.invoke({\"param1\": \"value1\"})\n    assert result is not None\n\ndef test_tool_validation():\n    \"\"\"Test input validation.\"\"\"\n    from pydantic import ValidationError\n    try:\n        my_tool.invoke({\"invalid_param\": \"value\"})\n        assert False, \"Should have raised ValidationError\"\n    except ValidationError:\n        pass  # Expected behavior\n```\n\n## Package Structure\n\n```\nlangchain/\n├── libs/\n│   ├── core/\n│   │   └── langchain_core/\n│   │       └── tools/\n│   │           ├── __init__.py       # Public API exports\n│   │           ├── base.py           # BaseTool implementation\n│   │           ├── structured.py     # StructuredTool implementation\n│   │           └── render.py         # Tool rendering utilities\n│   └── langchain/\n│       └── langchain_classic/\n│           └── tools/\n│               ├── __init__.py       # Built-in tools\n│               └── base.py           # Toolkit base classes\n```\n\n## See Also\n\n- [Agents Documentation](https://docs.langchain.com/oss/python/langchain/agents)\n- [Chain Reference](https://docs.langchain.com/oss/python/langchain/chains)\n- [Integration Providers](https://docs.langchain.com/oss/python/integrations/providers)\n\n---\n\n<a id='document-loaders'></a>\n\n## Document Loaders and Text Processing\n\n### 相关页面\n\n相关主题：[Vector Stores and Retrievers](#vectorstores-retrievers), [Document Loaders and Text Processing](#document-loaders)\n\n<details>\n<summary>相关源码文件</summary>\n\n以下源码文件用于生成本页说明：\n\n- [libs/core/langchain_core/document_loaders/__init__.py](https://github.com/langchain-ai/langchain/blob/main/libs/core/langchain_core/document_loaders/__init__.py)\n- 
[libs/core/langchain_core/document_loaders/base.py](https://github.com/langchain-ai/langchain/blob/main/libs/core/langchain_core/document_loaders/base.py)\n- [libs/langchain/langchain_classic/document_loaders/__init__.py](https://github.com/langchain-ai/langchain/blob/main/libs/langchain/langchain_classic/document_loaders/__init__.py)\n- [libs/langchain/langchain_classic/document_loaders/base.py](https://github.com/langchain-ai/langchain/blob/main/libs/langchain/langchain_classic/document_loaders/base.py)\n- [libs/text-splitters/langchain_text_splitters/__init__.py](https://github.com/langchain-ai/langchain/blob/main/libs/text-splitters/langchain_text_splitters/__init__.py)\n- [libs/text-splitters/langchain_text_splitters/character.py](https://github.com/langchain-ai/langchain/blob/main/libs/text-splitters/langchain_text_splitters/character.py)\n</details>\n\n# Document Loaders and Text Processing\n\nDocument Loaders and Text Processing are fundamental components in the LangChain ecosystem that enable the ingestion, transformation, and preparation of various document types for downstream AI applications. These components form the entry point for most LLM-powered workflows, converting raw content from diverse sources into structured `Document` objects that can be processed by chains, agents, and retrieval systems.\n\nThe document processing pipeline in LangChain follows a two-stage approach: first, documents are loaded from external sources using specialized loaders; second, the loaded content is optionally split into smaller, semantically coherent chunks using text splitters. This separation of concerns allows for flexible composition and reuse across different application scenarios.\n\n## Architecture Overview\n\nLangChain's document processing architecture is distributed across multiple packages within the monorepo, with core abstractions defined in `langchain-core` and legacy implementations preserved in `langchain-classic`. 
Text splitting functionality is isolated in a dedicated `langchain-text-splitters` package to maintain separation of concerns and enable independent versioning.\n\nThe architecture employs a base class pattern where `BaseLoader` defines the interface contract that all concrete loader implementations must satisfy. This design enables polymorphic usage across different document sources while maintaining consistent behavior. The `BaseTransformer` abstraction extends this pattern for document transformation, and the `TextSplitter` hierarchy provides pluggable text chunking strategies.\n\n```mermaid\ngraph TD\n    A[External Sources] --> B[Document Loaders]\n    B --> C[Document Objects]\n    C --> D[Text Splitters]\n    D --> E[Chunked Documents]\n    E --> F[Vector Stores]\n    E --> G[LLM Chains]\n    E --> H[Agents]\n    \n    B1[BaseLoader] --> B2[File Loaders]\n    B1 --> B3[Web Loaders]\n    B1 --> B4[Database Loaders]\n    B1 --> B5[Cloud Storage Loaders]\n    \n    D1[TextSplitter] --> D2[CharacterTextSplitter]\n    D1 --> D3[RecursiveCharacterTextSplitter]\n    D1 --> D4[LanguageSpecificSplitter]\n```\n\n## Document Object Model\n\nAt the heart of LangChain's document processing lies the `Document` data class, which provides a standardized representation for loaded content. Each `Document` instance encapsulates the actual text content along with associated metadata that describes the source, provenance, and any auxiliary information relevant to the content.\n\nThe `Document` class is defined in `langchain-core` and serves as the universal currency for document data throughout the LangChain ecosystem. 
This unified representation enables seamless interoperability between loaders, splitters, and consumers such as vector stores and language models.\n\n### Document Class Structure\n\n| Attribute | Type | Description |\n|-----------|------|-------------|\n| `page_content` | `str` | The actual text content of the document |\n| `metadata` | `dict[str, Any]` | Associated metadata including source, page number, etc. |\n| `type` | `str` | Document type identifier (optional) |\n\nThe metadata dictionary provides flexibility for storing arbitrary key-value pairs that describe the document's origin and characteristics. Common metadata fields include the source file path, URL, creation date, author, and any domain-specific attributes that may be useful for filtering or attribution.\n\n## Base Loader Interface\n\nThe `BaseLoader` abstract class in `langchain_core.document_loaders.base` establishes the foundational interface that all document loaders must implement. This interface ensures consistency across the loader ecosystem while allowing each loader to handle the specifics of its target data source.\n\n```mermaid\ngraph LR\n    A[load] --> B[Document List]\n    A1[aload] --> B\n    C[lazy_load] --> B\n    D[alazy_load] --> E[AsyncIterator]\n    \n    style A fill:#90EE90\n    style A1 fill:#90EE90\n    style C fill:#90EE90\n    style D fill:#90EE90\n```\n\n### Core Loading Methods\n\n| Method | Return Type | Description |\n|--------|-------------|-------------|\n| `load()` | `list[Document]` | Load all documents synchronously |\n| `aload()` | `list[Document]` | Load all documents asynchronously |\n| `lazy_load()` | `Iterator[Document]` | Lazily load documents one at a time |\n| `alazy_load()` | `AsyncIterator[Document]` | Lazily load documents asynchronously |\n\nThe `lazy_load()` method provides a memory-efficient approach for processing large document collections by yielding documents one at a time rather than loading everything into memory simultaneously. 
This is particularly valuable when working with large file systems or extensive web crawls.\n\n资料来源：[libs/core/langchain_core/document_loaders/base.py](https://github.com/langchain-ai/langchain/blob/main/libs/core/langchain_core/document_loaders/base.py)\n\n## Base Transformer Interface\n\nThe `BaseTransformer` abstract class extends the loading framework to support document transformation and enrichment. Transformers operate on already-loaded documents, applying processing operations such as filtering, annotation, or format conversion.\n\nThis abstraction is particularly useful for implementing cross-cutting concerns like content cleaning, metadata extraction, and format standardization that should be applied consistently across multiple loader types.\n\n```mermaid\ngraph TD\n    A[Raw Documents] --> B[Transformer Pipeline]\n    B --> C[Transformed Documents]\n    \n    B1[Metadata Enricher] --> B\n    B2[Content Cleaner] --> B\n    B3[Format Converter] --> B\n```\n\n## Text Splitters\n\nText splitters address the practical requirement of dividing large documents into smaller, manageable chunks that satisfy token limitations and enable efficient retrieval. The `TextSplitter` class hierarchy in `langchain-text-splitters` provides multiple strategies for document segmentation.\n\nThe splitting process operates by first breaking documents into character-level chunks, then optionally applying a secondary splitting step based on the specific text structure (such as markdown headers, code blocks, or natural language sentences). This hierarchical approach ensures that chunks respect semantic boundaries while maintaining the desired size constraints.\n\n### Character Text Splitter\n\nThe `CharacterTextSplitter` provides the foundational character-based splitting implementation. 
It divides text at character boundaries, with configurable chunk size and overlap parameters to control the granularity and continuity between chunks.\n\n资料来源：[libs/text-splitters/langchain_text_splitters/character.py](https://github.com/langchain-ai/langchain/blob/main/libs/text-splitters/langchain_text_splitters/character.py)\n\n#### Key Configuration Parameters\n\n| Parameter | Type | Default | Description |\n|-----------|------|---------|-------------|\n| `separator` | `str` | `\"\\n\\n\"` | Character sequence used for splitting |\n| `chunk_size` | `int` | `4000` | Maximum characters per chunk |\n| `chunk_overlap` | `int` | `200` | Characters of overlap between chunks |\n| `length_function` | `Callable` | `len` | Function to calculate text length |\n| `is_separator_regex` | `bool` | `False` | Whether separator is a regex pattern |\n\nThe `chunk_overlap` parameter is particularly important for maintaining context continuity across chunk boundaries. By including overlapping content between adjacent chunks, downstream systems can retrieve relevant information that might span a boundary without losing critical context.\n\n```mermaid\ngraph LR\n    A[Full Document] --> B[Chunk 1<br/>chars 0-4000]\n    B --> C[Chunk 2<br/>chars 3800-7800<br/>200 char overlap]\n    C --> D[Chunk 3<br/>chars 7600-11600<br/>200 char overlap]\n    \n    style B fill:#87CEEB\n    style C fill:#87CEEB\n    style D fill:#87CEEB\n```\n\n### Recursive Character Text Splitter\n\nThe `RecursiveCharacterTextSplitter` extends the basic character splitter with a multi-pass approach that attempts to split text at increasingly smaller delimiters if the initial split results in chunks exceeding the target size. 
This strategy preserves semantic coherence by preferring natural text boundaries such as paragraphs and sentences over arbitrary character cuts.\n\nThe splitter maintains an ordered list of separators that it attempts in sequence: double newlines for paragraphs, single newlines for line breaks, spaces for mid-sentence breaks, and finally individual characters as a last resort. This ordered approach ensures that chunks align with linguistic structures whenever possible.\n\n### Splitting Methods\n\n| Method | Parameters | Return Type | Description |\n|--------|------------|-------------|-------------|\n| `split_documents()` | `documents: list[Document]` | `list[Document]` | Split a list of Document objects |\n| `split_text()` | `text: str` | `list[str]` | Split raw text into chunks |\n| `create_documents()` | `texts: list[str]`, `metadatas?: list[dict]` | `list[Document]` | Create Document objects from texts |\n\n资料来源：[libs/text-splitters/langchain_text_splitters/__init__.py](https://github.com/langchain-ai/langchain/blob/main/libs/text-splitters/langchain_text_splitters/__init__.py)\n\n## Document Loader Categories\n\nThe LangChain ecosystem encompasses a diverse collection of document loaders organized by their target data sources. These loaders are distributed across multiple packages, with community-contributed loaders maintained in the `langchain-community` package and select integrations maintained by the LangChain team in the `partners/` directory.\n\n### File System Loaders\n\nFile system loaders handle documents stored locally or accessible through file protocols. Support extends to common formats including plain text, CSV, JSON, XML, Markdown, and various word processing formats. 
Binary formats such as PDFs and Office documents require additional dependencies that are installed as optional extras.\n\n| Loader Category | Typical Extensions | Dependencies |\n|-----------------|-------------------|--------------|\n| Plain Text | `.txt`, `.text` | None |\n| Structured Data | `.csv`, `.json`, `.xml` | Built-in libraries |\n| Documents | `.pdf`, `.docx`, `.pptx` | Optional extras |\n| Code | `.py`, `.js`, `.java`, etc. | Language-specific parsers |\n\n### Web Loaders\n\nWeb loaders enable extraction of content from internet and intranet resources. These loaders handle HTTP requests, HTML parsing, and content extraction to produce clean document objects free of navigation elements and advertising content.\n\nThe `WebBaseLoader`, `PyMuPDFLoader`, and `PlaywrightLoader` represent common approaches to web content extraction, each offering different tradeoffs between speed, accuracy, and JavaScript rendering capabilities.\n\n### Database Loaders\n\nDatabase loaders connect to SQL and NoSQL databases to extract structured data and present it as documents. These loaders are essential for enterprise workflows that require accessing content stored in data warehouses, document databases, or traditional relational systems.\n\n### Cloud Storage Loaders\n\nIntegration with major cloud storage providers enables loading documents from Amazon S3, Google Cloud Storage, and Azure Blob Storage. These loaders handle authentication, bucket navigation, and file streaming to support large-scale document ingestion pipelines.\n\n## Integration with Retrieval Systems\n\nDocument loaders and text splitters form the foundation of Retrieval-Augmented Generation (RAG) pipelines. 
After documents are loaded and chunked, they are typically embedded using embedding models and stored in vector databases for similarity search.\n\n```mermaid\ngraph TD\n    A[Documents] --> B[Loader]\n    B --> C[Document Objects]\n    C --> D[Text Splitter]\n    D --> E[Chunks]\n    E --> F[Embedding Model]\n    F --> G[Vector Store]\n    G --> H[Retrieval]\n    H --> I[LLM Response]\n```\n\nThe chunk size and overlap parameters directly impact retrieval quality. Smaller chunks capture fine-grained concepts but may lack sufficient context, while larger chunks provide more context but may dilute the relevance of retrieved content. Tuning these parameters requires balancing the specific requirements of the target application.\n\n## Legacy and Deprecated Components\n\nThe `langchain-classic` package preserves legacy loader implementations that were present in earlier versions of LangChain. These components are maintained for backward compatibility but may lack features present in the newer `langchain-core` implementations.\n\n资料来源：[libs/langchain/langchain_classic/document_loaders/__init__.py](https://github.com/langchain-ai/langchain/blob/main/libs/langchain/langchain_classic/document_loaders/__init__.py)\n\nThe deprecation warnings in `langchain-classic` indicate a gradual migration path toward the consolidated abstractions in `langchain-core`. New projects should prefer the core implementations, while existing projects can continue using classic loaders until suitable migration paths are available.\n\n## Best Practices\n\nWhen working with document loaders and text splitters, several practices help ensure optimal results in production environments. First, always consider the downstream consumer when selecting chunk sizes—smaller chunks work well for question answering, while larger chunks preserve more context for summarization tasks. 
Second, include comprehensive metadata during loading to enable filtering and attribution in later stages.\n\nFor large-scale document processing, prefer lazy loading strategies to manage memory consumption effectively. When dealing with heterogeneous document collections, consider creating custom transformer pipelines that apply format-specific preprocessing before the generic splitting logic.\n\nThe separator configuration should align with the natural structure of the document format. Markdown documents benefit from separator patterns that respect heading hierarchies, while plain text may work better with paragraph-based or sentence-based splitting strategies.\n\n## Package Organization Summary\n\n| Package | Purpose | Key Classes |\n|---------|---------|-------------|\n| `langchain-core` | Core abstractions | `BaseLoader`, `BaseTransformer`, `Document` |\n| `langchain-classic` | Legacy implementations | `CSVLoader`, `PyPDFLoader`, etc. |\n| `langchain-text-splitters` | Text chunking | `TextSplitter`, `CharacterTextSplitter`, `RecursiveCharacterTextSplitter` |\n\n资料来源：[libs/core/langchain_core/document_loaders/__init__.py](https://github.com/langchain-ai/langchain/blob/main/libs/core/langchain_core/document_loaders/__init__.py)\n\nThe separation of `langchain-text-splitters` into its own package reflects the independent applicability of text splitting beyond document loading. 
This design enables reuse of splitting logic in contexts where documents are generated programmatically or obtained through means other than loaders.\n\n---\n\n<a id='vectorstores-retrievers'></a>\n\n## Vector Stores and Retrievers\n\n### 相关页面\n\n相关主题：[Document Loaders and Text Processing](#document-loaders), [Chat Models and Embeddings](#chat-models-embeddings), [Tools and Toolkits](#tools-integrations)\n\n<details>\n<summary>相关源码文件</summary>\n\n以下源码文件用于生成本页说明：\n\n- [libs/core/langchain_core/vectorstores/__init__.py](https://github.com/langchain-ai/langchain/blob/main/libs/core/langchain_core/vectorstores/__init__.py)\n- [libs/core/langchain_core/vectorstores/base.py](https://github.com/langchain-ai/langchain/blob/main/libs/core/langchain_core/vectorstores/base.py)\n- [libs/langchain/langchain_classic/vectorstores/__init__.py](https://github.com/langchain-ai/langchain/blob/main/libs/langchain/langchain_classic/vectorstores/__init__.py)\n- [libs/langchain/langchain_classic/vectorstores/base.py](https://github.com/langchain-ai/langchain/blob/main/libs/langchain/langchain_classic/vectorstores/base.py)\n- [libs/langchain/langchain_classic/retrievers/__init__.py](https://github.com/langchain-ai/langchain/blob/main/libs/langchain/langchain_classic/retrievers/__init__.py)\n- [libs/partners/chroma/README.md](https://github.com/langchain-ai/langchain/blob/main/libs/partners/chroma/README.md)\n- [libs/partners/huggingface/README.md](https://github.com/langchain-ai/langchain/blob/main/libs/partners/huggingface/README.md)\n</details>\n\n# Vector Stores and Retrievers\n\n## Overview\n\nVector Stores and Retrievers are fundamental components in LangChain that enable efficient similarity search and retrieval of documents based on semantic meaning. 
Vector stores handle the storage and indexing of embedded data, while retrievers provide the interface for querying and fetching relevant information from these stores.\n\nLangChain provides a layered architecture where core abstractions are defined in `langchain_core` and concrete implementations are available through partner packages and `langchain_classic`.\n\n## Architecture\n\nThe Vector Stores and Retrievers system follows a clean separation of concerns with interfaces defined at the core level and implementations distributed across the ecosystem.\n\n```mermaid\ngraph TD\n    A[User Query] --> B[Embedding Model]\n    B --> C[Vector Query]\n    C --> D[Vector Store]\n    D --> E[Retriever]\n    E --> F[Retrieved Documents]\n    \n    G[Documents] --> H[Document Loader]\n    H --> I[Text Splitter]\n    I --> J[Embedding Model]\n    J --> K[Indexed Vectors]\n    K --> D\n    \n    L[langchain_core] --> M[BaseVectorStore Interface]\n    L --> N[BaseRetriever Interface]\n    O[langchain_classic] --> P[Additional VectorStore Implementations]\n    O --> Q[Retrievers]\n    R[Partner Packages] --> S[Chroma, FAISS, Pinecone, etc.]\n```\n\n## Vector Stores\n\n### Core Interface\n\nThe base vector store interface in `langchain_core` provides the foundational methods that all vector store implementations must support. 
The interface is designed to be provider-agnostic while enabling specific features through optional methods.\n\n#### Key Methods\n\n| Method | Purpose | Parameters | Return Type |\n|--------|---------|------------|-------------|\n| `add_documents` | Add documents to the store | `documents: List[Document]`, `ids: Optional[List[str]]` | `List[str]` |\n| `add_texts` | Add raw texts to the store | `texts: List[str]`, `metadatas: Optional[List[dict]]`, `ids: Optional[List[str]]` | `List[str]` |\n| `similarity_search` | Find similar documents | `query: str`, `k: int`, `filter: Optional[dict]` | `List[Document]` |\n| `similarity_search_by_vector` | Search by embedding | `embedding: List[float]`, `k: int`, `filter: Optional[dict]` | `List[Document]` |\n| `similarity_search_with_score` | Search with relevance scores | `query: str`, `k: int`, `filter: Optional[dict]` | `List[Tuple[Document, float]]` |\n| `delete` | Remove documents by ID | `ids: Optional[List[str]]` | `None` |\n| `get_by_prefix` | Retrieve by ID prefix | `prefix: str` | `List[Document]` |\n\n### Embeddings Integration\n\nVector stores work in conjunction with embedding models to convert text into vector representations. 
The HuggingFace integration provides `HuggingFaceEmbeddings` for generating embeddings locally:\n\n```python\n# From libs/partners/huggingface/README.md\nfrom langchain_huggingface import HuggingFaceEmbeddings\n\n# Requires sentence-transformers>=5.2.0\nembeddings = HuggingFaceEmbeddings(model_name=\"all-MiniLM-L6-v2\")\n```\n\nThe embedding models are decoupled from vector stores, allowing flexibility in choosing both components independently.\n\n### Deletion and Prefix Operations\n\nVector stores support flexible document management through deletion and prefix-based retrieval operations:\n\n| Operation | Description |\n|-----------|-------------|\n| `delete(ids)` | Removes specific documents by their IDs |\n| `get_by_prefix(prefix)` | Retrieves all documents whose IDs start with the given prefix |\n\nThese operations enable efficient document lifecycle management within the vector store.\n\n## Retrievers\n\nRetrievers provide a standardized interface for fetching relevant documents from various sources. 
They abstract away the underlying storage mechanism and provide additional capabilities like filtering and reranking.\n\n### Base Retriever Interface\n\nThe retriever interface in `langchain_classic` defines the contract for all retrieval implementations:\n\n```mermaid\ngraph LR\n    A[Query] --> B[Retriever]\n    B --> C{Filtering}\n    C -->|Apply| D[Metadata Filter]\n    C -->|Apply| E[Time-based Filter]\n    B --> F[Document Pool]\n    F --> G[Top-K Results]\n```\n\n### Retriever Types\n\nLangChain provides several specialized retrievers:\n\n| Retriever Type | Purpose | Use Case |\n|----------------|---------|----------|\n| `VectorStoreRetriever` | Standard similarity search | General-purpose retrieval |\n| `MultiVectorRetriever` | Multiple embedding per document | Tables, images, sub-documents |\n| `MultiQueryRetriever` | Generate multiple query variations | Improve recall |\n| `ContextualCompressionRetriever` | Compress retrieved context | Reduce token usage |\n| `TimeWeightedVectorStoreRetriever` | Prioritize recent documents | Recency-aware search |\n\n### Configuration Options\n\nRetrievers support various configuration parameters:\n\n| Parameter | Type | Default | Description |\n|-----------|------|---------|-------------|\n| `search_type` | `str` | `\"similarity\"` | Search algorithm: `similarity`, `mmr` |\n| `k` | `int` | `4` | Number of documents to retrieve |\n| `fetch_k` | `int` | `20` | Initial pool size for MMR |\n| `lambda_mult` | `float` | `0.5` | MMR diversity parameter |\n| `filter` | `dict` | `None` | Metadata filter conditions |\n\n## Partner Integrations\n\n### Chroma Vector Store\n\nThe `langchain-chroma` package provides integration with Chroma, an open-source embedding database:\n\n```bash\npip install langchain-chroma\n```\n\nChroma is designed specifically for AI applications and provides efficient storage and retrieval of embeddings with metadata filtering capabilities.\n\n### HuggingFace Embeddings\n\nThe `langchain-huggingface` 
package offers multiple embedding options:\n\n- `HuggingFaceEmbeddings` - For local inference using sentence-transformers\n- `HuggingFacePipeline` - For local LLM inference\n\nFor full functionality including embeddings:\n\n```bash\npip install langchain-huggingface[full]\n```\n\n> **Note:** The `[full]` extra requires `sentence-transformers>=5.2.0` and `transformers>=5.0.0`. Projects migrating from `langchain-community` may need to upgrade `sentence-transformers` to `>=5.2.0`.\n\n## Package Structure\n\n### Core Layer (`langchain_core`)\n\nThe `langchain_core.vectorstores` module provides base abstractions that define the contract for all vector store implementations. This layer is dependency-light and focuses on interface definitions.\n\n### Classic Layer (`langchain_classic`)\n\nThe `langchain_classic.vectorstores` module extends the core abstractions with additional functionality:\n\n- Additional vector store implementations\n- Utility functions for common operations\n- Integration with other LangChain components\n\n### Partner Packages\n\nPartner packages provide maintained integrations with specific vector store providers:\n\n```\nlibs/partners/\n├── chroma/       # Chroma vector store\n├── weaviate/     # Weaviate vector database\n├── pinecone/     # Pinecone managed vector database\n├── qdrant/       # Qdrant vector search engine\n└── ...           
# Additional providers\n```\n\n## Workflow: Document Indexing and Retrieval\n\n```mermaid\nsequenceDiagram\n    participant User\n    participant Loader\n    participant Splitter\n    participant Embedder\n    participant VectorStore\n    participant Retriever\n    \n    User->>Loader: Load documents\n    Loader-->>User: Raw documents\n    User->>Splitter: Split documents\n    Splitter-->>User: Chunked documents\n    User->>Embedder: Generate embeddings\n    Embedder-->>User: Vector embeddings\n    User->>VectorStore: Add documents + embeddings\n    VectorStore-->>User: Document IDs\n    \n    User->>Retriever: Query\n    Retriever->>Embedder: Embed query\n    Embedder-->>Retriever: Query vector\n    Retriever->>VectorStore: Similarity search\n    VectorStore-->>Retriever: Top-K documents\n    Retriever-->>User: Retrieved results\n```\n\n## Best Practices\n\n### Indexing Recommendations\n\n1. **Chunk Size Selection**: Choose appropriate chunk sizes based on your use case—smaller chunks for precise retrieval, larger chunks for more context\n2. **Metadata Enrichment**: Include relevant metadata during indexing to enable filtered retrieval\n3. **Embedding Selection**: Match embedding models to your data type and language requirements\n\n### Retrieval Optimization\n\n1. **Use MMR for Diversity**: When using MMR (Maximal Marginal Relevance), tune `lambda_mult` to balance relevance and diversity\n2. **Implement Filtering**: Use metadata filtering to narrow results to relevant subsets\n3. 
**Consider Compression**: For long contexts, use contextual compression to reduce token usage\n\n## Security Considerations\n\nWhen implementing vector stores and retrievers in production:\n\n- **Data Isolation**: Ensure vector store data is properly isolated based on access requirements\n- **Input Sanitization**: Validate and sanitize query inputs to prevent injection attacks\n- **Access Control**: Implement appropriate access controls for sensitive document retrieval\n\n## See Also\n\n- [LangChain Core Documentation](https://reference.langchain.com/python/langchain_core)\n- [API Reference](https://reference.langchain.com/python/integrations/)\n- [LangChain Docs](https://docs.langchain.com/oss/python/integrations/providers)\n- [Partner Integrations](https://docs.langchain.com/oss/python/integrations/providers)\n\n---\n\n---\n\n## Doramagic 踩坑日志\n\n项目：langchain-ai/langchain\n\n摘要：发现 6 个潜在踩坑项，其中 0 个为 high/blocking；最高优先级：能力坑 - 能力判断依赖假设。\n\n## 1. 能力坑 · 能力判断依赖假设\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：README/documentation is current enough for a first validation pass.\n- 对用户的影响：假设不成立时，用户拿不到承诺的能力。\n- 建议检查：将假设转成下游验证清单。\n- 防护动作：假设必须转成验证项；没有验证结果前不能写成事实。\n- 证据：capability.assumptions | github_repo:552661142 | https://github.com/langchain-ai/langchain | README/documentation is current enough for a first validation pass.\n\n## 2. 维护坑 · 维护活跃度未知\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：未记录 last_activity_observed。\n- 对用户的影响：新项目、停更项目和活跃项目会被混在一起，推荐信任度下降。\n- 建议检查：补 GitHub 最近 commit、release、issue/PR 响应信号。\n- 防护动作：维护活跃度未知时，推荐强度不能标为高信任。\n- 证据：evidence.maintainer_signals | github_repo:552661142 | https://github.com/langchain-ai/langchain | last_activity_observed missing\n\n## 3. 
安全/权限坑 · 下游验证发现风险项\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：no_demo\n- 对用户的影响：下游已经要求复核，不能在页面中弱化。\n- 建议检查：进入安全/权限治理复核队列。\n- 防护动作：下游风险存在时必须保持 review/recommendation 降级。\n- 证据：downstream_validation.risk_items | github_repo:552661142 | https://github.com/langchain-ai/langchain | no_demo; severity=medium\n\n## 4. 安全/权限坑 · 存在评分风险\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：no_demo\n- 对用户的影响：风险会影响是否适合普通用户安装。\n- 建议检查：把风险写入边界卡，并确认是否需要人工复核。\n- 防护动作：评分风险必须进入边界卡，不能只作为内部分数。\n- 证据：risks.scoring_risks | github_repo:552661142 | https://github.com/langchain-ai/langchain | no_demo; severity=medium\n\n## 5. 维护坑 · issue/PR 响应质量未知\n\n- 严重度：low\n- 证据强度：source_linked\n- 发现：issue_or_pr_quality=unknown。\n- 对用户的影响：用户无法判断遇到问题后是否有人维护。\n- 建议检查：抽样最近 issue/PR，判断是否长期无人处理。\n- 防护动作：issue/PR 响应未知时，必须提示维护风险。\n- 证据：evidence.maintainer_signals | github_repo:552661142 | https://github.com/langchain-ai/langchain | issue_or_pr_quality=unknown\n\n## 6. 维护坑 · 发布节奏不明确\n\n- 严重度：low\n- 证据强度：source_linked\n- 发现：release_recency=unknown。\n- 对用户的影响：安装命令和文档可能落后于代码，用户踩坑概率升高。\n- 建议检查：确认最近 release/tag 和 README 安装命令是否一致。\n- 防护动作：发布节奏未知或过期时，安装说明必须标注可能漂移。\n- 证据：evidence.maintainer_signals | github_repo:552661142 | https://github.com/langchain-ai/langchain | release_recency=unknown\n\n<!-- canonical_name: langchain-ai/langchain; human_manual_source: deepwiki_human_wiki -->\n",
      "summary": "DeepWiki/Human Wiki 完整输出，末尾追加 Discovery Agent 踩坑日志。",
      "title": "Human Manual / 人类版说明书"
    },
    "pitfall_log": {
      "asset_id": "pitfall_log",
      "filename": "PITFALL_LOG.md",
      "markdown": "# Pitfall Log / 踩坑日志\n\n项目：langchain-ai/langchain\n\n摘要：发现 6 个潜在踩坑项，其中 0 个为 high/blocking；最高优先级：能力坑 - 能力判断依赖假设。\n\n## 1. 能力坑 · 能力判断依赖假设\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：README/documentation is current enough for a first validation pass.\n- 对用户的影响：假设不成立时，用户拿不到承诺的能力。\n- 建议检查：将假设转成下游验证清单。\n- 防护动作：假设必须转成验证项；没有验证结果前不能写成事实。\n- 证据：capability.assumptions | github_repo:552661142 | https://github.com/langchain-ai/langchain | README/documentation is current enough for a first validation pass.\n\n## 2. 维护坑 · 维护活跃度未知\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：未记录 last_activity_observed。\n- 对用户的影响：新项目、停更项目和活跃项目会被混在一起，推荐信任度下降。\n- 建议检查：补 GitHub 最近 commit、release、issue/PR 响应信号。\n- 防护动作：维护活跃度未知时，推荐强度不能标为高信任。\n- 证据：evidence.maintainer_signals | github_repo:552661142 | https://github.com/langchain-ai/langchain | last_activity_observed missing\n\n## 3. 安全/权限坑 · 下游验证发现风险项\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：no_demo\n- 对用户的影响：下游已经要求复核，不能在页面中弱化。\n- 建议检查：进入安全/权限治理复核队列。\n- 防护动作：下游风险存在时必须保持 review/recommendation 降级。\n- 证据：downstream_validation.risk_items | github_repo:552661142 | https://github.com/langchain-ai/langchain | no_demo; severity=medium\n\n## 4. 安全/权限坑 · 存在评分风险\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：no_demo\n- 对用户的影响：风险会影响是否适合普通用户安装。\n- 建议检查：把风险写入边界卡，并确认是否需要人工复核。\n- 防护动作：评分风险必须进入边界卡，不能只作为内部分数。\n- 证据：risks.scoring_risks | github_repo:552661142 | https://github.com/langchain-ai/langchain | no_demo; severity=medium\n\n## 5. 维护坑 · issue/PR 响应质量未知\n\n- 严重度：low\n- 证据强度：source_linked\n- 发现：issue_or_pr_quality=unknown。\n- 对用户的影响：用户无法判断遇到问题后是否有人维护。\n- 建议检查：抽样最近 issue/PR，判断是否长期无人处理。\n- 防护动作：issue/PR 响应未知时，必须提示维护风险。\n- 证据：evidence.maintainer_signals | github_repo:552661142 | https://github.com/langchain-ai/langchain | issue_or_pr_quality=unknown\n\n## 6. 
维护坑 · 发布节奏不明确\n\n- 严重度：low\n- 证据强度：source_linked\n- 发现：release_recency=unknown。\n- 对用户的影响：安装命令和文档可能落后于代码，用户踩坑概率升高。\n- 建议检查：确认最近 release/tag 和 README 安装命令是否一致。\n- 防护动作：发布节奏未知或过期时，安装说明必须标注可能漂移。\n- 证据：evidence.maintainer_signals | github_repo:552661142 | https://github.com/langchain-ai/langchain | release_recency=unknown\n",
      "summary": "用户实践前最可能遇到的身份、安装、配置、运行和安全坑。",
      "title": "Pitfall Log / 踩坑日志"
    },
    "prompt_preview": {
      "asset_id": "prompt_preview",
      "filename": "PROMPT_PREVIEW.md",
      "markdown": "# langchain - Prompt Preview\n\n> Copy the prompt below into your AI host before installing anything.\n> Its purpose is to let you safely feel the project's workflow, not to claim the project has already run.\n\n## Copy this prompt\n\n```text\nYou are using an independent Doramagic capability pack for langchain-ai/langchain.\n\nProject:\n- Name: langchain\n- Repository: https://github.com/langchain-ai/langchain\n- Summary: The agent engineering platform. Available in TypeScript!\n- Host target: chatgpt\n\nGoal:\nHelp me evaluate this project for the following task without installing it yet: The agent engineering platform. Available in TypeScript!\n\nBefore taking action:\n1. Restate my task, success standard, and boundary.\n2. Identify whether the next step requires tools, browser access, network access, filesystem access, credentials, package installation, or host configuration.\n3. Use only the Doramagic Project Pack, the upstream repository, and the source-linked evidence listed below.\n4. If a real command, install step, API call, file write, or host integration is required, mark it as \"requires post-install verification\" and ask for approval first.\n5. If evidence is missing, say \"evidence is missing\" instead of filling the gap.\n\nPreviewable capabilities:\n- Capability 1: The agent engineering platform. Available in TypeScript!\n\nCapabilities that require post-install verification:\n- Capability 1: Use the source-backed project context to guide one small, checkable workflow step.\n\nCore service flow:\n1. introduction: Introduction to LangChain. Produce one small intermediate artifact and wait for confirmation.\n2. getting-started: Getting Started with LangChain. Produce one small intermediate artifact and wait for confirmation.\n3. runnable-execution: Runnable and Execution Model. Produce one small intermediate artifact and wait for confirmation.\n4. messages-system: Messages and Prompt System. 
Produce one small intermediate artifact and wait for confirmation.\n5. chat-models-embeddings: Chat Models and Embeddings. Produce one small intermediate artifact and wait for confirmation.\n\nSource-backed evidence to keep in mind:\n- https://github.com/langchain-ai/langchain\n- https://github.com/langchain-ai/langchain#readme\n- README.md\n- libs/core/langchain_core/__init__.py\n- libs/langchain/langchain_classic/__init__.py\n- libs/core/langchain_core/language_models/chat_models.py\n- libs/core/langchain_core/runnables/base.py\n- libs/core/langchain_core/runnables/config.py\n- libs/core/langchain_core/runnables/retry.py\n- libs/core/langchain_core/runnables/branch.py\n\nFirst response rules:\n1. Start Step 1 only.\n2. Explain the one service action you will perform first.\n3. Ask exactly three questions about my target workflow, success standard, and sandbox boundary.\n4. Stop and wait for my answers.\n\nStep 1 follow-up protocol:\n- After I answer the first three questions, stay in Step 1.\n- Produce six parts only: clarified task, success standard, boundary conditions, two or three options, tradeoffs for each option, and one recommendation.\n- End by asking whether I confirm the recommendation.\n- Do not move to Step 2 until I explicitly confirm.\n\nConversation rules:\n- Advance one step at a time and wait for confirmation after each small artifact.\n- Write outputs as recommendations or planned checks, not as completed execution.\n- Do not claim tests passed, files changed, commands ran, APIs were called, or the project was installed.\n- If the user asks for execution, first provide the sandbox setup, expected output, rollback, and approval checkpoint.\n```\n",
      "summary": "不安装项目也能感受能力节奏的安全试用 Prompt。",
      "title": "Prompt Preview / 安装前试用 Prompt"
    },
    "quick_start": {
      "asset_id": "quick_start",
      "filename": "QUICK_START.md",
      "markdown": "# Quick Start / 官方入口\n\n项目：langchain-ai/langchain\n\n## 官方安装入口\n\n### Python / pip · 官方安装入口\n\n```bash\npip install langchain\n```\n\n来源：https://github.com/langchain-ai/langchain#readme\n\n## 来源\n\n- repo: https://github.com/langchain-ai/langchain\n- docs: https://github.com/langchain-ai/langchain#readme\n",
      "summary": "从项目官方 README 或安装文档提取的开工入口。",
      "title": "Quick Start / 官方入口"
    }
  },
  "validation_id": "dval_e8be47b3d404457fa44753c525c9108f"
}
