{
  "canonical_name": "langchain-ai/langchainjs",
  "compilation_id": "pack_57d207b207234dcd83f6620e7cfe4eb3",
  "created_at": "2026-05-16T06:25:04.317072+00:00",
  "created_by": "project-pack-compiler",
  "feedback": {
    "carrier_selection_notes": [
      "viable_asset_types=skill, recipe, host_instruction, eval, preflight",
      "recommended_asset_types=skill, recipe, host_instruction, eval, preflight"
    ],
    "evidence_delta": {
      "confirmed_claims": [
        "identity_anchor_present",
        "capability_and_host_targets_present",
        "install_path_declared_or_better"
      ],
      "missing_required_fields": [],
      "must_verify_forwarded": [
        "Run or inspect `npm install -S langchain` in an isolated environment.",
        "Confirm the project exposes the claimed capability to at least one target host."
      ],
      "quickstart_execution_scope": "allowlisted_sandbox_smoke",
      "sandbox_command": "npm install -S langchain",
      "sandbox_container_image": "node:22-slim",
      "sandbox_execution_backend": "docker",
      "sandbox_planner_decision": "deterministic_isolated_install",
      "sandbox_validation_id": "sbx_12e0143db7ae48dea96afb97fc1cfc85"
    },
    "feedback_event_type": "project_pack_compilation_feedback",
    "learning_candidate_reasons": [],
    "template_gaps": []
  },
  "identity": {
    "canonical_id": "project_6160c3635a7917b10fa436d58b5e216c",
    "canonical_name": "langchain-ai/langchainjs",
    "homepage_url": null,
    "license": "unknown",
    "repo_url": "https://github.com/langchain-ai/langchainjs",
    "slug": "langchainjs",
    "source_packet_id": "phit_c673851771da42b0b5c4fca4701e4fed",
    "source_validation_id": "dval_d18ecddff9bf424e9e8c59d6cb1811bd"
  },
  "merchandising": {
    "best_for": "需要软件开发与交付能力，并使用 local_cli的用户",
    "github_forks": 3167,
    "github_stars": 17672,
    "one_liner_en": "The agent engineering platform",
    "one_liner_zh": "The agent engineering platform",
    "primary_category": {
      "category_id": "software-development",
      "confidence": "medium",
      "name_en": "Software Development",
      "name_zh": "软件开发与交付",
      "reason": "matched_keywords:git, cli"
    },
    "target_user": "使用 local_cli 等宿主 AI 的用户",
    "title_en": "langchainjs",
    "title_zh": "langchainjs 能力包",
    "visible_tags": [
      {
        "label_en": "Browser Agents",
        "label_zh": "浏览器 Agent",
        "source": "repo_evidence_project_characteristics",
        "tag_id": "product_domain-browser-agents",
        "type": "product_domain"
      },
      {
        "label_en": "Web Task Automation",
        "label_zh": "网页任务自动化",
        "source": "repo_evidence_project_characteristics",
        "tag_id": "user_job-web-task-automation",
        "type": "user_job"
      },
      {
        "label_en": "Natural-language Web Actions",
        "label_zh": "自然语言网页操作",
        "source": "repo_evidence_project_characteristics",
        "tag_id": "core_capability-natural-language-web-actions",
        "type": "core_capability"
      },
      {
        "label_en": "Page Observation and Action Planning",
        "label_zh": "页面观察与动作规划",
        "source": "repo_evidence_project_characteristics",
        "tag_id": "workflow_pattern-page-observation-and-action-planning",
        "type": "workflow_pattern"
      },
      {
        "label_en": "Evaluation Suite",
        "label_zh": "评测体系",
        "source": "repo_evidence_project_characteristics",
        "tag_id": "selection_signal-evaluation-suite",
        "type": "selection_signal"
      }
    ]
  },
  "packet_id": "phit_c673851771da42b0b5c4fca4701e4fed",
  "page_model": {
    "artifacts": {
      "artifact_slug": "langchainjs",
      "files": [
        "PROJECT_PACK.json",
        "QUICK_START.md",
        "PROMPT_PREVIEW.md",
        "HUMAN_MANUAL.md",
        "AI_CONTEXT_PACK.md",
        "BOUNDARY_RISK_CARD.md",
        "PITFALL_LOG.md",
        "REPO_INSPECTION.json",
        "REPO_INSPECTION.md",
        "CAPABILITY_CONTRACT.json",
        "EVIDENCE_INDEX.json",
        "CLAIM_GRAPH.json"
      ],
      "required_files": [
        "PROJECT_PACK.json",
        "QUICK_START.md",
        "PROMPT_PREVIEW.md",
        "HUMAN_MANUAL.md",
        "AI_CONTEXT_PACK.md",
        "BOUNDARY_RISK_CARD.md",
        "PITFALL_LOG.md",
        "REPO_INSPECTION.json"
      ]
    },
    "detail": {
      "capability_source": "Project Hit Packet + DownstreamValidationResult",
      "commands": [
        {
          "command": "npm install -S langchain",
          "label": "Node.js / npm · 官方安装入口",
          "source": "https://github.com/langchain-ai/langchainjs#readme",
          "verified": true
        }
      ],
      "display_tags": [
        "浏览器 Agent",
        "网页任务自动化",
        "自然语言网页操作",
        "页面观察与动作规划",
        "评测体系"
      ],
      "eyebrow": "软件开发与交付",
      "glance": [
        {
          "body": "判断自己是不是目标用户。",
          "label": "最适合谁",
          "value": "需要软件开发与交付能力，并使用 local_cli的用户"
        },
        {
          "body": "先理解能力边界，再决定是否继续。",
          "label": "核心价值",
          "value": "The agent engineering platform"
        },
        {
          "body": "未完成验证前保持审慎。",
          "label": "继续前",
          "value": "publish to Doramagic.ai project surfaces"
        }
      ],
      "guardrail_source": "Boundary & Risk Card",
      "guardrails": [
        {
          "body": "Prompt Preview 只展示流程，不证明项目已安装或运行。",
          "label": "Check 1",
          "value": "不要把试用当真实运行"
        },
        {
          "body": "local_cli",
          "label": "Check 2",
          "value": "确认宿主兼容"
        },
        {
          "body": "publish to Doramagic.ai project surfaces",
          "label": "Check 3",
          "value": "先隔离验证"
        }
      ],
      "mode": "skill, recipe, host_instruction, eval, preflight",
      "pitfall_log": {
        "items": [
          {
            "body": "GitHub 社区证据显示该项目存在一个配置相关的待验证问题：[Feature request] React Native support",
            "category": "配置坑",
            "evidence": [
              "community_evidence:github | cevd_39933028ef894033b30ff784e81f185f | https://github.com/langchain-ai/langchainjs/issues/4239 | 来源类型 github_issue 暴露的待验证使用条件。"
            ],
            "severity": "high",
            "suggested_check": "来源问题仍为 open，Pack Agent 需要复核是否仍影响当前版本。",
            "title": "来源证据：[Feature request] React Native support",
            "user_impact": "可能增加新用户试用和生产接入成本。"
          },
          {
            "body": "GitHub 社区证据显示该项目存在一个安装相关的待验证问题：@langchain/core@1.1.46",
            "category": "安装坑",
            "evidence": [
              "community_evidence:github | cevd_36a7a58d5cd84bda8dde7918402a6f8a | https://github.com/langchain-ai/langchainjs/releases/tag/%40langchain/core%401.1.46 | 来源讨论提到 npm 相关条件，需在安装/试用前复核。"
            ],
            "severity": "medium",
            "suggested_check": "来源显示可能已有修复、规避或版本变化，说明书中必须标注适用版本。",
            "title": "来源证据：@langchain/core@1.1.46",
            "user_impact": "可能增加新用户试用和生产接入成本。"
          },
          {
            "body": "GitHub 社区证据显示该项目存在一个配置相关的待验证问题：Must pass in at least 1 record to upsert.",
            "category": "配置坑",
            "evidence": [
              "community_evidence:github | cevd_26c3acaad9e14ed3953206f25870c0b0 | https://github.com/langchain-ai/langchainjs/issues/10890 | 来源讨论提到 node 相关条件，需在安装/试用前复核。"
            ],
            "severity": "medium",
            "suggested_check": "来源问题仍为 open，Pack Agent 需要复核是否仍影响当前版本。",
            "title": "来源证据：Must pass in at least 1 record to upsert.",
            "user_impact": "可能增加新用户试用和生产接入成本。"
          },
          {
            "body": "README/documentation is current enough for a first validation pass.",
            "category": "能力坑",
            "evidence": [
              "capability.assumptions | github_repo:598342280 | https://github.com/langchain-ai/langchainjs | README/documentation is current enough for a first validation pass."
            ],
            "severity": "medium",
            "suggested_check": "将假设转成下游验证清单。",
            "title": "能力判断依赖假设",
            "user_impact": "假设不成立时，用户拿不到承诺的能力。"
          },
          {
            "body": "未记录 last_activity_observed。",
            "category": "维护坑",
            "evidence": [
              "evidence.maintainer_signals | github_repo:598342280 | https://github.com/langchain-ai/langchainjs | last_activity_observed missing"
            ],
            "severity": "medium",
            "suggested_check": "补 GitHub 最近 commit、release、issue/PR 响应信号。",
            "title": "维护活跃度未知",
            "user_impact": "新项目、停更项目和活跃项目会被混在一起，推荐信任度下降。"
          },
          {
            "body": "no_demo",
            "category": "安全/权限坑",
            "evidence": [
              "downstream_validation.risk_items | github_repo:598342280 | https://github.com/langchain-ai/langchainjs | no_demo; severity=medium"
            ],
            "severity": "medium",
            "suggested_check": "进入安全/权限治理复核队列。",
            "title": "下游验证发现风险项",
            "user_impact": "下游已经要求复核，不能在页面中弱化。"
          },
          {
            "body": "no_demo",
            "category": "安全/权限坑",
            "evidence": [
              "risks.scoring_risks | github_repo:598342280 | https://github.com/langchain-ai/langchainjs | no_demo; severity=medium"
            ],
            "severity": "medium",
            "suggested_check": "把风险写入边界卡，并确认是否需要人工复核。",
            "title": "存在评分风险",
            "user_impact": "风险会影响是否适合普通用户安装。"
          },
          {
            "body": "GitHub 社区证据显示该项目存在一个安全/权限相关的待验证问题：bug(@langchain/openai): Bare JSON.parse in Responses API converter crashes on structured output with trailing characters",
            "category": "安全/权限坑",
            "evidence": [
              "community_evidence:github | cevd_4d2a6bed33284a3cbd2f3319321d9e4c | https://github.com/langchain-ai/langchainjs/issues/10894 | 来源讨论提到 node 相关条件，需在安装/试用前复核。"
            ],
            "severity": "medium",
            "suggested_check": "来源问题仍为 open，Pack Agent 需要复核是否仍影响当前版本。",
            "title": "来源证据：bug(@langchain/openai): Bare JSON.parse in Responses API converter crashes on structured output with trailing characters",
            "user_impact": "可能阻塞安装或首次运行。"
          },
          {
            "body": "issue_or_pr_quality=unknown。",
            "category": "维护坑",
            "evidence": [
              "evidence.maintainer_signals | github_repo:598342280 | https://github.com/langchain-ai/langchainjs | issue_or_pr_quality=unknown"
            ],
            "severity": "low",
            "suggested_check": "抽样最近 issue/PR，判断是否长期无人处理。",
            "title": "issue/PR 响应质量未知",
            "user_impact": "用户无法判断遇到问题后是否有人维护。"
          },
          {
            "body": "release_recency=unknown。",
            "category": "维护坑",
            "evidence": [
              "evidence.maintainer_signals | github_repo:598342280 | https://github.com/langchain-ai/langchainjs | release_recency=unknown"
            ],
            "severity": "low",
            "suggested_check": "确认最近 release/tag 和 README 安装命令是否一致。",
            "title": "发布节奏不明确",
            "user_impact": "安装命令和文档可能落后于代码，用户踩坑概率升高。"
          }
        ],
        "source": "ProjectPitfallLog + ProjectHitPacket + validation + community signals",
        "summary": "发现 10 个潜在踩坑项，其中 1 个为 high/blocking；最高优先级：配置坑 - 来源证据：[Feature request] React Native support。",
        "title": "踩坑日志"
      },
      "snapshot": {
        "contributors": 1079,
        "forks": 3167,
        "license": "unknown",
        "note": "站点快照，非实时质量证明；用于开工前背景判断。",
        "stars": 17672
      },
      "source_url": "https://github.com/langchain-ai/langchainjs",
      "steps": [
        {
          "body": "不安装项目，先体验能力节奏。",
          "code": "preview",
          "title": "先试 Prompt"
        },
        {
          "body": "理解输入、输出、失败模式和边界。",
          "code": "manual",
          "title": "读说明书"
        },
        {
          "body": "把上下文交给宿主 AI 继续工作。",
          "code": "context",
          "title": "带给 AI"
        },
        {
          "body": "进入主力环境前先完成安装入口与风险边界验证。",
          "code": "verify",
          "title": "沙箱验证"
        }
      ],
      "subtitle": "The agent engineering platform",
      "title": "langchainjs 能力包",
      "trial_prompt": "# langchainjs - Prompt Preview\n\n> Copy the prompt below into your AI host before installing anything.\n> Its purpose is to let you safely feel the project's workflow, not to claim the project has already run.\n\n## Copy this prompt\n\n```text\nYou are using an independent Doramagic capability pack for langchain-ai/langchainjs.\n\nProject:\n- Name: langchainjs\n- Repository: https://github.com/langchain-ai/langchainjs\n- Summary: The agent engineering platform\n- Host target: local_cli\n\nGoal:\nHelp me evaluate this project for the following task without installing it yet: The agent engineering platform\n\nBefore taking action:\n1. Restate my task, success standard, and boundary.\n2. Identify whether the next step requires tools, browser access, network access, filesystem access, credentials, package installation, or host configuration.\n3. Use only the Doramagic Project Pack, the upstream repository, and the source-linked evidence listed below.\n4. If a real command, install step, API call, file write, or host integration is required, mark it as \"requires post-install verification\" and ask for approval first.\n5. If evidence is missing, say \"evidence is missing\" instead of filling the gap.\n\nPreviewable capabilities:\n- Capability 1: The agent engineering platform\n\nCapabilities that require post-install verification:\n- Capability 1: Use the source-backed project context to guide one small, checkable workflow step.\n\nCore service flow:\n1. introduction: Introduction to LangChain.js. Produce one small intermediate artifact and wait for confirmation.\n2. getting-started: Getting Started. Produce one small intermediate artifact and wait for confirmation.\n3. package-architecture: Package Architecture. Produce one small intermediate artifact and wait for confirmation.\n4. core-abstractions: Core Abstractions. Produce one small intermediate artifact and wait for confirmation.\n5. chat-models: Chat Models and LLM Providers. 
Produce one small intermediate artifact and wait for confirmation.\n\nSource-backed evidence to keep in mind:\n- https://github.com/langchain-ai/langchainjs\n- https://github.com/langchain-ai/langchainjs#readme\n- README.md\n- libs/langchain/README.md\n- libs/langchain-core/README.md\n- libs/langchain-classic/README.md\n- package.json\n- libs/langchain/package.json\n- libs/langchain-core/package.json\n- examples/package.json\n\nFirst response rules:\n1. Start Step 1 only.\n2. Explain the one service action you will perform first.\n3. Ask exactly three questions about my target workflow, success standard, and sandbox boundary.\n4. Stop and wait for my answers.\n\nStep 1 follow-up protocol:\n- After I answer the first three questions, stay in Step 1.\n- Produce six parts only: clarified task, success standard, boundary conditions, two or three options, tradeoffs for each option, and one recommendation.\n- End by asking whether I confirm the recommendation.\n- Do not move to Step 2 until I explicitly confirm.\n\nConversation rules:\n- Advance one step at a time and wait for confirmation after each small artifact.\n- Write outputs as recommendations or planned checks, not as completed execution.\n- Do not claim tests passed, files changed, commands ran, APIs were called, or the project was installed.\n- If the user asks for execution, first provide the sandbox setup, expected output, rollback, and approval checkpoint.\n```\n",
      "voices": [
        {
          "body": "来源平台：github。github/github_issue: [Feature request] React Native support（https://github.com/langchain-ai/langchainjs/issues/4239）；github/github_issue: bug(@langchain/openai): Bare JSON.parse in Responses API converter crash（https://github.com/langchain-ai/langchainjs/issues/10894）；github/github_issue: Must pass in at least 1 record to upsert.（https://github.com/langchain-ai/langchainjs/issues/10890）；github/github_release: @langchain/core@1.1.46（https://github.com/langchain-ai/langchainjs/releases/tag/%40langchain/core%401.1.46）；github/github_release: @langchain/core@1.1.41（https://github.com/langchain-ai/langchainjs/releases/tag/%40langchain/core%401.1.41）；github/github_release: @langchain/anthropic@1.3.27（https://github.com/langchain-ai/langchainjs/releases/tag/%40langchain/anthropic%401.3.27）。这些是项目级外部声音，不作为单独质量证明。",
          "items": [
            {
              "kind": "github_issue",
              "source": "github",
              "title": "[Feature request] React Native support",
              "url": "https://github.com/langchain-ai/langchainjs/issues/4239"
            },
            {
              "kind": "github_issue",
              "source": "github",
              "title": "bug(@langchain/openai): Bare JSON.parse in Responses API converter crash",
              "url": "https://github.com/langchain-ai/langchainjs/issues/10894"
            },
            {
              "kind": "github_issue",
              "source": "github",
              "title": "Must pass in at least 1 record to upsert.",
              "url": "https://github.com/langchain-ai/langchainjs/issues/10890"
            },
            {
              "kind": "github_release",
              "source": "github",
              "title": "@langchain/core@1.1.46",
              "url": "https://github.com/langchain-ai/langchainjs/releases/tag/%40langchain/core%401.1.46"
            },
            {
              "kind": "github_release",
              "source": "github",
              "title": "@langchain/core@1.1.41",
              "url": "https://github.com/langchain-ai/langchainjs/releases/tag/%40langchain/core%401.1.41"
            },
            {
              "kind": "github_release",
              "source": "github",
              "title": "@langchain/anthropic@1.3.27",
              "url": "https://github.com/langchain-ai/langchainjs/releases/tag/%40langchain/anthropic%401.3.27"
            }
          ],
          "status": "已收录 6 条来源",
          "title": "社区讨论"
        }
      ]
    },
    "homepage_card": {
      "category": "软件开发与交付",
      "desc": "The agent engineering platform",
      "effort": "安装已验证",
      "forks": 3167,
      "icon": "code",
      "name": "langchainjs 能力包",
      "risk": "可发布",
      "slug": "langchainjs",
      "stars": 17672,
      "tags": [
        "浏览器 Agent",
        "网页任务自动化",
        "自然语言网页操作",
        "页面观察与动作规划",
        "评测体系"
      ],
      "thumb": "gray",
      "type": "Skill Pack"
    },
    "manual": {
      "markdown": "# https://github.com/langchain-ai/langchainjs 项目说明书\n\n生成时间：2026-05-16 06:23:32 UTC\n\n## 目录\n\n- [Introduction to LangChain.js](#introduction)\n- [Getting Started](#getting-started)\n- [Package Architecture](#package-architecture)\n- [Core Abstractions](#core-abstractions)\n- [Chat Models and LLM Providers](#chat-models)\n- [Embeddings Integration](#embeddings)\n- [Agent Framework](#agent-framework)\n- [Tools and Toolkits](#tools-toolkits)\n- [Vector Stores](#vector-stores)\n- [Memory Systems](#memory-system)\n\n<a id='introduction'></a>\n\n## Introduction to LangChain.js\n\n### 相关页面\n\n相关主题：[Getting Started](#getting-started), [Package Architecture](#package-architecture)\n\n<details>\n<summary>相关源码文件</summary>\n\n以下源码文件用于生成本页说明：\n\n- [README.md](https://github.com/langchain-ai/langchainjs/blob/main/README.md)\n- [libs/langchain/README.md](https://github.com/langchain-ai/langchainjs/blob/main/libs/langchain/README.md)\n- [libs/langchain-core/README.md](https://github.com/langchain-ai/langchainjs/blob/main/libs/langchain-core/README.md)\n- [libs/langchain-classic/README.md](https://github.com/langchain-ai/langchainjs/blob/main/libs/langchain-classic/README.md)\n</details>\n\n# Introduction to LangChain.js\n\nLangChain.js is a JavaScript/TypeScript framework designed for building applications with Large Language Models (LLMs) through composability. It provides a standardized interface for working with different LLM providers, offers tools for prompt management, and enables the creation of complex chains and agents that can interact with external data sources and APIs.\n\n## Architecture Overview\n\nLangChain.js is organized into a modular monorepo structure with multiple packages that serve different purposes. 
Each package focuses on specific functionality while maintaining interoperability through shared dependencies.\n\n```mermaid\ngraph TD\n    A[Application Layer] --> B[libs/langchain]\n    A --> C[@langchain/classic]\n    B --> D[@langchain/core]\n    C --> D\n    E[Provider Integrations] --> F[libs/providers/*]\n    F --> D\n    G[Utility Packages] --> H[@langchain/textsplitters]\n    H --> D\n    \n    style A fill:#e1f5fe\n    style D fill:#f3e5f5\n    style F fill:#fff3e0\n```\n\n## Core Packages\n\n### @langchain/core\n\nThe foundational package containing essential abstractions and schemas used throughout the LangChain.js ecosystem.\n\n| Property | Value |\n|----------|-------|\n| Name | @langchain/core |\n| Version | 1.1.46 |\n| Type | ES Module |\n| Engine | Node.js >= 20 |\n\n**Key Dependencies:**\n\n- `@cfworker/json-schema` - JSON schema validation\n- `@standard-schema/spec` - Standard schema support\n- `js-tiktoken` - Token counting\n- `langsmith` - Tracing and evaluation (>= 0.5.0 < 1.0.0)\n- `mustache` - Template rendering\n- `p-queue` - Promise queue management\n- `zod` - Schema validation\n\n资料来源：[libs/langchain-core/package.json:1-35](https://github.com/langchain-ai/langchainjs/blob/main/libs/langchain-core/package.json)\n\n### libs/langchain\n\nThe main LangChain package that provides the primary API surface for building LLM applications. This package focuses on the essential building blocks for modern agent development, including the `createAgent` API.\n\n### @langchain/classic\n\nA backward-compatibility package containing functionality migrated from LangChain v0.x. 
This package exists to support existing applications while the core `langchain` package focuses on modern agent development.\n\n**When to use @langchain/classic:**\n\n- Existing code using legacy chains (LLMChain, ConversationalRetrievalQAChain, RetrievalQAChain)\n- Using the indexing API with RecordManager\n- Depending on community integrations previously re-exported from langchain\n- Maintaining existing applications not yet ready for the new `createAgent` API\n\n资料来源：[libs/langchain-classic/README.md:1-45](https://github.com/langchain-ai/langchainjs/blob/main/libs/langchain-classic/README.md)\n\n## Provider Integrations\n\nLangChain.js provides integration packages for various LLM providers through the `libs/providers` directory.\n\n### Available Provider Packages\n\n| Package | Purpose |\n|---------|---------|\n| @langchain/anthropic | Anthropic Claude models integration |\n| @langchain/google-common | Common utilities for Google AI models |\n| @langchain/tavily | Tavily search integration |\n| @langchain/qdrant | Qdrant vector database integration |\n\nEach integration package follows a consistent template structure and includes:\n\n- TypeScript source code\n- ESLint linting configuration\n- Vitest testing framework\n- TypeScript build compilation via tsdown\n\n资料来源：[libs/providers/langchain-tavily/package.json:1-35](https://github.com/langchain-ai/langchainjs/blob/main/libs/providers/langchain-tavily/package.json)\n资料来源：[libs/providers/langchain-qdrant/package.json:1-35](https://github.com/langchain-ai/langchainjs/blob/main/libs/providers/langchain-qdrant/package.json)\n\n## Text Processing\n\n### @langchain/textsplitters\n\nA dedicated package for splitting text into chunks, commonly used in retrieval-augmented generation (RAG) pipelines.\n\n**Supported Splitting Strategies:**\n\n| Strategy | Use Case |\n|----------|----------|\n| RecursiveCharacterTextSplitter | General purpose text splitting |\n| HTML Text Splitter | Structured HTML content |\n| Custom 
Separators | Domain-specific splitting requirements |\n\n```typescript\nimport { RecursiveCharacterTextSplitter } from \"@langchain/textsplitters\";\n\nconst splitter = RecursiveCharacterTextSplitter.fromLanguage(\"html\", {\n  chunkSize: 175,\n  chunkOverlap: 20,\n});\nconst output = await splitter.createDocuments([text]);\n```\n\n资料来源：[libs/langchain-textsplitters/README.md:1-40](https://github.com/langchain-ai/langchainjs/blob/main/libs/langchain-textsplitters/README.md)\n资料来源：[examples/src/langchain-classic/indexes/html_text_splitter.ts:1-20](https://github.com/langchain-ai/langchainjs/blob/main/examples/src/langchain-classic/indexes/html_text_splitter.ts)\n\n## Testing Infrastructure\n\nLangChain.js includes custom Jest/Vitest matchers for testing chains, agents, and messages.\n\n```typescript\nexport const langchainMatchers = {\n  toBeHumanMessage,\n  toBeAIMessage,\n  toBeSystemMessage,\n  toBeToolMessage,\n  toHaveToolCalls,\n  toHaveToolCallCount,\n  toContainToolCall,\n  toHaveToolMessages,\n  toHaveBeenInterrupted,\n  toHaveStructuredResponse,\n};\n```\n\n**Matcher Usage:**\n\n```typescript\nexpect(someMessage).toBeHumanMessage();\nexpect(someMessage).toBeAIMessage();\nexpect(someChain).toHaveToolCalls([\n  { name: \"calculator\", args: { expression: \"2+2\" } }\n]);\n```\n\n资料来源：[libs/langchain-core/src/testing/matchers.ts:80-95](https://github.com/langchain-ai/langchainjs/blob/main/libs/langchain-core/src/testing/matchers.ts)\n\n## Agent Middleware\n\n### Human-in-the-Loop (HITL) Middleware\n\nThe framework supports human-in-the-loop workflows through configurable interrupt mechanisms.\n\n```typescript\nconst config: InterruptOnConfig = {\n  allowedDecisions: [\"approve\", \"edit\"],\n  description: formatToolDescription\n};\n```\n\n**Configuration Schema:**\n\n| Parameter | Type | Description |\n|-----------|------|-------------|\n| allowedDecisions | string[] | Actions the human can take |\n| description | string \\| Function | Description of the 
interrupt |\n| argsSchema | Record | JSON schema for action arguments |\n\n资料来源：[libs/langchain/src/agents/middleware/hitl.ts:60-80](https://github.com/langchain-ai/langchainjs/blob/main/libs/langchain/src/agents/middleware/hitl.ts)\n\n## Message Block Translators\n\nLangChain.js provides standardized message translation between different API formats, particularly for OpenAI and Anthropic.\n\n```typescript\nconst BlockTranslator = {\n  translateContent: (message) => {\n    if (typeof message.content === \"string\") {\n      return convertToV1FromChatCompletions(message);\n    }\n    return convertToV1FromResponses(message);\n  },\n  translateContentChunk: (message) => {\n    if (typeof message.content === \"string\") {\n      return convertToV1FromChatCompletionsChunk(message);\n    }\n    return convertToV1FromResponsesChunk(message);\n  },\n};\n```\n\n资料来源：[libs/langchain-core/src/messages/block_translators/openai.ts:1-15](https://github.com/langchain-ai/langchainjs/blob/main/libs/langchain-core/src/messages/block_translators/openai.ts)\n\n## Namespace System\n\nLangChain.js uses a namespace utility for tracking and identifying LangChain-specific objects.\n\n```typescript\nexport const ns = createNamespace(\"langchain\");\n```\n\nNamespaces provide:\n- Hierarchical path organization (`ns.sub(\"component\")`)\n- Instance type checking (`ns.isInstance(obj)`)\n- Branded type creation for type safety\n\n资料来源：[libs/langchain-core/src/utils/namespace.ts:1-45](https://github.com/langchain-ai/langchainjs/blob/main/libs/langchain-core/src/utils/namespace.ts)\n\n## Legacy Chains (Deprecated)\n\nThe `@langchain/classic` package includes deprecated chain implementations:\n\n| Chain | Purpose |\n|-------|---------|\n| LLMChain | Basic LLM call with prompt template |\n| ConversationalRetrievalQAChain | Q&A with conversation memory |\n| RetrievalQAChain | Q&A over documents |\n| StuffDocumentsChain | Stuff documents into prompt |\n| MapReduceDocumentsChain | Map-reduce document 
operations |\n| RefineDocumentsChain | Iterative document refinement |\n\n资料来源：[libs/langchain-classic/README.md:15-30](https://github.com/langchain-ai/langchainjs/blob/main/libs/langchain-classic/README.md)\n\n## Development Workflow\n\n### Repository Structure\n\n```\nlangchainjs/\n├── libs/\n│   ├── langchain/              # Main LangChain package\n│   ├── langchain-core/         # Core abstractions\n│   ├── langchain-classic/      # Legacy v0.x functionality\n│   ├── langchain-textsplitters/ # Text splitting utilities\n│   └── providers/              # Provider integrations\n├── examples/                   # Usage examples\n└── libs/create-langchain-integration/  # Package template\n```\n\n### Building Packages\n\n```bash\npnpm install\npnpm build\n```\n\n### Running Examples\n\n```bash\ncp .env.example .env\n# Edit .env with API keys\npnpm run start ./src/path/to/example.ts\n```\n\n资料来源：[examples/src/README.md:1-25](https://github.com/langchain-ai/langchainjs/blob/main/examples/src/README.md)\n\n## Migration Guide\n\n### From langchain v0.x to v1.0\n\n**For new projects:** Use the `langchain` v1.0 package with `createAgent` API.\n\n**For existing projects:** Install `@langchain/classic` for backward compatibility:\n\n```bash\nnpm install @langchain/classic @langchain/core\n```\n\nThe new APIs provide:\n- Cleaner, more powerful agent building\n- Middleware support\n- Better performance for modern workflows\n- Focused API surface with less complexity\n- Active development on v1.0 features\n\n资料来源：[libs/langchain-classic/README.md:30-55](https://github.com/langchain-ai/langchainjs/blob/main/libs/langchain-classic/README.md)\n\n## Version Compatibility\n\nWhen using multiple LangChain packages, ensure they share the same `@langchain/core` instance:\n\n```json\n{\n  \"dependencies\": {\n    \"@langchain/core\": \"^0.3.0\"\n  },\n  \"overrides\": {\n    \"@langchain/core\": \"^0.3.0\"\n  },\n  \"pnpm\": {\n    \"overrides\": {\n      \"@langchain/core\": 
\"^0.3.0\"\n    }\n  }\n}\n```\n\n资料来源：[libs/create-langchain-integration/template/README.md:20-35](https://github.com/langchain-ai/langchainjs/blob/main/libs/create-langchain-integration/template/README.md)\n\n---\n\n<a id='getting-started'></a>\n\n## Getting Started\n\n### 相关页面\n\n相关主题：[Introduction to LangChain.js](#introduction)\n\n<details>\n<summary>相关源码文件</summary>\n\n以下源码文件用于生成本页说明：\n\n- [README.md](https://github.com/langchain-ai/langchainjs/blob/main/README.md)\n- [libs/langchain-textsplitters/README.md](https://github.com/langchain-ai/langchainjs/blob/main/libs/langchain-textsplitters/README.md)\n- [libs/langchain-classic/README.md](https://github.com/langchain-ai/langchainjs/blob/main/libs/langchain-classic/README.md)\n- [libs/providers/langchain-anthropic/README.md](https://github.com/langchain-ai/langchainjs/blob/main/libs/providers/langchain-anthropic/README.md)\n- [libs/providers/langchain-cohere/README.md](https://github.com/langchain-ai/langchainjs/blob/main/libs/providers/langchain-cohere/README.md)\n- [libs/providers/langchain-aws/README.md](https://github.com/langchain-ai/langchainjs/blob/main/libs/providers/langchain-aws/README.md)\n- [examples/src/README.md](https://github.com/langchain-ai/langchainjs/blob/main/examples/src/README.md)\n- [libs/create-langchain-integration/template/README.md](https://github.com/langchain-ai/langchainjs/blob/main/libs/create-langchain-integration/template/README.md)\n</details>\n\n# Getting Started\n\nLangChain.js is a comprehensive framework for building LLM-powered applications. 
It enables developers to chain together interoperable components and third-party integrations, simplifying AI application development while future-proofing decisions as underlying technology evolves.\n\n资料来源：[README.md:1-15]()\n\n## Prerequisites\n\n### System Requirements\n\n| Requirement | Version/Notes |\n|-------------|--------------|\n| Node.js | v18.x or higher recommended |\n| Package Manager | npm, yarn, or pnpm |\n| Runtime | Node.js or Edge (Vercel, Cloudflare Workers) |\n\n### Environment Setup\n\nBefore installing LangChain.js packages, ensure your environment is properly configured. Most integrations require API keys from respective service providers.\n\n```bash\n# Example API key environment variables\nexport ANTHROPIC_API_KEY=\"your-key-here\"\nexport OPENAI_API_KEY=\"your-key-here\"\nexport COHERE_API_KEY=\"your-key-here\"\nexport TOGETHER_AI_API_KEY=\"your-key-here\"\nexport BEDROCK_AWS_REGION=\"us-east-1\"\n```\n\n资料来源：[libs/providers/langchain-anthropic/README.md:1-30]()\n\n## Installation\n\nLangChain.js follows a modular architecture with packages organized by functionality. Understanding the package structure is essential for proper installation.\n\n### Package Architecture Overview\n\n```mermaid\ngraph TD\n    A[Application] --> B[langchain v1.0]\n    A --> C[@langchain/classic]\n    B --> D[@langchain/core]\n    C --> D\n    B --> E[@langchain/textsplitters]\n    B --> F[Provider Packages]\n    F --> G[@langchain/anthropic]\n    F --> H[@langchain/openai]\n    F --> I[@langchain/cohere]\n    F --> J[@langchain/aws]\n    F --> K[@langchain/google-common]\n```\n\n资料来源：[libs/langchain-classic/README.md:1-50]()\n\n### Core Packages\n\n#### @langchain/core\n\nThe foundational package that all other LangChain packages depend on. 
It provides base classes, interfaces, and utilities used across the ecosystem.\n\n```bash npm2yarn\nnpm install @langchain/core\n```\n\n资料来源：[libs/providers/langchain-anthropic/README.md:1-15]()\n\n#### langchain (v1.0)\n\nThe main package for building agents with modern APIs, including the `createAgent` function with middleware support.\n\n```bash npm2yarn\nnpm install langchain\n```\n\n#### @langchain/textsplitters\n\nSpecialized package for splitting documents into chunks, commonly used in retrieval-augmented generation (RAG) pipelines.\n\n```bash npm2yarn\nnpm install @langchain/textsplitters @langchain/core\n```\n\n资料来源：[libs/langchain-textsplitters/README.md:1-25]()\n\n### Provider Packages\n\nLangChain.js provides official integrations for various LLM providers:\n\n| Package | Purpose | Documentation |\n|---------|---------|---------------|\n| `@langchain/anthropic` | Anthropic Claude models | [Link](https://js.langchain.com/docs/integrations/chat/anthropic) |\n| `@langchain/openai` | OpenAI GPT models | [Link](https://js.langchain.com/docs/integrations/chat/openai) |\n| `@langchain/cohere` | Cohere models | [Link](https://js.langchain.com/docs/integrations/chat/cohere) |\n| `@langchain/aws` | AWS Bedrock models | [Link](https://js.langchain.com/docs/integrations/chat/aws-bedrock) |\n| `@langchain/together-ai` | Together AI models | [Link](https://js.langchain.com/docs/integrations/chat/together) |\n\n```bash npm2yarn\nnpm install @langchain/anthropic @langchain/core\nnpm install @langchain/openai @langchain/core\nnpm install @langchain/cohere @langchain/core\n```\n\n资料来源：[libs/providers/langchain-anthropic/README.md:1-25]()\n\n### @langchain/classic (Legacy)\n\nFor existing applications using LangChain v0.x, the `@langchain/classic` package provides backward compatibility with legacy chains and functionality.\n\n```bash npm2yarn\nnpm install @langchain/classic\n```\n\nThis package requires `@langchain/core` as a peer dependency:\n\n```bash npm2yarn\nnpm 
install @langchain/core\n```\n\n资料来源：[libs/langchain-classic/README.md:1-60]()\n\n## Core Dependency Management\n\nWhen using multiple LangChain packages together, ensure all packages resolve to the same instance of `@langchain/core` to avoid conflicts.\n\nAdd the following configuration to your `package.json`:\n\n```json\n{\n  \"name\": \"your-project\",\n  \"version\": \"0.0.0\",\n  \"dependencies\": {\n    \"@langchain/anthropic\": \"^0.0.9\",\n    \"@langchain/core\": \"^0.3.0\"\n  },\n  \"resolutions\": {\n    \"@langchain/core\": \"^0.3.0\"\n  },\n  \"overrides\": {\n    \"@langchain/core\": \"^0.3.0\"\n  },\n  \"pnpm\": {\n    \"overrides\": {\n      \"@langchain/core\": \"^0.3.0\"\n    }\n  }\n}\n```\n\n资料来源：[libs/providers/langchain-cohere/README.md:15-45]()\n\n## Development Setup\n\n### Building from Source\n\nTo develop with LangChain.js locally or run examples:\n\n```bash\n# Install dependencies from repository root\npnpm install\n\n# Build all packages\npnpm build\n```\n\n资料来源：[examples/src/README.md:1-20]()\n\n### Running Examples\n\nMost examples require API keys. Configure your environment by copying the example environment file:\n\n```bash\ncp .env.example .env\n```\n\nThen edit `.env` with your API keys. 
Run examples using the provided script:\n\n```bash\n# From the examples/ directory\npnpm run start <path to example>\n\n# Example\npnpm run start ./src/prompts/few_shot.ts\n```\n\nTo run examples with transpiled JavaScript:\n\n```bash\npnpm run start:dist <path to example>\n\n# Example\npnpm run start:dist ./dist/prompts/few_shot.js\n```\n\n资料来源：[examples/src/README.md:1-45]()\n\n### Testing\n\nTest files follow naming conventions:\n- Unit tests: `*.test.ts`\n- Integration tests: `*.int.test.ts`\n\n```bash\n# Run all tests\npnpm test\n\n# Run integration tests specifically\npnpm test:int\n```\n\nFor individual packages:\n\n```bash\n# Build a specific package\npnpm build --filter @langchain/textsplitters\n\n# Run tests for a specific package\ncd libs/langchain-textsplitters\npnpm test\n```\n\n资料来源：[libs/langchain-textsplitters/README.md:25-50]()\n\n## Quick Start Example\n\n### Basic Chat Model Usage\n\n```typescript\nimport { ChatAnthropic } from \"@langchain/anthropic\";\nimport { HumanMessage } from \"@langchain/core/messages\";\n\nconst model = new ChatAnthropic({\n  model: \"claude-sonnet-4-5-20250929\",\n});\n\nconst response = await model.invoke([\n  new HumanMessage('Translate \"I love programming\" into French.')\n]);\n\nconsole.log(response.content);\n// Output: J'adore la programmation.\n```\n\n资料来源：[libs/providers/langchain-anthropic/README.md:1-50]()\n\n### Document Splitting for RAG\n\n```typescript\nimport { RecursiveCharacterTextSplitter } from \"@langchain/textsplitters\";\n\nconst text = `<!DOCTYPE html>\n<html>\n  <head>\n    <title>LangChain</title>\n  </head>\n  <body>\n    <h1>Welcome</h1>\n    <p>Building applications with LLMs</p>\n  </body>\n</html>`;\n\nconst splitter = RecursiveCharacterTextSplitter.fromLanguage(\"html\", {\n  chunkSize: 175,\n  chunkOverlap: 20,\n});\n\nconst output = await splitter.createDocuments([text]);\nconsole.log(output);\n```\n\n资料来源：[examples/src/langchain-classic/indexes/html_text_splitter.ts:1-30]()\n\n## 
Choosing the Right Package\n\n### For New Projects\n\nUse `langchain` v1.0 for new projects. It provides:\n\n- **`createAgent`**: A cleaner, more powerful way to build agents with middleware support\n- **Better performance**: Optimized for modern agent workflows\n- **Focused API surface**: Less complexity, easier to learn\n- **Active development**: New features and improvements focus on v1.0 APIs\n\n### For Legacy Projects\n\nUse `@langchain/classic` if you:\n\n- Have existing code using legacy chains (e.g., `LLMChain`, `ConversationalRetrievalQAChain`)\n- Use the indexing API\n- Depend on functionality from `@langchain/community` previously re-exported from `langchain`\n- Are maintaining an existing application and not yet ready to migrate\n\n### Package Decision Flow\n\n```mermaid\ngraph TD\n    A[New Project?] -->|Yes| B[Use langchain v1.0]\n    A -->|No| C[Maintaining Existing Code?]\n    C -->|Yes| D[Using Legacy Chains?]\n    C -->|No| E[Use langchain v1.0]\n    D -->|Yes| F[Use @langchain/classic]\n    D -->|No| E\n```\n\n资料来源：[libs/langchain-classic/README.md:40-80]()\n\n## Additional Resources\n\n| Resource | Description |\n|----------|-------------|\n| [Documentation](https://docs.langchain.com/oss/javascript/langchain/overview) | Official LangChain.js documentation |\n| [Deep Agents](https://docs.langchain.com/oss/javascript/deepagents/) | Higher-level package for common agent patterns |\n| [Release Notes](https://docs.langchain.com/oss/javascript/releases/langchain-v1) | Version-specific changelog and migration guides |\n| [API Reference](https://api.js.langchain.com/) | Generated API documentation |\n\n资料来源：[README.md:15-30]()\n\n---\n\n<a id='package-architecture'></a>\n\n## Package Architecture\n\n### 相关页面\n\n相关主题：[Introduction to LangChain.js](#introduction), [Core Abstractions](#core-abstractions)\n\n<details>\n<summary>相关源码文件</summary>\n\n以下源码文件用于生成本页说明：\n\n- 
[pnpm-workspace.yaml](https://github.com/langchain-ai/langchainjs/blob/main/pnpm-workspace.yaml)\n- [turbo.json](https://github.com/langchain-ai/langchainjs/blob/main/turbo.json)\n- [libs/langchain-core/package.json](https://github.com/langchain-ai/langchainjs/blob/main/libs/langchain-core/package.json)\n- [libs/langchain/package.json](https://github.com/langchain-ai/langchainjs/blob/main/libs/langchain/package.json)\n- [libs/langchain-classic/package.json](https://github.com/langchain-ai/langchainjs/blob/main/libs/langchain-classic/package.json)\n- [libs/langchain-mcp-adapters/package.json](https://github.com/langchain-ai/langchainjs/blob/main/libs/langchain-mcp-adapters/package.json)\n- [libs/langchain-textsplitters/package.json](https://github.com/langchain-ai/langchainjs/blob/main/libs/langchain-textsplitters/package.json)\n</details>\n\n# Package Architecture\n\n## Overview\n\nThe LangChain.js repository employs a sophisticated **monorepo architecture** built on pnpm workspaces and Turbo. This design enables modular development, shared tooling, and independent versioning across multiple packages. The monorepo structure separates concerns into distinct layers: core abstractions, main integration packages, provider-specific SDKs, and community contributions. This architectural approach allows developers to install only the dependencies they need while maintaining a coherent ecosystem where packages can reference and compose with each other seamlessly.\n\n## Monorepo Structure\n\n### Workspace Organization\n\nThe repository uses **pnpm workspaces** to manage multiple packages under a single repository. 
This enables efficient dependency management, shared node_modules, and unified versioning strategies across the ecosystem.\n\n```mermaid\ngraph TD\n    A[Root: langchainjs] --> B[libs/]\n    A --> C[examples/]\n    A --> D[docs/]\n    B --> E[langchain-core/]\n    B --> F[langchain/]\n    B --> G[langchain-classic/]\n    B --> H[providers/]\n    B --> I[langchain-textsplitters/]\n    B --> J[community/]\n    H --> K[langchain-openai/]\n    H --> L[langchain-anthropic/]\n    H --> M[langchain-google/]\n    H --> N[langchain-pinecone/]\n    H --> O[langchain-qdrant/]\n```\n\n### Build Pipeline with Turbo\n\nThe build system uses **Turbo** to orchestrate builds, tests, and linting across packages. Turbo's caching mechanism significantly improves build times by only rebuilding packages that have changed since the last build.\n\n```mermaid\ngraph LR\n    A[pnpm install] --> B[turbo build]\n    B --> C[langchain-core]\n    C --> D[langchain]\n    C --> E[providers]\n    C --> F[community]\n    D --> G[langchain-classic]\n    G --> H[examples]\n```\n\n## Core Packages\n\n### @langchain/core\n\nThe foundational package that contains all core abstractions, interfaces, and utilities. 
This package is a peer dependency for all other LangChain packages and must be kept synchronized across the ecosystem.\n\n```mermaid\ngraph TD\n    A[@langchain/core] --> B[Messages & Chat Models]\n    A --> C[Tools & Toolkits]\n    A --> D[Vector Stores]\n    A --> E[Document Loaders]\n    A --> F[Output Parsers]\n    A --> G[Retrievers]\n    A --> H[Callbacks]\n    A --> I[Memory]\n```\n\n**Key Responsibilities:**\n\n- Defines base classes for all major abstractions (BaseLanguageModel, BaseRetriever, BaseChatModel)\n- Provides TypeScript utilities and type guards for common patterns\n- Exports core message types (AIMessage, HumanMessage, SystemMessage, ToolMessage)\n- Implements hash utilities and data transformation functions\n\n### langchain\n\nThe main package that re-exports functionality from core and community packages. This serves as a convenient entry point for users who want access to all integrations without managing individual package dependencies.\n\n### @langchain/classic\n\nContains legacy chain implementations from v0.x that have been deprecated or replaced in v1.0. This package provides migration paths for existing users while maintaining backward compatibility.\n\n| Component | Description |\n|-----------|-------------|\n| LLMChain | Basic chain for calling an LLM with a prompt template |\n| ConversationalRetrievalQAChain | Chain for conversational question-answering over documents |\n| RetrievalQAChain | Chain for question-answering over documents without conversation memory |\n| StuffDocumentsChain | Chain for stuffing documents into a prompt |\n| MapReduceDocumentsChain | Chain for map-reduce operations over documents |\n| RefineDocumentsChain | Chain for iterative refinement over documents |\n\n## Text Processing Packages\n\n### @langchain/textsplitters\n\nProvides various implementations of text splitters commonly used in retrieval-augmented generation (RAG) pipelines. 
Text splitters break large documents into smaller chunks for embedding and retrieval.\n\n**Supported Languages:**\n\n| Language | Use Case |\n|----------|----------|\n| html | HTML document splitting |\n| markdown | Markdown document splitting |\n| javascript | JavaScript/TypeScript code splitting |\n| python | Python code splitting |\n| text | Plain text splitting |\n\n**Example Usage:**\n\n```typescript\nimport { RecursiveCharacterTextSplitter } from \"@langchain/textsplitters\";\n\nconst splitter = RecursiveCharacterTextSplitter.fromLanguage(\"html\", {\n  chunkSize: 175,\n  chunkOverlap: 20,\n});\nconst output = await splitter.createDocuments([text]);\n```\n\n## Provider Integration Packages\n\nThe provider packages in `libs/providers/` contain integrations for specific third-party services. Each package follows a consistent structure and can be used independently or alongside other LangChain packages.\n\n### Package Naming Convention\n\nProvider packages follow the naming pattern `@langchain/[provider-name]`, where the provider name typically corresponds to the service being integrated. 
All provider packages depend on `@langchain/core` and may optionally depend on the provider's official SDK.\n\n### Provider Packages\n\n| Package | Dependency |\n|---------|------------|\n| @langchain/anthropic | @langchain/core, @anthropic-ai/sdk |\n| @langchain/openai | @langchain/core, openai |\n| @langchain/google | @langchain/core, @google-ai/generativelanguage |\n| @langchain/pinecone | @langchain/core, @pinecone-database/pinecone |\n| @langchain/qdrant | @langchain/core, qdrant-client |\n\n### Common Package Structure\n\nEach provider package follows a standardized structure:\n\n```mermaid\ngraph TD\n    A[Package Root] --> B[README.md]\n    A --> C[package.json]\n    A --> D[src/index.ts]\n    A --> E[src/chat_models/]\n    A --> F[src/output_parsers/]\n    A --> G[tests/]\n    E --> H[index.ts]\n    G --> I[*.test.ts]\n    G --> J[*.int.test.ts]\n```\n\n## Dependency Management\n\n### Core Dependency Synchronization\n\nWhen using multiple LangChain packages in a project, it is critical to ensure all packages depend on the same instance of `@langchain/core`. 
Package managers may resolve different versions of `@langchain/core` as separate instances, causing runtime errors.\n\n**Recommended Configuration (package.json):**\n\n```json\n{\n  \"name\": \"your-project\",\n  \"version\": \"0.0.0\",\n  \"dependencies\": {\n    \"@langchain/openai\": \"^0.0.0\",\n    \"@langchain/anthropic\": \"^0.0.0\",\n    \"@langchain/core\": \"^0.3.0\"\n  },\n  \"resolutions\": {\n    \"@langchain/core\": \"^0.3.0\"\n  },\n  \"overrides\": {\n    \"@langchain/core\": \"^0.3.0\"\n  }\n}\n```\n\nDifferent package managers require specific configuration fields:\n\n| Package Manager | Field |\n|----------------|-------|\n| npm/yarn | `resolutions` |\n| pnpm | `pnpm.overrides` |\n\n## Entry Points and Exports\n\n### Export Strategy\n\nPackages can export functionality through two mechanisms: re-exports from `src/index.ts` or explicit entry points defined in the `exports` field of `package.json`. The exports field enables conditional imports and tree-shaking optimization.\n\n### Deprecation Warnings\n\nWhen importing from deprecated entry points, packages emit console warnings directing users to the new import paths. 
These warnings can be suppressed by setting the environment variable `LANGCHAIN_SUPPRESS_MIGRATION_WARNINGS` to `\"true\"`.\n\n```typescript\n// Deprecated warning format\nif (getEnvironmentVariable(\"LANGCHAIN_SUPPRESS_MIGRATION_WARNINGS\") !== \"true\") {\n  console.warn(warningText);\n}\n```\n\n## Development Workflow\n\n### Installing Dependencies\n\n```bash\npnpm install\n```\n\n### Building Packages\n\nIndividual packages can be built using the Turbo filter:\n\n```bash\npnpm build --filter @langchain/core\n```\n\n### Running Tests\n\n| Test Type | Command | File Pattern |\n|-----------|---------|--------------|\n| Unit Tests | `pnpm test` | `*.test.ts` |\n| Integration Tests | `pnpm test:int` | `*.int.test.ts` |\n\nTest files should be located in a `tests/` directory within the `src/` folder.\n\n### Linting and Formatting\n\n```bash\npnpm lint && pnpm format\n```\n\n## Migration Path\n\n### From langchain v0.x to v1.0\n\nThe v1.0 release introduces significant architectural changes. Legacy functionality has been moved to `@langchain/classic`, while new abstractions live in `@langchain/core` and provider-specific packages. Users upgrading from v0.x should:\n\n1. Install `@langchain/classic` for backward compatibility\n2. Install new provider packages as needed\n3. Update import statements to use new package paths\n4. Remove imports from deprecated `langchain/` subpaths\n\n```bash\nnpm install @langchain/classic\nnpm install @langchain/anthropic @langchain/core\n```\n\n## Summary\n\nThe LangChain.js package architecture reflects a commitment to modularity, performance, and developer experience. By separating concerns across well-defined packages, the ecosystem enables efficient development while maintaining interoperability. 
The monorepo structure, powered by pnpm workspaces and Turbo, provides the foundation for managing this complexity at scale.\n\n---\n\n<a id='core-abstractions'></a>\n\n## Core Abstractions\n\n### 相关页面\n\n相关主题：[Package Architecture](#package-architecture), [Chat Models and LLM Providers](#chat-models)\n\n<details>\n<summary>相关源码文件</summary>\n\n以下源码文件用于生成本页说明：\n\n- [libs/langchain-core/src/runnables/base.ts](https://github.com/langchain-ai/langchainjs/blob/main/libs/langchain-core/src/runnables/base.ts)\n- [libs/langchain-core/src/language_models/chat_models.ts](https://github.com/langchain-ai/langchainjs/blob/main/libs/langchain-core/src/language_models/chat_models.ts)\n- [libs/langchain-core/src/language_models/llms.ts](https://github.com/langchain-ai/langchainjs/blob/main/libs/langchain-core/src/language_models/llms.ts)\n- [libs/langchain-core/src/embeddings.ts](https://github.com/langchain-ai/langchainjs/blob/main/libs/langchain-core/src/embeddings.ts)\n- [libs/langchain-core/src/vectorstores.ts](https://github.com/langchain-ai/langchainjs/blob/main/libs/langchain-core/src/vectorstores.ts)\n- [libs/langchain-core/src/callbacks/base.ts](https://github.com/langchain-ai/langchainjs/blob/main/libs/langchain-core/src/callbacks/base.ts)\n- [libs/langchain-core/src/prompts/index.ts](https://github.com/langchain-ai/langchainjs/blob/main/libs/langchain-core/src/prompts/index.ts)\n- [libs/langchain-core/src/output_parsers/index.ts](https://github.com/langchain-ai/langchainjs/blob/main/libs/langchain-core/src/output_parsers/index.ts)\n</details>\n\n# Core Abstractions\n\nThe **Core Abstractions** in LangChain.js form the foundational building blocks that enable interoperability between different components in LLM-powered applications. 
These abstractions define standardized interfaces for language models, embeddings, vector stores, prompts, output parsers, and runnable components, allowing developers to swap implementations without changing application logic.\n\n## Overview\n\nLangChain.js implements a set of abstract base classes in `@langchain/core` that establish contracts for:\n\n- **Runnable components** that can be composed into processing pipelines\n- **Language models** (chat and completion models)\n- **Embedding models** for vector representations\n- **Vector stores** for semantic search and retrieval\n- **Callback systems** for observability and event handling\n- **Prompt templates** for structured input formatting\n- **Output parsers** for structured response extraction\n\nThis abstraction layer enables loose coupling between components, making it straightforward to migrate between different model providers, vector stores, or embedding implementations while maintaining the same application code.\n\n## Runnable Architecture\n\n### BaseRunnable\n\nThe `BaseRunnable` class is the central abstraction that all runnable components extend. 
It provides a unified interface for invoking, batching, streaming, and composing operations.\n\n```mermaid\ngraph TD\n    A[BaseRunnable] --> B[invoke input]\n    A --> C[batch inputs]\n    A --> D[stream output]\n    A --> E[async operations]\n    \n    B --> F[Runnable Sequence]\n    B --> G[Runnable Branch]\n    B --> H[Runnable Map]\n    \n    style A fill:#e1f5fe\n    style F fill:#fff3e0\n    style G fill:#fff3e0\n    style H fill:#fff3e0\n```\n\nThe `BaseRunnable` interface defines the following core methods:\n\n| Method | Description | Signature |\n|--------|-------------|-----------|\n| `invoke` | Synchronous single input processing | `(input: Input, options?: RunnableConfig): Promise<Output>` |\n| `batch` | Process multiple inputs efficiently | `(inputs: Input[], options?: RunnableConfig): Promise<Output[]>` |\n| `stream` | Stream output chunks | `(input: Input, options?: RunnableConfig): Promise<Readable>` |\n| `pipe` | Chain runnables together | `(coerceToRunnable(other)): BaseRunnable` |\n| `withConfig` | Attach configuration | `(config: RunnableConfig): Runnable` |\n\n资料来源：[libs/langchain-core/src/runnables/base.ts]()\n\n### RunnableConfig\n\nConfiguration options passed to runnable operations:\n\n```typescript\ninterface RunnableConfig {\n  tags?: string[];\n  metadata?: Record<string, unknown>;\n  callbacks?: CallbackManager | CallbackHandler | Array<CallbackManager | CallbackHandler>;\n  maxConcurrency?: number;\n  maxTokens?: number;\n  recursionLimit?: number;\n  runName?: string;\n}\n```\n\n资料来源：[libs/langchain-core/src/runnables/base.ts]()\n\n## Language Model Abstractions\n\n### BaseChatModel\n\nThe `BaseChatModel` abstract class defines the contract for chat-oriented language models that process messages and return chat results.\n\n```mermaid\ngraph LR\n    A[HumanMessage] --> B[BaseChatModel]\n    C[SystemMessage] --> B\n    D[AIMessage] --> B\n    \n    B --> E[invoke]\n    B --> F[batch]\n    B --> G[stream]\n    \n    E --> 
H[BaseMessageChunk]\n    F --> I[BaseMessage[]]\n    G --> J[Generator of chunks]\n    \n    style B fill:#e8f5e9\n```\n\nKey properties and methods:\n\n| Property/Method | Type | Description |\n|-----------------|------|-------------|\n| `_llmType` | `string` (protected) | Identifier for the specific LLM type |\n| `_invocationParams` | `Record<string, unknown>` (protected) | Parameters for the API call |\n| `invoke` | `Method` | Process input and return chat result |\n| `generate` | `Method` | Generate responses with usage metadata |\n| `stream` | `Method` | Stream response chunks |\n\n资料来源：[libs/langchain-core/src/language_models/chat_models.ts]()\n\n### BaseLLM\n\nThe `BaseLLM` class provides the abstraction for traditional completion-based language models:\n\n| Property/Method | Type | Description |\n|-----------------|------|-------------|\n| `_llmType` | `string` (protected) | Identifier for the LLM type |\n| `_call` | `protected abstract method` | Core completion logic |\n| `_streamResponseChunks` | `protected method` | Optional streaming support |\n| `generate` | `Method` | Generate completions for prompts |\n\n资料来源：[libs/langchain-core/src/language_models/llms.ts]()\n\n### Message Types\n\nLangChain.js uses a structured message system:\n\n| Message Type | Description |\n|-------------|-------------|\n| `HumanMessage` | Input from the user |\n| `AIMessage` | Response from the model |\n| `SystemMessage` | System-level instructions |\n| `ToolMessage` | Response from tool execution |\n| `FunctionMessage` | Legacy function call responses |\n\n## Embedding Abstractions\n\n### BaseEmbeddings\n\nThe `BaseEmbeddings` abstract class provides a standardized interface for generating vector embeddings from text:\n\n```mermaid\ngraph TD\n    A[BaseEmbeddings] --> B[embedQuery text]\n    A --> C[embedDocuments texts]\n    \n    B --> D[float array]\n    C --> E[float array[]]\n    \n    style A fill:#fce4ec\n```\n\n| Method | Description | Parameters 
|\n|--------|-------------|------------|\n| `embedQuery` | Generate embedding for a single query | `document: string` |\n| `embedDocuments` | Generate embeddings for multiple documents | `documents: string[]` |\n\n`embedQuery` resolves to `Promise<number[]>` (a single embedding vector), while `embedDocuments` resolves to `Promise<number[][]>` (one vector per input document).\n\n资料来源：[libs/langchain-core/src/embeddings.ts]()\n\n## Vector Store Abstractions\n\n### VectorStore\n\nThe `VectorStore` class provides an abstraction for storing and querying vector embeddings:\n\n```mermaid\ngraph TD\n    A[VectorStore] --> B[addDocuments]\n    A --> C[similaritySearch]\n    A --> D[similaritySearchVectorWithScore]\n    A --> E[maxMarginalRelevanceSearch]\n    \n    B --> F[Document[]]\n    C --> G[Document[] by query]\n    D --> H[Document[] with scores]\n    E --> I[Diverse results]\n    \n    style A fill:#fff8e1\n```\n\nCore methods:\n\n| Method | Description | Parameters |\n|--------|-------------|------------|\n| `addDocuments` | Add documents to the store | `documents: Document[], addOptions?` |\n| `similaritySearch` | Find similar documents | `query: string, k?, filter?` |\n| `similaritySearchVectorWithScore` | Search by embedding vector | `query: number[], k?, filter?` |\n| `maxMarginalRelevanceSearch` | MMR diversity search | `query: string, k?, fetchK?, lambda?` |\n| `delete` | Remove documents by ID | `params` |\n\n资料来源：[libs/langchain-core/src/vectorstores.ts]()\n\n### Document Structure\n\nDocuments are the fundamental unit stored in vector stores:\n\n```typescript\ninterface Document {\n  pageContent: string;  // The text content\n  metadata: Record<string, unknown>;  // Associated metadata\n}\n```\n\n## Callback System\n\n### BaseCallbackHandler\n\nThe `BaseCallbackHandler` provides a hook system for observing and logging LangChain operations:\n\n```mermaid\ngraph LR\n    A[LangChain Event] --> B[CallbackManager]\n    B --> C[onChainStart]\n    B --> D[onChainEnd]\n    B --> E[onChainError]\n    B --> F[onLLMStart]\n    B --> G[onLLMEnd]\n    
B --> H[onToolStart]\n    B --> I[onToolEnd]\n    \n    style B fill:#e3f2fd\n```\n\nEvent handlers:\n\n| Event | Handler | Trigger |\n|-------|---------|---------|\n| Chain lifecycle | `onChainStart`, `onChainEnd`, `onChainError` | Chain execution |\n| LLM lifecycle | `onLLMStart`, `onLLMEnd`, `onLLMError` | Model calls |\n| Tool lifecycle | `onToolStart`, `onToolEnd`, `onToolError` | Tool execution |\n| Retriever | `onRetrieverStart`, `onRetrieverEnd` | Retrieval operations |\n| Text generation | `onText`, `onLLMNewToken` | Streaming events |\n\n资料来源：[libs/langchain-core/src/callbacks/base.ts]()\n\n## Prompt Abstractions\n\n### BasePromptTemplate\n\nThe `BasePromptTemplate` abstract class standardizes prompt creation and formatting:\n\n```mermaid\ngraph TD\n    A[BasePromptTemplate] --> B[merge]\n    A --> C[partial]\n    A --> D[invoke]\n    \n    E[Input Values] --> C\n    F[Partial Values] --> B\n    G[Runtime Input] --> D\n    \n    B --> H[PromptTemplate]\n    C --> I[PromptTemplate]\n    D --> J[Formatted String]\n    \n    style A fill:#f3e5f5\n```\n\nKey methods:\n\n| Method | Description |\n|--------|-------------|\n| `invoke` | Format prompt with input values |\n| `partial` | Create partially filled template |\n| `merge` | Combine multiple templates |\n| `save` | Serialize template to file |\n\n资料来源：[libs/langchain-core/src/prompts/index.ts]()\n\n### PromptValue\n\nPrompt values represent the formatted input to language models:\n\n```typescript\ninterface PromptValue {\n  toChatMessages(): BaseMessage[];\n  toString(): string;\n}\n```\n\n## Output Parser Abstractions\n\n### BaseOutputParser\n\nThe `BaseOutputParser` class provides a standardized interface for parsing and validating model outputs:\n\n```mermaid\ngraph TD\n    A[BaseOutputParser] --> B[parse text]\n    A --> C[parseWithPrompt]\n    A --> D[getFormatInstructions]\n    \n    E[Raw Output] --> B\n    F[Prompt + Output] --> C\n    \n    B --> G[Structured Result]\n    C --> G\n    D --> 
H[Format Instructions]\n    \n    style A fill:#e0f7fa\n```\n\nRequired methods:\n\n| Method | Description | Return Type |\n|--------|-------------|-------------|\n| `parse` | Parse raw output | `Promise<T>` |\n| `parseWithPrompt` | Parse with prompt context | `Promise<T>` |\n| `getFormatInstructions` | Get parsing instructions | `string` |\n\nOptional methods:\n\n| Method | Description |\n|--------|-------------|\n| `invoke` | Unified parsing interface |\n| `resultType` | TypeScript type information |\n\n资料来源：[libs/langchain-core/src/output_parsers/index.ts]()\n\n## Component Composition\n\nLangChain.js enables powerful composition through the pipe operator:\n\n```mermaid\ngraph LR\n    A[PromptTemplate] -->|\"pipe\"| B[ChatModel]\n    B -->|\"pipe\"| C[OutputParser]\n    \n    style A fill:#fff8e1\n    style B fill:#e8f5e9\n    style C fill:#e0f7fa\n```\n\nExample composition:\n\n```typescript\nconst chain = prompt.pipe(chatModel).pipe(outputParser);\nconst result = await chain.invoke({ input: \"your question\" });\n```\n\nThis composition model allows:\n\n- Sequential processing through `pipe()`\n- Parallel execution with `RunnableParallel`\n- Conditional branching with `RunnableBranch`\n- Error handling and fallbacks\n\n## Configuration and Initialization\n\nAll core abstractions accept configuration through constructor options:\n\n```typescript\ninterface BaseLanguageModelParams {\n  callbacks?: CallbackManager;\n  tags?: string[];\n  metadata?: Record<string, unknown>;\n}\n\ninterface BaseChatModel extends BaseLanguageModelParams {\n  temperature?: number;\n  topP?: number;\n  maxTokens?: number;\n  modelName?: string;\n}\n```\n\n## Extension Pattern\n\nTo implement a custom component, extend the base class and implement required abstract methods:\n\n```typescript\nimport { BaseLLM } from \"@langchain/core/language_models/llms\";\n\nclass MyCustomLLM extends BaseLLM {\n  _llmType() {\n    return \"my_custom_llm\";\n  }\n\n  async _call(prompt: string): 
Promise<string> {\n    // Implementation\n    return response;\n  }\n}\n```\n\n## Summary\n\nThe Core Abstractions in LangChain.js provide:\n\n| Layer | Purpose | Key Classes |\n|-------|---------|-------------|\n| **Runnable** | Unified component interface | `BaseRunnable`, `RunnableSequence` |\n| **Language Models** | Model interoperability | `BaseChatModel`, `BaseLLM` |\n| **Embeddings** | Vector generation | `BaseEmbeddings` |\n| **Vector Stores** | Semantic storage | `VectorStore` |\n| **Callbacks** | Observability | `BaseCallbackHandler` |\n| **Prompts** | Structured inputs | `BasePromptTemplate` |\n| **Output Parsers** | Structured outputs | `BaseOutputParser` |\n\nThese abstractions enable portable, testable, and composable LLM applications while maintaining flexibility to swap implementations as requirements evolve.\n\n---\n\n<a id='chat-models'></a>\n\n## Chat Models and LLM Providers\n\n### 相关页面\n\n相关主题：[Embeddings Integration](#embeddings), [Agent Framework](#agent-framework)\n\n<details>\n<summary>相关源码文件</summary>\n\n以下源码文件用于生成本页说明：\n\n- [libs/providers/langchain-openai/src/chat_models/base.ts](https://github.com/langchain-ai/langchainjs/blob/main/libs/providers/langchain-openai/src/chat_models/base.ts)\n- [libs/providers/langchain-anthropic/src/chat_models.ts](https://github.com/langchain-ai/langchainjs/blob/main/libs/providers/langchain-anthropic/src/chat_models.ts)\n- [libs/providers/langchain-google-common/src/chat_models.ts](https://github.com/langchain-ai/langchainjs/blob/main/libs/providers/langchain-google-common/src/chat_models.ts)\n- [libs/providers/langchain-mistralai/src/chat_models.ts](https://github.com/langchain-ai/langchainjs/blob/main/libs/providers/langchain-mistralai/src/chat_models.ts)\n- [libs/providers/langchain-ollama/src/chat_models.ts](https://github.com/langchain-ai/langchainjs/blob/main/libs/providers/langchain-ollama/src/chat_models.ts)\n- 
[libs/providers/langchain-deepseek/src/chat_models.ts](https://github.com/langchain-ai/langchainjs/blob/main/libs/providers/langchain-deepseek/src/chat_models.ts)\n- [libs/providers/langchain-groq/src/chat_models.ts](https://github.com/langchain-ai/langchainjs/blob/main/libs/providers/langchain-groq/src/chat_models.ts)\n- [libs/providers/langchain-xai/src/chat_models/index.ts](https://github.com/langchain-ai/langchainjs/blob/main/libs/providers/langchain-xai/src/chat_models/index.ts)\n</details>\n\n# Chat Models and LLM Providers\n\nLangChain.js provides a unified abstraction layer for interacting with various Large Language Model (LLM) providers through a standardized chat model interface. This system enables developers to seamlessly switch between different providers (OpenAI, Anthropic, Google, Mistral AI, Ollama, Deepseek, Groq, xAI) while maintaining consistent API patterns for invocation, streaming, and tool usage.\n\n## Architecture Overview\n\nThe chat model architecture follows a provider-specific implementation pattern where each LLM provider package contains its own chat model class that extends common base abstractions from `@langchain/core`.\n\n```mermaid\ngraph TD\n    A[Application Code] --> B[Chat Model Interface]\n    B --> C[Provider Implementations]\n    C --> D[OpenAI ChatModels]\n    C --> E[Anthropic ChatModels]\n    C --> F[Google ChatModels]\n    C --> G[MistralAI ChatModels]\n    C --> H[Ollama ChatModels]\n    C --> I[Deepseek ChatModels]\n    C --> J[Groq ChatModels]\n    C --> K[xAI ChatModels]\n    B --> L[Tool Calling Abstraction]\n    B --> M[Streaming Abstraction]\n```\n\n## Core Message Types\n\nLangChain.js defines standardized message types that form the foundation of the chat model system. 
All providers return variations of these message types with provider-specific metadata.\n\n### AIMessage\n\nThe primary return type from chat model invocations, containing the model's response along with usage and response metadata.\n\n```typescript\nAIMessage {\n  \"id\": \"msg_01QDpd78JUHpRP6bRRNyzbW3\",\n  \"content\": \"Here's the translation to French:\\n\\nJ'adore la programmation.\",\n  \"response_metadata\": {\n    \"id\": \"msg_01QDpd78JUHpRP6bRRNyzbW3\",\n    \"model\": \"claude-sonnet-4-5-20250929\",\n    \"stop_reason\": \"end_turn\",\n    \"stop_sequence\": null,\n    \"usage\": {\n      \"input_tokens\": 25,\n      \"output_tokens\": 19\n    },\n    \"type\": \"message\",\n    \"role\": \"assistant\"\n  },\n  \"usage_metadata\": {\n    \"input_tokens\": 25,\n    \"output_tokens\": 19,\n    \"total_tokens\": 44\n  }\n}\n```\n\n资料来源：[libs/providers/langchain-anthropic/src/chat_models.ts:1-100]()\n\n### AIMessageChunk\n\nUsed for streaming responses, representing incremental pieces of the complete response. 
Each chunk contains partial content that accumulates to form the full message.\n\n```typescript\nAIMessageChunk {\n  \"id\": \"msg_01N8MwoYxiKo9w4chE4gXUs4\",\n  \"content\": \"Here\",\n  \"additional_kwargs\": {\n    \"id\": \"msg_01N8MwoYxiKo9w4chE4gXUs4\",\n    \"type\": \"message\",\n    \"role\": \"assistant\",\n    \"model\": \"claude-sonnet-4-5-20250929\"\n  },\n  \"usage_metadata\": {\n    \"input_tokens\": 25,\n    \"output_tokens\": 1,\n    \"total_tokens\": 26\n  }\n}\n```\n\n资料来源：[libs/providers/langchain-anthropic/src/chat_models.ts:1-100]()\n\n### Message Structure Types\n\nThe core message system defines flexible structures for tools and tool calls:\n\n| Type | Purpose | Fields |\n|------|---------|--------|\n| `MessageToolDefinition` | Defines a tool's input/output schema | `input: TInput`, `output: TOutput` |\n| `MessageToolSet` | Collection of available tools | `{ [key: string]: MessageToolDefinition }` |\n| `MessageStructure` | Base structure for messages | Extends to include tools, tool calls |\n| `$MessageToolCallBlock` | Tool call invocation | `type`, `name`, `args`, `id?` |\n\n资料来源：[libs/langchain-core/src/messages/message.ts:1-100]()\n\n## Invocation Patterns\n\n### Single Invocation\n\nThe primary method for synchronous chat completion, accepting various input formats:\n\n```typescript\nconst input = `Translate \"I love programming\" into French.`;\n\n// Models accept string, list of messages, or formatted prompt\nconst result = await llm.invoke(input);\nconsole.log(result);\n```\n\n资料来源：[libs/providers/langchain-openai/src/chat_models/index.ts:1-100]()\n\n### Streaming\n\nAll chat models support streaming for real-time response generation:\n\n```typescript\nfor await (const chunk of await llm.stream(input)) {\n  console.log(chunk);\n}\n```\n\n资料来源：[libs/providers/langchain-anthropic/src/chat_models.ts:1-100]()\n\n## Tool Calling System\n\nLangChain.js provides a unified tool calling abstraction that works across providers with 
provider-specific translations.\n\n### Tool Format Translation\n\nThe OpenAI provider demonstrates how tools are translated to provider-specific formats:\n\n```typescript\nfunction convertToCustomTool(tool: FunctionDef & {\n  description?: string;\n  parameters?: JsonSchema;\n}): IClient.Chat.ChatCompletionCustomTool {\n  const getFormat = () => {\n    if (!tool.format) {\n      return undefined;\n    }\n    if (tool.format.type === \"grammar\") {\n      return {\n        type: \"grammar\" as const,\n        grammar: {\n          definition: tool.format.definition,\n          syntax: tool.format.syntax,\n        },\n      };\n    }\n    if (tool.format.type === \"text\") {\n      return {\n        type: \"text\" as const,\n      };\n    }\n    return undefined;\n  };\n  return {\n    type: \"custom\",\n    custom: {\n      name: tool.name,\n      description: tool.description,\n      format: getFormat(),\n    },\n  };\n}\n```\n\n资料来源：[libs/providers/langchain-openai/src/utils/tools.ts:1-50]()\n\n### Block Translators\n\nEach provider implements block translators for converting between internal message formats and provider-specific APIs:\n\n```typescript\nBlockTranslator = {\n  translateContent: (message) => {\n    if (typeof message.content === \"string\") {\n      return convertToV1FromChatCompletions(message);\n    }\n    return convertToV1FromResponses(message);\n  },\n  translateContentChunk: (message) => {\n    if (typeof message.content === \"string\") {\n      return convertToV1FromChatCompletionsChunk(message);\n    }\n    return convertToV1FromResponsesChunk(message);\n  },\n};\n```\n\n资料来源：[libs/langchain-core/src/messages/block_translators/openai.ts:1-30]()\n\n## Provider Implementations\n\n### OpenAI\n\nOpenAI's chat models use the Chat Completions API with comprehensive token usage tracking:\n\n```typescript\nAIMessage {\n  \"id\": \"chatcmpl-9u4Mpu44CbPjwYFkTbeoZgvzB00Tz\",\n  \"content\": \"J'adore la programmation.\",\n  \"response_metadata\": {\n   
 \"tokenUsage\": {\n      \"completionTokens\": 5,\n      \"promptTokens\": 28,\n      \"totalTokens\": 33\n    },\n    \"finish_reason\": \"stop\",\n    \"system_fingerprint\": \"fp_3aa7262c27\"\n  },\n  \"usage_metadata\": {\n    \"input_tokens\": 28,\n    \"output_tokens\": 5,\n    \"total_tokens\": 33\n  }\n}\n```\n\n资料来源：[libs/providers/langchain-openai/src/chat_models/index.ts:1-100]()\n\n### Anthropic\n\nAnthropic's implementation includes cache control and extended reasoning features:\n\n```typescript\n// System messages support array content for cache control\nconst systemMessage = new SystemMessage([\n  { type: \"text\", text: \"You are a helpful assistant.\" },\n  { type: \"cache_control\", ... }\n]);\n```\n\n资料来源：[libs/providers/langchain-anthropic/src/chat_models.ts:1-100]()\n\n### Ollama\n\nOllama provides local model hosting integration:\n\n```typescript\nimport { ChatOllama } from \"@langchain/ollama\";\n\nconst model = new ChatOllama({\n  model: \"llama3\", // Default value.\n});\n\nconst result = await model.invoke([\"human\", \"Hello, how are you?\"]);\n```\n\n资料来源：[libs/providers/langchain-ollama/README.md:1-50]()\n\n## Usage Metadata\n\nAll chat models track token usage consistently across providers:\n\n| Metadata Field | Description | Availability |\n|----------------|-------------|--------------|\n| `input_tokens` | Tokens in the prompt | All providers |\n| `output_tokens` | Tokens in the completion | All providers |\n| `total_tokens` | Sum of input and output | All providers |\n| `finish_reason` | Why generation stopped | OpenAI, Groq |\n| `stop_reason` | Stop sequence trigger | Anthropic |\n| `system_fingerprint` | Model version fingerprint | OpenAI |\n\n## Message Content Utilities\n\n### Standard Message Casting\n\nThe language models utility provides helpers for message handling:\n\n```typescript\nfunction castStandardMessageContent<T extends BaseMessage>(message: T) {\n  const Cls = message.constructor as Constructor<T>;\n  return new 
Cls({\n    ...message,\n    content: message.contentBlocks,\n    response_metadata: {\n      ...message.response_metadata,\n      output_version: \"v1\",\n    },\n  });\n}\n```\n\n资料来源：[libs/langchain-core/src/language_models/utils.ts:1-30]()\n\n## Workflow: Stream Aggregation\n\nFor applications requiring the complete response after streaming:\n\n```typescript\nimport { AIMessageChunk } from '@langchain/core/messages';\nimport { concat } from '@langchain/core/utils/stream';\n\n// Stream and accumulate chunks\nconst stream = await llm.stream(input);\nlet fullResponse = new AIMessageChunk({ content: \"\" });\n\nfor await (const chunk of stream) {\n  fullResponse = concat(fullResponse, chunk);\n}\n```\n\n资料来源：[libs/providers/langchain-openai/src/chat_models/index.ts:1-100]()\n\n## Testing with Matchers\n\nLangChain provides custom Jest matchers for testing chat model outputs:\n\n```typescript\nimport { langchainMatchers } from '@langchain/core/testing/matchers';\n\nexpect.extend(langchainMatchers);\n\n// Common matchers\nexpect(message).toBeAIMessage();\nexpect(message).toBeHumanMessage();\nexpect(message).toHaveToolCalls([{ name: 'calculator' }]);\nexpect(response).toHaveStructuredResponse({ type: 'json' });\n```\n\n资料来源：[libs/langchain-core/src/testing/matchers.ts:1-100]()\n\n## Summary Table: Provider Capabilities\n\n| Provider | Streaming | Tool Calling | Cache Control | Base URL Config |\n|----------|-----------|--------------|---------------|-----------------|\n| OpenAI | ✓ | ✓ | Via API | ✓ |\n| Anthropic | ✓ | ✓ | ✓ | ✓ |\n| Google | ✓ | ✓ | ✗ | ✓ |\n| Mistral AI | ✓ | ✓ | ✗ | ✓ |\n| Ollama | ✓ | ✓ | ✗ | Local |\n| Deepseek | ✓ | ✓ | ✗ | ✓ |\n| Groq | ✓ | ✓ | ✗ | ✓ |\n| xAI | ✓ | ✓ | ✗ | ✓ |\n\n## Common Configuration Parameters\n\n| Parameter | Type | Description |\n|-----------|------|-------------|\n| `model` | `string` | Model identifier (e.g., \"gpt-4o\", \"claude-3-opus\") |\n| `temperature` | `number` | Sampling temperature (0.0 - 2.0) |\n| `maxTokens` | 
`number` | Maximum tokens in response |\n| `timeout` | `number` | Request timeout in milliseconds |\n| `maxRetries` | `number` | Maximum retry attempts |\n\n## See Also\n\n- [Base Chat Models Documentation](../chat_models/base.md)\n- [Tool Calling Guide](../tools/tool_calling.md)\n- [Streaming Guide](../messages/streaming.md)\n- [Message Types Reference](../messages/types.md)\n\n---\n\n<a id='embeddings'></a>\n\n## Embeddings Integration\n\n### 相关页面\n\n相关主题：[Vector Stores](#vector-stores), [Chat Models and LLM Providers](#chat-models)\n\n<details>\n<summary>相关源码文件</summary>\n\n以下源码文件用于生成本页说明：\n\n- [libs/providers/langchain-openai/src/embeddings.ts](https://github.com/langchain-ai/langchainjs/blob/main/libs/providers/langchain-openai/src/embeddings.ts)\n- [libs/langchain-classic/src/vectorstores/memory.ts](https://github.com/langchain-ai/langchainjs/blob/main/libs/langchain-classic/src/vectorstores/memory.ts)\n- [libs/langchain-core/src/messages/block_translators/openai.ts](https://github.com/langchain-ai/langchainjs/blob/main/libs/langchain-core/src/messages/block_translators/openai.ts)\n- [README.md](https://github.com/langchain-ai/langchainjs/blob/main/README.md)\n- [libs/langchain-textsplitters/README.md](https://github.com/langchain-ai/langchainjs/blob/main/libs/langchain-textsplitters/README.md)\n</details>\n\n# Embeddings Integration\n\n## Overview\n\nEmbeddings integration in LangChain.js provides a standardized interface for converting text into dense vector representations suitable for machine learning tasks. 
These embeddings enable semantic search, similarity comparison, and retrieval-augmented generation (RAG) pipelines.\n\nLangChain.js supports multiple embedding providers through a unified `Embeddings` base class, allowing developers to swap between providers like OpenAI, AWS Bedrock, Cohere, Google Gemini, and Mistral without changing application code.\n\n资料来源：[README.md:1-15]()\n\n## Architecture\n\n### Class Hierarchy\n\n```mermaid\ngraph TD\n    A[Embeddings Base Class] --> B[OpenAIEmbeddings]\n    A --> C[AWS Embeddings]\n    A --> D[Cohere Embeddings]\n    A --> E[Google GenAI Embeddings]\n    A --> F[Google VertexAI Embeddings]\n    A --> G[Fireworks Embeddings]\n    A --> H[MistralAI Embeddings]\n    \n    B --> I[Vector Store Integration]\n    C --> I\n    D --> I\n    E --> I\n    F --> I\n    G --> I\n    H --> I\n```\n\n### Data Flow\n\n```mermaid\ngraph LR\n    A[Text Input] --> B[Embeddings.embedQuery]\n    C[Document List] --> D[Embeddings.embedDocuments]\n    B --> E[Vector Representation]\n    D --> F[Batch Vector Output]\n    E --> G[Vector Store]\n    F --> G\n    G --> H[Similarity Search]\n    H --> I[Retrieved Results]\n```\n\n## OpenAI Embeddings\n\n### Class Definition\n\nThe `OpenAIEmbeddings` class extends the base `Embeddings` class and implements the `Partial<OpenAIEmbeddingsParams>` interface. 
It provides access to OpenAI's embedding models including `text-embedding-ada-002` and the newer `text-embedding-3` series.\n\n资料来源：[libs/providers/langchain-openai/src/embeddings.ts:1-50]()\n\n### Configuration Parameters\n\n| Parameter | Type | Default | Description |\n|-----------|------|---------|-------------|\n| `model` | `string` | `\"text-embedding-ada-002\"` | The embedding model to use |\n| `batchSize` | `number` | `512` | Maximum documents per batch |\n| `stripNewLines` | `boolean` | `true` | Remove newlines from input (deprecated) |\n| `dimensions` | `number` | `undefined` | Output dimensions for text-embedding-3+ |\n| `timeout` | `number` | `undefined` | Request timeout in milliseconds |\n| `organization` | `string` | `undefined` | OpenAI organization ID |\n| `encodingFormat` | `\"float\" \\| \"base64\"` | `\"float\"` | Output encoding format |\n\n资料来源：[libs/providers/langchain-openai/src/embeddings.ts:30-60]()\n\n### Constructor Options\n\n```typescript\nconstructor(\n  fields?: Partial<OpenAIEmbeddingsParams> & {\n    verbose?: boolean;\n    openAIApiKey?: OpenAIApiKey;\n    apiKey?: OpenAIApiKey;\n    configuration?: ClientOptions;\n  }\n)\n```\n\nThe constructor accepts API keys through `openAIApiKey` or `apiKey` properties, and allows custom client configuration for advanced use cases like proxy settings or custom endpoints.\n\n资料来源：[libs/providers/langchain-openai/src/embeddings.ts:50-70]()\n\n## Base Embeddings Interface\n\nThe `Embeddings` base class defines the contract that all embedding implementations must follow. 
It provides:\n\n- `embedQuery(text: string)`: Embed a single text string\n- `embedDocuments(texts: string[])`: Embed multiple documents in batches\n\nThe base implementation handles batching logic, ensuring that large document sets are processed efficiently within the configured `batchSize` limits.\n\n## Integration with Vector Stores\n\n### MemoryVectorStore Example\n\nThe `MemoryVectorStore` class demonstrates typical embeddings integration patterns. It stores documents and performs similarity search using embedded vectors.\n\n资料来源：[libs/langchain-classic/src/vectorstores/memory.ts:1-60]()\n\n### Usage Pattern\n\n```typescript\nimport { MemoryVectorStore } from 'langchain/vectorstores/memory';\nimport { OpenAIEmbeddings } from '@langchain/openai';\n\nconst embeddings = new OpenAIEmbeddings({\n  model: \"text-embedding-3-small\",\n});\n\nconst vectorStore = new MemoryVectorStore(embeddings);\n\n// Add documents\nconst documents = [\n  { pageContent: \"foo\", metadata: { baz: \"bar\" } },\n  { pageContent: \"thud\", metadata: { bar: \"baz\" } },\n];\n\nawait vectorStore.addDocuments(documents);\n\n// Search\nconst results = await vectorStore.similaritySearch(\"thud\", 1);\n```\n\n资料来源：[libs/langchain-classic/src/vectorstores/memory.ts:30-55]()\n\n## Provider Implementations\n\n### Available Providers\n\n| Provider | Package | Models |\n|----------|---------|--------|\n| OpenAI | `@langchain/openai` | text-embedding-ada-002, text-embedding-3-small, text-embedding-3-large |\n| AWS | `@langchain/aws` | Amazon Titan, Cohere embeddings via Bedrock |\n| Cohere | `@langchain/cohere` | embed-english-v3.0, embed-multilingual-v3.0 |\n| Google GenAI | `@langchain/google-genai` | text-embedding-004 |\n| Google VertexAI | `@langchain/google-vertexai` | textembedding-gecko |\n| Fireworks | `@langchain/fireworks` | fireworks-embedding |\n| MistralAI | `@langchain/mistralai` | mistral-embed |\n\n### Common Features\n\nAll embedding providers support:\n\n- Single text embedding via 
`embedQuery()`\n- Batch embedding via `embedDocuments()` with automatic batching\n- Custom dimensions for supported models\n- Configurable timeouts and API keys\n- Streaming-compatible response handling\n\n## Batch Processing\n\n### Automatic Batching\n\nThe embeddings system automatically handles batching when processing large document sets. The default batch size is 512 documents, but this can be configured per instance.\n\n```typescript\nconst embeddings = new OpenAIEmbeddings({\n  batchSize: 256, // Reduce for lower memory usage\n});\n```\n\n### Batch Size Considerations\n\n| Batch Size | Use Case |\n|------------|----------|\n| 512+ | High throughput, large documents |\n| 256 | Balanced memory/throughput |\n| 64-128 | Low memory environments |\n| 1 | Streaming or real-time processing |\n\n## Output Formats\n\n### Float Arrays\n\nThe default output format returns embeddings as float32 arrays:\n\n```typescript\nconst embedding = await embeddings.embedQuery(\"Hello world\");\n// Returns: number[]\n```\n\n### Base64 Encoding\n\nFor reduced payload sizes, use base64 encoding:\n\n```typescript\nconst embeddings = new OpenAIEmbeddings({\n  encodingFormat: \"base64\",\n});\n```\n\n资料来源：[libs/providers/langchain-openai/src/embeddings.ts:45]()\n\n## Error Handling\n\n### Common Errors\n\n| Error | Cause | Resolution |\n|-------|-------|------------|\n| `AuthenticationError` | Invalid API key | Verify API key configuration |\n| `RateLimitError` | Too many requests | Implement retry with backoff |\n| `TimeoutError` | Request exceeded timeout | Increase timeout or reduce batch size |\n| `InvalidRequestError` | Invalid parameters | Check model name, dimensions, etc. |\n\n## Best Practices\n\n### Performance Optimization\n\n1. **Batch Operations**: Always use `embedDocuments()` for multiple texts instead of calling `embedQuery()` in a loop\n2. **Dimension Selection**: Use lower dimensions (256/512) when full precision isn't required\n3. 
**Connection Pooling**: Reuse embedding instances across requests\n\n### Security Considerations\n\n1. Store API keys in environment variables, never in source code\n2. Use `.env` files with appropriate access controls\n3. Set up API key restrictions in provider dashboards\n\n### Production Deployment\n\n```typescript\n// Recommended production configuration\nconst embeddings = new OpenAIEmbeddings({\n  model: \"text-embedding-3-small\",\n  batchSize: 512,\n  dimensions: 1536,\n  timeout: 60000,\n});\n```\n\n## Related Components\n\n### Text Splitters\n\nBefore embedding documents, use text splitters to break large texts into manageable chunks:\n\n```typescript\nimport { RecursiveCharacterTextSplitter } from \"@langchain/textsplitters\";\n\nconst splitter = RecursiveCharacterTextSplitter.fromLanguage(\"html\", {\n  chunkSize: 175,\n  chunkOverlap: 20,\n});\n\nconst docs = await splitter.createDocuments([htmlContent]);\nconst vectors = await embeddings.embedDocuments(docs.map(d => d.pageContent));\n```\n\n资料来源：[examples/src/langchain-classic/indexes/html_text_splitter.ts:1-25]()\n资料来源：[libs/langchain-textsplitters/README.md:1-30]()\n\n### Message Translation\n\nLangChain.js includes block translators for handling OpenAI message formats when working with embeddings in chat contexts:\n\n```typescript\nBlockTranslator = {\n  translateContent: (message) => {\n    if (typeof message.content === \"string\") {\n      return convertToV1FromChatCompletions(message);\n    }\n    return convertToV1FromResponses(message);\n  },\n};\n```\n\n资料来源：[libs/langchain-core/src/messages/block_translators/openai.ts:1-20]()\n\n## API Reference Summary\n\n### OpenAIEmbeddings Constructor\n\n```typescript\nnew OpenAIEmbeddings(fields?: Partial<OpenAIEmbeddingsParams>)\n```\n\n### Methods\n\n| Method | Parameters | Returns | Description |\n|--------|------------|---------|-------------|\n| `embedQuery` | `text: string` | `Promise<number[]>` | Embed single text |\n| `embedDocuments` | 
`texts: string[]` | `Promise<number[][]>` | Embed document batch |\n\n### Environment Variables\n\n| Variable | Description |\n|----------|-------------|\n| `OPENAI_API_KEY` | OpenAI API authentication key |\n\n---\n\n<a id='agent-framework'></a>\n\n## Agent Framework\n\n### 相关页面\n\n相关主题：[Tools and Toolkits](#tools-toolkits), [Chat Models and LLM Providers](#chat-models)\n\n<details>\n<summary>相关源码文件</summary>\n\n以下源码文件用于生成本页说明：\n\n- [libs/langchain/src/agents/runtime.ts](https://github.com/langchain-ai/langchainjs/blob/main/libs/langchain/src/agents/runtime.ts)\n- [libs/langchain/src/agents/stream.ts](https://github.com/langchain-ai/langchainjs/blob/main/libs/langchain/src/agents/stream.ts)\n- [libs/langchain/src/agents/middleware.ts](https://github.com/langchain-ai/langchainjs/blob/main/libs/langchain/src/agents/middleware.ts)\n- [libs/langchain/src/agents/middleware/hitl.ts](https://github.com/langchain-ai/langchainjs/blob/main/libs/langchain/src/agents/middleware/hitl.ts)\n- [libs/langchain/src/agents/nodes/types.ts](https://github.com/langchain-ai/langchainjs/blob/main/libs/langchain/src/agents/nodes/types.ts)\n- [libs/langchain-classic/src/agents/react/index.ts](https://github.com/langchain-ai/langchainjs/blob/main/libs/langchain-classic/src/agents/react/index.ts)\n- [libs/langchain-classic/src/agents/openai_functions/index.ts](https://github.com/langchain-ai/langchainjs/blob/main/libs/langchain-classic/src/agents/openai_functions/index.ts)\n- [libs/langchain-classic/src/agents/openai_tools/index.ts](https://github.com/langchain-ai/langchainjs/blob/main/libs/langchain-classic/src/agents/openai_tools/index.ts)\n- [libs/langchain-classic/src/agents/structured_chat/index.ts](https://github.com/langchain-ai/langchainjs/blob/main/libs/langchain-classic/src/agents/structured_chat/index.ts)\n- 
[libs/langchain-classic/src/agents/format_scratchpad/xml.ts](https://github.com/langchain-ai/langchainjs/blob/main/libs/langchain-classic/src/agents/format_scratchpad/xml.ts)\n</details>\n\n# Agent Framework\n\nThe LangChain.js Agent Framework provides a comprehensive, modular system for building autonomous and semi-autonomous agents that can reason about tasks, utilize tools, and interact with external environments. The framework is designed around the concept of agents as stateful, middleware-extensible runtime systems that coordinate between large language models (LLMs), tools, and human interventions.\n\n## Architecture Overview\n\nThe Agent Framework follows a layered architecture that separates concerns between agent definition, runtime execution, and middleware extension. At the core, agents are implemented as state machines that manage a series of steps, each potentially involving LLM calls, tool executions, and middleware hooks.\n\nThe architecture is built on several foundational components that work together to create a flexible agent system. The runtime layer manages the execution context, state transitions, and tool invocations, while the middleware layer provides hooks for observability, intervention, and customization at key points in the agent lifecycle.\n\nThe agent types themselves are modular, with different implementations optimized for various use cases—from classic ReAct-style reasoning agents to modern function-calling agents that leverage structured output capabilities from providers like OpenAI and Anthropic.\n\n## Core Components\n\n### Runtime System\n\nThe runtime system is the execution engine that orchestrates agent behavior. It maintains the agent state, manages step-by-step execution, handles tool calls, and integrates middleware for extensibility. 
The runtime provides a clean interface for starting, pausing, resuming, and terminating agent executions.\n\nThe runtime is responsible for managing the agent's internal memory and context across multiple steps, ensuring that observations from previous tool executions are properly passed back to the LLM for the next reasoning cycle. This cyclical pattern of reasoning, action, and observation forms the fundamental loop of agent execution.\n\n```mermaid\ngraph TD\n    A[User Input] --> B[Runtime Engine]\n    B --> C[LLM Reasoning]\n    C --> D{Tool Call?}\n    D -->|Yes| E[Execute Tool]\n    E --> F[Get Observation]\n    F --> B\n    D -->|No| G[Final Response]\n    B --> H[Middleware Hooks]\n    H --> I[Logging/Observability]\n    H --> J[Human-in-the-Loop]\n    H --> K[Custom Logic]\n```\n\n### Action Interface\n\nActions represent the fundamental unit of work that an agent can perform. The `Action` interface defines what an agent can request, including the action name and associated arguments:\n\n```typescript\nexport interface Action {\n  /**\n   * The type or name of action being requested (e.g., \"add_numbers\").\n   */\n  name: string;\n  /**\n   * Key-value pairs of arguments needed for the action (e.g., {\"a\": 1, \"b\": 2}).\n   */\n  args: Record<string, any>;\n}\n```\n\nThis abstraction allows the framework to treat all agent actions uniformly, whether they represent tool invocations, control flow operations, or communication with external systems. The `args` field uses a flexible Record structure to accommodate any argument types required by different tools.\n\n### Agent Nodes\n\nThe agent node system provides a structured way to define the behavior and capabilities of agents. 
Node types define the available operations, their parameters, and how they should be executed within the agent's runtime context.\n\nThe node type definitions establish a contract for what actions an agent can perform, ensuring type safety and providing documentation for available capabilities. Each node type can define its own schema for arguments, return types, and execution behavior.\n\n## Middleware System\n\nMiddleware in the Agent Framework operates as a plugin system that intercepts and can modify agent behavior at defined points in the execution lifecycle. This pattern enables cross-cutting concerns like logging, monitoring, security, and human oversight without coupling these concerns to the core agent logic.\n\n### Middleware Architecture\n\nThe middleware system follows a chain-of-responsibility pattern where each middleware component can inspect, modify, or terminate agent operations. Middleware is applied at key lifecycle points including before and after LLM calls, before and after tool executions, and at state transitions.\n\nMiddleware components receive context about the current operation, including the agent's state, the action being performed, and any intermediate results. This rich context enables sophisticated decision-making within middleware handlers.\n\n### Human-in-the-Loop (HITL) Middleware\n\nThe HITL middleware provides a mechanism for human oversight and intervention in agent execution. 
This is particularly valuable for production systems where agent actions need validation before execution, or where humans should have the ability to redirect agent behavior.\n\nThe HITL middleware is configured through the `InterruptOnConfig` interface:\n\n```typescript\nexport const InterruptOnConfigSchema = z.object({\n  /**\n   * When true, interrupts execution before the action runs.\n   * When false, interrupts after the action runs.\n   */\n  interruptBefore: z.boolean().optional(),\n  /**\n   * When true, waits for human approval before continuing.\n   * When false, triggers the interrupt but allows automatic continuation.\n   */\n  waitForApproval: z.boolean().optional(),\n  /**\n   * List of decision types the human reviewer may take for this action\n   * (e.g., \"approve\", \"edit\", \"reject\").\n   */\n  allowedDecisions: z.array(z.string()).optional(),\n  /**\n   * Dynamic callable description\n   */\n  description: z.union([z.string(), DescriptionFunctionSchema]).optional(),\n  /**\n   * JSON schema for the arguments associated with the action, if edits are allowed.\n   */\n  argsSchema: z.record(z.any()).optional(),\n});\n```\n\nThe configuration supports several operation modes. When `interruptBefore` is true, execution pauses before the action runs, allowing review of the planned action. When false, the action executes first and then interrupts, enabling review of both the action and its results. The `allowedDecisions` array restricts which decision types the human reviewer may return (for example `\"approve\"` or `\"edit\"`), allowing fine-grained control over how human oversight is applied.\n\nThe `description` field accepts either a static string or a dynamic function that generates descriptions at runtime. 
This is useful for providing contextual information about actions:\n\n```typescript\nconst formatToolDescription: DescriptionFactory = (\n  toolCall: ToolCall,\n  state: AgentBuiltInState,\n  runtime: Runtime<unknown>\n) => {\n  return `Tool: ${toolCall.name}\\nArguments:\\n${JSON.stringify(toolCall.args, null, 2)}`;\n};\n\nconst config: InterruptOnConfig = {\n  allowedDecisions: [\"approve\", \"edit\"],\n  description: formatToolDescription\n};\n```\n\n## Agent Types\n\nThe Agent Framework supports multiple agent implementations, each optimized for different reasoning patterns and use cases.\n\n### ReAct Agent\n\nThe ReAct (Reasoning + Acting) agent implements the classic ReAct pattern where the agent alternates between reasoning steps and action execution. This agent type is particularly effective for tasks requiring multi-step reasoning with tool usage.\n\nThe ReAct agent formats its reasoning in a structured scratchpad that combines observations, thoughts, and actions. The XML formatter provides a standardized way to represent this reasoning trace:\n\n```typescript\nexport function formatXml(intermediateSteps: AgentStep[]) {\n  let log = \"\";\n  for (const step of intermediateSteps) {\n    const { action, observation } = step;\n    log += `<tool>${action.tool}</tool><tool_input>${action.toolInput}\\n</tool_input><observation>${observation}</observation>`;\n  }\n  return log;\n}\n```\n\nThis XML format enables the LLM to easily parse and reason about previous actions and their outcomes, maintaining a clear trace of the agent's reasoning chain.\n\n### OpenAI Functions Agent\n\nThe OpenAI Functions agent leverages OpenAI's function calling capability to constrain agent outputs to a specific set of tool definitions. This approach provides more reliable tool selection compared to open-ended generation, as the LLM must choose from the provided function schemas.\n\nThe agent passes tool definitions directly to the OpenAI API, which returns structured function calls. 
This eliminates the need for parsing unstructured text and reduces the likelihood of malformed tool invocations.\n\n### OpenAI Tools Agent\n\nThe OpenAI Tools agent is an evolution of the functions agent that uses a more flexible tool definition format. It supports both the traditional function calling interface and newer tool-use patterns, providing compatibility across different OpenAI model versions.\n\nThis agent type is recommended for new projects using OpenAI models, as it supports the latest tool-use capabilities and provides better compatibility with models that have tool-use fine-tuning.\n\n### Structured Chat Agent\n\nThe Structured Chat agent uses structured output parsing to extract tool calls from LLM responses. This approach works with any LLM that supports structured output, not just providers with native function calling support.\n\nThe agent defines a JSON schema for valid actions and uses parsing techniques to extract tool calls from the text response. This provides flexibility in model choice while maintaining reliable tool usage.\n\n## Event System\n\nThe agent framework includes a comprehensive event system for monitoring and debugging agent execution. Events are emitted at key points in the agent lifecycle, allowing external systems to observe and respond to agent behavior.\n\n| Event Type | Description | Common Use Cases |\n|------------|-------------|------------------|\n| `content-block-delta` | Emitted during streaming when content blocks are being updated | Real-time UI updates, partial result handling |\n| `content-block-finish` | Emitted when a content block is complete | Result aggregation, final processing |\n| `usage` | Emitted when usage information is updated | Cost tracking, monitoring, rate limiting |\n| `provider` | Passthrough for provider-specific events | Integration with provider SDKs, custom logging |\n\nThe event system is designed to work seamlessly with streaming responses, where content may arrive incrementally. 
Events include positional indices and delta information that enable proper reconstruction of complete responses.\n\n## Streaming Support\n\nAgents support streaming responses through the runtime's streaming capabilities. Streaming is particularly valuable for long-running agent tasks where users benefit from seeing partial results as they become available.\n\nThe streaming implementation works by emitting events as the agent produces output. These events can be consumed incrementally, allowing applications to display results in real-time without waiting for complete agent execution.\n\n## Configuration and Integration\n\n### Tool Configuration\n\nTools are passed to agents at initialization and define the actions available to the agent. Each tool has a name, description, and schema that describes its arguments. The schema is typically defined using Zod for runtime type validation.\n\n```typescript\nconst getWeather = tool(\n  async (input) => `Weather in ${input.location}`,\n  {\n    name: \"get_weather\",\n    description: \"Get weather for a location\",\n    schema: z.object({ location: z.string() }),\n  }\n);\n```\n\n### Provider Configuration\n\nAgents can be configured with any LLM provider that LangChain.js supports. The provider configuration includes API keys, model selection, and provider-specific parameters:\n\n```typescript\nimport { ChatAnthropicMessages } from \"@langchain/anthropic\";\nimport { ChatOpenAI } from \"@langchain/openai\";\n\nconst anthropicAgent = createAgent({\n  llm: new ChatAnthropicMessages({ model: \"claude-sonnet-4-5-20250929\" }),\n  tools: [getWeather, searchTool],\n});\n\nconst openaiAgent = createAgent({\n  llm: new ChatOpenAI({ model: \"gpt-4\" }),\n  tools: [getWeather, searchTool],\n});\n```\n\n## Migration from Legacy Agents\n\nThe `@langchain/classic` package provides backward compatibility for legacy agent types that were part of LangChain v0.x. 
These include `LLMChain`, `ConversationalRetrievalQAChain`, and `RetrievalQAChain`.\n\nFor new projects, the recommended approach is to use the new `createAgent` API from the main `langchain` package. This provides better performance, cleaner abstractions, and access to middleware capabilities that are not available in the legacy agents.\n\nThe legacy agents will continue to be supported but will not receive new features. Applications should plan to migrate to the new API over time, taking advantage of improvements in the agent framework.\n\n## Best Practices\n\nWhen building agents with the LangChain.js Agent Framework, several practices help ensure reliable and maintainable implementations. Tool definitions should include clear, descriptive names and comprehensive descriptions that help the LLM understand when and how to use each tool. Argument schemas should validate inputs and provide clear error messages when invalid arguments are passed.\n\nMiddleware should be used judiciously to avoid performance impacts. For high-volume production systems, consider which middleware operations can be performed asynchronously or in batch mode. Human-in-the-loop configurations should be tested thoroughly to ensure the interrupt flow matches expected user interactions.\n\nAgent state should be monitored and logged appropriately. 
The event system provides hooks for integrating with observability platforms, enabling debugging and performance analysis of agent behavior in production environments.\n\n---\n\n资料来源：[libs/langchain/src/agents/middleware/hitl.ts:35-52](https://github.com/langchain-ai/langchainjs/blob/main/libs/langchain/src/agents/middleware/hitl.ts)\n资料来源：[libs/langchain/src/agents/middleware/hitl.ts:56-69](https://github.com/langchain-ai/langchainjs/blob/main/libs/langchain/src/agents/middleware/hitl.ts)\n资料来源：[libs/langchain-classic/src/agents/format_scratchpad/xml.ts:3-12](https://github.com/langchain-ai/langchainjs/blob/main/libs/langchain-classic/src/agents/format_scratchpad/xml.ts)\n资料来源：[libs/langchain-classic/README.md:1-45](https://github.com/langchain-ai/langchainjs/blob/main/libs/langchain-classic/README.md)\n资料来源：[libs/langchain-core/src/language_models/event.ts:1-60](https://github.com/langchain-ai/langchainjs/blob/main/libs/langchain-core/src/language_models/event.ts)\n\n---\n\n<a id='tools-toolkits'></a>\n\n## Tools and Toolkits\n\n### 相关页面\n\n相关主题：[Agent Framework](#agent-framework), [Memory Systems](#memory-system)\n\n<details>\n<summary>相关源码文件</summary>\n\n以下源码文件用于生成本页说明：\n\n- [libs/langchain-classic/src/tools/base.ts](https://github.com/langchain-ai/langchainjs/blob/main/libs/langchain-classic/src/tools/base.ts)\n- [libs/langchain-classic/src/tools/dynamic.ts](https://github.com/langchain-ai/langchainjs/blob/main/libs/langchain-classic/src/tools/dynamic.ts)\n- [libs/langchain-classic/src/tools/sql.ts](https://github.com/langchain-ai/langchainjs/blob/main/libs/langchain-classic/src/tools/sql.ts)\n- [libs/langchain-classic/src/tools/webbrowser.ts](https://github.com/langchain-ai/langchainjs/blob/main/libs/langchain-classic/src/tools/webbrowser.ts)\n- [libs/langchain-classic/src/agents/toolkits/sql/sql.ts](https://github.com/langchain-ai/langchainjs/blob/main/libs/langchain-classic/src/agents/toolkits/sql/sql.ts)\n- 
[libs/langchain-classic/src/agents/toolkits/openapi/openapi.ts](https://github.com/langchain-ai/langchainjs/blob/main/libs/langchain-classic/src/agents/toolkits/openapi/openapi.ts)\n- [libs/langchain-classic/src/agents/toolkits/vectorstore/vectorstore.ts](https://github.com/langchain-ai/langchainjs/blob/main/libs/langchain-classic/src/agents/toolkits/vectorstore/vectorstore.ts)\n- [libs/providers/langchain-openai/src/tools/index.ts](https://github.com/langchain-ai/langchainjs/blob/main/libs/providers/langchain-openai/src/tools/index.ts)\n</details>\n\n# Tools and Toolkits\n\n## Overview\n\nIn LangChain.js, **Tools** and **Toolkits** form the fundamental building blocks that enable Large Language Models (LLMs) to interact with external systems, APIs, databases, and web resources. Tools provide a standardized interface for defining callable functions that models can invoke during their reasoning process, while Toolkits bundle collections of related tools into cohesive units optimized for specific use cases.\n\nThe architecture follows a clear separation between individual tool implementations and composed toolkit aggregations, allowing developers to use built-in tools, create custom tools, or assemble pre-built toolkits for common workflows.\n\n## Core Concepts\n\n### What Are Tools?\n\nTools in LangChain.js are essentially callable functions with structured metadata that enables LLMs to understand when and how to use them. Each tool consists of:\n\n| Component | Description |\n|-----------|-------------|\n| **Name** | Unique identifier for the tool |\n| **Description** | Human-readable explanation of tool purpose |\n| **Input Schema** | Zod schema defining acceptable parameters |\n| **Function** | Actual implementation that executes the tool logic |\n\n### What Are Toolkits?\n\nToolkits are pre-configured collections of related tools designed to work together for specific domains or workflows. 
They provide:\n\n- Optimized tool combinations for common tasks\n- Shared initialization logic and configuration\n- Coordinated error handling and retry strategies\n- Consistent authentication patterns where applicable\n\n## Tool Architecture\n\n### Base Tool Implementation\n\nThe foundational tool abstraction in LangChain.js is defined in `libs/langchain-classic/src/tools/base.ts`. All custom and built-in tools inherit from or conform to this interface.\n\n```mermaid\ngraph TD\n    A[BaseTool Abstract Class] --> B[StructuredTool]\n    A --> C[Tool Interface]\n    B --> D[Custom Tool Implementations]\n    C --> E[Dynamic Tool]\n    D --> F[SQL Tool]\n    D --> G[Web Browser Tool]\n```\n\n### Tool Creation Patterns\n\nLangChain.js supports multiple patterns for creating tools:\n\n#### 1. Function-Based Tool Definition\n\nThe `tool()` function from `@langchain/core/tools` provides a concise API for defining tools:\n\n```typescript\nimport { tool } from \"@langchain/core/tools\";\nimport { z } from \"zod\";\n\nconst getWeather = tool(\n  async (input) => `Weather in ${input.location}`,\n  {\n    name: \"get_weather\",\n    description: \"Get weather for a location\",\n    schema: z.object({ location: z.string() }),\n  }\n);\n```\n\n#### 2. Dynamic Tool Creation\n\nDynamic tools allow runtime tool creation, useful for scenarios where tool definitions are not known at compile time. The implementation in `libs/langchain-classic/src/tools/dynamic.ts` provides the `DynamicTool` class.\n\n资料来源：[libs/langchain-classic/src/tools/dynamic.ts:1-50]()\n\n#### 3. 
Tool with Extra Parameters\n\nTools can include additional configuration through the `extras` field:\n\n```typescript\nconst deferredTool = tool(\n  async (input) => `Result: ${input.query}`,\n  {\n    name: \"deferred_search\",\n    description: \"Search tool with deferred loading\",\n    schema: z.object({ query: z.string() }),\n    extras: { defer_loading: true },\n  }\n);\n```\n\n资料来源：[libs/providers/langchain-openai/src/tools/index.ts:1-30]()\n\n## Built-in Tools\n\n### SQL Tool\n\nThe SQL Tool enables LLM agents to query relational databases using natural language. It accepts raw SQL queries and returns structured results.\n\n| Property | Type | Description |\n|----------|------|-------------|\n| `db` | Database connection | Database instance to query |\n| `customQueryFunction` | Function | Optional custom query handler |\n| `ignoreInsertOnly` | boolean | Skip INSERT operations |\n\n资料来源：[libs/langchain-classic/src/tools/sql.ts:1-80]()\n\n### Web Browser Tool\n\nThe Web Browser Tool allows agents to navigate websites, extract content, and interact with web resources. It simulates browser operations including page navigation, content extraction, and element interaction.\n\n```mermaid\ngraph LR\n    A[User Query] --> B[WebBrowserTool]\n    B --> C{Action Type}\n    C -->|navigate| D[Load URL]\n    C -->|extract| E[Parse Content]\n    C -->|interact| F[Click/Type]\n    D --> G[HTML Response]\n    E --> H[Extracted Data]\n    F --> I[Interaction Result]\n```\n\n资料来源：[libs/langchain-classic/src/tools/webbrowser.ts:1-100]()\n\n## Toolkits\n\n### SQL Toolkit\n\nThe SQL Toolkit bundles tools for database operations, including query execution, table inspection, and schema understanding. 
Located at `libs/langchain-classic/src/agents/toolkits/sql/sql.ts`, it provides:\n\n| Tool | Purpose |\n|------|---------|\n| `info` | Describe table schema |\n| `tables` | List available tables |\n| `execute` | Run SQL queries |\n| `query_checker` | Validate SQL syntax |\n\n资料来源：[libs/langchain-classic/src/agents/toolkits/sql/sql.ts:1-150]()\n\n### OpenAPI Toolkit\n\nThe OpenAPI Toolkit enables agents to interact with REST APIs defined in OpenAPI specifications. It parses OpenAPI documents and generates corresponding tools.\n\n```mermaid\ngraph TD\n    A[OpenAPI Spec] --> B[OpenAPIToolkit]\n    B --> C[HTTP GET Tool]\n    B --> D[HTTP POST Tool]\n    B --> E[HTTP PUT Tool]\n    B --> F[HTTP DELETE Tool]\n    C --> G[API Response]\n    D --> G\n    E --> G\n    F --> G\n```\n\n资料来源：[libs/langchain-classic/src/agents/toolkits/openapi/openapi.ts:1-120]()\n\n### Vector Store Toolkit\n\nThe Vector Store Toolkit provides tools for similarity search and document retrieval from vector databases. This is commonly used in Retrieval Augmented Generation (RAG) pipelines.\n\n| Tool | Function |\n|------|----------|\n| `similarity_search` | Find similar documents |\n| `similarity_search_with_score` | Search with relevance scores |\n| `similarity_search_by_vector` | Search using embedding vectors |\n\n资料来源：[libs/langchain-classic/src/agents/toolkits/vectorstore/vectorstore.ts:1-100]()\n\n## Integration with Agents\n\nTools and Toolkits are primarily consumed by LangChain agents. 
The agent runtime coordinates tool selection, execution, and result handling.\n\n```mermaid\ngraph TD\n    A[User Input] --> B[Agent]\n    B --> C[LLM Reasoning]\n    C --> D{Tool Call?}\n    D -->|Yes| E[Select Tool]\n    E --> F[Execute Tool]\n    F --> G[Return Result]\n    G --> C\n    D -->|No| H[Final Response]\n```\n\n### Tool Calling Configuration\n\nAgents can be configured to use tools with various parameters:\n\n```typescript\nconst config: InterruptOnConfig = {\n  allowedDecisions: [\"approve\", \"edit\"],\n  description: formatToolDescription\n};\n```\n\n资料来源：[libs/langchain/src/agents/middleware/hitl.ts:1-50]()\n\n## Best Practices\n\n### Tool Design Guidelines\n\n1. **Clear Descriptions**: Write unambiguous tool descriptions that help the LLM understand when to invoke the tool\n2. **Strict Schemas**: Use Zod schemas to validate inputs and prevent errors\n3. **Error Handling**: Return meaningful error messages that guide the agent toward recovery\n4. **Idempotency**: Design tools to be safely re-callable when needed\n5. **Resource Management**: Properly dispose of connections and resources after tool execution\n\n### Toolkit Organization\n\n| Consideration | Recommendation |\n|---------------|----------------|\n| Scope | Bundle tools that share authentication or context |\n| Cohesion | Tools in a toolkit should serve a common goal |\n| Size | Avoid overly large toolkits; prefer composition |\n| Documentation | Document tool dependencies and initialization |\n\n## Conclusion\n\nTools and Toolkits in LangChain.js provide a powerful abstraction for extending LLM capabilities beyond text generation. 
By leveraging the standardized tool interface, developers can create reusable components that integrate seamlessly with agent workflows, enabling sophisticated AI applications that interact with databases, APIs, web resources, and more.\n\n---\n\n<a id='vector-stores'></a>\n\n## Vector Stores\n\n### 相关页面\n\n相关主题：[Embeddings Integration](#embeddings)\n\n<details>\n<summary>相关源码文件</summary>\n\n以下源码文件用于生成本页说明：\n\n- [libs/providers/langchain-pinecone/src/vectorstores.ts](https://github.com/langchain-ai/langchainjs/blob/main/libs/providers/langchain-pinecone/src/vectorstores.ts)\n- [libs/providers/langchain-qdrant/src/vectorstores.ts](https://github.com/langchain-ai/langchainjs/blob/main/libs/providers/langchain-qdrant/src/vectorstores.ts)\n- [libs/providers/langchain-weaviate/src/vectorstores.ts](https://github.com/langchain-ai/langchainjs/blob/main/libs/providers/langchain-weaviate/src/vectorstores.ts)\n- [libs/providers/langchain-redis/src/vectorstores.ts](https://github.com/langchain-ai/langchainjs/blob/main/libs/providers/langchain-redis/src/vectorstores.ts)\n- [libs/providers/langchain-mongodb/src/vectorstores.ts](https://github.com/langchain-ai/langchainjs/blob/main/libs/providers/langchain-mongodb/src/vectorstores.ts)\n- [libs/providers/langchain-neo4j/src/vectorstores/neo4j_vector.ts](https://github.com/langchain-ai/langchainjs/blob/main/libs/providers/langchain-neo4j/src/vectorstores/neo4j_vector.ts)\n- [libs/providers/langchain-pgvector/src/vectorstores.ts](https://github.com/langchain-ai/langchainjs/blob/main/libs/providers/langchain-pgvector/src/vectorstores.ts)\n- [libs/langchain-classic/src/vectorstores/memory.ts](https://github.com/langchain-ai/langchainjs/blob/main/libs/langchain-classic/src/vectorstores/memory.ts)\n</details>\n\n# Vector Stores\n\nVector stores are a core component in LangChain.js that enable efficient similarity search over embedded data. 
They serve as the persistence layer for vector embeddings in retrieval-augmented generation (RAG) pipelines, allowing developers to store, index, and query high-dimensional vectors representing documents, text chunks, or other data.\n\n## Overview\n\nLangChain.js provides a unified interface for interacting with multiple vector database backends through the `@langchain/core` package. This abstraction enables developers to switch between different vector store implementations without changing their application code.\n\n```mermaid\ngraph TD\n    A[Documents] --> B[Text Embedding Models]\n    B --> C[Vector Embeddings]\n    C --> D[Vector Store]\n    E[Query] --> F[Embed Query]\n    F --> G[Similarity Search]\n    G --> H[Retrieved Documents]\n    \n    D --> I[Pinecone]\n    D --> J[Qdrant]\n    D --> K[Weaviate]\n    D --> L[Redis]\n    D --> M[MongoDB]\n    D --> N[Neo4j]\n    D --> O[PGVector]\n    D --> P[Memory]\n```\n\n## Supported Vector Stores\n\nLangChain.js integrates with the following vector database providers:\n\n| Provider | Package | Key Features |\n|----------|---------|--------------|\n| Pinecone | `@langchain/pinecone` | Managed cloud service, serverless indexes |\n| Qdrant | `@langchain/qdrant` | Open-source, hybrid filtering |\n| Weaviate | `@langchain/weaviate` | GraphQL API, modular architecture |\n| Redis | `@langchain/redis` | In-memory, pub/sub capabilities |\n| MongoDB | `@langchain/mongodb` | Document-based, Atlas vector search |\n| Neo4j | `@langchain/neo4j` | Graph database, knowledge graphs |\n| PGVector | `@langchain/pgvector` | PostgreSQL extension, SQL compatibility |\n| Memory | `@langchain/classic` | In-memory, no external dependencies 
|\n\n资料来源：[libs/providers/langchain-pinecone/README.md](https://github.com/langchain-ai/langchainjs/blob/main/libs/providers/langchain-pinecone/README.md)\n资料来源：[libs/providers/langchain-qdrant/README.md](https://github.com/langchain-ai/langchainjs/blob/main/libs/providers/langchain-qdrant/README.md)\n\n## Core Interface\n\nAll vector stores in LangChain.js implement a common interface defined in `@langchain/core/vectorstores`. This interface ensures consistency across different implementations while allowing provider-specific features.\n\n### Key Methods\n\n| Method | Description |\n|--------|-------------|\n| `addDocuments()` | Add documents with embeddings to the store |\n| `similaritySearch()` | Find similar documents using cosine similarity |\n| `similaritySearchVectorWithScore()` | Search with relevance scores |\n| `delete()` | Remove documents by IDs |\n| `fromTexts()` | Static factory for text-based initialization |\n\n资料来源：[libs/langchain-classic/src/vectorstores/memory.ts](https://github.com/langchain-ai/langchainjs/blob/main/libs/langchain-classic/src/vectorstores/memory.ts)\n\n## Usage Patterns\n\n### Basic Similarity Search\n\nThe following example demonstrates the typical workflow for performing similarity search using the `MemoryVectorStore`:\n\n```typescript\nimport { MemoryVectorStore } from 'langchain/vectorstores/memory';\nimport { OpenAIEmbeddings } from '@langchain/openai';\n\nconst embeddings = new OpenAIEmbeddings({\n  model: \"text-embedding-3-small\",\n});\n\nconst vectorStore = new MemoryVectorStore(embeddings);\n\n// Add documents\nconst document1 = { pageContent: \"foo\", metadata: { baz: \"bar\" } };\nconst document2 = { pageContent: \"thud\", metadata: { bar: \"baz\" } };\nconst documents = [document1, document2];\n\nawait vectorStore.addDocuments(documents);\n\n// Perform similarity search\nconst results = await vectorStore.similaritySearch(\"thud\", 1);\n\nfor (const doc of results) {\n  console.log(`* ${doc.pageContent} 
[${JSON.stringify(doc.metadata, null)}]`);\n}\n```\n\n资料来源：[libs/langchain-classic/src/vectorstores/memory.ts](https://github.com/langchain-ai/langchainjs/blob/main/libs/langchain-classic/src/vectorstores/memory.ts)\n\n### Installation\n\nEach vector store package has specific installation requirements:\n\n#### Pinecone\n\n```bash\nnpm install @langchain/pinecone @langchain/core @pinecone-database/pinecone\n```\n\n资料来源：[libs/providers/langchain-pinecone/README.md](https://github.com/langchain-ai/langchainjs/blob/main/libs/providers/langchain-pinecone/README.md)\n\n#### Qdrant\n\n```bash\nnpm install @langchain/qdrant\n```\n\n资料来源：[libs/providers/langchain-qdrant/README.md](https://github.com/langchain-ai/langchainjs/blob/main/libs/providers/langchain-qdrant/README.md)\n\n#### PGVector\n\nThe PostgreSQL vector extension requires a running PostgreSQL instance with `pgvector` installed:\n\n```bash\nnpm install @langchain/pgvector pg\n```\n\n## Architecture\n\n### Document Model\n\nLangChain.js uses a standardized `Document` interface for representing content:\n\n```typescript\ninterface Document {\n  pageContent: string;    // The text content\n  metadata: Record<string, any>;  // Associated metadata\n}\n```\n\nDocuments are processed through embedding models to generate vector representations before storage.\n\n### Embedding Integration\n\nVector stores work in conjunction with embedding models from `@langchain/core` or provider-specific embedding implementations. The embedding model is passed during vector store initialization and is used to:\n\n1. Generate vectors for incoming documents during `addDocuments()`\n2. 
Generate vectors for query strings during search operations\n\n```mermaid\nsequenceDiagram\n    participant App as Application\n    participant VS as Vector Store\n    participant EM as Embedding Model\n    participant DB as Database\n    \n    App->>VS: addDocuments(documents)\n    VS->>EM: embedDocuments(documents)\n    EM-->>VS: embeddings\n    VS->>DB: store(embeddings + docs)\n    \n    App->>VS: similaritySearch(query)\n    VS->>EM: embedQuery(query)\n    EM-->>VS: queryVector\n    VS->>DB: findSimilar(queryVector)\n    DB-->>VS: results\n    VS-->>App: documents\n```\n\n## Provider-Specific Implementations\n\n### Neo4j Vector Store\n\nThe Neo4j integration leverages graph database capabilities for vector storage, enabling hybrid queries that combine vector similarity with graph traversal:\n\n```typescript\nimport { Neo4jVectorStore } from '@langchain/neo4j';\n```\n\n资料来源：[libs/providers/langchain-neo4j/src/vectorstores/neo4j_vector.ts](https://github.com/langchain-ai/langchainjs/blob/main/libs/providers/langchain-neo4j/src/vectorstores/neo4j_vector.ts)\n\n### Weaviate\n\nWeaviate provides hybrid search capabilities combining vector similarity with keyword matching:\n\n```typescript\nimport { WeaviateVectorStore } from '@langchain/weaviate';\n```\n\n资料来源：[libs/providers/langchain-weaviate/src/vectorstores.ts](https://github.com/langchain-ai/langchainjs/blob/main/libs/providers/langchain-weaviate/src/vectorstores.ts)\n\n### Redis\n\nRedis vector store offers high-performance in-memory vector operations with built-in pub/sub capabilities:\n\n```typescript\nimport { RedisVectorStore } from '@langchain/redis';\n```\n\n资料来源：[libs/providers/langchain-redis/src/vectorstores.ts](https://github.com/langchain-ai/langchainjs/blob/main/libs/providers/langchain-redis/src/vectorstores.ts)\n\n### MongoDB Atlas\n\nMongoDB Atlas provides vector search through its Atlas Search feature, integrated via the official MongoDB driver:\n\n```typescript\nimport { 
MongoDBAtlasVectorSearch } from '@langchain/mongodb';\n```\n\n资料来源：[libs/providers/langchain-mongodb/src/vectorstores.ts](https://github.com/langchain-ai/langchainjs/blob/main/libs/providers/langchain-mongodb/src/vectorstores.ts)\n\n## Development Guidelines\n\n### Adding New Vector Stores\n\nLangChain.js provides a template for creating new vector store integrations. See the official integration template for the required structure:\n\n资料来源：[libs/create-langchain-integration/template/README.md](https://github.com/langchain-ai/langchainjs/blob/main/libs/create-langchain-integration/template/README.md)\n\n### Package Structure\n\nEach vector store package should follow the LangChain.js monorepo conventions:\n\n| Directory/File | Purpose |\n|---------------|---------|\n| `src/index.ts` | Main exports |\n| `src/vectorstores.ts` | Vector store implementation |\n| `tests/` | Unit and integration tests |\n| `README.md` | Documentation and usage examples |\n\n### Testing\n\nVector store implementations require both unit tests (`.test.ts`) and integration tests (`.int.test.ts`):\n\n```bash\npnpm test\npnpm test:int\n```\n\n## Common Configuration Options\n\nWhile specific options vary by provider, most vector stores support these common parameters:\n\n| Parameter | Type | Description |\n|-----------|------|-------------|\n| `embeddings` | `Embeddings` | The embedding model to use |\n| `textKey` | `string` | Field name for text content |\n| `embeddingKey` | `string` | Field name for embeddings |\n| `namespace` | `string` | Optional namespace isolation |\n| `filter` | `Record<string, any>` | Metadata filtering criteria |\n\n## Best Practices\n\n1. **Embedding Model Consistency**: Always use the same embedding model for indexing and querying to ensure vector compatibility.\n\n2. **Metadata Indexing**: Include relevant metadata during document storage to enable filtered searches.\n\n3. 
**Batch Operations**: Use batch operations (`addDocuments`) rather than single document additions for better performance.\n\n4. **Connection Management**: Follow provider-specific connection pooling and timeout configurations in production environments.\n\n5. **Dimension Matching**: Ensure your embedding model dimensions match the vector store's expected configuration.\n\n## Related Components\n\nVector stores are typically used in combination with:\n\n- **Document Loaders**: Load documents from various sources\n- **Text Splitters**: Split large documents into searchable chunks\n- **Retrieval Chains**: Combine vector stores with language models for RAG\n- **Memory Systems**: Persist conversation context using vector storage\n\n---\n\n<a id='memory-system'></a>\n\n## Memory Systems\n\n### 相关页面\n\n相关主题：[Agent Framework](#agent-framework)\n\nI cannot generate an accurate technical wiki page about Memory Systems because the provided repository context does not contain the actual source files for memory implementations.\n\nThe requested source files are not present in the retrieved context:\n\n- `libs/langchain-classic/src/memory/buffer_memory.ts`\n- `libs/langchain-classic/src/memory/buffer_window_memory.ts`\n- `libs/langchain-classic/src/memory/summary_buffer.ts`\n- `libs/langchain-classic/src/memory/entity_memory.ts`\n- `libs/langchain-classic/src/memory/vector_store.ts`\n- `libs/langchain-core/src/memory.ts`\n- `libs/langchain-core/src/chat_history.ts`\n\nThe context only contains files related to:\n\n- HTML text splitting\n- Agent HITL middleware\n- Anthropic/OpenAI chat models\n- SHA256 hashing utilities\n- Vector store memory (partial)\n- Computer use tools\n- Language model utilities\n- Agent streaming\n\nTo generate an accurate wiki page, I would need access to the actual memory module source files.\n\n---\n\n---\n\n## Doramagic 踩坑日志\n\n项目：langchain-ai/langchainjs\n\n摘要：发现 10 个潜在踩坑项，其中 1 个为 high/blocking；最高优先级：配置坑 - 来源证据：[Feature request] React Native 
support。\n\n## 1. 配置坑 · 来源证据：[Feature request] React Native support\n\n- 严重度：high\n- 证据强度：source_linked\n- 发现：GitHub 社区证据显示该项目存在一个配置相关的待验证问题：[Feature request] React Native support\n- 对用户的影响：可能增加新用户试用和生产接入成本。\n- 建议检查：来源问题仍为 open，Pack Agent 需要复核是否仍影响当前版本。\n- 防护动作：不得脱离来源链接放大为确定性结论；需要标注适用版本和复核状态。\n- 证据：community_evidence:github | cevd_39933028ef894033b30ff784e81f185f | https://github.com/langchain-ai/langchainjs/issues/4239 | 来源类型 github_issue 暴露的待验证使用条件。\n\n## 2. 安装坑 · 来源证据：@langchain/core@1.1.46\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：GitHub 社区证据显示该项目存在一个安装相关的待验证问题：@langchain/core@1.1.46\n- 对用户的影响：可能增加新用户试用和生产接入成本。\n- 建议检查：来源显示可能已有修复、规避或版本变化，说明书中必须标注适用版本。\n- 防护动作：不得脱离来源链接放大为确定性结论；需要标注适用版本和复核状态。\n- 证据：community_evidence:github | cevd_36a7a58d5cd84bda8dde7918402a6f8a | https://github.com/langchain-ai/langchainjs/releases/tag/%40langchain/core%401.1.46 | 来源讨论提到 npm 相关条件，需在安装/试用前复核。\n\n## 3. 配置坑 · 来源证据：Must pass in at least 1 record to upsert.\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：GitHub 社区证据显示该项目存在一个配置相关的待验证问题：Must pass in at least 1 record to upsert.\n- 对用户的影响：可能增加新用户试用和生产接入成本。\n- 建议检查：来源问题仍为 open，Pack Agent 需要复核是否仍影响当前版本。\n- 防护动作：不得脱离来源链接放大为确定性结论；需要标注适用版本和复核状态。\n- 证据：community_evidence:github | cevd_26c3acaad9e14ed3953206f25870c0b0 | https://github.com/langchain-ai/langchainjs/issues/10890 | 来源讨论提到 node 相关条件，需在安装/试用前复核。\n\n## 4. 能力坑 · 能力判断依赖假设\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：README/documentation is current enough for a first validation pass.\n- 对用户的影响：假设不成立时，用户拿不到承诺的能力。\n- 建议检查：将假设转成下游验证清单。\n- 防护动作：假设必须转成验证项；没有验证结果前不能写成事实。\n- 证据：capability.assumptions | github_repo:598342280 | https://github.com/langchain-ai/langchainjs | README/documentation is current enough for a first validation pass.\n\n## 5. 
维护坑 · 维护活跃度未知\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：未记录 last_activity_observed。\n- 对用户的影响：新项目、停更项目和活跃项目会被混在一起，推荐信任度下降。\n- 建议检查：补 GitHub 最近 commit、release、issue/PR 响应信号。\n- 防护动作：维护活跃度未知时，推荐强度不能标为高信任。\n- 证据：evidence.maintainer_signals | github_repo:598342280 | https://github.com/langchain-ai/langchainjs | last_activity_observed missing\n\n## 6. 安全/权限坑 · 下游验证发现风险项\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：no_demo\n- 对用户的影响：下游已经要求复核，不能在页面中弱化。\n- 建议检查：进入安全/权限治理复核队列。\n- 防护动作：下游风险存在时必须保持 review/recommendation 降级。\n- 证据：downstream_validation.risk_items | github_repo:598342280 | https://github.com/langchain-ai/langchainjs | no_demo; severity=medium\n\n## 7. 安全/权限坑 · 存在评分风险\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：no_demo\n- 对用户的影响：风险会影响是否适合普通用户安装。\n- 建议检查：把风险写入边界卡，并确认是否需要人工复核。\n- 防护动作：评分风险必须进入边界卡，不能只作为内部分数。\n- 证据：risks.scoring_risks | github_repo:598342280 | https://github.com/langchain-ai/langchainjs | no_demo; severity=medium\n\n## 8. 安全/权限坑 · 来源证据：bug(@langchain/openai): Bare JSON.parse in Responses API converter crashes on structured output with trailing characters\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：GitHub 社区证据显示该项目存在一个安全/权限相关的待验证问题：bug(@langchain/openai): Bare JSON.parse in Responses API converter crashes on structured output with trailing characters\n- 对用户的影响：可能阻塞安装或首次运行。\n- 建议检查：来源问题仍为 open，Pack Agent 需要复核是否仍影响当前版本。\n- 防护动作：不得脱离来源链接放大为确定性结论；需要标注适用版本和复核状态。\n- 证据：community_evidence:github | cevd_4d2a6bed33284a3cbd2f3319321d9e4c | https://github.com/langchain-ai/langchainjs/issues/10894 | 来源讨论提到 node 相关条件，需在安装/试用前复核。\n\n## 9. 维护坑 · issue/PR 响应质量未知\n\n- 严重度：low\n- 证据强度：source_linked\n- 发现：issue_or_pr_quality=unknown。\n- 对用户的影响：用户无法判断遇到问题后是否有人维护。\n- 建议检查：抽样最近 issue/PR，判断是否长期无人处理。\n- 防护动作：issue/PR 响应未知时，必须提示维护风险。\n- 证据：evidence.maintainer_signals | github_repo:598342280 | https://github.com/langchain-ai/langchainjs | issue_or_pr_quality=unknown\n\n## 10. 
维护坑 · 发布节奏不明确\n\n- 严重度：low\n- 证据强度：source_linked\n- 发现：release_recency=unknown。\n- 对用户的影响：安装命令和文档可能落后于代码，用户踩坑概率升高。\n- 建议检查：确认最近 release/tag 和 README 安装命令是否一致。\n- 防护动作：发布节奏未知或过期时，安装说明必须标注可能漂移。\n- 证据：evidence.maintainer_signals | github_repo:598342280 | https://github.com/langchain-ai/langchainjs | release_recency=unknown\n\n<!-- canonical_name: langchain-ai/langchainjs; human_manual_source: deepwiki_human_wiki -->\n",
      "markdown_key": "langchainjs",
      "pages": "draft",
      "source_refs": [
        {
          "evidence_id": "github_repo:598342280",
          "kind": "repo",
          "supports_claim_ids": [
            "claim_identity",
            "claim_distribution",
            "claim_capability"
          ],
          "url": "https://github.com/langchain-ai/langchainjs"
        },
        {
          "evidence_id": "art_73d783e5de2540acb8b7d277070227b6",
          "kind": "docs",
          "supports_claim_ids": [
            "claim_identity",
            "claim_distribution",
            "claim_capability"
          ],
          "url": "https://github.com/langchain-ai/langchainjs#readme"
        }
      ],
      "summary": "DeepWiki/Human Wiki 完整输出，末尾追加 Discovery Agent 踩坑日志。",
      "title": "langchainjs 说明书",
      "toc": [
        "https://github.com/langchain-ai/langchainjs 项目说明书",
        "目录",
        "Introduction to LangChain.js",
        "Architecture Overview",
        "Core Packages",
        "Provider Integrations",
        "Text Processing",
        "Testing Infrastructure",
        "Doramagic 踩坑日志"
      ]
    }
  },
  "quality_gate": {
    "blocking_gaps": [],
    "category_confidence": "medium",
    "compile_status": "ready_for_review",
    "five_assets_present": true,
    "install_sandbox_verified": true,
    "missing_evidence": [],
    "next_action": "publish to Doramagic.ai project surfaces",
    "prompt_preview_boundary_ok": true,
    "publish_status": "publishable",
    "quick_start_verified": true,
    "repo_clone_verified": true,
    "repo_commit": "0cfcfc66897d8fafeb7e7ed90b7299eace9a7c37",
    "repo_inspection_error": null,
    "repo_inspection_files": [
      "pnpm-lock.yaml",
      "package.json",
      "README.md",
      "docs/core_docs/README.md",
      "examples/package.json",
      "examples/openai_openapi.yaml",
      "examples/tsconfig.json",
      "examples/src/index.ts",
      "examples/src/README.md",
      "examples/src/extraction/openai_tool_calling_extraction.ts",
      "examples/src/multi-agent/handoffs.ts",
      "examples/src/multi-agent/skills-sql-assistant.ts",
      "examples/src/multi-agent/subagents-personal-assistant.ts",
      "examples/src/multi-agent/router-knowledge-base.ts",
      "examples/src/multi-agent/handoffs-customer-support.ts",
      "examples/src/cache/cloudflare_kv.ts",
      "examples/src/llms/googlevertexai.ts",
      "examples/src/llms/openai.ts",
      "examples/src/llms/azure_openai-chat.ts",
      "examples/src/llms/googlevertexai-streaming.ts",
      "examples/src/llms/azure_openai.ts",
      "examples/src/createAgent/customSystemPrompts.ts",
      "examples/src/createAgent/updateThreadLevelInTools.ts",
      "examples/src/createAgent/accessExternalContext.ts",
      "examples/src/createAgent/accessLongTermMemory.ts",
      "examples/src/createAgent/accessExternalContextInTools.ts",
      "examples/src/createAgent/tools.ts",
      "examples/src/createAgent/structuredOutput.ts",
      "examples/src/createAgent/updateModelBeforeCall.ts",
      "examples/src/createAgent/streaming.ts",
      "examples/src/createAgent/supervisor.ts",
      "examples/src/createAgent/updateThreadLevel.ts",
      "examples/src/createAgent/updateToolsBeforeModelCall.ts",
      "examples/src/createAgent/accessThreadLevelState.ts",
      "examples/src/createAgent/updateLongTermMemoryInTools.ts",
      "examples/src/createAgent/accessThreadLevelStateInTools.ts",
      "examples/src/createAgent/controlOverMessagePreparation.ts",
      "examples/src/createAgent/accessLongTermMemoryInTools.ts",
      "examples/src/provider/anthropic/memory.ts",
      "examples/src/langchain-classic/chains/api_chain.ts"
    ],
    "repo_inspection_verified": true,
    "review_reasons": [],
    "tag_count_ok": true,
    "unsupported_claims": []
  },
  "schema_version": "0.1",
  "user_assets": {
    "ai_context_pack": {
      "asset_id": "ai_context_pack",
      "filename": "AI_CONTEXT_PACK.md",
      "markdown": "# langchainjs - Doramagic AI Context Pack\n\n> 定位：安装前体验与判断资产。它帮助宿主 AI 有一个好的开始，但不代表已经安装、执行或验证目标项目。\n\n## 充分原则\n\n- **充分原则，不是压缩原则**：AI Context Pack 应该充分到让宿主 AI 在开工前理解项目价值、能力边界、使用入口、风险和证据来源；它可以分层组织，但不以最短摘要为目标。\n- **压缩策略**：只压缩噪声和重复内容，不压缩会影响判断和开工质量的上下文。\n\n## 给宿主 AI 的使用方式\n\n你正在读取 Doramagic 为 langchainjs 编译的 AI Context Pack。请把它当作开工前上下文：帮助用户理解适合谁、能做什么、如何开始、哪些必须安装后验证、风险在哪里。不要声称你已经安装、运行或执行了目标项目。\n\n## Claim 消费规则\n\n- **事实来源**：Repo Evidence + Claim/Evidence Graph；Human Wiki 只提供显著性、术语和叙事结构。\n- **事实最低状态**：`supported`\n- `supported`：可以作为项目事实使用，但回答中必须引用 claim_id 和证据路径。\n- `weak`：只能作为低置信度线索，必须要求用户继续核实。\n- `inferred`：只能用于风险提示或待确认问题，不能包装成项目事实。\n- `unverified`：不得作为事实使用，应明确说证据不足。\n- `contradicted`：必须展示冲突来源，不得替用户强行选择一个版本。\n\n## 它最适合谁\n\n- **想在安装前理解开源项目价值和边界的用户**：当前证据主要来自项目文档。 证据：`README.md` Claim：`clm_0002` supported 0.86\n\n## 它能做什么\n\n- **命令行启动或安装流程**（需要安装后验证）：项目文档中存在可执行命令，真实使用需要在本地或宿主环境中运行这些命令。 证据：`AGENTS.md` Claim：`clm_0001` supported 0.86\n\n## 怎么开始\n\n- `npx create-langchain-integration` 证据：`AGENTS.md` Claim：`clm_0003` supported 0.86\n\n## 继续前判断卡\n\n- **当前建议**：先做角色匹配试用\n- **为什么**：这个项目更像角色库，核心风险是选错角色或把角色文案当执行能力；先用 Prompt Preview 试角色匹配，再决定是否沙盒导入。\n\n### 30 秒判断\n\n- **现在怎么做**：先做角色匹配试用\n- **最小安全下一步**：先用 Prompt Preview 试角色匹配；满意后再隔离导入\n- **先别相信**：角色质量和任务匹配不能直接相信。\n- **继续会触碰**：角色选择偏差、命令执行、宿主 AI 配置\n\n### 现在可以相信\n\n- **适合人群线索：想在安装前理解开源项目价值和边界的用户**（supported）：有 supported claim 或项目证据支撑，但仍不等于真实安装效果。 证据：`README.md` Claim：`clm_0002` supported 0.86\n- **能力存在：命令行启动或安装流程**（supported）：可以相信项目包含这类能力线索；是否适合你的具体任务仍要试用或安装后验证。 证据：`AGENTS.md` Claim：`clm_0001` supported 0.86\n- **存在 Quick Start / 安装命令线索**（supported）：可以相信项目文档出现过启动或安装入口；不要因此直接在主力环境运行。 证据：`AGENTS.md` Claim：`clm_0003` supported 0.86\n\n### 现在还不能相信\n\n- **角色质量和任务匹配不能直接相信。**（unverified）：角色库证明有很多角色，不证明每个角色都适合你的具体任务，也不证明角色能产生高质量结果。\n- **不能把角色文案当成真实执行能力。**（unverified）：安装前只能判断角色描述和任务画像是否匹配，不能证明它能在宿主 AI 里完成任务。\n- **真实输出质量不能在安装前相信。**（unverified）：Prompt Preview 只能展示引导方式，不能证明真实项目中的结果质量。\n- **宿主 AI 
版本兼容性不能在安装前相信。**（unverified）：Claude、Cursor、Codex、Gemini 等宿主加载规则和版本差异必须在真实环境验证。\n- **不会污染现有宿主 AI 行为，不能直接相信。**（inferred）：Skill、plugin、AGENTS/CLAUDE/GEMINI 指令可能改变宿主 AI 的默认行为。 证据：`AGENTS.md`\n- **可安全回滚不能默认相信。**（unverified）：除非项目明确提供卸载和恢复说明，否则必须先在隔离环境验证。\n- **真实安装后是否与用户当前宿主 AI 版本兼容？**（unverified）：兼容性只能通过实际宿主环境验证。\n- **项目输出质量是否满足用户具体任务？**（unverified）：安装前预览只能展示流程和边界，不能替代真实评测。\n\n### 继续会触碰什么\n\n- **角色选择偏差**：用户对任务应该由哪个专家角色处理的判断。 原因：选错角色会让 AI 从错误专业视角回答，浪费时间或误导决策。\n- **命令执行**：包管理器、网络下载、本地插件目录、项目配置或用户主目录。 原因：运行第一条命令就可能产生环境改动；必须先判断是否值得跑。 证据：`AGENTS.md`\n- **宿主 AI 配置**：Claude/Codex/Cursor/Gemini/OpenCode 等宿主的 plugin、Skill 或规则加载配置。 原因：宿主配置会改变 AI 后续工作方式，可能和用户已有规则冲突。 证据：`AGENTS.md`\n- **本地环境或项目文件**：安装结果、插件缓存、项目配置或本地依赖目录。 原因：安装前无法证明写入范围和回滚方式，需要隔离验证。 证据：`AGENTS.md`\n- **宿主 AI 上下文**：AI Context Pack、Prompt Preview、Skill 路由、风险规则和项目事实。 原因：导入上下文会影响宿主 AI 后续判断，必须避免把未验证项包装成事实。\n\n### 最小安全下一步\n\n- **先跑 Prompt Preview**：先用交互式试用验证任务画像和角色匹配，不要先导入整套角色库。（适用：任何项目都适用，尤其是输出质量未知时。）\n- **只在隔离目录或测试账号试装**：避免安装命令污染主力宿主 AI、真实项目或用户主目录。（适用：存在命令执行、插件配置或本地写入线索时。）\n- **先备份宿主 AI 配置**：Skill、plugin、规则文件可能改变 Claude/Cursor/Codex 的默认行为。（适用：存在插件 manifest、Skill 或宿主规则入口时。）\n- **安装后只验证一个最小任务**：先验证加载、兼容、输出质量和回滚，再决定是否深用。（适用：准备从试用进入真实工作流时。）\n\n### 退出方式\n\n- **保留安装前状态**：记录原始宿主配置和项目状态，后续才能判断是否可恢复。\n- **准备移除宿主 plugin / Skill / 规则入口**：如果试装后行为异常，可以把宿主 AI 恢复到试装前状态。\n- **保留原始角色选择记录**：如果输出偏题，可以回到任务画像阶段重新选择角色，而不是继续沿着错误角色推进。\n- **记录安装命令和写入路径**：没有明确卸载说明时，至少要知道哪些目录或配置需要手动清理。\n- **如果没有回滚路径，不进入主力环境**：不可回滚是继续前阻断项，不应靠信任或运气继续。\n\n## 哪些只能预览\n\n- 解释项目适合谁和能做什么\n- 基于项目文档演示典型对话流程\n- 帮助用户判断是否值得安装或继续研究\n\n## 哪些必须安装后验证\n\n- 真实安装 Skill、插件或 CLI\n- 执行脚本、修改本地文件或访问外部服务\n- 验证真实输出质量、性能和兼容性\n\n## 边界与风险判断卡\n\n- **把安装前预览误认为真实运行**：用户可能高估项目已经完成的配置、权限和兼容性验证。 处理方式：明确区分 prompt_preview_can_do 与 runtime_required。 Claim：`clm_0004` inferred 0.45\n- **命令执行会修改本地环境**：安装命令可能写入用户主目录、宿主插件目录或项目配置。 处理方式：先在隔离环境或测试账号中运行。 证据：`AGENTS.md` Claim：`clm_0005` supported 0.86\n- **待确认**：真实安装后是否与用户当前宿主 AI 版本兼容？原因：兼容性只能通过实际宿主环境验证。\n- 
**待确认**：项目输出质量是否满足用户具体任务？原因：安装前预览只能展示流程和边界，不能替代真实评测。\n- **待确认**：安装命令是否需要网络、权限或全局写入？原因：这影响企业环境和个人环境的安装风险。\n\n## 开工前工作上下文\n\n### 加载顺序\n\n- 先读取 how_to_use.host_ai_instruction，建立安装前判断资产的边界。\n- 读取 claim_graph_summary，确认事实来自 Claim/Evidence Graph，而不是 Human Wiki 叙事。\n- 再读取 intended_users、capabilities 和 quick_start_candidates，判断用户是否匹配。\n- 需要执行具体任务时，优先查 role_skill_index，再查 evidence_index。\n- 遇到真实安装、文件修改、网络访问、性能或兼容性问题时，转入 risk_card 和 boundaries.runtime_required。\n\n### 任务路由\n\n- **命令行启动或安装流程**：先说明这是安装后验证能力，再给出安装前检查清单。 边界：必须真实安装或运行后验证。 证据：`AGENTS.md` Claim：`clm_0001` supported 0.86\n\n### 上下文规模\n\n- 文件总数：2518\n- 重要文件覆盖：40/2518\n- 证据索引条目：80\n- 角色 / Skill 条目：50\n\n### 证据不足时的处理\n\n- **missing_evidence**：说明证据不足，要求用户提供目标文件、README 段落或安装后验证记录；不要补全事实。\n- **out_of_scope_request**：说明该任务超出当前 AI Context Pack 证据范围，并建议用户先查看 Human Manual 或真实安装后验证。\n- **runtime_request**：给出安装前检查清单和命令来源，但不要替用户执行命令或声称已执行。\n- **source_conflict**：同时展示冲突来源，标记为待核实，不要强行选择一个版本。\n\n## Prompt Recipes\n\n### 适配判断\n\n- 目标：判断这个项目是否适合用户当前任务。\n- 预期输出：适配结论、关键理由、证据引用、安装前可预览内容、必须安装后验证内容、下一步建议。\n\n```text\n请基于 langchainjs 的 AI Context Pack，先问我 3 个必要问题，然后判断它是否适合我的任务。回答必须包含：适合谁、能做什么、不能做什么、是否值得安装、证据来自哪里。所有项目事实必须引用 evidence_refs、source_paths 或 claim_id。\n```\n\n### 安装前体验\n\n- 目标：让用户在安装前感受核心工作流，同时避免把预览包装成真实能力或营销承诺。\n- 预期输出：一段带边界标签的体验剧本、安装后验证清单和谨慎建议；不含真实运行承诺或强营销表述。\n\n```text\n请把 langchainjs 当作安装前体验资产，而不是已安装工具或真实运行环境。\n\n请严格输出四段：\n1. 先问我 3 个必要问题。\n2. 给出一段“体验剧本”：用 [安装前可预览]、[必须安装后验证]、[证据不足] 三种标签展示它可能如何引导工作流。\n3. 给出安装后验证清单：列出哪些能力只有真实安装、真实宿主加载、真实项目运行后才能确认。\n4. 
给出谨慎建议：只能说“值得继续研究/试装”“先补充信息后再判断”或“不建议继续”，不得替项目背书。\n\n硬性边界：\n- 不要声称已经安装、运行、执行测试、修改文件或产生真实结果。\n- 不要写“自动适配”“确保通过”“完美适配”“强烈建议安装”等承诺性表达。\n- 如果描述安装后的工作方式，必须使用“如果安装成功且宿主正确加载 Skill，它可能会……”这种条件句。\n- 体验剧本只能写成“示例台词/假设流程”：使用“可能会询问/可能会建议/可能会展示”，不要写“已写入、已生成、已通过、正在运行、正在生成”。\n- Prompt Preview 不负责给安装命令；如用户准备试装，只能提示先阅读 Quick Start 和 Risk Card，并在隔离环境验证。\n- 所有项目事实必须来自 supported claim、evidence_refs 或 source_paths；inferred/unverified 只能作风险或待确认项。\n\n```\n\n### 角色 / Skill 选择\n\n- 目标：从项目里的角色或 Skill 中挑选最匹配的资产。\n- 预期输出：候选角色或 Skill 列表，每项包含适用场景、证据路径、风险边界和是否需要安装后验证。\n\n```text\n请读取 role_skill_index，根据我的目标任务推荐 3-5 个最相关的角色或 Skill。每个推荐都要说明适用场景、可能输出、风险边界和 evidence_refs。\n```\n\n### 风险预检\n\n- 目标：安装或引入前识别环境、权限、规则冲突和质量风险。\n- 预期输出：环境、权限、依赖、许可、宿主冲突、质量风险和未知项的检查清单。\n\n```text\n请基于 risk_card、boundaries 和 quick_start_candidates，给我一份安装前风险预检清单。不要替我执行命令，只说明我应该检查什么、为什么检查、失败会有什么影响。\n```\n\n### 宿主 AI 开工指令\n\n- 目标：把项目上下文转成一次对话开始前的宿主 AI 指令。\n- 预期输出：一段边界明确、证据引用明确、适合复制给宿主 AI 的开工前指令。\n\n```text\n请基于 langchainjs 的 AI Context Pack，生成一段我可以粘贴给宿主 AI 的开工前指令。这段指令必须遵守 not_runtime=true，不能声称项目已经安装、运行或产生真实结果。\n```\n\n\n## 角色 / Skill 索引\n\n- 共索引 50 个角色 / Skill / 项目文档条目。\n\n- **Readme**（project_doc）：These docs have moved! See https://docs.langchain.com/oss/javascript/ Repo https://github.com/langchain-ai/docs 激活提示：当用户需要理解项目结构、安装方式或边界时参考。 证据：`docs/core_docs/README.md`\n- **Changesets**（project_doc）：Hello and welcome! This folder has been automatically generated by @changesets/cli , a build tool that works with multi-package repos, or single-package repos to help you version and publish your code. You can find the full documentation for it in our repository https://github.com/changesets/changesets 激活提示：当用户需要理解项目结构、安装方式或边界时参考。 证据：`.changeset/README.md`\n- **Dev container**（project_doc）：This project includes a dev container https://containers.dev/ , which lets you use a container as a full-featured dev environment. 
激活提示：当用户需要理解项目结构、安装方式或边界时参考。 证据：`.devcontainer/README.md`\n- **AGENTS.md - AI Agent Guidelines for LangChain.js**（project_doc）：AGENTS.md - AI Agent Guidelines for LangChain.js 激活提示：当用户需要理解项目结构、安装方式或边界时参考。 证据：`AGENTS.md`\n- **⚡️ Quick Install**（project_doc）：! npm https://img.shields.io/npm/dm/langchain ! License: MIT https://img.shields.io/badge/License-MIT-yellow.svg https://opensource.org/licenses/MIT ! Twitter https://img.shields.io/twitter/url/https/twitter.com/langchain js.svg?style=social&label=Follow%20%40LangChain https://x.com/langchain js 激活提示：当用户需要理解项目结构、安装方式或边界时参考。 证据：`README.md`\n- **Environment Tests**（project_doc）：This directory contains tests that verify LangChain packages work correctly in different JavaScript/TypeScript environments. 激活提示：当用户需要理解项目结构、安装方式或边界时参考。 证据：`environment_tests/README.md`\n- **langchain-examples**（project_doc）：This folder contains examples of how to use LangChain. 激活提示：当用户需要理解项目结构、安装方式或边界时参考。 证据：`examples/src/README.md`\n- **@langchain/build**（project_doc）：Pre-configured build system for LangChain packages using tsdown https://tsdown.dev/ . 激活提示：当用户需要理解项目结构、安装方式或边界时参考。 证据：`internal/build/README.md`\n- **Model Profiles Generator**（project_doc）：A CLI tool for automatically generating TypeScript model profile files from the models.dev https://models.dev API. This tool fetches model capabilities and constraints, applies provider-level and model-specific overrides, and generates type-safe TypeScript files using the TypeScript AST API. 激活提示：当用户需要理解项目结构、安装方式或边界时参考。 证据：`internal/model-profiles/README.md`\n- **LangChain.js Standard Tests**（project_doc）：This package contains the base standard tests for LangChain.js. It includes unit, and integration test classes. This package is not intended to be used outside of the LangChain.js project, and thus it is not published to npm. 
激活提示：当用户需要理解项目结构、安装方式或边界时参考。 证据：`internal/standard-tests/README.md`\n- **@langchain/**（project_doc）：This package contains the LangChain.js integrations for through their SDK. 激活提示：当用户需要理解项目结构、安装方式或边界时参考。 证据：`libs/create-langchain-integration/template/README.md`\n- **@langchain/classic**（project_doc）：This package contains functionality from LangChain v0.x that has been moved out of the main langchain package as part of the v1.0 release. It exists to provide backward compatibility for existing applications while the core langchain package focuses on the essential building blocks for modern agent development. 激活提示：当用户需要理解项目结构、安装方式或边界时参考。 证据：`libs/langchain-classic/README.md`\n- **🦜🍎️ @langchain/core**（project_doc）：! npm https://img.shields.io/npm/dm/@langchain/core ! License: MIT https://img.shields.io/badge/License-MIT-yellow.svg https://opensource.org/licenses/MIT ! Twitter https://img.shields.io/twitter/url/https/twitter.com/langchain js.svg?style=social&label=Follow%20%40LangChain https://x.com/langchain js 激活提示：当用户需要理解项目结构、安装方式或边界时参考。 证据：`libs/langchain-core/README.md`\n- **LangChain.js MCP Adapters**（project_doc）：! npm version https://img.shields.io/npm/v/@langchain/mcp-adapters.svg https://www.npmjs.com/package/@langchain/mcp-adapters ! License: MIT https://img.shields.io/badge/License-MIT-yellow.svg https://opensource.org/licenses/MIT 激活提示：当用户需要理解项目结构、安装方式或边界时参考。 证据：`libs/langchain-mcp-adapters/README.md`\n- **LangChainJS-MCP-Adapters Examples**（project_doc）：This directory contains examples demonstrating how to use the @langchain/mcp-adapters library with various MCP servers 激活提示：当用户需要理解项目结构、安装方式或边界时参考。 证据：`libs/langchain-mcp-adapters/examples/README.md`\n- **🦜✂️ @langchain/textsplitters**（project_doc）：This package contains various implementations of LangChain.js text splitters, most commonly used as part of retrieval-augmented generation RAG pipelines. 
激活提示：当用户需要理解项目结构、安装方式或边界时参考。 证据：`libs/langchain-textsplitters/README.md`\n- **🦜️🔗 LangChain.js**（project_doc）：! npm https://img.shields.io/npm/dm/langchain ! License: MIT https://img.shields.io/badge/License-MIT-yellow.svg https://opensource.org/licenses/MIT ! Twitter https://img.shields.io/twitter/url/https/twitter.com/langchain js.svg?style=social&label=Follow%20%40LangChain https://x.com/langchain js 激活提示：当用户需要理解项目结构、安装方式或边界时参考。 证据：`libs/langchain/README.md`\n- **@langchain/anthropic**（project_doc）：This package contains the LangChain.js integrations for Anthropic through their SDK. 激活提示：当用户需要理解项目结构、安装方式或边界时参考。 证据：`libs/providers/langchain-anthropic/README.md`\n- **@langchain/aws**（project_doc）：This package contains the LangChain.js integrations for AWS through their SDK. 激活提示：当用户需要理解项目结构、安装方式或边界时参考。 证据：`libs/providers/langchain-aws/README.md`\n- **@langchain/cloudflare**（project_doc）：This package contains the LangChain.js integrations for Cloudflare through their SDK. 激活提示：当用户需要理解项目结构、安装方式或边界时参考。 证据：`libs/providers/langchain-cloudflare/README.md`\n- **@langchain/cohere**（project_doc）：This package contains the LangChain.js integrations for Cohere through their SDK. 激活提示：当用户需要理解项目结构、安装方式或边界时参考。 证据：`libs/providers/langchain-cohere/README.md`\n- **@langchain/deepseek**（project_doc）：This package contains the LangChain.js integrations for DeepSeek. 激活提示：当用户需要理解项目结构、安装方式或边界时参考。 证据：`libs/providers/langchain-deepseek/README.md`\n- **@langchain/exa**（project_doc）：This package contains the LangChain.js integrations for exa through their SDK. 激活提示：当用户需要理解项目结构、安装方式或边界时参考。 证据：`libs/providers/langchain-exa/README.md`\n- **@langchain/fireworks**（project_doc）：This package contains the LangChain.js integrations for Fireworks AI. 
激活提示：当用户需要理解项目结构、安装方式或边界时参考。 证据：`libs/providers/langchain-fireworks/README.md`\n- **@langchain/google-cloud-sql-pg**（project_doc）：The LangChain package for CloudSQL for Postgres provides a way to connect to Cloud SQL instances from the LangChain ecosystem. 激活提示：当用户需要理解项目结构、安装方式或边界时参考。 证据：`libs/providers/langchain-google-cloud-sql-pg/README.md`\n- **LangChain google-common**（project_doc）：This package contains common resources to access Google AI/ML models and other Google services in an auth-independent way. 激活提示：当用户需要理解项目结构、安装方式或边界时参考。 证据：`libs/providers/langchain-google-common/README.md`\n- **LangChain google-gauth**（project_doc）：This package contains resources to access Google AI/ML models and other Google services. Authorization to these services use either an API Key or service account credentials that are either stored on the local file system or are provided through the Google Cloud Platform environment it is running on. 激活提示：当用户需要理解项目结构、安装方式或边界时参考。 证据：`libs/providers/langchain-google-gauth/README.md`\n- **@langchain/google-genai**（project_doc）：This package contains the LangChain.js integrations for Gemini through their generative-ai SDK. 激活提示：当用户需要理解项目结构、安装方式或边界时参考。 证据：`libs/providers/langchain-google-genai/README.md`\n- **LangChain google-vertexai-web**（project_doc）：This package contains resources to access Google AI/ML models and other Google services via Vertex AI. Authorization to these services use either an API Key or service account credentials that are included in an environment variable. 激活提示：当用户需要理解项目结构、安装方式或边界时参考。 证据：`libs/providers/langchain-google-vertexai-web/README.md`\n- **LangChain google-vertexai**（project_doc）：This package contains resources to access Google AI/ML models and other Google services via Vertex AI. Authorization to these services use service account credentials stored on the local file system or provided through the Google Cloud Platform environment it is running on. 
激活提示：当用户需要理解项目结构、安装方式或边界时参考。 证据：`libs/providers/langchain-google-vertexai/README.md`\n- **LangChain google-webauth**（project_doc）：This package contains resources to access Google AI/ML models and other Google services. Authorization to these services use either an API Key or service account credentials that are included in an environment variable. 激活提示：当用户需要理解项目结构、安装方式或边界时参考。 证据：`libs/providers/langchain-google-webauth/README.md`\n- **@langchain/google**（project_doc）：This package supports access to a variety of Google's models, including the Gemini family of models and their Nano Banana image generation model. You can access these models through either Google's Google AI https://ai.google.dev/ API sometimes also called the Generative AI API or the AI Studio API or through the Google Cloud Platform Vertex AI https://cloud.google.com/vertex-ai service. It does not rely on the \"gen… 激活提示：当用户需要理解项目结构、安装方式或边界时参考。 证据：`libs/providers/langchain-google/README.md`\n- **@langchain/groq**（project_doc）：This package contains the LangChain.js integrations for Groq via the groq/sdk package. 激活提示：当用户需要理解项目结构、安装方式或边界时参考。 证据：`libs/providers/langchain-groq/README.md`\n- **@langchain/ibm**（project_doc）：This package contains the LangChain.js integrations for IBM watsonx.ai https://www.ibm.com/watsonx via the @ibm-cloud/watsonx-ai SDK. 激活提示：当用户需要理解项目结构、安装方式或边界时参考。 证据：`libs/providers/langchain-ibm/README.md`\n- **@langchain/mistralai**（project_doc）：This package contains the LangChain.js integrations for Mistral through their SDK. 激活提示：当用户需要理解项目结构、安装方式或边界时参考。 证据：`libs/providers/langchain-mistralai/README.md`\n- **@langchain/mongodb**（project_doc）：This package contains the LangChain.js integrations for MongoDB through their SDK. 
激活提示：当用户需要理解项目结构、安装方式或边界时参考。 证据：`libs/providers/langchain-mongodb/README.md`\n- **@langchain/neo4j**（project_doc）：This package contains the LangChain.js https://github.com/langchain-ai/langchainjs integrations for Neo4j https://neo4j.com/ graph database, including support for Memgraph https://memgraph.com/ which uses the Bolt protocol . 激活提示：当用户需要理解项目结构、安装方式或边界时参考。 证据：`libs/providers/langchain-neo4j/README.md`\n- **@langchain/ollama**（project_doc）：This package contains the LangChain.js integrations for Ollama via the ollama TypeScript SDK. 激活提示：当用户需要理解项目结构、安装方式或边界时参考。 证据：`libs/providers/langchain-ollama/README.md`\n- **@langchain/openai**（project_doc）：This package contains the LangChain.js integrations for OpenAI through their SDK. 激活提示：当用户需要理解项目结构、安装方式或边界时参考。 证据：`libs/providers/langchain-openai/README.md`\n- **@langchain/openrouter**（project_doc）：This package contains the LangChain.js integrations for OpenRouter https://openrouter.ai/ . 激活提示：当用户需要理解项目结构、安装方式或边界时参考。 证据：`libs/providers/langchain-openrouter/README.md`\n- **@langchain/perplexity**（project_doc）：This package provides a LangChain.js https://github.com/langchain-ai/langchainjs integration for Perplexity AI https://www.perplexity.ai/ , including chat models, the Perplexity Search retriever, and the Perplexity Search tool. 激活提示：当用户需要理解项目结构、安装方式或边界时参考。 证据：`libs/providers/langchain-perplexity/README.md`\n- **@langchain/pgvector**（project_doc）：This package contains the LangChain.js https://github.com/langchain-ai/langchainjs integration for pgvector https://github.com/pgvector/pgvector , the open-source vector similarity search extension for PostgreSQL. 激活提示：当用户需要理解项目结构、安装方式或边界时参考。 证据：`libs/providers/langchain-pgvector/README.md`\n- **@langchain/pinecone**（project_doc）：This package contains the LangChain.js integrations for Pinecone through their SDK. 
激活提示：当用户需要理解项目结构、安装方式或边界时参考。 证据：`libs/providers/langchain-pinecone/README.md`\n- **@langchain/qdrant**（project_doc）：This package contains the LangChain.js integration for the Qdrant https://qdrant.tech/ vector database. 激活提示：当用户需要理解项目结构、安装方式或边界时参考。 证据：`libs/providers/langchain-qdrant/README.md`\n- **@langchain/redis**（project_doc）：This package contains the LangChain.js integrations for Redis through their SDK. 激活提示：当用户需要理解项目结构、安装方式或边界时参考。 证据：`libs/providers/langchain-redis/README.md`\n- **@langchain/tavily**（project_doc）：! NPM - Version https://img.shields.io/npm/v/@langchain/tavily?style=flat-square&label=%20 https://www.npmjs.com/package/@langchain/tavily 激活提示：当用户需要理解项目结构、安装方式或边界时参考。 证据：`libs/providers/langchain-tavily/README.md`\n- **@langchain/together-ai**（project_doc）：This package contains the LangChain.js integrations for Together AI https://www.together.ai/ . 激活提示：当用户需要理解项目结构、安装方式或边界时参考。 证据：`libs/providers/langchain-together-ai/README.md`\n- **@langchain/weaviate**（project_doc）：This package contains the LangChain.js integrations for Weaviate with the weaviate-client SDK. 激活提示：当用户需要理解项目结构、安装方式或边界时参考。 证据：`libs/providers/langchain-weaviate/README.md`\n- **@langchain/xai**（project_doc）：This package contains the LangChain.js integrations for xAI. 激活提示：当用户需要理解项目结构、安装方式或边界时参考。 证据：`libs/providers/langchain-xai/README.md`\n- **Contributing to LangChain**（project_doc）：👋 Welcome! Thank you for your interest in contributing. LangChain has helped form the largest developer community in generative AI, and we're always open to new contributors. Whether you're fixing bugs, adding features, improving documentation, or sharing feedback, your involvement helps make LangChain and LangGraph better for everyone 🦜❤️ 激活提示：当用户需要理解项目结构、安装方式或边界时参考。 证据：`CONTRIBUTING.md`\n\n## 证据索引\n\n- 共索引 80 条证据。\n\n- **Readme**（documentation）：These docs have moved! 
See https://docs.langchain.com/oss/javascript/ Repo https://github.com/langchain-ai/docs 证据：`docs/core_docs/README.md`\n- **Changesets**（documentation）：Hello and welcome! This folder has been automatically generated by @changesets/cli , a build tool that works with multi-package repos, or single-package repos to help you version and publish your code. You can find the full documentation for it in our repository https://github.com/changesets/changesets 证据：`.changeset/README.md`\n- **Dev container**（documentation）：This project includes a dev container https://containers.dev/ , which lets you use a container as a full-featured dev environment. 证据：`.devcontainer/README.md`\n- **AGENTS.md - AI Agent Guidelines for LangChain.js**（documentation）：AGENTS.md - AI Agent Guidelines for LangChain.js 证据：`AGENTS.md`\n- **⚡️ Quick Install**（documentation）：! npm https://img.shields.io/npm/dm/langchain ! License: MIT https://img.shields.io/badge/License-MIT-yellow.svg https://opensource.org/licenses/MIT ! Twitter https://img.shields.io/twitter/url/https/twitter.com/langchain js.svg?style=social&label=Follow%20%40LangChain https://x.com/langchain js 证据：`README.md`\n- **Environment Tests**（documentation）：This directory contains tests that verify LangChain packages work correctly in different JavaScript/TypeScript environments. 证据：`environment_tests/README.md`\n- **langchain-examples**（documentation）：This folder contains examples of how to use LangChain. 证据：`examples/src/README.md`\n- **@langchain/build**（documentation）：Pre-configured build system for LangChain packages using tsdown https://tsdown.dev/ . 证据：`internal/build/README.md`\n- **Model Profiles Generator**（documentation）：A CLI tool for automatically generating TypeScript model profile files from the models.dev https://models.dev API. This tool fetches model capabilities and constraints, applies provider-level and model-specific overrides, and generates type-safe TypeScript files using the TypeScript AST API. 
证据：`internal/model-profiles/README.md`\n- **LangChain.js Standard Tests**（documentation）：This package contains the base standard tests for LangChain.js. It includes unit, and integration test classes. This package is not intended to be used outside of the LangChain.js project, and thus it is not published to npm. 证据：`internal/standard-tests/README.md`\n- **@langchain/**（documentation）：This package contains the LangChain.js integrations for through their SDK. 证据：`libs/create-langchain-integration/template/README.md`\n- **@langchain/classic**（documentation）：This package contains functionality from LangChain v0.x that has been moved out of the main langchain package as part of the v1.0 release. It exists to provide backward compatibility for existing applications while the core langchain package focuses on the essential building blocks for modern agent development. 证据：`libs/langchain-classic/README.md`\n- **🦜🍎️ @langchain/core**（documentation）：! npm https://img.shields.io/npm/dm/@langchain/core ! License: MIT https://img.shields.io/badge/License-MIT-yellow.svg https://opensource.org/licenses/MIT ! Twitter https://img.shields.io/twitter/url/https/twitter.com/langchain js.svg?style=social&label=Follow%20%40LangChain https://x.com/langchain js 证据：`libs/langchain-core/README.md`\n- **LangChain.js MCP Adapters**（documentation）：! npm version https://img.shields.io/npm/v/@langchain/mcp-adapters.svg https://www.npmjs.com/package/@langchain/mcp-adapters ! 
License: MIT https://img.shields.io/badge/License-MIT-yellow.svg https://opensource.org/licenses/MIT 证据：`libs/langchain-mcp-adapters/README.md`\n- **LangChainJS-MCP-Adapters Examples**（documentation）：This directory contains examples demonstrating how to use the @langchain/mcp-adapters library with various MCP servers 证据：`libs/langchain-mcp-adapters/examples/README.md`\n- **🦜✂️ @langchain/textsplitters**（documentation）：This package contains various implementations of LangChain.js text splitters, most commonly used as part of retrieval-augmented generation RAG pipelines. 证据：`libs/langchain-textsplitters/README.md`\n- **🦜️🔗 LangChain.js**（documentation）：! npm https://img.shields.io/npm/dm/langchain ! License: MIT https://img.shields.io/badge/License-MIT-yellow.svg https://opensource.org/licenses/MIT ! Twitter https://img.shields.io/twitter/url/https/twitter.com/langchain js.svg?style=social&label=Follow%20%40LangChain https://x.com/langchain js 证据：`libs/langchain/README.md`\n- **@langchain/anthropic**（documentation）：This package contains the LangChain.js integrations for Anthropic through their SDK. 证据：`libs/providers/langchain-anthropic/README.md`\n- **@langchain/aws**（documentation）：This package contains the LangChain.js integrations for AWS through their SDK. 证据：`libs/providers/langchain-aws/README.md`\n- **@langchain/cloudflare**（documentation）：This package contains the LangChain.js integrations for Cloudflare through their SDK. 证据：`libs/providers/langchain-cloudflare/README.md`\n- **@langchain/cohere**（documentation）：This package contains the LangChain.js integrations for Cohere through their SDK. 证据：`libs/providers/langchain-cohere/README.md`\n- **@langchain/deepseek**（documentation）：This package contains the LangChain.js integrations for DeepSeek. 证据：`libs/providers/langchain-deepseek/README.md`\n- **@langchain/exa**（documentation）：This package contains the LangChain.js integrations for exa through their SDK. 
证据：`libs/providers/langchain-exa/README.md`\n- **@langchain/fireworks**（documentation）：This package contains the LangChain.js integrations for Fireworks AI. 证据：`libs/providers/langchain-fireworks/README.md`\n- **@langchain/google-cloud-sql-pg**（documentation）：The LangChain package for CloudSQL for Postgres provides a way to connect to Cloud SQL instances from the LangChain ecosystem. 证据：`libs/providers/langchain-google-cloud-sql-pg/README.md`\n- **LangChain google-common**（documentation）：This package contains common resources to access Google AI/ML models and other Google services in an auth-independent way. 证据：`libs/providers/langchain-google-common/README.md`\n- **LangChain google-gauth**（documentation）：This package contains resources to access Google AI/ML models and other Google services. Authorization to these services use either an API Key or service account credentials that are either stored on the local file system or are provided through the Google Cloud Platform environment it is running on. 证据：`libs/providers/langchain-google-gauth/README.md`\n- **@langchain/google-genai**（documentation）：This package contains the LangChain.js integrations for Gemini through their generative-ai SDK. 证据：`libs/providers/langchain-google-genai/README.md`\n- **LangChain google-vertexai-web**（documentation）：This package contains resources to access Google AI/ML models and other Google services via Vertex AI. Authorization to these services use either an API Key or service account credentials that are included in an environment variable. 证据：`libs/providers/langchain-google-vertexai-web/README.md`\n- **LangChain google-vertexai**（documentation）：This package contains resources to access Google AI/ML models and other Google services via Vertex AI. Authorization to these services use service account credentials stored on the local file system or provided through the Google Cloud Platform environment it is running on. 
证据：`libs/providers/langchain-google-vertexai/README.md`\n- **LangChain google-webauth**（documentation）：This package contains resources to access Google AI/ML models and other Google services. Authorization to these services use either an API Key or service account credentials that are included in an environment variable. 证据：`libs/providers/langchain-google-webauth/README.md`\n- **@langchain/google**（documentation）：This package supports access to a variety of Google's models, including the Gemini family of models and their Nano Banana image generation model. You can access these models through either Google's Google AI https://ai.google.dev/ API sometimes also called the Generative AI API or the AI Studio API or through the Google Cloud Platform Vertex AI https://cloud.google.com/vertex-ai service. It does not rely on the \"genai\" library from Google, but rather uses direct REST calls. 证据：`libs/providers/langchain-google/README.md`\n- **@langchain/groq**（documentation）：This package contains the LangChain.js integrations for Groq via the groq/sdk package. 证据：`libs/providers/langchain-groq/README.md`\n- **@langchain/ibm**（documentation）：This package contains the LangChain.js integrations for IBM watsonx.ai https://www.ibm.com/watsonx via the @ibm-cloud/watsonx-ai SDK. 证据：`libs/providers/langchain-ibm/README.md`\n- **@langchain/mistralai**（documentation）：This package contains the LangChain.js integrations for Mistral through their SDK. 证据：`libs/providers/langchain-mistralai/README.md`\n- **@langchain/mongodb**（documentation）：This package contains the LangChain.js integrations for MongoDB through their SDK. 证据：`libs/providers/langchain-mongodb/README.md`\n- **@langchain/neo4j**（documentation）：This package contains the LangChain.js https://github.com/langchain-ai/langchainjs integrations for Neo4j https://neo4j.com/ graph database, including support for Memgraph https://memgraph.com/ which uses the Bolt protocol . 
证据：`libs/providers/langchain-neo4j/README.md`\n- **@langchain/ollama**（documentation）：This package contains the LangChain.js integrations for Ollama via the ollama TypeScript SDK. 证据：`libs/providers/langchain-ollama/README.md`\n- **@langchain/openai**（documentation）：This package contains the LangChain.js integrations for OpenAI through their SDK. 证据：`libs/providers/langchain-openai/README.md`\n- **@langchain/openrouter**（documentation）：This package contains the LangChain.js integrations for OpenRouter https://openrouter.ai/ . 证据：`libs/providers/langchain-openrouter/README.md`\n- **@langchain/perplexity**（documentation）：This package provides a LangChain.js https://github.com/langchain-ai/langchainjs integration for Perplexity AI https://www.perplexity.ai/ , including chat models, the Perplexity Search retriever, and the Perplexity Search tool. 证据：`libs/providers/langchain-perplexity/README.md`\n- **@langchain/pgvector**（documentation）：This package contains the LangChain.js https://github.com/langchain-ai/langchainjs integration for pgvector https://github.com/pgvector/pgvector , the open-source vector similarity search extension for PostgreSQL. 证据：`libs/providers/langchain-pgvector/README.md`\n- **@langchain/pinecone**（documentation）：This package contains the LangChain.js integrations for Pinecone through their SDK. 证据：`libs/providers/langchain-pinecone/README.md`\n- **@langchain/qdrant**（documentation）：This package contains the LangChain.js integration for the Qdrant https://qdrant.tech/ vector database. 证据：`libs/providers/langchain-qdrant/README.md`\n- **@langchain/redis**（documentation）：This package contains the LangChain.js integrations for Redis through their SDK. 证据：`libs/providers/langchain-redis/README.md`\n- **@langchain/tavily**（documentation）：! 
NPM - Version https://img.shields.io/npm/v/@langchain/tavily?style=flat-square&label=%20 https://www.npmjs.com/package/@langchain/tavily 证据：`libs/providers/langchain-tavily/README.md`\n- **@langchain/together-ai**（documentation）：This package contains the LangChain.js integrations for Together AI https://www.together.ai/ . 证据：`libs/providers/langchain-together-ai/README.md`\n- **@langchain/weaviate**（documentation）：This package contains the LangChain.js integrations for Weaviate with the weaviate-client SDK. 证据：`libs/providers/langchain-weaviate/README.md`\n- **@langchain/xai**（documentation）：This package contains the LangChain.js integrations for xAI. 证据：`libs/providers/langchain-xai/README.md`\n- **Package**（package_manifest）：{ \"name\": \"examples\", \"version\": \"0.0.0\", \"private\": true, \"description\": \"Langchain examples\", \"main\": \"./dist/index.js\", \"type\": \"module\", \"files\": \"dist/\" , \"scripts\": { \"build\": \"tsc --declaration --outDir dist/\", \"clean\": \"rm -rf .turbo dist/\", \"start\": \"tsx --experimental-wasm-modules -r dotenv/config src/index.ts\", \"start:dist\": \"pnpm build && node -r dotenv/config dist/index.js\", \"precommit\": \"lint-staged\" }, \"author\": \"LangChain\", \"license\": \"MIT\", \"dependencies\": { \"@aws-sdk/dsql-signer\": \"^3.1022.0\", \"@azure/identity\": \"^4.13.1\", \"@browserbasehq/stagehand\": \"^3.2.0\", \"@clickhouse/client\": \"^1.18.2\", \"@cloudflare/workers-types\": \"^4.20260508.1\", \"@elastic/elasticsearch\": \"^9.3.4\",… 证据：`examples/package.json`\n- **Package**（package_manifest）：{ \"name\": \"langchainjs\", \"author\": \"LangChain\", \"license\": \"MIT\", \"private\": true, \"homepage\": \"https://github.com/langchain-ai/langchainjs/tree/main/\", \"repository\": { \"type\": \"git\", \"url\": \"https://github.com/langchain-ai/langchainjs.git\" }, \"packageManager\": \"pnpm@10.14.0\", \"scripts\": { \"build\": \"turbo build:compile\", \"watch\": \"turbo watch build:compile\", \"clean\": 
\"turbo clean\", \"dev\": \"pnpm --filter @langchain/build watch\", \"format\": \"oxfmt .\", \"format:check\": \"oxfmt --check .\", \"lint\": \"oxlint .\", \"lint:fix\": \"oxlint . --fix\", \"precommit\": \"turbo precommit\", \"prerelease\": \"BUILD_MODE=prerelease pnpm build\", \"release\": \"changeset publish\", \"test\": \"pnpm test:unit && pnpm test:exports:do… 证据：`package.json`\n- **Contributing to LangChain**（documentation）：👋 Welcome! Thank you for your interest in contributing. LangChain has helped form the largest developer community in generative AI, and we're always open to new contributors. Whether you're fixing bugs, adding features, improving documentation, or sharing feedback, your involvement helps make LangChain and LangGraph better for everyone 🦜❤️ 证据：`CONTRIBUTING.md`\n- **Package**（package_manifest）：{ \"name\": \"dependency-range-tests\", \"version\": \"0.0.0\", \"private\": true, \"description\": \"Tests dependency ranges for LangChain.\", \"dependencies\": { \"semver\": \"^7.5.4\" } } 证据：`dependency_range_tests/scripts/langchain/node/package.json`\n- **Package**（package_manifest）：{ \"name\": \"dependency-range-tests\", \"version\": \"0.0.0\", \"private\": true, \"description\": \"Tests dependency ranges for LangChain.\", \"dependencies\": { \"semver\": \"^7.5.4\" }, \"packageManager\": \"pnpm@10.14.0+sha512.ad27a79641b49c3e481a16a805baa71817a04bbe06a38d17e60e2eaee83f6a146c6a688125f5792e48dd5ba30e7da52a5cda4c3992b9ccf333f9ce223af84748\" } 证据：`dependency_range_tests/scripts/with_standard_tests/anthropic/node/package.json`\n- **Package**（package_manifest）：{ \"name\": \"dependency-range-tests\", \"version\": \"0.0.0\", \"private\": true, \"description\": \"Tests dependency ranges for LangChain.\", \"dependencies\": { \"semver\": \"^7.5.4\" }, \"packageManager\": \"pnpm@10.14.0+sha512.ad27a79641b49c3e481a16a805baa71817a04bbe06a38d17e60e2eaee83f6a146c6a688125f5792e48dd5ba30e7da52a5cda4c3992b9ccf333f9ce223af84748\" } 
证据：`dependency_range_tests/scripts/with_standard_tests/cohere/node/package.json`\n- **Package**（package_manifest）：{ \"name\": \"dependency-range-tests\", \"version\": \"0.0.0\", \"private\": true, \"description\": \"Tests dependency ranges for LangChain.\", \"dependencies\": { \"semver\": \"^7.5.4\" }, \"packageManager\": \"pnpm@10.14.0\" } 证据：`dependency_range_tests/scripts/with_standard_tests/google-vertexai/node/package.json`\n- **Package**（package_manifest）：{ \"name\": \"dependency-range-tests\", \"version\": \"0.0.0\", \"private\": true, \"description\": \"Tests dependency ranges for LangChain.\", \"dependencies\": {} } 证据：`dependency_range_tests/scripts/with_standard_tests/node/package.json`\n- **Package**（package_manifest）：{ \"name\": \"dependency-range-tests\", \"version\": \"0.0.0\", \"private\": true, \"description\": \"Tests dependency ranges for LangChain.\", \"dependencies\": { \"semver\": \"^7.5.4\" }, \"packageManager\": \"pnpm@10.14.0+sha512.ad27a79641b49c3e481a16a805baa71817a04bbe06a38d17e60e2eaee83f6a146c6a688125f5792e48dd5ba30e7da52a5cda4c3992b9ccf333f9ce223af84748\" } 证据：`dependency_range_tests/scripts/with_standard_tests/openai/node/package.json`\n- **Package**（package_manifest）：{ \"name\": \"@langchain/build\", \"private\": true, \"version\": \"0.1.1\", \"type\": \"module\", \"main\": \"./src/index.ts\", \"types\": \"./src/index.ts\", \"scripts\": { \"build\": \"exit 0\" }, \"dependencies\": { \"@arethetypeswrong/core\": \"^0.18.2\", \"@typescript/native-preview\": \"^7.0.0-dev.20260401.1\", \"publint\": \"^0.3.19\", \"rolldown\": \"^1.0.0-rc.4\", \"tsdown\": \"^0.21.7\", \"type-fest\": \"^5.3.0\", \"typescript\": \"^6.0.3\", \"unplugin-unused\": \"^0.5.6\" }, \"devDependencies\": { \"@langchain/tsconfig\": \"workspace: \", \"@tsconfig/recommended\": \"^1.0.10\" }, \"optionalDependencies\": { \"@esbuild/win32-x64\": \" \", \"@rolldown/binding-darwin-arm64\": \" \", \"@rolldown/binding-darwin-x64\": \" \", \"@rolldown/binding-linux-arm64-gnu\": 
\" \", \"… 证据：`internal/build/package.json`\n- **Package**（package_manifest）：{ \"name\": \"@langchain/model-profiles\", \"version\": \"0.0.10\", \"private\": true, \"type\": \"module\", \"scripts\": { \"make\": \"tsx src/cli.ts\", \"test\": \"vitest run\", \"test:watch\": \"vitest\" }, \"dependencies\": { \"@iarna/toml\": \"^2.2.5\", \"@langchain/core\": \"workspace:^\", \"commander\": \"^14.0.3\", \"typescript\": \"^6.0.3\", \"zod\": \"^4.3.6\" }, \"devDependencies\": { \"@types/iarna__toml\": \"^2.0.5\", \"@types/node\": \"^25.5.0\", \"tsx\": \"^4.21.0\", \"vitest\": \"^4.1.5\" } } 证据：`internal/model-profiles/package.json`\n- 其余 20 条证据见 `AI_CONTEXT_PACK.json` 或 `EVIDENCE_INDEX.json`。\n\n## 宿主 AI 必须遵守的规则\n\n- **把本资产当作开工前上下文，而不是运行环境。**：AI Context Pack 只包含证据化项目理解，不包含目标项目的可执行状态。 证据：`docs/core_docs/README.md`, `.changeset/README.md`, `.devcontainer/README.md`\n- **回答用户时区分可预览内容与必须安装后才能验证的内容。**：安装前体验的消费者价值来自降低误装和误判，而不是伪装成真实运行。 证据：`docs/core_docs/README.md`, `.changeset/README.md`, `.devcontainer/README.md`\n\n## 用户开工前应该回答的问题\n\n- 你准备在哪个宿主 AI 或本地环境中使用它？\n- 你只是想先体验工作流，还是准备真实安装？\n- 你最在意的是安装成本、输出质量、还是和现有规则的冲突？\n\n## 验收标准\n\n- 所有能力声明都能回指到 evidence_refs 中的文件路径。\n- AI_CONTEXT_PACK.md 没有把预览包装成真实运行。\n- 用户能在 3 分钟内看懂适合谁、能做什么、如何开始和风险边界。\n\n---\n\n## Doramagic Context Augmentation\n\n下面内容用于强化 Repomix/AI Context Pack 主体。Human Manual 只提供阅读骨架；踩坑日志会被转成宿主 AI 必须遵守的工作约束。\n\n## Human Manual 骨架\n\n使用规则：这里只是项目阅读路线和显著性信号，不是事实权威。具体事实仍必须回到 repo evidence / Claim Graph。\n\n宿主 AI 硬性规则：\n- 不得把页标题、章节顺序、摘要或 importance 当作项目事实证据。\n- 解释 Human Manual 骨架时，必须明确说它只是阅读路线/显著性信号。\n- 能力、安装、兼容性、运行状态和风险判断必须引用 repo evidence、source path 或 Claim Graph。\n\n- **Introduction to LangChain.js**：importance `high`\n  - source_paths: README.md, libs/langchain/README.md, libs/langchain-core/README.md, libs/langchain-classic/README.md\n- **Getting Started**：importance `high`\n  - source_paths: package.json, libs/langchain/package.json, libs/langchain-core/package.json, examples/package.json, environment_tests/README.md\n- 
**Package Architecture**：importance `high`\n  - source_paths: pnpm-workspace.yaml, turbo.json, libs/langchain-core/package.json, libs/langchain/package.json, libs/langchain-classic/package.json\n- **Core Abstractions**：importance `high`\n  - source_paths: libs/langchain-core/src/runnables/base.ts, libs/langchain-core/src/language_models/chat_models.ts, libs/langchain-core/src/language_models/llms.ts, libs/langchain-core/src/embeddings.ts, libs/langchain-core/src/vectorstores.ts\n- **Chat Models and LLM Providers**：importance `high`\n  - source_paths: libs/providers/langchain-openai/src/chat_models/base.ts, libs/providers/langchain-anthropic/src/chat_models.ts, libs/providers/langchain-google-common/src/chat_models.ts, libs/providers/langchain-mistralai/src/chat_models.ts, libs/providers/langchain-ollama/src/chat_models.ts\n- **Embeddings Integration**：importance `medium`\n  - source_paths: libs/providers/langchain-openai/src/embeddings.ts, libs/providers/langchain-aws/src/embeddings.ts, libs/providers/langchain-cohere/src/embeddings.ts, libs/providers/langchain-google-genai/src/embeddings.ts, libs/providers/langchain-google-vertexai/src/embeddings.ts\n- **Agent Framework**：importance `high`\n  - source_paths: libs/langchain/src/agents/runtime.ts, libs/langchain/src/agents/stream.ts, libs/langchain/src/agents/middleware.ts, libs/langchain/src/agents/nodes/types.ts, libs/langchain-classic/src/agents/react/index.ts\n- **Tools and Toolkits**：importance `high`\n  - source_paths: libs/langchain-classic/src/tools/base.ts, libs/langchain-classic/src/tools/dynamic.ts, libs/langchain-classic/src/tools/sql.ts, libs/langchain-classic/src/tools/webbrowser.ts, libs/langchain-classic/src/agents/toolkits/sql/sql.ts\n\n## Repo Inspection Evidence / 源码检查证据\n\n- repo_clone_verified: true\n- repo_inspection_verified: true\n- repo_commit: `0cfcfc66897d8fafeb7e7ed90b7299eace9a7c37`\n- inspected_files: `pnpm-lock.yaml`, `package.json`, `README.md`, `docs/core_docs/README.md`, 
`examples/package.json`, `examples/openai_openapi.yaml`, `examples/tsconfig.json`, `examples/src/index.ts`, `examples/src/README.md`, `examples/src/extraction/openai_tool_calling_extraction.ts`, `examples/src/multi-agent/handoffs.ts`, `examples/src/multi-agent/skills-sql-assistant.ts`, `examples/src/multi-agent/subagents-personal-assistant.ts`, `examples/src/multi-agent/router-knowledge-base.ts`, `examples/src/multi-agent/handoffs-customer-support.ts`, `examples/src/cache/cloudflare_kv.ts`, `examples/src/llms/googlevertexai.ts`, `examples/src/llms/openai.ts`, `examples/src/llms/azure_openai-chat.ts`, `examples/src/llms/googlevertexai-streaming.ts`\n\n宿主 AI 硬性规则：\n- 没有 repo_clone_verified=true 时，不得声称已经读过源码。\n- 没有 repo_inspection_verified=true 时，不得把 README/docs/package 文件判断写成事实。\n- 没有 quick_start_verified=true 时，不得声称 Quick Start 已跑通。\n\n## Doramagic Pitfall Constraints / 踩坑约束\n\n这些规则来自 Doramagic 发现、验证或编译过程中的项目专属坑点。宿主 AI 必须把它们当作工作约束，而不是普通说明文字。\n\n### Constraint 1: 来源证据：[Feature request] React Native support\n\n- Trigger: GitHub 社区证据显示该项目存在一个配置相关的待验证问题：[Feature request] React Native support\n- Host AI rule: 来源问题仍为 open，Pack Agent 需要复核是否仍影响当前版本。\n- Why it matters: 可能增加新用户试用和生产接入成本。\n- Evidence: community_evidence:github | cevd_39933028ef894033b30ff784e81f185f | https://github.com/langchain-ai/langchainjs/issues/4239 | 来源类型 github_issue 暴露的待验证使用条件。\n- Hard boundary: 不要把这个坑点包装成已解决、已验证或可忽略，除非后续验证证据明确证明它已经关闭。\n\n### Constraint 2: 来源证据：@langchain/core@1.1.46\n\n- Trigger: GitHub 社区证据显示该项目存在一个安装相关的待验证问题：@langchain/core@1.1.46\n- Host AI rule: 来源显示可能已有修复、规避或版本变化，说明书中必须标注适用版本。\n- Why it matters: 可能增加新用户试用和生产接入成本。\n- Evidence: community_evidence:github | cevd_36a7a58d5cd84bda8dde7918402a6f8a | https://github.com/langchain-ai/langchainjs/releases/tag/%40langchain/core%401.1.46 | 来源讨论提到 npm 相关条件，需在安装/试用前复核。\n- Hard boundary: 不要把这个坑点包装成已解决、已验证或可忽略，除非后续验证证据明确证明它已经关闭。\n\n### Constraint 3: 来源证据：Must pass in at least 1 record to upsert.\n\n- Trigger: GitHub 社区证据显示该项目存在一个配置相关的待验证问题：Must 
pass in at least 1 record to upsert.\n- Host AI rule: 来源问题仍为 open，Pack Agent 需要复核是否仍影响当前版本。\n- Why it matters: 可能增加新用户试用和生产接入成本。\n- Evidence: community_evidence:github | cevd_26c3acaad9e14ed3953206f25870c0b0 | https://github.com/langchain-ai/langchainjs/issues/10890 | 来源讨论提到 node 相关条件，需在安装/试用前复核。\n- Hard boundary: 不要把这个坑点包装成已解决、已验证或可忽略，除非后续验证证据明确证明它已经关闭。\n\n### Constraint 4: 能力判断依赖假设\n\n- Trigger: README/documentation is current enough for a first validation pass.\n- Host AI rule: 将假设转成下游验证清单。\n- Why it matters: 假设不成立时，用户拿不到承诺的能力。\n- Evidence: capability.assumptions | github_repo:598342280 | https://github.com/langchain-ai/langchainjs | README/documentation is current enough for a first validation pass.\n- Hard boundary: 不要把这个坑点包装成已解决、已验证或可忽略，除非后续验证证据明确证明它已经关闭。\n\n### Constraint 5: 维护活跃度未知\n\n- Trigger: 未记录 last_activity_observed。\n- Host AI rule: 补 GitHub 最近 commit、release、issue/PR 响应信号。\n- Why it matters: 新项目、停更项目和活跃项目会被混在一起，推荐信任度下降。\n- Evidence: evidence.maintainer_signals | github_repo:598342280 | https://github.com/langchain-ai/langchainjs | last_activity_observed missing\n- Hard boundary: 不要把这个坑点包装成已解决、已验证或可忽略，除非后续验证证据明确证明它已经关闭。\n\n### Constraint 6: 下游验证发现风险项\n\n- Trigger: no_demo\n- Host AI rule: 进入安全/权限治理复核队列。\n- Why it matters: 下游已经要求复核，不能在页面中弱化。\n- Evidence: downstream_validation.risk_items | github_repo:598342280 | https://github.com/langchain-ai/langchainjs | no_demo; severity=medium\n- Hard boundary: 不要把这个坑点包装成已解决、已验证或可忽略，除非后续验证证据明确证明它已经关闭。\n\n### Constraint 7: 存在评分风险\n\n- Trigger: no_demo\n- Host AI rule: 把风险写入边界卡，并确认是否需要人工复核。\n- Why it matters: 风险会影响是否适合普通用户安装。\n- Evidence: risks.scoring_risks | github_repo:598342280 | https://github.com/langchain-ai/langchainjs | no_demo; severity=medium\n- Hard boundary: 不要把这个坑点包装成已解决、已验证或可忽略，除非后续验证证据明确证明它已经关闭。\n\n### Constraint 8: 来源证据：bug(@langchain/openai): Bare JSON.parse in Responses API converter crashes on structured output with trailing characters\n\n- Trigger: GitHub 
社区证据显示该项目存在一个安全/权限相关的待验证问题：bug(@langchain/openai): Bare JSON.parse in Responses API converter crashes on structured output with trailing characters\n- Host AI rule: 来源问题仍为 open，Pack Agent 需要复核是否仍影响当前版本。\n- Why it matters: 可能阻塞安装或首次运行。\n- Evidence: community_evidence:github | cevd_4d2a6bed33284a3cbd2f3319321d9e4c | https://github.com/langchain-ai/langchainjs/issues/10894 | 来源讨论提到 node 相关条件，需在安装/试用前复核。\n- Hard boundary: 不要把这个坑点包装成已解决、已验证或可忽略，除非后续验证证据明确证明它已经关闭。\n\n### Constraint 9: issue/PR 响应质量未知\n\n- Trigger: issue_or_pr_quality=unknown。\n- Host AI rule: 抽样最近 issue/PR，判断是否长期无人处理。\n- Why it matters: 用户无法判断遇到问题后是否有人维护。\n- Evidence: evidence.maintainer_signals | github_repo:598342280 | https://github.com/langchain-ai/langchainjs | issue_or_pr_quality=unknown\n- Hard boundary: 不要把这个坑点包装成已解决、已验证或可忽略，除非后续验证证据明确证明它已经关闭。\n\n### Constraint 10: 发布节奏不明确\n\n- Trigger: release_recency=unknown。\n- Host AI rule: 确认最近 release/tag 和 README 安装命令是否一致。\n- Why it matters: 安装命令和文档可能落后于代码，用户踩坑概率升高。\n- Evidence: evidence.maintainer_signals | github_repo:598342280 | https://github.com/langchain-ai/langchainjs | release_recency=unknown\n- Hard boundary: 不要把这个坑点包装成已解决、已验证或可忽略，除非后续验证证据明确证明它已经关闭。\n",
      "summary": "给宿主 AI 的上下文和工作边界。",
      "title": "AI Context Pack / 带给我的 AI"
    },
    "boundary_risk_card": {
      "asset_id": "boundary_risk_card",
      "filename": "BOUNDARY_RISK_CARD.md",
      "markdown": "# Boundary & Risk Card / 安装前决策卡\n\n项目：langchain-ai/langchainjs\n\n## Doramagic 试用结论\n\n当前结论：可以进入发布前推荐检查；首次使用仍应从最小权限、临时目录和可回滚配置开始。\n\n## 用户现在可以做\n\n- 可以先阅读 Human Manual，理解项目目的和主要工作流。\n- 可以复制 Prompt Preview 做安装前体验；这只验证交互感，不代表真实运行。\n- 可以把官方 Quick Start 命令放到隔离环境中验证，不要直接进主力环境。\n\n## 现在不要做\n\n- 不要把 Prompt Preview 当成项目实际运行结果。\n- 不要把 metadata-only validation 当成沙箱安装验证。\n- 不要把未验证能力写成“已支持、已跑通、可放心安装”。\n- 不要在首次试用时交出生产数据、私人文件、真实密钥或主力配置目录。\n\n## 安装前检查\n\n- 宿主 AI 是否匹配：local_cli\n- 官方安装入口状态：已发现官方入口\n- 是否在临时目录、临时宿主或容器中验证：必须是\n- 是否能回滚配置改动：必须能\n- 是否需要 API Key、网络访问、读写文件或修改宿主配置：未确认前按高风险处理\n- 是否记录了安装命令、实际输出和失败日志：必须记录\n\n## 当前阻塞项\n\n- 无阻塞项。\n\n## 项目专属踩坑\n\n- 来源证据：[Feature request] React Native support（high）：可能增加新用户试用和生产接入成本。 建议检查：来源问题仍为 open，Pack Agent 需要复核是否仍影响当前版本。\n- 来源证据：@langchain/core@1.1.46（medium）：可能增加新用户试用和生产接入成本。 建议检查：来源显示可能已有修复、规避或版本变化，说明书中必须标注适用版本。\n- 来源证据：Must pass in at least 1 record to upsert.（medium）：可能增加新用户试用和生产接入成本。 建议检查：来源问题仍为 open，Pack Agent 需要复核是否仍影响当前版本。\n- 能力判断依赖假设（medium）：假设不成立时，用户拿不到承诺的能力。 建议检查：将假设转成下游验证清单。\n- 维护活跃度未知（medium）：新项目、停更项目和活跃项目会被混在一起，推荐信任度下降。 建议检查：补 GitHub 最近 commit、release、issue/PR 响应信号。\n\n## 风险与权限提示\n\n- no_demo: medium\n\n## 证据缺口\n\n- 暂未发现结构化证据缺口。\n",
      "summary": "安装、权限、验证和推荐前风险。",
      "title": "Boundary & Risk Card / 边界与风险卡"
    },
    "human_manual": {
      "asset_id": "human_manual",
      "filename": "HUMAN_MANUAL.md",
      "markdown": "# https://github.com/langchain-ai/langchainjs 项目说明书\n\n生成时间：2026-05-16 06:23:32 UTC\n\n## 目录\n\n- [Introduction to LangChain.js](#introduction)\n- [Getting Started](#getting-started)\n- [Package Architecture](#package-architecture)\n- [Core Abstractions](#core-abstractions)\n- [Chat Models and LLM Providers](#chat-models)\n- [Embeddings Integration](#embeddings)\n- [Agent Framework](#agent-framework)\n- [Tools and Toolkits](#tools-toolkits)\n- [Vector Stores](#vector-stores)\n- [Memory Systems](#memory-system)\n\n<a id='introduction'></a>\n\n## Introduction to LangChain.js\n\n### 相关页面\n\n相关主题：[Getting Started](#getting-started), [Package Architecture](#package-architecture)\n\n<details>\n<summary>相关源码文件</summary>\n\n以下源码文件用于生成本页说明：\n\n- [README.md](https://github.com/langchain-ai/langchainjs/blob/main/README.md)\n- [libs/langchain/README.md](https://github.com/langchain-ai/langchainjs/blob/main/libs/langchain/README.md)\n- [libs/langchain-core/README.md](https://github.com/langchain-ai/langchainjs/blob/main/libs/langchain-core/README.md)\n- [libs/langchain-classic/README.md](https://github.com/langchain-ai/langchainjs/blob/main/libs/langchain-classic/README.md)\n</details>\n\n# Introduction to LangChain.js\n\nLangChain.js is a JavaScript/TypeScript framework designed for building applications with Large Language Models (LLMs) through composability. It provides a standardized interface for working with different LLM providers, offers tools for prompt management, and enables the creation of complex chains and agents that can interact with external data sources and APIs.\n\n## Architecture Overview\n\nLangChain.js is organized into a modular monorepo structure with multiple packages that serve different purposes. 
Each package focuses on specific functionality while maintaining interoperability through shared dependencies.\n\n```mermaid\ngraph TD\n    A[Application Layer] --> B[libs/langchain]\n    A --> C[@langchain/classic]\n    B --> D[@langchain/core]\n    C --> D\n    E[Provider Integrations] --> F[libs/providers/*]\n    F --> D\n    G[Utility Packages] --> H[@langchain/textsplitters]\n    H --> D\n    \n    style A fill:#e1f5fe\n    style D fill:#f3e5f5\n    style F fill:#fff3e0\n```\n\n## Core Packages\n\n### @langchain/core\n\nThe foundational package containing essential abstractions and schemas used throughout the LangChain.js ecosystem.\n\n| Property | Value |\n|----------|-------|\n| Name | @langchain/core |\n| Version | 1.1.46 |\n| Type | ES Module |\n| Engine | Node.js >= 20 |\n\n**Key Dependencies:**\n\n- `@cfworker/json-schema` - JSON schema validation\n- `@standard-schema/spec` - Standard schema support\n- `js-tiktoken` - Token counting\n- `langsmith` - Tracing and evaluation (>= 0.5.0 < 1.0.0)\n- `mustache` - Template rendering\n- `p-queue` - Promise queue management\n- `zod` - Schema validation\n\n资料来源：[libs/langchain-core/package.json:1-35](https://github.com/langchain-ai/langchainjs/blob/main/libs/langchain-core/package.json)\n\n### libs/langchain\n\nThe main LangChain package that provides the primary API surface for building LLM applications. This package focuses on the essential building blocks for modern agent development, including the `createAgent` API.\n\n### @langchain/classic\n\nA backward-compatibility package containing functionality migrated from LangChain v0.x. 
This package exists to support existing applications while the core `langchain` package focuses on modern agent development.\n\n**When to use @langchain/classic:**\n\n- Existing code using legacy chains (LLMChain, ConversationalRetrievalQAChain, RetrievalQAChain)\n- Using the indexing API with RecordManager\n- Depending on community integrations previously re-exported from langchain\n- Maintaining existing applications not yet ready for the new `createAgent` API\n\n资料来源：[libs/langchain-classic/README.md:1-45](https://github.com/langchain-ai/langchainjs/blob/main/libs/langchain-classic/README.md)\n\n## Provider Integrations\n\nLangChain.js provides integration packages for various LLM providers through the `libs/providers` directory.\n\n### Available Provider Packages\n\n| Package | Purpose |\n|---------|---------|\n| @langchain/anthropic | Anthropic Claude models integration |\n| @langchain/google-common | Common utilities for Google AI models |\n| @langchain/tavily | Tavily search integration |\n| @langchain/qdrant | Qdrant vector database integration |\n\nEach integration package follows a consistent template structure and includes:\n\n- TypeScript source code\n- ESLint linting configuration\n- Vitest testing framework\n- TypeScript build compilation via tsdown\n\n资料来源：[libs/providers/langchain-tavily/package.json:1-35](https://github.com/langchain-ai/langchainjs/blob/main/libs/providers/langchain-tavily/package.json)\n资料来源：[libs/providers/langchain-qdrant/package.json:1-35](https://github.com/langchain-ai/langchainjs/blob/main/libs/providers/langchain-qdrant/package.json)\n\n## Text Processing\n\n### @langchain/textsplitters\n\nA dedicated package for splitting text into chunks, commonly used in retrieval-augmented generation (RAG) pipelines.\n\n**Supported Splitting Strategies:**\n\n| Strategy | Use Case |\n|----------|----------|\n| RecursiveCharacterTextSplitter | General purpose text splitting |\n| HTML Text Splitter | Structured HTML content |\n| Custom 
Separators | Domain-specific splitting requirements |\n\n```typescript\nimport { RecursiveCharacterTextSplitter } from \"@langchain/textsplitters\";\n\nconst splitter = RecursiveCharacterTextSplitter.fromLanguage(\"html\", {\n  chunkSize: 175,\n  chunkOverlap: 20,\n});\nconst output = await splitter.createDocuments([text]);\n```\n\n资料来源：[libs/langchain-textsplitters/README.md:1-40](https://github.com/langchain-ai/langchainjs/blob/main/libs/langchain-textsplitters/README.md)\n资料来源：[examples/src/langchain-classic/indexes/html_text_splitter.ts:1-20](https://github.com/langchain-ai/langchainjs/blob/main/examples/src/langchain-classic/indexes/html_text_splitter.ts)\n\n## Testing Infrastructure\n\nLangChain.js includes custom Jest/Vitest matchers for testing chains, agents, and messages.\n\n```typescript\nexport const langchainMatchers = {\n  toBeHumanMessage,\n  toBeAIMessage,\n  toBeSystemMessage,\n  toBeToolMessage,\n  toHaveToolCalls,\n  toHaveToolCallCount,\n  toContainToolCall,\n  toHaveToolMessages,\n  toHaveBeenInterrupted,\n  toHaveStructuredResponse,\n};\n```\n\n**Matcher Usage:**\n\n```typescript\nexpect(someMessage).toBeHumanMessage();\nexpect(someMessage).toBeAIMessage();\nexpect(someChain).toHaveToolCalls([\n  { name: \"calculator\", args: { expression: \"2+2\" } }\n]);\n```\n\n资料来源：[libs/langchain-core/src/testing/matchers.ts:80-95](https://github.com/langchain-ai/langchainjs/blob/main/libs/langchain-core/src/testing/matchers.ts)\n\n## Agent Middleware\n\n### Human-in-the-Loop (HITL) Middleware\n\nThe framework supports human-in-the-loop workflows through configurable interrupt mechanisms.\n\n```typescript\nconst config: InterruptOnConfig = {\n  allowedDecisions: [\"approve\", \"edit\"],\n  description: formatToolDescription\n};\n```\n\n**Configuration Schema:**\n\n| Parameter | Type | Description |\n|-----------|------|-------------|\n| allowedDecisions | string[] | Actions the human can take |\n| description | string \\| Function | Description of the 
interrupt |\n| argsSchema | Record | JSON schema for action arguments |\n\n资料来源：[libs/langchain/src/agents/middleware/hitl.ts:60-80](https://github.com/langchain-ai/langchainjs/blob/main/libs/langchain/src/agents/middleware/hitl.ts)\n\n## Message Block Translators\n\nLangChain.js provides standardized message translation between different API formats, particularly for OpenAI and Anthropic.\n\n```typescript\nconst BlockTranslator = {\n  translateContent: (message) => {\n    if (typeof message.content === \"string\") {\n      return convertToV1FromChatCompletions(message);\n    }\n    return convertToV1FromResponses(message);\n  },\n  translateContentChunk: (message) => {\n    if (typeof message.content === \"string\") {\n      return convertToV1FromChatCompletionsChunk(message);\n    }\n    return convertToV1FromResponsesChunk(message);\n  },\n};\n```\n\n资料来源：[libs/langchain-core/src/messages/block_translators/openai.ts:1-15](https://github.com/langchain-ai/langchainjs/blob/main/libs/langchain-core/src/messages/block_translators/openai.ts)\n\n## Namespace System\n\nLangChain.js uses a namespace utility for tracking and identifying LangChain-specific objects.\n\n```typescript\nexport const ns = createNamespace(\"langchain\");\n```\n\nNamespaces provide:\n- Hierarchical path organization (`ns.sub(\"component\")`)\n- Instance type checking (`ns.isInstance(obj)`)\n- Branded type creation for type safety\n\n资料来源：[libs/langchain-core/src/utils/namespace.ts:1-45](https://github.com/langchain-ai/langchainjs/blob/main/libs/langchain-core/src/utils/namespace.ts)\n\n## Legacy Chains (Deprecated)\n\nThe `@langchain/classic` package includes deprecated chain implementations:\n\n| Chain | Purpose |\n|-------|---------|\n| LLMChain | Basic LLM call with prompt template |\n| ConversationalRetrievalQAChain | Q&A with conversation memory |\n| RetrievalQAChain | Q&A over documents |\n| StuffDocumentsChain | Stuff documents into prompt |\n| MapReduceDocumentsChain | Map-reduce document 
operations |\n| RefineDocumentsChain | Iterative document refinement |\n\n资料来源：[libs/langchain-classic/README.md:15-30](https://github.com/langchain-ai/langchainjs/blob/main/libs/langchain-classic/README.md)\n\n## Development Workflow\n\n### Repository Structure\n\n```\nlangchainjs/\n├── libs/\n│   ├── langchain/              # Main LangChain package\n│   ├── langchain-core/         # Core abstractions\n│   ├── langchain-classic/      # Legacy v0.x functionality\n│   ├── langchain-textsplitters/ # Text splitting utilities\n│   └── providers/              # Provider integrations\n├── examples/                   # Usage examples\n└── libs/create-langchain-integration/  # Package template\n```\n\n### Building Packages\n\n```bash\npnpm install\npnpm build\n```\n\n### Running Examples\n\n```bash\ncp .env.example .env\n# Edit .env with API keys\npnpm run start ./src/path/to/example.ts\n```\n\n资料来源：[examples/src/README.md:1-25](https://github.com/langchain-ai/langchainjs/blob/main/examples/src/README.md)\n\n## Migration Guide\n\n### From langchain v0.x to v1.0\n\n**For new projects:** Use the `langchain` v1.0 package with `createAgent` API.\n\n**For existing projects:** Install `@langchain/classic` for backward compatibility:\n\n```bash\nnpm install @langchain/classic @langchain/core\n```\n\nThe new APIs provide:\n- Cleaner, more powerful agent building\n- Middleware support\n- Better performance for modern workflows\n- Focused API surface with less complexity\n- Active development on v1.0 features\n\n资料来源：[libs/langchain-classic/README.md:30-55](https://github.com/langchain-ai/langchainjs/blob/main/libs/langchain-classic/README.md)\n\n## Version Compatibility\n\nWhen using multiple LangChain packages, ensure they share the same `@langchain/core` instance:\n\n```json\n{\n  \"dependencies\": {\n    \"@langchain/core\": \"^0.3.0\"\n  },\n  \"overrides\": {\n    \"@langchain/core\": \"^0.3.0\"\n  },\n  \"pnpm\": {\n    \"overrides\": {\n      \"@langchain/core\": 
\"^0.3.0\"\n    }\n  }\n}\n```\n\n资料来源：[libs/create-langchain-integration/template/README.md:20-35](https://github.com/langchain-ai/langchainjs/blob/main/libs/create-langchain-integration/template/README.md)\n\n---\n\n<a id='getting-started'></a>\n\n## Getting Started\n\n### 相关页面\n\n相关主题：[Introduction to LangChain.js](#introduction)\n\n<details>\n<summary>相关源码文件</summary>\n\n以下源码文件用于生成本页说明：\n\n- [README.md](https://github.com/langchain-ai/langchainjs/blob/main/README.md)\n- [libs/langchain-textsplitters/README.md](https://github.com/langchain-ai/langchainjs/blob/main/libs/langchain-textsplitters/README.md)\n- [libs/langchain-classic/README.md](https://github.com/langchain-ai/langchainjs/blob/main/libs/langchain-classic/README.md)\n- [libs/providers/langchain-anthropic/README.md](https://github.com/langchain-ai/langchainjs/blob/main/libs/providers/langchain-anthropic/README.md)\n- [libs/providers/langchain-cohere/README.md](https://github.com/langchain-ai/langchainjs/blob/main/libs/providers/langchain-cohere/README.md)\n- [libs/providers/langchain-aws/README.md](https://github.com/langchain-ai/langchainjs/blob/main/libs/providers/langchain-aws/README.md)\n- [examples/src/README.md](https://github.com/langchain-ai/langchainjs/blob/main/examples/src/README.md)\n- [libs/create-langchain-integration/template/README.md](https://github.com/langchain-ai/langchainjs/blob/main/libs/create-langchain-integration/template/README.md)\n</details>\n\n# Getting Started\n\nLangChain.js is a comprehensive framework for building LLM-powered applications. 
It enables developers to chain together interoperable components and third-party integrations, simplifying AI application development while future-proofing decisions as underlying technology evolves.\n\n资料来源：[README.md:1-15]()\n\n## Prerequisites\n\n### System Requirements\n\n| Requirement | Version/Notes |\n|-------------|--------------|\n| Node.js | v18.x or higher recommended |\n| Package Manager | npm, yarn, or pnpm |\n| Runtime | Node.js or Edge (Vercel, Cloudflare Workers) |\n\n### Environment Setup\n\nBefore installing LangChain.js packages, ensure your environment is properly configured. Most integrations require API keys from respective service providers.\n\n```bash\n# Example API key environment variables\nexport ANTHROPIC_API_KEY=\"your-key-here\"\nexport OPENAI_API_KEY=\"your-key-here\"\nexport COHERE_API_KEY=\"your-key-here\"\nexport TOGETHER_AI_API_KEY=\"your-key-here\"\nexport BEDROCK_AWS_REGION=\"us-east-1\"\n```\n\n资料来源：[libs/providers/langchain-anthropic/README.md:1-30]()\n\n## Installation\n\nLangChain.js follows a modular architecture with packages organized by functionality. Understanding the package structure is essential for proper installation.\n\n### Package Architecture Overview\n\n```mermaid\ngraph TD\n    A[Application] --> B[langchain v1.0]\n    A --> C[@langchain/classic]\n    B --> D[@langchain/core]\n    C --> D\n    B --> E[@langchain/textsplitters]\n    B --> F[Provider Packages]\n    F --> G[@langchain/anthropic]\n    F --> H[@langchain/openai]\n    F --> I[@langchain/cohere]\n    F --> J[@langchain/aws]\n    F --> K[@langchain/google-common]\n```\n\n资料来源：[libs/langchain-classic/README.md:1-50]()\n\n### Core Packages\n\n#### @langchain/core\n\nThe foundational package that all other LangChain packages depend on. 
It provides base classes, interfaces, and utilities used across the ecosystem.\n\n```bash npm2yarn\nnpm install @langchain/core\n```\n\n资料来源：[libs/providers/langchain-anthropic/README.md:1-15]()\n\n#### langchain (v1.0)\n\nThe main package for building agents with modern APIs, including the `createAgent` function with middleware support.\n\n```bash npm2yarn\nnpm install langchain\n```\n\n#### @langchain/textsplitters\n\nSpecialized package for splitting documents into chunks, commonly used in retrieval-augmented generation (RAG) pipelines.\n\n```bash npm2yarn\nnpm install @langchain/textsplitters @langchain/core\n```\n\n资料来源：[libs/langchain-textsplitters/README.md:1-25]()\n\n### Provider Packages\n\nLangChain.js provides official integrations for various LLM providers:\n\n| Package | Purpose | Documentation |\n|---------|---------|---------------|\n| `@langchain/anthropic` | Anthropic Claude models | [Link](https://js.langchain.com/docs/integrations/chat/anthropic) |\n| `@langchain/openai` | OpenAI GPT models | [Link](https://js.langchain.com/docs/integrations/chat/openai) |\n| `@langchain/cohere` | Cohere models | [Link](https://js.langchain.com/docs/integrations/chat/cohere) |\n| `@langchain/aws` | AWS Bedrock models | [Link](https://js.langchain.com/docs/integrations/chat/aws-bedrock) |\n| `@langchain/together-ai` | Together AI models | [Link](https://js.langchain.com/docs/integrations/chat/together) |\n\n```bash npm2yarn\nnpm install @langchain/anthropic @langchain/core\nnpm install @langchain/openai @langchain/core\nnpm install @langchain/cohere @langchain/core\n```\n\n资料来源：[libs/providers/langchain-anthropic/README.md:1-25]()\n\n### @langchain/classic (Legacy)\n\nFor existing applications using LangChain v0.x, the `@langchain/classic` package provides backward compatibility with legacy chains and functionality.\n\n```bash npm2yarn\nnpm install @langchain/classic\n```\n\nThis package requires `@langchain/core` as a peer dependency:\n\n```bash npm2yarn\nnpm 
install @langchain/core\n```\n\n资料来源：[libs/langchain-classic/README.md:1-60]()\n\n## Core Dependency Management\n\nWhen using multiple LangChain packages together, ensure all packages resolve to the same instance of `@langchain/core` to avoid conflicts.\n\nAdd the following configuration to your `package.json`:\n\n```json\n{\n  \"name\": \"your-project\",\n  \"version\": \"0.0.0\",\n  \"dependencies\": {\n    \"@langchain/anthropic\": \"^0.0.9\",\n    \"@langchain/core\": \"^0.3.0\"\n  },\n  \"resolutions\": {\n    \"@langchain/core\": \"^0.3.0\"\n  },\n  \"overrides\": {\n    \"@langchain/core\": \"^0.3.0\"\n  },\n  \"pnpm\": {\n    \"overrides\": {\n      \"@langchain/core\": \"^0.3.0\"\n    }\n  }\n}\n```\n\n资料来源：[libs/providers/langchain-cohere/README.md:15-45]()\n\n## Development Setup\n\n### Building from Source\n\nTo develop with LangChain.js locally or run examples:\n\n```bash\n# Install dependencies from repository root\npnpm install\n\n# Build all packages\npnpm build\n```\n\n资料来源：[examples/src/README.md:1-20]()\n\n### Running Examples\n\nMost examples require API keys. Configure your environment by copying the example environment file:\n\n```bash\ncp .env.example .env\n```\n\nThen edit `.env` with your API keys. 
Run examples using the provided script:\n\n```bash\n# From the examples/ directory\npnpm run start <path to example>\n\n# Example\npnpm run start ./src/prompts/few_shot.ts\n```\n\nTo run examples with transpiled JavaScript:\n\n```bash\npnpm run start:dist <path to example>\n\n# Example\npnpm run start:dist ./dist/prompts/few_shot.js\n```\n\n资料来源：[examples/src/README.md:1-45]()\n\n### Testing\n\nTest files follow naming conventions:\n- Unit tests: `*.test.ts`\n- Integration tests: `*.int.test.ts`\n\n```bash\n# Run all tests\npnpm test\n\n# Run integration tests specifically\npnpm test:int\n```\n\nFor individual packages:\n\n```bash\n# Build a specific package\npnpm build --filter @langchain/textsplitters\n\n# Run tests for a specific package\ncd libs/langchain-textsplitters\npnpm test\n```\n\n资料来源：[libs/langchain-textsplitters/README.md:25-50]()\n\n## Quick Start Example\n\n### Basic Chat Model Usage\n\n```typescript\nimport { ChatAnthropic } from \"@langchain/anthropic\";\nimport { HumanMessage } from \"@langchain/core/messages\";\n\nconst model = new ChatAnthropic({\n  model: \"claude-sonnet-4-5-20250929\",\n});\n\nconst response = await model.invoke([\n  new HumanMessage('Translate \"I love programming\" into French.')\n]);\n\nconsole.log(response.content);\n// Output: J'adore la programmation.\n```\n\n资料来源：[libs/providers/langchain-anthropic/README.md:1-50]()\n\n### Document Splitting for RAG\n\n```typescript\nimport { RecursiveCharacterTextSplitter } from \"@langchain/textsplitters\";\n\nconst text = `<!DOCTYPE html>\n<html>\n  <head>\n    <title>LangChain</title>\n  </head>\n  <body>\n    <h1>Welcome</h1>\n    <p>Building applications with LLMs</p>\n  </body>\n</html>`;\n\nconst splitter = RecursiveCharacterTextSplitter.fromLanguage(\"html\", {\n  chunkSize: 175,\n  chunkOverlap: 20,\n});\n\nconst output = await splitter.createDocuments([text]);\nconsole.log(output);\n```\n\n资料来源：[examples/src/langchain-classic/indexes/html_text_splitter.ts:1-30]()\n\n## 
Choosing the Right Package\n\n### For New Projects\n\nUse `langchain` v1.0 for new projects. It provides:\n\n- **`createAgent`**: A cleaner, more powerful way to build agents with middleware support\n- **Better performance**: Optimized for modern agent workflows\n- **Focused API surface**: Less complexity, easier to learn\n- **Active development**: New features and improvements focus on v1.0 APIs\n\n### For Legacy Projects\n\nUse `@langchain/classic` if you:\n\n- Have existing code using legacy chains (e.g., `LLMChain`, `ConversationalRetrievalQAChain`)\n- Use the indexing API\n- Depend on functionality from `@langchain/community` previously re-exported from `langchain`\n- Are maintaining an existing application and not yet ready to migrate\n\n### Package Decision Flow\n\n```mermaid\ngraph TD\n    A[New Project?] -->|Yes| B[Use langchain v1.0]\n    A -->|No| C[Maintaining Existing Code?]\n    C -->|Yes| D[Using Legacy Chains?]\n    C -->|No| E[Use langchain v1.0]\n    D -->|Yes| F[Use @langchain/classic]\n    D -->|No| E\n```\n\n资料来源：[libs/langchain-classic/README.md:40-80]()\n\n## Additional Resources\n\n| Resource | Description |\n|----------|-------------|\n| [Documentation](https://docs.langchain.com/oss/javascript/langchain/overview) | Official LangChain.js documentation |\n| [Deep Agents](https://docs.langchain.com/oss/javascript/deepagents/) | Higher-level package for common agent patterns |\n| [Release Notes](https://docs.langchain.com/oss/javascript/releases/langchain-v1) | Version-specific changelog and migration guides |\n| [API Reference](https://api.js.langchain.com/) | Generated API documentation |\n\n资料来源：[README.md:15-30]()\n\n---\n\n<a id='package-architecture'></a>\n\n## Package Architecture\n\n### 相关页面\n\n相关主题：[Introduction to LangChain.js](#introduction), [Core Abstractions](#core-abstractions)\n\n<details>\n<summary>相关源码文件</summary>\n\n以下源码文件用于生成本页说明：\n\n- 
[pnpm-workspace.yaml](https://github.com/langchain-ai/langchainjs/blob/main/pnpm-workspace.yaml)\n- [turbo.json](https://github.com/langchain-ai/langchainjs/blob/main/turbo.json)\n- [libs/langchain-core/package.json](https://github.com/langchain-ai/langchainjs/blob/main/libs/langchain-core/package.json)\n- [libs/langchain/package.json](https://github.com/langchain-ai/langchainjs/blob/main/libs/langchain/package.json)\n- [libs/langchain-classic/package.json](https://github.com/langchain-ai/langchainjs/blob/main/libs/langchain-classic/package.json)\n- [libs/langchain-mcp-adapters/package.json](https://github.com/langchain-ai/langchainjs/blob/main/libs/langchain-mcp-adapters/package.json)\n- [libs/langchain-textsplitters/package.json](https://github.com/langchain-ai/langchainjs/blob/main/libs/langchain-textsplitters/package.json)\n</details>\n\n# Package Architecture\n\n## Overview\n\nThe LangChain.js repository employs a sophisticated **monorepo architecture** built on pnpm workspaces and Turbo. This design enables modular development, shared tooling, and independent versioning across multiple packages. The monorepo structure separates concerns into distinct layers: core abstractions, main integration packages, provider-specific SDKs, and community contributions. This architectural approach allows developers to install only the dependencies they need while maintaining a coherent ecosystem where packages can reference and compose with each other seamlessly.\n\n## Monorepo Structure\n\n### Workspace Organization\n\nThe repository uses **pnpm workspaces** to manage multiple packages under a single repository. 
This enables efficient dependency management, shared node_modules, and unified versioning strategies across the ecosystem.\n\n```mermaid\ngraph TD\n    A[Root: langchainjs] --> B[libs/]\n    A --> C[examples/]\n    A --> D[docs/]\n    B --> E[langchain-core/]\n    B --> F[langchain/]\n    B --> G[langchain-classic/]\n    B --> H[providers/]\n    B --> I[langchain-textsplitters/]\n    B --> J[community/]\n    H --> K[langchain-openai/]\n    H --> L[langchain-anthropic/]\n    H --> M[langchain-google/]\n    H --> N[langchain-pinecone/]\n    H --> O[langchain-qdrant/]\n```\n\n### Build Pipeline with Turbo\n\nThe build system uses **Turbo** to orchestrate builds, tests, and linting across packages. Turbo's caching mechanism significantly improves build times by only rebuilding packages that have changed since the last build.\n\n```mermaid\ngraph LR\n    A[pnpm install] --> B[turbo build]\n    B --> C[langchain-core]\n    C --> D[langchain]\n    C --> E[providers]\n    C --> F[community]\n    D --> G[langchain-classic]\n    G --> H[examples]\n```\n\n## Core Packages\n\n### @langchain/core\n\nThe foundational package that contains all core abstractions, interfaces, and utilities. 
This package is a peer dependency for all other LangChain packages and must be kept synchronized across the ecosystem.\n\n```mermaid\ngraph TD\n    A[@langchain/core] --> B[Messages & Chat Models]\n    A --> C[Tools & Toolkits]\n    A --> D[Vector Stores]\n    A --> E[Document Loaders]\n    A --> F[Output Parsers]\n    A --> G[Retrievers]\n    A --> H[Callbacks]\n    A --> I[Memory]\n```\n\n**Key Responsibilities:**\n\n- Defines base classes for all major abstractions (BaseLanguageModel, BaseRetriever, BaseChatModel)\n- Provides TypeScript utilities and type guards for common patterns\n- Exports core message types (AIMessage, HumanMessage, SystemMessage, ToolMessage)\n- Implements hash utilities and data transformation functions\n\n### @langchain/langchain\n\nThe main package that re-exports functionality from core and community packages. This serves as a convenient entry point for users who want access to all integrations without managing individual package dependencies.\n\n### @langchain/classic\n\nContains legacy chain implementations from v0.x that have been deprecated or replaced in v1.0. This package provides migration paths for existing users while maintaining backward compatibility.\n\n| Component | Description |\n|-----------|-------------|\n| LLMChain | Basic chain for calling an LLM with a prompt template |\n| ConversationalRetrievalQAChain | Chain for conversational question-answering over documents |\n| RetrievalQAChain | Chain for question-answering over documents without conversation memory |\n| StuffDocumentsChain | Chain for stuffing documents into a prompt |\n| MapReduceDocumentsChain | Chain for map-reduce operations over documents |\n| RefineDocumentsChain | Chain for iterative refinement over documents |\n\n## Text Processing Packages\n\n### @langchain/textsplitters\n\nProvides various implementations of text splitters commonly used in retrieval-augmented generation (RAG) pipelines. 
Text splitters break large documents into smaller chunks for embedding and retrieval.\n\n**Supported Languages:**\n\n| Language | Use Case |\n|----------|----------|\n| html | HTML document splitting |\n| markdown | Markdown document splitting |\n| javascript | JavaScript/TypeScript code splitting |\n| python | Python code splitting |\n| text | Plain text splitting |\n\n**Example Usage:**\n\n```typescript\nimport { RecursiveCharacterTextSplitter } from \"@langchain/textsplitters\";\n\nconst splitter = RecursiveCharacterTextSplitter.fromLanguage(\"html\", {\n  chunkSize: 175,\n  chunkOverlap: 20,\n});\nconst output = await splitter.createDocuments([text]);\n```\n\n## Provider Integration Packages\n\nThe provider packages in `libs/providers/` contain integrations for specific third-party services. Each package follows a consistent structure and can be used independently or alongside other LangChain packages.\n\n### Package Naming Convention\n\nProvider packages follow the naming pattern `@langchain/[provider-name]`, where the provider name typically corresponds to the service being integrated. 
All provider packages depend on `@langchain/core` and may optionally depend on the provider's official SDK.\n\n### Provider Packages\n\n| Package | Dependency |\n|---------|------------|\n| @langchain/anthropic | @langchain/core, @anthropic-ai/sdk |\n| @langchain/openai | @langchain/core, openai |\n| @langchain/google | @langchain/core, @google-ai/generativelanguage |\n| @langchain/pinecone | @langchain/core, @pinecone-database/pinecone |\n| @langchain/qdrant | @langchain/core, qdrant-client |\n\n### Common Package Structure\n\nEach provider package follows a standardized structure:\n\n```mermaid\ngraph TD\n    A[Package Root] --> B[README.md]\n    A --> C[package.json]\n    A --> D[src/index.ts]\n    A --> E[src/chat_models/]\n    A --> F[src/output_parsers/]\n    A --> G[tests/]\n    E --> H[index.ts]\n    G --> I[*.test.ts]\n    G --> J[*.int.test.ts]\n```\n\n## Dependency Management\n\n### Core Dependency Synchronization\n\nWhen using multiple LangChain packages in a project, it is critical to ensure all packages depend on the same instance of `@langchain/core`. 
Package managers may resolve different versions of `@langchain/core` as separate instances, causing runtime errors.\n\n**Recommended Configuration (package.json):**\n\n```json\n{\n  \"name\": \"your-project\",\n  \"version\": \"0.0.0\",\n  \"dependencies\": {\n    \"@langchain/openai\": \"^0.0.0\",\n    \"@langchain/anthropic\": \"^0.0.0\",\n    \"@langchain/core\": \"^0.3.0\"\n  },\n  \"resolutions\": {\n    \"@langchain/core\": \"^0.3.0\"\n  },\n  \"overrides\": {\n    \"@langchain/core\": \"^0.3.0\"\n  }\n}\n```\n\nDifferent package managers require specific configuration fields:\n\n| Package Manager | Field |\n|----------------|-------|\n| yarn | `resolutions` |\n| npm | `overrides` |\n| pnpm | `pnpm.overrides` |\n\n## Entry Points and Exports\n\n### Export Strategy\n\nPackages can export functionality through two mechanisms: re-exports from `src/index.ts` or explicit entry points defined in the `exports` field of `package.json`. The exports field enables conditional imports and tree-shaking optimization.\n\n### Deprecation Warnings\n\nWhen importing from deprecated entry points, packages emit console warnings directing users to the new import paths. 
These warnings can be suppressed by setting the environment variable `LANGCHAIN_SUPPRESS_MIGRATION_WARNINGS` to `\"true\"`.\n\n```typescript\n// Deprecated warning format\nif (getEnvironmentVariable(\"LANGCHAIN_SUPPRESS_MIGRATION_WARNINGS\") !== \"true\") {\n  console.warn(warningText);\n}\n```\n\n## Development Workflow\n\n### Installing Dependencies\n\n```bash\npnpm install\n```\n\n### Building Packages\n\nIndividual packages can be built using the Turbo filter:\n\n```bash\npnpm build --filter @langchain/core\n```\n\n### Running Tests\n\n| Test Type | Command | File Pattern |\n|-----------|---------|--------------|\n| Unit Tests | `pnpm test` | `*.test.ts` |\n| Integration Tests | `pnpm test:int` | `*.int.test.ts` |\n\nTest files should be located in a `tests/` directory within the `src/` folder.\n\n### Linting and Formatting\n\n```bash\npnpm lint && pnpm format\n```\n\n## Migration Path\n\n### From langchain v0.x to v1.0\n\nThe v1.0 release introduces significant architectural changes. Legacy functionality has been moved to `@langchain/classic`, while new abstractions live in `@langchain/core` and provider-specific packages. Users upgrading from v0.x should:\n\n1. Install `@langchain/classic` for backward compatibility\n2. Install new provider packages as needed\n3. Update import statements to use new package paths\n4. Remove imports from deprecated `langchain/` subpaths\n\n```bash\nnpm install @langchain/classic\nnpm install @langchain/anthropic @langchain/core\n```\n\n## Summary\n\nThe LangChain.js package architecture reflects a commitment to modularity, performance, and developer experience. By separating concerns across well-defined packages, the ecosystem enables efficient development while maintaining interoperability. 
The monorepo structure, powered by pnpm workspaces and Turbo, provides the foundation for managing this complexity at scale.\n\n---\n\n<a id='core-abstractions'></a>\n\n## Core Abstractions\n\n### 相关页面\n\n相关主题：[Package Architecture](#package-architecture), [Chat Models and LLM Providers](#chat-models)\n\n<details>\n<summary>相关源码文件</summary>\n\n以下源码文件用于生成本页说明：\n\n- [libs/langchain-core/src/runnables/base.ts](https://github.com/langchain-ai/langchainjs/blob/main/libs/langchain-core/src/runnables/base.ts)\n- [libs/langchain-core/src/language_models/chat_models.ts](https://github.com/langchain-ai/langchainjs/blob/main/libs/langchain-core/src/language_models/chat_models.ts)\n- [libs/langchain-core/src/language_models/llms.ts](https://github.com/langchain-ai/langchainjs/blob/main/libs/langchain-core/src/language_models/llms.ts)\n- [libs/langchain-core/src/embeddings.ts](https://github.com/langchain-ai/langchainjs/blob/main/libs/langchain-core/src/embeddings.ts)\n- [libs/langchain-core/src/vectorstores.ts](https://github.com/langchain-ai/langchainjs/blob/main/libs/langchain-core/src/vectorstores.ts)\n- [libs/langchain-core/src/callbacks/base.ts](https://github.com/langchain-ai/langchainjs/blob/main/libs/langchain-core/src/callbacks/base.ts)\n- [libs/langchain-core/src/prompts/index.ts](https://github.com/langchain-ai/langchainjs/blob/main/libs/langchain-core/src/prompts/index.ts)\n- [libs/langchain-core/src/output_parsers/index.ts](https://github.com/langchain-ai/langchainjs/blob/main/libs/langchain-core/src/output_parsers/index.ts)\n</details>\n\n# Core Abstractions\n\nThe **Core Abstractions** in LangChain.js form the foundational building blocks that enable interoperability between different components in LLM-powered applications. 
These abstractions define standardized interfaces for language models, embeddings, vector stores, prompts, output parsers, and runnable components, allowing developers to swap implementations without changing application logic.\n\n## Overview\n\nLangChain.js implements a set of abstract base classes in `@langchain/core` that establish contracts for:\n\n- **Runnable components** that can be composed into processing pipelines\n- **Language models** (chat and completion models)\n- **Embedding models** for vector representations\n- **Vector stores** for semantic search and retrieval\n- **Callback systems** for observability and event handling\n- **Prompt templates** for structured input formatting\n- **Output parsers** for structured response extraction\n\nThis abstraction layer enables loose coupling between components, making it straightforward to migrate between different model providers, vector stores, or embedding implementations while maintaining the same application code.\n\n## Runnable Architecture\n\n### BaseRunnable\n\nThe `BaseRunnable` class is the central abstraction that all runnable components extend. 
It provides a unified interface for invoking, batching, streaming, and composing operations.\n\n```mermaid\ngraph TD\n    A[BaseRunnable] --> B[invoke input]\n    A --> C[batch inputs]\n    A --> D[stream output]\n    A --> E[async operations]\n    \n    B --> F[Runnable Sequence]\n    B --> G[Runnable Branch]\n    B --> H[Runnable Map]\n    \n    style A fill:#e1f5fe\n    style F fill:#fff3e0\n    style G fill:#fff3e0\n    style H fill:#fff3e0\n```\n\nThe `BaseRunnable` interface defines the following core methods:\n\n| Method | Description | Signature |\n|--------|-------------|-----------|\n| `invoke` | Synchronous single input processing | `(input: Input, options?: RunnableConfig): Promise<Output>` |\n| `batch` | Process multiple inputs efficiently | `(inputs: Input[], options?: RunnableConfig): Promise<Output[]>` |\n| `stream` | Stream output chunks | `(input: Input, options?: RunnableConfig): Promise<Readable>` |\n| `pipe` | Chain runnables together | `(coerceToRunnable(other)): BaseRunnable` |\n| `withConfig` | Attach configuration | `(config: RunnableConfig): Runnable` |\n\n资料来源：[libs/langchain-core/src/runnables/base.ts]()\n\n### RunnableConfig\n\nConfiguration options passed to runnable operations:\n\n```typescript\ninterface RunnableConfig {\n  tags?: string[];\n  metadata?: Record<string, unknown>;\n  callbacks?: CallbackManager | CallbackHandler | Array<CallbackManager | CallbackHandler>;\n  maxConcurrency?: number;\n  maxTokens?: number;\n  recursionLimit?: number;\n  runName?: string;\n}\n```\n\n资料来源：[libs/langchain-core/src/runnables/base.ts]()\n\n## Language Model Abstractions\n\n### BaseChatModel\n\nThe `BaseChatModel` abstract class defines the contract for chat-oriented language models that process messages and return chat results.\n\n```mermaid\ngraph LR\n    A[HumanMessage] --> B[BaseChatModel]\n    C[SystemMessage] --> B\n    D[AIMessage] --> B\n    \n    B --> E[invoke]\n    B --> F[batch]\n    B --> G[stream]\n    \n    E --> 
H[BaseMessageChunk]\n    F --> I[\"BaseMessage[]\"]\n    G --> J[Generator of chunks]\n    \n    style B fill:#e8f5e9\n```\n\nKey properties and methods:\n\n| Property/Method | Type | Description |\n|-----------------|------|-------------|\n| `_llmType` | `string` (protected) | Identifier for the specific LLM type |\n| `_invocationParams` | `Record<string, unknown>` (protected) | Parameters for the API call |\n| `invoke` | `Method` | Process input and return chat result |\n| `generate` | `Method` | Generate responses with usage metadata |\n| `stream` | `Method` | Stream response chunks |\n\n资料来源：[libs/langchain-core/src/language_models/chat_models.ts]()\n\n### BaseLLM\n\nThe `BaseLLM` class provides the abstraction for traditional completion-based language models:\n\n| Property/Method | Type | Description |\n|-----------------|------|-------------|\n| `_llmType` | `string` (protected) | Identifier for the LLM type |\n| `_call` | `protected abstract method` | Core completion logic |\n| `_streamResponseChunks` | `protected method` | Optional streaming support |\n| `generate` | `Method` | Generate completions for prompts |\n\n资料来源：[libs/langchain-core/src/language_models/llms.ts]()\n\n### Message Types\n\nLangChain.js uses a structured message system:\n\n| Message Type | Description |\n|-------------|-------------|\n| `HumanMessage` | Input from the user |\n| `AIMessage` | Response from the model |\n| `SystemMessage` | System-level instructions |\n| `ToolMessage` | Response from tool execution |\n| `FunctionMessage` | Legacy function call responses |\n\n## Embedding Abstractions\n\n### BaseEmbeddings\n\nThe `BaseEmbeddings` abstract class provides a standardized interface for generating vector embeddings from text:\n\n```mermaid\ngraph TD\n    A[BaseEmbeddings] --> B[embedQuery text]\n    A --> C[embedDocuments texts]\n    \n    B --> D[float array]\n    C --> E[\"float array[]\"]\n    \n    style A fill:#fce4ec\n```\n\n| Method | Description | Parameters 
|\n|--------|-------------|------------|\n| `embedQuery` | Generate embedding for a single query | `document: string` |\n| `embedDocuments` | Generate embeddings for multiple documents | `documents: string[]` |\n\n`embedQuery` returns `Promise<number[]>` (a single embedding vector), while `embedDocuments` returns `Promise<number[][]>` (one vector per document).\n\n资料来源：[libs/langchain-core/src/embeddings.ts]()\n\n## Vector Store Abstractions\n\n### VectorStore\n\nThe `VectorStore` class provides an abstraction for storing and querying vector embeddings:\n\n```mermaid\ngraph TD\n    A[VectorStore] --> B[addDocuments]\n    A --> C[similaritySearch]\n    A --> D[similaritySearchVectorWithScore]\n    A --> E[maxMarginalRelevanceSearch]\n    \n    B --> F[\"Document[]\"]\n    C --> G[\"Document[] by query\"]\n    D --> H[\"Document[] with scores\"]\n    E --> I[Diverse results]\n    \n    style A fill:#fff8e1\n```\n\nCore methods:\n\n| Method | Description | Parameters |\n|--------|-------------|------------|\n| `addDocuments` | Add documents to the store | `documents: Document[], addOptions?` |\n| `similaritySearch` | Find similar documents | `query: string, k?, filter?` |\n| `similaritySearchVectorWithScore` | Search by embedding vector | `query: number[], k?, filter?` |\n| `maxMarginalRelevanceSearch` | MMR diversity search | `query: string, k?, fetchK?, lambda?` |\n| `delete` | Remove documents by ID | `params` |\n\n资料来源：[libs/langchain-core/src/vectorstores.ts]()\n\n### Document Structure\n\nDocuments are the fundamental unit stored in vector stores:\n\n```typescript\ninterface Document {\n  pageContent: string;  // The text content\n  metadata: Record<string, unknown>;  // Associated metadata\n}\n```\n\n## Callback System\n\n### BaseCallbackHandler\n\nThe `BaseCallbackHandler` provides a hook system for observing and logging LangChain operations:\n\n```mermaid\ngraph LR\n    A[LangChain Event] --> B[CallbackManager]\n    B --> C[onChainStart]\n    B --> D[onChainEnd]\n    B --> E[onChainError]\n    B --> F[onLLMStart]\n    B --> G[onLLMEnd]\n    
B --> H[onToolStart]\n    B --> I[onToolEnd]\n    \n    style B fill:#e3f2fd\n```\n\nEvent handlers:\n\n| Event | Handler | Trigger |\n|-------|---------|---------|\n| Chain lifecycle | `onChainStart`, `onChainEnd`, `onChainError` | Chain execution |\n| LLM lifecycle | `onLLMStart`, `onLLMEnd`, `onLLMError` | Model calls |\n| Tool lifecycle | `onToolStart`, `onToolEnd`, `onToolError` | Tool execution |\n| Retriever | `onRetrieverStart`, `onRetrieverEnd` | Retrieval operations |\n| Text generation | `onText`, `onLLMNewToken` | Streaming events |\n\n资料来源：[libs/langchain-core/src/callbacks/base.ts]()\n\n## Prompt Abstractions\n\n### BasePromptTemplate\n\nThe `BasePromptTemplate` abstract class standardizes prompt creation and formatting:\n\n```mermaid\ngraph TD\n    A[BasePromptTemplate] --> B[merge]\n    A --> C[partial]\n    A --> D[invoke]\n    \n    E[Input Values] --> C\n    F[Partial Values] --> B\n    G[Runtime Input] --> D\n    \n    B --> H[PromptTemplate]\n    C --> I[PromptTemplate]\n    D --> J[Formatted String]\n    \n    style A fill:#f3e5f5\n```\n\nKey methods:\n\n| Method | Description |\n|--------|-------------|\n| `invoke` | Format prompt with input values |\n| `partial` | Create partially filled template |\n| `merge` | Combine multiple templates |\n| `save` | Serialize template to file |\n\n资料来源：[libs/langchain-core/src/prompts/index.ts]()\n\n### PromptValue\n\nPrompt values represent the formatted input to language models:\n\n```typescript\ninterface PromptValue {\n  toChatMessages(): BaseMessage[];\n  toString(): string;\n}\n```\n\n## Output Parser Abstractions\n\n### BaseOutputParser\n\nThe `BaseOutputParser` class provides a standardized interface for parsing and validating model outputs:\n\n```mermaid\ngraph TD\n    A[BaseOutputParser] --> B[parse text]\n    A --> C[parseWithPrompt]\n    A --> D[getFormatInstructions]\n    \n    E[Raw Output] --> B\n    F[Prompt + Output] --> C\n    \n    B --> G[Structured Result]\n    C --> G\n    D --> 
H[Format Instructions]\n    \n    style A fill:#e0f7fa\n```\n\nRequired methods:\n\n| Method | Description | Return Type |\n|--------|-------------|-------------|\n| `parse` | Parse raw output | `Promise<T>` |\n| `parseWithPrompt` | Parse with prompt context | `Promise<T>` |\n| `getFormatInstructions` | Get parsing instructions | `string` |\n\nOptional methods:\n\n| Method | Description |\n|--------|-------------|\n| `invoke` | Unified parsing interface |\n| `resultType` | TypeScript type information |\n\n资料来源：[libs/langchain-core/src/output_parsers/index.ts]()\n\n## Component Composition\n\nLangChain.js enables powerful composition through the pipe operator:\n\n```mermaid\ngraph LR\n    A[PromptTemplate] -->|\"pipe\"| B[ChatModel]\n    B -->|\"pipe\"| C[OutputParser]\n    \n    style A fill:#fff8e1\n    style B fill:#e8f5e9\n    style C fill:#e0f7fa\n```\n\nExample composition:\n\n```typescript\nconst chain = prompt.pipe(chatModel).pipe(outputParser);\nconst result = await chain.invoke({ input: \"your question\" });\n```\n\nThis composition model allows:\n\n- Sequential processing through `pipe()`\n- Parallel execution with `RunnableParallel`\n- Conditional branching with `RunnableBranch`\n- Error handling and fallbacks\n\n## Configuration and Initialization\n\nAll core abstractions accept configuration through constructor options:\n\n```typescript\ninterface BaseLanguageModelParams {\n  callbacks?: CallbackManager;\n  tags?: string[];\n  metadata?: Record<string, unknown>;\n}\n\ninterface BaseChatModel extends BaseLanguageModelParams {\n  temperature?: number;\n  topP?: number;\n  maxTokens?: number;\n  modelName?: string;\n}\n```\n\n## Extension Pattern\n\nTo implement a custom component, extend the base class and implement required abstract methods:\n\n```typescript\nimport { BaseLLM } from \"@langchain/core/language_models/llms\";\n\nclass MyCustomLLM extends BaseLLM {\n  _llmType() {\n    return \"my_custom_llm\";\n  }\n\n  async _call(prompt: string): 
Promise<string> {\n    // Implementation\n    return response;\n  }\n}\n```\n\n## Summary\n\nThe Core Abstractions in LangChain.js provide:\n\n| Layer | Purpose | Key Classes |\n|-------|---------|-------------|\n| **Runnable** | Unified component interface | `BaseRunnable`, `RunnableSequence` |\n| **Language Models** | Model interoperability | `BaseChatModel`, `BaseLLM` |\n| **Embeddings** | Vector generation | `BaseEmbeddings` |\n| **Vector Stores** | Semantic storage | `VectorStore` |\n| **Callbacks** | Observability | `BaseCallbackHandler` |\n| **Prompts** | Structured inputs | `BasePromptTemplate` |\n| **Output Parsers** | Structured outputs | `BaseOutputParser` |\n\nThese abstractions enable portable, testable, and composable LLM applications while maintaining flexibility to swap implementations as requirements evolve.\n\n---\n\n<a id='chat-models'></a>\n\n## Chat Models and LLM Providers\n\n### 相关页面\n\n相关主题：[Embeddings Integration](#embeddings), [Agent Framework](#agent-framework)\n\n<details>\n<summary>相关源码文件</summary>\n\n以下源码文件用于生成本页说明：\n\n- [libs/providers/langchain-openai/src/chat_models/base.ts](https://github.com/langchain-ai/langchainjs/blob/main/libs/providers/langchain-openai/src/chat_models/base.ts)\n- [libs/providers/langchain-anthropic/src/chat_models.ts](https://github.com/langchain-ai/langchainjs/blob/main/libs/providers/langchain-anthropic/src/chat_models.ts)\n- [libs/providers/langchain-google-common/src/chat_models.ts](https://github.com/langchain-ai/langchainjs/blob/main/libs/providers/langchain-google-common/src/chat_models.ts)\n- [libs/providers/langchain-mistralai/src/chat_models.ts](https://github.com/langchain-ai/langchainjs/blob/main/libs/providers/langchain-mistralai/src/chat_models.ts)\n- [libs/providers/langchain-ollama/src/chat_models.ts](https://github.com/langchain-ai/langchainjs/blob/main/libs/providers/langchain-ollama/src/chat_models.ts)\n- 
[libs/providers/langchain-deepseek/src/chat_models.ts](https://github.com/langchain-ai/langchainjs/blob/main/libs/providers/langchain-deepseek/src/chat_models.ts)\n- [libs/providers/langchain-groq/src/chat_models.ts](https://github.com/langchain-ai/langchainjs/blob/main/libs/providers/langchain-groq/src/chat_models.ts)\n- [libs/providers/langchain-xai/src/chat_models/index.ts](https://github.com/langchain-ai/langchainjs/blob/main/libs/providers/langchain-xai/src/chat_models/index.ts)\n</details>\n\n# Chat Models and LLM Providers\n\nLangChain.js provides a unified abstraction layer for interacting with various Large Language Model (LLM) providers through a standardized chat model interface. This system enables developers to seamlessly switch between different providers (OpenAI, Anthropic, Google, Mistral AI, Ollama, Deepseek, Groq, xAI) while maintaining consistent API patterns for invocation, streaming, and tool usage.\n\n## Architecture Overview\n\nThe chat model architecture follows a provider-specific implementation pattern where each LLM provider package contains its own chat model class that extends common base abstractions from `@langchain/core`.\n\n```mermaid\ngraph TD\n    A[Application Code] --> B[Chat Model Interface]\n    B --> C[Provider Implementations]\n    C --> D[OpenAI ChatModels]\n    C --> E[Anthropic ChatModels]\n    C --> F[Google ChatModels]\n    C --> G[MistralAI ChatModels]\n    C --> H[Ollama ChatModels]\n    C --> I[Deepseek ChatModels]\n    C --> J[Groq ChatModels]\n    C --> K[xAI ChatModels]\n    B --> L[Tool Calling Abstraction]\n    B --> M[Streaming Abstraction]\n```\n\n## Core Message Types\n\nLangChain.js defines standardized message types that form the foundation of the chat model system. 
All providers return variations of these message types with provider-specific metadata.\n\n### AIMessage\n\nThe primary return type from chat model invocations, containing the model's response along with usage and response metadata.\n\n```typescript\nAIMessage {\n  \"id\": \"msg_01QDpd78JUHpRP6bRRNyzbW3\",\n  \"content\": \"Here's the translation to French:\\n\\nJ'adore la programmation.\",\n  \"response_metadata\": {\n    \"id\": \"msg_01QDpd78JUHpRP6bRRNyzbW3\",\n    \"model\": \"claude-sonnet-4-5-20250929\",\n    \"stop_reason\": \"end_turn\",\n    \"stop_sequence\": null,\n    \"usage\": {\n      \"input_tokens\": 25,\n      \"output_tokens\": 19\n    },\n    \"type\": \"message\",\n    \"role\": \"assistant\"\n  },\n  \"usage_metadata\": {\n    \"input_tokens\": 25,\n    \"output_tokens\": 19,\n    \"total_tokens\": 44\n  }\n}\n```\n\n资料来源：[libs/providers/langchain-anthropic/src/chat_models.ts:1-100]()\n\n### AIMessageChunk\n\nUsed for streaming responses, representing incremental pieces of the complete response. 
Each chunk contains partial content that accumulates to form the full message.\n\n```typescript\nAIMessageChunk {\n  \"id\": \"msg_01N8MwoYxiKo9w4chE4gXUs4\",\n  \"content\": \"Here\",\n  \"additional_kwargs\": {\n    \"id\": \"msg_01N8MwoYxiKo9w4chE4gXUs4\",\n    \"type\": \"message\",\n    \"role\": \"assistant\",\n    \"model\": \"claude-sonnet-4-5-20250929\"\n  },\n  \"usage_metadata\": {\n    \"input_tokens\": 25,\n    \"output_tokens\": 1,\n    \"total_tokens\": 26\n  }\n}\n```\n\n资料来源：[libs/providers/langchain-anthropic/src/chat_models.ts:1-100]()\n\n### Message Structure Types\n\nThe core message system defines flexible structures for tools and tool calls:\n\n| Type | Purpose | Fields |\n|------|---------|--------|\n| `MessageToolDefinition` | Defines a tool's input/output schema | `input: TInput`, `output: TOutput` |\n| `MessageToolSet` | Collection of available tools | `{ [key: string]: MessageToolDefinition }` |\n| `MessageStructure` | Base structure for messages | Extends to include tools, tool calls |\n| `$MessageToolCallBlock` | Tool call invocation | `type`, `name`, `args`, `id?` |\n\n资料来源：[libs/langchain-core/src/messages/message.ts:1-100]()\n\n## Invocation Patterns\n\n### Single Invocation\n\nThe primary method for synchronous chat completion, accepting various input formats:\n\n```typescript\nconst input = `Translate \"I love programming\" into French.`;\n\n// Models accept string, list of messages, or formatted prompt\nconst result = await llm.invoke(input);\nconsole.log(result);\n```\n\n资料来源：[libs/providers/langchain-openai/src/chat_models/index.ts:1-100]()\n\n### Streaming\n\nAll chat models support streaming for real-time response generation:\n\n```typescript\nfor await (const chunk of await llm.stream(input)) {\n  console.log(chunk);\n}\n```\n\n资料来源：[libs/providers/langchain-anthropic/src/chat_models.ts:1-100]()\n\n## Tool Calling System\n\nLangChain.js provides a unified tool calling abstraction that works across providers with 
provider-specific translations.\n\n### Tool Format Translation\n\nThe OpenAI provider demonstrates how tools are translated to provider-specific formats:\n\n```typescript\nfunction convertToCustomTool(tool: FunctionDef & {\n  description?: string;\n  parameters?: JsonSchema;\n}): IClient.Chat.ChatCompletionCustomTool {\n  const getFormat = () => {\n    if (!tool.format) {\n      return undefined;\n    }\n    if (tool.format.type === \"grammar\") {\n      return {\n        type: \"grammar\" as const,\n        grammar: {\n          definition: tool.format.definition,\n          syntax: tool.format.syntax,\n        },\n      };\n    }\n    if (tool.format.type === \"text\") {\n      return {\n        type: \"text\" as const,\n      };\n    }\n    return undefined;\n  };\n  return {\n    type: \"custom\",\n    custom: {\n      name: tool.name,\n      description: tool.description,\n      format: getFormat(),\n    },\n  };\n}\n```\n\n资料来源：[libs/providers/langchain-openai/src/utils/tools.ts:1-50]()\n\n### Block Translators\n\nEach provider implements block translators for converting between internal message formats and provider-specific APIs:\n\n```typescript\nBlockTranslator = {\n  translateContent: (message) => {\n    if (typeof message.content === \"string\") {\n      return convertToV1FromChatCompletions(message);\n    }\n    return convertToV1FromResponses(message);\n  },\n  translateContentChunk: (message) => {\n    if (typeof message.content === \"string\") {\n      return convertToV1FromChatCompletionsChunk(message);\n    }\n    return convertToV1FromResponsesChunk(message);\n  },\n};\n```\n\n资料来源：[libs/langchain-core/src/messages/block_translators/openai.ts:1-30]()\n\n## Provider Implementations\n\n### OpenAI\n\nOpenAI's chat models use the Chat Completions API with comprehensive token usage tracking:\n\n```typescript\nAIMessage {\n  \"id\": \"chatcmpl-9u4Mpu44CbPjwYFkTbeoZgvzB00Tz\",\n  \"content\": \"J'adore la programmation.\",\n  \"response_metadata\": {\n   
 \"tokenUsage\": {\n      \"completionTokens\": 5,\n      \"promptTokens\": 28,\n      \"totalTokens\": 33\n    },\n    \"finish_reason\": \"stop\",\n    \"system_fingerprint\": \"fp_3aa7262c27\"\n  },\n  \"usage_metadata\": {\n    \"input_tokens\": 28,\n    \"output_tokens\": 5,\n    \"total_tokens\": 33\n  }\n}\n```\n\n资料来源：[libs/providers/langchain-openai/src/chat_models/index.ts:1-100]()\n\n### Anthropic\n\nAnthropic's implementation includes cache control and extended reasoning features:\n\n```typescript\n// System messages support array content for cache control\nconst systemMessage = new SystemMessage([\n  { type: \"text\", text: \"You are a helpful assistant.\" },\n  { type: \"cache_control\", ... }\n]);\n```\n\n资料来源：[libs/providers/langchain-anthropic/src/chat_models.ts:1-100]()\n\n### Ollama\n\nOllama provides local model hosting integration:\n\n```typescript\nimport { ChatOllama } from \"@langchain/ollama\";\n\nconst model = new ChatOllama({\n  model: \"llama3\", // Default value.\n});\n\nconst result = await model.invoke([\"human\", \"Hello, how are you?\"]);\n```\n\n资料来源：[libs/providers/langchain-ollama/README.md:1-50]()\n\n## Usage Metadata\n\nAll chat models track token usage consistently across providers:\n\n| Metadata Field | Description | Availability |\n|----------------|-------------|--------------|\n| `input_tokens` | Tokens in the prompt | All providers |\n| `output_tokens` | Tokens in the completion | All providers |\n| `total_tokens` | Sum of input and output | All providers |\n| `finish_reason` | Why generation stopped | OpenAI, Groq |\n| `stop_reason` | Stop sequence trigger | Anthropic |\n| `system_fingerprint` | Model version fingerprint | OpenAI |\n\n## Message Content Utilities\n\n### Standard Message Casting\n\nThe language models utility provides helpers for message handling:\n\n```typescript\nfunction castStandardMessageContent<T extends BaseMessage>(message: T) {\n  const Cls = message.constructor as Constructor<T>;\n  return new 
Cls({\n    ...message,\n    content: message.contentBlocks,\n    response_metadata: {\n      ...message.response_metadata,\n      output_version: \"v1\",\n    },\n  });\n}\n```\n\n资料来源：[libs/langchain-core/src/language_models/utils.ts:1-30]()\n\n## Workflow: Stream Aggregation\n\nFor applications requiring the complete response after streaming:\n\n```typescript\nimport { AIMessageChunk } from '@langchain/core/messages';\nimport { concat } from '@langchain/core/utils/stream';\n\n// Stream and accumulate chunks\nconst stream = await llm.stream(input);\nlet fullResponse = new AIMessageChunk({ content: \"\" });\n\nfor await (const chunk of stream) {\n  fullResponse = concat(fullResponse, chunk);\n}\n```\n\n资料来源：[libs/providers/langchain-openai/src/chat_models/index.ts:1-100]()\n\n## Testing with Matchers\n\nLangChain provides custom Jest matchers for testing chat model outputs:\n\n```typescript\nimport { langchainMatchers } from '@langchain/core/testing/matchers';\n\nexpect.extend(langchainMatchers);\n\n// Common matchers\nexpect(message).toBeAIMessage();\nexpect(message).toBeHumanMessage();\nexpect(message).toHaveToolCalls([{ name: 'calculator' }]);\nexpect(response).toHaveStructuredResponse({ type: 'json' });\n```\n\n资料来源：[libs/langchain-core/src/testing/matchers.ts:1-100]()\n\n## Summary Table: Provider Capabilities\n\n| Provider | Streaming | Tool Calling | Cache Control | Base URL Config |\n|----------|-----------|--------------|---------------|-----------------|\n| OpenAI | ✓ | ✓ | Via API | ✓ |\n| Anthropic | ✓ | ✓ | ✓ | ✓ |\n| Google | ✓ | ✓ | ✗ | ✓ |\n| Mistral AI | ✓ | ✓ | ✗ | ✓ |\n| Ollama | ✓ | ✓ | ✗ | Local |\n| Deepseek | ✓ | ✓ | ✗ | ✓ |\n| Groq | ✓ | ✓ | ✗ | ✓ |\n| xAI | ✓ | ✓ | ✗ | ✓ |\n\n## Common Configuration Parameters\n\n| Parameter | Type | Description |\n|-----------|------|-------------|\n| `model` | `string` | Model identifier (e.g., \"gpt-4o\", \"claude-3-opus\") |\n| `temperature` | `number` | Sampling temperature (0.0 - 2.0) |\n| `maxTokens` | 
`number` | Maximum tokens in response |\n| `timeout` | `number` | Request timeout in milliseconds |\n| `maxRetries` | `number` | Maximum retry attempts |\n\n## See Also\n\n- [Base Chat Models Documentation](../chat_models/base.md)\n- [Tool Calling Guide](../tools/tool_calling.md)\n- [Streaming Guide](../messages/streaming.md)\n- [Message Types Reference](../messages/types.md)\n\n---\n\n<a id='embeddings'></a>\n\n## Embeddings Integration\n\n### 相关页面\n\n相关主题：[Vector Stores](#vector-stores), [Chat Models and LLM Providers](#chat-models)\n\n<details>\n<summary>相关源码文件</summary>\n\n以下源码文件用于生成本页说明：\n\n- [libs/providers/langchain-openai/src/embeddings.ts](https://github.com/langchain-ai/langchainjs/blob/main/libs/providers/langchain-openai/src/embeddings.ts)\n- [libs/langchain-classic/src/vectorstores/memory.ts](https://github.com/langchain-ai/langchainjs/blob/main/libs/langchain-classic/src/vectorstores/memory.ts)\n- [libs/langchain-core/src/messages/block_translators/openai.ts](https://github.com/langchain-ai/langchainjs/blob/main/libs/langchain-core/src/messages/block_translators/openai.ts)\n- [README.md](https://github.com/langchain-ai/langchainjs/blob/main/README.md)\n- [libs/langchain-textsplitters/README.md](https://github.com/langchain-ai/langchainjs/blob/main/libs/langchain-textsplitters/README.md)\n</details>\n\n# Embeddings Integration\n\n## Overview\n\nEmbeddings integration in LangChain.js provides a standardized interface for converting text into dense vector representations suitable for machine learning tasks. 
These embeddings enable semantic search, similarity comparison, and retrieval-augmented generation (RAG) pipelines.\n\nLangChain.js supports multiple embedding providers through a unified `Embeddings` base class, allowing developers to swap between providers like OpenAI, AWS Bedrock, Cohere, Google Gemini, and Mistral without changing application code.\n\n资料来源：[README.md:1-15]()\n\n## Architecture\n\n### Class Hierarchy\n\n```mermaid\ngraph TD\n    A[Embeddings Base Class] --> B[OpenAIEmbeddings]\n    A --> C[AWS Embeddings]\n    A --> D[Cohere Embeddings]\n    A --> E[Google GenAI Embeddings]\n    A --> F[Google VertexAI Embeddings]\n    A --> G[Fireworks Embeddings]\n    A --> H[MistralAI Embeddings]\n    \n    B --> I[Vector Store Integration]\n    C --> I\n    D --> I\n    E --> I\n    F --> I\n    G --> I\n    H --> I\n```\n\n### Data Flow\n\n```mermaid\ngraph LR\n    A[Text Input] --> B[Embeddings.embedQuery]\n    C[Document List] --> D[Embeddings.embedDocuments]\n    B --> E[Vector Representation]\n    D --> F[Batch Vector Output]\n    E --> G[Vector Store]\n    F --> G\n    G --> H[Similarity Search]\n    H --> I[Retrieved Results]\n```\n\n## OpenAI Embeddings\n\n### Class Definition\n\nThe `OpenAIEmbeddings` class extends the base `Embeddings` class and implements the `Partial<OpenAIEmbeddingsParams>` interface. 
It provides access to OpenAI's embedding models including `text-embedding-ada-002` and the newer `text-embedding-3` series.\n\n资料来源：[libs/providers/langchain-openai/src/embeddings.ts:1-50]()\n\n### Configuration Parameters\n\n| Parameter | Type | Default | Description |\n|-----------|------|---------|-------------|\n| `model` | `string` | `\"text-embedding-ada-002\"` | The embedding model to use |\n| `batchSize` | `number` | `512` | Maximum documents per batch |\n| `stripNewLines` | `boolean` | `true` | Remove newlines from input (deprecated) |\n| `dimensions` | `number` | `undefined` | Output dimensions for text-embedding-3+ |\n| `timeout` | `number` | `undefined` | Request timeout in milliseconds |\n| `organization` | `string` | `undefined` | OpenAI organization ID |\n| `encodingFormat` | `\"float\" \\| \"base64\"` | `\"float\"` | Output encoding format |\n\n资料来源：[libs/providers/langchain-openai/src/embeddings.ts:30-60]()\n\n### Constructor Options\n\n```typescript\nconstructor(\n  fields?: Partial<OpenAIEmbeddingsParams> & {\n    verbose?: boolean;\n    openAIApiKey?: OpenAIApiKey;\n    apiKey?: OpenAIApiKey;\n    configuration?: ClientOptions;\n  }\n)\n```\n\nThe constructor accepts API keys through `openAIApiKey` or `apiKey` properties, and allows custom client configuration for advanced use cases like proxy settings or custom endpoints.\n\n资料来源：[libs/providers/langchain-openai/src/embeddings.ts:50-70]()\n\n## Base Embeddings Interface\n\nThe `Embeddings` base class defines the contract that all embedding implementations must follow. 
It provides:\n\n- `embedQuery(text: string)`: Embed a single text string\n- `embedDocuments(texts: string[])`: Embed multiple documents in batches\n\nThe base implementation handles batching logic, ensuring that large document sets are processed efficiently within the configured `batchSize` limits.\n\n## Integration with Vector Stores\n\n### MemoryVectorStore Example\n\nThe `MemoryVectorStore` class demonstrates typical embeddings integration patterns. It stores documents and performs similarity search using embedded vectors.\n\n资料来源：[libs/langchain-classic/src/vectorstores/memory.ts:1-60]()\n\n### Usage Pattern\n\n```typescript\nimport { MemoryVectorStore } from 'langchain/vectorstores/memory';\nimport { OpenAIEmbeddings } from '@langchain/openai';\n\nconst embeddings = new OpenAIEmbeddings({\n  model: \"text-embedding-3-small\",\n});\n\nconst vectorStore = new MemoryVectorStore(embeddings);\n\n// Add documents\nconst documents = [\n  { pageContent: \"foo\", metadata: { baz: \"bar\" } },\n  { pageContent: \"thud\", metadata: { bar: \"baz\" } },\n];\n\nawait vectorStore.addDocuments(documents);\n\n// Search\nconst results = await vectorStore.similaritySearch(\"thud\", 1);\n```\n\n资料来源：[libs/langchain-classic/src/vectorstores/memory.ts:30-55]()\n\n## Provider Implementations\n\n### Available Providers\n\n| Provider | Package | Models |\n|----------|---------|--------|\n| OpenAI | `@langchain/openai` | text-embedding-ada-002, text-embedding-3-small, text-embedding-3-large |\n| AWS | `@langchain/aws` | Amazon Titan, Cohere embeddings via Bedrock |\n| Cohere | `@langchain/cohere` | embed-english-v3.0, embed-multilingual-v3.0 |\n| Google GenAI | `@langchain/google-genai` | text-embedding-004 |\n| Google VertexAI | `@langchain/google-vertexai` | textembedding-gecko |\n| Fireworks | `@langchain/fireworks` | fireworks-embedding |\n| MistralAI | `@langchain/mistralai` | mistral-embed |\n\n### Common Features\n\nAll embedding providers support:\n\n- Single text embedding via 
`embedQuery()`\n- Batch embedding via `embedDocuments()` with automatic batching\n- Custom dimensions for supported models\n- Configurable timeouts and API keys\n- Streaming-compatible response handling\n\n## Batch Processing\n\n### Automatic Batching\n\nThe embeddings system automatically handles batching when processing large document sets. The default batch size is 512 documents, but this can be configured per instance.\n\n```typescript\nconst embeddings = new OpenAIEmbeddings({\n  batchSize: 256, // Reduce for lower memory usage\n});\n```\n\n### Batch Size Considerations\n\n| Batch Size | Use Case |\n|------------|----------|\n| 512+ | High throughput, large documents |\n| 256 | Balanced memory/throughput |\n| 64-128 | Low memory environments |\n| 1 | Streaming or real-time processing |\n\n## Output Formats\n\n### Float Arrays\n\nThe default output format returns embeddings as float32 arrays:\n\n```typescript\nconst embedding = await embeddings.embedQuery(\"Hello world\");\n// Returns: number[]\n```\n\n### Base64 Encoding\n\nFor reduced payload sizes, use base64 encoding:\n\n```typescript\nconst embeddings = new OpenAIEmbeddings({\n  encodingFormat: \"base64\",\n});\n```\n\n资料来源：[libs/providers/langchain-openai/src/embeddings.ts:45]()\n\n## Error Handling\n\n### Common Errors\n\n| Error | Cause | Resolution |\n|-------|-------|------------|\n| `AuthenticationError` | Invalid API key | Verify API key configuration |\n| `RateLimitError` | Too many requests | Implement retry with backoff |\n| `TimeoutError` | Request exceeded timeout | Increase timeout or reduce batch size |\n| `InvalidRequestError` | Invalid parameters | Check model name, dimensions, etc. |\n\n## Best Practices\n\n### Performance Optimization\n\n1. **Batch Operations**: Always use `embedDocuments()` for multiple texts instead of calling `embedQuery()` in a loop\n2. **Dimension Selection**: Use lower dimensions (256/512) when full precision isn't required\n3. 
**Connection Pooling**: Reuse embedding instances across requests\n\n### Security Considerations\n\n1. Store API keys in environment variables, never in source code\n2. Use `.env` files with appropriate access controls\n3. Set up API key restrictions in provider dashboards\n\n### Production Deployment\n\n```typescript\n// Recommended production configuration\nconst embeddings = new OpenAIEmbeddings({\n  model: \"text-embedding-3-small\",\n  batchSize: 512,\n  dimensions: 1536,\n  timeout: 60000,\n});\n```\n\n## Related Components\n\n### Text Splitters\n\nBefore embedding documents, use text splitters to break large texts into manageable chunks:\n\n```typescript\nimport { RecursiveCharacterTextSplitter } from \"@langchain/textsplitters\";\n\nconst splitter = RecursiveCharacterTextSplitter.fromLanguage(\"html\", {\n  chunkSize: 175,\n  chunkOverlap: 20,\n});\n\nconst docs = await splitter.createDocuments([htmlContent]);\nconst vectors = await embeddings.embedDocuments(docs.map(d => d.pageContent));\n```\n\n资料来源：[examples/src/langchain-classic/indexes/html_text_splitter.ts:1-25]()\n资料来源：[libs/langchain-textsplitters/README.md:1-30]()\n\n### Message Translation\n\nLangChain.js includes block translators for handling OpenAI message formats when working with embeddings in chat contexts:\n\n```typescript\nBlockTranslator = {\n  translateContent: (message) => {\n    if (typeof message.content === \"string\") {\n      return convertToV1FromChatCompletions(message);\n    }\n    return convertToV1FromResponses(message);\n  },\n};\n```\n\n资料来源：[libs/langchain-core/src/messages/block_translators/openai.ts:1-20]()\n\n## API Reference Summary\n\n### OpenAIEmbeddings Constructor\n\n```typescript\nnew OpenAIEmbeddings(fields?: Partial<OpenAIEmbeddingsParams>)\n```\n\n### Methods\n\n| Method | Parameters | Returns | Description |\n|--------|------------|---------|-------------|\n| `embedQuery` | `text: string` | `Promise<number[]>` | Embed single text |\n| `embedDocuments` | 
`texts: string[]` | `Promise<number[][]>` | Embed document batch |\n\n### Environment Variables\n\n| Variable | Description |\n|----------|-------------|\n| `OPENAI_API_KEY` | OpenAI API authentication key |\n\n---\n\n<a id='agent-framework'></a>\n\n## Agent Framework\n\n### 相关页面\n\n相关主题：[Tools and Toolkits](#tools-toolkits), [Chat Models and LLM Providers](#chat-models)\n\n<details>\n<summary>相关源码文件</summary>\n\n以下源码文件用于生成本页说明：\n\n- [libs/langchain/src/agents/runtime.ts](https://github.com/langchain-ai/langchainjs/blob/main/libs/langchain/src/agents/runtime.ts)\n- [libs/langchain/src/agents/stream.ts](https://github.com/langchain-ai/langchainjs/blob/main/libs/langchain/src/agents/stream.ts)\n- [libs/langchain/src/agents/middleware.ts](https://github.com/langchain-ai/langchainjs/blob/main/libs/langchain/src/agents/middleware.ts)\n- [libs/langchain/src/agents/middleware/hitl.ts](https://github.com/langchain-ai/langchainjs/blob/main/libs/langchain/src/agents/middleware/hitl.ts)\n- [libs/langchain/src/agents/nodes/types.ts](https://github.com/langchain-ai/langchainjs/blob/main/libs/langchain/src/agents/nodes/types.ts)\n- [libs/langchain-classic/src/agents/react/index.ts](https://github.com/langchain-ai/langchainjs/blob/main/libs/langchain-classic/src/agents/react/index.ts)\n- [libs/langchain-classic/src/agents/openai_functions/index.ts](https://github.com/langchain-ai/langchainjs/blob/main/libs/langchain-classic/src/agents/openai_functions/index.ts)\n- [libs/langchain-classic/src/agents/openai_tools/index.ts](https://github.com/langchain-ai/langchainjs/blob/main/libs/langchain-classic/src/agents/openai_tools/index.ts)\n- [libs/langchain-classic/src/agents/structured_chat/index.ts](https://github.com/langchain-ai/langchainjs/blob/main/libs/langchain-classic/src/agents/structured_chat/index.ts)\n- 
[libs/langchain-classic/src/agents/format_scratchpad/xml.ts](https://github.com/langchain-ai/langchainjs/blob/main/libs/langchain-classic/src/agents/format_scratchpad/xml.ts)\n</details>\n\n# Agent Framework\n\nThe LangChain.js Agent Framework provides a comprehensive, modular system for building autonomous and semi-autonomous agents that can reason about tasks, utilize tools, and interact with external environments. The framework is designed around the concept of agents as stateful, middleware-extensible runtime systems that coordinate between large language models (LLMs), tools, and human interventions.\n\n## Architecture Overview\n\nThe Agent Framework follows a layered architecture that separates concerns between agent definition, runtime execution, and middleware extension. At the core, agents are implemented as state machines that manage a series of steps, each potentially involving LLM calls, tool executions, and middleware hooks.\n\nThe architecture is built on several foundational components that work together to create a flexible agent system. The runtime layer manages the execution context, state transitions, and tool invocations, while the middleware layer provides hooks for observability, intervention, and customization at key points in the agent lifecycle.\n\nThe agent types themselves are modular, with different implementations optimized for various use cases—from classic ReAct-style reasoning agents to modern function-calling agents that leverage structured output capabilities from providers like OpenAI and Anthropic.\n\n## Core Components\n\n### Runtime System\n\nThe runtime system is the execution engine that orchestrates agent behavior. It maintains the agent state, manages step-by-step execution, handles tool calls, and integrates middleware for extensibility. 
The runtime provides a clean interface for starting, pausing, resuming, and terminating agent executions.\n\nThe runtime is responsible for managing the agent's internal memory and context across multiple steps, ensuring that observations from previous tool executions are properly passed back to the LLM for the next reasoning cycle. This cyclical pattern of reasoning, action, and observation forms the fundamental loop of agent execution.\n\n```mermaid\ngraph TD\n    A[User Input] --> B[Runtime Engine]\n    B --> C[LLM Reasoning]\n    C --> D{Tool Call?}\n    D -->|Yes| E[Execute Tool]\n    E --> F[Get Observation]\n    F --> B\n    D -->|No| G[Final Response]\n    B --> H[Middleware Hooks]\n    H --> I[Logging/Observability]\n    H --> J[Human-in-the-Loop]\n    H --> K[Custom Logic]\n```\n\n### Action Interface\n\nActions represent the fundamental unit of work that an agent can perform. The `Action` interface defines what an agent can request, including the action name and associated arguments:\n\n```typescript\nexport interface Action {\n  /**\n   * The type or name of action being requested (e.g., \"add_numbers\").\n   */\n  name: string;\n  /**\n   * Key-value pairs of arguments needed for the action (e.g., {\"a\": 1, \"b\": 2}).\n   */\n  args: Record<string, any>;\n}\n```\n\nThis abstraction allows the framework to treat all agent actions uniformly, whether they represent tool invocations, control flow operations, or communication with external systems. The `args` field uses a flexible Record structure to accommodate any argument types required by different tools.\n\n### Agent Nodes\n\nThe agent node system provides a structured way to define the behavior and capabilities of agents. 
Node types define the available operations, their parameters, and how they should be executed within the agent's runtime context.\n\nThe node type definitions establish a contract for what actions an agent can perform, ensuring type safety and providing documentation for available capabilities. Each node type can define its own schema for arguments, return types, and execution behavior.\n\n## Middleware System\n\nMiddleware in the Agent Framework operates as a plugin system that intercepts and can modify agent behavior at defined points in the execution lifecycle. This pattern enables cross-cutting concerns like logging, monitoring, security, and human oversight without coupling these concerns to the core agent logic.\n\n### Middleware Architecture\n\nThe middleware system follows a chain-of-responsibility pattern where each middleware component can inspect, modify, or terminate agent operations. Middleware is applied at key lifecycle points including before and after LLM calls, before and after tool executions, and at state transitions.\n\nMiddleware components receive context about the current operation, including the agent's state, the action being performed, and any intermediate results. This rich context enables sophisticated decision-making within middleware handlers.\n\n### Human-in-the-Loop (HITL) Middleware\n\nThe HITL middleware provides a mechanism for human oversight and intervention in agent execution. 
This is particularly valuable for production systems where agent actions need validation before execution, or where humans should have the ability to redirect agent behavior.\n\nThe HITL middleware is configured through the `InterruptOnConfig` interface:\n\n```typescript\nexport const InterruptOnConfigSchema = z.object({\n  /**\n   * When true, interrupts execution before the action runs.\n   * When false, interrupts after the action runs.\n   */\n  interruptBefore: z.boolean().optional(),\n  /**\n   * When true, waits for human approval before continuing.\n   * When false, triggers the interrupt but allows automatic continuation.\n   */\n  waitForApproval: z.boolean().optional(),\n  /**\n   * List of action names that should trigger interrupts.\n   * If undefined, all actions trigger interrupts.\n   */\n  allowedDecisions: z.array(z.string()).optional(),\n  /**\n   * Dynamic callable description\n   */\n  description: z.union([z.string(), DescriptionFunctionSchema]).optional(),\n  /**\n   * JSON schema for the arguments associated with the action, if edits are allowed.\n   */\n  argsSchema: z.record(z.any()).optional(),\n});\n```\n\nThe configuration supports several operation modes. When `interruptBefore` is true, execution pauses before the action runs, allowing review of the planned action. When false, the action executes first and then interrupts, enabling review of both the action and its results. The `allowedDecisions` array restricts which actions trigger interrupts, allowing fine-grained control over when human oversight is required.\n\nThe `description` field accepts either a static string or a dynamic function that generates descriptions at runtime. 
This is useful for providing contextual information about actions:\n\n```typescript\nconst formatToolDescription: DescriptionFactory = (\n  toolCall: ToolCall,\n  state: AgentBuiltInState,\n  runtime: Runtime<unknown>\n) => {\n  return `Tool: ${toolCall.name}\\nArguments:\\n${JSON.stringify(toolCall.args, null, 2)}`;\n};\n\nconst config: InterruptOnConfig = {\n  allowedDecisions: [\"approve\", \"edit\"],\n  description: formatToolDescription\n};\n```\n\n## Agent Types\n\nThe Agent Framework supports multiple agent implementations, each optimized for different reasoning patterns and use cases.\n\n### ReAct Agent\n\nThe ReAct (Reasoning + Acting) agent implements the classic ReAct pattern where the agent alternates between reasoning steps and action execution. This agent type is particularly effective for tasks requiring multi-step reasoning with tool usage.\n\nThe ReAct agent formats its reasoning in a structured scratchpad that combines observations, thoughts, and actions. The XML formatter provides a standardized way to represent this reasoning trace:\n\n```typescript\nexport function formatXml(intermediateSteps: AgentStep[]) {\n  let log = \"\";\n  for (const step of intermediateSteps) {\n    const { action, observation } = step;\n    log += `<tool>${action.tool}</tool><tool_input>${action.toolInput}\\n</tool_input><observation>${observation}</observation>`;\n  }\n  return log;\n}\n```\n\nThis XML format enables the LLM to easily parse and reason about previous actions and their outcomes, maintaining a clear trace of the agent's reasoning chain.\n\n### OpenAI Functions Agent\n\nThe OpenAI Functions agent leverages OpenAI's function calling capability to constrain agent outputs to a specific set of tool definitions. This approach provides more reliable tool selection compared to open-ended generation, as the LLM must choose from the provided function schemas.\n\nThe agent passes tool definitions directly to the OpenAI API, which returns structured function calls. 
This eliminates the need for parsing unstructured text and reduces the likelihood of malformed tool invocations.\n\n### OpenAI Tools Agent\n\nThe OpenAI Tools agent is an evolution of the functions agent that uses a more flexible tool definition format. It supports both the traditional function calling interface and newer tool-use patterns, providing compatibility across different OpenAI model versions.\n\nThis agent type is recommended for new projects using OpenAI models, as it supports the latest tool-use capabilities and provides better compatibility with models that have tool-use fine-tuning.\n\n### Structured Chat Agent\n\nThe Structured Chat agent uses structured output parsing to extract tool calls from LLM responses. This approach works with any LLM that supports structured output, not just providers with native function calling support.\n\nThe agent defines a JSON schema for valid actions and uses parsing techniques to extract tool calls from the text response. This provides flexibility in model choice while maintaining reliable tool usage.\n\n## Event System\n\nThe agent framework includes a comprehensive event system for monitoring and debugging agent execution. Events are emitted at key points in the agent lifecycle, allowing external systems to observe and respond to agent behavior.\n\n| Event Type | Description | Common Use Cases |\n|------------|-------------|------------------|\n| `content-block-delta` | Emitted during streaming when content blocks are being updated | Real-time UI updates, partial result handling |\n| `content-block-finish` | Emitted when a content block is complete | Result aggregation, final processing |\n| `usage` | Emitted when usage information is updated | Cost tracking, monitoring, rate limiting |\n| `provider` | Passthrough for provider-specific events | Integration with provider SDKs, custom logging |\n\nThe event system is designed to work seamlessly with streaming responses, where content may arrive incrementally. 
Events include positional indices and delta information that enable proper reconstruction of complete responses.\n\n## Streaming Support\n\nAgents support streaming responses through the runtime's streaming capabilities. Streaming is particularly valuable for long-running agent tasks where users benefit from seeing partial results as they become available.\n\nThe streaming implementation works by emitting events as the agent produces output. These events can be consumed incrementally, allowing applications to display results in real-time without waiting for complete agent execution.\n\n## Configuration and Integration\n\n### Tool Configuration\n\nTools are passed to agents at initialization and define the actions available to the agent. Each tool has a name, description, and schema that describes its arguments. The schema is typically defined using Zod for runtime type validation.\n\n```typescript\nconst getWeather = tool(\n  async (input) => `Weather in ${input.location}`,\n  {\n    name: \"get_weather\",\n    description: \"Get weather for a location\",\n    schema: z.object({ location: z.string() }),\n  }\n);\n```\n\n### Provider Configuration\n\nAgents can be configured with any LLM provider that LangChain.js supports. The provider configuration includes API keys, model selection, and provider-specific parameters:\n\n```typescript\nimport { ChatAnthropicMessages } from \"@langchain/anthropic\";\nimport { ChatOpenAI } from \"@langchain/openai\";\n\nconst anthropicAgent = createAgent({\n  llm: new ChatAnthropicMessages({ model: \"claude-sonnet-4-5-20250929\" }),\n  tools: [getWeather, searchTool],\n});\n\nconst openaiAgent = createAgent({\n  llm: new ChatOpenAI({ model: \"gpt-4\" }),\n  tools: [getWeather, searchTool],\n});\n```\n\n## Migration from Legacy Agents\n\nThe `@langchain/classic` package provides backward compatibility for legacy agent types that were part of LangChain v0.x. 
These include `LLMChain`, `ConversationalRetrievalQAChain`, and `RetrievalQAChain`.\n\nFor new projects, the recommended approach is to use the new `createAgent` API from the main `langchain` package. This provides better performance, cleaner abstractions, and access to middleware capabilities that are not available in the legacy agents.\n\nThe legacy agents will continue to be supported but will not receive new features. Applications should plan to migrate to the new API over time, taking advantage of improvements in the agent framework.\n\n## Best Practices\n\nWhen building agents with the LangChain.js Agent Framework, several practices help ensure reliable and maintainable implementations. Tool definitions should include clear, descriptive names and comprehensive descriptions that help the LLM understand when and how to use each tool. Argument schemas should validate inputs and provide clear error messages when invalid arguments are passed.\n\nMiddleware should be used judiciously to avoid performance impacts. For high-volume production systems, consider which middleware operations can be performed asynchronously or in batch mode. Human-in-the-loop configurations should be tested thoroughly to ensure the interrupt flow matches expected user interactions.\n\nAgent state should be monitored and logged appropriately. 
The event system provides hooks for integrating with observability platforms, enabling debugging and performance analysis of agent behavior in production environments.\n\n---\n\n资料来源：[libs/langchain/src/agents/middleware/hitl.ts:35-52](https://github.com/langchain-ai/langchainjs/blob/main/libs/langchain/src/agents/middleware/hitl.ts)\n资料来源：[libs/langchain/src/agents/middleware/hitl.ts:56-69](https://github.com/langchain-ai/langchainjs/blob/main/libs/langchain/src/agents/middleware/hitl.ts)\n资料来源：[libs/langchain-classic/src/agents/format_scratchpad/xml.ts:3-12](https://github.com/langchain-ai/langchainjs/blob/main/libs/langchain-classic/src/agents/format_scratchpad/xml.ts)\n资料来源：[libs/langchain-classic/README.md:1-45](https://github.com/langchain-ai/langchainjs/blob/main/libs/langchain-classic/README.md)\n资料来源：[libs/langchain-core/src/language_models/event.ts:1-60](https://github.com/langchain-ai/langchainjs/blob/main/libs/langchain-core/src/language_models/event.ts)\n\n---\n\n<a id='tools-toolkits'></a>\n\n## Tools and Toolkits\n\n### 相关页面\n\n相关主题：[Agent Framework](#agent-framework), [Memory Systems](#memory-system)\n\n<details>\n<summary>相关源码文件</summary>\n\n以下源码文件用于生成本页说明：\n\n- [libs/langchain-classic/src/tools/base.ts](https://github.com/langchain-ai/langchainjs/blob/main/libs/langchain-classic/src/tools/base.ts)\n- [libs/langchain-classic/src/tools/dynamic.ts](https://github.com/langchain-ai/langchainjs/blob/main/libs/langchain-classic/src/tools/dynamic.ts)\n- [libs/langchain-classic/src/tools/sql.ts](https://github.com/langchain-ai/langchainjs/blob/main/libs/langchain-classic/src/tools/sql.ts)\n- [libs/langchain-classic/src/tools/webbrowser.ts](https://github.com/langchain-ai/langchainjs/blob/main/libs/langchain-classic/src/tools/webbrowser.ts)\n- [libs/langchain-classic/src/agents/toolkits/sql/sql.ts](https://github.com/langchain-ai/langchainjs/blob/main/libs/langchain-classic/src/agents/toolkits/sql/sql.ts)\n- 
[libs/langchain-classic/src/agents/toolkits/openapi/openapi.ts](https://github.com/langchain-ai/langchainjs/blob/main/libs/langchain-classic/src/agents/toolkits/openapi/openapi.ts)\n- [libs/langchain-classic/src/agents/toolkits/vectorstore/vectorstore.ts](https://github.com/langchain-ai/langchainjs/blob/main/libs/langchain-classic/src/agents/toolkits/vectorstore/vectorstore.ts)\n- [libs/providers/langchain-openai/src/tools/index.ts](https://github.com/langchain-ai/langchainjs/blob/main/libs/providers/langchain-openai/src/tools/index.ts)\n</details>\n\n# Tools and Toolkits\n\n## Overview\n\nIn LangChain.js, **Tools** and **Toolkits** form the fundamental building blocks that enable Large Language Models (LLMs) to interact with external systems, APIs, databases, and web resources. Tools provide a standardized interface for defining callable functions that models can invoke during their reasoning process, while Toolkits bundle collections of related tools into cohesive units optimized for specific use cases.\n\nThe architecture follows a clear separation between individual tool implementations and composed toolkit aggregations, allowing developers to use built-in tools, create custom tools, or assemble pre-built toolkits for common workflows.\n\n## Core Concepts\n\n### What Are Tools?\n\nTools in LangChain.js are essentially callable functions with structured metadata that enables LLMs to understand when and how to use them. Each tool consists of:\n\n| Component | Description |\n|-----------|-------------|\n| **Name** | Unique identifier for the tool |\n| **Description** | Human-readable explanation of tool purpose |\n| **Input Schema** | Zod schema defining acceptable parameters |\n| **Function** | Actual implementation that executes the tool logic |\n\n### What Are Toolkits?\n\nToolkits are pre-configured collections of related tools designed to work together for specific domains or workflows. 
They provide:\n\n- Optimized tool combinations for common tasks\n- Shared initialization logic and configuration\n- Coordinated error handling and retry strategies\n- Consistent authentication patterns where applicable\n\n## Tool Architecture\n\n### Base Tool Implementation\n\nThe foundational tool abstraction in LangChain.js is defined in `libs/langchain-classic/src/tools/base.ts`. All custom and built-in tools inherit from or conform to this interface.\n\n```mermaid\ngraph TD\n    A[BaseTool Abstract Class] --> B[StructuredTool]\n    A --> C[Tool Interface]\n    B --> D[Custom Tool Implementations]\n    C --> E[Dynamic Tool]\n    D --> F[SQL Tool]\n    D --> G[Web Browser Tool]\n```\n\n### Tool Creation Patterns\n\nLangChain.js supports multiple patterns for creating tools:\n\n#### 1. Function-Based Tool Definition\n\nThe `tool()` function from `@langchain/core/tools` provides a concise API for defining tools:\n\n```typescript\nimport { tool } from \"@langchain/core/tools\";\nimport { z } from \"zod\";\n\nconst getWeather = tool(\n  async (input) => `Weather in ${input.location}`,\n  {\n    name: \"get_weather\",\n    description: \"Get weather for a location\",\n    schema: z.object({ location: z.string() }),\n  }\n);\n```\n\n#### 2. Dynamic Tool Creation\n\nDynamic tools allow runtime tool creation, useful for scenarios where tool definitions are not known at compile time. The implementation in `libs/langchain-classic/src/tools/dynamic.ts` provides the `DynamicTool` class.\n\n资料来源：[libs/langchain-classic/src/tools/dynamic.ts:1-50]()\n\n#### 3. 
Tool with Extra Parameters\n\nTools can include additional configuration through the `extras` field:\n\n```typescript\nconst deferredTool = tool(\n  async (input) => `Result: ${input.query}`,\n  {\n    name: \"deferred_search\",\n    description: \"Search tool with deferred loading\",\n    schema: z.object({ query: z.string() }),\n    extras: { defer_loading: true },\n  }\n);\n```\n\n资料来源：[libs/providers/langchain-openai/src/tools/index.ts:1-30]()\n\n## Built-in Tools\n\n### SQL Tool\n\nThe SQL Tool enables LLM agents to query relational databases using natural language. It accepts raw SQL queries and returns structured results.\n\n| Property | Type | Description |\n|----------|------|-------------|\n| `db` | Database connection | Database instance to query |\n| `customQueryFunction` | Function | Optional custom query handler |\n| `ignoreInsertOnly` | boolean | Skip INSERT operations |\n\n资料来源：[libs/langchain-classic/src/tools/sql.ts:1-80]()\n\n### Web Browser Tool\n\nThe Web Browser Tool allows agents to navigate websites, extract content, and interact with web resources. It simulates browser operations including page navigation, content extraction, and element interaction.\n\n```mermaid\ngraph LR\n    A[User Query] --> B[WebBrowserTool]\n    B --> C{Action Type}\n    C -->|navigate| D[Load URL]\n    C -->|extract| E[Parse Content]\n    C -->|interact| F[Click/Type]\n    D --> G[HTML Response]\n    E --> H[Extracted Data]\n    F --> I[Interaction Result]\n```\n\n资料来源：[libs/langchain-classic/src/tools/webbrowser.ts:1-100]()\n\n## Toolkits\n\n### SQL Toolkit\n\nThe SQL Toolkit bundles tools for database operations, including query execution, table inspection, and schema understanding. 
Located at `libs/langchain-classic/src/agents/toolkits/sql/sql.ts`, it provides:\n\n| Tool | Purpose |\n|------|---------|\n| `info` | Describe table schema |\n| `tables` | List available tables |\n| `execute` | Run SQL queries |\n| `query_checker` | Validate SQL syntax |\n\n资料来源：[libs/langchain-classic/src/agents/toolkits/sql/sql.ts:1-150](https://github.com/langchain-ai/langchainjs/blob/main/libs/langchain-classic/src/agents/toolkits/sql/sql.ts)\n\n### OpenAPI Toolkit\n\nThe OpenAPI Toolkit enables agents to interact with REST APIs defined in OpenAPI specifications. It parses OpenAPI documents and generates corresponding tools.\n\n```mermaid\ngraph TD\n    A[OpenAPI Spec] --> B[OpenAPIToolkit]\n    B --> C[HTTP GET Tool]\n    B --> D[HTTP POST Tool]\n    B --> E[HTTP PUT Tool]\n    B --> F[HTTP DELETE Tool]\n    C --> G[API Response]\n    D --> G\n    E --> G\n    F --> G\n```\n\n资料来源：[libs/langchain-classic/src/agents/toolkits/openapi/openapi.ts:1-120](https://github.com/langchain-ai/langchainjs/blob/main/libs/langchain-classic/src/agents/toolkits/openapi/openapi.ts)\n\n### Vector Store Toolkit\n\nThe Vector Store Toolkit provides tools for similarity search and document retrieval from vector databases. This is commonly used in Retrieval Augmented Generation (RAG) pipelines.\n\n| Tool | Function |\n|------|----------|\n| `similarity_search` | Find similar documents |\n| `similarity_search_with_score` | Search with relevance scores |\n| `similarity_search_by_vector` | Search using embedding vectors |\n\n资料来源：[libs/langchain-classic/src/agents/toolkits/vectorstore/vectorstore.ts:1-100](https://github.com/langchain-ai/langchainjs/blob/main/libs/langchain-classic/src/agents/toolkits/vectorstore/vectorstore.ts)\n\n## Integration with Agents\n\nTools and Toolkits are primarily consumed by LangChain agents. 
The agent runtime coordinates tool selection, execution, and result handling.\n\n```mermaid\ngraph TD\n    A[User Input] --> B[Agent]\n    B --> C[LLM Reasoning]\n    C --> D{Tool Call?}\n    D -->|Yes| E[Select Tool]\n    E --> F[Execute Tool]\n    F --> G[Return Result]\n    G --> C\n    D -->|No| H[Final Response]\n```\n\n### Tool Calling Configuration\n\nAgents can be configured to use tools with various parameters:\n\n```typescript\nconst config: InterruptOnConfig = {\n  allowedDecisions: [\"approve\", \"edit\"],\n  description: formatToolDescription\n};\n```\n\n资料来源：[libs/langchain/src/agents/middleware/hitl.ts:1-50]()\n\n## Best Practices\n\n### Tool Design Guidelines\n\n1. **Clear Descriptions**: Write unambiguous tool descriptions that help the LLM understand when to invoke the tool\n2. **Strict Schemas**: Use Zod schemas to validate inputs and prevent errors\n3. **Error Handling**: Return meaningful error messages that guide the agent toward recovery\n4. **Idempotency**: Design tools to be safely re-callable when needed\n5. **Resource Management**: Properly dispose of connections and resources after tool execution\n\n### Toolkit Organization\n\n| Consideration | Recommendation |\n|---------------|----------------|\n| Scope | Bundle tools that share authentication or context |\n| Cohesion | Tools in a toolkit should serve a common goal |\n| Size | Avoid overly large toolkits; prefer composition |\n| Documentation | Document tool dependencies and initialization |\n\n## Conclusion\n\nTools and Toolkits in LangChain.js provide a powerful abstraction for extending LLM capabilities beyond text generation. 
By leveraging the standardized tool interface, developers can create reusable components that integrate seamlessly with agent workflows, enabling sophisticated AI applications that interact with databases, APIs, web resources, and more.\n\n---\n\n<a id='vector-stores'></a>\n\n## Vector Stores\n\n### 相关页面\n\n相关主题：[Embeddings Integration](#embeddings)\n\n<details>\n<summary>相关源码文件</summary>\n\n以下源码文件用于生成本页说明：\n\n- [libs/providers/langchain-pinecone/src/vectorstores.ts](https://github.com/langchain-ai/langchainjs/blob/main/libs/providers/langchain-pinecone/src/vectorstores.ts)\n- [libs/providers/langchain-qdrant/src/vectorstores.ts](https://github.com/langchain-ai/langchainjs/blob/main/libs/providers/langchain-qdrant/src/vectorstores.ts)\n- [libs/providers/langchain-weaviate/src/vectorstores.ts](https://github.com/langchain-ai/langchainjs/blob/main/libs/providers/langchain-weaviate/src/vectorstores.ts)\n- [libs/providers/langchain-redis/src/vectorstores.ts](https://github.com/langchain-ai/langchainjs/blob/main/libs/providers/langchain-redis/src/vectorstores.ts)\n- [libs/providers/langchain-mongodb/src/vectorstores.ts](https://github.com/langchain-ai/langchainjs/blob/main/libs/providers/langchain-mongodb/src/vectorstores.ts)\n- [libs/providers/langchain-neo4j/src/vectorstores/neo4j_vector.ts](https://github.com/langchain-ai/langchainjs/blob/main/libs/providers/langchain-neo4j/src/vectorstores/neo4j_vector.ts)\n- [libs/providers/langchain-pgvector/src/vectorstores.ts](https://github.com/langchain-ai/langchainjs/blob/main/libs/providers/langchain-pgvector/src/vectorstores.ts)\n- [libs/langchain-classic/src/vectorstores/memory.ts](https://github.com/langchain-ai/langchainjs/blob/main/libs/langchain-classic/src/vectorstores/memory.ts)\n</details>\n\n# Vector Stores\n\nVector stores are a core component in LangChain.js that enable efficient similarity search over embedded data. 
They serve as the persistence layer for vector embeddings in retrieval-augmented generation (RAG) pipelines, allowing developers to store, index, and query high-dimensional vectors representing documents, text chunks, or other data.\n\n## Overview\n\nLangChain.js provides a unified interface for interacting with multiple vector database backends through the `@langchain/core` package. This abstraction enables developers to switch between different vector store implementations without changing their application code.\n\n```mermaid\ngraph TD\n    A[Documents] --> B[Text Embedding Models]\n    B --> C[Vector Embeddings]\n    C --> D[Vector Store]\n    E[Query] --> F[Embed Query]\n    F --> G[Similarity Search]\n    G --> H[Retrieved Documents]\n    \n    D --> I[Pinecone]\n    D --> J[Qdrant]\n    D --> K[Weaviate]\n    D --> L[Redis]\n    D --> M[MongoDB]\n    D --> N[Neo4j]\n    D --> O[PGVector]\n    D --> P[Memory]\n```\n\n## Supported Vector Stores\n\nLangChain.js integrates with the following vector database providers:\n\n| Provider | Package | Key Features |\n|----------|---------|--------------|\n| Pinecone | `@langchain/pinecone` | Managed cloud service, serverless indexes |\n| Qdrant | `@langchain/qdrant` | Open-source, hybrid filtering |\n| Weaviate | `@langchain/weaviate` | GraphQL API, modular architecture |\n| Redis | `@langchain/redis` | In-memory, pub/sub capabilities |\n| MongoDB | `@langchain/mongodb` | Document-based, Atlas vector search |\n| Neo4j | `@langchain/neo4j` | Graph database, knowledge graphs |\n| PGVector | `@langchain/pgvector` | PostgreSQL extension, SQL compatibility |\n| Memory | `@langchain/classic` | In-memory, no external dependencies 
|\n\n资料来源：[libs/providers/langchain-pinecone/README.md](https://github.com/langchain-ai/langchainjs/blob/main/libs/providers/langchain-pinecone/README.md)\n资料来源：[libs/providers/langchain-qdrant/README.md](https://github.com/langchain-ai/langchainjs/blob/main/libs/providers/langchain-qdrant/README.md)\n\n## Core Interface\n\nAll vector stores in LangChain.js implement a common interface defined in `@langchain/core/vectorstores`. This interface ensures consistency across different implementations while allowing provider-specific features.\n\n### Key Methods\n\n| Method | Description |\n|--------|-------------|\n| `addDocuments()` | Add documents with embeddings to the store |\n| `similaritySearch()` | Find similar documents using cosine similarity |\n| `similaritySearchVectorWithScore()` | Search with relevance scores |\n| `delete()` | Remove documents by IDs |\n| `fromTexts()` | Static factory for text-based initialization |\n\n资料来源：[libs/langchain-classic/src/vectorstores/memory.ts](https://github.com/langchain-ai/langchainjs/blob/main/libs/langchain-classic/src/vectorstores/memory.ts)\n\n## Usage Patterns\n\n### Basic Similarity Search\n\nThe following example demonstrates the typical workflow for performing similarity search using the `MemoryVectorStore`:\n\n```typescript\nimport { MemoryVectorStore } from 'langchain/vectorstores/memory';\nimport { OpenAIEmbeddings } from '@langchain/openai';\n\nconst embeddings = new OpenAIEmbeddings({\n  model: \"text-embedding-3-small\",\n});\n\nconst vectorStore = new MemoryVectorStore(embeddings);\n\n// Add documents\nconst document1 = { pageContent: \"foo\", metadata: { baz: \"bar\" } };\nconst document2 = { pageContent: \"thud\", metadata: { bar: \"baz\" } };\nconst documents = [document1, document2];\n\nawait vectorStore.addDocuments(documents);\n\n// Perform similarity search\nconst results = await vectorStore.similaritySearch(\"thud\", 1);\n\nfor (const doc of results) {\n  console.log(`* ${doc.pageContent} 
[${JSON.stringify(doc.metadata, null)}]`);\n}\n```\n\n资料来源：[libs/langchain-classic/src/vectorstores/memory.ts](https://github.com/langchain-ai/langchainjs/blob/main/libs/langchain-classic/src/vectorstores/memory.ts)\n\n### Installation\n\nEach vector store package has specific installation requirements:\n\n#### Pinecone\n\n```bash\nnpm install @langchain/pinecone @langchain/core @pinecone-database/pinecone\n```\n\n资料来源：[libs/providers/langchain-pinecone/README.md](https://github.com/langchain-ai/langchainjs/blob/main/libs/providers/langchain-pinecone/README.md)\n\n#### Qdrant\n\n```bash\nnpm install @langchain/qdrant\n```\n\n资料来源：[libs/providers/langchain-qdrant/README.md](https://github.com/langchain-ai/langchainjs/blob/main/libs/providers/langchain-qdrant/README.md)\n\n#### PGVector\n\nThe PostgreSQL vector extension requires a running PostgreSQL instance with `pgvector` installed:\n\n```bash\nnpm install @langchain/pgvector pg\n```\n\n## Architecture\n\n### Document Model\n\nLangChain.js uses a standardized `Document` interface for representing content:\n\n```typescript\ninterface Document {\n  pageContent: string;    // The text content\n  metadata: Record<string, any>;  // Associated metadata\n}\n```\n\nDocuments are processed through embedding models to generate vector representations before storage.\n\n### Embedding Integration\n\nVector stores work in conjunction with embedding models from `@langchain/core` or provider-specific embedding implementations. The embedding model is passed during vector store initialization and is used to:\n\n1. Generate vectors for incoming documents during `addDocuments()`\n2. 
Generate vectors for query strings during search operations\n\n```mermaid\nsequenceDiagram\n    participant App as Application\n    participant VS as Vector Store\n    participant EM as Embedding Model\n    participant DB as Database\n    \n    App->>VS: addDocuments(documents)\n    VS->>EM: embedDocuments(documents)\n    EM-->>VS: embeddings\n    VS->>DB: store(embeddings + docs)\n    \n    App->>VS: similaritySearch(query)\n    VS->>EM: embedQuery(query)\n    EM-->>VS: queryVector\n    VS->>DB: findSimilar(queryVector)\n    DB-->>VS: results\n    VS-->>App: documents\n```\n\n## Provider-Specific Implementations\n\n### Neo4j Vector Store\n\nThe Neo4j integration leverages graph database capabilities for vector storage, enabling hybrid queries that combine vector similarity with graph traversal:\n\n```typescript\nimport { Neo4jVectorStore } from '@langchain/neo4j';\n```\n\n资料来源：[libs/providers/langchain-neo4j/src/vectorstores/neo4j_vector.ts](https://github.com/langchain-ai/langchainjs/blob/main/libs/providers/langchain-neo4j/src/vectorstores/neo4j_vector.ts)\n\n### Weaviate\n\nWeaviate provides hybrid search capabilities combining vector similarity with keyword matching:\n\n```typescript\nimport { WeaviateVectorStore } from '@langchain/weaviate';\n```\n\n资料来源：[libs/providers/langchain-weaviate/src/vectorstores.ts](https://github.com/langchain-ai/langchainjs/blob/main/libs/providers/langchain-weaviate/src/vectorstores.ts)\n\n### Redis\n\nRedis vector store offers high-performance in-memory vector operations with built-in pub/sub capabilities:\n\n```typescript\nimport { RedisVectorStore } from '@langchain/redis';\n```\n\n资料来源：[libs/providers/langchain-redis/src/vectorstores.ts](https://github.com/langchain-ai/langchainjs/blob/main/libs/providers/langchain-redis/src/vectorstores.ts)\n\n### MongoDB Atlas\n\nMongoDB Atlas provides vector search through its Atlas Search feature, integrated via the official MongoDB driver:\n\n```typescript\nimport { 
MongoDBAtlasVectorSearch } from '@langchain/mongodb';\n```\n\n资料来源：[libs/providers/langchain-mongodb/src/vectorstores.ts](https://github.com/langchain-ai/langchainjs/blob/main/libs/providers/langchain-mongodb/src/vectorstores.ts)\n\n## Development Guidelines\n\n### Adding New Vector Stores\n\nLangChain.js provides a template for creating new vector store integrations. See the official integration template for the required structure:\n\n资料来源：[libs/create-langchain-integration/template/README.md](https://github.com/langchain-ai/langchainjs/blob/main/libs/create-langchain-integration/template/README.md)\n\n### Package Structure\n\nEach vector store package should follow the LangChain.js monorepo conventions:\n\n| Directory/File | Purpose |\n|---------------|---------|\n| `src/index.ts` | Main exports |\n| `src/vectorstores.ts` | Vector store implementation |\n| `tests/` | Unit and integration tests |\n| `README.md` | Documentation and usage examples |\n\n### Testing\n\nVector store implementations require both unit tests (`.test.ts`) and integration tests (`.int.test.ts`):\n\n```bash\npnpm test\npnpm test:int\n```\n\n## Common Configuration Options\n\nWhile specific options vary by provider, most vector stores support these common parameters:\n\n| Parameter | Type | Description |\n|-----------|------|-------------|\n| `embeddings` | `Embeddings` | The embedding model to use |\n| `textKey` | `string` | Field name for text content |\n| `embeddingKey` | `string` | Field name for embeddings |\n| `namespace` | `string` | Optional namespace isolation |\n| `filter` | `Record<string, any>` | Metadata filtering criteria |\n\n## Best Practices\n\n1. **Embedding Model Consistency**: Always use the same embedding model for indexing and querying to ensure vector compatibility.\n\n2. **Metadata Indexing**: Include relevant metadata during document storage to enable filtered searches.\n\n3. 
**Batch Operations**: Use batch operations (`addDocuments`) rather than single document additions for better performance.\n\n4. **Connection Management**: Follow provider-specific connection pooling and timeout configurations in production environments.\n\n5. **Dimension Matching**: Ensure your embedding model dimensions match the vector store's expected configuration.\n\n## Related Components\n\nVector stores are typically used in combination with:\n\n- **Document Loaders**: Load documents from various sources\n- **Text Splitters**: Split large documents into searchable chunks\n- **Retrieval Chains**: Combine vector stores with language models for RAG\n- **Memory Systems**: Persist conversation context using vector storage\n\n---\n\n<a id='memory-system'></a>\n\n## Memory Systems\n\n### 相关页面\n\n相关主题：[Agent Framework](#agent-framework)\n\nI cannot generate an accurate technical wiki page about Memory Systems because the provided repository context does not contain the actual source files for memory implementations.\n\nThe requested source files are not present in the retrieved context:\n\n- `libs/langchain-classic/src/memory/buffer_memory.ts`\n- `libs/langchain-classic/src/memory/buffer_window_memory.ts`\n- `libs/langchain-classic/src/memory/summary_buffer.ts`\n- `libs/langchain-classic/src/memory/entity_memory.ts`\n- `libs/langchain-classic/src/memory/vector_store.ts`\n- `libs/langchain-core/src/memory.ts`\n- `libs/langchain-core/src/chat_history.ts`\n\nThe context only contains files related to:\n\n- HTML text splitting\n- Agent HITL middleware\n- Anthropic/OpenAI chat models\n- SHA256 hashing utilities\n- Vector store memory (partial)\n- Computer use tools\n- Language model utilities\n- Agent streaming\n\nTo generate an accurate wiki page, I would need access to the actual memory module source files.\n\n---\n\n---\n\n## Doramagic 踩坑日志\n\n项目：langchain-ai/langchainjs\n\n摘要：发现 10 个潜在踩坑项，其中 1 个为 high/blocking；最高优先级：配置坑 - 来源证据：[Feature request] React Native 
support。\n\n## 1. 配置坑 · 来源证据：[Feature request] React Native support\n\n- 严重度：high\n- 证据强度：source_linked\n- 发现：GitHub 社区证据显示该项目存在一个配置相关的待验证问题：[Feature request] React Native support\n- 对用户的影响：可能增加新用户试用和生产接入成本。\n- 建议检查：来源问题仍为 open，Pack Agent 需要复核是否仍影响当前版本。\n- 防护动作：不得脱离来源链接放大为确定性结论；需要标注适用版本和复核状态。\n- 证据：community_evidence:github | cevd_39933028ef894033b30ff784e81f185f | https://github.com/langchain-ai/langchainjs/issues/4239 | 来源类型 github_issue 暴露的待验证使用条件。\n\n## 2. 安装坑 · 来源证据：@langchain/core@1.1.46\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：GitHub 社区证据显示该项目存在一个安装相关的待验证问题：@langchain/core@1.1.46\n- 对用户的影响：可能增加新用户试用和生产接入成本。\n- 建议检查：来源显示可能已有修复、规避或版本变化，说明书中必须标注适用版本。\n- 防护动作：不得脱离来源链接放大为确定性结论；需要标注适用版本和复核状态。\n- 证据：community_evidence:github | cevd_36a7a58d5cd84bda8dde7918402a6f8a | https://github.com/langchain-ai/langchainjs/releases/tag/%40langchain/core%401.1.46 | 来源讨论提到 npm 相关条件，需在安装/试用前复核。\n\n## 3. 配置坑 · 来源证据：Must pass in at least 1 record to upsert.\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：GitHub 社区证据显示该项目存在一个配置相关的待验证问题：Must pass in at least 1 record to upsert.\n- 对用户的影响：可能增加新用户试用和生产接入成本。\n- 建议检查：来源问题仍为 open，Pack Agent 需要复核是否仍影响当前版本。\n- 防护动作：不得脱离来源链接放大为确定性结论；需要标注适用版本和复核状态。\n- 证据：community_evidence:github | cevd_26c3acaad9e14ed3953206f25870c0b0 | https://github.com/langchain-ai/langchainjs/issues/10890 | 来源讨论提到 node 相关条件，需在安装/试用前复核。\n\n## 4. 能力坑 · 能力判断依赖假设\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：README/documentation is current enough for a first validation pass.\n- 对用户的影响：假设不成立时，用户拿不到承诺的能力。\n- 建议检查：将假设转成下游验证清单。\n- 防护动作：假设必须转成验证项；没有验证结果前不能写成事实。\n- 证据：capability.assumptions | github_repo:598342280 | https://github.com/langchain-ai/langchainjs | README/documentation is current enough for a first validation pass.\n\n## 5. 
维护坑 · 维护活跃度未知\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：未记录 last_activity_observed。\n- 对用户的影响：新项目、停更项目和活跃项目会被混在一起，推荐信任度下降。\n- 建议检查：补 GitHub 最近 commit、release、issue/PR 响应信号。\n- 防护动作：维护活跃度未知时，推荐强度不能标为高信任。\n- 证据：evidence.maintainer_signals | github_repo:598342280 | https://github.com/langchain-ai/langchainjs | last_activity_observed missing\n\n## 6. 安全/权限坑 · 下游验证发现风险项\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：no_demo\n- 对用户的影响：下游已经要求复核，不能在页面中弱化。\n- 建议检查：进入安全/权限治理复核队列。\n- 防护动作：下游风险存在时必须保持 review/recommendation 降级。\n- 证据：downstream_validation.risk_items | github_repo:598342280 | https://github.com/langchain-ai/langchainjs | no_demo; severity=medium\n\n## 7. 安全/权限坑 · 存在评分风险\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：no_demo\n- 对用户的影响：风险会影响是否适合普通用户安装。\n- 建议检查：把风险写入边界卡，并确认是否需要人工复核。\n- 防护动作：评分风险必须进入边界卡，不能只作为内部分数。\n- 证据：risks.scoring_risks | github_repo:598342280 | https://github.com/langchain-ai/langchainjs | no_demo; severity=medium\n\n## 8. 安全/权限坑 · 来源证据：bug(@langchain/openai): Bare JSON.parse in Responses API converter crashes on structured output with trailing characters\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：GitHub 社区证据显示该项目存在一个安全/权限相关的待验证问题：bug(@langchain/openai): Bare JSON.parse in Responses API converter crashes on structured output with trailing characters\n- 对用户的影响：可能阻塞安装或首次运行。\n- 建议检查：来源问题仍为 open，Pack Agent 需要复核是否仍影响当前版本。\n- 防护动作：不得脱离来源链接放大为确定性结论；需要标注适用版本和复核状态。\n- 证据：community_evidence:github | cevd_4d2a6bed33284a3cbd2f3319321d9e4c | https://github.com/langchain-ai/langchainjs/issues/10894 | 来源讨论提到 node 相关条件，需在安装/试用前复核。\n\n## 9. 维护坑 · issue/PR 响应质量未知\n\n- 严重度：low\n- 证据强度：source_linked\n- 发现：issue_or_pr_quality=unknown。\n- 对用户的影响：用户无法判断遇到问题后是否有人维护。\n- 建议检查：抽样最近 issue/PR，判断是否长期无人处理。\n- 防护动作：issue/PR 响应未知时，必须提示维护风险。\n- 证据：evidence.maintainer_signals | github_repo:598342280 | https://github.com/langchain-ai/langchainjs | issue_or_pr_quality=unknown\n\n## 10. 
维护坑 · 发布节奏不明确\n\n- 严重度：low\n- 证据强度：source_linked\n- 发现：release_recency=unknown。\n- 对用户的影响：安装命令和文档可能落后于代码，用户踩坑概率升高。\n- 建议检查：确认最近 release/tag 和 README 安装命令是否一致。\n- 防护动作：发布节奏未知或过期时，安装说明必须标注可能漂移。\n- 证据：evidence.maintainer_signals | github_repo:598342280 | https://github.com/langchain-ai/langchainjs | release_recency=unknown\n\n<!-- canonical_name: langchain-ai/langchainjs; human_manual_source: deepwiki_human_wiki -->\n",
      "summary": "DeepWiki/Human Wiki 完整输出，末尾追加 Discovery Agent 踩坑日志。",
      "title": "Human Manual / 人类版说明书"
    },
    "pitfall_log": {
      "asset_id": "pitfall_log",
      "filename": "PITFALL_LOG.md",
      "markdown": "# Pitfall Log / 踩坑日志\n\n项目：langchain-ai/langchainjs\n\n摘要：发现 10 个潜在踩坑项，其中 1 个为 high/blocking；最高优先级：配置坑 - 来源证据：[Feature request] React Native support。\n\n## 1. 配置坑 · 来源证据：[Feature request] React Native support\n\n- 严重度：high\n- 证据强度：source_linked\n- 发现：GitHub 社区证据显示该项目存在一个配置相关的待验证问题：[Feature request] React Native support\n- 对用户的影响：可能增加新用户试用和生产接入成本。\n- 建议检查：来源问题仍为 open，Pack Agent 需要复核是否仍影响当前版本。\n- 防护动作：不得脱离来源链接放大为确定性结论；需要标注适用版本和复核状态。\n- 证据：community_evidence:github | cevd_39933028ef894033b30ff784e81f185f | https://github.com/langchain-ai/langchainjs/issues/4239 | 来源类型 github_issue 暴露的待验证使用条件。\n\n## 2. 安装坑 · 来源证据：@langchain/core@1.1.46\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：GitHub 社区证据显示该项目存在一个安装相关的待验证问题：@langchain/core@1.1.46\n- 对用户的影响：可能增加新用户试用和生产接入成本。\n- 建议检查：来源显示可能已有修复、规避或版本变化，说明书中必须标注适用版本。\n- 防护动作：不得脱离来源链接放大为确定性结论；需要标注适用版本和复核状态。\n- 证据：community_evidence:github | cevd_36a7a58d5cd84bda8dde7918402a6f8a | https://github.com/langchain-ai/langchainjs/releases/tag/%40langchain/core%401.1.46 | 来源讨论提到 npm 相关条件，需在安装/试用前复核。\n\n## 3. 配置坑 · 来源证据：Must pass in at least 1 record to upsert.\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：GitHub 社区证据显示该项目存在一个配置相关的待验证问题：Must pass in at least 1 record to upsert.\n- 对用户的影响：可能增加新用户试用和生产接入成本。\n- 建议检查：来源问题仍为 open，Pack Agent 需要复核是否仍影响当前版本。\n- 防护动作：不得脱离来源链接放大为确定性结论；需要标注适用版本和复核状态。\n- 证据：community_evidence:github | cevd_26c3acaad9e14ed3953206f25870c0b0 | https://github.com/langchain-ai/langchainjs/issues/10890 | 来源讨论提到 node 相关条件，需在安装/试用前复核。\n\n## 4. 能力坑 · 能力判断依赖假设\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：README/documentation is current enough for a first validation pass.\n- 对用户的影响：假设不成立时，用户拿不到承诺的能力。\n- 建议检查：将假设转成下游验证清单。\n- 防护动作：假设必须转成验证项；没有验证结果前不能写成事实。\n- 证据：capability.assumptions | github_repo:598342280 | https://github.com/langchain-ai/langchainjs | README/documentation is current enough for a first validation pass.\n\n## 5. 
维护坑 · 维护活跃度未知\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：未记录 last_activity_observed。\n- 对用户的影响：新项目、停更项目和活跃项目会被混在一起，推荐信任度下降。\n- 建议检查：补 GitHub 最近 commit、release、issue/PR 响应信号。\n- 防护动作：维护活跃度未知时，推荐强度不能标为高信任。\n- 证据：evidence.maintainer_signals | github_repo:598342280 | https://github.com/langchain-ai/langchainjs | last_activity_observed missing\n\n## 6. 安全/权限坑 · 下游验证发现风险项\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：no_demo\n- 对用户的影响：下游已经要求复核，不能在页面中弱化。\n- 建议检查：进入安全/权限治理复核队列。\n- 防护动作：下游风险存在时必须保持 review/recommendation 降级。\n- 证据：downstream_validation.risk_items | github_repo:598342280 | https://github.com/langchain-ai/langchainjs | no_demo; severity=medium\n\n## 7. 安全/权限坑 · 存在评分风险\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：no_demo\n- 对用户的影响：风险会影响是否适合普通用户安装。\n- 建议检查：把风险写入边界卡，并确认是否需要人工复核。\n- 防护动作：评分风险必须进入边界卡，不能只作为内部分数。\n- 证据：risks.scoring_risks | github_repo:598342280 | https://github.com/langchain-ai/langchainjs | no_demo; severity=medium\n\n## 8. 安全/权限坑 · 来源证据：bug(@langchain/openai): Bare JSON.parse in Responses API converter crashes on structured output with trailing characters\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：GitHub 社区证据显示该项目存在一个安全/权限相关的待验证问题：bug(@langchain/openai): Bare JSON.parse in Responses API converter crashes on structured output with trailing characters\n- 对用户的影响：可能阻塞安装或首次运行。\n- 建议检查：来源问题仍为 open，Pack Agent 需要复核是否仍影响当前版本。\n- 防护动作：不得脱离来源链接放大为确定性结论；需要标注适用版本和复核状态。\n- 证据：community_evidence:github | cevd_4d2a6bed33284a3cbd2f3319321d9e4c | https://github.com/langchain-ai/langchainjs/issues/10894 | 来源讨论提到 node 相关条件，需在安装/试用前复核。\n\n## 9. 维护坑 · issue/PR 响应质量未知\n\n- 严重度：low\n- 证据强度：source_linked\n- 发现：issue_or_pr_quality=unknown。\n- 对用户的影响：用户无法判断遇到问题后是否有人维护。\n- 建议检查：抽样最近 issue/PR，判断是否长期无人处理。\n- 防护动作：issue/PR 响应未知时，必须提示维护风险。\n- 证据：evidence.maintainer_signals | github_repo:598342280 | https://github.com/langchain-ai/langchainjs | issue_or_pr_quality=unknown\n\n## 10. 
维护坑 · 发布节奏不明确\n\n- 严重度：low\n- 证据强度：source_linked\n- 发现：release_recency=unknown。\n- 对用户的影响：安装命令和文档可能落后于代码，用户踩坑概率升高。\n- 建议检查：确认最近 release/tag 和 README 安装命令是否一致。\n- 防护动作：发布节奏未知或过期时，安装说明必须标注可能漂移。\n- 证据：evidence.maintainer_signals | github_repo:598342280 | https://github.com/langchain-ai/langchainjs | release_recency=unknown\n",
      "summary": "用户实践前最可能遇到的身份、安装、配置、运行和安全坑。",
      "title": "Pitfall Log / 踩坑日志"
    },
    "prompt_preview": {
      "asset_id": "prompt_preview",
      "filename": "PROMPT_PREVIEW.md",
      "markdown": "# langchainjs - Prompt Preview\n\n> Copy the prompt below into your AI host before installing anything.\n> Its purpose is to let you safely feel the project's workflow, not to claim the project has already run.\n\n## Copy this prompt\n\n```text\nYou are using an independent Doramagic capability pack for langchain-ai/langchainjs.\n\nProject:\n- Name: langchainjs\n- Repository: https://github.com/langchain-ai/langchainjs\n- Summary: The agent engineering platform\n- Host target: local_cli\n\nGoal:\nHelp me evaluate this project for the following task without installing it yet: The agent engineering platform\n\nBefore taking action:\n1. Restate my task, success standard, and boundary.\n2. Identify whether the next step requires tools, browser access, network access, filesystem access, credentials, package installation, or host configuration.\n3. Use only the Doramagic Project Pack, the upstream repository, and the source-linked evidence listed below.\n4. If a real command, install step, API call, file write, or host integration is required, mark it as \"requires post-install verification\" and ask for approval first.\n5. If evidence is missing, say \"evidence is missing\" instead of filling the gap.\n\nPreviewable capabilities:\n- Capability 1: The agent engineering platform\n\nCapabilities that require post-install verification:\n- Capability 1: Use the source-backed project context to guide one small, checkable workflow step.\n\nCore service flow:\n1. introduction: Introduction to LangChain.js. Produce one small intermediate artifact and wait for confirmation.\n2. getting-started: Getting Started. Produce one small intermediate artifact and wait for confirmation.\n3. package-architecture: Package Architecture. Produce one small intermediate artifact and wait for confirmation.\n4. core-abstractions: Core Abstractions. Produce one small intermediate artifact and wait for confirmation.\n5. chat-models: Chat Models and LLM Providers. 
Produce one small intermediate artifact and wait for confirmation.\n\nSource-backed evidence to keep in mind:\n- https://github.com/langchain-ai/langchainjs\n- https://github.com/langchain-ai/langchainjs#readme\n- README.md\n- libs/langchain/README.md\n- libs/langchain-core/README.md\n- libs/langchain-classic/README.md\n- package.json\n- libs/langchain/package.json\n- libs/langchain-core/package.json\n- examples/package.json\n\nFirst response rules:\n1. Start Step 1 only.\n2. Explain the one service action you will perform first.\n3. Ask exactly three questions about my target workflow, success standard, and sandbox boundary.\n4. Stop and wait for my answers.\n\nStep 1 follow-up protocol:\n- After I answer the first three questions, stay in Step 1.\n- Produce six parts only: clarified task, success standard, boundary conditions, two or three options, tradeoffs for each option, and one recommendation.\n- End by asking whether I confirm the recommendation.\n- Do not move to Step 2 until I explicitly confirm.\n\nConversation rules:\n- Advance one step at a time and wait for confirmation after each small artifact.\n- Write outputs as recommendations or planned checks, not as completed execution.\n- Do not claim tests passed, files changed, commands ran, APIs were called, or the project was installed.\n- If the user asks for execution, first provide the sandbox setup, expected output, rollback, and approval checkpoint.\n```\n",
      "summary": "不安装项目也能感受能力节奏的安全试用 Prompt。",
      "title": "Prompt Preview / 安装前试用 Prompt"
    },
    "quick_start": {
      "asset_id": "quick_start",
      "filename": "QUICK_START.md",
      "markdown": "# Quick Start / 官方入口\n\n项目：langchain-ai/langchainjs\n\n## 官方安装入口\n\n### Node.js / npm · 官方安装入口\n\n```bash\nnpm install -S langchain\n```\n\n来源：https://github.com/langchain-ai/langchainjs#readme\n\n## 来源\n\n- repo: https://github.com/langchain-ai/langchainjs\n- docs: https://github.com/langchain-ai/langchainjs#readme\n",
      "summary": "从项目官方 README 或安装文档提取的开工入口。",
      "title": "Quick Start / 官方入口"
    }
  },
  "validation_id": "dval_d18ecddff9bf424e9e8c59d6cb1811bd"
}
