{
  "canonical_name": "langchain-ai/langchain-mcp-adapters",
  "compilation_id": "pack_96616bfcf77f427dbc708e605ad110ee",
  "created_at": "2026-05-15T14:29:39.887063+00:00",
  "created_by": "project-pack-compiler",
  "feedback": {
    "carrier_selection_notes": [
      "viable_asset_types=mcp_config, recipe, host_instruction, eval, preflight",
      "recommended_asset_types=mcp_config, recipe, host_instruction, eval, preflight"
    ],
    "evidence_delta": {
      "confirmed_claims": [
        "identity_anchor_present",
        "capability_and_host_targets_present",
        "install_path_declared_or_better"
      ],
      "missing_required_fields": [],
      "must_verify_forwarded": [
        "Run or inspect `pip install langchain-mcp-adapters` in an isolated environment.",
        "Confirm the project exposes the claimed capability to at least one target host."
      ],
      "quickstart_execution_scope": "allowlisted_sandbox_smoke",
      "sandbox_command": "pip install langchain-mcp-adapters",
      "sandbox_container_image": "python:3.12-slim",
      "sandbox_execution_backend": "docker",
      "sandbox_planner_decision": "llm_execute_isolated_install",
      "sandbox_validation_id": "sbx_38a1e0e8dcbd448e89200a517839d5cc"
    },
    "feedback_event_type": "project_pack_compilation_feedback",
    "learning_candidate_reasons": [],
    "template_gaps": []
  },
  "identity": {
    "canonical_id": "project_c36b23311c130812d6b225416dd1d1c7",
    "canonical_name": "langchain-ai/langchain-mcp-adapters",
    "homepage_url": null,
    "license": "unknown",
    "repo_url": "https://github.com/langchain-ai/langchain-mcp-adapters",
    "slug": "langchain-mcp-adapters",
    "source_packet_id": "phit_0d0ed02354f040b2b380a766555e2976",
    "source_validation_id": "dval_3abacb7c691849fc8f2ee8ca09a88b62"
  },
  "merchandising": {
    "best_for": "需要工具连接与集成能力，并使用 mcp_host的用户",
    "github_forks": 424,
    "github_stars": 3525,
    "one_liner_en": "LangChain 🔌 MCP",
    "one_liner_zh": "LangChain 🔌 MCP",
    "primary_category": {
      "category_id": "tool-integrations",
      "confidence": "medium",
      "name_en": "Tool Integrations",
      "name_zh": "工具连接与集成",
      "reason": "matched_keywords:mcp, github"
    },
    "target_user": "使用 mcp_host 等宿主 AI 的用户",
    "title_en": "langchain-mcp-adapters",
    "title_zh": "langchain-mcp-adapters 能力包",
    "visible_tags": [
      {
        "label_en": "MCP Tools",
        "label_zh": "MCP 工具",
        "source": "repo_evidence_project_characteristics",
        "tag_id": "product_domain-mcp-tools",
        "type": "product_domain"
      },
      {
        "label_en": "Knowledge Base Q&A",
        "label_zh": "知识库问答",
        "source": "repo_evidence_project_characteristics",
        "tag_id": "user_job-knowledge-base-q-a",
        "type": "user_job"
      },
      {
        "label_en": "Structured Data Extraction",
        "label_zh": "结构化数据提取",
        "source": "repo_evidence_project_characteristics",
        "tag_id": "core_capability-structured-data-extraction",
        "type": "core_capability"
      },
      {
        "label_en": "Node-based Workflow",
        "label_zh": "节点式流程编排",
        "source": "repo_evidence_project_characteristics",
        "tag_id": "workflow_pattern-node-based-workflow",
        "type": "workflow_pattern"
      },
      {
        "label_en": "Open Source Tool",
        "label_zh": "开源工具",
        "source": "repo_evidence_project_characteristics",
        "tag_id": "selection_signal-open-source-tool",
        "type": "selection_signal"
      }
    ]
  },
  "packet_id": "phit_0d0ed02354f040b2b380a766555e2976",
  "page_model": {
    "artifacts": {
      "artifact_slug": "langchain-mcp-adapters",
      "files": [
        "PROJECT_PACK.json",
        "QUICK_START.md",
        "PROMPT_PREVIEW.md",
        "HUMAN_MANUAL.md",
        "AI_CONTEXT_PACK.md",
        "BOUNDARY_RISK_CARD.md",
        "PITFALL_LOG.md",
        "REPO_INSPECTION.json",
        "REPO_INSPECTION.md",
        "CAPABILITY_CONTRACT.json",
        "EVIDENCE_INDEX.json",
        "CLAIM_GRAPH.json"
      ],
      "required_files": [
        "PROJECT_PACK.json",
        "QUICK_START.md",
        "PROMPT_PREVIEW.md",
        "HUMAN_MANUAL.md",
        "AI_CONTEXT_PACK.md",
        "BOUNDARY_RISK_CARD.md",
        "PITFALL_LOG.md",
        "REPO_INSPECTION.json"
      ]
    },
    "detail": {
      "capability_source": "Project Hit Packet + DownstreamValidationResult",
      "commands": [
        {
          "command": "pip install langchain-mcp-adapters",
          "label": "Python / pip · 官方安装入口",
          "source": "https://github.com/langchain-ai/langchain-mcp-adapters#readme",
          "verified": true
        }
      ],
      "display_tags": [
        "MCP 工具",
        "知识库问答",
        "结构化数据提取",
        "节点式流程编排",
        "开源工具"
      ],
      "eyebrow": "工具连接与集成",
      "glance": [
        {
          "body": "判断自己是不是目标用户。",
          "label": "最适合谁",
          "value": "需要工具连接与集成能力，并使用 mcp_host的用户"
        },
        {
          "body": "先理解能力边界，再决定是否继续。",
          "label": "核心价值",
          "value": "LangChain 🔌 MCP"
        },
        {
          "body": "未完成验证前保持审慎。",
          "label": "继续前",
          "value": "publish to Doramagic.ai project surfaces"
        }
      ],
      "guardrail_source": "Boundary & Risk Card",
      "guardrails": [
        {
          "body": "Prompt Preview 只展示流程，不证明项目已安装或运行。",
          "label": "Check 1",
          "value": "不要把试用当真实运行"
        },
        {
          "body": "mcp_host",
          "label": "Check 2",
          "value": "确认宿主兼容"
        },
        {
          "body": "publish to Doramagic.ai project surfaces",
          "label": "Check 3",
          "value": "先隔离验证"
        }
      ],
      "mode": "mcp_config, recipe, host_instruction, eval, preflight",
      "pitfall_log": {
        "items": [
          {
            "body": "GitHub 社区证据显示该项目存在一个安装相关的待验证问题：Prompts and Resources auto-discovery",
            "category": "安装坑",
            "evidence": [
              "community_evidence:github | cevd_bf1812b74caa4e989767a9307a8ffc16 | https://github.com/langchain-ai/langchain-mcp-adapters/issues/62 | 来源讨论提到 python 相关条件，需在安装/试用前复核。"
            ],
            "severity": "high",
            "suggested_check": "来源问题仍为 open，Pack Agent 需要复核是否仍影响当前版本。",
            "title": "来源证据：Prompts and Resources auto-discovery",
            "user_impact": "可能增加新用户试用和生产接入成本。"
          },
          {
            "body": "GitHub 社区证据显示该项目存在一个安装相关的待验证问题：`MultiServerMCPClient.get_tools()` silently returns no tools when any single server fails to connect",
            "category": "安装坑",
            "evidence": [
              "community_evidence:github | cevd_a5093182914b4df0b7ad2cd560bacdf2 | https://github.com/langchain-ai/langchain-mcp-adapters/issues/492 | 来源讨论提到 python 相关条件，需在安装/试用前复核。"
            ],
            "severity": "high",
            "suggested_check": "来源问题仍为 open，Pack Agent 需要复核是否仍影响当前版本。",
            "title": "来源证据：`MultiServerMCPClient.get_tools()` silently returns no tools when any single server fails to connect",
            "user_impact": "可能增加新用户试用和生产接入成本。"
          },
          {
            "body": "GitHub 社区证据显示该项目存在一个运行相关的待验证问题：Fix TypeError in resources.py and make __aexit__ an async coroutine in client.py",
            "category": "运行坑",
            "evidence": [
              "community_evidence:github | cevd_ac102050dd4841d6954559a3413e0b92 | https://github.com/langchain-ai/langchain-mcp-adapters/issues/496 | 来源类型 github_issue 暴露的待验证使用条件。"
            ],
            "severity": "high",
            "suggested_check": "来源问题仍为 open，Pack Agent 需要复核是否仍影响当前版本。",
            "title": "来源证据：Fix TypeError in resources.py and make __aexit__ an async coroutine in client.py",
            "user_impact": "可能增加新用户试用和生产接入成本。"
          },
          {
            "body": "GitHub 社区证据显示该项目存在一个安装相关的待验证问题：langchain-mcp-adapters==0.2.2",
            "category": "安装坑",
            "evidence": [
              "community_evidence:github | cevd_0c6ca0722ab046379d28ecf30f8d2bcf | https://github.com/langchain-ai/langchain-mcp-adapters/releases/tag/langchain-mcp-adapters%3D%3D0.2.2 | 来源类型 github_release 暴露的待验证使用条件。"
            ],
            "severity": "medium",
            "suggested_check": "来源显示可能已有修复、规避或版本变化，说明书中必须标注适用版本。",
            "title": "来源证据：langchain-mcp-adapters==0.2.2",
            "user_impact": "可能增加新用户试用和生产接入成本。"
          },
          {
            "body": "GitHub 社区证据显示该项目存在一个配置相关的待验证问题：langchain-mcp-adapters==0.1.10",
            "category": "配置坑",
            "evidence": [
              "community_evidence:github | cevd_8b18dbf32ccd41e38b272a458f4040f5 | https://github.com/langchain-ai/langchain-mcp-adapters/releases/tag/langchain-mcp-adapters%3D%3D0.1.10 | 来源讨论提到 python 相关条件，需在安装/试用前复核。"
            ],
            "severity": "medium",
            "suggested_check": "来源显示可能已有修复、规避或版本变化，说明书中必须标注适用版本。",
            "title": "来源证据：langchain-mcp-adapters==0.1.10",
            "user_impact": "可能增加新用户试用和生产接入成本。"
          },
          {
            "body": "GitHub 社区证据显示该项目存在一个能力理解相关的待验证问题：langchain-mcp-adapters==0.1.14",
            "category": "能力坑",
            "evidence": [
              "community_evidence:github | cevd_6727e0d698e54fc38d7c60e262978ac2 | https://github.com/langchain-ai/langchain-mcp-adapters/releases/tag/langchain-mcp-adapters%3D%3D0.1.14 | 来源类型 github_release 暴露的待验证使用条件。"
            ],
            "severity": "medium",
            "suggested_check": "来源显示可能已有修复、规避或版本变化，说明书中必须标注适用版本。",
            "title": "来源证据：langchain-mcp-adapters==0.1.14",
            "user_impact": "可能增加新用户试用和生产接入成本。"
          },
          {
            "body": "README/documentation is current enough for a first validation pass.",
            "category": "能力坑",
            "evidence": [
              "capability.assumptions | github_repo:929158279 | https://github.com/langchain-ai/langchain-mcp-adapters | README/documentation is current enough for a first validation pass."
            ],
            "severity": "medium",
            "suggested_check": "将假设转成下游验证清单。",
            "title": "能力判断依赖假设",
            "user_impact": "假设不成立时，用户拿不到承诺的能力。"
          },
          {
            "body": "GitHub 社区证据显示该项目存在一个运行相关的待验证问题：langchain-mcp-adapters==0.1.12",
            "category": "运行坑",
            "evidence": [
              "community_evidence:github | cevd_e71a46a9e0374d139555a78f229b0469 | https://github.com/langchain-ai/langchain-mcp-adapters/releases/tag/langchain-mcp-adapters%3D%3D0.1.12 | 来源类型 github_release 暴露的待验证使用条件。"
            ],
            "severity": "medium",
            "suggested_check": "来源显示可能已有修复、规避或版本变化，说明书中必须标注适用版本。",
            "title": "来源证据：langchain-mcp-adapters==0.1.12",
            "user_impact": "可能增加新用户试用和生产接入成本。"
          },
          {
            "body": "GitHub 社区证据显示该项目存在一个维护/版本相关的待验证问题：langchain-mcp-adapters==0.2.0",
            "category": "维护坑",
            "evidence": [
              "community_evidence:github | cevd_59483f9a6a16414c9087b1751fba8efc | https://github.com/langchain-ai/langchain-mcp-adapters/releases/tag/langchain-mcp-adapters%3D%3D0.2.0 | 来源类型 github_release 暴露的待验证使用条件。"
            ],
            "severity": "medium",
            "suggested_check": "来源显示可能已有修复、规避或版本变化，说明书中必须标注适用版本。",
            "title": "来源证据：langchain-mcp-adapters==0.2.0",
            "user_impact": "可能影响升级、迁移或版本选择。"
          },
          {
            "body": "GitHub 社区证据显示该项目存在一个维护/版本相关的待验证问题：langchain-mcp-adapters==0.2.0a1",
            "category": "维护坑",
            "evidence": [
              "community_evidence:github | cevd_4e7fcda1716948898295279af95f8f96 | https://github.com/langchain-ai/langchain-mcp-adapters/releases/tag/langchain-mcp-adapters%3D%3D0.2.0a1 | 来源类型 github_release 暴露的待验证使用条件。"
            ],
            "severity": "medium",
            "suggested_check": "来源显示可能已有修复、规避或版本变化，说明书中必须标注适用版本。",
            "title": "来源证据：langchain-mcp-adapters==0.2.0a1",
            "user_impact": "可能影响升级、迁移或版本选择。"
          },
          {
            "body": "未记录 last_activity_observed。",
            "category": "维护坑",
            "evidence": [
              "evidence.maintainer_signals | github_repo:929158279 | https://github.com/langchain-ai/langchain-mcp-adapters | last_activity_observed missing"
            ],
            "severity": "medium",
            "suggested_check": "补 GitHub 最近 commit、release、issue/PR 响应信号。",
            "title": "维护活跃度未知",
            "user_impact": "新项目、停更项目和活跃项目会被混在一起，推荐信任度下降。"
          },
          {
            "body": "no_demo",
            "category": "安全/权限坑",
            "evidence": [
              "downstream_validation.risk_items | github_repo:929158279 | https://github.com/langchain-ai/langchain-mcp-adapters | no_demo; severity=medium"
            ],
            "severity": "medium",
            "suggested_check": "进入安全/权限治理复核队列。",
            "title": "下游验证发现风险项",
            "user_impact": "下游已经要求复核，不能在页面中弱化。"
          },
          {
            "body": "No sandbox install has been executed yet; downstream must verify before user use.",
            "category": "安全/权限坑",
            "evidence": [
              "risks.safety_notes | github_repo:929158279 | https://github.com/langchain-ai/langchain-mcp-adapters | No sandbox install has been executed yet; downstream must verify before user use."
            ],
            "severity": "medium",
            "suggested_check": "转成明确权限清单和安全审查提示。",
            "title": "存在安全注意事项",
            "user_impact": "用户安装前需要知道权限边界和敏感操作。"
          },
          {
            "body": "no_demo",
            "category": "安全/权限坑",
            "evidence": [
              "risks.scoring_risks | github_repo:929158279 | https://github.com/langchain-ai/langchain-mcp-adapters | no_demo; severity=medium"
            ],
            "severity": "medium",
            "suggested_check": "把风险写入边界卡，并确认是否需要人工复核。",
            "title": "存在评分风险",
            "user_impact": "风险会影响是否适合普通用户安装。"
          },
          {
            "body": "GitHub 社区证据显示该项目存在一个安全/权限相关的待验证问题：Feature Request: Support passing server-defined params extensions (e.g. LangGraph `context`) through tools/call",
            "category": "安全/权限坑",
            "evidence": [
              "community_evidence:github | cevd_8c46dab4b6dd4a6e92c96af49ea47647 | https://github.com/langchain-ai/langchain-mcp-adapters/issues/502 | 来源讨论提到 python 相关条件，需在安装/试用前复核。"
            ],
            "severity": "medium",
            "suggested_check": "来源问题仍为 open，Pack Agent 需要复核是否仍影响当前版本。",
            "title": "来源证据：Feature Request: Support passing server-defined params extensions (e.g. LangGraph `context`) through tools/call",
            "user_impact": "可能影响授权、密钥配置或安全边界。"
          },
          {
            "body": "issue_or_pr_quality=unknown。",
            "category": "维护坑",
            "evidence": [
              "evidence.maintainer_signals | github_repo:929158279 | https://github.com/langchain-ai/langchain-mcp-adapters | issue_or_pr_quality=unknown"
            ],
            "severity": "low",
            "suggested_check": "抽样最近 issue/PR，判断是否长期无人处理。",
            "title": "issue/PR 响应质量未知",
            "user_impact": "用户无法判断遇到问题后是否有人维护。"
          }
        ],
        "source": "ProjectPitfallLog + ProjectHitPacket + validation + community signals",
        "summary": "发现 17 个潜在踩坑项，其中 3 个为 high/blocking；最高优先级：安装坑 - 来源证据：Prompts and Resources auto-discovery。",
        "title": "踩坑日志"
      },
      "snapshot": {
        "contributors": 27,
        "forks": 424,
        "license": "unknown",
        "note": "站点快照，非实时质量证明；用于开工前背景判断。",
        "stars": 3525
      },
      "source_url": "https://github.com/langchain-ai/langchain-mcp-adapters",
      "steps": [
        {
          "body": "不安装项目，先体验能力节奏。",
          "code": "preview",
          "title": "先试 Prompt"
        },
        {
          "body": "理解输入、输出、失败模式和边界。",
          "code": "manual",
          "title": "读说明书"
        },
        {
          "body": "把上下文交给宿主 AI 继续工作。",
          "code": "context",
          "title": "带给 AI"
        },
        {
          "body": "进入主力环境前先完成安装入口与风险边界验证。",
          "code": "verify",
          "title": "沙箱验证"
        }
      ],
      "subtitle": "LangChain 🔌 MCP",
      "title": "langchain-mcp-adapters 能力包",
      "trial_prompt": "# langchain-mcp-adapters - Prompt Preview\n\n> Copy the prompt below into your AI host before installing anything.\n> Its purpose is to let you safely feel the project's workflow, not to claim the project has already run.\n\n## Copy this prompt\n\n```text\nYou are using an independent Doramagic capability pack for langchain-ai/langchain-mcp-adapters.\n\nProject:\n- Name: langchain-mcp-adapters\n- Repository: https://github.com/langchain-ai/langchain-mcp-adapters\n- Summary: LangChain 🔌 MCP\n- Host target: mcp_host\n\nGoal:\nHelp me evaluate this project for the following task without installing it yet: LangChain 🔌 MCP\n\nBefore taking action:\n1. Restate my task, success standard, and boundary.\n2. Identify whether the next step requires tools, browser access, network access, filesystem access, credentials, package installation, or host configuration.\n3. Use only the Doramagic Project Pack, the upstream repository, and the source-linked evidence listed below.\n4. If a real command, install step, API call, file write, or host integration is required, mark it as \"requires post-install verification\" and ask for approval first.\n5. If evidence is missing, say \"evidence is missing\" instead of filling the gap.\n\nPreviewable capabilities:\n- Capability 1: LangChain 🔌 MCP\n\nCapabilities that require post-install verification:\n- Capability 1: Use the source-backed project context to guide one small, checkable workflow step.\n\nCore service flow:\n1. page-introduction: Introduction. Produce one small intermediate artifact and wait for confirmation.\n2. page-installation: Installation. Produce one small intermediate artifact and wait for confirmation.\n3. page-quickstart: Quick Start Guide. Produce one small intermediate artifact and wait for confirmation.\n4. page-architecture: System Architecture. Produce one small intermediate artifact and wait for confirmation.\n5. page-tool-conversion: Tool Conversion. 
Produce one small intermediate artifact and wait for confirmation.\n\nSource-backed evidence to keep in mind:\n- https://github.com/langchain-ai/langchain-mcp-adapters\n- https://github.com/langchain-ai/langchain-mcp-adapters#readme\n- README.md\n- langchain_mcp_adapters/__init__.py\n- pyproject.toml\n- langchain_mcp_adapters/tools.py\n- langchain_mcp_adapters/client.py\n- langchain_mcp_adapters/sessions.py\n\nFirst response rules:\n1. Start Step 1 only.\n2. Explain the one service action you will perform first.\n3. Ask exactly three questions about my target workflow, success standard, and sandbox boundary.\n4. Stop and wait for my answers.\n\nStep 1 follow-up protocol:\n- After I answer the first three questions, stay in Step 1.\n- Produce six parts only: clarified task, success standard, boundary conditions, two or three options, tradeoffs for each option, and one recommendation.\n- End by asking whether I confirm the recommendation.\n- Do not move to Step 2 until I explicitly confirm.\n\nConversation rules:\n- Advance one step at a time and wait for confirmation after each small artifact.\n- Write outputs as recommendations or planned checks, not as completed execution.\n- Do not claim tests passed, files changed, commands ran, APIs were called, or the project was installed.\n- If the user asks for execution, first provide the sandbox setup, expected output, rollback, and approval checkpoint.\n```\n",
      "voices": [
        {
          "body": "来源平台：github。github/github_issue: `MultiServerMCPClient.get_tools()` silently returns no tools when any si（https://github.com/langchain-ai/langchain-mcp-adapters/issues/492）；github/github_issue: Feature Request: Support passing server-defined params extensions (e.g. （https://github.com/langchain-ai/langchain-mcp-adapters/issues/502）；github/github_issue: Prompts and Resources auto-discovery（https://github.com/langchain-ai/langchain-mcp-adapters/issues/62）；github/github_issue: Fix TypeError in resources.py and make __aexit__ an async coroutine in c（https://github.com/langchain-ai/langchain-mcp-adapters/issues/496）；github/github_release: langchain-mcp-adapters==0.2.2（https://github.com/langchain-ai/langchain-mcp-adapters/releases/tag/langchain-mcp-adapters%3D%3D0.2.2）；github/github_release: langchain-mcp-adapters==0.2.1（https://github.com/langchain-ai/langchain-mcp-adapters/releases/tag/langchain-mcp-adapters%3D%3D0.2.1）；github/github_release: langchain-mcp-adapters==0.2.0（https://github.com/langchain-ai/langchain-mcp-adapters/releases/tag/langchain-mcp-adapters%3D%3D0.2.0）；github/github_release: langchain-mcp-adapters==0.2.0a1（https://github.com/langchain-ai/langchain-mcp-adapters/releases/tag/langchain-mcp-adapters%3D%3D0.2.0a1）；github/github_release: langchain-mcp-adapters==0.1.14（https://github.com/langchain-ai/langchain-mcp-adapters/releases/tag/langchain-mcp-adapters%3D%3D0.1.14）；github/github_release: langchain-mcp-adapters==0.1.13（https://github.com/langchain-ai/langchain-mcp-adapters/releases/tag/langchain-mcp-adapters%3D%3D0.1.13）；github/github_release: langchain-mcp-adapters==0.1.12（https://github.com/langchain-ai/langchain-mcp-adapters/releases/tag/langchain-mcp-adapters%3D%3D0.1.12）；github/github_release: langchain-mcp-adapters==0.1.10（https://github.com/langchain-ai/langchain-mcp-adapters/releases/tag/langchain-mcp-adapters%3D%3D0.1.10）。这些是项目级外部声音，不作为单独质量证明。",
          "items": [
            {
              "kind": "github_issue",
              "source": "github",
              "title": "`MultiServerMCPClient.get_tools()` silently returns no tools when any si",
              "url": "https://github.com/langchain-ai/langchain-mcp-adapters/issues/492"
            },
            {
              "kind": "github_issue",
              "source": "github",
              "title": "Feature Request: Support passing server-defined params extensions (e.g. ",
              "url": "https://github.com/langchain-ai/langchain-mcp-adapters/issues/502"
            },
            {
              "kind": "github_issue",
              "source": "github",
              "title": "Prompts and Resources auto-discovery",
              "url": "https://github.com/langchain-ai/langchain-mcp-adapters/issues/62"
            },
            {
              "kind": "github_issue",
              "source": "github",
              "title": "Fix TypeError in resources.py and make __aexit__ an async coroutine in c",
              "url": "https://github.com/langchain-ai/langchain-mcp-adapters/issues/496"
            },
            {
              "kind": "github_release",
              "source": "github",
              "title": "langchain-mcp-adapters==0.2.2",
              "url": "https://github.com/langchain-ai/langchain-mcp-adapters/releases/tag/langchain-mcp-adapters%3D%3D0.2.2"
            },
            {
              "kind": "github_release",
              "source": "github",
              "title": "langchain-mcp-adapters==0.2.1",
              "url": "https://github.com/langchain-ai/langchain-mcp-adapters/releases/tag/langchain-mcp-adapters%3D%3D0.2.1"
            },
            {
              "kind": "github_release",
              "source": "github",
              "title": "langchain-mcp-adapters==0.2.0",
              "url": "https://github.com/langchain-ai/langchain-mcp-adapters/releases/tag/langchain-mcp-adapters%3D%3D0.2.0"
            },
            {
              "kind": "github_release",
              "source": "github",
              "title": "langchain-mcp-adapters==0.2.0a1",
              "url": "https://github.com/langchain-ai/langchain-mcp-adapters/releases/tag/langchain-mcp-adapters%3D%3D0.2.0a1"
            },
            {
              "kind": "github_release",
              "source": "github",
              "title": "langchain-mcp-adapters==0.1.14",
              "url": "https://github.com/langchain-ai/langchain-mcp-adapters/releases/tag/langchain-mcp-adapters%3D%3D0.1.14"
            },
            {
              "kind": "github_release",
              "source": "github",
              "title": "langchain-mcp-adapters==0.1.13",
              "url": "https://github.com/langchain-ai/langchain-mcp-adapters/releases/tag/langchain-mcp-adapters%3D%3D0.1.13"
            },
            {
              "kind": "github_release",
              "source": "github",
              "title": "langchain-mcp-adapters==0.1.12",
              "url": "https://github.com/langchain-ai/langchain-mcp-adapters/releases/tag/langchain-mcp-adapters%3D%3D0.1.12"
            },
            {
              "kind": "github_release",
              "source": "github",
              "title": "langchain-mcp-adapters==0.1.10",
              "url": "https://github.com/langchain-ai/langchain-mcp-adapters/releases/tag/langchain-mcp-adapters%3D%3D0.1.10"
            }
          ],
          "status": "已收录 12 条来源",
          "title": "社区讨论"
        }
      ]
    },
    "homepage_card": {
      "category": "工具连接与集成",
      "desc": "LangChain 🔌 MCP",
      "effort": "安装已验证",
      "forks": 424,
      "icon": "link",
      "name": "langchain-mcp-adapters 能力包",
      "risk": "可发布",
      "slug": "langchain-mcp-adapters",
      "stars": 3525,
      "tags": [
        "MCP 工具",
        "知识库问答",
        "结构化数据提取",
        "节点式流程编排",
        "开源工具"
      ],
      "thumb": "gray",
      "type": "MCP 配置"
    },
    "manual": {
      "markdown": "# https://github.com/langchain-ai/langchain-mcp-adapters 项目说明书\n\n生成时间：2026-05-15 14:10:43 UTC\n\n## 目录\n\n- [Introduction](#page-introduction)\n- [Installation](#page-installation)\n- [Quick Start Guide](#page-quickstart)\n- [System Architecture](#page-architecture)\n- [Package Structure](#page-package-structure)\n- [Tool Conversion](#page-tool-conversion)\n- [MultiServerMCPClient](#page-multiserver-client)\n- [Transport Types](#page-transport-types)\n- [Callbacks](#page-callbacks)\n- [Tool Call Interceptors](#page-interceptors)\n\n<a id='page-introduction'></a>\n\n## Introduction\n\n### 相关页面\n\n相关主题：[Installation](#page-installation), [Quick Start Guide](#page-quickstart)\n\n<details>\n<summary>相关源码文件</summary>\n\n以下源码文件用于生成本页说明：\n\n- [README.md](https://github.com/langchain-ai/langchain-mcp-adapters/blob/main/README.md)\n- [langchain_mcp_adapters/__init__.py](https://github.com/langchain-ai/langchain-mcp-adapters/blob/main/langchain_mcp_adapters/__init__.py)\n- [langchain_mcp_adapters/tools.py](https://github.com/langchain-ai/langchain-mcp-adapters/blob/main/langchain_mcp_adapters/tools.py)\n- [langchain_mcp_adapters/resources.py](https://github.com/langchain-ai/langchain-mcp-adapters/blob/main/langchain_mcp_adapters/resources.py)\n- [langchain_mcp_adapters/client.py](https://github.com/langchain-ai/langchain-mcp-adapters/blob/main/langchain_mcp_adapters/client.py)\n- [langchain_mcp_adapters/sessions.py](https://github.com/langchain-ai/langchain-mcp-adapters/blob/main/langchain_mcp_adapters/sessions.py)\n- [langchain_mcp_adapters/interceptors.py](https://github.com/langchain-ai/langchain-mcp-adapters/blob/main/langchain_mcp_adapters/interceptors.py)\n</details>\n\n# Introduction\n\nLangChain MCP Adapters is a Python library that bridges the gap between the Model Context Protocol (MCP) ecosystem and LangChain/LangGraph applications. 
This library provides a lightweight wrapper that converts MCP tools, prompts, and resources into LangChain-compatible formats, enabling seamless integration of MCP servers with AI agents and applications built on the LangChain framework.\n\n## Overview\n\nThe Model Context Protocol (MCP) is an open protocol developed by Anthropic that enables AI applications to connect with external data sources, tools, and services. MCP defines a standard interface for AI models to interact with various resources through a client-server architecture.\n\nLangChain MCP Adapters serves as the integration layer between these two ecosystems. It allows developers to:\n\n- Use MCP servers as tool providers for LangChain and LangGraph agents\n- Load tools from multiple MCP servers simultaneously\n- Convert MCP resources into LangChain Blob objects for processing\n- Transform MCP prompts into formats compatible with LangChain\n- Intercept and modify tool call behavior through a configurable middleware pattern\n\n资料来源：[README.md:1-20](https://github.com/langchain-ai/langchain-mcp-adapters/blob/main/README.md)\n\n## Architecture\n\nThe library follows a modular architecture with clear separation of concerns across several key components:\n\n```mermaid\ngraph TD\n    A[LangChain/LangGraph Agent] --> B[langchain-mcp-adapters]\n    B --> C[Tools Adapter]\n    B --> D[Resources Adapter]\n    B --> E[Prompts Adapter]\n    B --> F[MultiServerMCPClient]\n    C --> G[MCP ClientSession]\n    D --> G\n    E --> G\n    F --> H[Connection Manager]\n    H --> I[StdioConnection]\n    H --> J[StreamableHttpConnection]\n    H --> K[SSEConnection]\n    H --> L[WebsocketConnection]\n    G --> M[MCP Server 1]\n    G --> N[MCP Server 2]\n    G --> O[MCP Server N]\n```\n\n### Core Components\n\n| Component | File | Purpose |\n|-----------|------|---------|\n| `MultiServerMCPClient` | `client.py` | Manages connections to multiple MCP servers |\n| `load_mcp_tools()` | `tools.py` | Converts MCP tools to LangChain 
tools |\n| `load_mcp_resources()` | `resources.py` | Converts MCP resources to LangChain Blobs |\n| `load_mcp_prompt()` | `prompts.py` | Converts MCP prompts to LangChain prompts |\n| `ToolCallInterceptor` | `interceptors.py` | Middleware for tool call lifecycle management |\n\n资料来源：[langchain_mcp_adapters/__init__.py:1-12](https://github.com/langchain-ai/langchain-mcp-adapters/blob/main/langchain_mcp_adapters/__init__.py)\n\n## Supported Transports\n\nThe library supports multiple transport mechanisms for connecting to MCP servers. Each transport type is implemented in the sessions module and provides different capabilities for various deployment scenarios.\n\n```mermaid\ngraph LR\n    A[Client Application] --> B[Transport Layer]\n    B --> C[stdio]\n    B --> D[streamable-http]\n    B --> E[SSE]\n    B --> F[WebSocket]\n    C --> G[Local Process]\n    D --> H[HTTP Server]\n    E --> H\n    F --> H\n```\n\n### Transport Comparison\n\n| Transport | Use Case | Headers Support | Stateful | Notes |\n|-----------|----------|-----------------|----------|-------|\n| `stdio` | Local subprocesses | No | Yes | Standard I/O communication |\n| `streamable-http` | HTTP-based servers | Yes | Configurable | Recommended for stateless deployments |\n| `sse` | Server-Sent Events | Yes | Yes | Bidirectional communication |\n| `websocket` | Persistent connections | No | Yes | Low latency, real-time |\n\n资料来源：[langchain_mcp_adapters/sessions.py](https://github.com/langchain-ai/langchain-mcp-adapters/blob/main/langchain_mcp_adapters/sessions.py)\n\n## Tool Conversion Process\n\nWhen loading MCP tools, the library performs a series of conversions to transform the tool definitions into LangChain-compatible `StructuredTool` objects. 
This process involves mapping MCP tool schemas, descriptions, and execution semantics.\n\n```mermaid\ngraph TD\n    A[MCP Tool Definition] --> B[Extract inputSchema]\n    B --> C[Create StructuredTool]\n    C --> D[Wrap with interceptor chain]\n    D --> E[Return BaseTool]\n    E --> F[Used by LangChain Agent]\n    F --> G[Tool call invocation]\n    G --> H[MCP ClientSession.call_tool]\n    H --> I[Result conversion]\n    I --> J[Return to Agent]\n```\n\n### Tool Result Handling\n\nThe tool adapter handles various content types returned by MCP tools:\n\n| MCP Content Type | LangChain Output | Notes |\n|------------------|------------------|-------|\n| `TextContent` | `{\"type\": \"text\", \"text\": ...}` | Direct text conversion |\n| `ImageContent` | `{\"type\": \"image\", \"base64\": ..., \"mime_type\": ...}` | Image data with MIME type |\n| `ResourceLink` (image/*) | `{\"type\": \"image\", \"url\": ...}` | Image URL reference |\n| `ResourceLink` (other) | `{\"type\": \"file\", \"url\": ...}` | File URL reference |\n| `EmbeddedResource` (text) | `{\"type\": \"text\", \"text\": ...}` | Embedded text content |\n| `EmbeddedResource` (blob) | `{\"type\": \"image\"/\"file\", ...}` | Binary content |\n\n资料来源：[langchain_mcp_adapters/tools.py:70-130](https://github.com/langchain-ai/langchain-mcp-adapters/blob/main/langchain_mcp_adapters/tools.py)\n\n## Interceptor System\n\nThe library provides a powerful interceptor mechanism that allows developers to intercept and modify tool call behavior. 
This follows the onion pattern (also known as decorator pattern) for composable middleware.\n\n```mermaid\ngraph TD\n    A[Request] --> B[Interceptor 1]\n    B --> C[Interceptor 2]\n    C --> D[Interceptor N]\n    D --> E[Base Handler<br/>session.call_tool]\n    E --> F[Interceptor N Result]\n    F --> G[Interceptor 2 Result]\n    G --> H[Interceptor 1 Result]\n    H --> I[Response]\n```\n\n### ToolCallInterceptor Interface\n\nInterceptors implement the `ToolCallInterceptor` protocol and can:\n\n- Modify tool arguments before execution\n- Change the tool name being called\n- Add or modify HTTP headers for requests\n- Transform or wrap the result\n- Handle errors and retry logic\n- Support LangGraph's `Command` for state modification\n\n资料来源：[langchain_mcp_adapters/interceptors.py:1-50](https://github.com/langchain-ai/langchain-mcp-adapters/blob/main/langchain_mcp_adapters/interceptors.py)\n\n## Resource Conversion\n\nMCP resources are converted to LangChain `Blob` objects, enabling integration with LangChain's document loading and processing capabilities.\n\n```mermaid\ngraph TD\n    A[MCP Resource URI] --> B[session.read_resource]\n    B --> C[ResourceContents]\n    C --> D{Content Type?}\n    D -->|TextResourceContents| E[Extract text]\n    D -->|BlobResourceContents| F[base64 decode]\n    E --> G[Blob.from_data]\n    F --> G\n    G --> H[LangChain Blob]\n```\n\n资料来源：[langchain_mcp_adapters/resources.py:1-60](https://github.com/langchain-ai/langchain-mcp-adapters/blob/main/langchain_mcp_adapters/resources.py)\n\n## Basic Usage Patterns\n\n### Single Server with load_mcp_tools\n\n```python\nfrom mcp import ClientSession\nfrom langchain_mcp_adapters.tools import load_mcp_tools\n\n# Initialize MCP client session\nasync with ClientSession(read, write) as session:\n    await session.initialize()\n    tools = await load_mcp_tools(session)\n    # Use tools with LangChain agent\n```\n\n### Multi-Server with MultiServerMCPClient\n\n```python\nfrom 
langchain_mcp_adapters.client import MultiServerMCPClient\n\nclient = MultiServerMCPClient({\n    \"math\": {\n        \"command\": \"python\",\n        \"args\": [\"./math_server.py\"],\n        \"transport\": \"stdio\",\n    },\n    \"weather\": {\n        \"url\": \"http://localhost:8000/mcp\",\n        \"transport\": \"http\",\n    }\n})\ntools = await client.get_tools()\n```\n\n资料来源：[README.md:40-80](https://github.com/langchain-ai/langchain-mcp-adapters/blob/main/README.md)\n\n## Installation\n\nThe library can be installed via pip:\n\n```bash\npip install langchain-mcp-adapters\n```\n\nFor LangGraph integration with full agent capabilities:\n\n```bash\npip install langchain-mcp-adapters langgraph \"langchain[openai]\"\n```\n\n资料来源：[README.md:25-30](https://github.com/langchain-ai/langchain-mcp-adapters/blob/main/README.md)\n\n## Key Features Summary\n\n| Feature | Description |\n|---------|-------------|\n| Tool Conversion | Convert MCP tools to LangChain `StructuredTool` objects |\n| Multi-Server Support | Connect to multiple MCP servers simultaneously |\n| Resource Loading | Convert MCP resources to LangChain Blobs |\n| Transport Flexibility | Support for stdio, HTTP, SSE, and WebSocket transports |\n| Interceptor Middleware | Hook into tool call lifecycle for custom behavior |\n| LangGraph Integration | Full compatibility with LangGraph agents and state management |\n| Pagination Support | Automatic handling of paginated tool listings |\n\n## Related Documentation\n\n- [Tools Module](./tools) - Detailed guide on tool conversion and execution\n- [Client Module](./client) - Multi-server client configuration and usage\n- [Resources Module](./resources) - Resource loading and conversion\n- [Interceptors](./interceptors) - Middleware and request/response modification\n- [Sessions](./sessions) - Transport layer implementation details\n\n---\n\n<a id='page-installation'></a>\n\n## Installation\n\n### 相关页面\n\n相关主题：[Introduction](#page-introduction), [Quick Start 
Guide](#page-quickstart)\n\n<details>\n<summary>相关源码文件</summary>\n\n以下源码文件用于生成本页说明：\n\n- [pyproject.toml](https://github.com/langchain-ai/langchain-mcp-adapters/blob/main/pyproject.toml)\n- [langchain_mcp_adapters/tools.py](https://github.com/langchain-ai/langchain-mcp-adapters/blob/main/langchain_mcp_adapters/tools.py)\n- [langchain_mcp_adapters/resources.py](https://github.com/langchain-ai/langchain-mcp-adapters/blob/main/langchain_mcp_adapters/resources.py)\n- [langchain_mcp_adapters/client.py](https://github.com/langchain-ai/langchain-mcp-adapters/blob/main/langchain_mcp_adapters/client.py)\n- [langchain_mcp_adapters/sessions.py](https://github.com/langchain-ai/langchain-mcp-adapters/blob/main/langchain_mcp_adapters/sessions.py)\n- [langchain_mcp_adapters/interceptors.py](https://github.com/langchain-ai/langchain-mcp-adapters/blob/main/langchain_mcp_adapters/interceptors.py)\n</details>\n\n# Installation\n\nThis page documents how to install and set up the **langchain-mcp-adapters** library, which provides a lightweight wrapper that makes [Anthropic Model Context Protocol (MCP)](https://modelcontextprotocol.io/introduction) tools compatible with [LangChain](https://github.com/langchain-ai/langchain) and [LangGraph](https://github.com/langchain-ai/langgraph).\n\n## Overview\n\nThe `langchain-mcp-adapters` library bridges MCP servers with LangChain/LangGraph ecosystems. 
It enables:\n\n- Converting MCP tools into LangChain tools\n- Connecting to multiple MCP servers simultaneously\n- Loading and managing MCP resources as LangChain Blob objects\n- Intercepting and modifying tool call execution\n\n资料来源：[README.md:1-20]()\n\n## Prerequisites\n\n### Python Version\n\n| Version | Support Status |\n|---------|----------------|\n| Python 3.10+ | Required |\n| Python 3.11+ | Recommended |\n| Python 3.12+ | Supported |\n\n### Required Dependencies\n\nThe following packages are automatically installed as dependencies:\n\n| Package | Purpose | Min Version |\n|---------|---------|-------------|\n| `langchain-core` | Core LangChain functionality | Latest stable |\n| `mcp` | Model Context Protocol SDK | Latest stable |\n| `pydantic` | Data validation and settings | V2 |\n| `httpx` | HTTP client for streamable HTTP transport | Latest stable |\n\n### Optional Dependencies\n\n| Package | Purpose | Install Command |\n|---------|---------|-----------------|\n| `langgraph` | For LangGraph agent support | `pip install langgraph` |\n| `langchain[openai]` | OpenAI integration for agents | `pip install \"langchain[openai]\"` |\n\n资料来源：[langchain_mcp_adapters/tools.py:1-50]()\n\n## Basic Installation\n\n### Standard Installation\n\nInstall the core package using pip:\n\n```bash\npip install langchain-mcp-adapters\n```\n\n资料来源：[README.md:32]()\n\n### With LangGraph Support\n\nFor full LangGraph agent functionality:\n\n```bash\npip install langchain-mcp-adapters langgraph \"langchain[openai]\"\n```\n\nThis installs:\n- The MCP adapters library\n- LangGraph for building stateful agents\n- OpenAI integration for LLM-powered agents\n\n资料来源：[README.md:32-36]()\n\n## Environment Configuration\n\n### OpenAI API Key\n\nIf using OpenAI models with the library, set your API key:\n\n```bash\nexport OPENAI_API_KEY=<your_api_key>\n```\n\nAlternatively, pass it programmatically:\n\n```python\nimport os\nos.environ[\"OPENAI_API_KEY\"] = \"your-api-key\"\n```\n\n## Package 
Dependencies Graph\n\n```mermaid\ngraph TD\n    subgraph \"langchain-mcp-adapters\"\n        A[tools.py] --> B[Base Tools Module]\n        A --> C[Tool Interceptors]\n        D[resources.py] --> E[Resource Adapter]\n        F[client.py] --> G[MultiServerMCPClient]\n        H[sessions.py] --> I[Session Management]\n    end\n    \n    subgraph \"Required Dependencies\"\n        J[langchain-core] --> B\n        J --> E\n        K[mcp Python SDK] --> B\n        K --> G\n        K --> I\n        L[pydantic] --> B\n        M[httpx] --> I\n    end\n    \n    subgraph \"Optional Dependencies\"\n        N[langgraph] -.->|if installed| B\n        N -.->|if installed| G\n    end\n```\n\n## Installation Verification\n\nAfter installation, verify the package is correctly installed:\n\n```python\nimport langchain_mcp_adapters\nprint(langchain_mcp_adapters.__version__)\n```\n\nTest basic MCP tool loading:\n\n```python\nfrom langchain_mcp_adapters.tools import load_mcp_tools\nfrom langchain_mcp_adapters.client import MultiServerMCPClient\n\n# Verify imports work\nprint(\"Installation verified successfully!\")\n```\n\n## Transport-Specific Installation Notes\n\nThe library supports multiple MCP server transport types, each with specific requirements:\n\n### Standard I/O (stdio) Transport\n\nNo additional dependencies required. 
Uses the built-in `mcp` SDK stdio client.\n\n资料来源：[langchain_mcp_adapters/sessions.py:1-100]()\n\n### Streamable HTTP Transport\n\nRequires `httpx` for HTTP client functionality (included by default).\n\n```bash\npip install langchain-mcp-adapters\n# httpx is installed as a dependency\n```\n\n### Server-Sent Events (SSE) Transport\n\nRequires `httpx` with SSE support (included by default).\n\n资料来源：[langchain_mcp_adapters/sessions.py:100-200]()\n\n## Installing Development Version\n\n### From Source\n\nTo install the latest development version from the repository:\n\n```bash\ngit clone https://github.com/langchain-ai/langchain-mcp-adapters.git\ncd langchain-mcp-adapters\npip install -e .\n```\n\n### With Development Dependencies\n\n```bash\ngit clone https://github.com/langchain-ai/langchain-mcp-adapters.git\ncd langchain-mcp-adapters\npip install -e \".[dev]\"\n```\n\n## Dependency Resolution\n\n### Core Dependencies\n\nThe package requires these core dependencies which are installed automatically:\n\n```toml\n# From pyproject.toml\ndependencies = [\n    \"langchain-core>=0.0.1\",\n    \"mcp>=1.0.0\",\n    \"pydantic>=2.0.0\",\n    \"httpx>=0.25.0\",\n]\n```\n\n### Optional Feature Dependencies\n\n| Feature | Dependencies |\n|---------|--------------|\n| LangGraph Support | `langgraph` |\n| All Features | `langgraph`, `langchain[openai]` |\n\n资料来源：[pyproject.toml](https://github.com/langchain-ai/langchain-mcp-adapters/blob/main/pyproject.toml)\n\n## Importing the Package\n\nAfter installation, import the main components:\n\n```python\n# Core tools module\nfrom langchain_mcp_adapters.tools import load_mcp_tools, convert_mcp_tool_to_langchain_tool\n\n# Multi-server client\nfrom langchain_mcp_adapters.client import MultiServerMCPClient\n\n# Resource adapter\nfrom langchain_mcp_adapters.resources import load_mcp_resources, get_mcp_resource\n\n# Session management\nfrom langchain_mcp_adapters.sessions import create_session, Connection\n\n# Interceptors (optional)\nfrom 
langchain_mcp_adapters.interceptors import ToolCallInterceptor\n```\n\n资料来源：[langchain_mcp_adapters/tools.py:1-50]()\n\n## Next Steps\n\nAfter installation, proceed to:\n\n1. **[Quickstart Guide](README.md)** - Get started with basic MCP tool usage\n2. **[Multi-Server Setup](README.md)** - Connect to multiple MCP servers\n3. **[LangGraph Integration](README.md)** - Build agents with MCP tools\n4. **[Client Configuration](README.md)** - Configure connection options and transports\n\n---\n\n<a id='page-quickstart'></a>\n\n## Quick Start Guide\n\n### 相关页面\n\n相关主题：[Introduction](#page-introduction), [Tool Conversion](#page-tool-conversion), [MultiServerMCPClient](#page-multiserver-client)\n\n<details>\n<summary>Relevant Source Files</summary>\n\nThe following source files were used to generate this page:\n\n- [README.md](https://github.com/langchain-ai/langchain-mcp-adapters/blob/main/README.md)\n- [langchain_mcp_adapters/tools.py](https://github.com/langchain-ai/langchain-mcp-adapters/blob/main/langchain_mcp_adapters/tools.py)\n- [langchain_mcp_adapters/resources.py](https://github.com/langchain-ai/langchain-mcp-adapters/blob/main/langchain_mcp_adapters/resources.py)\n- [langchain_mcp_adapters/client.py](https://github.com/langchain-ai/langchain-mcp-adapters/blob/main/langchain_mcp_adapters/client.py)\n- [langchain_mcp_adapters/interceptors.py](https://github.com/langchain-ai/langchain-mcp-adapters/blob/main/langchain_mcp_adapters/interceptors.py)\n- [examples/servers/streamable-http-stateless/mcp_simple_streamablehttp_stateless/__main__.py](https://github.com/langchain-ai/langchain-mcp-adapters/blob/main/examples/servers/streamable-http-stateless/mcp_simple_streamablehttp_stateless/__main__.py)\n</details>\n\n# Quick Start Guide\n\nThis guide provides a comprehensive introduction to **langchain-mcp-adapters**, a library that bridges [Anthropic's Model Context Protocol (MCP)](https://modelcontextprotocol.io/introduction) servers with 
[LangChain](https://github.com/langchain-ai/langchain) and [LangGraph](https://github.com/langchain-ai/langgraph) applications.\n\n## Overview\n\nThe langchain-mcp-adapters library serves two primary purposes:\n\n1. **Tool Conversion**: Transform MCP tools into LangChain-compatible tools that integrate seamlessly with LangGraph agents\n2. **Multi-Server Client**: Manage connections to multiple MCP servers simultaneously\n\nThe library provides a lightweight wrapper that enables developers to leverage MCP servers' capabilities within the LangChain ecosystem without additional boilerplate code.\n\n## Installation\n\nInstall the core package along with required dependencies:\n\n```bash\npip install langchain-mcp-adapters\n```\n\nFor development with OpenAI models:\n\n```bash\npip install langchain-mcp-adapters langgraph \"langchain[openai]\"\n```\n\n## Architecture Overview\n\nThe library follows a layered architecture where MCP client sessions interact with server tools, prompts, and resources through adapter classes that convert data formats between MCP and LangChain standards.\n\n```mermaid\ngraph TD\n    A[LangChain / LangGraph Application] --> B[langchain-mcp-adapters]\n    B --> C[MultiServerMCPClient]\n    B --> D[Individual Tool Conversion]\n    C --> E[MCP Server 1]\n    C --> F[MCP Server 2]\n    C --> N[MCP Server N]\n    D --> E\n    D --> F\n    D --> N\n    E --> G[stdio Transport]\n    F --> H[HTTP Transport]\n    F --> I[SSE Transport]\n    F --> J[WebSocket Transport]\n```\n\n## Core Components\n\n### MultiServerMCPClient\n\nThe `MultiServerMCPClient` manages connections to multiple MCP servers and provides unified access to their tools, prompts, and resources.\n\n**资料来源**：[langchain_mcp_adapters/client.py:1-50]()\n\n#### Connection Configuration\n\n| Parameter | Type | Description |\n|-----------|------|-------------|\n| `command` | `str` | Executable command (e.g., `\"python\"`, `\"node\"`) |\n| `args` | `list[str]` | Command arguments |\n| 
`transport` | `str` | Transport type: `stdio`, `http`, `sse`, `websocket` |\n| `url` | `str` | Server URL for HTTP/SSE/WebSocket transports |\n| `headers` | `dict[str, str]` | Custom HTTP headers for requests |\n\n#### Supported Transports\n\n| Transport | Use Case | Notes |\n|----------|----------|-------|\n| `stdio` | Local subprocess servers | Communication via stdin/stdout |\n| `http` | Remote HTTP servers | REST-based communication |\n| `sse` | Servers using Server-Sent Events | Real-time streaming |\n| `websocket` | WebSocket connections | Bidirectional communication |\n\n**资料来源**：[langchain_mcp_adapters/client.py:1-100]()\n\n## Basic Usage Patterns\n\n### Pattern 1: Direct Session Usage\n\nFor single-server scenarios, create an MCP session and load tools directly:\n\n```python\nfrom mcp import ClientSession\nfrom mcp.client.streamable_http import streamablehttp_client\nfrom langchain_mcp_adapters.tools import load_mcp_tools\n\nasync with streamablehttp_client(\"http://localhost:3000/mcp\") as (read, write, _):\n    async with ClientSession(read, write) as session:\n        await session.initialize()\n        tools = await load_mcp_tools(session)\n        # Use tools with LangChain/LangGraph\n```\n\n**资料来源**：[README.md:1-50]()\n\n### Pattern 2: MultiServerMCPClient with stdio\n\nConnect to locally running MCP servers using standard I/O:\n\n```python\nfrom langchain_mcp_adapters.client import MultiServerMCPClient\n\nclient = MultiServerMCPClient(\n    {\n        \"math\": {\n            \"command\": \"python\",\n            \"args\": [\"/path/to/math_server.py\"],\n            \"transport\": \"stdio\",\n        },\n    }\n)\ntools = await client.get_tools()\n```\n\n**资料来源**：[README.md:50-100]()\n\n### Pattern 3: MultiServerMCPClient with HTTP\n\nConnect to remote MCP servers via HTTP transport:\n\n```python\nfrom langchain_mcp_adapters.client import MultiServerMCPClient\n\nclient = MultiServerMCPClient(\n    {\n        \"weather\": {\n            \"url\": 
\"http://localhost:8000/mcp\",\n            \"transport\": \"http\",\n        }\n    }\n)\ntools = await client.get_tools()\n```\n\n**资料来源**：[README.md:100-150]()\n\n### Pattern 4: Explicit Session Management\n\nFor advanced scenarios requiring direct session access:\n\n```python\nfrom langchain_mcp_adapters.client import MultiServerMCPClient\nfrom langchain_mcp_adapters.tools import load_mcp_tools\n\nclient = MultiServerMCPClient({...})\nasync with client.session(\"math\") as session:\n    tools = await load_mcp_tools(session)\n```\n\n**资料来源**：[langchain_mcp_adapters/client.py:50-80]()\n\n## Tool Loading\n\n### load_mcp_tools Function\n\nThe `load_mcp_tools` function retrieves all available tools from an MCP session and converts them to LangChain tools.\n\n**资料来源**：[langchain_mcp_adapters/tools.py:100-200]()\n\n#### Parameters\n\n| Parameter | Type | Required | Description |\n|-----------|------|----------|-------------|\n| `session` | `ClientSession` | Yes | MCP client session |\n| `connection` | `Connection` | No | Connection config if session is `None` |\n| `callbacks` | `Callbacks` | No | Event notification handlers |\n| `tool_interceptors` | `list[ToolCallInterceptor]` | No | Interceptors for tool call processing |\n| `server_name` | `str` | No | Server identifier for logging |\n| `tool_name_prefix` | `bool` | No | Prefix tool names with server name (default: `False`) |\n\n#### Return Value\n\nReturns a `list[BaseTool]` containing LangChain-compatible tool objects. 
Each tool's metadata includes annotations from the MCP tool definition.\n\n**资料来源**：[langchain_mcp_adapters/tools.py:200-300]()\n\n## Integration with LangGraph\n\n### Complete Agent Setup\n\nThe following example demonstrates a full LangGraph agent setup using MCP tools:\n\n```python\nfrom langchain_mcp_adapters.client import MultiServerMCPClient\nfrom langgraph.graph import StateGraph, MessagesState, START\nfrom langgraph.prebuilt import ToolNode, tools_condition\nfrom langchain.chat_models import init_chat_model\n\nmodel = init_chat_model(\"openai:gpt-4.1\")\n\nclient = MultiServerMCPClient(\n    {\n        \"math\": {\n            \"command\": \"python\",\n            \"args\": [\"./examples/math_server.py\"],\n            \"transport\": \"stdio\",\n        },\n        \"weather\": {\n            \"url\": \"http://localhost:8000/mcp\",\n            \"transport\": \"http\",\n        }\n    }\n)\n\ntools = await client.get_tools()\n\ndef call_model(state: MessagesState):\n    response = model.bind_tools(tools).invoke(state[\"messages\"])\n    return {\"messages\": response}\n\nbuilder = StateGraph(MessagesState)\nbuilder.add_node(\"call_model\", call_model)\nbuilder.add_node(\"tools\", ToolNode(tools))\nbuilder.add_edge(START, \"call_model\")\nbuilder.add_conditional_edges(\n    \"call_model\",\n    tools_condition,\n)\n# Continue with compile and execution\n```\n\n**资料来源**：[README.md:150-200]()\n\n### Workflow Diagram\n\n```mermaid\ngraph LR\n    A[User Message] --> B[call_model Node]\n    B --> C{tools_condition}\n    C -->|END| D[Response to User]\n    C -->|tools| E[ToolNode]\n    E --> F[MCP Tool Execution]\n    F --> G[Tool Result]\n    G --> B\n```\n\n## Tool Interceptors\n\nTool interceptors allow you to modify tool call requests and responses in an onion-pattern chain:\n\n```mermaid\ngraph TD\n    A[Request] --> B[Interceptor 1]\n    B --> C[Interceptor 2]\n    C --> D[Interceptor N]\n    D --> E[Execute Tool]\n    E --> D\n    D --> C\n    C --> B\n    
B --> F[Response]\n```\n\n**资料来源**：[langchain_mcp_adapters/interceptors.py:1-50]()\n\n### Creating a Custom Interceptor\n\n```python\nfrom langchain_mcp_adapters.interceptors import (\n    ToolCallInterceptor,\n    MCPToolCallRequest,\n    MCPToolCallResult,\n)\n\nasync def logging_interceptor(\n    request: MCPToolCallRequest, \n    next_handler\n) -> MCPToolCallResult:\n    print(f\"Calling tool: {request.name} with args: {request.args}\")\n    result = await next_handler(request)\n    print(f\"Tool result: {result}\")\n    return result\n\nclient = MultiServerMCPClient(\n    {...},\n    tool_interceptors=[logging_interceptor]\n)\n```\n\n## Resource Loading\n\nThe library also supports loading MCP resources as LangChain Blob objects:\n\n```python\nfrom langchain_mcp_adapters.resources import load_mcp_resources\n\n# Load all resources\nblobs = await load_mcp_resources(session)\n\n# Load specific resources\nblobs = await load_mcp_resources(session, uris=[\"resource://file1\", \"resource://file2\"])\n\n# Load single resource\nfrom langchain_mcp_adapters.resources import get_mcp_resource\nblob = await get_mcp_resource(session, \"resource://document\")\n```\n\n**资料来源**：[langchain_mcp_adapters/resources.py:1-80]()\n\n## Creating an MCP Server\n\nFor testing, you can create a simple MCP server using FastMCP:\n\n```python\n# math_server.py\nfrom mcp.server.fastmcp import FastMCP\n\nmcp = FastMCP(\"Math\")\n\n@mcp.tool()\ndef add(a: int, b: int) -> int:\n    \"\"\"Add two numbers\"\"\"\n    return a + b\n\n@mcp.tool()\ndef multiply(a: int, b: int) -> int:\n    \"\"\"Multiply two numbers\"\"\"\n    return a * b\n\nif __name__ == \"__main__\":\n    mcp.run()\n```\n\n**资料来源**：[README.md:50-100]()\n\n## HTTP Server Setup\n\nFor remote access, use the provided streamable HTTP server example:\n\n```bash\ncd examples/servers/streamable-http-stateless/\nuv run mcp-simple-streamablehttp-stateless --port 3000\n```\n\nThis starts a stateless HTTP server on port 3000 that can be 
accessed via the `streamablehttp_client`.\n\n**资料来源**：[examples/servers/streamable-http-stateless/mcp_simple_streamablehttp_stateless/__main__.py:1-10]()\n\n## Response Format\n\nAll tool calls return results in the `content_and_artifact` format:\n\n| Component | Type | Description |\n|-----------|------|-------------|\n| `content` | `list[ToolMessageContentBlock]` | Primary tool response content |\n| `artifact` | `MCPToolArtifact` | Structured data from MCP tool (if any) |\n\n**资料来源**：[langchain_mcp_adapters/tools.py:50-120]()\n\n## Next Steps\n\n- Explore the [API Reference](https://github.com/langchain-ai/langchain-mcp-adapters) for detailed function signatures\n- Review the example applications in the `examples/` directory\n- Implement custom tool interceptors for logging, caching, or authentication\n- Integrate with LangGraph's streaming capabilities for real-time tool execution\n\n---\n\n<a id='page-architecture'></a>\n\n## System Architecture\n\n### 相关页面\n\n相关主题：[Package Structure](#page-package-structure), [Tool Conversion](#page-tool-conversion), [MultiServerMCPClient](#page-multiserver-client)\n\n<details>\n<summary>相关源码文件</summary>\n\n以下源码文件用于生成本页说明：\n\n- [langchain_mcp_adapters/__init__.py](https://github.com/langchain-ai/langchain-mcp-adapters/blob/main/langchain_mcp_adapters/__init__.py)\n- [langchain_mcp_adapters/tools.py](https://github.com/langchain-ai/langchain-mcp-adapters/blob/main/langchain_mcp_adapters/tools.py)\n- [langchain_mcp_adapters/client.py](https://github.com/langchain-ai/langchain-mcp-adapters/blob/main/langchain_mcp_adapters/client.py)\n- [langchain_mcp_adapters/sessions.py](https://github.com/langchain-ai/langchain-mcp-adapters/blob/main/langchain_mcp_adapters/sessions.py)\n- [langchain_mcp_adapters/interceptors.py](https://github.com/langchain-ai/langchain-mcp-adapters/blob/main/langchain_mcp_adapters/interceptors.py)\n- 
[langchain_mcp_adapters/resources.py](https://github.com/langchain-ai/langchain-mcp-adapters/blob/main/langchain_mcp_adapters/resources.py)\n</details>\n\n# System Architecture\n\n## Overview\n\nThe **langchain-mcp-adapters** library provides a lightweight wrapper that makes [Anthropic Model Context Protocol (MCP)](https://modelcontextprotocol.io/introduction) tools compatible with [LangChain](https://github.com/langchain-ai/langchain) and [LangGraph](https://github.com/langchain-ai/langgraph) 资料来源：[README.md]()\n\nThe library acts as a bridge between MCP servers and LangChain applications, enabling:\n\n- **Tool Conversion**: Transform MCP tools into LangChain-compatible tools\n- **Multi-Server Support**: Connect to multiple MCP servers simultaneously\n- **Resource Management**: Convert MCP resources to LangChain Blob objects\n- **Prompt Integration**: Load MCP prompts into LangChain format\n- **Interceptor Support**: Customizable tool call interception and modification\n\n资料来源：[langchain_mcp_adapters/__init__.py:1-10]()\n\n---\n\n## High-Level Architecture\n\nThe system follows a layered architecture with clear separation of concerns:\n\n```mermaid\ngraph TD\n    subgraph \"Client Layer\"\n        Client[MultiServerMCPClient]\n    end\n\n    subgraph \"Session Layer\"\n        Stdio[StdioConnection]\n        HTTP[StreamableHttpConnection]\n        SSE[SSEConnection]\n        WS[WebsocketConnection]\n    end\n\n    subgraph \"Adapters Layer\"\n        Tools[tools.py]\n        Resources[resources.py]\n        Prompts[prompts.py]\n    end\n\n    subgraph \"Core Layer\"\n        Interceptors[interceptors.py]\n        Sessions[sessions.py]\n    end\n\n    subgraph \"External\"\n        MCPServer[MCP Server]\n        LangChain[LangChain/LangGraph]\n    end\n\n    Client --> Tools\n    Client --> Resources\n    Client --> Stdio\n    Client --> HTTP\n    Client --> SSE\n    Client --> WS\n    Stdio --> MCPServer\n    HTTP --> MCPServer\n    SSE --> MCPServer\n    WS --> 
MCPServer\n    Tools --> LangChain\n    Resources --> LangChain\n    Interceptors --> Tools\n    Sessions --> Client\n```\n\n---\n\n## Core Components\n\n### MultiServerMCPClient\n\nThe `MultiServerMCPClient` is the main entry point for connecting to multiple MCP servers. It manages connections and provides unified access to tools, prompts, and resources.\n\n资料来源：[langchain_mcp_adapters/client.py:1-50]()\n\n#### Key Responsibilities\n\n| Responsibility | Description |\n|----------------|-------------|\n| Connection Management | Manages multiple server connections |\n| Tool Loading | Loads and converts tools from all servers |\n| Resource Loading | Loads MCP resources as LangChain Blobs |\n| Prompt Loading | Loads prompts from MCP servers |\n| Session Handling | Provides session context managers for explicit control |\n\n#### Configuration Parameters\n\n| Parameter | Type | Description |\n|-----------|------|-------------|\n| `connections` | `dict[str, Connection]` | Server connection configurations |\n| `callbacks` | `Callbacks` | Event notification handlers |\n| `tool_interceptors` | `list[ToolCallInterceptor]` | Tool call interceptors |\n| `tool_name_prefix` | `bool` | Prefix tool names with server name |\n\n资料来源：[langchain_mcp_adapters/client.py:60-80]()\n\n---\n\n### Connection Types\n\nThe library supports multiple transport mechanisms for connecting to MCP servers:\n\n```mermaid\ngraph LR\n    A[Client] --> B[StdioConnection]\n    A --> C[StreamableHttpConnection]\n    A --> D[SSEConnection]\n    A --> E[WebsocketConnection]\n\n    B --> F[stdio_client]\n    C --> G[mcp.client.streamable_http]\n    D --> H[mcp.client.sse]\n    E --> I[mcp.client.websocket]\n```\n\n资料来源：[langchain_mcp_adapters/sessions.py:1-50]()\n\n#### Connection Types\n\n| Transport | Use Case | Configuration |\n|-----------|----------|---------------|\n| `stdio` | Local subprocess execution | `command`, `args`, `env`, `cwd` |\n| `http` | Streamable HTTP servers | `url`, `headers`, 
`timeout` |\n| `sse` | Server-Sent Events transport | `url`, `headers`, `timeout` |\n| `websocket` | WebSocket connections | `url`, `headers`, `timeout` |\n\n资料来源：[langchain_mcp_adapters/sessions.py:100-200]()\n\n---\n\n## Tool Conversion System\n\n### Architecture\n\n```mermaid\ngraph TD\n    subgraph \"MCP Side\"\n        MCPTool[MCP Tool]\n        MCPToolResult[MCPToolCallResult]\n    end\n\n    subgraph \"Conversion Layer\"\n        ContentConverter[_convert_mcp_content_to_lc_block]\n        ResultConverter[_convert_call_tool_result]\n        InterceptorChain[_build_interceptor_chain]\n    end\n\n    subgraph \"LangChain Side\"\n        StructuredTool[StructuredTool]\n        ToolMessage[ToolMessage]\n        Command[Command]\n        Artifact[MCPToolArtifact]\n    end\n\n    MCPTool --> load_mcp_tool\n    load_mcp_tool --> InterceptorChain\n    InterceptorChain --> MCPToolResult\n    MCPToolResult --> ResultConverter\n    ContentConverter --> StructuredTool\n    ResultConverter --> ToolMessage\n    ResultConverter --> Command\n    ResultConverter --> Artifact\n```\n\n### Content Type Mapping\n\nThe tool adapter converts MCP content types to LangChain content blocks:\n\n| MCP Content Type | LangChain Block | Description |\n|------------------|-----------------|-------------|\n| `TextContent` | `TextContentBlock` | Plain text content |\n| `ImageContent` | `ImageContentBlock` | Image with base64 data |\n| `ResourceLink` (image/*) | `ImageContentBlock` | Image via URL |\n| `ResourceLink` (other) | `FileContentBlock` | File via URL |\n| `EmbeddedResource` (text) | `TextContentBlock` | Embedded text resource |\n| `EmbeddedResource` (blob) | `ImageContentBlock` / `FileContentBlock` | Embedded binary resource |\n| `AudioContent` | `NotImplementedError` | Not yet supported |\n\n资料来源：[langchain_mcp_adapters/tools.py:100-150]()\n\n### Tool Call Execution Flow\n\n```mermaid\nsequenceDiagram\n    participant Agent as LangGraph Agent\n    participant Tool as 
StructuredTool\n    participant Interceptor as Interceptor Chain\n    participant Executor as Execute Tool\n    participant MCPSession as MCP Session\n    participant MCPServer as MCP Server\n\n    Agent->>Tool: invoke(args)\n    Tool->>Interceptor: MCPToolCallRequest\n    Interceptor->>Interceptor: Before interceptors\n    Interceptor->>Executor: MCPToolCallRequest\n    Executor->>MCPSession: session.call_tool()\n    MCPSession->>MCPServer: CallToolRequest\n    MCPServer-->>MCPSession: CallToolResult\n    MCPSession-->>Executor: CallToolResult\n    Executor-->>Interceptor: MCPToolCallResult\n    Interceptor->>Interceptor: After interceptors\n    Interceptor-->>Tool: MCPToolCallResult\n    Tool->>Tool: _convert_call_tool_result()\n    Tool-->>Agent: (content, artifact)\n```\n\n---\n\n## Interceptor System\n\n### Purpose\n\nThe interceptor system allows custom code to execute before and after tool calls, enabling:\n\n- Request modification\n- Response transformation\n- Logging and monitoring\n- Caching\n- Error handling\n- Conditional execution\n\n资料来源：[langchain_mcp_adapters/interceptors.py:1-30]()\n\n### Interceptor Interface\n\n```mermaid\ngraph TD\n    subgraph \"MCPToolCallRequest\"\n        ReqName[name]\n        ReqArgs[args]\n        ReqServer[server_name]\n        ReqHeaders[headers]\n        ReqRuntime[runtime]\n    end\n\n    subgraph \"MCPToolCallResult\"\n        ResContent[content]\n        ResIsError[isError]\n        ResStruct[structuredContent]\n    end\n\n    Interceptor[\"ToolCallInterceptor\"]\n    Interceptor --> Before[before_tool_call]\n    Interceptor --> After[after_tool_call]\n```\n\n### Request Override Support\n\n| Field | Modifiable | Description |\n|-------|------------|-------------|\n| `name` | Yes | Tool name override |\n| `args` | Yes | Arguments override |\n| `headers` | Yes | HTTP headers override |\n| `server_name` | No | Read-only context |\n| `runtime` | No | Read-only context 
|\n\n资料来源：[langchain_mcp_adapters/interceptors.py:50-70]()\n\n### Interceptor Chain Pattern\n\nThe system implements an onion-pattern interceptor chain:\n\n```mermaid\ngraph TD\n    A[Agent Request] --> B[Interceptor 1 - Outermost]\n    B --> C[Interceptor 2]\n    C --> D[Interceptor N]\n    D --> E[Execute Tool - Innermost]\n    E --> D2[Interceptor N Result]\n    D2 --> C2[Interceptor 2 Result]\n    C2 --> B2[Interceptor 1 Result]\n    B2 --> F[Agent Response]\n    \n    style B fill:#ff9999\n    style C fill:#ffcc99\n    style D fill:#ffff99\n    style E fill:#99ff99\n```\n\n资料来源：[langchain_mcp_adapters/tools.py:50-80]()\n\n---\n\n## Session Management\n\n### Session Creation Flow\n\n```mermaid\ngraph TD\n    A[MultiServerMCPClient] --> B{Connection Type}\n    \n    B -->|Stdio| C[StdioConnection]\n    B -->|HTTP| D[StreamableHttpConnection]\n    B -->|SSE| E[SSEConnection]\n    B -->|WebSocket| F[WebsocketConnection]\n    \n    C --> G[create_session]\n    D --> G\n    E --> G\n    F --> G\n    \n    G --> H[ClientSession]\n```\n\n### Session Factory\n\nThe `create_session()` function provides a unified interface for session creation:\n\n```python\nasync with create_session(connection) as session:\n    tools = await load_mcp_tools(session)\n```\n\n资料来源：[langchain_mcp_adapters/sessions.py:200-300]()\n\n---\n\n## Resource Management\n\n### Resource Conversion\n\n```mermaid\ngraph LR\n    MCP[MCPServer] -->|read_resource| Session[ClientSession]\n    Session -->|ResourceContents| Converter[convert_mcp_resource_to_langchain_blob]\n    \n    Converter --> Text[TextResourceContents] --> Blob1[\"Blob (text)\"]\n    Converter --> Blob[BlobResourceContents] --> Blob2[\"Blob (binary)\"]\n```\n\n### Supported Resource Types\n\n| MCP Type | LangChain Type | Notes |\n|----------|---------------|-------|\n| `TextResourceContents` | `Blob` | MIME type from resource |\n| `BlobResourceContents` | `Blob` | Base64 decoded data |\n\n资料来源：[langchain_mcp_adapters/resources.py:1-50]()\n\n---\n\n## Data Flow Architecture\n\n### Complete Request 
Flow\n\n```mermaid\ngraph TD\n    subgraph \"1. Initialization\"\n        A[MultiServerMCPClient] --> B[Load Tools]\n        B --> C[create_session]\n        C --> D[session.initialize]\n    end\n\n    subgraph \"2. Tool Invocation\"\n        E[Agent] --> F[StructuredTool.invoke]\n        F --> G[call_tool coroutine]\n        G --> H[Build Request]\n        H --> I[Apply Interceptors]\n    end\n\n    subgraph \"3. MCP Execution\"\n        I --> J[session.call_tool]\n        J --> K[MCP Server]\n        K --> L[CallToolResult]\n    end\n\n    subgraph \"4. Response Conversion\"\n        L --> M[_convert_call_tool_result]\n        M --> N[Content Blocks]\n        M --> O[MCPToolArtifact]\n        N --> P[ToolMessage/Command]\n    end\n\n    subgraph \"5. Return to Agent\"\n        P --> Q[Agent Response]\n        O --> R[ToolArtifact]\n    end\n```\n\n---\n\n## Type System\n\n### Result Types\n\nThe library defines conditional types based on LangGraph availability:\n\n```python\nif LANGGRAPH_PRESENT:\n    ConvertedToolResult = list[ToolMessageContentBlock] | ToolMessage | Command\nelse:\n    ConvertedToolResult = list[ToolMessageContentBlock] | ToolMessage\n```\n\n### MCPToolArtifact\n\nA TypedDict wrapping structured content from MCP tool calls:\n\n```python\nclass MCPToolArtifact(TypedDict):\n    structured_content: dict[str, Any]\n```\n\n资料来源：[langchain_mcp_adapters/tools.py:50-70]()\n\n---\n\n## Error Handling\n\n### Error Flow\n\n```mermaid\ngraph TD\n    A[MCP Tool Call] --> B{Result Type}\n    \n    B -->|isError = True| C[Extract Text Blocks]\n    C --> D[Join Error Parts]\n    D --> E[ToolException]\n    \n    B -->|isError = False| F[Convert Content]\n    F --> G[Return Result]\n    \n    B -->|AudioContent| H[NotImplementedError]\n```\n\n### Error Scenarios\n\n| Scenario | Handling | Source |\n|----------|----------|--------|\n| MCP server error | `ToolException` raised | tools.py:conversion |\n| Unknown content type | `ValueError` raised | 
tools.py:content |\n| Audio content | `NotImplementedError` raised | tools.py:audio |\n| Missing session | `ValueError` raised | tools.py:session |\n\n---\n\n## Integration Patterns\n\n### LangGraph Integration\n\n```mermaid\ngraph LR\n    A[StateGraph] --> B[call_model]\n    B --> C[tools_condition]\n    C --> D{Tool Node?}\n    D -->|Yes| E[ToolNode]\n    D -->|No| F[End]\n    E --> G[Tools]\n    G --> B\n```\n\n### Response Format\n\nThe tool uses `response_format=\"content_and_artifact\"` to return both content and structured data:\n\n```python\nreturn StructuredTool(\n    ...\n    response_format=\"content_and_artifact\",\n)\n```\n\n---\n\n## Configuration Reference\n\n### MultiServerMCPClient Configuration\n\n```python\nMultiServerMCPClient(\n    connections={\n        \"server_name\": {\n            \"transport\": \"stdio|http|sse|websocket\",\n            # Transport-specific options\n        }\n    },\n    callbacks=Callbacks(),      # Optional\n    tool_interceptors=[],       # Optional\n    tool_name_prefix=False      # Optional\n)\n```\n\n### Transport Configurations\n\n| Transport | Required Options | Optional Options |\n|-----------|-----------------|------------------|\n| `stdio` | `command`, `args` | `env`, `cwd`, `encoding` |\n| `http` | `url` | `headers`, `timeout` |\n| `sse` | `url` | `headers`, `timeout` |\n| `websocket` | `url` | `headers`, `timeout` |\n\n---\n\n## Summary\n\nThe langchain-mcp-adapters library implements a clean, layered architecture:\n\n1. **Client Layer**: `MultiServerMCPClient` provides high-level API for managing multiple server connections\n2. **Session Layer**: Multiple transport implementations (`Stdio`, `HTTP`, `SSE`, `WebSocket`) handle protocol details\n3. **Adapters Layer**: `tools.py`, `resources.py`, and `prompts.py` convert between MCP and LangChain formats\n4. **Interceptor Layer**: `interceptors.py` enables customization of the tool call lifecycle\n5. 
**Core Layer**: Type definitions and conversion utilities provide the foundation\n\nThe architecture prioritizes:\n- **Extensibility**: Through the interceptor system\n- **Flexibility**: Multiple transport and connection options\n- **Type Safety**: Comprehensive type annotations and Pydantic models\n- **Integration**: Seamless LangChain and LangGraph compatibility\n\n---\n\n<a id='page-package-structure'></a>\n\n## Package Structure\n\n### 相关页面\n\n相关主题：[System Architecture](#page-architecture), [Tool Conversion](#page-tool-conversion)\n\n<details>\n<summary>相关源码文件</summary>\n\n以下源码文件用于生成本页说明：\n\n- [langchain_mcp_adapters/tools.py](https://github.com/langchain-ai/langchain-mcp-adapters/blob/main/langchain_mcp_adapters/tools.py)\n- [langchain_mcp_adapters/client.py](https://github.com/langchain-ai/langchain-mcp-adapters/blob/main/langchain_mcp_adapters/client.py)\n- [langchain_mcp_adapters/sessions.py](https://github.com/langchain-ai/langchain-mcp-adapters/blob/main/langchain_mcp_adapters/sessions.py)\n- [langchain_mcp_adapters/callbacks.py](https://github.com/langchain-ai/langchain-mcp-adapters/blob/main/langchain_mcp_adapters/callbacks.py)\n- [langchain_mcp_adapters/interceptors.py](https://github.com/langchain-ai/langchain-mcp-adapters/blob/main/langchain_mcp_adapters/interceptors.py)\n- [langchain_mcp_adapters/resources.py](https://github.com/langchain-ai/langchain-mcp-adapters/blob/main/langchain_mcp_adapters/resources.py)\n- [langchain_mcp_adapters/prompts.py](https://github.com/langchain-ai/langchain-mcp-adapters/blob/main/langchain_mcp_adapters/prompts.py)\n</details>\n\n# Package Structure\n\n## Overview\n\nThe `langchain-mcp-adapters` package provides a lightweight wrapper that makes [Anthropic Model Context Protocol (MCP)](https://modelcontextprotocol.io/introduction) tools compatible with [LangChain](https://github.com/langchain-ai/langchain) and [LangGraph](https://github.com/langchain-ai/langgraph). 
The package bridges MCP servers with LangChain applications by converting MCP tools, prompts, and resources into LangChain-compatible formats.\n\n资料来源：[README.md]()\n\n## Package Architecture\n\nThe package follows a modular architecture with distinct responsibilities for each module:\n\n```mermaid\ngraph TD\n    subgraph \"langchain_mcp_adapters Package\"\n        A[\"__init__.py<br/>Package Entry\"] --> B[\"client.py<br/>MultiServerMCPClient\"]\n        B --> C[\"sessions.py<br/>Connection Management\"]\n        B --> D[\"tools.py<br/>Tool Conversion\"]\n        B --> E[\"resources.py<br/>Resource Conversion\"]\n        B --> F[\"prompts.py<br/>Prompt Loading\"]\n        C --> G[\"callbacks.py<br/>Callback Handling\"]\n        C --> H[\"interceptors.py<br/>Tool Call Interceptors\"]\n    end\n    \n    I[\"MCP Servers\"] --> C\n    D --> J[\"LangChain Tools\"]\n    E --> K[\"LangChain Blobs\"]\n    F --> L[\"LangChain Prompts\"]\n```\n\n## Directory Structure\n\n```\nlangchain_mcp_adapters/\n├── __init__.py          # Package initialization and exports\n├── client.py            # MultiServerMCPClient for managing multiple servers\n├── tools.py             # MCP to LangChain tool conversion\n├── resources.py         # MCP resource to Blob conversion\n├── prompts.py           # MCP prompt loading\n├── sessions.py          # Connection handling for different transports\n├── callbacks.py         # Event and notification callbacks\n└── interceptors.py     # Tool call interception and modification\n```\n\n## Core Modules\n\n### 1. 
tools.py — Tool Conversion\n\nThe `tools.py` module handles conversion of MCP tools to LangChain-compatible tools.\n\n| Component | Purpose |\n|-----------|---------|\n| `load_mcp_tools()` | Loads all available MCP tools and converts them to LangChain tools |\n| `_convert_mcp_content_to_lc_block()` | Converts MCP content blocks (Text, Image, Audio, Resource) to LangChain content blocks |\n| `_convert_call_tool_result()` | Converts MCP CallToolResult to LangChain tool result format |\n| `MCPToolArtifact` | TypedDict wrapping structured content from MCP tool calls |\n\n**Key Type Definitions:**\n\n```python\nToolMessageContentBlock = TextContentBlock | ImageContentBlock | FileContentBlock\n\nConvertedToolResult = list[ToolMessageContentBlock] | ToolMessage | Command  # if langgraph installed\n```\n\n资料来源：[langchain_mcp_adapters/tools.py:1-150]()\n\n### 2. client.py — MultiServerMCPClient\n\nThe `client.py` module provides the `MultiServerMCPClient` class for managing connections to multiple MCP servers.\n\n| Parameter | Type | Description |\n|-----------|------|-------------|\n| `connections` | `dict[str, Connection]` | Dictionary mapping server names to connection configurations |\n| `callbacks` | `Callbacks` | Optional callbacks for handling notifications |\n| `tool_interceptors` | `list[ToolCallInterceptor]` | Optional interceptors for tool call processing |\n| `tool_name_prefix` | `bool` | Prefix tool names with server name (default: `False`) |\n\n**Supported Connection Configurations:**\n\nThe client supports multiple transport types with their respective parameters:\n\n| Transport | Required Parameters |\n|-----------|---------------------|\n| `stdio` | `command`, `args` |\n| `http` | `url` |\n| `sse` | `url`, optional `headers` |\n| `streamable_http` | `url`, optional `headers` |\n| `websocket` | `url` |\n\n资料来源：[langchain_mcp_adapters/client.py:1-100]()\n\n### 3. 
sessions.py — Connection Management\n\nThe `sessions.py` module handles connection management for different MCP transport types.\n\n| Connection Type | Class | Purpose |\n|-----------------|-------|---------|\n| Stdio | `StdioConnection` | stdio-based communication with subprocess |\n| HTTP | `McpHttpClientFactory`, `StreamableHttpConnection` | HTTP-based communication |\n| SSE | `SSEConnection` | Server-Sent Events transport |\n| WebSocket | `WebsocketConnection` | WebSocket-based communication |\n\n**Session Creation Flow:**\n\n```mermaid\ngraph TD\n    A[\"create_session()\"] --> B{\"Connection Type?\"}\n    B -->|Stdio| C[\"_create_stdio_session()\"]\n    B -->|HTTP| D[\"_create_http_session()\"]\n    B -->|SSE| E[\"_create_sse_session()\"]\n    B -->|WebSocket| F[\"_create_websocket_session()\"]\n    \n    C --> G[\"ClientSession\"]\n    D --> G\n    E --> G\n    F --> G\n```\n\nThe `create_session()` function returns an async generator that yields an initialized `ClientSession`:\n\n```python\n@asynccontextmanager\nasync def create_session(connection: Connection) -> AsyncIterator[ClientSession]:\n```\n\n**Environment Variable Expansion:**\n\nSessions support environment variable expansion in configuration values using `${VAR}` or `${VAR:default}` syntax.\n\n资料来源：[langchain_mcp_adapters/sessions.py:1-100]()\n\n### 4. 
resources.py — Resource Conversion\n\nThe `resources.py` module converts MCP resources into LangChain Blob objects.\n\n| Function | Purpose |\n|----------|---------|\n| `convert_mcp_resource_to_langchain_blob()` | Converts a single MCP resource content to a Blob |\n| `get_mcp_resource()` | Fetches a single MCP resource by URI |\n| `load_mcp_resources()` | Loads multiple MCP resources and converts them to Blobs |\n\n**Supported Content Types:**\n\n| MCP Type | Conversion |\n|----------|------------|\n| `TextResourceContents` | Raw text data |\n| `BlobResourceContents` | Base64-decoded binary data |\n\n资料来源：[langchain_mcp_adapters/resources.py:1-80]()\n\n### 5. prompts.py — Prompt Loading\n\nThe `prompts.py` module handles loading MCP prompts into LangChain prompt formats. The module provides functionality to convert MCP prompt definitions into LangChain-compatible prompt structures.\n\n资料来源：[langchain_mcp_adapters/prompts.py:1-50]()\n\n### 6. callbacks.py — Callback Handling\n\nThe `callbacks.py` module provides callback infrastructure for handling notifications and events during MCP operations.\n\n| Component | Purpose |\n|-----------|---------|\n| `Callbacks` | Main callback container class |\n| `CallbackContext` | Context passed to callbacks with server/tool information |\n\nThe `CallbackContext` dataclass holds:\n\n```python\n@dataclass\nclass CallbackContext:\n    server_name: str | None = None\n    tool_name: str | None = None\n```\n\n资料来源：[langchain_mcp_adapters/callbacks.py:1-60]()\n\n### 7. 
interceptors.py — Tool Call Interceptors\n\nThe `interceptors.py` module provides interceptor interfaces for wrapping and controlling MCP tool call execution.\n\n| Component | Purpose |\n|-----------|---------|\n| `ToolCallInterceptor` | Protocol for intercepting tool calls |\n| `MCPToolCallRequest` | Request object passed to interceptors |\n| `_build_interceptor_chain()` | Builds composed handler chain with interceptors in onion pattern |\n\n**Interceptor Pattern:**\n\n```mermaid\ngraph TD\n    A[\"Request\"] --> B[\"Interceptor 1<br/>(Outer Layer)\"]\n    B --> C[\"Interceptor 2\"]\n    C --> D[\"...\"]\n    D --> E[\"Interceptor N\"]\n    E --> F[\"execute_tool<br/>(Innermost)\"]\n    F --> G[\"Result\"]\n    G --> E\n    G --> D\n    G --> C\n    G --> B\n    G --> H[\"Response\"]\n```\n\nThe interceptor chain follows an onion pattern where each interceptor wraps the next, allowing pre-processing before and post-processing after tool execution.\n\n**MCPToolCallRequest Structure:**\n\n```python\n@dataclass\nclass MCPToolCallRequest:\n    name: str\n    args: dict[str, Any]\n    server_name: str\n    headers: dict[str, Any] | None\n    runtime: Any\n```\n\n**Result Type (Conditional):**\n\n```python\nif LANGGRAPH_PRESENT:\n    MCPToolCallResult = CallToolResult | ToolMessage | Command\nelse:\n    MCPToolCallResult = CallToolResult | ToolMessage\n```\n\n资料来源：[langchain_mcp_adapters/interceptors.py:1-80]()\n\n## Data Flow Architecture\n\n### Tool Execution Flow\n\n```mermaid\nsequenceDiagram\n    participant User\n    participant MultiServerMCPClient\n    participant load_mcp_tools\n    participant ToolCallInterceptor\n    participant ClientSession\n    participant MCPServer\n    \n    User->>MultiServerMCPClient: get_tools()\n    MultiServerMCPClient->>load_mcp_tools: load_mcp_tools(session)\n    load_mcp_tools->>load_mcp_tools: Create StructuredTool\n    Note over load_mcp_tools: Register call_tool coroutine\n    \n    User->>StructuredTool: invoke(args)\n    
StructuredTool->>load_mcp_tools: call_tool(args)\n    \n    alt With Interceptors\n        load_mcp_tools->>ToolCallInterceptor: intercept(request)\n        ToolCallInterceptor->>ToolCallInterceptor: modify/validate\n    end\n    \n    load_mcp_tools->>ClientSession: call_tool(name, args)\n    ClientSession->>MCPServer: MCP CallToolRequest\n    MCPServer-->>ClientSession: CallToolResult\n    ClientSession-->>load_mcp_tools: CallToolResult\n    \n    alt Error Result\n        load_mcp_tools->>load_mcp_tools: Check isError flag\n        load_mcp_tools->>ToolException: raise\n    end\n    \n    load_mcp_tools->>_convert_call_tool_result: format result\n    Note over load_mcp_tools: Convert content blocks to LC format\n    \n    load_mcp_tools-->>User: (content, artifact)\n```\n\n### Content Conversion Flow\n\n```mermaid\ngraph LR\n    subgraph \"MCP Content Types\"\n        A[\"TextContent\"]\n        B[\"ImageContent\"]\n        C[\"AudioContent\"]\n        D[\"ResourceLink\"]\n        E[\"EmbeddedResource\"]\n    end\n    \n    subgraph \"Conversion Functions\"\n        F[\"_convert_mcp_content_to_lc_block\"]\n    end\n    \n    subgraph \"LangChain Content Blocks\"\n        G[\"TextContentBlock\"]\n        H[\"ImageContentBlock\"]\n        I[\"FileContentBlock\"]\n    end\n    \n    A --> F\n    B --> F\n    D --> F\n    E --> F\n    C -.->|NotImplementedError| F\n    \n    F --> G\n    F --> H\n    F --> I\n```\n\n## Type System\n\n### Conditional Type Definitions\n\nThe package uses conditional type definitions based on whether `langgraph` is installed:\n\n```python\ntry:\n    from langgraph.types import Command\n    LANGGRAPH_PRESENT = True\nexcept ImportError:\n    LANGGRAPH_PRESENT = False\n```\n\n| Type | Without langgraph | With langgraph |\n|------|-------------------|----------------|\n| `ConvertedToolResult` | `list[ToolMessageContentBlock] \\| ToolMessage` | `list[ToolMessageContentBlock] \\| ToolMessage \\| Command` |\n| `MCPToolCallResult` | 
`CallToolResult \\| ToolMessage` | `CallToolResult \\| ToolMessage \\| Command` |\n\n## Error Handling\n\n### Tool Exceptions\n\n| Error Type | Trigger | Behavior |\n|------------|---------|----------|\n| `ToolException` | MCP tool returns `isError: true` | Raised with joined error message from content blocks |\n| `NotImplementedError` | AudioContent conversion attempted | Audio content is not yet supported |\n| `ValueError` | Unknown content type | Unknown MCP content types raise ValueError |\n\n### Connection Errors\n\n| Error Type | Condition |\n|------------|-----------|\n| `ValueError` | Neither session nor connection provided to `load_mcp_tools()` |\n\n## Configuration Options\n\n### Tool Loading Options\n\n| Parameter | Type | Default | Description |\n|-----------|------|---------|-------------|\n| `session` | `ClientSession` | `None` | MCP client session |\n| `connection` | `Connection` | `None` | Connection config for new session |\n| `callbacks` | `Callbacks` | `None` | Event callbacks |\n| `tool_interceptors` | `list[ToolCallInterceptor]` | `None` | Tool call interceptors |\n| `server_name` | `str` | `None` | Server name for context |\n| `tool_name_prefix` | `bool` | `False` | Prefix tool names with server |\n\n### Client Configuration\n\n| Parameter | Type | Default | Description |\n|-----------|------|---------|-------------|\n| `connections` | `dict[str, Connection]` | `{}` | Server connection configs |\n| `callbacks` | `Callbacks` | `Callbacks()` | Default callbacks |\n| `tool_interceptors` | `list[ToolCallInterceptor]` | `[]` | Default interceptors |\n| `tool_name_prefix` | `bool` | `False` | Prefix tool names |\n\n## Dependencies\n\n### Required Dependencies\n\n| Package | Purpose |\n|---------|---------|\n| `langchain-core` | LangChain core functionality and BaseTool |\n| `mcp` | MCP client SDK |\n| `pydantic` | Data validation and model creation |\n\n### Optional Dependencies\n\n| Package | Feature |\n|---------|---------|\n| `langgraph` | 
LangGraph Command support, enhanced state management |\n\n## Package Exports\n\nThe `__init__.py` exports the main public API:\n\n- `MultiServerMCPClient` - Multi-server client class\n- `load_mcp_tools` - Tool loading function\n- `load_mcp_resources` - Resource loading function\n- `load_mcp_prompt` - Prompt loading function\n- `Callbacks`, `CallbackContext` - Callback infrastructure\n- `ToolCallInterceptor` - Interceptor protocol\n- `Connection` - Connection configuration types\n\n资料来源：[langchain_mcp_adapters/__init__.py:1-20]()\n\n---\n\n<a id='page-tool-conversion'></a>\n\n## Tool Conversion\n\n### 相关页面\n\n相关主题：[MultiServerMCPClient](#page-multiserver-client), [Transport Types](#page-transport-types), [Package Structure](#page-package-structure)\n\n<details>\n<summary>相关源码文件</summary>\n\n以下源码文件用于生成本页说明：\n\n- [langchain_mcp_adapters/tools.py](https://github.com/langchain-ai/langchain-mcp-adapters/blob/main/langchain_mcp_adapters/tools.py)\n- [langchain_mcp_adapters/client.py](https://github.com/langchain-ai/langchain-mcp-adapters/blob/main/langchain_mcp_adapters/client.py)\n- [langchain_mcp_adapters/interceptors.py](https://github.com/langchain-ai/langchain-mcp-adapters/blob/main/langchain_mcp_adapters/interceptors.py)\n- [langchain_mcp_adapters/resources.py](https://github.com/langchain-ai/langchain-mcp-adapters/blob/main/langchain_mcp_adapters/resources.py)\n- [README.md](https://github.com/langchain-ai/langchain-mcp-adapters/blob/main/README.md)\n</details>\n\n# Tool Conversion\n\n## Overview\n\nTool Conversion is the core mechanism that bridges **MCP (Model Context Protocol)** tools with **LangChain tools**, enabling interoperability between the MCP ecosystem and LangChain/LangGraph agents. 
This adapter transforms native MCP tool definitions into LangChain-compatible `StructuredTool` instances that can be used with LangChain agents and LangGraph state machines.\n\nThe conversion layer handles:\n\n- Tool signature translation (MCP schema → LangChain Pydantic schema)\n- Tool execution with proper session context\n- Content block conversion (MCP content types → LangChain content blocks)\n- Error handling and artifact wrapping\n- Interceptor chain support for middleware patterns\n\n资料来源：[langchain_mcp_adapters/tools.py:1-30]()\n\n## Architecture\n\n```mermaid\ngraph TD\n    subgraph \"MCP Layer\"\n        MCPTool[MCP Tool Definition]\n        MCPToolCallResult[MCP CallToolResult]\n    end\n    \n    subgraph \"Adapter Layer\"\n        convert_mcp_tool[convert_mcp_tool_to_langchain_tool]\n        load_mcp_tools[load_mcp_tools]\n        interceptor_chain[Interceptor Chain]\n        content_converter[_convert_mcp_content_to_lc_block]\n        result_converter[_convert_call_tool_result]\n    end\n    \n    subgraph \"LangChain Layer\"\n        StructuredTool[StructuredTool]\n        ToolMessage[ToolMessage]\n        Command[Command<br/>langgraph.types]\n        MCPToolArtifact[MCPToolArtifact]\n    end\n    \n    MCPTool --> convert_mcp_tool\n    MCPTool --> load_mcp_tools\n    load_mcp_tools --> convert_mcp_tool\n    convert_mcp_tool --> interceptor_chain\n    interceptor_chain --> content_converter\n    MCPToolCallResult --> result_converter\n    result_converter --> ToolMessage\n    result_converter --> Command\n    result_converter --> MCPToolArtifact\n```\n\n### Conversion Flow\n\n```mermaid\nsequenceDiagram\n    participant Agent as LangChain Agent\n    participant LC_Tool as LangChain StructuredTool\n    participant Interceptor as ToolCallInterceptor\n    participant MCP_Session as MCP ClientSession\n    participant MCP_Server as MCP Server\n\n    Agent->>LC_Tool: invoke(name, args)\n    LC_Tool->>Interceptor: MCPToolCallRequest\n    
Interceptor->>Interceptor: preprocess()\n    Interceptor->>MCP_Session: call_tool()\n    MCP_Session->>MCP_Server: protocol call\n    MCP_Server-->>MCP_Session: CallToolResult\n    MCP_Session-->>Interceptor: MCPToolCallResult\n    Interceptor->>Interceptor: postprocess()\n    Interceptor-->>LC_Tool: Converted Result\n    LC_Tool->>LC_Tool: _convert_call_tool_result()\n    LC_Tool-->>Agent: (content, artifact)\n```\n\n资料来源：[langchain_mcp_adapters/tools.py:140-220]()\n\n## Core Functions\n\n### load_mcp_tools\n\nLoads all available MCP tools from a session and converts them to LangChain tools.\n\n```python\nasync def load_mcp_tools(\n    session: ClientSession | None,\n    *,\n    connection: Connection | None = None,\n    callbacks: Callbacks | None = None,\n    tool_interceptors: list[ToolCallInterceptor] | None = None,\n    server_name: str | None = None,\n    tool_name_prefix: bool = False,\n) -> list[BaseTool]\n```\n\n**Parameters:**\n\n| Parameter | Type | Default | Description |\n|-----------|------|---------|-------------|\n| `session` | `ClientSession \\| None` | required | MCP client session. If `None`, `connection` must be provided. |\n| `connection` | `Connection \\| None` | `None` | Connection config to create a new session if session is `None`. |\n| `callbacks` | `Callbacks \\| None` | `None` | Optional callbacks for handling notifications and events. |\n| `tool_interceptors` | `list[ToolCallInterceptor] \\| None` | `None` | Optional list of interceptors for tool call processing. |\n| `server_name` | `str \\| None` | `None` | Name of the server these tools belong to. |\n| `tool_name_prefix` | `bool` | `False` | If `True`, tool names are prefixed with server name (e.g., `\"weather_search\"`). 
|\n\n资料来源：[langchain_mcp_adapters/tools.py:219-270]()\n\n### convert_mcp_tool_to_langchain_tool\n\nConverts a single MCP tool to a LangChain `StructuredTool`.\n\n```python\ndef convert_mcp_tool_to_langchain_tool(\n    session: ClientSession | None,\n    tool: MCPTool,\n    *,\n    connection: Connection | None = None,\n    callbacks: Callbacks | None = None,\n    tool_interceptors: list[ToolCallInterceptor] | None = None,\n    server_name: str | None = None,\n    tool_name_prefix: bool = False,\n) -> BaseTool\n```\n\n**Returns:**\nA LangChain `StructuredTool` with `response_format=\"content_and_artifact\"`.\n\n**Key Implementation Details:**\n\n- Creates an async `call_tool` coroutine that handles execution\n- Injects `runtime` via `InjectedToolArg` for LangGraph compatibility\n- Supports `ToolCallInterceptor` chain via `_build_interceptor_chain()`\n- Wraps errors as `ToolException`\n- Extracts `structuredContent` into `MCPToolArtifact`\n\n资料来源：[langchain_mcp_adapters/tools.py:150-218]()\n\n## Content Block Conversion\n\nThe adapter converts MCP content types to LangChain content blocks for uniform handling.\n\n### Supported Conversions\n\n| MCP Content Type | LangChain Content Block | Notes |\n|------------------|-------------------------|-------|\n| `TextContent` | `{\"type\": \"text\", \"text\": ...}` | Direct text conversion |\n| `ImageContent` | `{\"type\": \"image\", \"base64\": ..., \"mime_type\": ...}` | Base64 encoded image data |\n| `ResourceLink` (image/*) | `{\"type\": \"image\", \"url\": ..., \"mime_type\": ...}` | Image via URI reference |\n| `ResourceLink` (other) | `{\"type\": \"file\", \"url\": ..., \"mime_type\": ...}` | Generic file via URI reference |\n| `EmbeddedResource` (text) | `{\"type\": \"text\", \"text\": ...}` | Text from embedded resource |\n| `EmbeddedResource` (blob) | Image or file block | Based on MIME type |\n| `AudioContent` | — | Raises `NotImplementedError` |\n\n资料来源：[langchain_mcp_adapters/tools.py:70-115]()\n\n### 
_convert_mcp_content_to_lc_block\n\n```python\ndef _convert_mcp_content_to_lc_block(\n    content: ContentBlock,\n) -> ToolMessageContentBlock\n```\n\nThis function handles the 1:1 mapping between MCP content types and LangChain content blocks.\n\n```mermaid\ngraph LR\n    A[ContentBlock] --> B{Type Check}\n    B -->|TextContent| C[create_text_block]\n    B -->|ImageContent| D[create_image_block]\n    B -->|ResourceLink| E{MIME type?}\n    B -->|EmbeddedResource| F{Resource Type?}\n    B -->|AudioContent| G[NotImplementedError]\n    \n    E -->|image/*| H[create_image_block<br/>url=uri]\n    E -->|other| I[create_file_block<br/>url=uri]\n    \n    F -->|TextResourceContents| J[create_text_block]\n    F -->|BlobResourceContents| K{MIME type?}\n    K -->|image/*| L[create_image_block]\n    K -->|other| M[create_file_block]\n```\n\n资料来源：[langchain_mcp_adapters/tools.py:70-115]()\n\n## Result Conversion\n\n### _convert_call_tool_result\n\nConverts the result of an MCP tool call to LangChain format with support for multiple return types.\n\n```python\ndef _convert_call_tool_result(\n    call_tool_result: MCPToolCallResult,\n) -> tuple[ConvertedToolResult, MCPToolArtifact | None]\n```\n\n**Return Types:**\n\nThe function returns a tuple where:\n- **First element**: The converted content\n- **Second element**: The artifact (if any)\n\n**Content Types Based on Input:**\n\n| Input Type | Output Content | Output Artifact |\n|------------|----------------|-----------------|\n| `ToolMessage` | `ToolMessage` (passthrough) | `None` |\n| `Command` (LangGraph) | `Command` (passthrough) | `None` |\n| `CallToolResult` (MCP) | `list[ToolMessageContentBlock]` | `MCPToolArtifact` (if `structuredContent` present) |\n\n资料来源：[langchain_mcp_adapters/tools.py:117-145]()\n\n### MCPToolArtifact\n\nA TypedDict wrapping structured content from MCP tool calls:\n\n```python\nclass MCPToolArtifact(TypedDict):\n    \"\"\"Artifact returned from MCP tool calls.\"\"\"\n    structured_content: 
dict[str, Any]\n```\n\nThis allows downstream consumers to access MCP-specific structured data while maintaining compatibility with LangChain's tool result format.\n\n资料来源：[langchain_mcp_adapters/tools.py:55-68]()\n\n## Interceptor Chain\n\nThe interceptor system implements the **onion pattern** for middleware-like processing of tool calls.\n\n### _build_interceptor_chain\n\n```python\ndef _build_interceptor_chain(\n    base_handler: Callable[[MCPToolCallRequest], Awaitable[MCPToolCallResult]],\n    tool_interceptors: list[ToolCallInterceptor] | None,\n) -> Callable[[MCPToolCallRequest], Awaitable[MCPToolCallResult]]\n```\n\n**Execution Order:**\n1. Interceptors are applied in **reverse order** (last in list = outermost layer)\n2. Each interceptor wraps the previous handler\n3. Request flows inward through interceptors, response flows outward\n\n```mermaid\ngraph TD\n    subgraph \"Request Flow (inward)\"\n        R1[Request] --> I1[Interceptor 1<br/>outermost]\n        I1 --> I2[Interceptor 2]\n        I2 --> I3[Interceptor N<br/>innermost]\n        I3 --> BH[Base Handler<br/>execute_tool]\n    end\n    \n    subgraph \"Response Flow (outward)\"\n        BH --> RT1[Response]\n        RT1 --> I4[Interceptor N]\n        I4 --> I5[Interceptor 2]\n        I5 --> I6[Interceptor 1]\n        I6 --> R2[Response]\n    end\n```\n\n资料来源：[langchain_mcp_adapters/tools.py:147-149]()\n\n### ToolCallInterceptor Interface\n\nInterceptors implement the `ToolCallInterceptor` protocol:\n\n```python\n@runtime_checkable\nclass ToolCallInterceptor(Protocol):\n    async def intercept(\n        self,\n        request: MCPToolCallRequest,\n        current_handler: Callable[[MCPToolCallRequest], Awaitable[MCPToolCallResult]],\n    ) -> MCPToolCallResult:\n        ...\n```\n\n**Usage Pattern:**\n\n```python\nclass MyInterceptor:\n    async def intercept(\n        self,\n        request: MCPToolCallRequest,\n        current_handler: Callable,\n    ) -> MCPToolCallResult:\n        # 
Pre-processing\n        modified_request = request.override(args={\"modified\": True})\n        \n        # Call next handler\n        result = await current_handler(modified_request)\n        \n        # Post-processing\n        return result\n```\n\n资料来源：[langchain_mcp_adapters/interceptors.py:1-50]()\n\n## Type Definitions\n\n### ConvertedToolResult\n\nConditional type based on LangGraph availability:\n\n```python\nif LANGGRAPH_PRESENT:\n    ConvertedToolResult = list[ToolMessageContentBlock] | ToolMessage | Command\nelse:\n    ConvertedToolResult = list[ToolMessageContentBlock] | ToolMessage\n```\n\n### ToolMessageContentBlock\n\n```python\nToolMessageContentBlock = TextContentBlock | ImageContentBlock | FileContentBlock\n```\n\nImport sourced from `langchain_core.messages.content`:\n\n资料来源：[langchain_mcp_adapters/tools.py:15-35]()\n\n## Configuration Options\n\n### Tool Name Prefixing\n\nWhen connecting to multiple MCP servers, tools may have name conflicts. Enable prefixing:\n\n```python\nclient = MultiServerMCPClient(\n    {\n        \"math\": {\n            \"command\": \"python\",\n            \"args\": [\"/path/to/math_server.py\"],\n            \"transport\": \"stdio\",\n        },\n        \"weather\": {\n            \"url\": \"http://localhost:8000/mcp\",\n            \"transport\": \"http\",\n        }\n    }\n)\n\n# With prefix: tool names become \"math_add\", \"weather_get_weather\"\ntools = await client.get_tools(tool_name_prefix=True)\n```\n\n### Session Management\n\n| Mode | Description | Use Case |\n|------|-------------|----------|\n| **Shared Session** | Single session for all tools | Single server, multiple tools |\n| **Per-Tool Session** | New session created per call | Stateless servers |\n| **Explicit Session** | User-managed session | Custom lifecycle control |\n\n资料来源：[langchain_mcp_adapters/client.py:1-80]()\n\n## Error Handling\n\n### ToolException\n\nTool call errors are wrapped in `ToolException`:\n\n```python\nif 
call_tool_result.isError:\n    error_parts = []\n    for item in tool_content:\n        if isinstance(item, str):\n            error_parts.append(item)\n        elif isinstance(item, dict) and item.get(\"type\") == \"text\":\n            error_parts.append(item.get(\"text\", \"\"))\n    error_msg = \"\\n\".join(error_parts) if error_parts else str(tool_content)\n    raise ToolException(error_msg)\n```\n\n资料来源：[langchain_mcp_adapters/tools.py:130-140]()\n\n## Usage Examples\n\n### Basic Tool Loading\n\n```python\nfrom mcp import ClientSession\nfrom mcp.client.stdio import stdio_client\n\nfrom langchain_mcp_adapters.tools import load_mcp_tools\n\nserver_params = StdioServerParameters(\n    command=\"python\",\n    args=[\"/path/to/math_server.py\"],\n)\n\nasync with stdio_client(server_params) as (read, write):\n    async with ClientSession(read, write) as session:\n        await session.initialize()\n        tools = await load_mcp_tools(session)\n```\n\n### With LangGraph Agent\n\n```python\nfrom langchain_mcp_adapters.client import MultiServerMCPClient\nfrom langgraph.graph import StateGraph, MessagesState, START\nfrom langgraph.prebuilt import ToolNode, tools_condition\n\nclient = MultiServerMCPClient({\n    \"math\": {\n        \"command\": \"python\",\n        \"args\": [\"/path/to/math_server.py\"],\n        \"transport\": \"stdio\",\n    }\n})\ntools = await client.get_tools()\n\nbuilder = StateGraph(MessagesState)\nbuilder.add_node(\"call_model\", call_model)\nbuilder.add_node(\"tools\", ToolNode(tools))\nbuilder.add_edge(START, \"call_model\")\nbuilder.add_conditional_edges(\"call_model\", tools_condition)\n```\n\n资料来源：[README.md:1-100]()\n\n## See Also\n\n- [MultiServerMCPClient](../client) — Client for connecting to multiple MCP servers\n- [Tool Call Interceptors](../interceptors) — Middleware for tool call processing\n- [Resource Conversion](../resources) — Converting MCP resources to LangChain Blobs\n\n---\n\n<a id='page-multiserver-client'></a>\n\n## 
MultiServerMCPClient\n\n### 相关页面\n\n相关主题：[Tool Conversion](#page-tool-conversion), [Transport Types](#page-transport-types), [Callbacks](#page-callbacks)\n\n<details>\n<summary>相关源码文件</summary>\n\n以下源码文件用于生成本页说明：\n\n- [langchain_mcp_adapters/client.py](https://github.com/langchain-ai/langchain-mcp-adapters/blob/main/langchain_mcp_adapters/client.py)\n- [langchain_mcp_adapters/tools.py](https://github.com/langchain-ai/langchain-mcp-adapters/blob/main/langchain_mcp_adapters/tools.py)\n- [langchain_mcp_adapters/resources.py](https://github.com/langchain-ai/langchain-mcp-adapters/blob/main/langchain_mcp_adapters/resources.py)\n- [langchain_mcp_adapters/sessions.py](https://github.com/langchain-ai/langchain-mcp-adapters/blob/main/langchain_mcp_adapters/sessions.py)\n- [langchain_mcp_adapters/interceptors.py](https://github.com/langchain-ai/langchain-mcp-adapters/blob/main/langchain_mcp_adapters/interceptors.py)\n- [README.md](https://github.com/langchain-ai/langchain-mcp-adapters/blob/main/README.md)\n</details>\n\n# MultiServerMCPClient\n\nThe `MultiServerMCPClient` is the primary entry point for connecting LangChain applications to multiple Model Context Protocol (MCP) servers. It provides a unified interface to manage connections, load tools, resources, and prompts from various MCP server implementations.\n\n## Overview\n\n`MultiServerMCPClient` serves as a central client that abstracts the complexity of connecting to multiple MCP servers simultaneously. 
It handles session management, tool conversion, and integrates seamlessly with LangChain and LangGraph agents.\n\n```mermaid\ngraph TD\n    A[MultiServerMCPClient] --> B[Connection Manager]\n    B --> C[StdioConnection]\n    B --> D[SSEConnection]\n    B --> E[StreamableHttpConnection]\n    B --> F[WebsocketConnection]\n    G[load_mcp_tools] --> H[LangChain Tools]\n    I[load_mcp_resources] --> J[LangChain Blobs]\n    K[load_mcp_prompts] --> L[LangChain Messages]\n```\n\n## Initialization\n\n### Constructor Parameters\n\n| Parameter | Type | Default | Description |\n|-----------|------|---------|-------------|\n| `connections` | `dict[str, Connection] \\| None` | `None` | Mapping of server names to connection configurations |\n| `callbacks` | `Callbacks \\| None` | `None` | Optional callbacks for notifications and events |\n| `tool_interceptors` | `list[ToolCallInterceptor] \\| None` | `None` | Optional interceptors for modifying tool requests/responses |\n| `tool_name_prefix` | `bool` | `False` | Prefix tool names with server name to avoid conflicts |\n\n### Connection Configuration\n\nEach server in the `connections` dictionary requires a transport-specific configuration:\n\n| Transport | Required Parameters |\n|-----------|---------------------|\n| `stdio` | `command`, `args` |\n| `http` | `url` |\n| `sse` | `url` |\n| `streamable_http` | `url` |\n| `websocket` | `url` |\n\n**资料来源**：[client.py:51-76]()\n\n## Connection Types\n\nThe library supports multiple transport protocols for connecting to MCP servers.\n\n### StdioConnection\n\nUsed for spawning local MCP server processes via standard I/O.\n\n```python\nclient = MultiServerMCPClient(\n    {\n        \"math\": {\n            \"command\": \"python\",\n            \"args\": [\"/path/to/math_server.py\"],\n            \"transport\": \"stdio\",\n        }\n    }\n)\n```\n\n**资料来源**：[README.md:82-90]()\n\n### HTTP/Streamable HTTP Connection\n\nUsed for connecting to HTTP-based MCP servers, including stateless 
streamable HTTP servers.\n\n```python\nclient = MultiServerMCPClient(\n    {\n        \"weather\": {\n            \"url\": \"http://localhost:8000/mcp\",\n            \"transport\": \"http\",\n        }\n    }\n)\n```\n\n**资料来源**：[README.md:37-45]()\n\n### WebSocket Connection\n\nFor WebSocket-based MCP server connections.\n\n### SSE Connection\n\nServer-Sent Events transport for MCP server communication.\n\n## Usage Patterns\n\n### Basic Usage with get_tools()\n\nThe simplest pattern starts a new session for each tool call:\n\n```python\nfrom langchain_mcp_adapters.client import MultiServerMCPClient\n\nclient = MultiServerMCPClient(\n    {\n        \"math\": {\n            \"command\": \"python\",\n            \"args\": [\"/path/to/math_server.py\"],\n            \"transport\": \"stdio\",\n        },\n        \"weather\": {\n            \"url\": \"http://localhost:8000/mcp\",\n            \"transport\": \"http\",\n        }\n    }\n)\nall_tools = await client.get_tools()\n```\n\n**资料来源**：[client.py:51-74]()\n\n### Explicit Session Management\n\nFor more control, use explicit session management:\n\n```python\nfrom langchain_mcp_adapters.client import MultiServerMCPClient\nfrom langchain_mcp_adapters.tools import load_mcp_tools\n\nclient = MultiServerMCPClient({...})\nasync with client.session(\"math\") as session:\n    tools = await load_mcp_tools(session)\n```\n\n**资料来源**：[client.py:75-81]()\n\n### With LangGraph StateGraph\n\nIntegration with LangGraph for agent-based workflows:\n\n```python\nfrom langchain_mcp_adapters.client import MultiServerMCPClient\nfrom langgraph.graph import StateGraph, MessagesState, START\nfrom langgraph.prebuilt import ToolNode, tools_condition\nfrom langchain.chat_models import init_chat_model\n\nmodel = init_chat_model(\"openai:gpt-4.1\")\n\nclient = MultiServerMCPClient({...})\ntools = await client.get_tools()\n\ndef call_model(state: MessagesState):\n    response = model.bind_tools(tools).invoke(state[\"messages\"])\n    return 
{\"messages\": response}\n\nbuilder = StateGraph(MessagesState)\nbuilder.add_node(call_model)\nbuilder.add_node(ToolNode(tools))\nbuilder.add_edge(START, \"call_model\")\nbuilder.add_conditional_edges(\"call_model\", tools_condition)\n```\n\n**资料来源**：[README.md:103-126]()\n\n## Tool Name Prefixing\n\nWhen `tool_name_prefix=True`, tool names are prefixed with the server name using an underscore separator:\n\n```python\n# With prefix: \"weather_search\"\n# Without prefix: \"search\"\nclient = MultiServerMCPClient(\n    {...},\n    tool_name_prefix=True\n)\n```\n\nThis helps avoid conflicts when multiple servers expose tools with identical names.\n\n**资料来源**：[client.py:48-51]()\n\n## Runtime Headers\n\nFor HTTP and SSE transports, you can pass custom headers for authentication or tracing:\n\n```python\nclient = MultiServerMCPClient(\n    {\n        \"weather\": {\n            \"transport\": \"http\",\n            \"url\": \"http://localhost:8000/mcp\",\n            \"headers\": {\n                \"Authorization\": \"Bearer YOUR_TOKEN\",\n                \"X-Custom-Header\": \"custom-value\"\n            },\n        }\n    }\n)\n```\n\n> Only `sse` and `http` transports support runtime headers.\n\n**资料来源**：[README.md:129-152]()\n\n## Tool Interceptors\n\nTool call interceptors allow you to modify requests and responses in an onion-pattern chain:\n\n```python\nfrom langchain_mcp_adapters.interceptors import (\n    MCPToolCallRequest,\n    MCPToolCallResult,\n    ToolCallInterceptor\n)\n\nclass CustomInterceptor(ToolCallInterceptor):\n    async def intercept(\n        self, request: MCPToolCallRequest, handler\n    ) -> MCPToolCallResult:\n        # Modify request\n        modified_request = request.override(args={\"modified\": True})\n        # Process and potentially modify response\n        result = await handler(modified_request)\n        return result\n\nclient = MultiServerMCPClient(\n    {...},\n    
tool_interceptors=[CustomInterceptor()]\n)\n```\n\n**资料来源**：[interceptors.py:1-55]()\n\n## MCPToolArtifact\n\nTool call results that include structured content are wrapped in an `MCPToolArtifact`:\n\n```python\nclass MCPToolArtifact(TypedDict):\n    \"\"\"Artifact returned from MCP tool calls.\n    \n    Attributes:\n        structured_content: The structured content returned by the MCP tool,\n            corresponding to the structuredContent field in CallToolResult.\n    \"\"\"\n    structured_content: dict[str, Any]\n```\n\n**资料来源**：[tools.py:70-84]()\n\n## Content Conversion\n\nThe library automatically converts MCP content blocks to LangChain content blocks:\n\n| MCP Type | LangChain Type |\n|----------|----------------|\n| `TextContent` | `{\"type\": \"text\", \"text\": ...}` |\n| `ImageContent` | `{\"type\": \"image\", ...}` |\n| `FileContentBlock` | `{\"type\": \"file\", ...}` |\n| `ResourceLink` | `{\"type\": \"image\"}` or `{\"type\": \"file\"}` |\n| `EmbeddedResource` | `{\"type\": \"text\"}`, `{\"type\": \"image\"}`, or `{\"type\": \"file\"}` |\n| `AudioContent` | `NotImplementedError` |\n\n**资料来源**：[tools.py:86-126]()\n\n## Limitations\n\n### Async Context Manager Deprecation\n\nAs of version 0.1.0, `MultiServerMCPClient` cannot be used as an async context manager:\n\n```python\n# This is NOT allowed:\n# async with MultiServerMCPClient(...) 
as client:\n#     ...\n\n# Instead use:\nclient = MultiServerMCPClient(...)\ntools = await client.get_tools()\n```\n\n**资料来源**：[client.py:55-68]()\n\n## Architecture\n\n```mermaid\nsequenceDiagram\n    participant Client as MultiServerMCPClient\n    participant Session as ClientSession\n    participant Loader as load_mcp_tools\n    participant Converter as Content Converter\n    participant LC as LangChain Tool\n\n    Client->>Session: create_session()\n    Session->>Loader: session.list_tools()\n    Loader->>Session: tool definitions\n    Session-->>Converter: Tool metadata\n    Converter->>LC: StructuredTool\n    LC-->>Client: BaseTool list\n```\n\n## See Also\n\n- [load_mcp_tools()](langchain_mcp_adapters/tools.md) - Loading and converting MCP tools\n- [load_mcp_resources()](langchain_mcp_adapters/resources.md) - Loading MCP resources as Blobs\n- [load_mcp_prompts()](langchain_mcp_adapters/prompts.md) - Loading MCP prompts as Messages\n- [ToolCallInterceptor](langchain_mcp_adapters/interceptors.md) - Intercepting tool calls\n\n---\n\n<a id='page-transport-types'></a>\n\n## Transport Types\n\n### 相关页面\n\n相关主题：[MultiServerMCPClient](#page-multiserver-client)\n\n<details>\n<summary>相关源码文件</summary>\n\n以下源码文件用于生成本页说明：\n\n- [langchain_mcp_adapters/sessions.py](https://github.com/langchain-ai/langchain-mcp-adapters/blob/main/langchain_mcp_adapters/sessions.py)\n- [README.md](https://github.com/langchain-ai/langchain-mcp-adapters/blob/main/README.md)\n- [langchain_mcp_adapters/client.py](https://github.com/langchain-ai/langchain-mcp-adapters/blob/main/langchain_mcp_adapters/client.py)\n- [langchain_mcp_adapters/tools.py](https://github.com/langchain-ai/langchain-mcp-adapters/blob/main/langchain_mcp_adapters/tools.py)\n</details>\n\n# Transport Types\n\nLangChain MCP Adapters supports multiple transport types for connecting to MCP (Model Context Protocol) servers. 
Transport types define the communication mechanism used between the client and server, enabling flexibility in different deployment scenarios.\n\n## Overview\n\nTransport types in langchain-mcp-adapters determine how MCP client sessions communicate with MCP servers. The library provides native support for four primary transport mechanisms, each suited for different use cases ranging from local development to production deployments.\n\n```mermaid\ngraph TD\n    A[MultiServerMCPClient] --> B{Transport Type}\n    B --> C[stdio]\n    B --> D[http]\n    B --> E[sse]\n    B --> F[websocket]\n    \n    C --> G[Local/Subprocess]\n    D --> H[HTTP Server]\n    E --> I[HTTP + SSE Events]\n    F --> J[WebSocket Server]\n    \n    G --> K[StdioServerParameters]\n    H --> L[URL + Headers]\n    I --> M[URL + Headers]\n    J --> N[URL]\n```\n\n资料来源：[langchain_mcp_adapters/client.py:1-50]()\n\n## Supported Transport Types\n\n| Transport | Use Case | Session Creation | Header Support | Timeout Config |\n|-----------|----------|------------------|----------------|----------------|\n| `stdio` | Local subprocesses, development | In-process via stdin/stdout | N/A | Encoding handlers |\n| `http` | Remote HTTP servers, stateless | Streamable HTTP client | ✅ | Request timeout |\n| `sse` | Server-Sent Events servers | HTTP + SSE endpoint | ✅ | SSE read timeout |\n| `websocket` | Real-time bidirectional | WebSocket connection | ❌ | Connection timeout |\n\n资料来源：[langchain_mcp_adapters/sessions.py:1-100]()\n\n## Stdio Transport\n\nThe `stdio` transport uses standard input/output streams for communication. 
This is ideal for running MCP servers as local subprocesses or when the server runs on the same machine as the client.\n\n### Configuration Parameters\n\n| Parameter | Type | Required | Description |\n|-----------|------|----------|-------------|\n| `command` | `str` | ✅ | Executable command (e.g., `\"python\"`, `\"node\"`) |\n| `args` | `list[str]` | ❌ | Command-line arguments |\n| `env` | `dict[str, str]` | ❌ | Environment variables |\n| `cwd` | `str` | ❌ | Working directory |\n| `encoding` | `str` | ❌ | Character encoding (default: system default) |\n| `encoding_error_handler` | `str` | ❌ | How to handle encoding errors |\n| `session_kwargs` | `dict` | ❌ | Additional `ClientSession` arguments |\n\n资料来源：[langchain_mcp_adapters/sessions.py:60-90]()\n\n### Environment Variable Expansion\n\nThe `env` parameter supports environment variable expansion in variable values:\n\n```python\nenv = {\n    \"API_KEY\": \"${MY_API_KEY}\",  # Expands from current environment\n    \"STATIC\": \"custom-value\"     # Passed through unchanged\n}\n```\n\nVariable references use the pattern `${VAR_NAME}`. Only values (not keys) are expanded. Unexpanded references trigger a warning.\n\n资料来源：[langchain_mcp_adapters/sessions.py:80-85]()\n\n### Example: Stdio Connection\n\n```python\nfrom langchain_mcp_adapters.client import MultiServerMCPClient\n\nclient = MultiServerMCPClient({\n    \"math\": {\n        \"command\": \"python\",\n        \"args\": [\"/path/to/math_server.py\"],\n        \"transport\": \"stdio\",\n    }\n})\ntools = await client.get_tools()\n```\n\n资料来源：[README.md:80-100]()\n\n## HTTP Transport\n\nThe `http` transport connects to MCP servers via HTTP protocol. 
This is designed for remote server deployments and supports stateless request/response patterns.\n\n### Configuration Parameters\n\n| Parameter | Type | Required | Description |\n|-----------|------|----------|-------------|\n| `url` | `str` | ✅ | Full URL to the MCP server endpoint |\n| `headers` | `dict[str, str]` | ❌ | HTTP headers sent with each request |\n| `timeout` | `float` | ❌ | Request timeout in seconds (default: `60.0`) |\n\n### Header Support\n\nHTTP transport supports runtime headers, enabling dynamic authentication and authorization:\n\n```python\nfrom langchain_mcp_adapters.client import MultiServerMCPClient\n\nclient = MultiServerMCPClient({\n    \"weather\": {\n        \"url\": \"http://localhost:8000/mcp\",\n        \"transport\": \"http\",\n        \"headers\": {\n            \"Authorization\": \"Bearer ${API_TOKEN}\",\n            \"X-Custom-Header\": \"custom-value\"\n        }\n    }\n})\n```\n\n> Only `sse` and `http` transports support runtime headers.\n\n资料来源：[README.md:110-130]()\n\n### Example: HTTP Connection\n\n```bash\n# Start a streamable HTTP server\ncd examples/servers/streamable-http-stateless/\nuv run mcp-simple-streamablehttp-stateless --port 3000\n```\n\n```python\nfrom mcp import ClientSession\nfrom mcp.client.streamable_http import streamablehttp_client\nfrom langchain_mcp_adapters.tools import load_mcp_tools\n\nasync with streamablehttp_client(\"http://localhost:3000/mcp\") as (read, write, _):\n    async with ClientSession(read, write) as session:\n        await session.initialize()\n        tools = await load_mcp_tools(session)\n```\n\n资料来源：[README.md:35-55]()\n\n## SSE Transport\n\nSSE (Server-Sent Events) transport combines HTTP requests with server-side event streaming. 
This is useful when the MCP server needs to push updates or progress notifications to the client.\n\n### Configuration Parameters\n\n| Parameter | Type | Required | Description |\n|-----------|------|----------|-------------|\n| `url` | `str` | ✅ | Full URL to the MCP server SSE endpoint |\n| `headers` | `dict[str, str]` | ❌ | HTTP headers sent with each request |\n| `sse_read_timeout` | `float` | ❌ | SSE read timeout in seconds (default: `300.0`) |\n| `timeout` | `float` | ❌ | HTTP request timeout (default: `60.0`) |\n\n### Progress Callbacks\n\nSSE transport enables progress callback functionality through the MCP client callbacks system:\n\n```python\nfrom langchain_mcp_adapters.callbacks import Callbacks, CallbackContext\n\nclass CustomCallbacks(Callbacks):\n    async def progress_callback(self, progress_token: str, progress: dict) -> None:\n        print(f\"Progress: {progress}\")\n```\n\n资料来源：[langchain_mcp_adapters/tools.py:180-200]()\n\n## WebSocket Transport\n\nWebSocket transport provides bidirectional real-time communication between the client and MCP server. 
This is suitable for applications requiring low-latency, persistent connections.\n\n### Configuration Parameters\n\n| Parameter | Type | Required | Description |\n|-----------|------|----------|-------------|\n| `url` | `str` | ✅ | WebSocket endpoint URL |\n| `headers` | `dict[str, str]` | ❌ | WebSocket handshake headers |\n| `timeout` | `float` | ❌ | Connection timeout |\n\n## Connection Factory\n\nThe `Connection` abstract class defines the common interface for all transport implementations:\n\n```mermaid\nclassDiagram\n    class Connection {\n        <<abstract>>\n        +session_kwarg: dict\n        +server_name: str\n        +get_session() ClientSession\n    }\n    \n    class StdioConnection {\n        +command: str\n        +args: list\n        +env: dict\n        +get_session() ClientSession\n    }\n    \n    class StreamableHttpConnection {\n        +url: str\n        +headers: dict\n        +timeout: float\n        +get_session() ClientSession\n    }\n    \n    class SSEConnection {\n        +url: str\n        +headers: dict\n        +timeout: float\n        +sse_read_timeout: float\n        +get_session() ClientSession\n    }\n    \n    class WebsocketConnection {\n        +url: str\n        +headers: dict\n        +timeout: float\n        +get_session() ClientSession\n    }\n    \n    Connection <|-- StdioConnection\n    Connection <|-- StreamableHttpConnection\n    Connection <|-- SSEConnection\n    Connection <|-- WebsocketConnection\n```\n\n资料来源：[langchain_mcp_adapters/sessions.py:1-50]()\n\n## Session Creation\n\nAll transport types ultimately create an `MCP.ClientSession` for tool execution:\n\n```python\nfrom langchain_mcp_adapters.sessions import create_session\n\n# Direct session creation\nasync with create_session(connection) as session:\n    tools = await load_mcp_tools(session)\n```\n\n资料来源：[langchain_mcp_adapters/sessions.py:1-30]()\n\n### MultiServerMCPClient Session Management\n\n```python\n# Explicitly starting a session\nclient = 
MultiServerMCPClient({\n    \"math\": {\n        \"command\": \"python\",\n        \"args\": [\"/path/to/math_server.py\"],\n        \"transport\": \"stdio\",\n    }\n})\n\nasync with client.session(\"math\") as session:\n    tools = await load_mcp_tools(session)\n```\n\n> MultiServerMCPClient cannot be used as a context manager directly. Use `client.session(server_name)` for explicit session control.\n\n资料来源：[langchain_mcp_adapters/client.py:1-60]()\n\n## Tool Name Prefixing\n\nWhen using multiple servers with overlapping tool names, enable the `tool_name_prefix` option to avoid conflicts:\n\n```python\nclient = MultiServerMCPClient(\n    {\n        \"math\": {\"transport\": \"stdio\", ...},\n        \"weather\": {\"transport\": \"http\", \"url\": \"http://localhost:8000/mcp\"}\n    },\n    tool_name_prefix=True  # Enables prefixed tool names\n)\ntools = await client.get_tools()\n# Tool names: \"math_add\", \"weather_search\" (prefixed with server name)\n```\n\n资料来源：[langchain_mcp_adapters/client.py:30-45]()\n\n## Transport Selection Guide\n\n```mermaid\ngraph TD\n    A[Select Transport] --> B{Deployment Type}\n    \n    B --> C[Local/Subprocess]\n    C --> D[Use stdio]\n    \n    B --> E[Remote Server]\n    E --> F{Need Real-time Events?}\n    \n    F --> G[Yes]\n    G --> H[Use websocket]\n    \n    F --> I[No]\n    I --> J{HTTP/1.1 or Streaming?}\n    \n    J --> K[Streaming/SSE]\n    K --> L[Use sse]\n    \n    J --> M[Request/Response]\n    M --> N[Use http]\n```\n\n### Decision Matrix\n\n| Scenario | Recommended Transport |\n|----------|----------------------|\n| Development, local testing | `stdio` |\n| Production HTTP API | `http` |\n| Server pushing events to client | `sse` |\n| Bidirectional, low-latency needs | `websocket` |\n| Fire-and-forget subprocess | `stdio` |\n\n## Timeout Configuration\n\n### Default Timeouts\n\n| Transport | Parameter | Default Value |\n|-----------|-----------|---------------|\n| HTTP | `timeout` | `60.0` seconds |\n| SSE | 
`timeout` | `60.0` seconds |\n| SSE | `sse_read_timeout` | `300.0` seconds |\n| WebSocket | `timeout` | Connection timeout |\n\n### Custom Timeout Example\n\n```python\nfrom langchain_mcp_adapters.sessions import StreamableHttpConnection\n\nconnection = StreamableHttpConnection(\n    url=\"http://localhost:8000/mcp\",\n    timeout=120.0,  # 2 minute request timeout\n)\n```\n\n## Error Handling\n\nTransport-specific errors may occur during session creation or tool execution:\n\n### Stdio Transport Errors\n\n- **Process startup failure**: Check `command` path and permissions\n- **Encoding errors**: Configure `encoding` and `encoding_error_handler`\n\n### HTTP/SSE/WebSocket Transport Errors\n\n- **Connection timeout**: Increase `timeout` parameter\n- **SSE read timeout**: Increase `sse_read_timeout` for long-running operations\n- **Header authentication failures**: Verify header format and token validity\n\n## See Also\n\n- [MultiServerMCPClient](langchain_mcp_adapters/client.py) - Multi-server connection management\n- [load_mcp_tools](langchain_mcp_adapters/tools.py) - Tool loading with transport\n- [Callbacks System](langchain_mcp_adapters/callbacks.py) - Progress and notification handling\n\n---\n\n<a id='page-callbacks'></a>\n\n## Callbacks\n\n### 相关页面\n\n相关主题：[Tool Call Interceptors](#page-interceptors), [MultiServerMCPClient](#page-multiserver-client)\n\n<details>\n<summary>相关源码文件</summary>\n\n以下源码文件用于生成本页说明：\n\n- [langchain_mcp_adapters/callbacks.py](https://github.com/langchain-ai/langchain-mcp-adapters/blob/main/langchain_mcp_adapters/callbacks.py)\n- [langchain_mcp_adapters/tools.py](https://github.com/langchain-ai/langchain-mcp-adapters/blob/main/langchain_mcp_adapters/tools.py)\n- [langchain_mcp_adapters/client.py](https://github.com/langchain-ai/langchain-mcp-adapters/blob/main/langchain_mcp_adapters/client.py)\n- 
[langchain_mcp_adapters/interceptors.py](https://github.com/langchain-ai/langchain-mcp-adapters/blob/main/langchain_mcp_adapters/interceptors.py)\n- [langchain_mcp_adapters/resources.py](https://github.com/langchain-ai/langchain-mcp-adapters/blob/main/langchain_mcp_adapters/resources.py)\n</details>\n\n# Callbacks\n\nThe Callbacks system in `langchain-mcp-adapters` provides a mechanism for handling notifications, events, and progress updates during MCP tool execution. It acts as a bridge between the LangChain callback format and the MCP (Model Context Protocol) callback format, enabling developers to intercept and respond to tool call lifecycle events.\n\n## Overview\n\nWhen working with MCP tools through `langchain-mcp-adapters`, callbacks serve several critical purposes:\n\n- **Progress Notification**: Track long-running tool operations via progress callbacks\n- **Event Handling**: Respond to notifications and events from the MCP server\n- **Context Propagation**: Maintain context about which server and tool is being executed\n- **Lifecycle Integration**: Integrate with LangChain's callback system for broader ecosystem compatibility\n\nThe callback system is primarily used in two contexts:\n1. When loading MCP tools via `load_mcp_tools()` or `convert_mcp_tool_to_langchain_tool()`\n2. When configuring the `MultiServerMCPClient` for multi-server tool aggregation\n\n资料来源：[langchain_mcp_adapters/tools.py:1-30]()\n\n## Core Components\n\n### CallbackContext\n\nThe `CallbackContext` class provides context information about an ongoing tool call operation.\n\n| Property | Type | Description |\n|----------|------|-------------|\n| `server_name` | `str \\| None` | Name of the MCP server handling the tool call |\n| `tool_name` | `str \\| None` | Name of the tool being executed |\n\n资料来源：[langchain_mcp_adapters/callbacks.py]()\n资料来源：[langchain_mcp_adapters/tools.py:55-62]()\n\n### Callbacks Class\n\nThe `Callbacks` class is the main abstraction for handling MCP events. 
It provides the interface that developers implement to receive notifications.\n\n```python\nclass Callbacks:\n    \"\"\"Handler for MCP notifications and events.\"\"\"\n    \n    def to_mcp_format(self, context: CallbackContext) -> _MCPCallbacks:\n        \"\"\"Convert to MCP-compatible callback format.\"\"\"\n        ...\n```\n\n资料来源：[langchain_mcp_adapters/callbacks.py]()\n资料来源：[langchain_mcp_adapters/tools.py:63-68]()\n\n### _MCPCallbacks Class\n\nThe internal `_MCPCallbacks` class wraps callbacks in the format expected by the MCP SDK.\n\n| Property | Type | Description |\n|----------|------|-------------|\n| `progress_callback` | `Callable \\| None` | Callback for progress updates during tool execution |\n\n资料来源：[langchain_mcp_adapters/callbacks.py]()\n\n## Architecture\n\n```mermaid\ngraph TD\n    A[MultiServerMCPClient] --> B[Callbacks Instance]\n    A --> C[load_mcp_tools]\n    B --> D[to_mcp_format]\n    D --> E[_MCPCallbacks]\n    E --> F[session.call_tool]\n    C --> G[CallbackContext]\n    G --> D\n    \n    H[MCP Server] --> I[Progress Updates]\n    I --> F\n```\n\n## Usage Patterns\n\n### Basic Usage with MultiServerMCPClient\n\nThe most common pattern is to pass a `Callbacks` instance to the `MultiServerMCPClient`:\n\n```python\nfrom langchain_mcp_adapters.client import MultiServerMCPClient\nfrom langchain_mcp_adapters.callbacks import Callbacks, CallbackContext\n\nclass MyCallbacks(Callbacks):\n    def to_mcp_format(self, context: CallbackContext) -> _MCPCallbacks:\n        # Custom callback handling\n        return _MCPCallbacks(progress_callback=self.on_progress)\n\n    async def on_progress(self, progress: float, total: float | None, message: str | None):\n        print(f\"Progress: {progress}/{total} - {message}\")\n\nclient = MultiServerMCPClient(\n    {\n        \"math\": {\n            \"command\": \"python\",\n            \"args\": [\"/path/to/math_server.py\"],\n            \"transport\": \"stdio\",\n        },\n    },\n    
callbacks=MyCallbacks()\n)\n```\n\n资料来源：[langchain_mcp_adapters/client.py:40-60]()\n\n### Usage with load_mcp_tools\n\nCallbacks can also be passed directly when loading tools from a session:\n\n```python\nfrom langchain_mcp_adapters.tools import load_mcp_tools\n\nasync with streamablehttp_client(\"http://localhost:3000/mcp\") as (read, write, _):\n    async with ClientSession(read, write) as session:\n        await session.initialize()\n        \n        tools = await load_mcp_tools(\n            session,\n            callbacks=MyCallbacks(),\n            server_name=\"math_server\"\n        )\n```\n\n资料来源：[langchain_mcp_adapters/tools.py:100-135]()\n\n### Usage with Tool Interceptors\n\nCallbacks work alongside tool interceptors for advanced control over tool execution:\n\n```python\nfrom langchain_mcp_adapters.interceptors import ToolCallInterceptor, MCPToolCallRequest, MCPToolCallResult\n\nclass LoggingInterceptor(ToolCallInterceptor):\n    async def intercept(\n        self, \n        request: MCPToolCallRequest, \n        call_next: Callable\n    ) -> MCPToolCallResult:\n        print(f\"Calling tool: {request.name}\")\n        result = await call_next(request)\n        print(f\"Tool result: {result}\")\n        return result\n\nclient = MultiServerMCPClient(\n    {...},\n    callbacks=MyCallbacks(),\n    tool_interceptors=[LoggingInterceptor()]\n)\n```\n\n资料来源：[langchain_mcp_adapters/interceptors.py:1-50]()\n\n## Callback Flow in Tool Execution\n\n```mermaid\nsequenceDiagram\n    participant Client as MCP Client\n    participant Callbacks as Callbacks Handler\n    participant Session as ClientSession\n    participant Server as MCP Server\n    \n    Client->>Callbacks: to_mcp_format(context)\n    Callbacks-->>Client: _MCPCallbacks\n    Client->>Session: call_tool(name, args, progress_callback)\n    Session->>Server: Execute Tool\n    Server-->>Session: Progress Update\n    Session->>Callbacks: progress_callback\n    Server-->>Session: Tool Result\n    
Session-->>Client: CallToolResult\n```\n\n## CallbackContext Construction\n\nThe `CallbackContext` is constructed with server and tool information at different points in the execution flow:\n\n| Function | Context Construction |\n|----------|---------------------|\n| `load_mcp_tools()` | Uses `server_name` from parameters |\n| `convert_mcp_tool_to_langchain_tool()` | Uses both `server_name` and `tool.name` |\n| `MultiServerMCPClient` | Passed through to all tool loading operations |\n\n资料来源：[langchain_mcp_adapters/tools.py:70-80]()\n\n## Error Handling\n\nWhen callbacks are not provided, the system uses a default `_MCPCallbacks()` instance:\n\n```python\nmcp_callbacks = (\n    callbacks.to_mcp_format(context=CallbackContext(server_name=server_name, tool_name=tool.name))\n    if callbacks is not None\n    else _MCPCallbacks()\n)\n```\n\nThis ensures that tool execution continues normally even without custom callback handling.\n\n资料来源：[langchain_mcp_adapters/tools.py:70-75]()\n\n## Integration with Tool Result Conversion\n\nCallbacks are passed through the entire tool execution chain and are used when converting tool results back to LangChain format:\n\n```python\nasync def call_tool(...) 
-> tuple[ConvertedToolResult, MCPToolArtifact | None]:\n    mcp_callbacks = (\n        callbacks.to_mcp_format(\n            context=CallbackContext(server_name=server_name, tool_name=tool.name)\n        )\n        if callbacks is not None\n        else _MCPCallbacks()\n    )\n    \n    # Execute with progress callback\n    call_tool_result = await session.call_tool(\n        tool_name,\n        tool_args,\n        progress_callback=mcp_callbacks.progress_callback,\n    )\n```\n\n资料来源：[langchain_mcp_adapters/tools.py:55-70]()\n\n## API Reference\n\n### Callbacks Class\n\n```python\nclass Callbacks:\n    \"\"\"Base class for handling MCP notifications and events.\"\"\"\n    \n    def to_mcp_format(self, context: CallbackContext) -> _MCPCallbacks:\n        \"\"\"Convert the callbacks to MCP-compatible format.\n        \n        Args:\n            context: The callback context containing server and tool info.\n            \n        Returns:\n            An _MCPCallbacks instance configured with appropriate handlers.\n        \"\"\"\n        ...\n```\n\n### _MCPCallbacks Class\n\n```python\n@dataclass\nclass _MCPCallbacks:\n    \"\"\"Internal MCP-compatible callbacks wrapper.\"\"\"\n    \n    progress_callback: Callable | None = None\n```\n\n### CallbackContext Class\n\n```python\n@dataclass\nclass CallbackContext:\n    \"\"\"Context information for callback handlers.\"\"\"\n    \n    server_name: str | None = None\n    tool_name: str | None = None\n```\n\n## Best Practices\n\n1. **Always provide context**: When constructing `CallbackContext`, include both `server_name` and `tool_name` for maximum observability.\n\n2. **Handle None gracefully**: The callback system is designed to work without callbacks, so ensure your code handles the default case.\n\n3. **Combine with interceptors**: For comprehensive tool call control, combine callbacks with tool interceptors.\n\n4. 
**Thread-safe progress updates**: Progress callbacks may be called from different tasks; ensure your handler is thread-safe or async-safe.\n\n5. **Resource cleanup**: When using callbacks that allocate resources, ensure proper cleanup in the client lifecycle.\n\n## Summary\n\nThe Callbacks system in `langchain-mcp-adapters` provides a clean abstraction for handling MCP tool lifecycle events. By implementing the `Callbacks` class and its `to_mcp_format()` method, developers can:\n\n- Monitor tool execution progress\n- Handle notifications from MCP servers\n- Integrate with LangChain's callback ecosystem\n- Build custom logging, monitoring, and error handling for MCP tool calls\n\nThe system is designed to be optional—tools work with default callbacks when none are provided—while providing rich customization when needed.\n\n---\n\n<a id='page-interceptors'></a>\n\n## Tool Call Interceptors\n\n### 相关页面\n\n相关主题：[Callbacks](#page-callbacks), [Tool Conversion](#page-tool-conversion)\n\n<details>\n<summary>相关源码文件</summary>\n\n以下源码文件用于生成本页说明：\n\n- [langchain_mcp_adapters/interceptors.py](https://github.com/langchain-ai/langchain-mcp-adapters/blob/main/langchain_mcp_adapters/interceptors.py)\n- [langchain_mcp_adapters/tools.py](https://github.com/langchain-ai/langchain-mcp-adapters/blob/main/langchain_mcp_adapters/tools.py)\n</details>\n\n# Tool Call Interceptors\n\n## Overview\n\nTool Call Interceptors provide a mechanism to wrap and control MCP tool call execution in the langchain-mcp-adapters library. 
They enable developers to inject custom logic before and after tool calls, modify request parameters, handle responses, and implement cross-cutting concerns like logging, authentication, and caching.\n\nThe interceptor system follows the **onion pattern** (also known as decorator pattern or chain of responsibility), where each interceptor wraps the next one, allowing pre-processing and post-processing of tool calls in a composable way.\n\n## Architecture\n\n### High-Level Flow\n\n```mermaid\ngraph TD\n    A[External Code] --> B[Interceptor Chain]\n    B --> C[Interceptor 1]\n    C --> D[Interceptor 2]\n    D --> E[...]\n    E --> F[execute_tool]\n    F --> G[MCP ClientSession.call_tool]\n    \n    subgraph \"Onion Layers (wrapping inward)\"\n        B\n        C\n        D\n        E\n    end\n```\n\n### Component Diagram\n\n```mermaid\nclassDiagram\n    class MCPToolCallRequest {\n        +str name\n        +dict args\n        +str server_name\n        +dict headers\n        +object runtime\n        +override() MCPToolCallRequest\n    }\n    \n    class MCPToolCallResult {\n        <<Type Alias>>\n        CallToolResult | ToolMessage | Command\n    }\n    \n    class ToolCallInterceptor {\n        <<Protocol>>\n        +async __call__(request, handler) MCPToolCallResult\n    }\n    \n    class _build_interceptor_chain {\n        +build_composed_handler()\n    }\n    \n    MCPToolCallRequest --> ToolCallInterceptor : passed to\n    _build_interceptor_chain --> ToolCallInterceptor : composes\n```\n\n## Core Data Models\n\n### MCPToolCallRequest\n\nRepresents a tool execution request passed to MCP tool call interceptors. 
Follows a flat namespace pattern rather than separating call data and context into nested objects.\n\n| Field | Type | Modifiable | Description |\n|-------|------|-------------|-------------|\n| `name` | `str` | Yes | Tool name to invoke |\n| `args` | `dict[str, Any]` | Yes | Tool arguments as key-value pairs |\n| `server_name` | `str` | No | Name of the MCP server handling the tool |\n| `headers` | `dict[str, Any] \\| None` | Yes | HTTP headers for applicable transports (SSE, HTTP) |\n| `runtime` | `object \\| None` | No | LangGraph runtime context (if any) |\n\n资料来源：[interceptors.py:58-74]()\n\n#### The `override()` Method\n\nThe `MCPToolCallRequest` class provides an immutable `override()` method that returns a new instance with specified attributes replaced:\n\n```python\ndef override(\n    self, **overrides: Unpack[_MCPToolCallRequestOverrides]\n) -> MCPToolCallRequest:\n```\n\nThis follows an immutable pattern, leaving the original request unchanged.\n\n| Parameter | Type | Description |\n|-----------|------|-------------|\n| `name` | `str` | Tool name (optional) |\n| `args` | `dict[str, Any]` | Tool arguments (optional) |\n| `headers` | `dict[str, Any] \\| None` | HTTP headers (optional) |\n\n### MCPToolCallResult\n\nA type alias representing the possible return types from an interceptor:\n\n| Type | Description |\n|------|-------------|\n| `CallToolResult` | MCP protocol result (standard MCP format) |\n| `ToolMessage` | LangChain format message |\n| `Command` | LangGraph Command (when langgraph is installed) |\n\n```python\nif LANGGRAPH_PRESENT:\n    MCPToolCallResult = CallToolResult | ToolMessage | Command\nelse:\n    MCPToolCallResult = CallToolResult | ToolMessage\n```\n\n资料来源：[interceptors.py:29-36]()\n\n## ToolCallInterceptor Protocol\n\nThe `ToolCallInterceptor` is a runtime-checkable protocol that defines the interface for interceptor implementations:\n\n```python\n@runtime_checkable\nclass ToolCallInterceptor(Protocol):\n    async def __call__(\n   
     self,\n        request: MCPToolCallRequest,\n        handler: Callable[[MCPToolCallRequest], Awaitable[MCPToolCallResult]],\n    ) -> MCPToolCallResult:\n        ...\n```\n\n| Parameter | Type | Description |\n|-----------|------|-------------|\n| `request` | `MCPToolCallRequest` | The tool call request to process |\n| `handler` | `Callable` | The next handler in the chain (call to continue execution) |\n| **Returns** | `MCPToolCallResult` | The result of processing |\n\n资料来源：[interceptors.py:42-49]()\n\n### Interceptor Pattern\n\nInterceptors work by:\n\n1. **Receiving** the `request` and the `handler` callable\n2. **Optionally** modifying the request before passing it on\n3. **Calling** the `handler` to continue the chain\n4. **Optionally** modifying the result before returning\n\n```python\nasync def my_interceptor(\n    request: MCPToolCallRequest,\n    handler: Callable[[MCPToolCallRequest], Awaitable[MCPToolCallResult]],\n) -> MCPToolCallResult:\n    # Pre-processing: modify request\n    modified_request = request.override(args={**request.args, \"injected\": True})\n    \n    # Continue to next handler\n    result = await handler(modified_request)\n    \n    # Post-processing: modify result\n    # ... 
do something with result ...\n    \n    return result\n```\n\n## Building the Interceptor Chain\n\nThe `_build_interceptor_chain()` function composes multiple interceptors into a single handler using the onion pattern:\n\n```python\ndef _build_interceptor_chain(\n    base_handler: Callable[[MCPToolCallRequest], Awaitable[MCPToolCallResult]],\n    tool_interceptors: list[ToolCallInterceptor] | None,\n) -> Callable[[MCPToolCallRequest], Awaitable[MCPToolCallResult]]:\n```\n\n| Parameter | Type | Description |\n|-----------|------|-------------|\n| `base_handler` | `Callable` | Innermost handler that executes the actual tool call |\n| `tool_interceptors` | `list[ToolCallInterceptor] \\| None` | List of interceptors to wrap around the handler |\n\n资料来源：[tools.py:145-147]()\n\n### Execution Order\n\nThe first interceptor in the list becomes the **outermost layer**, with subsequent interceptors wrapping inward. This means:\n\n1. Interceptor at index 0 executes **first** (outermost)\n2. Interceptor at index 1 executes **second**\n3. And so on...\n4. 
The `base_handler` (actual tool execution) executes **last** (innermost)\n\n```mermaid\ngraph LR\n    A[External Call] --> B[\"Interceptor[0]<br/>outermost\"]\n    B --> C[\"Interceptor[1]\"]\n    C --> D[\"Interceptor[2]\"]\n    D --> E[\"...\"]\n    E --> F[\"base_handler<br/>innermost\"]\n    F --> G[MCP call_tool]\n```\n\n## Usage\n\n### Loading Tools with Interceptors\n\nWhen loading MCP tools, you can provide a list of interceptors:\n\n```python\nfrom langchain_mcp_adapters.client import MultiServerMCPClient\nfrom langchain_mcp_adapters.tools import load_mcp_tools\n\n# Define your interceptor\nclass LoggingInterceptor:\n    async def __call__(self, request, handler):\n        print(f\"Calling tool: {request.name}\")\n        result = await handler(request)\n        print(f\"Tool {request.name} completed\")\n        return result\n\nclient = MultiServerMCPClient({\n    \"math\": {\n        \"command\": \"python\",\n        \"args\": [\"./math_server.py\"],\n        \"transport\": \"stdio\",\n    }\n})\n\ntools = await client.get_tools(\n    tool_interceptors=[LoggingInterceptor()]\n)\n```\n\n资料来源：[tools.py:163-179]()\n\n### Individual Tool Conversion\n\nYou can also apply interceptors when converting individual tools:\n\n```python\nfrom langchain_mcp_adapters.tools import convert_mcp_tool_to_langchain_tool\n\ntool = convert_mcp_tool_to_langchain_tool(\n    session=session,\n    tool=mcp_tool,\n    tool_interceptors=[CustomInterceptor()],\n    server_name=\"my_server\",\n    tool_name_prefix=True\n)\n```\n\n### Using Runtime Context\n\nInterceptors have access to the `runtime` field, which contains LangGraph runtime context when used within a LangGraph graph:\n\n```python\nclass RuntimeAwareInterceptor:\n    async def __call__(self, request, handler):\n        if request.runtime:\n            # Access LangGraph runtime\n            pass\n        return await handler(request)\n```\n\n## Example Interceptors\n\n### Authentication Interceptor\n\n```python\nclass 
AuthInterceptor:\n    def __init__(self, api_key: str):\n        self.api_key = api_key\n    \n    async def __call__(self, request, handler):\n        # Inject auth headers\n        request = request.override(\n            headers={\"Authorization\": f\"Bearer {self.api_key}\"}\n        )\n        return await handler(request)\n```\n\n### Caching Interceptor\n\n```python\nfrom functools import lru_cache\n\nclass CacheInterceptor:\n    def __init__(self):\n        self.cache = {}\n    \n    async def __call__(self, request, handler):\n        cache_key = f\"{request.name}:{hash(frozenset(request.args.items()))}\"\n        \n        if cache_key in self.cache:\n            return self.cache[cache_key]\n        \n        result = await handler(request)\n        self.cache[cache_key] = result\n        return result\n```\n\n### Request Modification Interceptor\n\n```python\nclass DefaultArgsInterceptor:\n    def __init__(self, defaults: dict[str, Any]):\n        self.defaults = defaults\n    \n    async def __call__(self, request, handler):\n        # Merge defaults with provided args\n        merged_args = {**self.defaults, **request.args}\n        request = request.override(args=merged_args)\n        return await handler(request)\n```\n\n## API Reference\n\n### Functions\n\n#### `_build_interceptor_chain()`\n\n```python\ndef _build_interceptor_chain(\n    base_handler: Callable[[MCPToolCallRequest], Awaitable[MCPToolCallResult]],\n    tool_interceptors: list[ToolCallInterceptor] | None,\n) -> Callable[[MCPToolCallRequest], Awaitable[MCPToolCallResult]]:\n```\n\nBuilds a composed handler chain with interceptors in onion pattern.\n\n**Parameters:**\n\n| Name | Type | Description |\n|------|------|-------------|\n| `base_handler` | `Callable` | Innermost handler executing the actual tool call |\n| `tool_interceptors` | `list[ToolCallInterceptor] \\| None` | Optional list of interceptors to wrap |\n\n**Returns:** Composed handler with all interceptors 
applied\n\n资料来源：[tools.py:145-175]()\n\n### Classes\n\n#### `MCPToolCallRequest`\n\n```python\n@dataclass\nclass MCPToolCallRequest:\n    name: str\n    args: dict[str, Any]\n    server_name: str\n    headers: dict[str, Any] | None = None\n    runtime: object | None = None\n```\n\n资料来源：[interceptors.py:58-74]()\n\n#### `ToolCallInterceptor`\n\n```python\n@runtime_checkable\nclass ToolCallInterceptor(Protocol):\n    async def __call__(\n        self,\n        request: MCPToolCallRequest,\n        handler: Callable[[MCPToolCallRequest], Awaitable[MCPToolCallResult]],\n    ) -> MCPToolCallResult:\n        ...\n```\n\n资料来源：[interceptors.py:42-49]()\n\n### Type Aliases\n\n#### `MCPToolCallResult`\n\n```python\nif LANGGRAPH_PRESENT:\n    MCPToolCallResult = CallToolResult | ToolMessage | Command\nelse:\n    MCPToolCallResult = CallToolResult | ToolMessage\n```\n\n资料来源：[interceptors.py:29-36]()\n\n## Best Practices\n\n1. **Always call the handler**: Interceptors should typically call `handler(request)` unless intentionally short-circuiting\n2. **Immutability**: Use `request.override()` to create modified requests instead of mutating the original\n3. **Error handling**: Wrap handler calls in try/except for proper error handling and logging\n4. **Order matters**: Place interceptors in the correct order as the first in the list is the outermost\n5. **Type hints**: Use type hints for better IDE support and type checking\n\n## Limitations\n\n- Interceptors cannot currently modify the `server_name` or `runtime` fields of `MCPToolCallRequest` as they are context fields\n- The interceptor system is designed for tool call interception; other MCP lifecycle events (like resource access) are not currently interceptable\n- Runtime headers are only supported for `sse` and `http` transports\n\n---\n\n---\n\n## Doramagic 踩坑日志\n\n项目：langchain-ai/langchain-mcp-adapters\n\n摘要：发现 17 个潜在踩坑项，其中 3 个为 high/blocking；最高优先级：安装坑 - 来源证据：Prompts and Resources auto-discovery。\n\n## 1. 
安装坑 · 来源证据：Prompts and Resources auto-discovery\n\n- 严重度：high\n- 证据强度：source_linked\n- 发现：GitHub 社区证据显示该项目存在一个安装相关的待验证问题：Prompts and Resources auto-discovery\n- 对用户的影响：可能增加新用户试用和生产接入成本。\n- 建议检查：来源问题仍为 open，Pack Agent 需要复核是否仍影响当前版本。\n- 防护动作：不得脱离来源链接放大为确定性结论；需要标注适用版本和复核状态。\n- 证据：community_evidence:github | cevd_bf1812b74caa4e989767a9307a8ffc16 | https://github.com/langchain-ai/langchain-mcp-adapters/issues/62 | 来源讨论提到 python 相关条件，需在安装/试用前复核。\n\n## 2. 安装坑 · 来源证据：`MultiServerMCPClient.get_tools()` silently returns no tools when any single server fails to connect\n\n- 严重度：high\n- 证据强度：source_linked\n- 发现：GitHub 社区证据显示该项目存在一个安装相关的待验证问题：`MultiServerMCPClient.get_tools()` silently returns no tools when any single server fails to connect\n- 对用户的影响：可能增加新用户试用和生产接入成本。\n- 建议检查：来源问题仍为 open，Pack Agent 需要复核是否仍影响当前版本。\n- 防护动作：不得脱离来源链接放大为确定性结论；需要标注适用版本和复核状态。\n- 证据：community_evidence:github | cevd_a5093182914b4df0b7ad2cd560bacdf2 | https://github.com/langchain-ai/langchain-mcp-adapters/issues/492 | 来源讨论提到 python 相关条件，需在安装/试用前复核。\n\n## 3. 运行坑 · 来源证据：Fix TypeError in resources.py and make __aexit__ an async coroutine in client.py\n\n- 严重度：high\n- 证据强度：source_linked\n- 发现：GitHub 社区证据显示该项目存在一个运行相关的待验证问题：Fix TypeError in resources.py and make __aexit__ an async coroutine in client.py\n- 对用户的影响：可能增加新用户试用和生产接入成本。\n- 建议检查：来源问题仍为 open，Pack Agent 需要复核是否仍影响当前版本。\n- 防护动作：不得脱离来源链接放大为确定性结论；需要标注适用版本和复核状态。\n- 证据：community_evidence:github | cevd_ac102050dd4841d6954559a3413e0b92 | https://github.com/langchain-ai/langchain-mcp-adapters/issues/496 | 来源类型 github_issue 暴露的待验证使用条件。\n\n## 4. 
安装坑 · 来源证据：langchain-mcp-adapters==0.2.2\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：GitHub 社区证据显示该项目存在一个安装相关的待验证问题：langchain-mcp-adapters==0.2.2\n- 对用户的影响：可能增加新用户试用和生产接入成本。\n- 建议检查：来源显示可能已有修复、规避或版本变化，说明书中必须标注适用版本。\n- 防护动作：不得脱离来源链接放大为确定性结论；需要标注适用版本和复核状态。\n- 证据：community_evidence:github | cevd_0c6ca0722ab046379d28ecf30f8d2bcf | https://github.com/langchain-ai/langchain-mcp-adapters/releases/tag/langchain-mcp-adapters%3D%3D0.2.2 | 来源类型 github_release 暴露的待验证使用条件。\n\n## 5. 配置坑 · 来源证据：langchain-mcp-adapters==0.1.10\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：GitHub 社区证据显示该项目存在一个配置相关的待验证问题：langchain-mcp-adapters==0.1.10\n- 对用户的影响：可能增加新用户试用和生产接入成本。\n- 建议检查：来源显示可能已有修复、规避或版本变化，说明书中必须标注适用版本。\n- 防护动作：不得脱离来源链接放大为确定性结论；需要标注适用版本和复核状态。\n- 证据：community_evidence:github | cevd_8b18dbf32ccd41e38b272a458f4040f5 | https://github.com/langchain-ai/langchain-mcp-adapters/releases/tag/langchain-mcp-adapters%3D%3D0.1.10 | 来源讨论提到 python 相关条件，需在安装/试用前复核。\n\n## 6. 能力坑 · 来源证据：langchain-mcp-adapters==0.1.14\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：GitHub 社区证据显示该项目存在一个能力理解相关的待验证问题：langchain-mcp-adapters==0.1.14\n- 对用户的影响：可能增加新用户试用和生产接入成本。\n- 建议检查：来源显示可能已有修复、规避或版本变化，说明书中必须标注适用版本。\n- 防护动作：不得脱离来源链接放大为确定性结论；需要标注适用版本和复核状态。\n- 证据：community_evidence:github | cevd_6727e0d698e54fc38d7c60e262978ac2 | https://github.com/langchain-ai/langchain-mcp-adapters/releases/tag/langchain-mcp-adapters%3D%3D0.1.14 | 来源类型 github_release 暴露的待验证使用条件。\n\n## 7. 能力坑 · 能力判断依赖假设\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：README/documentation is current enough for a first validation pass.\n- 对用户的影响：假设不成立时，用户拿不到承诺的能力。\n- 建议检查：将假设转成下游验证清单。\n- 防护动作：假设必须转成验证项；没有验证结果前不能写成事实。\n- 证据：capability.assumptions | github_repo:929158279 | https://github.com/langchain-ai/langchain-mcp-adapters | README/documentation is current enough for a first validation pass.\n\n## 8. 
运行坑 · 来源证据：langchain-mcp-adapters==0.1.12\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：GitHub 社区证据显示该项目存在一个运行相关的待验证问题：langchain-mcp-adapters==0.1.12\n- 对用户的影响：可能增加新用户试用和生产接入成本。\n- 建议检查：来源显示可能已有修复、规避或版本变化，说明书中必须标注适用版本。\n- 防护动作：不得脱离来源链接放大为确定性结论；需要标注适用版本和复核状态。\n- 证据：community_evidence:github | cevd_e71a46a9e0374d139555a78f229b0469 | https://github.com/langchain-ai/langchain-mcp-adapters/releases/tag/langchain-mcp-adapters%3D%3D0.1.12 | 来源类型 github_release 暴露的待验证使用条件。\n\n## 9. 维护坑 · 来源证据：langchain-mcp-adapters==0.2.0\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：GitHub 社区证据显示该项目存在一个维护/版本相关的待验证问题：langchain-mcp-adapters==0.2.0\n- 对用户的影响：可能影响升级、迁移或版本选择。\n- 建议检查：来源显示可能已有修复、规避或版本变化，说明书中必须标注适用版本。\n- 防护动作：不得脱离来源链接放大为确定性结论；需要标注适用版本和复核状态。\n- 证据：community_evidence:github | cevd_59483f9a6a16414c9087b1751fba8efc | https://github.com/langchain-ai/langchain-mcp-adapters/releases/tag/langchain-mcp-adapters%3D%3D0.2.0 | 来源类型 github_release 暴露的待验证使用条件。\n\n## 10. 维护坑 · 来源证据：langchain-mcp-adapters==0.2.0a1\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：GitHub 社区证据显示该项目存在一个维护/版本相关的待验证问题：langchain-mcp-adapters==0.2.0a1\n- 对用户的影响：可能影响升级、迁移或版本选择。\n- 建议检查：来源显示可能已有修复、规避或版本变化，说明书中必须标注适用版本。\n- 防护动作：不得脱离来源链接放大为确定性结论；需要标注适用版本和复核状态。\n- 证据：community_evidence:github | cevd_4e7fcda1716948898295279af95f8f96 | https://github.com/langchain-ai/langchain-mcp-adapters/releases/tag/langchain-mcp-adapters%3D%3D0.2.0a1 | 来源类型 github_release 暴露的待验证使用条件。\n\n## 11. 维护坑 · 维护活跃度未知\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：未记录 last_activity_observed。\n- 对用户的影响：新项目、停更项目和活跃项目会被混在一起，推荐信任度下降。\n- 建议检查：补 GitHub 最近 commit、release、issue/PR 响应信号。\n- 防护动作：维护活跃度未知时，推荐强度不能标为高信任。\n- 证据：evidence.maintainer_signals | github_repo:929158279 | https://github.com/langchain-ai/langchain-mcp-adapters | last_activity_observed missing\n\n## 12. 
安全/权限坑 · 下游验证发现风险项\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：no_demo\n- 对用户的影响：下游已经要求复核，不能在页面中弱化。\n- 建议检查：进入安全/权限治理复核队列。\n- 防护动作：下游风险存在时必须保持 review/recommendation 降级。\n- 证据：downstream_validation.risk_items | github_repo:929158279 | https://github.com/langchain-ai/langchain-mcp-adapters | no_demo; severity=medium\n\n## 13. 安全/权限坑 · 存在安全注意事项\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：No sandbox install has been executed yet; downstream must verify before user use.\n- 对用户的影响：用户安装前需要知道权限边界和敏感操作。\n- 建议检查：转成明确权限清单和安全审查提示。\n- 防护动作：安全注意事项必须面向用户前置展示。\n- 证据：risks.safety_notes | github_repo:929158279 | https://github.com/langchain-ai/langchain-mcp-adapters | No sandbox install has been executed yet; downstream must verify before user use.\n\n## 14. 安全/权限坑 · 存在评分风险\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：no_demo\n- 对用户的影响：风险会影响是否适合普通用户安装。\n- 建议检查：把风险写入边界卡，并确认是否需要人工复核。\n- 防护动作：评分风险必须进入边界卡，不能只作为内部分数。\n- 证据：risks.scoring_risks | github_repo:929158279 | https://github.com/langchain-ai/langchain-mcp-adapters | no_demo; severity=medium\n\n## 15. 安全/权限坑 · 来源证据：Feature Request: Support passing server-defined params extensions (e.g. LangGraph `context`) through tools/call\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：GitHub 社区证据显示该项目存在一个安全/权限相关的待验证问题：Feature Request: Support passing server-defined params extensions (e.g. LangGraph `context`) through tools/call\n- 对用户的影响：可能影响授权、密钥配置或安全边界。\n- 建议检查：来源问题仍为 open，Pack Agent 需要复核是否仍影响当前版本。\n- 防护动作：不得脱离来源链接放大为确定性结论；需要标注适用版本和复核状态。\n- 证据：community_evidence:github | cevd_8c46dab4b6dd4a6e92c96af49ea47647 | https://github.com/langchain-ai/langchain-mcp-adapters/issues/502 | 来源讨论提到 python 相关条件，需在安装/试用前复核。\n\n## 16. 
维护坑 · issue/PR 响应质量未知\n\n- 严重度：low\n- 证据强度：source_linked\n- 发现：issue_or_pr_quality=unknown。\n- 对用户的影响：用户无法判断遇到问题后是否有人维护。\n- 建议检查：抽样最近 issue/PR，判断是否长期无人处理。\n- 防护动作：issue/PR 响应未知时，必须提示维护风险。\n- 证据：evidence.maintainer_signals | github_repo:929158279 | https://github.com/langchain-ai/langchain-mcp-adapters | issue_or_pr_quality=unknown\n\n## 17. 维护坑 · 发布节奏不明确\n\n- 严重度：low\n- 证据强度：source_linked\n- 发现：release_recency=unknown。\n- 对用户的影响：安装命令和文档可能落后于代码，用户踩坑概率升高。\n- 建议检查：确认最近 release/tag 和 README 安装命令是否一致。\n- 防护动作：发布节奏未知或过期时，安装说明必须标注可能漂移。\n- 证据：evidence.maintainer_signals | github_repo:929158279 | https://github.com/langchain-ai/langchain-mcp-adapters | release_recency=unknown\n\n<!-- canonical_name: langchain-ai/langchain-mcp-adapters; human_manual_source: deepwiki_human_wiki -->\n",
      "markdown_key": "langchain-mcp-adapters",
      "pages": "draft",
      "source_refs": [
        {
          "evidence_id": "github_repo:929158279",
          "kind": "repo",
          "supports_claim_ids": [
            "claim_identity",
            "claim_distribution",
            "claim_capability"
          ],
          "url": "https://github.com/langchain-ai/langchain-mcp-adapters"
        },
        {
          "evidence_id": "art_19ca1ab453cc40588fce6af16d6ad367",
          "kind": "docs",
          "supports_claim_ids": [
            "claim_identity",
            "claim_distribution",
            "claim_capability"
          ],
          "url": "https://github.com/langchain-ai/langchain-mcp-adapters#readme"
        }
      ],
      "summary": "DeepWiki/Human Wiki 完整输出，末尾追加 Discovery Agent 踩坑日志。",
      "title": "langchain-mcp-adapters 说明书",
      "toc": [
        "https://github.com/langchain-ai/langchain-mcp-adapters 项目说明书",
        "目录",
        "Introduction",
        "Overview",
        "Architecture",
        "Supported Transports",
        "Tool Conversion Process",
        "Interceptor System",
        "Doramagic 踩坑日志"
      ]
    }
  },
  "quality_gate": {
    "blocking_gaps": [],
    "category_confidence": "medium",
    "compile_status": "ready_for_review",
    "five_assets_present": true,
    "install_sandbox_verified": true,
    "missing_evidence": [],
    "next_action": "publish to Doramagic.ai project surfaces",
    "prompt_preview_boundary_ok": true,
    "publish_status": "publishable",
    "quick_start_verified": true,
    "repo_clone_verified": true,
    "repo_commit": "174c48e85c59d227f1b57c1c5dedd00c341e8086",
    "repo_inspection_error": null,
    "repo_inspection_files": [
      "pyproject.toml",
      "README.md",
      "uv.lock",
      "examples/servers/streamable-http-stateless/pyproject.toml",
      "examples/servers/streamable-http-stateless/README.md",
      "examples/servers/streamable-http-stateless/mcp_simple_streamablehttp_stateless/server.py",
      "examples/servers/streamable-http-stateless/mcp_simple_streamablehttp_stateless/__init__.py",
      "examples/servers/streamable-http-stateless/mcp_simple_streamablehttp_stateless/__main__.py"
    ],
    "repo_inspection_verified": true,
    "review_reasons": [],
    "tag_count_ok": true,
    "unsupported_claims": []
  },
  "schema_version": "0.1",
  "user_assets": {
    "ai_context_pack": {
      "asset_id": "ai_context_pack",
      "filename": "AI_CONTEXT_PACK.md",
      "markdown": "# langchain-mcp-adapters - Doramagic AI Context Pack\n\n> 定位：安装前体验与判断资产。它帮助宿主 AI 有一个好的开始，但不代表已经安装、执行或验证目标项目。\n\n## 充分原则\n\n- **充分原则，不是压缩原则**：AI Context Pack 应该充分到让宿主 AI 在开工前理解项目价值、能力边界、使用入口、风险和证据来源；它可以分层组织，但不以最短摘要为目标。\n- **压缩策略**：只压缩噪声和重复内容，不压缩会影响判断和开工质量的上下文。\n\n## 给宿主 AI 的使用方式\n\n你正在读取 Doramagic 为 langchain-mcp-adapters 编译的 AI Context Pack。请把它当作开工前上下文：帮助用户理解适合谁、能做什么、如何开始、哪些必须安装后验证、风险在哪里。不要声称你已经安装、运行或执行了目标项目。\n\n## Claim 消费规则\n\n- **事实来源**：Repo Evidence + Claim/Evidence Graph；Human Wiki 只提供显著性、术语和叙事结构。\n- **事实最低状态**：`supported`\n- `supported`：可以作为项目事实使用，但回答中必须引用 claim_id 和证据路径。\n- `weak`：只能作为低置信度线索，必须要求用户继续核实。\n- `inferred`：只能用于风险提示或待确认问题，不能包装成项目事实。\n- `unverified`：不得作为事实使用，应明确说证据不足。\n- `contradicted`：必须展示冲突来源，不得替用户强行选择一个版本。\n\n## 它最适合谁\n\n- **想在安装前理解开源项目价值和边界的用户**：当前证据主要来自项目文档。 证据：`README.md` Claim：`clm_0002` supported 0.86\n\n## 它能做什么\n\n- **命令行启动或安装流程**（需要安装后验证）：项目文档中存在可执行命令，真实使用需要在本地或宿主环境中运行这些命令。 证据：`README.md` Claim：`clm_0001` supported 0.86\n\n## 怎么开始\n\n- `pip install langchain-mcp-adapters` 证据：`README.md` Claim：`clm_0003` supported 0.86, `clm_0004` supported 0.86\n- `pip install langchain-mcp-adapters langgraph \"langchain[openai]\"` 证据：`README.md` Claim：`clm_0004` supported 0.86\n\n## 继续前判断卡\n\n- **当前建议**：需要管理员/安全审批\n- **为什么**：继续前可能涉及密钥、账号、外部服务或敏感上下文，建议先经过管理员或安全审批。\n\n### 30 秒判断\n\n- **现在怎么做**：需要管理员/安全审批\n- **最小安全下一步**：先跑 Prompt Preview；若涉及凭证或企业环境，先审批再试装\n- **先别相信**：工具权限边界不能在安装前相信。\n- **继续会触碰**：命令执行、本地环境或项目文件、环境变量 / API Key\n\n### 现在可以相信\n\n- **适合人群线索：想在安装前理解开源项目价值和边界的用户**（supported）：有 supported claim 或项目证据支撑，但仍不等于真实安装效果。 证据：`README.md` Claim：`clm_0002` supported 0.86\n- **能力存在：命令行启动或安装流程**（supported）：可以相信项目包含这类能力线索；是否适合你的具体任务仍要试用或安装后验证。 证据：`README.md` Claim：`clm_0001` supported 0.86\n- **存在 Quick Start / 安装命令线索**（supported）：可以相信项目文档出现过启动或安装入口；不要因此直接在主力环境运行。 证据：`README.md` Claim：`clm_0003` supported 0.86, `clm_0004` supported 0.86\n\n### 现在还不能相信\n\n- **工具权限边界不能在安装前相信。**（unverified）：MCP/tool 类项目通常会触碰文件、网络、浏览器或外部 API，必须真实检查权限和日志。\n- 
**真实输出质量不能在安装前相信。**（unverified）：Prompt Preview 只能展示引导方式，不能证明真实项目中的结果质量。\n- **宿主 AI 版本兼容性不能在安装前相信。**（unverified）：Claude、Cursor、Codex、Gemini 等宿主加载规则和版本差异必须在真实环境验证。\n- **不会污染现有宿主 AI 行为，不能直接相信。**（inferred）：Skill、plugin、AGENTS/CLAUDE/GEMINI 指令可能改变宿主 AI 的默认行为。\n- **可安全回滚不能默认相信。**（unverified）：除非项目明确提供卸载和恢复说明，否则必须先在隔离环境验证。\n- **真实安装后是否与用户当前宿主 AI 版本兼容？**（unverified）：兼容性只能通过实际宿主环境验证。\n- **项目输出质量是否满足用户具体任务？**（unverified）：安装前预览只能展示流程和边界，不能替代真实评测。\n- **安装命令是否需要网络、权限或全局写入？**（unverified）：这影响企业环境和个人环境的安装风险。 证据：`README.md`\n\n### 继续会触碰什么\n\n- **命令执行**：包管理器、网络下载、本地插件目录、项目配置或用户主目录。 原因：运行第一条命令就可能产生环境改动；必须先判断是否值得跑。 证据：`README.md`\n- **本地环境或项目文件**：安装结果、插件缓存、项目配置或本地依赖目录。 原因：安装前无法证明写入范围和回滚方式，需要隔离验证。 证据：`README.md`\n- **环境变量 / API Key**：项目入口文档明确出现 API key、token、secret 或账号凭证配置。 原因：如果真实安装需要凭证，应先使用测试凭证并经过权限/合规判断。 证据：`README.md`\n- **宿主 AI 上下文**：AI Context Pack、Prompt Preview、Skill 路由、风险规则和项目事实。 原因：导入上下文会影响宿主 AI 后续判断，必须避免把未验证项包装成事实。\n\n### 最小安全下一步\n\n- **先跑 Prompt Preview**：用安装前交互式试用判断工作方式是否匹配，不需要授权或改环境。（适用：任何项目都适用，尤其是输出质量未知时。）\n- **只在隔离目录或测试账号试装**：避免安装命令污染主力宿主 AI、真实项目或用户主目录。（适用：存在命令执行、插件配置或本地写入线索时。）\n- **不要使用真实生产凭证**：环境变量/API key 一旦进入宿主或工具链，可能产生账号和合规风险。（适用：出现 API、TOKEN、KEY、SECRET 等环境线索时。）\n- **安装后只验证一个最小任务**：先验证加载、兼容、输出质量和回滚，再决定是否深用。（适用：准备从试用进入真实工作流时。）\n\n### 退出方式\n\n- **保留安装前状态**：记录原始宿主配置和项目状态，后续才能判断是否可恢复。\n- **记录安装命令和写入路径**：没有明确卸载说明时，至少要知道哪些目录或配置需要手动清理。\n- **准备撤销测试 API key 或 token**：测试凭证泄露或误用时，可以快速止损。\n- **如果没有回滚路径，不进入主力环境**：不可回滚是继续前阻断项，不应靠信任或运气继续。\n\n## 哪些只能预览\n\n- 解释项目适合谁和能做什么\n- 基于项目文档演示典型对话流程\n- 帮助用户判断是否值得安装或继续研究\n\n## 哪些必须安装后验证\n\n- 真实安装 Skill、插件或 CLI\n- 执行脚本、修改本地文件或访问外部服务\n- 验证真实输出质量、性能和兼容性\n\n## 边界与风险判断卡\n\n- **把安装前预览误认为真实运行**：用户可能高估项目已经完成的配置、权限和兼容性验证。 处理方式：明确区分 prompt_preview_can_do 与 runtime_required。 Claim：`clm_0005` inferred 0.45\n- **命令执行会修改本地环境**：安装命令可能写入用户主目录、宿主插件目录或项目配置。 处理方式：先在隔离环境或测试账号中运行。 证据：`README.md` Claim：`clm_0006` supported 0.86\n- **待确认**：真实安装后是否与用户当前宿主 AI 版本兼容？。原因：兼容性只能通过实际宿主环境验证。\n- **待确认**：项目输出质量是否满足用户具体任务？。原因：安装前预览只能展示流程和边界，不能替代真实评测。\n- 
**待确认**：安装命令是否需要网络、权限或全局写入？。原因：这影响企业环境和个人环境的安装风险。\n\n## 开工前工作上下文\n\n### 加载顺序\n\n- 先读取 how_to_use.host_ai_instruction，建立安装前判断资产的边界。\n- 读取 claim_graph_summary，确认事实来自 Claim/Evidence Graph，而不是 Human Wiki 叙事。\n- 再读取 intended_users、capabilities 和 quick_start_candidates，判断用户是否匹配。\n- 需要执行具体任务时，优先查 role_skill_index，再查 evidence_index。\n- 遇到真实安装、文件修改、网络访问、性能或兼容性问题时，转入 risk_card 和 boundaries.runtime_required。\n\n### 任务路由\n\n- **命令行启动或安装流程**：先说明这是安装后验证能力，再给出安装前检查清单。 边界：必须真实安装或运行后验证。 证据：`README.md` Claim：`clm_0001` supported 0.86\n\n### 上下文规模\n\n- 文件总数：42\n- 重要文件覆盖：20/42\n- 证据索引条目：18\n- 角色 / Skill 条目：3\n\n### 证据不足时的处理\n\n- **missing_evidence**：说明证据不足，要求用户提供目标文件、README 段落或安装后验证记录；不要补全事实。\n- **out_of_scope_request**：说明该任务超出当前 AI Context Pack 证据范围，并建议用户先查看 Human Manual 或真实安装后验证。\n- **runtime_request**：给出安装前检查清单和命令来源，但不要替用户执行命令或声称已执行。\n- **source_conflict**：同时展示冲突来源，标记为待核实，不要强行选择一个版本。\n\n## Prompt Recipes\n\n### 适配判断\n\n- 目标：判断这个项目是否适合用户当前任务。\n- 预期输出：适配结论、关键理由、证据引用、安装前可预览内容、必须安装后验证内容、下一步建议。\n\n```text\n请基于 langchain-mcp-adapters 的 AI Context Pack，先问我 3 个必要问题，然后判断它是否适合我的任务。回答必须包含：适合谁、能做什么、不能做什么、是否值得安装、证据来自哪里。所有项目事实必须引用 evidence_refs、source_paths 或 claim_id。\n```\n\n### 安装前体验\n\n- 目标：让用户在安装前感受核心工作流，同时避免把预览包装成真实能力或营销承诺。\n- 预期输出：一段带边界标签的体验剧本、安装后验证清单和谨慎建议；不含真实运行承诺或强营销表述。\n\n```text\n请把 langchain-mcp-adapters 当作安装前体验资产，而不是已安装工具或真实运行环境。\n\n请严格输出四段：\n1. 先问我 3 个必要问题。\n2. 给出一段“体验剧本”：用 [安装前可预览]、[必须安装后验证]、[证据不足] 三种标签展示它可能如何引导工作流。\n3. 给出安装后验证清单：列出哪些能力只有真实安装、真实宿主加载、真实项目运行后才能确认。\n4. 
给出谨慎建议：只能说“值得继续研究/试装”“先补充信息后再判断”或“不建议继续”，不得替项目背书。\n\n硬性边界：\n- 不要声称已经安装、运行、执行测试、修改文件或产生真实结果。\n- 不要写“自动适配”“确保通过”“完美适配”“强烈建议安装”等承诺性表达。\n- 如果描述安装后的工作方式，必须使用“如果安装成功且宿主正确加载 Skill，它可能会……”这种条件句。\n- 体验剧本只能写成“示例台词/假设流程”：使用“可能会询问/可能会建议/可能会展示”，不要写“已写入、已生成、已通过、正在运行、正在生成”。\n- Prompt Preview 不负责给安装命令；如用户准备试装，只能提示先阅读 Quick Start 和 Risk Card，并在隔离环境验证。\n- 所有项目事实必须来自 supported claim、evidence_refs 或 source_paths；inferred/unverified 只能作风险或待确认项。\n\n```\n\n### 角色 / Skill 选择\n\n- 目标：从项目里的角色或 Skill 中挑选最匹配的资产。\n- 预期输出：候选角色或 Skill 列表，每项包含适用场景、证据路径、风险边界和是否需要安装后验证。\n\n```text\n请读取 role_skill_index，根据我的目标任务推荐 3-5 个最相关的角色或 Skill。每个推荐都要说明适用场景、可能输出、风险边界和 evidence_refs。\n```\n\n### 风险预检\n\n- 目标：安装或引入前识别环境、权限、规则冲突和质量风险。\n- 预期输出：环境、权限、依赖、许可、宿主冲突、质量风险和未知项的检查清单。\n\n```text\n请基于 risk_card、boundaries 和 quick_start_candidates，给我一份安装前风险预检清单。不要替我执行命令，只说明我应该检查什么、为什么检查、失败会有什么影响。\n```\n\n### 宿主 AI 开工指令\n\n- 目标：把项目上下文转成一次对话开始前的宿主 AI 指令。\n- 预期输出：一段边界明确、证据引用明确、适合复制给宿主 AI 的开工前指令。\n\n```text\n请基于 langchain-mcp-adapters 的 AI Context Pack，生成一段我可以粘贴给宿主 AI 的开工前指令。这段指令必须遵守 not_runtime=true，不能声称项目已经安装、运行或产生真实结果。\n```\n\n\n## 角色 / Skill 索引\n\n- 共索引 3 个角色 / Skill / 项目文档条目。\n\n- **LangChain MCP Adapters**（project_doc）：This library provides a lightweight wrapper that makes Anthropic Model Context Protocol MCP https://modelcontextprotocol.io/introduction tools compatible with LangChain https://github.com/langchain-ai/langchain and LangGraph https://github.com/langchain-ai/langgraph . 激活提示：当用户需要理解项目结构、安装方式或边界时参考。 证据：`README.md`\n- **MCP Simple StreamableHttp Stateless Server Example**（project_doc）：MCP Simple StreamableHttp Stateless Server Example 激活提示：当用户需要理解项目结构、安装方式或边界时参考。 证据：`examples/servers/streamable-http-stateless/README.md`\n- **Security Policy**（project_doc）：For any other security concerns, please contact us at security@langchain.dev . 
激活提示：当用户需要理解项目结构、安装方式或边界时参考。 证据：`SECURITY.md`\n\n## 证据索引\n\n- 共索引 18 条证据。\n\n- **LangChain MCP Adapters**（documentation）：This library provides a lightweight wrapper that makes Anthropic Model Context Protocol MCP https://modelcontextprotocol.io/introduction tools compatible with LangChain https://github.com/langchain-ai/langchain and LangGraph https://github.com/langchain-ai/langgraph . 证据：`README.md`\n- **MCP Simple StreamableHttp Stateless Server Example**（documentation）：MCP Simple StreamableHttp Stateless Server Example 证据：`examples/servers/streamable-http-stateless/README.md`\n- **License**（source_file）：Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files the \"Software\" , to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: 证据：`LICENSE`\n- **Security Policy**（documentation）：For any other security concerns, please contact us at security@langchain.dev . 证据：`SECURITY.md`\n- **Please see the documentation for all configuration options:**（source_file）：Please see the documentation for all configuration options: https://docs.github.com/github/administering-a-repository/configuration-options-for-dependency-updates and https://docs.github.com/code-security/dependabot/dependabot-version-updates/configuration-options-for-the-dependabot.yml-file 证据：`.github/dependabot.yml`\n- **Pyenv**（source_file）：Byte-compiled / optimized / DLL files pycache / .py cod $py.class 证据：`.gitignore`\n- **Default target executed when no arguments are given to make.**（source_file）：Default target executed when no arguments are given to make. all: help 证据：`Makefile`\n- **Init**（source_file）：\"\"\"LangChain MCP Adapters - Connect MCP servers with LangChain applications. 
证据：`langchain_mcp_adapters/__init__.py`\n- **Type aliases to avoid direct MCP type dependencies**（source_file）：from dataclasses import dataclass from typing import Protocol, runtime checkable 证据：`langchain_mcp_adapters/callbacks.py`\n- **Make sure to update to the full absolute path to your**（source_file）：\"\"\"Client for connecting to multiple MCP servers and loading LC tools/resources. 证据：`langchain_mcp_adapters/client.py`\n- **langgraph installed**（source_file）：\"\"\"Interceptor interfaces and types for MCP client tool call lifecycle management. 证据：`langchain_mcp_adapters/interceptors.py`\n- **Prompts**（source_file）：\"\"\"Prompts adapter for converting MCP prompts to LangChain messages https://docs.langchain.com/oss/python/langchain/messages . 证据：`langchain_mcp_adapters/prompts.py`\n- **Resources**（source_file）：\"\"\"Resources adapter for converting MCP resources to LangChain Blob objects langchain core.documents.base.Blob . 证据：`langchain_mcp_adapters/resources.py`\n- **Create and store the connection**（source_file）：\"\"\"Session management for different MCP transport types. 证据：`langchain_mcp_adapters/sessions.py`\n- **langgraph installed**（source_file）：\"\"\"Tools adapter for converting MCP tools to LangChain tools. 
证据：`langchain_mcp_adapters/tools.py`\n- **-ra: Report all extra test outcomes passed, skipped, failed, etc.**（source_file）：build-system requires = \"pdm-backend\" build-backend = \"pdm.backend\" 证据：`pyproject.toml`\n- **Wait for server to be running**（source_file）：import multiprocessing import socket import time from collections.abc import Generator 证据：`tests/conftest.py`\n- **Helper for matching auto-generated LangChain content block IDs**（source_file）：import contextlib import multiprocessing import socket import time from collections.abc import Generator 证据：`tests/utils.py`\n\n## 宿主 AI 必须遵守的规则\n\n- **把本资产当作开工前上下文，而不是运行环境。**：AI Context Pack 只包含证据化项目理解，不包含目标项目的可执行状态。 证据：`README.md`, `examples/servers/streamable-http-stateless/README.md`, `LICENSE`\n- **回答用户时区分可预览内容与必须安装后才能验证的内容。**：安装前体验的消费者价值来自降低误装和误判，而不是伪装成真实运行。 证据：`README.md`, `examples/servers/streamable-http-stateless/README.md`, `LICENSE`\n\n## 用户开工前应该回答的问题\n\n- 你准备在哪个宿主 AI 或本地环境中使用它？\n- 你只是想先体验工作流，还是准备真实安装？\n- 你最在意的是安装成本、输出质量、还是和现有规则的冲突？\n\n## 验收标准\n\n- 所有能力声明都能回指到 evidence_refs 中的文件路径。\n- AI_CONTEXT_PACK.md 没有把预览包装成真实运行。\n- 用户能在 3 分钟内看懂适合谁、能做什么、如何开始和风险边界。\n\n---\n\n## Doramagic Context Augmentation\n\n下面内容用于强化 Repomix/AI Context Pack 主体。Human Manual 只提供阅读骨架；踩坑日志会被转成宿主 AI 必须遵守的工作约束。\n\n## Human Manual 骨架\n\n使用规则：这里只是项目阅读路线和显著性信号，不是事实权威。具体事实仍必须回到 repo evidence / Claim Graph。\n\n宿主 AI 硬性规则：\n- 不得把页标题、章节顺序、摘要或 importance 当作项目事实证据。\n- 解释 Human Manual 骨架时，必须明确说它只是阅读路线/显著性信号。\n- 能力、安装、兼容性、运行状态和风险判断必须引用 repo evidence、source path 或 Claim Graph。\n\n- **Introduction**：importance `high`\n  - source_paths: README.md, langchain_mcp_adapters/__init__.py\n- **Installation**：importance `high`\n  - source_paths: pyproject.toml\n- **Quick Start Guide**：importance `high`\n  - source_paths: README.md\n- **System Architecture**：importance `high`\n  - source_paths: langchain_mcp_adapters/__init__.py, langchain_mcp_adapters/tools.py, langchain_mcp_adapters/client.py, langchain_mcp_adapters/sessions.py\n- **Package 
Structure**：importance `medium`\n  - source_paths: langchain_mcp_adapters/tools.py, langchain_mcp_adapters/client.py, langchain_mcp_adapters/sessions.py, langchain_mcp_adapters/callbacks.py, langchain_mcp_adapters/interceptors.py\n- **Tool Conversion**：importance `high`\n  - source_paths: langchain_mcp_adapters/tools.py\n- **MultiServerMCPClient**：importance `high`\n  - source_paths: langchain_mcp_adapters/client.py, README.md\n- **Transport Types**：importance `high`\n  - source_paths: langchain_mcp_adapters/sessions.py, README.md\n\n## Repo Inspection Evidence / 源码检查证据\n\n- repo_clone_verified: true\n- repo_inspection_verified: true\n- repo_commit: `174c48e85c59d227f1b57c1c5dedd00c341e8086`\n- inspected_files: `pyproject.toml`, `README.md`, `uv.lock`, `examples/servers/streamable-http-stateless/pyproject.toml`, `examples/servers/streamable-http-stateless/README.md`, `examples/servers/streamable-http-stateless/mcp_simple_streamablehttp_stateless/server.py`, `examples/servers/streamable-http-stateless/mcp_simple_streamablehttp_stateless/__init__.py`, `examples/servers/streamable-http-stateless/mcp_simple_streamablehttp_stateless/__main__.py`\n\n宿主 AI 硬性规则：\n- 没有 repo_clone_verified=true 时，不得声称已经读过源码。\n- 没有 repo_inspection_verified=true 时，不得把 README/docs/package 文件判断写成事实。\n- 没有 quick_start_verified=true 时，不得声称 Quick Start 已跑通。\n\n## Doramagic Pitfall Constraints / 踩坑约束\n\n这些规则来自 Doramagic 发现、验证或编译过程中的项目专属坑点。宿主 AI 必须把它们当作工作约束，而不是普通说明文字。\n\n### Constraint 1: 来源证据：Prompts and Resources auto-discovery\n\n- Trigger: GitHub 社区证据显示该项目存在一个安装相关的待验证问题：Prompts and Resources auto-discovery\n- Host AI rule: 来源问题仍为 open，Pack Agent 需要复核是否仍影响当前版本。\n- Why it matters: 可能增加新用户试用和生产接入成本。\n- Evidence: community_evidence:github | cevd_bf1812b74caa4e989767a9307a8ffc16 | https://github.com/langchain-ai/langchain-mcp-adapters/issues/62 | 来源讨论提到 python 相关条件，需在安装/试用前复核。\n- Hard boundary: 不要把这个坑点包装成已解决、已验证或可忽略，除非后续验证证据明确证明它已经关闭。\n\n### Constraint 2: 来源证据：`MultiServerMCPClient.get_tools()` 
silently returns no tools when any single server fails to connect\n\n- Trigger: GitHub 社区证据显示该项目存在一个安装相关的待验证问题：`MultiServerMCPClient.get_tools()` silently returns no tools when any single server fails to connect\n- Host AI rule: 来源问题仍为 open，Pack Agent 需要复核是否仍影响当前版本。\n- Why it matters: 可能增加新用户试用和生产接入成本。\n- Evidence: community_evidence:github | cevd_a5093182914b4df0b7ad2cd560bacdf2 | https://github.com/langchain-ai/langchain-mcp-adapters/issues/492 | 来源讨论提到 python 相关条件，需在安装/试用前复核。\n- Hard boundary: 不要把这个坑点包装成已解决、已验证或可忽略，除非后续验证证据明确证明它已经关闭。\n\n### Constraint 3: 来源证据：Fix TypeError in resources.py and make __aexit__ an async coroutine in client.py\n\n- Trigger: GitHub 社区证据显示该项目存在一个运行相关的待验证问题：Fix TypeError in resources.py and make __aexit__ an async coroutine in client.py\n- Host AI rule: 来源问题仍为 open，Pack Agent 需要复核是否仍影响当前版本。\n- Why it matters: 可能增加新用户试用和生产接入成本。\n- Evidence: community_evidence:github | cevd_ac102050dd4841d6954559a3413e0b92 | https://github.com/langchain-ai/langchain-mcp-adapters/issues/496 | 来源类型 github_issue 暴露的待验证使用条件。\n- Hard boundary: 不要把这个坑点包装成已解决、已验证或可忽略，除非后续验证证据明确证明它已经关闭。\n\n### Constraint 4: 来源证据：langchain-mcp-adapters==0.2.2\n\n- Trigger: GitHub 社区证据显示该项目存在一个安装相关的待验证问题：langchain-mcp-adapters==0.2.2\n- Host AI rule: 来源显示可能已有修复、规避或版本变化，说明书中必须标注适用版本。\n- Why it matters: 可能增加新用户试用和生产接入成本。\n- Evidence: community_evidence:github | cevd_0c6ca0722ab046379d28ecf30f8d2bcf | https://github.com/langchain-ai/langchain-mcp-adapters/releases/tag/langchain-mcp-adapters%3D%3D0.2.2 | 来源类型 github_release 暴露的待验证使用条件。\n- Hard boundary: 不要把这个坑点包装成已解决、已验证或可忽略，除非后续验证证据明确证明它已经关闭。\n\n### Constraint 5: 来源证据：langchain-mcp-adapters==0.1.10\n\n- Trigger: GitHub 社区证据显示该项目存在一个配置相关的待验证问题：langchain-mcp-adapters==0.1.10\n- Host AI rule: 来源显示可能已有修复、规避或版本变化，说明书中必须标注适用版本。\n- Why it matters: 可能增加新用户试用和生产接入成本。\n- Evidence: community_evidence:github | cevd_8b18dbf32ccd41e38b272a458f4040f5 | 
https://github.com/langchain-ai/langchain-mcp-adapters/releases/tag/langchain-mcp-adapters%3D%3D0.1.10 | 来源讨论提到 python 相关条件，需在安装/试用前复核。\n- Hard boundary: 不要把这个坑点包装成已解决、已验证或可忽略，除非后续验证证据明确证明它已经关闭。\n\n### Constraint 6: 来源证据：langchain-mcp-adapters==0.1.14\n\n- Trigger: GitHub 社区证据显示该项目存在一个能力理解相关的待验证问题：langchain-mcp-adapters==0.1.14\n- Host AI rule: 来源显示可能已有修复、规避或版本变化，说明书中必须标注适用版本。\n- Why it matters: 可能增加新用户试用和生产接入成本。\n- Evidence: community_evidence:github | cevd_6727e0d698e54fc38d7c60e262978ac2 | https://github.com/langchain-ai/langchain-mcp-adapters/releases/tag/langchain-mcp-adapters%3D%3D0.1.14 | 来源类型 github_release 暴露的待验证使用条件。\n- Hard boundary: 不要把这个坑点包装成已解决、已验证或可忽略，除非后续验证证据明确证明它已经关闭。\n\n### Constraint 7: 能力判断依赖假设\n\n- Trigger: README/documentation is current enough for a first validation pass.\n- Host AI rule: 将假设转成下游验证清单。\n- Why it matters: 假设不成立时，用户拿不到承诺的能力。\n- Evidence: capability.assumptions | github_repo:929158279 | https://github.com/langchain-ai/langchain-mcp-adapters | README/documentation is current enough for a first validation pass.\n- Hard boundary: 不要把这个坑点包装成已解决、已验证或可忽略，除非后续验证证据明确证明它已经关闭。\n\n### Constraint 8: 来源证据：langchain-mcp-adapters==0.1.12\n\n- Trigger: GitHub 社区证据显示该项目存在一个运行相关的待验证问题：langchain-mcp-adapters==0.1.12\n- Host AI rule: 来源显示可能已有修复、规避或版本变化，说明书中必须标注适用版本。\n- Why it matters: 可能增加新用户试用和生产接入成本。\n- Evidence: community_evidence:github | cevd_e71a46a9e0374d139555a78f229b0469 | https://github.com/langchain-ai/langchain-mcp-adapters/releases/tag/langchain-mcp-adapters%3D%3D0.1.12 | 来源类型 github_release 暴露的待验证使用条件。\n- Hard boundary: 不要把这个坑点包装成已解决、已验证或可忽略，除非后续验证证据明确证明它已经关闭。\n\n### Constraint 9: 来源证据：langchain-mcp-adapters==0.2.0\n\n- Trigger: GitHub 社区证据显示该项目存在一个维护/版本相关的待验证问题：langchain-mcp-adapters==0.2.0\n- Host AI rule: 来源显示可能已有修复、规避或版本变化，说明书中必须标注适用版本。\n- Why it matters: 可能影响升级、迁移或版本选择。\n- Evidence: community_evidence:github | cevd_59483f9a6a16414c9087b1751fba8efc | 
https://github.com/langchain-ai/langchain-mcp-adapters/releases/tag/langchain-mcp-adapters%3D%3D0.2.0 | 来源类型 github_release 暴露的待验证使用条件。\n- Hard boundary: 不要把这个坑点包装成已解决、已验证或可忽略，除非后续验证证据明确证明它已经关闭。\n\n### Constraint 10: 来源证据：langchain-mcp-adapters==0.2.0a1\n\n- Trigger: GitHub 社区证据显示该项目存在一个维护/版本相关的待验证问题：langchain-mcp-adapters==0.2.0a1\n- Host AI rule: 来源显示可能已有修复、规避或版本变化，说明书中必须标注适用版本。\n- Why it matters: 可能影响升级、迁移或版本选择。\n- Evidence: community_evidence:github | cevd_4e7fcda1716948898295279af95f8f96 | https://github.com/langchain-ai/langchain-mcp-adapters/releases/tag/langchain-mcp-adapters%3D%3D0.2.0a1 | 来源类型 github_release 暴露的待验证使用条件。\n- Hard boundary: 不要把这个坑点包装成已解决、已验证或可忽略，除非后续验证证据明确证明它已经关闭。\n",
      "summary": "给宿主 AI 的上下文和工作边界。",
      "title": "AI Context Pack / 带给我的 AI"
    },
    "boundary_risk_card": {
      "asset_id": "boundary_risk_card",
      "filename": "BOUNDARY_RISK_CARD.md",
      "markdown": "# Boundary & Risk Card / 安装前决策卡\n\n项目：langchain-ai/langchain-mcp-adapters\n\n## Doramagic 试用结论\n\n当前结论：可以进入发布前推荐检查；首次使用仍应从最小权限、临时目录和可回滚配置开始。\n\n## 用户现在可以做\n\n- 可以先阅读 Human Manual，理解项目目的和主要工作流。\n- 可以复制 Prompt Preview 做安装前体验；这只验证交互感，不代表真实运行。\n- 可以把官方 Quick Start 命令放到隔离环境中验证，不要直接进主力环境。\n\n## 现在不要做\n\n- 不要把 Prompt Preview 当成项目实际运行结果。\n- 不要把 metadata-only validation 当成沙箱安装验证。\n- 不要把未验证能力写成“已支持、已跑通、可放心安装”。\n- 不要在首次试用时交出生产数据、私人文件、真实密钥或主力配置目录。\n\n## 安装前检查\n\n- 宿主 AI 是否匹配：mcp_host\n- 官方安装入口状态：已发现官方入口\n- 是否在临时目录、临时宿主或容器中验证：必须是\n- 是否能回滚配置改动：必须能\n- 是否需要 API Key、网络访问、读写文件或修改宿主配置：未确认前按高风险处理\n- 是否记录了安装命令、实际输出和失败日志：必须记录\n\n## 当前阻塞项\n\n- 无阻塞项。\n\n## 项目专属踩坑\n\n- 来源证据：Prompts and Resources auto-discovery（high）：可能增加新用户试用和生产接入成本。 建议检查：来源问题仍为 open，Pack Agent 需要复核是否仍影响当前版本。\n- 来源证据：`MultiServerMCPClient.get_tools()` silently returns no tools when any single server fails to connect（high）：可能增加新用户试用和生产接入成本。 建议检查：来源问题仍为 open，Pack Agent 需要复核是否仍影响当前版本。\n- 来源证据：Fix TypeError in resources.py and make __aexit__ an async coroutine in client.py（high）：可能增加新用户试用和生产接入成本。 建议检查：来源问题仍为 open，Pack Agent 需要复核是否仍影响当前版本。\n- 来源证据：langchain-mcp-adapters==0.2.2（medium）：可能增加新用户试用和生产接入成本。 建议检查：来源显示可能已有修复、规避或版本变化，说明书中必须标注适用版本。\n- 来源证据：langchain-mcp-adapters==0.1.10（medium）：可能增加新用户试用和生产接入成本。 建议检查：来源显示可能已有修复、规避或版本变化，说明书中必须标注适用版本。\n\n## 风险与权限提示\n\n- no_demo: medium\n\n## 证据缺口\n\n- 暂未发现结构化证据缺口。\n",
      "summary": "安装、权限、验证和推荐前风险。",
      "title": "Boundary & Risk Card / 边界与风险卡"
    },
    "human_manual": {
      "asset_id": "human_manual",
      "filename": "HUMAN_MANUAL.md",
      "markdown": "# https://github.com/langchain-ai/langchain-mcp-adapters 项目说明书\n\n生成时间：2026-05-15 14:10:43 UTC\n\n## 目录\n\n- [Introduction](#page-introduction)\n- [Installation](#page-installation)\n- [Quick Start Guide](#page-quickstart)\n- [System Architecture](#page-architecture)\n- [Package Structure](#page-package-structure)\n- [Tool Conversion](#page-tool-conversion)\n- [MultiServerMCPClient](#page-multiserver-client)\n- [Transport Types](#page-transport-types)\n- [Callbacks](#page-callbacks)\n- [Tool Call Interceptors](#page-interceptors)\n\n<a id='page-introduction'></a>\n\n## Introduction\n\n### 相关页面\n\n相关主题：[Installation](#page-installation), [Quick Start Guide](#page-quickstart)\n\n<details>\n<summary>相关源码文件</summary>\n\n以下源码文件用于生成本页说明：\n\n- [README.md](https://github.com/langchain-ai/langchain-mcp-adapters/blob/main/README.md)\n- [langchain_mcp_adapters/__init__.py](https://github.com/langchain-ai/langchain-mcp-adapters/blob/main/langchain_mcp_adapters/__init__.py)\n- [langchain_mcp_adapters/tools.py](https://github.com/langchain-ai/langchain-mcp-adapters/blob/main/langchain_mcp_adapters/tools.py)\n- [langchain_mcp_adapters/resources.py](https://github.com/langchain-ai/langchain-mcp-adapters/blob/main/langchain_mcp_adapters/resources.py)\n- [langchain_mcp_adapters/client.py](https://github.com/langchain-ai/langchain-mcp-adapters/blob/main/langchain_mcp_adapters/client.py)\n- [langchain_mcp_adapters/sessions.py](https://github.com/langchain-ai/langchain-mcp-adapters/blob/main/langchain_mcp_adapters/sessions.py)\n- [langchain_mcp_adapters/interceptors.py](https://github.com/langchain-ai/langchain-mcp-adapters/blob/main/langchain_mcp_adapters/interceptors.py)\n</details>\n\n# Introduction\n\nLangChain MCP Adapters is a Python library that bridges the gap between the Model Context Protocol (MCP) ecosystem and LangChain/LangGraph applications. 
This library provides a lightweight wrapper that converts MCP tools, prompts, and resources into LangChain-compatible formats, enabling seamless integration of MCP servers with AI agents and applications built on the LangChain framework.\n\n## Overview\n\nThe Model Context Protocol (MCP) is an open protocol developed by Anthropic that enables AI applications to connect with external data sources, tools, and services. MCP defines a standard interface for AI models to interact with various resources through a client-server architecture.\n\nLangChain MCP Adapters serves as the integration layer between these two ecosystems. It allows developers to:\n\n- Use MCP servers as tool providers for LangChain and LangGraph agents\n- Load tools from multiple MCP servers simultaneously\n- Convert MCP resources into LangChain Blob objects for processing\n- Transform MCP prompts into formats compatible with LangChain\n- Intercept and modify tool call behavior through a configurable middleware pattern\n\n资料来源：[README.md:1-20](https://github.com/langchain-ai/langchain-mcp-adapters/blob/main/README.md)\n\n## Architecture\n\nThe library follows a modular architecture with clear separation of concerns across several key components:\n\n```mermaid\ngraph TD\n    A[LangChain/LangGraph Agent] --> B[langchain-mcp-adapters]\n    B --> C[Tools Adapter]\n    B --> D[Resources Adapter]\n    B --> E[Prompts Adapter]\n    B --> F[MultiServerMCPClient]\n    C --> G[MCP ClientSession]\n    D --> G\n    E --> G\n    F --> H[Connection Manager]\n    H --> I[StdioConnection]\n    H --> J[StreamableHttpConnection]\n    H --> K[SSEConnection]\n    H --> L[WebsocketConnection]\n    G --> M[MCP Server 1]\n    G --> N[MCP Server 2]\n    G --> O[MCP Server N]\n```\n\n### Core Components\n\n| Component | File | Purpose |\n|-----------|------|---------|\n| `MultiServerMCPClient` | `client.py` | Manages connections to multiple MCP servers |\n| `load_mcp_tools()` | `tools.py` | Converts MCP tools to LangChain 
tools |\n| `load_mcp_resources()` | `resources.py` | Converts MCP resources to LangChain Blobs |\n| `load_mcp_prompt()` | `prompts.py` | Converts MCP prompts to LangChain prompts |\n| `ToolCallInterceptor` | `interceptors.py` | Middleware for tool call lifecycle management |\n\n资料来源：[langchain_mcp_adapters/__init__.py:1-12](https://github.com/langchain-ai/langchain-mcp-adapters/blob/main/langchain_mcp_adapters/__init__.py)\n\n## Supported Transports\n\nThe library supports multiple transport mechanisms for connecting to MCP servers. Each transport type is implemented in the sessions module and provides different capabilities for various deployment scenarios.\n\n```mermaid\ngraph LR\n    A[Client Application] --> B[Transport Layer]\n    B --> C[stdio]\n    B --> D[streamable-http]\n    B --> E[SSE]\n    B --> F[WebSocket]\n    C --> G[Local Process]\n    D --> H[HTTP Server]\n    E --> H\n    F --> H\n```\n\n### Transport Comparison\n\n| Transport | Use Case | Headers Support | Stateful | Notes |\n|-----------|----------|-----------------|----------|-------|\n| `stdio` | Local subprocesses | No | Yes | Standard I/O communication |\n| `streamable-http` | HTTP-based servers | Yes | Configurable | Recommended for stateless deployments |\n| `sse` | Server-Sent Events | Yes | Yes | Bidirectional communication |\n| `websocket` | Persistent connections | No | Yes | Low latency, real-time |\n\n资料来源：[langchain_mcp_adapters/sessions.py](https://github.com/langchain-ai/langchain-mcp-adapters/blob/main/langchain_mcp_adapters/sessions.py)\n\n## Tool Conversion Process\n\nWhen loading MCP tools, the library performs a series of conversions to transform the tool definitions into LangChain-compatible `StructuredTool` objects. 
This process involves mapping MCP tool schemas, descriptions, and execution semantics.\n\n```mermaid\ngraph TD\n    A[MCP Tool Definition] --> B[Extract inputSchema]\n    B --> C[Create StructuredTool]\n    C --> D[Wrap with interceptor chain]\n    D --> E[Return BaseTool]\n    E --> F[Used by LangChain Agent]\n    F --> G[Tool call invocation]\n    G --> H[MCP ClientSession.call_tool]\n    H --> I[Result conversion]\n    I --> J[Return to Agent]\n```\n\n### Tool Result Handling\n\nThe tool adapter handles various content types returned by MCP tools:\n\n| MCP Content Type | LangChain Output | Notes |\n|------------------|------------------|-------|\n| `TextContent` | `{\"type\": \"text\", \"text\": ...}` | Direct text conversion |\n| `ImageContent` | `{\"type\": \"image\", \"base64\": ..., \"mime_type\": ...}` | Image data with MIME type |\n| `ResourceLink` (image/*) | `{\"type\": \"image\", \"url\": ...}` | Image URL reference |\n| `ResourceLink` (other) | `{\"type\": \"file\", \"url\": ...}` | File URL reference |\n| `EmbeddedResource` (text) | `{\"type\": \"text\", \"text\": ...}` | Embedded text content |\n| `EmbeddedResource` (blob) | `{\"type\": \"image\"/\"file\", ...}` | Binary content |\n\n资料来源：[langchain_mcp_adapters/tools.py:70-130](https://github.com/langchain-ai/langchain-mcp-adapters/blob/main/langchain_mcp_adapters/tools.py)\n\n## Interceptor System\n\nThe library provides a powerful interceptor mechanism that allows developers to intercept and modify tool call behavior. 
This follows the onion pattern (also known as decorator pattern) for composable middleware.\n\n```mermaid\ngraph TD\n    A[Request] --> B[Interceptor 1]\n    B --> C[Interceptor 2]\n    C --> D[Interceptor N]\n    D --> E[Base Handler<br/>session.call_tool]\n    E --> F[Interceptor N Result]\n    F --> G[Interceptor 2 Result]\n    G --> H[Interceptor 1 Result]\n    H --> I[Response]\n```\n\n### ToolCallInterceptor Interface\n\nInterceptors implement the `ToolCallInterceptor` protocol and can:\n\n- Modify tool arguments before execution\n- Change the tool name being called\n- Add or modify HTTP headers for requests\n- Transform or wrap the result\n- Handle errors and retry logic\n- Support LangGraph's `Command` for state modification\n\n资料来源：[langchain_mcp_adapters/interceptors.py:1-50](https://github.com/langchain-ai/langchain-mcp-adapters/blob/main/langchain_mcp_adapters/interceptors.py)\n\n## Resource Conversion\n\nMCP resources are converted to LangChain `Blob` objects, enabling integration with LangChain's document loading and processing capabilities.\n\n```mermaid\ngraph TD\n    A[MCP Resource URI] --> B[session.read_resource]\n    B --> C[ResourceContents]\n    C --> D{Content Type?}\n    D -->|TextResourceContents| E[Extract text]\n    D -->|BlobResourceContents| F[base64 decode]\n    E --> G[Blob.from_data]\n    F --> G\n    G --> H[LangChain Blob]\n```\n\n资料来源：[langchain_mcp_adapters/resources.py:1-60](https://github.com/langchain-ai/langchain-mcp-adapters/blob/main/langchain_mcp_adapters/resources.py)\n\n## Basic Usage Patterns\n\n### Single Server with load_mcp_tools\n\n```python\nfrom mcp import ClientSession\nfrom langchain_mcp_adapters.tools import load_mcp_tools\n\n# Initialize MCP client session\nasync with ClientSession(read, write) as session:\n    await session.initialize()\n    tools = await load_mcp_tools(session)\n    # Use tools with LangChain agent\n```\n\n### Multi-Server with MultiServerMCPClient\n\n```python\nfrom 
langchain_mcp_adapters.client import MultiServerMCPClient\n\nclient = MultiServerMCPClient({\n    \"math\": {\n        \"command\": \"python\",\n        \"args\": [\"./math_server.py\"],\n        \"transport\": \"stdio\",\n    },\n    \"weather\": {\n        \"url\": \"http://localhost:8000/mcp\",\n        \"transport\": \"http\",\n    }\n})\ntools = await client.get_tools()\n```\n\n资料来源：[README.md:40-80](https://github.com/langchain-ai/langchain-mcp-adapters/blob/main/README.md)\n\n## Installation\n\nThe library can be installed via pip:\n\n```bash\npip install langchain-mcp-adapters\n```\n\nFor LangGraph integration with full agent capabilities:\n\n```bash\npip install langchain-mcp-adapters langgraph \"langchain[openai]\"\n```\n\n资料来源：[README.md:25-30](https://github.com/langchain-ai/langchain-mcp-adapters/blob/main/README.md)\n\n## Key Features Summary\n\n| Feature | Description |\n|---------|-------------|\n| Tool Conversion | Convert MCP tools to LangChain `StructuredTool` objects |\n| Multi-Server Support | Connect to multiple MCP servers simultaneously |\n| Resource Loading | Convert MCP resources to LangChain Blobs |\n| Transport Flexibility | Support for stdio, HTTP, SSE, and WebSocket transports |\n| Interceptor Middleware | Hook into tool call lifecycle for custom behavior |\n| LangGraph Integration | Full compatibility with LangGraph agents and state management |\n| Pagination Support | Automatic handling of paginated tool listings |\n\n## Related Documentation\n\n- [Tools Module](./tools) - Detailed guide on tool conversion and execution\n- [Client Module](./client) - Multi-server client configuration and usage\n- [Resources Module](./resources) - Resource loading and conversion\n- [Interceptors](./interceptors) - Middleware and request/response modification\n- [Sessions](./sessions) - Transport layer implementation details\n\n---\n\n<a id='page-installation'></a>\n\n## Installation\n\n### 相关页面\n\n相关主题：[Introduction](#page-introduction), [Quick Start 
Guide](#page-quickstart)\n\n<details>\n<summary>相关源码文件</summary>\n\n以下源码文件用于生成本页说明：\n\n- [pyproject.toml](https://github.com/langchain-ai/langchain-mcp-adapters/blob/main/pyproject.toml)\n- [langchain_mcp_adapters/tools.py](https://github.com/langchain-ai/langchain-mcp-adapters/blob/main/langchain_mcp_adapters/tools.py)\n- [langchain_mcp_adapters/resources.py](https://github.com/langchain-ai/langchain-mcp-adapters/blob/main/langchain_mcp_adapters/resources.py)\n- [langchain_mcp_adapters/client.py](https://github.com/langchain-ai/langchain-mcp-adapters/blob/main/langchain_mcp_adapters/client.py)\n- [langchain_mcp_adapters/sessions.py](https://github.com/langchain-ai/langchain-mcp-adapters/blob/main/langchain_mcp_adapters/sessions.py)\n- [langchain_mcp_adapters/interceptors.py](https://github.com/langchain-ai/langchain-mcp-adapters/blob/main/langchain_mcp_adapters/interceptors.py)\n</details>\n\n# Installation\n\nThis page documents how to install and set up the **langchain-mcp-adapters** library, which provides a lightweight wrapper that makes [Anthropic Model Context Protocol (MCP)](https://modelcontextprotocol.io/introduction) tools compatible with [LangChain](https://github.com/langchain-ai/langchain) and [LangGraph](https://github.com/langchain-ai/langgraph).\n\n## Overview\n\nThe `langchain-mcp-adapters` library bridges MCP servers with LangChain/LangGraph ecosystems. 
It enables:\n\n- Converting MCP tools into LangChain tools\n- Connecting to multiple MCP servers simultaneously\n- Loading and managing MCP resources as LangChain Blob objects\n- Intercepting and modifying tool call execution\n\n资料来源：[README.md:1-20]()\n\n## Prerequisites\n\n### Python Version\n\n| Version | Support Status |\n|---------|----------------|\n| Python 3.10+ | Required |\n| Python 3.11+ | Recommended |\n| Python 3.12+ | Supported |\n\n### Required Dependencies\n\nThe following packages are automatically installed as dependencies:\n\n| Package | Purpose | Min Version |\n|---------|---------|-------------|\n| `langchain-core` | Core LangChain functionality | Latest stable |\n| `mcp` | Model Context Protocol SDK | Latest stable |\n| `pydantic` | Data validation and settings | V2 |\n| `httpx` | HTTP client for streamable HTTP transport | Latest stable |\n\n### Optional Dependencies\n\n| Package | Purpose | Install Command |\n|---------|---------|-----------------|\n| `langgraph` | For LangGraph agent support | `pip install langgraph` |\n| `langchain[openai]` | OpenAI integration for agents | `pip install \"langchain[openai]\"` |\n\n资料来源：[langchain_mcp_adapters/tools.py:1-50]()\n\n## Basic Installation\n\n### Standard Installation\n\nInstall the core package using pip:\n\n```bash\npip install langchain-mcp-adapters\n```\n\n资料来源：[README.md:32]()\n\n### With LangGraph Support\n\nFor full LangGraph agent functionality:\n\n```bash\npip install langchain-mcp-adapters langgraph \"langchain[openai]\"\n```\n\nThis installs:\n- The MCP adapters library\n- LangGraph for building stateful agents\n- OpenAI integration for LLM-powered agents\n\n资料来源：[README.md:32-36]()\n\n## Environment Configuration\n\n### OpenAI API Key\n\nIf using OpenAI models with the library, set your API key:\n\n```bash\nexport OPENAI_API_KEY=<your_api_key>\n```\n\nAlternatively, pass it programmatically:\n\n```python\nimport os\nos.environ[\"OPENAI_API_KEY\"] = \"your-api-key\"\n```\n\n## Package 
Dependencies Graph\n\n```mermaid\ngraph TD\n    subgraph \"langchain-mcp-adapters\"\n        A[tools.py] --> B[Base Tools Module]\n        A --> C[Tool Interceptors]\n        D[resources.py] --> E[Resource Adapter]\n        F[client.py] --> G[MultiServerMCPClient]\n        H[sessions.py] --> I[Session Management]\n    end\n    \n    subgraph \"Required Dependencies\"\n        J[langchain-core] --> B\n        J --> E\n        K[mcp Python SDK] --> B\n        K --> G\n        K --> I\n        L[pydantic] --> B\n        M[httpx] --> I\n    end\n    \n    subgraph \"Optional Dependencies\"\n        N[langgraph] -.->|if installed| B\n        N -.->|if installed| G\n    end\n```\n\n## Installation Verification\n\nAfter installation, verify the package is correctly installed:\n\n```python\nimport langchain_mcp_adapters\nprint(langchain_mcp_adapters.__version__)\n```\n\nTest basic MCP tool loading:\n\n```python\nfrom langchain_mcp_adapters.tools import load_mcp_tools\nfrom langchain_mcp_adapters.client import MultiServerMCPClient\n\n# Verify imports work\nprint(\"Installation verified successfully!\")\n```\n\n## Transport-Specific Installation Notes\n\nThe library supports multiple MCP server transport types, each with specific requirements:\n\n### Standard I/O (stdio) Transport\n\nNo additional dependencies required. 
Uses the built-in `mcp` SDK stdio client.\n\n资料来源：[langchain_mcp_adapters/sessions.py:1-100]()\n\n### Streamable HTTP Transport\n\nRequires `httpx` for HTTP client functionality (included by default).\n\n```bash\npip install langchain-mcp-adapters\n# httpx is installed as a dependency\n```\n\n### Server-Sent Events (SSE) Transport\n\nRequires `httpx` with SSE support (included by default).\n\n资料来源：[langchain_mcp_adapters/sessions.py:100-200]()\n\n## Installing Development Version\n\n### From Source\n\nTo install the latest development version from the repository:\n\n```bash\ngit clone https://github.com/langchain-ai/langchain-mcp-adapters.git\ncd langchain-mcp-adapters\npip install -e .\n```\n\n### With Development Dependencies\n\n```bash\ngit clone https://github.com/langchain-ai/langchain-mcp-adapters.git\ncd langchain-mcp-adapters\npip install -e \".[dev]\"\n```\n\n## Dependency Resolution\n\n### Core Dependencies\n\nThe package requires these core dependencies which are installed automatically:\n\n```toml\n# From pyproject.toml\ndependencies = [\n    \"langchain-core>=0.0.1\",\n    \"mcp>=1.0.0\",\n    \"pydantic>=2.0.0\",\n    \"httpx>=0.25.0\",\n]\n```\n\n### Optional Feature Dependencies\n\n| Feature | Dependencies |\n|---------|--------------|\n| LangGraph Support | `langgraph` |\n| All Features | `langgraph`, `langchain[openai]` |\n\n资料来源：[pyproject.toml](https://github.com/langchain-ai/langchain-mcp-adapters/blob/main/pyproject.toml)\n\n## Importing the Package\n\nAfter installation, import the main components:\n\n```python\n# Core tools module\nfrom langchain_mcp_adapters.tools import load_mcp_tools, convert_mcp_tool_to_langchain_tool\n\n# Multi-server client\nfrom langchain_mcp_adapters.client import MultiServerMCPClient\n\n# Resource adapter\nfrom langchain_mcp_adapters.resources import load_mcp_resources, get_mcp_resource\n\n# Session management\nfrom langchain_mcp_adapters.sessions import create_session, Connection\n\n# Interceptors (optional)\nfrom 
langchain_mcp_adapters.interceptors import ToolCallInterceptor\n```\n\n资料来源：[langchain_mcp_adapters/tools.py:1-50]()\n\n## Next Steps\n\nAfter installation, proceed to:\n\n1. **[Quickstart Guide](README.md)** - Get started with basic MCP tool usage\n2. **[Multi-Server Setup](README.md)** - Connect to multiple MCP servers\n3. **[LangGraph Integration](README.md)** - Build agents with MCP tools\n4. **[Client Configuration](README.md)** - Configure connection options and transports\n\n---\n\n<a id='page-quickstart'></a>\n\n## Quick Start Guide\n\n### 相关页面\n\n相关主题：[Introduction](#page-introduction), [Tool Conversion](#page-tool-conversion), [MultiServerMCPClient](#page-multiserver-client)\n\n<details>\n<summary>Relevant Source Files</summary>\n\nThe following source files were used to generate this page:\n\n- [README.md](https://github.com/langchain-ai/langchain-mcp-adapters/blob/main/README.md)\n- [langchain_mcp_adapters/tools.py](https://github.com/langchain-ai/langchain-mcp-adapters/blob/main/langchain_mcp_adapters/tools.py)\n- [langchain_mcp_adapters/resources.py](https://github.com/langchain-ai/langchain-mcp-adapters/blob/main/langchain_mcp_adapters/resources.py)\n- [langchain_mcp_adapters/client.py](https://github.com/langchain-ai/langchain-mcp-adapters/blob/main/langchain_mcp_adapters/client.py)\n- [langchain_mcp_adapters/interceptors.py](https://github.com/langchain-ai/langchain-mcp-adapters/blob/main/langchain_mcp_adapters/interceptors.py)\n- [examples/servers/streamable-http-stateless/mcp_simple_streamablehttp_stateless/__main__.py](https://github.com/langchain-ai/langchain-mcp-adapters/blob/main/examples/servers/streamable-http-stateless/mcp_simple_streamablehttp_stateless/__main__.py)\n</details>\n\n# Quick Start Guide\n\nThis guide provides a comprehensive introduction to **langchain-mcp-adapters**, a library that bridges [Anthropic's Model Context Protocol (MCP)](https://modelcontextprotocol.io/introduction) servers with 
[LangChain](https://github.com/langchain-ai/langchain) and [LangGraph](https://github.com/langchain-ai/langgraph) applications.\n\n## Overview\n\nThe langchain-mcp-adapters library serves two primary purposes:\n\n1. **Tool Conversion**: Transform MCP tools into LangChain-compatible tools that integrate seamlessly with LangGraph agents\n2. **Multi-Server Client**: Manage connections to multiple MCP servers simultaneously\n\nThe library provides a lightweight wrapper that enables developers to leverage MCP servers' capabilities within the LangChain ecosystem without additional boilerplate code.\n\n## Installation\n\nInstall the core package along with required dependencies:\n\n```bash\npip install langchain-mcp-adapters\n```\n\nFor development with OpenAI models:\n\n```bash\npip install langchain-mcp-adapters langgraph \"langchain[openai]\"\n```\n\n## Architecture Overview\n\nThe library follows a layered architecture where MCP client sessions interact with server tools, prompts, and resources through adapter classes that convert data formats between MCP and LangChain standards.\n\n```mermaid\ngraph TD\n    A[LangChain / LangGraph Application] --> B[langchain-mcp-adapters]\n    B --> C[MultiServerMCPClient]\n    B --> D[Individual Tool Conversion]\n    C --> E[MCP Server 1]\n    C --> F[MCP Server 2]\n    C --> N[MCP Server N]\n    D --> E\n    D --> F\n    D --> N\n    E --> G[stdio Transport]\n    F --> H[HTTP Transport]\n    F --> I[SSE Transport]\n    F --> J[WebSocket Transport]\n```\n\n## Core Components\n\n### MultiServerMCPClient\n\nThe `MultiServerMCPClient` manages connections to multiple MCP servers and provides unified access to their tools, prompts, and resources.\n\n**资料来源**：[langchain_mcp_adapters/client.py:1-50]()\n\n#### Connection Configuration\n\n| Parameter | Type | Description |\n|-----------|------|-------------|\n| `command` | `str` | Executable command (e.g., `\"python\"`, `\"node\"`) |\n| `args` | `list[str]` | Command arguments |\n| 
`transport` | `str` | Transport type: `stdio`, `http`, `sse`, `websocket` |\n| `url` | `str` | Server URL for HTTP/SSE/WebSocket transports |\n| `headers` | `dict[str, str]` | Custom HTTP headers for requests |\n\n#### Supported Transports\n\n| Transport | Use Case | Notes |\n|----------|----------|-------|\n| `stdio` | Local subprocess servers | Communication via stdin/stdout |\n| `http` | Remote HTTP servers | REST-based communication |\n| `sse` | Servers using Server-Sent Events | Real-time streaming |\n| `websocket` | WebSocket connections | Bidirectional communication |\n\n**资料来源**：[langchain_mcp_adapters/client.py:1-100]()\n\n## Basic Usage Patterns\n\n### Pattern 1: Direct Session Usage\n\nFor single-server scenarios, create an MCP session and load tools directly:\n\n```python\nfrom mcp import ClientSession\nfrom mcp.client.streamable_http import streamablehttp_client\nfrom langchain_mcp_adapters.tools import load_mcp_tools\n\nasync with streamablehttp_client(\"http://localhost:3000/mcp\") as (read, write, _):\n    async with ClientSession(read, write) as session:\n        await session.initialize()\n        tools = await load_mcp_tools(session)\n        # Use tools with LangChain/LangGraph\n```\n\n**资料来源**：[README.md:1-50]()\n\n### Pattern 2: MultiServerMCPClient with stdio\n\nConnect to locally running MCP servers using standard I/O:\n\n```python\nfrom langchain_mcp_adapters.client import MultiServerMCPClient\n\nclient = MultiServerMCPClient(\n    {\n        \"math\": {\n            \"command\": \"python\",\n            \"args\": [\"/path/to/math_server.py\"],\n            \"transport\": \"stdio\",\n        },\n    }\n)\ntools = await client.get_tools()\n```\n\n**资料来源**：[README.md:50-100]()\n\n### Pattern 3: MultiServerMCPClient with HTTP\n\nConnect to remote MCP servers via HTTP transport:\n\n```python\nfrom langchain_mcp_adapters.client import MultiServerMCPClient\n\nclient = MultiServerMCPClient(\n    {\n        \"weather\": {\n            \"url\": 
\"http://localhost:8000/mcp\",\n            \"transport\": \"http\",\n        }\n    }\n)\ntools = await client.get_tools()\n```\n\n**资料来源**：[README.md:100-150]()\n\n### Pattern 4: Explicit Session Management\n\nFor advanced scenarios requiring direct session access:\n\n```python\nfrom langchain_mcp_adapters.client import MultiServerMCPClient\nfrom langchain_mcp_adapters.tools import load_mcp_tools\n\nclient = MultiServerMCPClient({...})\nasync with client.session(\"math\") as session:\n    tools = await load_mcp_tools(session)\n```\n\n**资料来源**：[langchain_mcp_adapters/client.py:50-80]()\n\n## Tool Loading\n\n### load_mcp_tools Function\n\nThe `load_mcp_tools` function retrieves all available tools from an MCP session and converts them to LangChain tools.\n\n**资料来源**：[langchain_mcp_adapters/tools.py:100-200]()\n\n#### Parameters\n\n| Parameter | Type | Required | Description |\n|-----------|------|----------|-------------|\n| `session` | `ClientSession` | Yes | MCP client session |\n| `connection` | `Connection` | No | Connection config if session is `None` |\n| `callbacks` | `Callbacks` | No | Event notification handlers |\n| `tool_interceptors` | `list[ToolCallInterceptor]` | No | Interceptors for tool call processing |\n| `server_name` | `str` | No | Server identifier for logging |\n| `tool_name_prefix` | `bool` | No | Prefix tool names with server name (default: `False`) |\n\n#### Return Value\n\nReturns a `list[BaseTool]` containing LangChain-compatible tool objects. 
Each tool's metadata includes annotations from the MCP tool definition.\n\n**资料来源**：[langchain_mcp_adapters/tools.py:200-300]()\n\n## Integration with LangGraph\n\n### Complete Agent Setup\n\nThe following example demonstrates a full LangGraph agent setup using MCP tools:\n\n```python\nfrom langchain_mcp_adapters.client import MultiServerMCPClient\nfrom langgraph.graph import StateGraph, MessagesState, START\nfrom langgraph.prebuilt import ToolNode, tools_condition\nfrom langchain.chat_models import init_chat_model\n\nmodel = init_chat_model(\"openai:gpt-4.1\")\n\nclient = MultiServerMCPClient(\n    {\n        \"math\": {\n            \"command\": \"python\",\n            \"args\": [\"./examples/math_server.py\"],\n            \"transport\": \"stdio\",\n        },\n        \"weather\": {\n            \"url\": \"http://localhost:8000/mcp\",\n            \"transport\": \"http\",\n        }\n    }\n)\n\ntools = await client.get_tools()\n\ndef call_model(state: MessagesState):\n    response = model.bind_tools(tools).invoke(state[\"messages\"])\n    return {\"messages\": response}\n\nbuilder = StateGraph(MessagesState)\nbuilder.add_node(\"call_model\", call_model)\nbuilder.add_node(\"tools\", ToolNode(tools))\nbuilder.add_edge(START, \"call_model\")\nbuilder.add_conditional_edges(\n    \"call_model\",\n    tools_condition,\n)\n# Continue with compile and execution\n```\n\n**资料来源**：[README.md:150-200]()\n\n### Workflow Diagram\n\n```mermaid\ngraph LR\n    A[User Message] --> B[call_model Node]\n    B --> C{tools_condition}\n    C -->|END| D[Response to User]\n    C -->|tools| E[ToolNode]\n    E --> F[MCP Tool Execution]\n    F --> G[Tool Result]\n    G --> B\n```\n\n## Tool Interceptors\n\nTool interceptors allow you to modify tool call requests and responses in an onion-pattern chain:\n\n```mermaid\ngraph TD\n    A[Request] --> B[Interceptor 1]\n    B --> C[Interceptor 2]\n    C --> D[Interceptor N]\n    D --> E[Execute Tool]\n    E --> D\n    D --> C\n    C --> B\n    
B --> F[Response]\n```\n\n**资料来源**：[langchain_mcp_adapters/interceptors.py:1-50]()\n\n### Creating a Custom Interceptor\n\n```python\nfrom langchain_mcp_adapters.interceptors import (\n    ToolCallInterceptor,\n    MCPToolCallRequest,\n    MCPToolCallResult,\n)\n\nasync def logging_interceptor(\n    request: MCPToolCallRequest, \n    next_handler\n) -> MCPToolCallResult:\n    print(f\"Calling tool: {request.name} with args: {request.args}\")\n    result = await next_handler(request)\n    print(f\"Tool result: {result}\")\n    return result\n\nclient = MultiServerMCPClient(\n    {...},\n    tool_interceptors=[logging_interceptor]\n)\n```\n\n## Resource Loading\n\nThe library also supports loading MCP resources as LangChain Blob objects:\n\n```python\nfrom langchain_mcp_adapters.resources import load_mcp_resources\n\n# Load all resources\nblobs = await load_mcp_resources(session)\n\n# Load specific resources\nblobs = await load_mcp_resources(session, uris=[\"resource://file1\", \"resource://file2\"])\n\n# Load single resource\nfrom langchain_mcp_adapters.resources import get_mcp_resource\nblob = await get_mcp_resource(session, \"resource://document\")\n```\n\n**资料来源**：[langchain_mcp_adapters/resources.py:1-80]()\n\n## Creating an MCP Server\n\nFor testing, you can create a simple MCP server using FastMCP:\n\n```python\n# math_server.py\nfrom mcp.server.fastmcp import FastMCP\n\nmcp = FastMCP(\"Math\")\n\n@mcp.tool()\ndef add(a: int, b: int) -> int:\n    \"\"\"Add two numbers\"\"\"\n    return a + b\n\n@mcp.tool()\ndef multiply(a: int, b: int) -> int:\n    \"\"\"Multiply two numbers\"\"\"\n    return a * b\n\nif __name__ == \"__main__\":\n    mcp.run()\n```\n\n**资料来源**：[README.md:50-100]()\n\n## HTTP Server Setup\n\nFor remote access, use the provided streamable HTTP server example:\n\n```bash\ncd examples/servers/streamable-http-stateless/\nuv run mcp-simple-streamablehttp-stateless --port 3000\n```\n\nThis starts a stateless HTTP server on port 3000 that can be 
accessed via the `streamablehttp_client`.\n\n**资料来源**：[examples/servers/streamable-http-stateless/mcp_simple_streamablehttp_stateless/__main__.py:1-10]()\n\n## Response Format\n\nAll tool calls return results in the `content_and_artifact` format:\n\n| Component | Type | Description |\n|-----------|------|-------------|\n| `content` | `list[ToolMessageContentBlock]` | Primary tool response content |\n| `artifact` | `MCPToolArtifact` | Structured data from MCP tool (if any) |\n\n**资料来源**：[langchain_mcp_adapters/tools.py:50-120]()\n\n## Next Steps\n\n- Explore the [API Reference](https://github.com/langchain-ai/langchain-mcp-adapters) for detailed function signatures\n- Review the example applications in the `examples/` directory\n- Implement custom tool interceptors for logging, caching, or authentication\n- Integrate with LangGraph's streaming capabilities for real-time tool execution\n\n---\n\n<a id='page-architecture'></a>\n\n## System Architecture\n\n### 相关页面\n\n相关主题：[Package Structure](#page-package-structure), [Tool Conversion](#page-tool-conversion), [MultiServerMCPClient](#page-multiserver-client)\n\n<details>\n<summary>相关源码文件</summary>\n\n以下源码文件用于生成本页说明：\n\n- [langchain_mcp_adapters/__init__.py](https://github.com/langchain-ai/langchain-mcp-adapters/blob/main/langchain_mcp_adapters/__init__.py)\n- [langchain_mcp_adapters/tools.py](https://github.com/langchain-ai/langchain-mcp-adapters/blob/main/langchain_mcp_adapters/tools.py)\n- [langchain_mcp_adapters/client.py](https://github.com/langchain-ai/langchain-mcp-adapters/blob/main/langchain_mcp_adapters/client.py)\n- [langchain_mcp_adapters/sessions.py](https://github.com/langchain-ai/langchain-mcp-adapters/blob/main/langchain_mcp_adapters/sessions.py)\n- [langchain_mcp_adapters/interceptors.py](https://github.com/langchain-ai/langchain-mcp-adapters/blob/main/langchain_mcp_adapters/interceptors.py)\n- 
[langchain_mcp_adapters/resources.py](https://github.com/langchain-ai/langchain-mcp-adapters/blob/main/langchain_mcp_adapters/resources.py)\n</details>\n\n# System Architecture\n\n## Overview\n\nThe **langchain-mcp-adapters** library provides a lightweight wrapper that makes [Anthropic Model Context Protocol (MCP)](https://modelcontextprotocol.io/introduction) tools compatible with [LangChain](https://github.com/langchain-ai/langchain) and [LangGraph](https://github.com/langchain-ai/langgraph) 资料来源：[README.md]()\n\nThe library acts as a bridge between MCP servers and LangChain applications, enabling:\n\n- **Tool Conversion**: Transform MCP tools into LangChain-compatible tools\n- **Multi-Server Support**: Connect to multiple MCP servers simultaneously\n- **Resource Management**: Convert MCP resources to LangChain Blob objects\n- **Prompt Integration**: Load MCP prompts into LangChain format\n- **Interceptor Support**: Customizable tool call interception and modification\n\n资料来源：[langchain_mcp_adapters/__init__.py:1-10]()\n\n---\n\n## High-Level Architecture\n\nThe system follows a layered architecture with clear separation of concerns:\n\n```mermaid\ngraph TD\n    subgraph \"Client Layer\"\n        Client[MultiServerMCPClient]\n    end\n\n    subgraph \"Session Layer\"\n        Stdio[StdioConnection]\n        HTTP[StreamableHttpConnection]\n        SSE[SSEConnection]\n        WS[WebsocketConnection]\n    end\n\n    subgraph \"Adapters Layer\"\n        Tools[tools.py]\n        Resources[resources.py]\n        Prompts[prompts.py]\n    end\n\n    subgraph \"Core Layer\"\n        Interceptors[interceptors.py]\n        Sessions[sessions.py]\n    end\n\n    subgraph \"External\"\n        MCPServer[MCP Server]\n        LangChain[LangChain/LangGraph]\n    end\n\n    Client --> Tools\n    Client --> Resources\n    Client --> Stdio\n    Client --> HTTP\n    Client --> SSE\n    Client --> WS\n    Stdio --> MCPServer\n    HTTP --> MCPServer\n    SSE --> MCPServer\n    WS --> 
MCPServer\n    Tools --> LangChain\n    Resources --> LangChain\n    Interceptors --> Tools\n    Sessions --> Client\n```\n\n---\n\n## Core Components\n\n### MultiServerMCPClient\n\nThe `MultiServerMCPClient` is the main entry point for connecting to multiple MCP servers. It manages connections and provides unified access to tools, prompts, and resources.\n\n资料来源：[langchain_mcp_adapters/client.py:1-50]()\n\n#### Key Responsibilities\n\n| Responsibility | Description |\n|----------------|-------------|\n| Connection Management | Manages multiple server connections |\n| Tool Loading | Loads and converts tools from all servers |\n| Resource Loading | Loads MCP resources as LangChain Blobs |\n| Prompt Loading | Loads prompts from MCP servers |\n| Session Handling | Provides session context managers for explicit control |\n\n#### Configuration Parameters\n\n| Parameter | Type | Description |\n|-----------|------|-------------|\n| `connections` | `dict[str, Connection]` | Server connection configurations |\n| `callbacks` | `Callbacks` | Event notification handlers |\n| `tool_interceptors` | `list[ToolCallInterceptor]` | Tool call interceptors |\n| `tool_name_prefix` | `bool` | Prefix tool names with server name |\n\n资料来源：[langchain_mcp_adapters/client.py:60-80]()\n\n---\n\n### Connection Types\n\nThe library supports multiple transport mechanisms for connecting to MCP servers:\n\n```mermaid\ngraph LR\n    A[Client] --> B[StdioConnection]\n    A --> C[StreamableHttpConnection]\n    A --> D[SSEConnection]\n    A --> E[WebsocketConnection]\n\n    B --> F[stdio_client]\n    C --> G[mcp.client.streamable_http]\n    D --> H[mcp.client.sse]\n    E --> I[mcp.client.websocket]\n```\n\n资料来源：[langchain_mcp_adapters/sessions.py:1-50]()\n\n#### Connection Types\n\n| Transport | Use Case | Configuration |\n|-----------|----------|---------------|\n| `stdio` | Local subprocess execution | `command`, `args`, `env`, `cwd` |\n| `http` | Streamable HTTP servers | `url`, `headers`, 
`timeout` |\n| `sse` | Server-Sent Events transport | `url`, `headers`, `timeout` |\n| `websocket` | WebSocket connections | `url`, `headers`, `timeout` |\n\n资料来源：[langchain_mcp_adapters/sessions.py:100-200]()\n\n---\n\n## Tool Conversion System\n\n### Architecture\n\n```mermaid\ngraph TD\n    subgraph \"MCP Side\"\n        MCPTool[MCP Tool]\n        MCPToolResult[MCPToolCallResult]\n    end\n\n    subgraph \"Conversion Layer\"\n        ContentConverter[_convert_mcp_content_to_lc_block]\n        ResultConverter[_convert_call_tool_result]\n        InterceptorChain[_build_interceptor_chain]\n    end\n\n    subgraph \"LangChain Side\"\n        StructuredTool[StructuredTool]\n        ToolMessage[ToolMessage]\n        Command[Command]\n        Artifact[MCPToolArtifact]\n    end\n\n    MCPTool --> load_mcp_tool\n    load_mcp_tool --> InterceptorChain\n    InterceptorChain --> MCPToolResult\n    MCPToolResult --> ResultConverter\n    ContentConverter --> StructuredTool\n    ResultConverter --> ToolMessage\n    ResultConverter --> Command\n    ResultConverter --> Artifact\n```\n\n### Content Type Mapping\n\nThe tool adapter converts MCP content types to LangChain content blocks:\n\n| MCP Content Type | LangChain Block | Description |\n|------------------|-----------------|-------------|\n| `TextContent` | `TextContentBlock` | Plain text content |\n| `ImageContent` | `ImageContentBlock` | Image with base64 data |\n| `ResourceLink` (image/*) | `ImageContentBlock` | Image via URL |\n| `ResourceLink` (other) | `FileContentBlock` | File via URL |\n| `EmbeddedResource` (text) | `TextContentBlock` | Embedded text resource |\n| `EmbeddedResource` (blob) | `ImageContentBlock` / `FileContentBlock` | Embedded binary resource |\n| `AudioContent` | `NotImplementedError` | Not yet supported |\n\n资料来源：[langchain_mcp_adapters/tools.py:100-150]()\n\n### Tool Call Execution Flow\n\n```mermaid\nsequenceDiagram\n    participant Agent as LangGraph Agent\n    participant Tool as 
StructuredTool\n    participant Interceptor as Interceptor Chain\n    participant Executor as Execute Tool\n    participant MCPSession as MCP Session\n    participant MCPServer as MCP Server\n\n    Agent->>Tool: invoke(args)\n    Tool->>Interceptor: MCPToolCallRequest\n    Interceptor->>Interceptor: Before interceptors\n    Interceptor->>Executor: MCPToolCallRequest\n    Executor->>MCPSession: session.call_tool()\n    MCPSession->>MCPServer: CallToolRequest\n    MCPServer-->>MCPSession: CallToolResult\n    MCPSession-->>Executor: CallToolResult\n    Executor-->>Interceptor: MCPToolCallResult\n    Interceptor->>Interceptor: After interceptors\n    Interceptor-->>Tool: MCPToolCallResult\n    Tool->>Tool: _convert_call_tool_result()\n    Tool-->>Agent: (content, artifact)\n```\n\n---\n\n## Interceptor System\n\n### Purpose\n\nThe interceptor system allows custom code to execute before and after tool calls, enabling:\n\n- Request modification\n- Response transformation\n- Logging and monitoring\n- Caching\n- Error handling\n- Conditional execution\n\n资料来源：[langchain_mcp_adapters/interceptors.py:1-30]()\n\n### Interceptor Interface\n\n```mermaid\ngraph TD\n    subgraph \"MCPToolCallRequest\"\n        ReqName[name]\n        ReqArgs[args]\n        ReqServer[server_name]\n        ReqHeaders[headers]\n        ReqRuntime[runtime]\n    end\n\n    subgraph \"MCPToolCallResult\"\n        ResContent[content]\n        ResIsError[isError]\n        ResStruct[structuredContent]\n    end\n\n    Interceptor[\"ToolCallInterceptor\"]\n    Interceptor --> Before[before_tool_call]\n    Interceptor --> After[after_tool_call]\n```\n\n### Request Override Support\n\n| Field | Modifiable | Description |\n|-------|------------|-------------|\n| `name` | Yes | Tool name override |\n| `args` | Yes | Arguments override |\n| `headers` | Yes | HTTP headers override |\n| `server_name` | No | Read-only context |\n| `runtime` | No | Read-only context 
|\n\n资料来源：[langchain_mcp_adapters/interceptors.py:50-70]()\n\n### Interceptor Chain Pattern\n\nThe system implements an onion-pattern interceptor chain:\n\n```mermaid\ngraph TD\n    A[Agent Request] --> B[Interceptor 1 - Outermost]\n    B --> C[Interceptor 2]\n    C --> D[Interceptor N]\n    D --> E[Execute Tool - Innermost]\n    E --> D'\n    D' --> C'\n    C' --> B'\n    B' --> F[Agent Response]\n    \n    style B fill:#ff9999\n    style C fill:#ffcc99\n    style D fill:#ffff99\n    style E fill:#99ff99\n```\n\n资料来源：[langchain_mcp_adapters/tools.py:50-80]()\n\n---\n\n## Session Management\n\n### Session Creation Flow\n\n```mermaid\ngraph TD\n    A[MultiServerMCPClient] --> B{Connection Type}\n    \n    B -->|Stdio| C[StdioConnection]\n    B -->|HTTP| D[StreamableHttpConnection]\n    B -->|SSE| E[SSEConnection]\n    B -->|WebSocket| F[WebsocketConnection]\n    \n    C --> G[create_session]\n    D --> G\n    E --> G\n    F --> G\n    \n    G --> H[ClientSession]\n```\n\n### Session Factory\n\nThe `create_session()` function provides a unified interface for session creation:\n\n```python\nasync with create_session(connection) as session:\n    tools = await load_mcp_tools(session)\n```\n\n资料来源：[langchain_mcp_adapters/sessions.py:200-300]()\n\n---\n\n## Resource Management\n\n### Resource Conversion\n\n```mermaid\ngraph LR\n    MCP[MCPServer] -->|read_resource| Session[ClientSession]\n    Session -->|ResourceContents| Converter[convert_mcp_resource_to_langchain_blob]\n    \n    Converter --> Text[TextResourceContents] --> Blob1[Blob (text)]\n    Converter --> Blob[BlobResourceContents] --> Blob2[Blob (binary)]\n```\n\n### Supported Resource Types\n\n| MCP Type | LangChain Type | Notes |\n|----------|---------------|-------|\n| `TextResourceContents` | `Blob` | MIME type from resource |\n| `BlobResourceContents` | `Blob` | Base64 decoded data |\n\n资料来源：[langchain_mcp_adapters/resources.py:1-50]()\n\n---\n\n## Data Flow Architecture\n\n### Complete Request 
Flow\n\n```mermaid\ngraph TD\n    subgraph \"1. Initialization\"\n        A[MultiServerMCPClient] --> B[Load Tools]\n        B --> C[create_session]\n        C --> D[session.initialize]\n    end\n\n    subgraph \"2. Tool Invocation\"\n        E[Agent] --> F[StructuredTool.invoke]\n        F --> G[call_tool coroutine]\n        G --> H[Build Request]\n        H --> I[Apply Interceptors]\n    end\n\n    subgraph \"3. MCP Execution\"\n        I --> J[session.call_tool]\n        J --> K[MCP Server]\n        K --> L[CallToolResult]\n    end\n\n    subgraph \"4. Response Conversion\"\n        L --> M[_convert_call_tool_result]\n        M --> N[Content Blocks]\n        M --> O[MCPToolArtifact]\n        N --> P[ToolMessage/Command]\n    end\n\n    subgraph \"5. Return to Agent\"\n        P --> Q[Agent Response]\n        O --> R[ToolArtifact]\n    end\n```\n\n---\n\n## Type System\n\n### Result Types\n\nThe library defines conditional types based on LangGraph availability:\n\n```python\nif LANGGRAPH_PRESENT:\n    ConvertedToolResult = list[ToolMessageContentBlock] | ToolMessage | Command\nelse:\n    ConvertedToolResult = list[ToolMessageContentBlock] | ToolMessage\n```\n\n### MCPToolArtifact\n\nA TypedDict wrapping structured content from MCP tool calls:\n\n```python\nclass MCPToolArtifact(TypedDict):\n    structured_content: dict[str, Any]\n```\n\n资料来源：[langchain_mcp_adapters/tools.py:50-70]()\n\n---\n\n## Error Handling\n\n### Error Flow\n\n```mermaid\ngraph TD\n    A[MCP Tool Call] --> B{Result Type}\n    \n    B -->|isError = True| C[Extract Text Blocks]\n    C --> D[Join Error Parts]\n    D --> E[ToolException]\n    \n    B -->|isError = False| F[Convert Content]\n    F --> G[Return Result]\n    \n    B -->|AudioContent| H[NotImplementedError]\n```\n\n### Error Scenarios\n\n| Scenario | Handling | Source |\n|----------|----------|--------|\n| MCP server error | `ToolException` raised | tools.py:conversion |\n| Unknown content type | `ValueError` raised | 
tools.py:content |\n| Audio content | `NotImplementedError` raised | tools.py:audio |\n| Missing session | `ValueError` raised | tools.py:session |\n\n---\n\n## Integration Patterns\n\n### LangGraph Integration\n\n```mermaid\ngraph LR\n    A[StateGraph] --> B[call_model]\n    B --> C[tools_condition]\n    C --> D{Tool Node?}\n    D -->|Yes| E[ToolNode]\n    D -->|No| F[End]\n    E --> G[Tools]\n    G --> B\n```\n\n### Response Format\n\nThe tool uses `response_format=\"content_and_artifact\"` to return both content and structured data:\n\n```python\nreturn StructuredTool(\n    ...\n    response_format=\"content_and_artifact\",\n)\n```\n\n---\n\n## Configuration Reference\n\n### MultiServerMCPClient Configuration\n\n```python\nMultiServerMCPClient(\n    connections={\n        \"server_name\": {\n            \"transport\": \"stdio|http|sse|websocket\",\n            # Transport-specific options\n        }\n    },\n    callbacks=Callbacks(),      # Optional\n    tool_interceptors=[],       # Optional\n    tool_name_prefix=False      # Optional\n)\n```\n\n### Transport Configurations\n\n| Transport | Required Options | Optional Options |\n|-----------|-----------------|------------------|\n| `stdio` | `command`, `args` | `env`, `cwd`, `encoding` |\n| `http` | `url` | `headers`, `timeout` |\n| `sse` | `url` | `headers`, `timeout` |\n| `websocket` | `url` | `headers`, `timeout` |\n\n---\n\n## Summary\n\nThe langchain-mcp-adapters library implements a clean, layered architecture:\n\n1. **Client Layer**: `MultiServerMCPClient` provides high-level API for managing multiple server connections\n2. **Session Layer**: Multiple transport implementations (`Stdio`, `HTTP`, `SSE`, `WebSocket`) handle protocol details\n3. **Adapters Layer**: `tools.py`, `resources.py`, and `prompts.py` convert between MCP and LangChain formats\n4. **Interceptor Layer**: `interceptors.py` enables customization of the tool call lifecycle\n5. 
**Core Layer**: Type definitions and conversion utilities provide the foundation\n\nThe architecture prioritizes:\n- **Extensibility**: Through the interceptor system\n- **Flexibility**: Multiple transport and connection options\n- **Type Safety**: Comprehensive type annotations and Pydantic models\n- **Integration**: Seamless LangChain and LangGraph compatibility\n\n---\n\n<a id='page-package-structure'></a>\n\n## Package Structure\n\n### 相关页面\n\n相关主题：[System Architecture](#page-architecture), [Tool Conversion](#page-tool-conversion)\n\n<details>\n<summary>相关源码文件</summary>\n\n以下源码文件用于生成本页说明：\n\n- [langchain_mcp_adapters/tools.py](https://github.com/langchain-ai/langchain-mcp-adapters/blob/main/langchain_mcp_adapters/tools.py)\n- [langchain_mcp_adapters/client.py](https://github.com/langchain-ai/langchain-mcp-adapters/blob/main/langchain_mcp_adapters/client.py)\n- [langchain_mcp_adapters/sessions.py](https://github.com/langchain-ai/langchain-mcp-adapters/blob/main/langchain_mcp_adapters/sessions.py)\n- [langchain_mcp_adapters/callbacks.py](https://github.com/langchain-ai/langchain-mcp-adapters/blob/main/langchain_mcp_adapters/callbacks.py)\n- [langchain_mcp_adapters/interceptors.py](https://github.com/langchain-ai/langchain-mcp-adapters/blob/main/langchain_mcp_adapters/interceptors.py)\n- [langchain_mcp_adapters/resources.py](https://github.com/langchain-ai/langchain-mcp-adapters/blob/main/langchain_mcp_adapters/resources.py)\n- [langchain_mcp_adapters/prompts.py](https://github.com/langchain-ai/langchain-mcp-adapters/blob/main/langchain_mcp_adapters/prompts.py)\n</details>\n\n# Package Structure\n\n## Overview\n\nThe `langchain-mcp-adapters` package provides a lightweight wrapper that makes [Anthropic Model Context Protocol (MCP)](https://modelcontextprotocol.io/introduction) tools compatible with [LangChain](https://github.com/langchain-ai/langchain) and [LangGraph](https://github.com/langchain-ai/langgraph). 
The package bridges MCP servers with LangChain applications by converting MCP tools, prompts, and resources into LangChain-compatible formats.\n\n资料来源：[README.md]()\n\n## Package Architecture\n\nThe package follows a modular architecture with distinct responsibilities for each module:\n\n```mermaid\ngraph TD\n    subgraph \"langchain_mcp_adapters Package\"\n        A[\"__init__.py<br/>Package Entry\"] --> B[\"client.py<br/>MultiServerMCPClient\"]\n        B --> C[\"sessions.py<br/>Connection Management\"]\n        B --> D[\"tools.py<br/>Tool Conversion\"]\n        B --> E[\"resources.py<br/>Resource Conversion\"]\n        B --> F[\"prompts.py<br/>Prompt Loading\"]\n        C --> G[\"callbacks.py<br/>Callback Handling\"]\n        C --> H[\"interceptors.py<br/>Tool Call Interceptors\"]\n    end\n    \n    I[\"MCP Servers\"] --> C\n    D --> J[\"LangChain Tools\"]\n    E --> K[\"LangChain Blobs\"]\n    F --> L[\"LangChain Prompts\"]\n```\n\n## Directory Structure\n\n```\nlangchain_mcp_adapters/\n├── __init__.py          # Package initialization and exports\n├── client.py            # MultiServerMCPClient for managing multiple servers\n├── tools.py             # MCP to LangChain tool conversion\n├── resources.py         # MCP resource to Blob conversion\n├── prompts.py           # MCP prompt loading\n├── sessions.py          # Connection handling for different transports\n├── callbacks.py         # Event and notification callbacks\n└── interceptors.py     # Tool call interception and modification\n```\n\n## Core Modules\n\n### 1. 
tools.py — Tool Conversion\n\nThe `tools.py` module handles conversion of MCP tools to LangChain-compatible tools.\n\n| Component | Purpose |\n|-----------|---------|\n| `load_mcp_tools()` | Loads all available MCP tools and converts them to LangChain tools |\n| `_convert_mcp_content_to_lc_block()` | Converts MCP content blocks (Text, Image, Audio, Resource) to LangChain content blocks |\n| `_convert_call_tool_result()` | Converts MCP CallToolResult to LangChain tool result format |\n| `MCPToolArtifact` | TypedDict wrapping structured content from MCP tool calls |\n\n**Key Type Definitions:**\n\n```python\nToolMessageContentBlock = TextContentBlock | ImageContentBlock | FileContentBlock\n\nConvertedToolResult = list[ToolMessageContentBlock] | ToolMessage | Command  # if langgraph installed\n```\n\n资料来源：[langchain_mcp_adapters/tools.py:1-150]()\n\n### 2. client.py — MultiServerMCPClient\n\nThe `client.py` module provides the `MultiServerMCPClient` class for managing connections to multiple MCP servers.\n\n| Parameter | Type | Description |\n|-----------|------|-------------|\n| `connections` | `dict[str, Connection]` | Dictionary mapping server names to connection configurations |\n| `callbacks` | `Callbacks` | Optional callbacks for handling notifications |\n| `tool_interceptors` | `list[ToolCallInterceptor]` | Optional interceptors for tool call processing |\n| `tool_name_prefix` | `bool` | Prefix tool names with server name (default: `False`) |\n\n**Supported Connection Configurations:**\n\nThe client supports multiple transport types with their respective parameters:\n\n| Transport | Required Parameters |\n|-----------|---------------------|\n| `stdio` | `command`, `args` |\n| `http` | `url` |\n| `sse` | `url`, optional `headers` |\n| `streamable_http` | `url`, optional `headers` |\n| `websocket` | `url` |\n\n资料来源：[langchain_mcp_adapters/client.py:1-100]()\n\n### 3. 
sessions.py — Connection Management\n\nThe `sessions.py` module handles connection management for different MCP transport types.\n\n| Connection Type | Class | Purpose |\n|-----------------|-------|---------|\n| Stdio | `StdioConnection` | stdio-based communication with subprocess |\n| HTTP | `McpHttpClientFactory`, `StreamableHttpConnection` | HTTP-based communication |\n| SSE | `SSEConnection` | Server-Sent Events transport |\n| WebSocket | `WebsocketConnection` | WebSocket-based communication |\n\n**Session Creation Flow:**\n\n```mermaid\ngraph TD\n    A[\"create_session()\"] --> B{\"Connection Type?\"}\n    B -->|Stdio| C[\"_create_stdio_session()\"]\n    B -->|HTTP| D[\"_create_http_session()\"]\n    B -->|SSE| E[\"_create_sse_session()\"]\n    B -->|WebSocket| F[\"_create_websocket_session()\"]\n    \n    C --> G[\"ClientSession\"]\n    D --> G\n    E --> G\n    F --> G\n```\n\nThe `create_session()` function returns an async generator that yields an initialized `ClientSession`:\n\n```python\n@asynccontextmanager\nasync def create_session(connection: Connection) -> AsyncIterator[ClientSession]:\n```\n\n**Environment Variable Expansion:**\n\nSessions support environment variable expansion in configuration values using `${VAR}` or `${VAR:default}` syntax.\n\n资料来源：[langchain_mcp_adapters/sessions.py:1-100]()\n\n### 4. 
resources.py — Resource Conversion\n\nThe `resources.py` module converts MCP resources into LangChain Blob objects.\n\n| Function | Purpose |\n|----------|---------|\n| `convert_mcp_resource_to_langchain_blob()` | Converts a single MCP resource content to a Blob |\n| `get_mcp_resource()` | Fetches a single MCP resource by URI |\n| `load_mcp_resources()` | Loads multiple MCP resources and converts them to Blobs |\n\n**Supported Content Types:**\n\n| MCP Type | Conversion |\n|----------|------------|\n| `TextResourceContents` | Raw text data |\n| `BlobResourceContents` | Base64-decoded binary data |\n\n资料来源：[langchain_mcp_adapters/resources.py:1-80]()\n\n### 5. prompts.py — Prompt Loading\n\nThe `prompts.py` module handles loading MCP prompts into LangChain prompt formats. The module provides functionality to convert MCP prompt definitions into LangChain-compatible prompt structures.\n\n资料来源：[langchain_mcp_adapters/prompts.py:1-50]()\n\n### 6. callbacks.py — Callback Handling\n\nThe `callbacks.py` module provides callback infrastructure for handling notifications and events during MCP operations.\n\n| Component | Purpose |\n|-----------|---------|\n| `Callbacks` | Main callback container class |\n| `CallbackContext` | Context passed to callbacks with server/tool information |\n\nThe `CallbackContext` dataclass holds:\n\n```python\n@dataclass\nclass CallbackContext:\n    server_name: str | None = None\n    tool_name: str | None = None\n```\n\n资料来源：[langchain_mcp_adapters/callbacks.py:1-60]()\n\n### 7. 
interceptors.py — Tool Call Interceptors\n\nThe `interceptors.py` module provides interceptor interfaces for wrapping and controlling MCP tool call execution.\n\n| Component | Purpose |\n|-----------|---------|\n| `ToolCallInterceptor` | Protocol for intercepting tool calls |\n| `MCPToolCallRequest` | Request object passed to interceptors |\n| `_build_interceptor_chain()` | Builds composed handler chain with interceptors in onion pattern |\n\n**Interceptor Pattern:**\n\n```mermaid\ngraph TD\n    A[\"Request\"] --> B[\"Interceptor 1<br/>(Outer Layer)\"]\n    B --> C[\"Interceptor 2\"]\n    C --> D[\"...\"]\n    D --> E[\"Interceptor N\"]\n    E --> F[\"execute_tool<br/>(Innermost)\"]\n    F --> G[\"Result\"]\n    G --> E\n    G --> D\n    G --> C\n    G --> B\n    G --> H[\"Response\"]\n```\n\nThe interceptor chain follows an onion pattern where each interceptor wraps the next, allowing pre-processing before and post-processing after tool execution.\n\n**MCPToolCallRequest Structure:**\n\n```python\n@dataclass\nclass MCPToolCallRequest:\n    name: str\n    args: dict[str, Any]\n    server_name: str\n    headers: dict[str, Any] | None\n    runtime: Any\n```\n\n**Result Type (Conditional):**\n\n```python\nif LANGGRAPH_PRESENT:\n    MCPToolCallResult = CallToolResult | ToolMessage | Command\nelse:\n    MCPToolCallResult = CallToolResult | ToolMessage\n```\n\n资料来源：[langchain_mcp_adapters/interceptors.py:1-80]()\n\n## Data Flow Architecture\n\n### Tool Execution Flow\n\n```mermaid\nsequenceDiagram\n    participant User\n    participant MultiServerMCPClient\n    participant load_mcp_tools\n    participant ToolCallInterceptor\n    participant ClientSession\n    participant MCPServer\n    \n    User->>MultiServerMCPClient: get_tools()\n    MultiServerMCPClient->>load_mcp_tools: load_mcp_tools(session)\n    load_mcp_tools->>load_mcp_tools: Create StructuredTool\n    Note over load_mcp_tools: Register call_tool coroutine\n    \n    User->>StructuredTool: invoke(args)\n    
StructuredTool->>load_mcp_tools: call_tool(args)\n    \n    alt With Interceptors\n        load_mcp_tools->>ToolCallInterceptor: intercept(request)\n        ToolCallInterceptor->>ToolCallInterceptor: modify/validate\n    end\n    \n    load_mcp_tools->>ClientSession: call_tool(name, args)\n    ClientSession->>MCPServer: MCP CallToolRequest\n    MCPServer-->>ClientSession: CallToolResult\n    ClientSession-->>load_mcp_tools: CallToolResult\n    \n    alt Error Result\n        load_mcp_tools->>load_mcp_tools: Check isError flag\n        load_mcp_tools->>ToolException: raise\n    end\n    \n    load_mcp_tools->>_convert_call_tool_result: format result\n    Note over load_mcp_tools: Convert content blocks to LC format\n    \n    load_mcp_tools-->>User: (content, artifact)\n```\n\n### Content Conversion Flow\n\n```mermaid\ngraph LR\n    subgraph \"MCP Content Types\"\n        A[\"TextContent\"]\n        B[\"ImageContent\"]\n        C[\"AudioContent\"]\n        D[\"ResourceLink\"]\n        E[\"EmbeddedResource\"]\n    end\n    \n    subgraph \"Conversion Functions\"\n        F[\"_convert_mcp_content_to_lc_block\"]\n    end\n    \n    subgraph \"LangChain Content Blocks\"\n        G[\"TextContentBlock\"]\n        H[\"ImageContentBlock\"]\n        I[\"FileContentBlock\"]\n    end\n    \n    A --> F\n    B --> F\n    D --> F\n    E --> F\n    C -.->|NotImplementedError| F\n    \n    F --> G\n    F --> H\n    F --> I\n```\n\n## Type System\n\n### Conditional Type Definitions\n\nThe package uses conditional type definitions based on whether `langgraph` is installed:\n\n```python\ntry:\n    from langgraph.types import Command\n    LANGGRAPH_PRESENT = True\nexcept ImportError:\n    LANGGRAPH_PRESENT = False\n```\n\n| Type | Without langgraph | With langgraph |\n|------|-------------------|----------------|\n| `ConvertedToolResult` | `list[ToolMessageContentBlock] \\| ToolMessage` | `list[ToolMessageContentBlock] \\| ToolMessage \\| Command` |\n| `MCPToolCallResult` | 
`CallToolResult \\| ToolMessage` | `CallToolResult \\| ToolMessage \\| Command` |\n\n## Error Handling\n\n### Tool Exceptions\n\n| Error Type | Trigger | Behavior |\n|------------|---------|----------|\n| `ToolException` | MCP tool returns `isError: true` | Raised with joined error message from content blocks |\n| `NotImplementedError` | AudioContent conversion attempted | Audio content is not yet supported |\n| `ValueError` | Unknown content type | Unknown MCP content types raise ValueError |\n\n### Connection Errors\n\n| Error Type | Condition |\n|------------|-----------|\n| `ValueError` | Neither session nor connection provided to `load_mcp_tools()` |\n\n## Configuration Options\n\n### Tool Loading Options\n\n| Parameter | Type | Default | Description |\n|-----------|------|---------|-------------|\n| `session` | `ClientSession` | `None` | MCP client session |\n| `connection` | `Connection` | `None` | Connection config for new session |\n| `callbacks` | `Callbacks` | `None` | Event callbacks |\n| `tool_interceptors` | `list[ToolCallInterceptor]` | `None` | Tool call interceptors |\n| `server_name` | `str` | `None` | Server name for context |\n| `tool_name_prefix` | `bool` | `False` | Prefix tool names with server |\n\n### Client Configuration\n\n| Parameter | Type | Default | Description |\n|-----------|------|---------|-------------|\n| `connections` | `dict[str, Connection]` | `{}` | Server connection configs |\n| `callbacks` | `Callbacks` | `Callbacks()` | Default callbacks |\n| `tool_interceptors` | `list[ToolCallInterceptor]` | `[]` | Default interceptors |\n| `tool_name_prefix` | `bool` | `False` | Prefix tool names |\n\n## Dependencies\n\n### Required Dependencies\n\n| Package | Purpose |\n|---------|---------|\n| `langchain-core` | LangChain core functionality and BaseTool |\n| `mcp` | MCP client SDK |\n| `pydantic` | Data validation and model creation |\n\n### Optional Dependencies\n\n| Package | Feature |\n|---------|---------|\n| `langgraph` | 
LangGraph Command support, enhanced state management |\n\n## Package Exports\n\nThe `__init__.py` exports the main public API:\n\n- `MultiServerMCPClient` - Multi-server client class\n- `load_mcp_tools` - Tool loading function\n- `load_mcp_resources` - Resource loading function\n- `load_mcp_prompt` - Prompt loading function\n- `Callbacks`, `CallbackContext` - Callback infrastructure\n- `ToolCallInterceptor` - Interceptor protocol\n- `Connection` - Connection configuration types\n\n资料来源：[langchain_mcp_adapters/__init__.py:1-20]()\n\n---\n\n<a id='page-tool-conversion'></a>\n\n## Tool Conversion\n\n### 相关页面\n\n相关主题：[MultiServerMCPClient](#page-multiserver-client), [Transport Types](#page-transport-types), [Package Structure](#page-package-structure)\n\n<details>\n<summary>相关源码文件</summary>\n\n以下源码文件用于生成本页说明：\n\n- [langchain_mcp_adapters/tools.py](https://github.com/langchain-ai/langchain-mcp-adapters/blob/main/langchain_mcp_adapters/tools.py)\n- [langchain_mcp_adapters/client.py](https://github.com/langchain-ai/langchain-mcp-adapters/blob/main/langchain_mcp_adapters/client.py)\n- [langchain_mcp_adapters/interceptors.py](https://github.com/langchain-ai/langchain-mcp-adapters/blob/main/langchain_mcp_adapters/interceptors.py)\n- [langchain_mcp_adapters/resources.py](https://github.com/langchain-ai/langchain-mcp-adapters/blob/main/langchain_mcp_adapters/resources.py)\n- [README.md](https://github.com/langchain-ai/langchain-mcp-adapters/blob/main/README.md)\n</details>\n\n# Tool Conversion\n\n## Overview\n\nTool Conversion is the core mechanism that bridges **MCP (Model Context Protocol)** tools with **LangChain tools**, enabling interoperability between the MCP ecosystem and LangChain/LangGraph agents. 
This adapter transforms native MCP tool definitions into LangChain-compatible `StructuredTool` instances that can be used with LangChain agents and LangGraph state machines.\n\nThe conversion layer handles:\n\n- Tool signature translation (MCP schema → LangChain Pydantic schema)\n- Tool execution with proper session context\n- Content block conversion (MCP content types → LangChain content blocks)\n- Error handling and artifact wrapping\n- Interceptor chain support for middleware patterns\n\n资料来源：[langchain_mcp_adapters/tools.py:1-30]()\n\n## Architecture\n\n```mermaid\ngraph TD\n    subgraph \"MCP Layer\"\n        MCPTool[MCP Tool Definition]\n        MCPToolCallResult[MCP CallToolResult]\n    end\n    \n    subgraph \"Adapter Layer\"\n        convert_mcp_tool[convert_mcp_tool_to_langchain_tool]\n        load_mcp_tools[load_mcp_tools]\n        interceptor_chain[Interceptor Chain]\n        content_converter[_convert_mcp_content_to_lc_block]\n        result_converter[_convert_call_tool_result]\n    end\n    \n    subgraph \"LangChain Layer\"\n        StructuredTool[StructuredTool]\n        ToolMessage[ToolMessage]\n        Command[Command<br/>langgraph.types]\n        MCPToolArtifact[MCPToolArtifact]\n    end\n    \n    MCPTool --> convert_mcp_tool\n    MCPTool --> load_mcp_tools\n    load_mcp_tools --> convert_mcp_tool\n    convert_mcp_tool --> interceptor_chain\n    interceptor_chain --> content_converter\n    MCPToolCallResult --> result_converter\n    result_converter --> ToolMessage\n    result_converter --> Command\n    result_converter --> MCPToolArtifact\n```\n\n### Conversion Flow\n\n```mermaid\nsequenceDiagram\n    participant Agent as LangChain Agent\n    participant LC_Tool as LangChain StructuredTool\n    participant Interceptor as ToolCallInterceptor\n    participant MCP_Session as MCP ClientSession\n    participant MCP_Server as MCP Server\n\n    Agent->>LC_Tool: invoke(name, args)\n    LC_Tool->>Interceptor: MCPToolCallRequest\n    
Interceptor->>Interceptor: preprocess()\n    Interceptor->>MCP_Session: call_tool()\n    MCP_Session->>MCP_Server: protocol call\n    MCP_Server-->>MCP_Session: CallToolResult\n    MCP_Session-->>Interceptor: MCPToolCallResult\n    Interceptor->>Interceptor: postprocess()\n    Interceptor-->>LC_Tool: Converted Result\n    LC_Tool->>LC_Tool: _convert_call_tool_result()\n    LC_Tool-->>Agent: (content, artifact)\n```\n\n资料来源：[langchain_mcp_adapters/tools.py:140-220]()\n\n## Core Functions\n\n### load_mcp_tools\n\nLoads all available MCP tools from a session and converts them to LangChain tools.\n\n```python\nasync def load_mcp_tools(\n    session: ClientSession | None,\n    *,\n    connection: Connection | None = None,\n    callbacks: Callbacks | None = None,\n    tool_interceptors: list[ToolCallInterceptor] | None = None,\n    server_name: str | None = None,\n    tool_name_prefix: bool = False,\n) -> list[BaseTool]\n```\n\n**Parameters:**\n\n| Parameter | Type | Default | Description |\n|-----------|------|---------|-------------|\n| `session` | `ClientSession \\| None` | required | MCP client session. If `None`, `connection` must be provided. |\n| `connection` | `Connection \\| None` | `None` | Connection config to create a new session if session is `None`. |\n| `callbacks` | `Callbacks \\| None` | `None` | Optional callbacks for handling notifications and events. |\n| `tool_interceptors` | `list[ToolCallInterceptor] \\| None` | `None` | Optional list of interceptors for tool call processing. |\n| `server_name` | `str \\| None` | `None` | Name of the server these tools belong to. |\n| `tool_name_prefix` | `bool` | `False` | If `True`, tool names are prefixed with server name (e.g., `\"weather_search\"`). 
|\n\n资料来源：[langchain_mcp_adapters/tools.py:219-270]()\n\n### convert_mcp_tool_to_langchain_tool\n\nConverts a single MCP tool to a LangChain `StructuredTool`.\n\n```python\ndef convert_mcp_tool_to_langchain_tool(\n    session: ClientSession | None,\n    tool: MCPTool,\n    *,\n    connection: Connection | None = None,\n    callbacks: Callbacks | None = None,\n    tool_interceptors: list[ToolCallInterceptor] | None = None,\n    server_name: str | None = None,\n    tool_name_prefix: bool = False,\n) -> BaseTool\n```\n\n**Returns:**\nA LangChain `StructuredTool` with `response_format=\"content_and_artifact\"`.\n\n**Key Implementation Details:**\n\n- Creates an async `call_tool` coroutine that handles execution\n- Injects `runtime` via `InjectedToolArg` for LangGraph compatibility\n- Supports `ToolCallInterceptor` chain via `_build_interceptor_chain()`\n- Wraps errors as `ToolException`\n- Extracts `structuredContent` into `MCPToolArtifact`\n\n资料来源：[langchain_mcp_adapters/tools.py:150-218]()\n\n## Content Block Conversion\n\nThe adapter converts MCP content types to LangChain content blocks for uniform handling.\n\n### Supported Conversions\n\n| MCP Content Type | LangChain Content Block | Notes |\n|------------------|-------------------------|-------|\n| `TextContent` | `{\"type\": \"text\", \"text\": ...}` | Direct text conversion |\n| `ImageContent` | `{\"type\": \"image\", \"base64\": ..., \"mime_type\": ...}` | Base64 encoded image data |\n| `ResourceLink` (image/*) | `{\"type\": \"image\", \"url\": ..., \"mime_type\": ...}` | Image via URI reference |\n| `ResourceLink` (other) | `{\"type\": \"file\", \"url\": ..., \"mime_type\": ...}` | Generic file via URI reference |\n| `EmbeddedResource` (text) | `{\"type\": \"text\", \"text\": ...}` | Text from embedded resource |\n| `EmbeddedResource` (blob) | Image or file block | Based on MIME type |\n| `AudioContent` | — | Raises `NotImplementedError` |\n\n资料来源：[langchain_mcp_adapters/tools.py:70-115]()\n\n### 
_convert_mcp_content_to_lc_block\n\n```python\ndef _convert_mcp_content_to_lc_block(\n    content: ContentBlock,\n) -> ToolMessageContentBlock\n```\n\nThis function handles the 1:1 mapping between MCP content types and LangChain content blocks.\n\n```mermaid\ngraph LR\n    A[ContentBlock] --> B{Type Check}\n    B -->|TextContent| C[create_text_block]\n    B -->|ImageContent| D[create_image_block]\n    B -->|ResourceLink| E{MIME type?}\n    B -->|EmbeddedResource| F{Resource Type?}\n    B -->|AudioContent| G[NotImplementedError]\n    \n    E -->|image/*| H[create_image_block<br/>url=uri]\n    E -->|other| I[create_file_block<br/>url=uri]\n    \n    F -->|TextResourceContents| J[create_text_block]\n    F -->|BlobResourceContents| K{MIME type?}\n    K -->|image/*| L[create_image_block]\n    K -->|other| M[create_file_block]\n```\n\n资料来源：[langchain_mcp_adapters/tools.py:70-115]()\n\n## Result Conversion\n\n### _convert_call_tool_result\n\nConverts the result of an MCP tool call to LangChain format with support for multiple return types.\n\n```python\ndef _convert_call_tool_result(\n    call_tool_result: MCPToolCallResult,\n) -> tuple[ConvertedToolResult, MCPToolArtifact | None]\n```\n\n**Return Types:**\n\nThe function returns a tuple where:\n- **First element**: The converted content\n- **Second element**: The artifact (if any)\n\n**Content Types Based on Input:**\n\n| Input Type | Output Content | Output Artifact |\n|------------|----------------|-----------------|\n| `ToolMessage` | `ToolMessage` (passthrough) | `None` |\n| `Command` (LangGraph) | `Command` (passthrough) | `None` |\n| `CallToolResult` (MCP) | `list[ToolMessageContentBlock]` | `MCPToolArtifact` (if `structuredContent` present) |\n\n资料来源：[langchain_mcp_adapters/tools.py:117-145]()\n\n### MCPToolArtifact\n\nA TypedDict wrapping structured content from MCP tool calls:\n\n```python\nclass MCPToolArtifact(TypedDict):\n    \"\"\"Artifact returned from MCP tool calls.\"\"\"\n    structured_content: 
dict[str, Any]\n```\n\nThis allows downstream consumers to access MCP-specific structured data while maintaining compatibility with LangChain's tool result format.\n\n资料来源：[langchain_mcp_adapters/tools.py:55-68]()\n\n## Interceptor Chain\n\nThe interceptor system implements the **onion pattern** for middleware-like processing of tool calls.\n\n### _build_interceptor_chain\n\n```python\ndef _build_interceptor_chain(\n    base_handler: Callable[[MCPToolCallRequest], Awaitable[MCPToolCallResult]],\n    tool_interceptors: list[ToolCallInterceptor] | None,\n) -> Callable[[MCPToolCallRequest], Awaitable[MCPToolCallResult]]\n```\n\n**Execution Order:**\n1. Interceptors are applied in **reverse order** during chain construction, so the first interceptor in the list becomes the outermost layer\n2. Each interceptor wraps the previous handler\n3. Request flows inward through interceptors, response flows outward\n\n```mermaid\ngraph TD\n    subgraph \"Request Flow (inward)\"\n        R1[Request] --> I1[Interceptor 1<br/>outermost]\n        I1 --> I2[Interceptor 2]\n        I2 --> I3[Interceptor N<br/>innermost]\n        I3 --> BH[Base Handler<br/>execute_tool]\n    end\n    \n    subgraph \"Response Flow (outward)\"\n        BH --> RT1[Response]\n        RT1 --> I4[Interceptor N]\n        I4 --> I5[Interceptor 2]\n        I5 --> I6[Interceptor 1]\n        I6 --> R2[Response]\n    end\n```\n\n资料来源：[langchain_mcp_adapters/tools.py:147-149]()\n\n### ToolCallInterceptor Interface\n\nInterceptors implement the `ToolCallInterceptor` protocol:\n\n```python\n@runtime_checkable\nclass ToolCallInterceptor(Protocol):\n    async def intercept(\n        self,\n        request: MCPToolCallRequest,\n        current_handler: Callable[[MCPToolCallRequest], Awaitable[MCPToolCallResult]],\n    ) -> MCPToolCallResult:\n        ...\n```\n\n**Usage Pattern:**\n\n```python\nclass MyInterceptor:\n    async def intercept(\n        self,\n        request: MCPToolCallRequest,\n        current_handler: Callable,\n    ) -> MCPToolCallResult:\n        # 
Pre-processing\n        modified_request = request.override(args={\"modified\": True})\n        \n        # Call next handler\n        result = await current_handler(modified_request)\n        \n        # Post-processing\n        return result\n```\n\n资料来源：[langchain_mcp_adapters/interceptors.py:1-50]()\n\n## Type Definitions\n\n### ConvertedToolResult\n\nConditional type based on LangGraph availability:\n\n```python\nif LANGGRAPH_PRESENT:\n    ConvertedToolResult = list[ToolMessageContentBlock] | ToolMessage | Command\nelse:\n    ConvertedToolResult = list[ToolMessageContentBlock] | ToolMessage\n```\n\n### ToolMessageContentBlock\n\n```python\nToolMessageContentBlock = TextContentBlock | ImageContentBlock | FileContentBlock\n```\n\nImport sourced from `langchain_core.messages.content`:\n\n资料来源：[langchain_mcp_adapters/tools.py:15-35]()\n\n## Configuration Options\n\n### Tool Name Prefixing\n\nWhen connecting to multiple MCP servers, tools may have name conflicts. Enable prefixing:\n\n```python\nclient = MultiServerMCPClient(\n    {\n        \"math\": {\n            \"command\": \"python\",\n            \"args\": [\"/path/to/math_server.py\"],\n            \"transport\": \"stdio\",\n        },\n        \"weather\": {\n            \"url\": \"http://localhost:8000/mcp\",\n            \"transport\": \"http\",\n        }\n    }\n)\n\n# With prefix: tool names become \"math_add\", \"weather_get_weather\"\ntools = await client.get_tools(tool_name_prefix=True)\n```\n\n### Session Management\n\n| Mode | Description | Use Case |\n|------|-------------|----------|\n| **Shared Session** | Single session for all tools | Single server, multiple tools |\n| **Per-Tool Session** | New session created per call | Stateless servers |\n| **Explicit Session** | User-managed session | Custom lifecycle control |\n\n资料来源：[langchain_mcp_adapters/client.py:1-80]()\n\n## Error Handling\n\n### ToolException\n\nTool call errors are wrapped in `ToolException`:\n\n```python\nif 
call_tool_result.isError:\n    error_parts = []\n    for item in tool_content:\n        if isinstance(item, str):\n            error_parts.append(item)\n        elif isinstance(item, dict) and item.get(\"type\") == \"text\":\n            error_parts.append(item.get(\"text\", \"\"))\n    error_msg = \"\\n\".join(error_parts) if error_parts else str(tool_content)\n    raise ToolException(error_msg)\n```\n\n资料来源：[langchain_mcp_adapters/tools.py:130-140]()\n\n## Usage Examples\n\n### Basic Tool Loading\n\n```python\nfrom mcp import ClientSession, StdioServerParameters\nfrom mcp.client.stdio import stdio_client\n\nfrom langchain_mcp_adapters.tools import load_mcp_tools\n\nserver_params = StdioServerParameters(\n    command=\"python\",\n    args=[\"/path/to/math_server.py\"],\n)\n\nasync with stdio_client(server_params) as (read, write):\n    async with ClientSession(read, write) as session:\n        await session.initialize()\n        tools = await load_mcp_tools(session)\n```\n\n### With LangGraph Agent\n\n```python\nfrom langchain_mcp_adapters.client import MultiServerMCPClient\nfrom langgraph.graph import StateGraph, MessagesState, START\nfrom langgraph.prebuilt import ToolNode, tools_condition\n\nclient = MultiServerMCPClient({\n    \"math\": {\n        \"command\": \"python\",\n        \"args\": [\"/path/to/math_server.py\"],\n        \"transport\": \"stdio\",\n    }\n})\ntools = await client.get_tools()\n\nbuilder = StateGraph(MessagesState)\nbuilder.add_node(\"call_model\", call_model)\nbuilder.add_node(\"tools\", ToolNode(tools))\nbuilder.add_edge(START, \"call_model\")\nbuilder.add_conditional_edges(\"call_model\", tools_condition)\n```\n\n资料来源：[README.md:1-100]()\n\n## See Also\n\n- [MultiServerMCPClient](../client) — Client for connecting to multiple MCP servers\n- [Tool Call Interceptors](../interceptors) — Middleware for tool call processing\n- [Resource Conversion](../resources) — Converting MCP resources to LangChain Blobs\n\n---\n\n<a id='page-multiserver-client'></a>\n\n## 
MultiServerMCPClient\n\n### 相关页面\n\n相关主题：[Tool Conversion](#page-tool-conversion), [Transport Types](#page-transport-types), [Callbacks](#page-callbacks)\n\n<details>\n<summary>相关源码文件</summary>\n\n以下源码文件用于生成本页说明：\n\n- [langchain_mcp_adapters/client.py](https://github.com/langchain-ai/langchain-mcp-adapters/blob/main/langchain_mcp_adapters/client.py)\n- [langchain_mcp_adapters/tools.py](https://github.com/langchain-ai/langchain-mcp-adapters/blob/main/langchain_mcp_adapters/tools.py)\n- [langchain_mcp_adapters/resources.py](https://github.com/langchain-ai/langchain-mcp-adapters/blob/main/langchain_mcp_adapters/resources.py)\n- [langchain_mcp_adapters/sessions.py](https://github.com/langchain-ai/langchain-mcp-adapters/blob/main/langchain_mcp_adapters/sessions.py)\n- [langchain_mcp_adapters/interceptors.py](https://github.com/langchain-ai/langchain-mcp-adapters/blob/main/langchain_mcp_adapters/interceptors.py)\n- [README.md](https://github.com/langchain-ai/langchain-mcp-adapters/blob/main/README.md)\n</details>\n\n# MultiServerMCPClient\n\nThe `MultiServerMCPClient` is the primary entry point for connecting LangChain applications to multiple Model Context Protocol (MCP) servers. It provides a unified interface to manage connections, load tools, resources, and prompts from various MCP server implementations.\n\n## Overview\n\n`MultiServerMCPClient` serves as a central client that abstracts the complexity of connecting to multiple MCP servers simultaneously. 
It handles session management, tool conversion, and integrates seamlessly with LangChain and LangGraph agents.\n\n```mermaid\ngraph TD\n    A[MultiServerMCPClient] --> B[Connection Manager]\n    B --> C[StdioConnection]\n    B --> D[SSEConnection]\n    B --> E[StreamableHttpConnection]\n    B --> F[WebsocketConnection]\n    G[load_mcp_tools] --> H[LangChain Tools]\n    I[load_mcp_resources] --> J[LangChain Blobs]\n    K[load_mcp_prompts] --> L[LangChain Messages]\n```\n\n## Initialization\n\n### Constructor Parameters\n\n| Parameter | Type | Default | Description |\n|-----------|------|---------|-------------|\n| `connections` | `dict[str, Connection] \\| None` | `None` | Mapping of server names to connection configurations |\n| `callbacks` | `Callbacks \\| None` | `None` | Optional callbacks for notifications and events |\n| `tool_interceptors` | `list[ToolCallInterceptor] \\| None` | `None` | Optional interceptors for modifying tool requests/responses |\n| `tool_name_prefix` | `bool` | `False` | Prefix tool names with server name to avoid conflicts |\n\n### Connection Configuration\n\nEach server in the `connections` dictionary requires a transport-specific configuration:\n\n| Transport | Required Parameters |\n|-----------|---------------------|\n| `stdio` | `command`, `args` |\n| `http` | `url` |\n| `sse` | `url` |\n| `streamable_http` | `url` |\n| `websocket` | `url` |\n\n**资料来源**：[client.py:51-76]()\n\n## Connection Types\n\nThe library supports multiple transport protocols for connecting to MCP servers.\n\n### StdioConnection\n\nUsed for spawning local MCP server processes via standard I/O.\n\n```python\nclient = MultiServerMCPClient(\n    {\n        \"math\": {\n            \"command\": \"python\",\n            \"args\": [\"/path/to/math_server.py\"],\n            \"transport\": \"stdio\",\n        }\n    }\n)\n```\n\n**资料来源**：[README.md:82-90]()\n\n### HTTP/Streamable HTTP Connection\n\nUsed for connecting to HTTP-based MCP servers, including stateless 
streamable HTTP servers.\n\n```python\nclient = MultiServerMCPClient(\n    {\n        \"weather\": {\n            \"url\": \"http://localhost:8000/mcp\",\n            \"transport\": \"http\",\n        }\n    }\n)\n```\n\n**资料来源**：[README.md:37-45]()\n\n### WebSocket Connection\n\nFor WebSocket-based MCP server connections.\n\n### SSE Connection\n\nServer-Sent Events transport for MCP server communication.\n\n## Usage Patterns\n\n### Basic Usage with get_tools()\n\nThe simplest pattern starts a new session for each tool call:\n\n```python\nfrom langchain_mcp_adapters.client import MultiServerMCPClient\n\nclient = MultiServerMCPClient(\n    {\n        \"math\": {\n            \"command\": \"python\",\n            \"args\": [\"/path/to/math_server.py\"],\n            \"transport\": \"stdio\",\n        },\n        \"weather\": {\n            \"url\": \"http://localhost:8000/mcp\",\n            \"transport\": \"http\",\n        }\n    }\n)\nall_tools = await client.get_tools()\n```\n\n**资料来源**：[client.py:51-74]()\n\n### Explicit Session Management\n\nFor more control, use explicit session management:\n\n```python\nfrom langchain_mcp_adapters.client import MultiServerMCPClient\nfrom langchain_mcp_adapters.tools import load_mcp_tools\n\nclient = MultiServerMCPClient({...})\nasync with client.session(\"math\") as session:\n    tools = await load_mcp_tools(session)\n```\n\n**资料来源**：[client.py:75-81]()\n\n### With LangGraph StateGraph\n\nIntegration with LangGraph for agent-based workflows:\n\n```python\nfrom langchain_mcp_adapters.client import MultiServerMCPClient\nfrom langgraph.graph import StateGraph, MessagesState, START\nfrom langgraph.prebuilt import ToolNode, tools_condition\nfrom langchain.chat_models import init_chat_model\n\nmodel = init_chat_model(\"openai:gpt-4.1\")\n\nclient = MultiServerMCPClient({...})\ntools = await client.get_tools()\n\ndef call_model(state: MessagesState):\n    response = model.bind_tools(tools).invoke(state[\"messages\"])\n    return 
{\"messages\": response}\n\nbuilder = StateGraph(MessagesState)\nbuilder.add_node(call_model)\nbuilder.add_node(ToolNode(tools))\nbuilder.add_edge(START, \"call_model\")\nbuilder.add_conditional_edges(\"call_model\", tools_condition)\n```\n\n**资料来源**：[README.md:103-126]()\n\n## Tool Name Prefixing\n\nWhen `tool_name_prefix=True`, tool names are prefixed with the server name using an underscore separator:\n\n```python\n# With prefix: \"weather_search\"\n# Without prefix: \"search\"\nclient = MultiServerMCPClient(\n    {...},\n    tool_name_prefix=True\n)\n```\n\nThis helps avoid conflicts when multiple servers expose tools with identical names.\n\n**资料来源**：[client.py:48-51]()\n\n## Runtime Headers\n\nFor HTTP and SSE transports, you can pass custom headers for authentication or tracing:\n\n```python\nclient = MultiServerMCPClient(\n    {\n        \"weather\": {\n            \"transport\": \"http\",\n            \"url\": \"http://localhost:8000/mcp\",\n            \"headers\": {\n                \"Authorization\": \"Bearer YOUR_TOKEN\",\n                \"X-Custom-Header\": \"custom-value\"\n            },\n        }\n    }\n)\n```\n\n> Only `sse` and `http` transports support runtime headers.\n\n**资料来源**：[README.md:129-152]()\n\n## Tool Interceptors\n\nTool call interceptors allow you to modify requests and responses in an onion-pattern chain:\n\n```python\nfrom langchain_mcp_adapters.interceptors import (\n    MCPToolCallRequest,\n    MCPToolCallResult,\n    ToolCallInterceptor\n)\n\nclass CustomInterceptor(ToolCallInterceptor):\n    async def intercept(\n        self, request: MCPToolCallRequest, handler\n    ) -> MCPToolCallResult:\n        # Modify request\n        modified_request = request.override(args={\"modified\": True})\n        # Process and potentially modify response\n        result = await handler(modified_request)\n        return result\n\nclient = MultiServerMCPClient(\n    {...},\n    
tool_interceptors=[CustomInterceptor()]\n)\n```\n\n**资料来源**：[interceptors.py:1-55]()\n\n## MCPToolArtifact\n\nTool call results that include structured content are wrapped in an `MCPToolArtifact`:\n\n```python\nclass MCPToolArtifact(TypedDict):\n    \"\"\"Artifact returned from MCP tool calls.\n    \n    Attributes:\n        structured_content: The structured content returned by the MCP tool,\n            corresponding to the structuredContent field in CallToolResult.\n    \"\"\"\n    structured_content: dict[str, Any]\n```\n\n**资料来源**：[tools.py:70-84]()\n\n## Content Conversion\n\nThe library automatically converts MCP content blocks to LangChain content blocks:\n\n| MCP Type | LangChain Type |\n|----------|----------------|\n| `TextContent` | `{\"type\": \"text\", \"text\": ...}` |\n| `ImageContent` | `{\"type\": \"image\", ...}` |\n| `FileContentBlock` | `{\"type\": \"file\", ...}` |\n| `ResourceLink` | `{\"type\": \"image\"}` or `{\"type\": \"file\"}` |\n| `EmbeddedResource` | `{\"type\": \"text\"}`, `{\"type\": \"image\"}`, or `{\"type\": \"file\"}` |\n| `AudioContent` | `NotImplementedError` |\n\n**资料来源**：[tools.py:86-126]()\n\n## Limitations\n\n### Async Context Manager Deprecation\n\nAs of version 0.1.0, `MultiServerMCPClient` cannot be used as an async context manager:\n\n```python\n# This is NOT allowed:\n# async with MultiServerMCPClient(...) 
as client:\n#     ...\n\n# Instead use:\nclient = MultiServerMCPClient(...)\ntools = await client.get_tools()\n```\n\n**资料来源**：[client.py:55-68]()\n\n## Architecture\n\n```mermaid\nsequenceDiagram\n    participant Client as MultiServerMCPClient\n    participant Session as ClientSession\n    participant Loader as load_mcp_tools\n    participant Converter as Content Converter\n    participant LC as LangChain Tool\n\n    Client->>Session: create_session()\n    Session->>Loader: session.list_tools()\n    Loader->>Session: tool definitions\n    Session-->>Converter: Tool metadata\n    Converter->>LC: StructuredTool\n    LC-->>Client: BaseTool list\n```\n\n## See Also\n\n- [load_mcp_tools()](langchain_mcp_adapters/tools.md) - Loading and converting MCP tools\n- [load_mcp_resources()](langchain_mcp_adapters/resources.md) - Loading MCP resources as Blobs\n- [load_mcp_prompts()](langchain_mcp_adapters/prompts.md) - Loading MCP prompts as Messages\n- [ToolCallInterceptor](langchain_mcp_adapters/interceptors.md) - Intercepting tool calls\n\n---\n\n<a id='page-transport-types'></a>\n\n## Transport Types\n\n### 相关页面\n\n相关主题：[MultiServerMCPClient](#page-multiserver-client)\n\n<details>\n<summary>相关源码文件</summary>\n\n以下源码文件用于生成本页说明：\n\n- [langchain_mcp_adapters/sessions.py](https://github.com/langchain-ai/langchain-mcp-adapters/blob/main/langchain_mcp_adapters/sessions.py)\n- [README.md](https://github.com/langchain-ai/langchain-mcp-adapters/blob/main/README.md)\n- [langchain_mcp_adapters/client.py](https://github.com/langchain-ai/langchain-mcp-adapters/blob/main/langchain_mcp_adapters/client.py)\n- [langchain_mcp_adapters/tools.py](https://github.com/langchain-ai/langchain-mcp-adapters/blob/main/langchain_mcp_adapters/tools.py)\n</details>\n\n# Transport Types\n\nLangChain MCP Adapters supports multiple transport types for connecting to MCP (Model Context Protocol) servers. 
Transport types define the communication mechanism used between the client and server, enabling flexibility in different deployment scenarios.\n\n## Overview\n\nTransport types in langchain-mcp-adapters determine how MCP client sessions communicate with MCP servers. The library provides native support for four primary transport mechanisms, each suited for different use cases ranging from local development to production deployments.\n\n```mermaid\ngraph TD\n    A[MultiServerMCPClient] --> B{Transport Type}\n    B --> C[stdio]\n    B --> D[http]\n    B --> E[sse]\n    B --> F[websocket]\n    \n    C --> G[Local/Subprocess]\n    D --> H[HTTP Server]\n    E --> I[HTTP + SSE Events]\n    F --> J[WebSocket Server]\n    \n    G --> K[StdioServerParameters]\n    H --> L[URL + Headers]\n    I --> M[URL + Headers]\n    J --> N[URL]\n```\n\n资料来源：[langchain_mcp_adapters/client.py:1-50]()\n\n## Supported Transport Types\n\n| Transport | Use Case | Session Creation | Header Support | Timeout Config |\n|-----------|----------|------------------|----------------|----------------|\n| `stdio` | Local subprocesses, development | In-process via stdin/stdout | N/A | Encoding handlers |\n| `http` | Remote HTTP servers, stateless | Streamable HTTP client | ✅ | Request timeout |\n| `sse` | Server-Sent Events servers | HTTP + SSE endpoint | ✅ | SSE read timeout |\n| `websocket` | Real-time bidirectional | WebSocket connection | ❌ | Connection timeout |\n\n资料来源：[langchain_mcp_adapters/sessions.py:1-100]()\n\n## Stdio Transport\n\nThe `stdio` transport uses standard input/output streams for communication. 
This is ideal for running MCP servers as local subprocesses or when the server runs on the same machine as the client.\n\n### Configuration Parameters\n\n| Parameter | Type | Required | Description |\n|-----------|------|----------|-------------|\n| `command` | `str` | ✅ | Executable command (e.g., `\"python\"`, `\"node\"`) |\n| `args` | `list[str]` | ❌ | Command-line arguments |\n| `env` | `dict[str, str]` | ❌ | Environment variables |\n| `cwd` | `str` | ❌ | Working directory |\n| `encoding` | `str` | ❌ | Character encoding (default: system default) |\n| `encoding_error_handler` | `str` | ❌ | How to handle encoding errors |\n| `session_kwargs` | `dict` | ❌ | Additional `ClientSession` arguments |\n\n资料来源：[langchain_mcp_adapters/sessions.py:60-90]()\n\n### Environment Variable Expansion\n\nThe `env` parameter supports environment variable expansion in variable values:\n\n```python\nenv = {\n    \"API_KEY\": \"${MY_API_KEY}\",  # Expands from current environment\n    \"STATIC\": \"custom-value\"     # Passed through unchanged\n}\n```\n\nVariable references use the pattern `${VAR_NAME}`. Only values (not keys) are expanded. Unexpanded references trigger a warning.\n\n资料来源：[langchain_mcp_adapters/sessions.py:80-85]()\n\n### Example: Stdio Connection\n\n```python\nfrom langchain_mcp_adapters.client import MultiServerMCPClient\n\nclient = MultiServerMCPClient({\n    \"math\": {\n        \"command\": \"python\",\n        \"args\": [\"/path/to/math_server.py\"],\n        \"transport\": \"stdio\",\n    }\n})\ntools = await client.get_tools()\n```\n\n资料来源：[README.md:80-100]()\n\n## HTTP Transport\n\nThe `http` transport connects to MCP servers via HTTP protocol. 
This is designed for remote server deployments and supports stateless request/response patterns.\n\n### Configuration Parameters\n\n| Parameter | Type | Required | Description |\n|-----------|------|----------|-------------|\n| `url` | `str` | ✅ | Full URL to the MCP server endpoint |\n| `headers` | `dict[str, str]` | ❌ | HTTP headers sent with each request |\n| `timeout` | `float` | ❌ | Request timeout in seconds (default: `60.0`) |\n\n### Header Support\n\nHTTP transport supports runtime headers, enabling dynamic authentication and authorization:\n\n```python\nfrom langchain_mcp_adapters.client import MultiServerMCPClient\n\nclient = MultiServerMCPClient({\n    \"weather\": {\n        \"url\": \"http://localhost:8000/mcp\",\n        \"transport\": \"http\",\n        \"headers\": {\n            \"Authorization\": \"Bearer ${API_TOKEN}\",\n            \"X-Custom-Header\": \"custom-value\"\n        }\n    }\n})\n```\n\n> Only `sse` and `http` transports support runtime headers.\n\n资料来源：[README.md:110-130]()\n\n### Example: HTTP Connection\n\n```bash\n# Start a streamable HTTP server\ncd examples/servers/streamable-http-stateless/\nuv run mcp-simple-streamablehttp-stateless --port 3000\n```\n\n```python\nfrom mcp import ClientSession\nfrom mcp.client.streamable_http import streamablehttp_client\nfrom langchain_mcp_adapters.tools import load_mcp_tools\n\nasync with streamablehttp_client(\"http://localhost:3000/mcp\") as (read, write, _):\n    async with ClientSession(read, write) as session:\n        await session.initialize()\n        tools = await load_mcp_tools(session)\n```\n\n资料来源：[README.md:35-55]()\n\n## SSE Transport\n\nSSE (Server-Sent Events) transport combines HTTP requests with server-side event streaming. 
This is useful when the MCP server needs to push updates or progress notifications to the client.\n\n### Configuration Parameters\n\n| Parameter | Type | Required | Description |\n|-----------|------|----------|-------------|\n| `url` | `str` | ✅ | Full URL to the MCP server SSE endpoint |\n| `headers` | `dict[str, str]` | ❌ | HTTP headers sent with each request |\n| `sse_read_timeout` | `float` | ❌ | SSE read timeout in seconds (default: `300.0`) |\n| `timeout` | `float` | ❌ | HTTP request timeout (default: `60.0`) |\n\n### Progress Callbacks\n\nSSE transport enables progress callback functionality through the MCP client callbacks system:\n\n```python\nfrom langchain_mcp_adapters.callbacks import Callbacks, CallbackContext\n\nclass CustomCallbacks(Callbacks):\n    async def progress_callback(self, progress: float, total: float | None, message: str | None) -> None:\n        print(f\"Progress: {progress}/{total} - {message}\")\n```\n\n资料来源：[langchain_mcp_adapters/tools.py:180-200]()\n\n## WebSocket Transport\n\nWebSocket transport provides bidirectional real-time communication between the client and MCP server. 
This is suitable for applications requiring low-latency, persistent connections.\n\n### Configuration Parameters\n\n| Parameter | Type | Required | Description |\n|-----------|------|----------|-------------|\n| `url` | `str` | ✅ | WebSocket endpoint URL |\n| `headers` | `dict[str, str]` | ❌ | WebSocket handshake headers |\n| `timeout` | `float` | ❌ | Connection timeout |\n\n## Connection Factory\n\nThe `Connection` abstract class defines the common interface for all transport implementations:\n\n```mermaid\nclassDiagram\n    class Connection {\n        <<abstract>>\n        +session_kwargs: dict\n        +server_name: str\n        +get_session() ClientSession\n    }\n    \n    class StdioConnection {\n        +command: str\n        +args: list\n        +env: dict\n        +get_session() ClientSession\n    }\n    \n    class StreamableHttpConnection {\n        +url: str\n        +headers: dict\n        +timeout: float\n        +get_session() ClientSession\n    }\n    \n    class SSEConnection {\n        +url: str\n        +headers: dict\n        +timeout: float\n        +sse_read_timeout: float\n        +get_session() ClientSession\n    }\n    \n    class WebsocketConnection {\n        +url: str\n        +headers: dict\n        +timeout: float\n        +get_session() ClientSession\n    }\n    \n    Connection <|-- StdioConnection\n    Connection <|-- StreamableHttpConnection\n    Connection <|-- SSEConnection\n    Connection <|-- WebsocketConnection\n```\n\n资料来源：[langchain_mcp_adapters/sessions.py:1-50]()\n\n## Session Creation\n\nAll transport types ultimately create an MCP `ClientSession` for tool execution:\n\n```python\nfrom langchain_mcp_adapters.sessions import create_session\n\n# Direct session creation\nasync with create_session(connection) as session:\n    tools = await load_mcp_tools(session)\n```\n\n资料来源：[langchain_mcp_adapters/sessions.py:1-30]()\n\n### MultiServerMCPClient Session Management\n\n```python\n# Explicitly starting a session\nclient = 
MultiServerMCPClient({\n    \"math\": {\n        \"command\": \"python\",\n        \"args\": [\"/path/to/math_server.py\"],\n        \"transport\": \"stdio\",\n    }\n})\n\nasync with client.session(\"math\") as session:\n    tools = await load_mcp_tools(session)\n```\n\n> MultiServerMCPClient cannot be used as a context manager directly. Use `client.session(server_name)` for explicit session control.\n\n资料来源：[langchain_mcp_adapters/client.py:1-60]()\n\n## Tool Name Prefixing\n\nWhen using multiple servers with overlapping tool names, enable the `tool_name_prefix` option to avoid conflicts:\n\n```python\nclient = MultiServerMCPClient(\n    {\n        \"math\": {\"transport\": \"stdio\", ...},\n        \"weather\": {\"transport\": \"http\", \"url\": \"http://localhost:8000/mcp\"}\n    },\n    tool_name_prefix=True  # Enables prefixed tool names\n)\ntools = await client.get_tools()\n# Tool names: \"math_add\", \"weather_search\" (prefixed with server name)\n```\n\n资料来源：[langchain_mcp_adapters/client.py:30-45]()\n\n## Transport Selection Guide\n\n```mermaid\ngraph TD\n    A[Select Transport] --> B{Deployment Type}\n    \n    B --> C[Local/Subprocess]\n    C --> D[Use stdio]\n    \n    B --> E[Remote Server]\n    E --> F{Need Real-time Events?}\n    \n    F --> G[Yes]\n    G --> H[Use websocket]\n    \n    F --> I[No]\n    I --> J{HTTP/1.1 or Streaming?}\n    \n    J --> K[Streaming/SSE]\n    K --> L[Use sse]\n    \n    J --> M[Request/Response]\n    M --> N[Use http]\n```\n\n### Decision Matrix\n\n| Scenario | Recommended Transport |\n|----------|----------------------|\n| Development, local testing | `stdio` |\n| Production HTTP API | `http` |\n| Server pushing events to client | `sse` |\n| Bidirectional, low-latency needs | `websocket` |\n| Fire-and-forget subprocess | `stdio` |\n\n## Timeout Configuration\n\n### Default Timeouts\n\n| Transport | Parameter | Default Value |\n|-----------|-----------|---------------|\n| HTTP | `timeout` | `60.0` seconds |\n| SSE | 
`timeout` | `60.0` seconds |\n| SSE | `sse_read_timeout` | `300.0` seconds |\n| WebSocket | `timeout` | Connection timeout |\n\n### Custom Timeout Example\n\n```python\nfrom langchain_mcp_adapters.sessions import StreamableHttpConnection\n\nconnection = StreamableHttpConnection(\n    url=\"http://localhost:8000/mcp\",\n    timeout=120.0,  # 2 minute request timeout\n)\n```\n\n## Error Handling\n\nTransport-specific errors may occur during session creation or tool execution:\n\n### Stdio Transport Errors\n\n- **Process startup failure**: Check `command` path and permissions\n- **Encoding errors**: Configure `encoding` and `encoding_error_handler`\n\n### HTTP/SSE/WebSocket Transport Errors\n\n- **Connection timeout**: Increase `timeout` parameter\n- **SSE read timeout**: Increase `sse_read_timeout` for long-running operations\n- **Header authentication failures**: Verify header format and token validity\n\n## See Also\n\n- [MultiServerMCPClient](langchain_mcp_adapters/client.py) - Multi-server connection management\n- [load_mcp_tools](langchain_mcp_adapters/tools.py) - Tool loading with transport\n- [Callbacks System](langchain_mcp_adapters/callbacks.py) - Progress and notification handling\n\n---\n\n<a id='page-callbacks'></a>\n\n## Callbacks\n\n### 相关页面\n\n相关主题：[Tool Call Interceptors](#page-interceptors), [MultiServerMCPClient](#page-multiserver-client)\n\n<details>\n<summary>相关源码文件</summary>\n\n以下源码文件用于生成本页说明：\n\n- [langchain_mcp_adapters/callbacks.py](https://github.com/langchain-ai/langchain-mcp-adapters/blob/main/langchain_mcp_adapters/callbacks.py)\n- [langchain_mcp_adapters/tools.py](https://github.com/langchain-ai/langchain-mcp-adapters/blob/main/langchain_mcp_adapters/tools.py)\n- [langchain_mcp_adapters/client.py](https://github.com/langchain-ai/langchain-mcp-adapters/blob/main/langchain_mcp_adapters/client.py)\n- 
[langchain_mcp_adapters/interceptors.py](https://github.com/langchain-ai/langchain-mcp-adapters/blob/main/langchain_mcp_adapters/interceptors.py)\n- [langchain_mcp_adapters/resources.py](https://github.com/langchain-ai/langchain-mcp-adapters/blob/main/langchain_mcp_adapters/resources.py)\n</details>\n\n# Callbacks\n\nThe Callbacks system in `langchain-mcp-adapters` provides a mechanism for handling notifications, events, and progress updates during MCP tool execution. It acts as a bridge between the LangChain callback format and the MCP (Model Context Protocol) callback format, enabling developers to intercept and respond to tool call lifecycle events.\n\n## Overview\n\nWhen working with MCP tools through `langchain-mcp-adapters`, callbacks serve several critical purposes:\n\n- **Progress Notification**: Track long-running tool operations via progress callbacks\n- **Event Handling**: Respond to notifications and events from the MCP server\n- **Context Propagation**: Maintain context about which server and tool is being executed\n- **Lifecycle Integration**: Integrate with LangChain's callback system for broader ecosystem compatibility\n\nThe callback system is primarily used in two contexts:\n1. When loading MCP tools via `load_mcp_tools()` or `convert_mcp_tool_to_langchain_tool()`\n2. When configuring the `MultiServerMCPClient` for multi-server tool aggregation\n\n资料来源：[langchain_mcp_adapters/tools.py:1-30]()\n\n## Core Components\n\n### CallbackContext\n\nThe `CallbackContext` class provides context information about an ongoing tool call operation.\n\n| Property | Type | Description |\n|----------|------|-------------|\n| `server_name` | `str \\| None` | Name of the MCP server handling the tool call |\n| `tool_name` | `str \\| None` | Name of the tool being executed |\n\n资料来源：[langchain_mcp_adapters/callbacks.py]()\n资料来源：[langchain_mcp_adapters/tools.py:55-62]()\n\n### Callbacks Class\n\nThe `Callbacks` class is the main abstraction for handling MCP events. 
It provides the interface that developers implement to receive notifications.\n\n```python\nclass Callbacks:\n    \"\"\"Handler for MCP notifications and events.\"\"\"\n    \n    def to_mcp_format(self, context: CallbackContext) -> _MCPCallbacks:\n        \"\"\"Convert to MCP-compatible callback format.\"\"\"\n        ...\n```\n\n资料来源：[langchain_mcp_adapters/callbacks.py]()\n资料来源：[langchain_mcp_adapters/tools.py:63-68]()\n\n### _MCPCallbacks Class\n\nThe internal `_MCPCallbacks` class wraps callbacks in the format expected by the MCP SDK.\n\n| Property | Type | Description |\n|----------|------|-------------|\n| `progress_callback` | `Callable \\| None` | Callback for progress updates during tool execution |\n\n资料来源：[langchain_mcp_adapters/callbacks.py]()\n\n## Architecture\n\n```mermaid\ngraph TD\n    A[MultiServerMCPClient] --> B[Callbacks Instance]\n    A --> C[load_mcp_tools]\n    B --> D[to_mcp_format]\n    D --> E[_MCPCallbacks]\n    E --> F[session.call_tool]\n    C --> G[CallbackContext]\n    G --> D\n    \n    H[MCP Server] --> I[Progress Updates]\n    I --> F\n```\n\n## Usage Patterns\n\n### Basic Usage with MultiServerMCPClient\n\nThe most common pattern is to pass a `Callbacks` instance to the `MultiServerMCPClient`:\n\n```python\nfrom langchain_mcp_adapters.client import MultiServerMCPClient\nfrom langchain_mcp_adapters.callbacks import Callbacks, CallbackContext\n\nclass MyCallbacks(Callbacks):\n    def to_mcp_format(self, context: CallbackContext) -> _MCPCallbacks:\n        # Custom callback handling\n        return _MCPCallbacks(progress_callback=self.on_progress)\n\n    async def on_progress(self, progress: float, total: float | None, message: str | None):\n        print(f\"Progress: {progress}/{total} - {message}\")\n\nclient = MultiServerMCPClient(\n    {\n        \"math\": {\n            \"command\": \"python\",\n            \"args\": [\"/path/to/math_server.py\"],\n            \"transport\": \"stdio\",\n        },\n    },\n    
callbacks=MyCallbacks()\n)\n```\n\n资料来源：[langchain_mcp_adapters/client.py:40-60]()\n\n### Usage with load_mcp_tools\n\nCallbacks can also be passed directly when loading tools from a session:\n\n```python\nfrom langchain_mcp_adapters.tools import load_mcp_tools\n\nasync with streamablehttp_client(\"http://localhost:3000/mcp\") as (read, write, _):\n    async with ClientSession(read, write) as session:\n        await session.initialize()\n        \n        tools = await load_mcp_tools(\n            session,\n            callbacks=MyCallbacks(),\n            server_name=\"math_server\"\n        )\n```\n\n资料来源：[langchain_mcp_adapters/tools.py:100-135]()\n\n### Usage with Tool Interceptors\n\nCallbacks work alongside tool interceptors for advanced control over tool execution:\n\n```python\nfrom langchain_mcp_adapters.interceptors import ToolCallInterceptor, MCPToolCallRequest, MCPToolCallResult\n\nclass LoggingInterceptor(ToolCallInterceptor):\n    async def intercept(\n        self, \n        request: MCPToolCallRequest, \n        call_next: Callable\n    ) -> MCPToolCallResult:\n        print(f\"Calling tool: {request.name}\")\n        result = await call_next(request)\n        print(f\"Tool result: {result}\")\n        return result\n\nclient = MultiServerMCPClient(\n    {...},\n    callbacks=MyCallbacks(),\n    tool_interceptors=[LoggingInterceptor()]\n)\n```\n\n资料来源：[langchain_mcp_adapters/interceptors.py:1-50]()\n\n## Callback Flow in Tool Execution\n\n```mermaid\nsequenceDiagram\n    participant Client as MCP Client\n    participant Callbacks as Callbacks Handler\n    participant Session as ClientSession\n    participant Server as MCP Server\n    \n    Client->>Callbacks: to_mcp_format(context)\n    Callbacks-->>Client: _MCPCallbacks\n    Client->>Session: call_tool(name, args, progress_callback)\n    Session->>Server: Execute Tool\n    Server-->>Session: Progress Update\n    Session->>Callbacks: progress_callback\n    Server-->>Session: Tool Result\n    
Session-->>Client: CallToolResult\n```\n\n## CallbackContext Construction\n\nThe `CallbackContext` is constructed with server and tool information at different points in the execution flow:\n\n| Function | Context Construction |\n|----------|---------------------|\n| `load_mcp_tools()` | Uses `server_name` from parameters |\n| `convert_mcp_tool_to_langchain_tool()` | Uses both `server_name` and `tool.name` |\n| `MultiServerMCPClient` | Passed through to all tool loading operations |\n\n资料来源：[langchain_mcp_adapters/tools.py:70-80]()\n\n## Error Handling\n\nWhen callbacks are not provided, the system uses a default `_MCPCallbacks()` instance:\n\n```python\nmcp_callbacks = (\n    callbacks.to_mcp_format(context=CallbackContext(server_name=server_name, tool_name=tool.name))\n    if callbacks is not None\n    else _MCPCallbacks()\n)\n```\n\nThis ensures that tool execution continues normally even without custom callback handling.\n\n资料来源：[langchain_mcp_adapters/tools.py:70-75]()\n\n## Integration with Tool Result Conversion\n\nCallbacks are passed through the entire tool execution chain and are used when converting tool results back to LangChain format:\n\n```python\nasync def call_tool(...) 
-> tuple[ConvertedToolResult, MCPToolArtifact | None]:\n    mcp_callbacks = (\n        callbacks.to_mcp_format(\n            context=CallbackContext(server_name=server_name, tool_name=tool.name)\n        )\n        if callbacks is not None\n        else _MCPCallbacks()\n    )\n    \n    # Execute with progress callback\n    call_tool_result = await session.call_tool(\n        tool_name,\n        tool_args,\n        progress_callback=mcp_callbacks.progress_callback,\n    )\n```\n\n资料来源：[langchain_mcp_adapters/tools.py:55-70]()\n\n## API Reference\n\n### Callbacks Class\n\n```python\nclass Callbacks:\n    \"\"\"Base class for handling MCP notifications and events.\"\"\"\n    \n    def to_mcp_format(self, context: CallbackContext) -> _MCPCallbacks:\n        \"\"\"Convert the callbacks to MCP-compatible format.\n        \n        Args:\n            context: The callback context containing server and tool info.\n            \n        Returns:\n            An _MCPCallbacks instance configured with appropriate handlers.\n        \"\"\"\n        ...\n```\n\n### _MCPCallbacks Class\n\n```python\n@dataclass\nclass _MCPCallbacks:\n    \"\"\"Internal MCP-compatible callbacks wrapper.\"\"\"\n    \n    progress_callback: Callable | None = None\n```\n\n### CallbackContext Class\n\n```python\n@dataclass\nclass CallbackContext:\n    \"\"\"Context information for callback handlers.\"\"\"\n    \n    server_name: str | None = None\n    tool_name: str | None = None\n```\n\n## Best Practices\n\n1. **Always provide context**: When constructing `CallbackContext`, include both `server_name` and `tool_name` for maximum observability.\n\n2. **Handle None gracefully**: The callback system is designed to work without callbacks, so ensure your code handles the default case.\n\n3. **Combine with interceptors**: For comprehensive tool call control, combine callbacks with tool interceptors.\n\n4. 
**Thread-safe progress updates**: Progress callbacks may be called from different tasks; ensure your handler is thread-safe or async-safe.\n\n5. **Resource cleanup**: When using callbacks that allocate resources, ensure proper cleanup in the client lifecycle.\n\n## Summary\n\nThe Callbacks system in `langchain-mcp-adapters` provides a clean abstraction for handling MCP tool lifecycle events. By implementing the `Callbacks` class and its `to_mcp_format()` method, developers can:\n\n- Monitor tool execution progress\n- Handle notifications from MCP servers\n- Integrate with LangChain's callback ecosystem\n- Build custom logging, monitoring, and error handling for MCP tool calls\n\nThe system is designed to be optional—tools work with default callbacks when none are provided—while providing rich customization when needed.\n\n---\n\n<a id='page-interceptors'></a>\n\n## Tool Call Interceptors\n\n### 相关页面\n\n相关主题：[Callbacks](#page-callbacks), [Tool Conversion](#page-tool-conversion)\n\n<details>\n<summary>相关源码文件</summary>\n\n以下源码文件用于生成本页说明：\n\n- [langchain_mcp_adapters/interceptors.py](https://github.com/langchain-ai/langchain-mcp-adapters/blob/main/langchain_mcp_adapters/interceptors.py)\n- [langchain_mcp_adapters/tools.py](https://github.com/langchain-ai/langchain-mcp-adapters/blob/main/langchain_mcp_adapters/tools.py)\n</details>\n\n# Tool Call Interceptors\n\n## Overview\n\nTool Call Interceptors provide a mechanism to wrap and control MCP tool call execution in the langchain-mcp-adapters library. 
They enable developers to inject custom logic before and after tool calls, modify request parameters, handle responses, and implement cross-cutting concerns like logging, authentication, and caching.\n\nThe interceptor system follows the **onion pattern** (also known as decorator pattern or chain of responsibility), where each interceptor wraps the next one, allowing pre-processing and post-processing of tool calls in a composable way.\n\n## Architecture\n\n### High-Level Flow\n\n```mermaid\ngraph TD\n    A[External Code] --> B[Interceptor Chain]\n    B --> C[Interceptor 1]\n    C --> D[Interceptor 2]\n    D --> E[...]\n    E --> F[execute_tool]\n    F --> G[MCP ClientSession.call_tool]\n    \n    subgraph \"Onion Layers (wrapping inward)\"\n        B\n        C\n        D\n        E\n    end\n```\n\n### Component Diagram\n\n```mermaid\nclassDiagram\n    class MCPToolCallRequest {\n        +str name\n        +dict args\n        +str server_name\n        +dict headers\n        +object runtime\n        +override() MCPToolCallRequest\n    }\n    \n    class MCPToolCallResult {\n        <<Type Alias>>\n        CallToolResult | ToolMessage | Command\n    }\n    \n    class ToolCallInterceptor {\n        <<Protocol>>\n        +async __call__(request, handler) MCPToolCallResult\n    }\n    \n    class _build_interceptor_chain {\n        +build_composed_handler()\n    }\n    \n    MCPToolCallRequest --> ToolCallInterceptor : passed to\n    _build_interceptor_chain --> ToolCallInterceptor : composes\n```\n\n## Core Data Models\n\n### MCPToolCallRequest\n\nRepresents a tool execution request passed to MCP tool call interceptors. 
Follows a flat namespace pattern rather than separating call data and context into nested objects.\n\n| Field | Type | Modifiable | Description |\n|-------|------|-------------|-------------|\n| `name` | `str` | Yes | Tool name to invoke |\n| `args` | `dict[str, Any]` | Yes | Tool arguments as key-value pairs |\n| `server_name` | `str` | No | Name of the MCP server handling the tool |\n| `headers` | `dict[str, Any] \\| None` | Yes | HTTP headers for applicable transports (SSE, HTTP) |\n| `runtime` | `object \\| None` | No | LangGraph runtime context (if any) |\n\n资料来源：[interceptors.py:58-74]()\n\n#### The `override()` Method\n\nThe `MCPToolCallRequest` class provides an immutable `override()` method that returns a new instance with specified attributes replaced:\n\n```python\ndef override(\n    self, **overrides: Unpack[_MCPToolCallRequestOverrides]\n) -> MCPToolCallRequest:\n```\n\nThis follows an immutable pattern, leaving the original request unchanged.\n\n| Parameter | Type | Description |\n|-----------|------|-------------|\n| `name` | `str` | Tool name (optional) |\n| `args` | `dict[str, Any]` | Tool arguments (optional) |\n| `headers` | `dict[str, Any] \\| None` | HTTP headers (optional) |\n\n### MCPToolCallResult\n\nA type alias representing the possible return types from an interceptor:\n\n| Type | Description |\n|------|-------------|\n| `CallToolResult` | MCP protocol result (standard MCP format) |\n| `ToolMessage` | LangChain format message |\n| `Command` | LangGraph Command (when langgraph is installed) |\n\n```python\nif LANGGRAPH_PRESENT:\n    MCPToolCallResult = CallToolResult | ToolMessage | Command\nelse:\n    MCPToolCallResult = CallToolResult | ToolMessage\n```\n\n资料来源：[interceptors.py:29-36]()\n\n## ToolCallInterceptor Protocol\n\nThe `ToolCallInterceptor` is a runtime-checkable protocol that defines the interface for interceptor implementations:\n\n```python\n@runtime_checkable\nclass ToolCallInterceptor(Protocol):\n    async def __call__(\n   
     self,\n        request: MCPToolCallRequest,\n        handler: Callable[[MCPToolCallRequest], Awaitable[MCPToolCallResult]],\n    ) -> MCPToolCallResult:\n        ...\n```\n\n| Parameter | Type | Description |\n|-----------|------|-------------|\n| `request` | `MCPToolCallRequest` | The tool call request to process |\n| `handler` | `Callable` | The next handler in the chain (call to continue execution) |\n| **Returns** | `MCPToolCallResult` | The result of processing |\n\n资料来源：[interceptors.py:42-49]()\n\n### Interceptor Pattern\n\nInterceptors work by:\n\n1. **Receiving** the `request` and the `handler` callable\n2. **Optionally** modifying the request before passing it on\n3. **Calling** the `handler` to continue the chain\n4. **Optionally** modifying the result before returning\n\n```python\nasync def my_interceptor(\n    request: MCPToolCallRequest,\n    handler: Callable[[MCPToolCallRequest], Awaitable[MCPToolCallResult]],\n) -> MCPToolCallResult:\n    # Pre-processing: modify request\n    modified_request = request.override(args={**request.args, \"injected\": True})\n    \n    # Continue to next handler\n    result = await handler(modified_request)\n    \n    # Post-processing: modify result\n    # ... 
do something with result ...\n    \n    return result\n```\n\n## Building the Interceptor Chain\n\nThe `_build_interceptor_chain()` function composes multiple interceptors into a single handler using the onion pattern:\n\n```python\ndef _build_interceptor_chain(\n    base_handler: Callable[[MCPToolCallRequest], Awaitable[MCPToolCallResult]],\n    tool_interceptors: list[ToolCallInterceptor] | None,\n) -> Callable[[MCPToolCallRequest], Awaitable[MCPToolCallResult]]:\n```\n\n| Parameter | Type | Description |\n|-----------|------|-------------|\n| `base_handler` | `Callable` | Innermost handler that executes the actual tool call |\n| `tool_interceptors` | `list[ToolCallInterceptor] \\| None` | List of interceptors to wrap around the handler |\n\n资料来源：[tools.py:145-147]()\n\n### Execution Order\n\nThe first interceptor in the list becomes the **outermost layer**, with subsequent interceptors wrapping inward. This means:\n\n1. Interceptor at index 0 executes **first** (outermost)\n2. Interceptor at index 1 executes **second**\n3. And so on...\n4. 
The `base_handler` (actual tool execution) executes **last** (innermost)\n\n```mermaid\ngraph LR\n    A[External Call] --> B[\"Interceptor[0]<br/>outermost\"]\n    B --> C[\"Interceptor[1]\"]\n    C --> D[\"Interceptor[2]\"]\n    D --> E[\"...\"]\n    E --> F[\"base_handler<br/>innermost\"]\n    F --> G[MCP call_tool]\n```\n\n## Usage\n\n### Loading Tools with Interceptors\n\nWhen loading MCP tools, you can provide a list of interceptors:\n\n```python\nfrom langchain_mcp_adapters.client import MultiServerMCPClient\nfrom langchain_mcp_adapters.tools import load_mcp_tools\n\n# Define your interceptor\nclass LoggingInterceptor:\n    async def __call__(self, request, handler):\n        print(f\"Calling tool: {request.name}\")\n        result = await handler(request)\n        print(f\"Tool {request.name} completed\")\n        return result\n\nclient = MultiServerMCPClient({\n    \"math\": {\n        \"command\": \"python\",\n        \"args\": [\"./math_server.py\"],\n        \"transport\": \"stdio\",\n    }\n})\n\ntools = await client.get_tools(\n    tool_interceptors=[LoggingInterceptor()]\n)\n```\n\n资料来源：[tools.py:163-179]()\n\n### Individual Tool Conversion\n\nYou can also apply interceptors when converting individual tools:\n\n```python\nfrom langchain_mcp_adapters.tools import convert_mcp_tool_to_langchain_tool\n\ntool = convert_mcp_tool_to_langchain_tool(\n    session=session,\n    tool=mcp_tool,\n    tool_interceptors=[CustomInterceptor()],\n    server_name=\"my_server\",\n    tool_name_prefix=True\n)\n```\n\n### Using Runtime Context\n\nInterceptors have access to the `runtime` field, which contains LangGraph runtime context when used within a LangGraph graph:\n\n```python\nclass RuntimeAwareInterceptor:\n    async def __call__(self, request, handler):\n        if request.runtime:\n            # Access LangGraph runtime\n            pass\n        return await handler(request)\n```\n\n## Example Interceptors\n\n### Authentication Interceptor\n\n```python\nclass 
AuthInterceptor:\n    def __init__(self, api_key: str):\n        self.api_key = api_key\n    \n    async def __call__(self, request, handler):\n        # Inject auth headers\n        request = request.override(\n            headers={\"Authorization\": f\"Bearer {self.api_key}\"}\n        )\n        return await handler(request)\n```\n\n### Caching Interceptor\n\n```python\nfrom functools import lru_cache\n\nclass CacheInterceptor:\n    def __init__(self):\n        self.cache = {}\n    \n    async def __call__(self, request, handler):\n        cache_key = f\"{request.name}:{hash(frozenset(request.args.items()))}\"\n        \n        if cache_key in self.cache:\n            return self.cache[cache_key]\n        \n        result = await handler(request)\n        self.cache[cache_key] = result\n        return result\n```\n\n### Request Modification Interceptor\n\n```python\nclass DefaultArgsInterceptor:\n    def __init__(self, defaults: dict[str, Any]):\n        self.defaults = defaults\n    \n    async def __call__(self, request, handler):\n        # Merge defaults with provided args\n        merged_args = {**self.defaults, **request.args}\n        request = request.override(args=merged_args)\n        return await handler(request)\n```\n\n## API Reference\n\n### Functions\n\n#### `_build_interceptor_chain()`\n\n```python\ndef _build_interceptor_chain(\n    base_handler: Callable[[MCPToolCallRequest], Awaitable[MCPToolCallResult]],\n    tool_interceptors: list[ToolCallInterceptor] | None,\n) -> Callable[[MCPToolCallRequest], Awaitable[MCPToolCallResult]]:\n```\n\nBuilds a composed handler chain with interceptors in onion pattern.\n\n**Parameters:**\n\n| Name | Type | Description |\n|------|------|-------------|\n| `base_handler` | `Callable` | Innermost handler executing the actual tool call |\n| `tool_interceptors` | `list[ToolCallInterceptor] \\| None` | Optional list of interceptors to wrap |\n\n**Returns:** Composed handler with all interceptors 
applied\n\n资料来源：[tools.py:145-175]()\n\n### Classes\n\n#### `MCPToolCallRequest`\n\n```python\n@dataclass\nclass MCPToolCallRequest:\n    name: str\n    args: dict[str, Any]\n    server_name: str\n    headers: dict[str, Any] | None = None\n    runtime: object | None = None\n```\n\n资料来源：[interceptors.py:58-74]()\n\n#### `ToolCallInterceptor`\n\n```python\n@runtime_checkable\nclass ToolCallInterceptor(Protocol):\n    async def __call__(\n        self,\n        request: MCPToolCallRequest,\n        handler: Callable[[MCPToolCallRequest], Awaitable[MCPToolCallResult]],\n    ) -> MCPToolCallResult:\n        ...\n```\n\n资料来源：[interceptors.py:42-49]()\n\n### Type Aliases\n\n#### `MCPToolCallResult`\n\n```python\nif LANGGRAPH_PRESENT:\n    MCPToolCallResult = CallToolResult | ToolMessage | Command\nelse:\n    MCPToolCallResult = CallToolResult | ToolMessage\n```\n\n资料来源：[interceptors.py:29-36]()\n\n## Best Practices\n\n1. **Always call the handler**: Interceptors should typically call `handler(request)` unless intentionally short-circuiting\n2. **Immutability**: Use `request.override()` to create modified requests instead of mutating the original\n3. **Error handling**: Wrap handler calls in try/except for proper error handling and logging\n4. **Order matters**: Place interceptors in the correct order as the first in the list is the outermost\n5. **Type hints**: Use type hints for better IDE support and type checking\n\n## Limitations\n\n- Interceptors cannot currently modify the `server_name` or `runtime` fields of `MCPToolCallRequest` as they are context fields\n- The interceptor system is designed for tool call interception; other MCP lifecycle events (like resource access) are not currently interceptable\n- Runtime headers are only supported for `sse` and `http` transports\n\n---\n\n---\n\n## Doramagic 踩坑日志\n\n项目：langchain-ai/langchain-mcp-adapters\n\n摘要：发现 17 个潜在踩坑项，其中 3 个为 high/blocking；最高优先级：安装坑 - 来源证据：Prompts and Resources auto-discovery。\n\n## 1. 
安装坑 · 来源证据：Prompts and Resources auto-discovery\n\n- 严重度：high\n- 证据强度：source_linked\n- 发现：GitHub 社区证据显示该项目存在一个安装相关的待验证问题：Prompts and Resources auto-discovery\n- 对用户的影响：可能增加新用户试用和生产接入成本。\n- 建议检查：来源问题仍为 open，Pack Agent 需要复核是否仍影响当前版本。\n- 防护动作：不得脱离来源链接放大为确定性结论；需要标注适用版本和复核状态。\n- 证据：community_evidence:github | cevd_bf1812b74caa4e989767a9307a8ffc16 | https://github.com/langchain-ai/langchain-mcp-adapters/issues/62 | 来源讨论提到 python 相关条件，需在安装/试用前复核。\n\n## 2. 安装坑 · 来源证据：`MultiServerMCPClient.get_tools()` silently returns no tools when any single server fails to connect\n\n- 严重度：high\n- 证据强度：source_linked\n- 发现：GitHub 社区证据显示该项目存在一个安装相关的待验证问题：`MultiServerMCPClient.get_tools()` silently returns no tools when any single server fails to connect\n- 对用户的影响：可能增加新用户试用和生产接入成本。\n- 建议检查：来源问题仍为 open，Pack Agent 需要复核是否仍影响当前版本。\n- 防护动作：不得脱离来源链接放大为确定性结论；需要标注适用版本和复核状态。\n- 证据：community_evidence:github | cevd_a5093182914b4df0b7ad2cd560bacdf2 | https://github.com/langchain-ai/langchain-mcp-adapters/issues/492 | 来源讨论提到 python 相关条件，需在安装/试用前复核。\n\n## 3. 运行坑 · 来源证据：Fix TypeError in resources.py and make __aexit__ an async coroutine in client.py\n\n- 严重度：high\n- 证据强度：source_linked\n- 发现：GitHub 社区证据显示该项目存在一个运行相关的待验证问题：Fix TypeError in resources.py and make __aexit__ an async coroutine in client.py\n- 对用户的影响：可能增加新用户试用和生产接入成本。\n- 建议检查：来源问题仍为 open，Pack Agent 需要复核是否仍影响当前版本。\n- 防护动作：不得脱离来源链接放大为确定性结论；需要标注适用版本和复核状态。\n- 证据：community_evidence:github | cevd_ac102050dd4841d6954559a3413e0b92 | https://github.com/langchain-ai/langchain-mcp-adapters/issues/496 | 来源类型 github_issue 暴露的待验证使用条件。\n\n## 4. 
安装坑 · 来源证据：langchain-mcp-adapters==0.2.2\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：GitHub 社区证据显示该项目存在一个安装相关的待验证问题：langchain-mcp-adapters==0.2.2\n- 对用户的影响：可能增加新用户试用和生产接入成本。\n- 建议检查：来源显示可能已有修复、规避或版本变化，说明书中必须标注适用版本。\n- 防护动作：不得脱离来源链接放大为确定性结论；需要标注适用版本和复核状态。\n- 证据：community_evidence:github | cevd_0c6ca0722ab046379d28ecf30f8d2bcf | https://github.com/langchain-ai/langchain-mcp-adapters/releases/tag/langchain-mcp-adapters%3D%3D0.2.2 | 来源类型 github_release 暴露的待验证使用条件。\n\n## 5. 配置坑 · 来源证据：langchain-mcp-adapters==0.1.10\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：GitHub 社区证据显示该项目存在一个配置相关的待验证问题：langchain-mcp-adapters==0.1.10\n- 对用户的影响：可能增加新用户试用和生产接入成本。\n- 建议检查：来源显示可能已有修复、规避或版本变化，说明书中必须标注适用版本。\n- 防护动作：不得脱离来源链接放大为确定性结论；需要标注适用版本和复核状态。\n- 证据：community_evidence:github | cevd_8b18dbf32ccd41e38b272a458f4040f5 | https://github.com/langchain-ai/langchain-mcp-adapters/releases/tag/langchain-mcp-adapters%3D%3D0.1.10 | 来源讨论提到 python 相关条件，需在安装/试用前复核。\n\n## 6. 能力坑 · 来源证据：langchain-mcp-adapters==0.1.14\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：GitHub 社区证据显示该项目存在一个能力理解相关的待验证问题：langchain-mcp-adapters==0.1.14\n- 对用户的影响：可能增加新用户试用和生产接入成本。\n- 建议检查：来源显示可能已有修复、规避或版本变化，说明书中必须标注适用版本。\n- 防护动作：不得脱离来源链接放大为确定性结论；需要标注适用版本和复核状态。\n- 证据：community_evidence:github | cevd_6727e0d698e54fc38d7c60e262978ac2 | https://github.com/langchain-ai/langchain-mcp-adapters/releases/tag/langchain-mcp-adapters%3D%3D0.1.14 | 来源类型 github_release 暴露的待验证使用条件。\n\n## 7. 能力坑 · 能力判断依赖假设\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：README/documentation is current enough for a first validation pass.\n- 对用户的影响：假设不成立时，用户拿不到承诺的能力。\n- 建议检查：将假设转成下游验证清单。\n- 防护动作：假设必须转成验证项；没有验证结果前不能写成事实。\n- 证据：capability.assumptions | github_repo:929158279 | https://github.com/langchain-ai/langchain-mcp-adapters | README/documentation is current enough for a first validation pass.\n\n## 8. 
运行坑 · 来源证据：langchain-mcp-adapters==0.1.12\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：GitHub 社区证据显示该项目存在一个运行相关的待验证问题：langchain-mcp-adapters==0.1.12\n- 对用户的影响：可能增加新用户试用和生产接入成本。\n- 建议检查：来源显示可能已有修复、规避或版本变化，说明书中必须标注适用版本。\n- 防护动作：不得脱离来源链接放大为确定性结论；需要标注适用版本和复核状态。\n- 证据：community_evidence:github | cevd_e71a46a9e0374d139555a78f229b0469 | https://github.com/langchain-ai/langchain-mcp-adapters/releases/tag/langchain-mcp-adapters%3D%3D0.1.12 | 来源类型 github_release 暴露的待验证使用条件。\n\n## 9. 维护坑 · 来源证据：langchain-mcp-adapters==0.2.0\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：GitHub 社区证据显示该项目存在一个维护/版本相关的待验证问题：langchain-mcp-adapters==0.2.0\n- 对用户的影响：可能影响升级、迁移或版本选择。\n- 建议检查：来源显示可能已有修复、规避或版本变化，说明书中必须标注适用版本。\n- 防护动作：不得脱离来源链接放大为确定性结论；需要标注适用版本和复核状态。\n- 证据：community_evidence:github | cevd_59483f9a6a16414c9087b1751fba8efc | https://github.com/langchain-ai/langchain-mcp-adapters/releases/tag/langchain-mcp-adapters%3D%3D0.2.0 | 来源类型 github_release 暴露的待验证使用条件。\n\n## 10. 维护坑 · 来源证据：langchain-mcp-adapters==0.2.0a1\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：GitHub 社区证据显示该项目存在一个维护/版本相关的待验证问题：langchain-mcp-adapters==0.2.0a1\n- 对用户的影响：可能影响升级、迁移或版本选择。\n- 建议检查：来源显示可能已有修复、规避或版本变化，说明书中必须标注适用版本。\n- 防护动作：不得脱离来源链接放大为确定性结论；需要标注适用版本和复核状态。\n- 证据：community_evidence:github | cevd_4e7fcda1716948898295279af95f8f96 | https://github.com/langchain-ai/langchain-mcp-adapters/releases/tag/langchain-mcp-adapters%3D%3D0.2.0a1 | 来源类型 github_release 暴露的待验证使用条件。\n\n## 11. 维护坑 · 维护活跃度未知\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：未记录 last_activity_observed。\n- 对用户的影响：新项目、停更项目和活跃项目会被混在一起，推荐信任度下降。\n- 建议检查：补 GitHub 最近 commit、release、issue/PR 响应信号。\n- 防护动作：维护活跃度未知时，推荐强度不能标为高信任。\n- 证据：evidence.maintainer_signals | github_repo:929158279 | https://github.com/langchain-ai/langchain-mcp-adapters | last_activity_observed missing\n\n## 12. 
安全/权限坑 · 下游验证发现风险项\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：no_demo\n- 对用户的影响：下游已经要求复核，不能在页面中弱化。\n- 建议检查：进入安全/权限治理复核队列。\n- 防护动作：下游风险存在时必须保持 review/recommendation 降级。\n- 证据：downstream_validation.risk_items | github_repo:929158279 | https://github.com/langchain-ai/langchain-mcp-adapters | no_demo; severity=medium\n\n## 13. 安全/权限坑 · 存在安全注意事项\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：No sandbox install has been executed yet; downstream must verify before user use.\n- 对用户的影响：用户安装前需要知道权限边界和敏感操作。\n- 建议检查：转成明确权限清单和安全审查提示。\n- 防护动作：安全注意事项必须面向用户前置展示。\n- 证据：risks.safety_notes | github_repo:929158279 | https://github.com/langchain-ai/langchain-mcp-adapters | No sandbox install has been executed yet; downstream must verify before user use.\n\n## 14. 安全/权限坑 · 存在评分风险\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：no_demo\n- 对用户的影响：风险会影响是否适合普通用户安装。\n- 建议检查：把风险写入边界卡，并确认是否需要人工复核。\n- 防护动作：评分风险必须进入边界卡，不能只作为内部分数。\n- 证据：risks.scoring_risks | github_repo:929158279 | https://github.com/langchain-ai/langchain-mcp-adapters | no_demo; severity=medium\n\n## 15. 安全/权限坑 · 来源证据：Feature Request: Support passing server-defined params extensions (e.g. LangGraph `context`) through tools/call\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：GitHub 社区证据显示该项目存在一个安全/权限相关的待验证问题：Feature Request: Support passing server-defined params extensions (e.g. LangGraph `context`) through tools/call\n- 对用户的影响：可能影响授权、密钥配置或安全边界。\n- 建议检查：来源问题仍为 open，Pack Agent 需要复核是否仍影响当前版本。\n- 防护动作：不得脱离来源链接放大为确定性结论；需要标注适用版本和复核状态。\n- 证据：community_evidence:github | cevd_8c46dab4b6dd4a6e92c96af49ea47647 | https://github.com/langchain-ai/langchain-mcp-adapters/issues/502 | 来源讨论提到 python 相关条件，需在安装/试用前复核。\n\n## 16. 
维护坑 · issue/PR 响应质量未知\n\n- 严重度：low\n- 证据强度：source_linked\n- 发现：issue_or_pr_quality=unknown。\n- 对用户的影响：用户无法判断遇到问题后是否有人维护。\n- 建议检查：抽样最近 issue/PR，判断是否长期无人处理。\n- 防护动作：issue/PR 响应未知时，必须提示维护风险。\n- 证据：evidence.maintainer_signals | github_repo:929158279 | https://github.com/langchain-ai/langchain-mcp-adapters | issue_or_pr_quality=unknown\n\n## 17. 维护坑 · 发布节奏不明确\n\n- 严重度：low\n- 证据强度：source_linked\n- 发现：release_recency=unknown。\n- 对用户的影响：安装命令和文档可能落后于代码，用户踩坑概率升高。\n- 建议检查：确认最近 release/tag 和 README 安装命令是否一致。\n- 防护动作：发布节奏未知或过期时，安装说明必须标注可能漂移。\n- 证据：evidence.maintainer_signals | github_repo:929158279 | https://github.com/langchain-ai/langchain-mcp-adapters | release_recency=unknown\n\n<!-- canonical_name: langchain-ai/langchain-mcp-adapters; human_manual_source: deepwiki_human_wiki -->\n",
      "summary": "DeepWiki/Human Wiki 完整输出，末尾追加 Discovery Agent 踩坑日志。",
      "title": "Human Manual / 人类版说明书"
    },
    "pitfall_log": {
      "asset_id": "pitfall_log",
      "filename": "PITFALL_LOG.md",
      "markdown": "# Pitfall Log / 踩坑日志\n\n项目：langchain-ai/langchain-mcp-adapters\n\n摘要：发现 17 个潜在踩坑项，其中 3 个为 high/blocking；最高优先级：安装坑 - 来源证据：Prompts and Resources auto-discovery。\n\n## 1. 安装坑 · 来源证据：Prompts and Resources auto-discovery\n\n- 严重度：high\n- 证据强度：source_linked\n- 发现：GitHub 社区证据显示该项目存在一个安装相关的待验证问题：Prompts and Resources auto-discovery\n- 对用户的影响：可能增加新用户试用和生产接入成本。\n- 建议检查：来源问题仍为 open，Pack Agent 需要复核是否仍影响当前版本。\n- 防护动作：不得脱离来源链接放大为确定性结论；需要标注适用版本和复核状态。\n- 证据：community_evidence:github | cevd_bf1812b74caa4e989767a9307a8ffc16 | https://github.com/langchain-ai/langchain-mcp-adapters/issues/62 | 来源讨论提到 python 相关条件，需在安装/试用前复核。\n\n## 2. 安装坑 · 来源证据：`MultiServerMCPClient.get_tools()` silently returns no tools when any single server fails to connect\n\n- 严重度：high\n- 证据强度：source_linked\n- 发现：GitHub 社区证据显示该项目存在一个安装相关的待验证问题：`MultiServerMCPClient.get_tools()` silently returns no tools when any single server fails to connect\n- 对用户的影响：可能增加新用户试用和生产接入成本。\n- 建议检查：来源问题仍为 open，Pack Agent 需要复核是否仍影响当前版本。\n- 防护动作：不得脱离来源链接放大为确定性结论；需要标注适用版本和复核状态。\n- 证据：community_evidence:github | cevd_a5093182914b4df0b7ad2cd560bacdf2 | https://github.com/langchain-ai/langchain-mcp-adapters/issues/492 | 来源讨论提到 python 相关条件，需在安装/试用前复核。\n\n## 3. 运行坑 · 来源证据：Fix TypeError in resources.py and make __aexit__ an async coroutine in client.py\n\n- 严重度：high\n- 证据强度：source_linked\n- 发现：GitHub 社区证据显示该项目存在一个运行相关的待验证问题：Fix TypeError in resources.py and make __aexit__ an async coroutine in client.py\n- 对用户的影响：可能增加新用户试用和生产接入成本。\n- 建议检查：来源问题仍为 open，Pack Agent 需要复核是否仍影响当前版本。\n- 防护动作：不得脱离来源链接放大为确定性结论；需要标注适用版本和复核状态。\n- 证据：community_evidence:github | cevd_ac102050dd4841d6954559a3413e0b92 | https://github.com/langchain-ai/langchain-mcp-adapters/issues/496 | 来源类型 github_issue 暴露的待验证使用条件。\n\n## 4. 
安装坑 · 来源证据：langchain-mcp-adapters==0.2.2\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：GitHub 社区证据显示该项目存在一个安装相关的待验证问题：langchain-mcp-adapters==0.2.2\n- 对用户的影响：可能增加新用户试用和生产接入成本。\n- 建议检查：来源显示可能已有修复、规避或版本变化，说明书中必须标注适用版本。\n- 防护动作：不得脱离来源链接放大为确定性结论；需要标注适用版本和复核状态。\n- 证据：community_evidence:github | cevd_0c6ca0722ab046379d28ecf30f8d2bcf | https://github.com/langchain-ai/langchain-mcp-adapters/releases/tag/langchain-mcp-adapters%3D%3D0.2.2 | 来源类型 github_release 暴露的待验证使用条件。\n\n## 5. 配置坑 · 来源证据：langchain-mcp-adapters==0.1.10\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：GitHub 社区证据显示该项目存在一个配置相关的待验证问题：langchain-mcp-adapters==0.1.10\n- 对用户的影响：可能增加新用户试用和生产接入成本。\n- 建议检查：来源显示可能已有修复、规避或版本变化，说明书中必须标注适用版本。\n- 防护动作：不得脱离来源链接放大为确定性结论；需要标注适用版本和复核状态。\n- 证据：community_evidence:github | cevd_8b18dbf32ccd41e38b272a458f4040f5 | https://github.com/langchain-ai/langchain-mcp-adapters/releases/tag/langchain-mcp-adapters%3D%3D0.1.10 | 来源讨论提到 python 相关条件，需在安装/试用前复核。\n\n## 6. 能力坑 · 来源证据：langchain-mcp-adapters==0.1.14\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：GitHub 社区证据显示该项目存在一个能力理解相关的待验证问题：langchain-mcp-adapters==0.1.14\n- 对用户的影响：可能增加新用户试用和生产接入成本。\n- 建议检查：来源显示可能已有修复、规避或版本变化，说明书中必须标注适用版本。\n- 防护动作：不得脱离来源链接放大为确定性结论；需要标注适用版本和复核状态。\n- 证据：community_evidence:github | cevd_6727e0d698e54fc38d7c60e262978ac2 | https://github.com/langchain-ai/langchain-mcp-adapters/releases/tag/langchain-mcp-adapters%3D%3D0.1.14 | 来源类型 github_release 暴露的待验证使用条件。\n\n## 7. 能力坑 · 能力判断依赖假设\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：README/documentation is current enough for a first validation pass.\n- 对用户的影响：假设不成立时，用户拿不到承诺的能力。\n- 建议检查：将假设转成下游验证清单。\n- 防护动作：假设必须转成验证项；没有验证结果前不能写成事实。\n- 证据：capability.assumptions | github_repo:929158279 | https://github.com/langchain-ai/langchain-mcp-adapters | README/documentation is current enough for a first validation pass.\n\n## 8. 
运行坑 · 来源证据：langchain-mcp-adapters==0.1.12\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：GitHub 社区证据显示该项目存在一个运行相关的待验证问题：langchain-mcp-adapters==0.1.12\n- 对用户的影响：可能增加新用户试用和生产接入成本。\n- 建议检查：来源显示可能已有修复、规避或版本变化，说明书中必须标注适用版本。\n- 防护动作：不得脱离来源链接放大为确定性结论；需要标注适用版本和复核状态。\n- 证据：community_evidence:github | cevd_e71a46a9e0374d139555a78f229b0469 | https://github.com/langchain-ai/langchain-mcp-adapters/releases/tag/langchain-mcp-adapters%3D%3D0.1.12 | 来源类型 github_release 暴露的待验证使用条件。\n\n## 9. 维护坑 · 来源证据：langchain-mcp-adapters==0.2.0\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：GitHub 社区证据显示该项目存在一个维护/版本相关的待验证问题：langchain-mcp-adapters==0.2.0\n- 对用户的影响：可能影响升级、迁移或版本选择。\n- 建议检查：来源显示可能已有修复、规避或版本变化，说明书中必须标注适用版本。\n- 防护动作：不得脱离来源链接放大为确定性结论；需要标注适用版本和复核状态。\n- 证据：community_evidence:github | cevd_59483f9a6a16414c9087b1751fba8efc | https://github.com/langchain-ai/langchain-mcp-adapters/releases/tag/langchain-mcp-adapters%3D%3D0.2.0 | 来源类型 github_release 暴露的待验证使用条件。\n\n## 10. 维护坑 · 来源证据：langchain-mcp-adapters==0.2.0a1\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：GitHub 社区证据显示该项目存在一个维护/版本相关的待验证问题：langchain-mcp-adapters==0.2.0a1\n- 对用户的影响：可能影响升级、迁移或版本选择。\n- 建议检查：来源显示可能已有修复、规避或版本变化，说明书中必须标注适用版本。\n- 防护动作：不得脱离来源链接放大为确定性结论；需要标注适用版本和复核状态。\n- 证据：community_evidence:github | cevd_4e7fcda1716948898295279af95f8f96 | https://github.com/langchain-ai/langchain-mcp-adapters/releases/tag/langchain-mcp-adapters%3D%3D0.2.0a1 | 来源类型 github_release 暴露的待验证使用条件。\n\n## 11. 维护坑 · 维护活跃度未知\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：未记录 last_activity_observed。\n- 对用户的影响：新项目、停更项目和活跃项目会被混在一起，推荐信任度下降。\n- 建议检查：补 GitHub 最近 commit、release、issue/PR 响应信号。\n- 防护动作：维护活跃度未知时，推荐强度不能标为高信任。\n- 证据：evidence.maintainer_signals | github_repo:929158279 | https://github.com/langchain-ai/langchain-mcp-adapters | last_activity_observed missing\n\n## 12. 
安全/权限坑 · 下游验证发现风险项\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：no_demo\n- 对用户的影响：下游已经要求复核，不能在页面中弱化。\n- 建议检查：进入安全/权限治理复核队列。\n- 防护动作：下游风险存在时必须保持 review/recommendation 降级。\n- 证据：downstream_validation.risk_items | github_repo:929158279 | https://github.com/langchain-ai/langchain-mcp-adapters | no_demo; severity=medium\n\n## 13. 安全/权限坑 · 存在安全注意事项\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：No sandbox install has been executed yet; downstream must verify before user use.\n- 对用户的影响：用户安装前需要知道权限边界和敏感操作。\n- 建议检查：转成明确权限清单和安全审查提示。\n- 防护动作：安全注意事项必须面向用户前置展示。\n- 证据：risks.safety_notes | github_repo:929158279 | https://github.com/langchain-ai/langchain-mcp-adapters | No sandbox install has been executed yet; downstream must verify before user use.\n\n## 14. 安全/权限坑 · 存在评分风险\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：no_demo\n- 对用户的影响：风险会影响是否适合普通用户安装。\n- 建议检查：把风险写入边界卡，并确认是否需要人工复核。\n- 防护动作：评分风险必须进入边界卡，不能只作为内部分数。\n- 证据：risks.scoring_risks | github_repo:929158279 | https://github.com/langchain-ai/langchain-mcp-adapters | no_demo; severity=medium\n\n## 15. 安全/权限坑 · 来源证据：Feature Request: Support passing server-defined params extensions (e.g. LangGraph `context`) through tools/call\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：GitHub 社区证据显示该项目存在一个安全/权限相关的待验证问题：Feature Request: Support passing server-defined params extensions (e.g. LangGraph `context`) through tools/call\n- 对用户的影响：可能影响授权、密钥配置或安全边界。\n- 建议检查：来源问题仍为 open，Pack Agent 需要复核是否仍影响当前版本。\n- 防护动作：不得脱离来源链接放大为确定性结论；需要标注适用版本和复核状态。\n- 证据：community_evidence:github | cevd_8c46dab4b6dd4a6e92c96af49ea47647 | https://github.com/langchain-ai/langchain-mcp-adapters/issues/502 | 来源讨论提到 python 相关条件，需在安装/试用前复核。\n\n## 16. 
维护坑 · issue/PR 响应质量未知\n\n- 严重度：low\n- 证据强度：source_linked\n- 发现：issue_or_pr_quality=unknown。\n- 对用户的影响：用户无法判断遇到问题后是否有人维护。\n- 建议检查：抽样最近 issue/PR，判断是否长期无人处理。\n- 防护动作：issue/PR 响应未知时，必须提示维护风险。\n- 证据：evidence.maintainer_signals | github_repo:929158279 | https://github.com/langchain-ai/langchain-mcp-adapters | issue_or_pr_quality=unknown\n\n## 17. 维护坑 · 发布节奏不明确\n\n- 严重度：low\n- 证据强度：source_linked\n- 发现：release_recency=unknown。\n- 对用户的影响：安装命令和文档可能落后于代码，用户踩坑概率升高。\n- 建议检查：确认最近 release/tag 和 README 安装命令是否一致。\n- 防护动作：发布节奏未知或过期时，安装说明必须标注可能漂移。\n- 证据：evidence.maintainer_signals | github_repo:929158279 | https://github.com/langchain-ai/langchain-mcp-adapters | release_recency=unknown\n",
      "summary": "用户实践前最可能遇到的身份、安装、配置、运行和安全坑。",
      "title": "Pitfall Log / 踩坑日志"
    },
    "prompt_preview": {
      "asset_id": "prompt_preview",
      "filename": "PROMPT_PREVIEW.md",
      "markdown": "# langchain-mcp-adapters - Prompt Preview\n\n> Copy the prompt below into your AI host before installing anything.\n> Its purpose is to let you safely feel the project's workflow, not to claim the project has already run.\n\n## Copy this prompt\n\n```text\nYou are using an independent Doramagic capability pack for langchain-ai/langchain-mcp-adapters.\n\nProject:\n- Name: langchain-mcp-adapters\n- Repository: https://github.com/langchain-ai/langchain-mcp-adapters\n- Summary: LangChain 🔌 MCP\n- Host target: mcp_host\n\nGoal:\nHelp me evaluate this project for the following task without installing it yet: LangChain 🔌 MCP\n\nBefore taking action:\n1. Restate my task, success standard, and boundary.\n2. Identify whether the next step requires tools, browser access, network access, filesystem access, credentials, package installation, or host configuration.\n3. Use only the Doramagic Project Pack, the upstream repository, and the source-linked evidence listed below.\n4. If a real command, install step, API call, file write, or host integration is required, mark it as \"requires post-install verification\" and ask for approval first.\n5. If evidence is missing, say \"evidence is missing\" instead of filling the gap.\n\nPreviewable capabilities:\n- Capability 1: LangChain 🔌 MCP\n\nCapabilities that require post-install verification:\n- Capability 1: Use the source-backed project context to guide one small, checkable workflow step.\n\nCore service flow:\n1. page-introduction: Introduction. Produce one small intermediate artifact and wait for confirmation.\n2. page-installation: Installation. Produce one small intermediate artifact and wait for confirmation.\n3. page-quickstart: Quick Start Guide. Produce one small intermediate artifact and wait for confirmation.\n4. page-architecture: System Architecture. Produce one small intermediate artifact and wait for confirmation.\n5. page-tool-conversion: Tool Conversion. 
Produce one small intermediate artifact and wait for confirmation.\n\nSource-backed evidence to keep in mind:\n- https://github.com/langchain-ai/langchain-mcp-adapters\n- https://github.com/langchain-ai/langchain-mcp-adapters#readme\n- README.md\n- langchain_mcp_adapters/__init__.py\n- pyproject.toml\n- langchain_mcp_adapters/tools.py\n- langchain_mcp_adapters/client.py\n- langchain_mcp_adapters/sessions.py\n\nFirst response rules:\n1. Start Step 1 only.\n2. Explain the one service action you will perform first.\n3. Ask exactly three questions about my target workflow, success standard, and sandbox boundary.\n4. Stop and wait for my answers.\n\nStep 1 follow-up protocol:\n- After I answer the first three questions, stay in Step 1.\n- Produce six parts only: clarified task, success standard, boundary conditions, two or three options, tradeoffs for each option, and one recommendation.\n- End by asking whether I confirm the recommendation.\n- Do not move to Step 2 until I explicitly confirm.\n\nConversation rules:\n- Advance one step at a time and wait for confirmation after each small artifact.\n- Write outputs as recommendations or planned checks, not as completed execution.\n- Do not claim tests passed, files changed, commands ran, APIs were called, or the project was installed.\n- If the user asks for execution, first provide the sandbox setup, expected output, rollback, and approval checkpoint.\n```\n",
      "summary": "不安装项目也能感受能力节奏的安全试用 Prompt。",
      "title": "Prompt Preview / 安装前试用 Prompt"
    },
    "quick_start": {
      "asset_id": "quick_start",
      "filename": "QUICK_START.md",
      "markdown": "# Quick Start / 官方入口\n\n项目：langchain-ai/langchain-mcp-adapters\n\n## 官方安装入口\n\n### Python / pip · 官方安装入口\n\n```bash\npip install langchain-mcp-adapters\n```\n\n来源：https://github.com/langchain-ai/langchain-mcp-adapters#readme\n\n## 来源\n\n- repo: https://github.com/langchain-ai/langchain-mcp-adapters\n- docs: https://github.com/langchain-ai/langchain-mcp-adapters#readme\n",
      "summary": "从项目官方 README 或安装文档提取的开工入口。",
      "title": "Quick Start / 官方入口"
    }
  },
  "validation_id": "dval_3abacb7c691849fc8f2ee8ca09a88b62"
}
