{
  "canonical_name": "openai/openai-agents-python",
  "compilation_id": "pack_1297981b7e624ec68209bcfe2822b771",
  "created_at": "2026-05-16T08:10:02.281553+00:00",
  "created_by": "project-pack-compiler",
  "feedback": {
    "carrier_selection_notes": [
      "viable_asset_types=skill, recipe, host_instruction, eval, preflight",
      "recommended_asset_types=skill, recipe, host_instruction, eval, preflight"
    ],
    "evidence_delta": {
      "confirmed_claims": [
        "identity_anchor_present",
        "capability_and_host_targets_present",
        "install_path_declared_or_better"
      ],
      "missing_required_fields": [],
      "must_verify_forwarded": [
        "Run or inspect `pip install openai-agents` in an isolated environment.",
        "Confirm the project exposes the claimed capability to at least one target host."
      ],
      "quickstart_execution_scope": "allowlisted_sandbox_smoke",
      "sandbox_command": "pip install openai-agents",
      "sandbox_container_image": "python:3.12-slim",
      "sandbox_execution_backend": "docker",
      "sandbox_planner_decision": "deterministic_isolated_install",
      "sandbox_validation_id": "sbx_257edd4094dd4a6a99c62978cc705e4c"
    },
    "feedback_event_type": "project_pack_compilation_feedback",
    "learning_candidate_reasons": [],
    "template_gaps": []
  },
  "identity": {
    "canonical_id": "project_8752be4b06039a2da37314148c77fa7d",
    "canonical_name": "openai/openai-agents-python",
    "homepage_url": null,
    "license": "unknown",
    "repo_url": "https://github.com/openai/openai-agents-python",
    "slug": "openai-agents-python",
    "source_packet_id": "phit_f4776ce49b7247d8b3baa7b9d90cee19",
    "source_validation_id": "dval_a44f7347bf66437e874a64a265e0fceb"
  },
  "merchandising": {
    "best_for": "需要流程自动化能力，并使用 chatgpt的用户",
    "github_forks": 4016,
    "github_stars": 26190,
    "one_liner_en": "A lightweight, powerful framework for multi-agent workflows",
    "one_liner_zh": "A lightweight, powerful framework for multi-agent workflows",
    "primary_category": {
      "category_id": "workflow-automation",
      "confidence": "medium",
      "name_en": "Workflow Automation",
      "name_zh": "流程自动化",
      "reason": "matched_keywords:workflow, agent"
    },
    "target_user": "使用 chatgpt 等宿主 AI 的用户",
    "title_en": "openai-agents-python",
    "title_zh": "openai-agents-python 能力包",
    "visible_tags": [
      {
        "label_en": "Security & Permissions",
        "label_zh": "安全审查与权限治理",
        "source": "repo_evidence_project_characteristics",
        "tag_id": "product_domain-security-permissions",
        "type": "product_domain"
      },
      {
        "label_en": "Web Task Automation",
        "label_zh": "网页任务自动化",
        "source": "repo_evidence_project_characteristics",
        "tag_id": "user_job-web-task-automation",
        "type": "user_job"
      },
      {
        "label_en": "Natural-language Web Actions",
        "label_zh": "自然语言网页操作",
        "source": "repo_evidence_project_characteristics",
        "tag_id": "core_capability-natural-language-web-actions",
        "type": "core_capability"
      },
      {
        "label_en": "Page Observation and Action Planning",
        "label_zh": "页面观察与动作规划",
        "source": "repo_evidence_project_characteristics",
        "tag_id": "workflow_pattern-page-observation-and-action-planning",
        "type": "workflow_pattern"
      },
      {
        "label_en": "Structured Data Extraction",
        "label_zh": "结构化数据提取",
        "source": "repo_evidence_project_characteristics",
        "tag_id": "selection_signal-structured-data-extraction",
        "type": "selection_signal"
      }
    ]
  },
  "packet_id": "phit_f4776ce49b7247d8b3baa7b9d90cee19",
  "page_model": {
    "artifacts": {
      "artifact_slug": "openai-agents-python",
      "files": [
        "PROJECT_PACK.json",
        "QUICK_START.md",
        "PROMPT_PREVIEW.md",
        "HUMAN_MANUAL.md",
        "AI_CONTEXT_PACK.md",
        "BOUNDARY_RISK_CARD.md",
        "PITFALL_LOG.md",
        "REPO_INSPECTION.json",
        "REPO_INSPECTION.md",
        "CAPABILITY_CONTRACT.json",
        "EVIDENCE_INDEX.json",
        "CLAIM_GRAPH.json"
      ],
      "required_files": [
        "PROJECT_PACK.json",
        "QUICK_START.md",
        "PROMPT_PREVIEW.md",
        "HUMAN_MANUAL.md",
        "AI_CONTEXT_PACK.md",
        "BOUNDARY_RISK_CARD.md",
        "PITFALL_LOG.md",
        "REPO_INSPECTION.json"
      ]
    },
    "detail": {
      "capability_source": "Project Hit Packet + DownstreamValidationResult",
      "commands": [
        {
          "command": "pip install openai-agents",
          "label": "Python / pip · 官方安装入口",
          "source": "https://github.com/openai/openai-agents-python#readme",
          "verified": true
        }
      ],
      "display_tags": [
        "安全审查与权限治理",
        "网页任务自动化",
        "自然语言网页操作",
        "页面观察与动作规划",
        "结构化数据提取"
      ],
      "eyebrow": "流程自动化",
      "glance": [
        {
          "body": "判断自己是不是目标用户。",
          "label": "最适合谁",
          "value": "需要流程自动化能力，并使用 chatgpt的用户"
        },
        {
          "body": "先理解能力边界，再决定是否继续。",
          "label": "核心价值",
          "value": "A lightweight, powerful framework for multi-agent workflows"
        },
        {
          "body": "未完成验证前保持审慎。",
          "label": "继续前",
          "value": "publish to Doramagic.ai project surfaces"
        }
      ],
      "guardrail_source": "Boundary & Risk Card",
      "guardrails": [
        {
          "body": "Prompt Preview 只展示流程，不证明项目已安装或运行。",
          "label": "Check 1",
          "value": "不要把试用当真实运行"
        },
        {
          "body": "chatgpt",
          "label": "Check 2",
          "value": "确认宿主兼容"
        },
        {
          "body": "publish to Doramagic.ai project surfaces",
          "label": "Check 3",
          "value": "先隔离验证"
        }
      ],
      "mode": "skill, recipe, host_instruction, eval, preflight",
      "pitfall_log": {
        "items": [
          {
            "body": "仓库名 `openai-agents-python` 与安装入口 `openai-agents` 不完全一致。",
            "category": "身份坑",
            "evidence": [
              "identity.distribution | github_repo:946380199 | https://github.com/openai/openai-agents-python | repo=openai-agents-python; install=openai-agents"
            ],
            "severity": "medium",
            "suggested_check": "在 npm/PyPI/GitHub 上确认包名映射和官方 README 说明。",
            "title": "仓库名和安装名不一致",
            "user_impact": "用户照着仓库名搜索包或照着包名找仓库时容易走错入口。"
          },
          {
            "body": "GitHub 社区证据显示该项目存在一个配置相关的待验证问题：AdvancedSQLiteSession.delete_branch() leaves branch-only messages in the base table",
            "category": "配置坑",
            "evidence": [
              "community_evidence:github | cevd_d867c75f80af49c9968398851ff8bf6a | https://github.com/openai/openai-agents-python/issues/3346 | 来源讨论提到 python 相关条件，需在安装/试用前复核。"
            ],
            "severity": "medium",
            "suggested_check": "来源问题仍为 open，Pack Agent 需要复核是否仍影响当前版本。",
            "title": "来源证据：AdvancedSQLiteSession.delete_branch() leaves branch-only messages in the base table",
            "user_impact": "可能增加新用户试用和生产接入成本。"
          },
          {
            "body": "GitHub 社区证据显示该项目存在一个配置相关的待验证问题：Clarify whether retry-after delays should respect retry max_delay",
            "category": "配置坑",
            "evidence": [
              "community_evidence:github | cevd_f486d2247bf24df8bbc7a2bd6fddbd65 | https://github.com/openai/openai-agents-python/issues/3266 | 来源讨论提到 python 相关条件，需在安装/试用前复核。"
            ],
            "severity": "medium",
            "suggested_check": "来源显示可能已有修复、规避或版本变化，说明书中必须标注适用版本。",
            "title": "来源证据：Clarify whether retry-after delays should respect retry max_delay",
            "user_impact": "可能增加新用户试用和生产接入成本。"
          },
          {
            "body": "GitHub 社区证据显示该项目存在一个配置相关的待验证问题：OpenAIConversationsSession persists empty reasoning item {\"type\":\"reasoning\",\"summary\":[]} and Conversations API rejects it as invalid",
            "category": "配置坑",
            "evidence": [
              "community_evidence:github | cevd_d6bad5c23bf3457eb546c22a1636cc26 | https://github.com/openai/openai-agents-python/issues/3268 | 来源讨论提到 python 相关条件，需在安装/试用前复核。"
            ],
            "severity": "medium",
            "suggested_check": "来源问题仍为 open，Pack Agent 需要复核是否仍影响当前版本。",
            "title": "来源证据：OpenAIConversationsSession persists empty reasoning item {\"type\":\"reasoning\",\"summary\":[]} and Conversations API reject…",
            "user_impact": "可能增加新用户试用和生产接入成本。"
          },
          {
            "body": "GitHub 社区证据显示该项目存在一个配置相关的待验证问题：Tracing shutdown cannot interrupt exporter retry backoff",
            "category": "配置坑",
            "evidence": [
              "community_evidence:github | cevd_e1ceae098cf84c8aafae7082b13c5345 | https://github.com/openai/openai-agents-python/issues/3354 | 来源讨论提到 python 相关条件，需在安装/试用前复核。"
            ],
            "severity": "medium",
            "suggested_check": "来源问题仍为 open，Pack Agent 需要复核是否仍影响当前版本。",
            "title": "来源证据：Tracing shutdown cannot interrupt exporter retry backoff",
            "user_impact": "可能阻塞安装或首次运行。"
          },
          {
            "body": "GitHub 社区证据显示该项目存在一个配置相关的待验证问题：v0.15.2",
            "category": "配置坑",
            "evidence": [
              "community_evidence:github | cevd_b73472b5ae90447199984775aacdca67 | https://github.com/openai/openai-agents-python/releases/tag/v0.15.2 | 来源讨论提到 python 相关条件，需在安装/试用前复核。"
            ],
            "severity": "medium",
            "suggested_check": "来源显示可能已有修复、规避或版本变化，说明书中必须标注适用版本。",
            "title": "来源证据：v0.15.2",
            "user_impact": "可能增加新用户试用和生产接入成本。"
          },
          {
            "body": "GitHub 社区证据显示该项目存在一个配置相关的待验证问题：v0.15.3",
            "category": "配置坑",
            "evidence": [
              "community_evidence:github | cevd_7e05a382001a4d07b74eda1e1316320b | https://github.com/openai/openai-agents-python/releases/tag/v0.15.3 | 来源讨论提到 python 相关条件，需在安装/试用前复核。"
            ],
            "severity": "medium",
            "suggested_check": "来源显示可能已有修复、规避或版本变化，说明书中必须标注适用版本。",
            "title": "来源证据：v0.15.3",
            "user_impact": "可能增加新用户试用和生产接入成本。"
          },
          {
            "body": "GitHub 社区证据显示该项目存在一个配置相关的待验证问题：v0.16.1",
            "category": "配置坑",
            "evidence": [
              "community_evidence:github | cevd_44335088ff52486e9f2f41f72a274c35 | https://github.com/openai/openai-agents-python/releases/tag/v0.16.1 | 来源讨论提到 python 相关条件，需在安装/试用前复核。"
            ],
            "severity": "medium",
            "suggested_check": "来源显示可能已有修复、规避或版本变化，说明书中必须标注适用版本。",
            "title": "来源证据：v0.16.1",
            "user_impact": "可能增加新用户试用和生产接入成本。"
          },
          {
            "body": "GitHub 社区证据显示该项目存在一个配置相关的待验证问题：v0.17.0",
            "category": "配置坑",
            "evidence": [
              "community_evidence:github | cevd_86b81f310a6e45feadc65196a057b23b | https://github.com/openai/openai-agents-python/releases/tag/v0.17.0 | 来源讨论提到 python 相关条件，需在安装/试用前复核。"
            ],
            "severity": "medium",
            "suggested_check": "来源显示可能已有修复、规避或版本变化，说明书中必须标注适用版本。",
            "title": "来源证据：v0.17.0",
            "user_impact": "可能增加新用户试用和生产接入成本。"
          },
          {
            "body": "GitHub 社区证据显示该项目存在一个能力理解相关的待验证问题：v0.15.1",
            "category": "能力坑",
            "evidence": [
              "community_evidence:github | cevd_4c70d563ac704aeaa14b8e2c49976bc5 | https://github.com/openai/openai-agents-python/releases/tag/v0.15.1 | 来源讨论提到 python 相关条件，需在安装/试用前复核。"
            ],
            "severity": "medium",
            "suggested_check": "来源显示可能已有修复、规避或版本变化，说明书中必须标注适用版本。",
            "title": "来源证据：v0.15.1",
            "user_impact": "可能增加新用户试用和生产接入成本。"
          },
          {
            "body": "README/documentation is current enough for a first validation pass.",
            "category": "能力坑",
            "evidence": [
              "capability.assumptions | github_repo:946380199 | https://github.com/openai/openai-agents-python | README/documentation is current enough for a first validation pass."
            ],
            "severity": "medium",
            "suggested_check": "将假设转成下游验证清单。",
            "title": "能力判断依赖假设",
            "user_impact": "假设不成立时，用户拿不到承诺的能力。"
          },
          {
            "body": "GitHub 社区证据显示该项目存在一个运行相关的待验证问题：v0.14.8",
            "category": "运行坑",
            "evidence": [
              "community_evidence:github | cevd_a31947cfee3a4299923f7714bfb54f42 | https://github.com/openai/openai-agents-python/releases/tag/v0.14.8 | 来源讨论提到 python 相关条件，需在安装/试用前复核。"
            ],
            "severity": "medium",
            "suggested_check": "来源显示可能已有修复、规避或版本变化，说明书中必须标注适用版本。",
            "title": "来源证据：v0.14.8",
            "user_impact": "可能增加新用户试用和生产接入成本。"
          },
          {
            "body": "GitHub 社区证据显示该项目存在一个维护/版本相关的待验证问题：AdvancedSQLiteSession.add_items can report success after structure metadata failure",
            "category": "维护坑",
            "evidence": [
              "community_evidence:github | cevd_0fed2dd63d55400d9e0d9adaf08570e5 | https://github.com/openai/openai-agents-python/issues/3348 | 来源讨论提到 python 相关条件，需在安装/试用前复核。"
            ],
            "severity": "medium",
            "suggested_check": "来源问题仍为 open，Pack Agent 需要复核是否仍影响当前版本。",
            "title": "来源证据：AdvancedSQLiteSession.add_items can report success after structure metadata failure",
            "user_impact": "可能增加新用户试用和生产接入成本。"
          },
          {
            "body": "GitHub 社区证据显示该项目存在一个维护/版本相关的待验证问题：Chat Completions converter can send empty tool output for non-text results",
            "category": "维护坑",
            "evidence": [
              "community_evidence:github | cevd_34a35e920a01467e957cdd59b4179cc1 | https://github.com/openai/openai-agents-python/issues/3310 | 来源讨论提到 python 相关条件，需在安装/试用前复核。"
            ],
            "severity": "medium",
            "suggested_check": "来源问题仍为 open，Pack Agent 需要复核是否仍影响当前版本。",
            "title": "来源证据：Chat Completions converter can send empty tool output for non-text results",
            "user_impact": "可能增加新用户试用和生产接入成本。"
          },
          {
            "body": "GitHub 社区证据显示该项目存在一个维护/版本相关的待验证问题：v0.15.0",
            "category": "维护坑",
            "evidence": [
              "community_evidence:github | cevd_33cd0193aea84f9b82b15a02098d85cd | https://github.com/openai/openai-agents-python/releases/tag/v0.15.0 | 来源讨论提到 python 相关条件，需在安装/试用前复核。"
            ],
            "severity": "medium",
            "suggested_check": "来源显示可能已有修复、规避或版本变化，说明书中必须标注适用版本。",
            "title": "来源证据：v0.15.0",
            "user_impact": "可能增加新用户试用和生产接入成本。"
          },
          {
            "body": "未记录 last_activity_observed。",
            "category": "维护坑",
            "evidence": [
              "evidence.maintainer_signals | github_repo:946380199 | https://github.com/openai/openai-agents-python | last_activity_observed missing"
            ],
            "severity": "medium",
            "suggested_check": "补 GitHub 最近 commit、release、issue/PR 响应信号。",
            "title": "维护活跃度未知",
            "user_impact": "新项目、停更项目和活跃项目会被混在一起，推荐信任度下降。"
          }
        ],
        "source": "ProjectPitfallLog + ProjectHitPacket + validation + community signals",
        "summary": "发现 24 个潜在踩坑项，其中 0 个为 high/blocking；最高优先级：身份坑 - 仓库名和安装名不一致。",
        "title": "踩坑日志"
      },
      "snapshot": {
        "contributors": 269,
        "forks": 4016,
        "license": "unknown",
        "note": "站点快照，非实时质量证明；用于开工前背景判断。",
        "stars": 26190
      },
      "source_url": "https://github.com/openai/openai-agents-python",
      "steps": [
        {
          "body": "不安装项目，先体验能力节奏。",
          "code": "preview",
          "title": "先试 Prompt"
        },
        {
          "body": "理解输入、输出、失败模式和边界。",
          "code": "manual",
          "title": "读说明书"
        },
        {
          "body": "把上下文交给宿主 AI 继续工作。",
          "code": "context",
          "title": "带给 AI"
        },
        {
          "body": "进入主力环境前先完成安装入口与风险边界验证。",
          "code": "verify",
          "title": "沙箱验证"
        }
      ],
      "subtitle": "A lightweight, powerful framework for multi-agent workflows",
      "title": "openai-agents-python 能力包",
      "trial_prompt": "# openai-agents-python - Prompt Preview\n\n> Copy the prompt below into your AI host before installing anything.\n> Its purpose is to let you safely feel the project's workflow, not to claim the project has already run.\n\n## Copy this prompt\n\n```text\nYou are using an independent Doramagic capability pack for openai/openai-agents-python.\n\nProject:\n- Name: openai-agents-python\n- Repository: https://github.com/openai/openai-agents-python\n- Summary: A lightweight, powerful framework for multi-agent workflows\n- Host target: chatgpt\n\nGoal:\nHelp me evaluate this project for the following task without installing it yet: A lightweight, powerful framework for multi-agent workflows\n\nBefore taking action:\n1. Restate my task, success standard, and boundary.\n2. Identify whether the next step requires tools, browser access, network access, filesystem access, credentials, package installation, or host configuration.\n3. Use only the Doramagic Project Pack, the upstream repository, and the source-linked evidence listed below.\n4. If a real command, install step, API call, file write, or host integration is required, mark it as \"requires post-install verification\" and ask for approval first.\n5. If evidence is missing, say \"evidence is missing\" instead of filling the gap.\n\nPreviewable capabilities:\n- Capability 1: Use the source-backed project context to guide one small, checkable workflow step.\n\nCapabilities that require post-install verification:\n- Capability 1: Use the source-backed project context to guide one small, checkable workflow step.\n\nCore service flow:\n1. overview: OpenAI Agents SDK Overview. Produce one small intermediate artifact and wait for confirmation.\n2. examples-index: Examples Index. Produce one small intermediate artifact and wait for confirmation.\n3. agents: Agents. Produce one small intermediate artifact and wait for confirmation.\n4. tools: Tools. 
Produce one small intermediate artifact and wait for confirmation.\n5. run-loop: Run Loop and Execution. Produce one small intermediate artifact and wait for confirmation.\n\nSource-backed evidence to keep in mind:\n- https://github.com/openai/openai-agents-python\n- https://github.com/openai/openai-agents-python#readme\n- .agents/skills/code-change-verification/SKILL.md\n- .agents/skills/docs-sync/SKILL.md\n- .agents/skills/examples-auto-run/SKILL.md\n- .agents/skills/final-release-review/SKILL.md\n- .agents/skills/implementation-strategy/SKILL.md\n- .agents/skills/openai-knowledge/SKILL.md\n- .agents/skills/pr-draft-summary/SKILL.md\n- .agents/skills/runtime-behavior-probe/SKILL.md\n\nFirst response rules:\n1. Start Step 1 only.\n2. Explain the one service action you will perform first.\n3. Ask exactly three questions about my target workflow, success standard, and sandbox boundary.\n4. Stop and wait for my answers.\n\nStep 1 follow-up protocol:\n- After I answer the first three questions, stay in Step 1.\n- Produce six parts only: clarified task, success standard, boundary conditions, two or three options, tradeoffs for each option, and one recommendation.\n- End by asking whether I confirm the recommendation.\n- Do not move to Step 2 until I explicitly confirm.\n\nConversation rules:\n- Advance one step at a time and wait for confirmation after each small artifact.\n- Write outputs as recommendations or planned checks, not as completed execution.\n- Do not claim tests passed, files changed, commands ran, APIs were called, or the project was installed.\n- If the user asks for execution, first provide the sandbox setup, expected output, rollback, and approval checkpoint.\n```\n",
      "voices": [
        {
          "body": "来源平台：github。github/github_issue: Tracing shutdown cannot interrupt exporter retry backoff（https://github.com/openai/openai-agents-python/issues/3354）；github/github_issue: Proposal: per-run BudgetGuard for token / request / cost limits (follow-（https://github.com/openai/openai-agents-python/issues/3353）；github/github_issue: OpenAIConversationsSession persists empty reasoning item {\"type\":\"reason（https://github.com/openai/openai-agents-python/issues/3268）；github/github_issue: Chat Completions converter can send empty tool output for non-text resul（https://github.com/openai/openai-agents-python/issues/3310）；github/github_issue: Clarify whether retry-after delays should respect retry max_delay（https://github.com/openai/openai-agents-python/issues/3266）；github/github_issue: AdvancedSQLiteSession.add_items can report success after structure metad（https://github.com/openai/openai-agents-python/issues/3348）；github/github_issue: AdvancedSQLiteSession.delete_branch() leaves branch-only messages in the（https://github.com/openai/openai-agents-python/issues/3346）；github/github_release: v0.17.1（https://github.com/openai/openai-agents-python/releases/tag/v0.17.1）；github/github_release: v0.17.0（https://github.com/openai/openai-agents-python/releases/tag/v0.17.0）；github/github_release: v0.16.1（https://github.com/openai/openai-agents-python/releases/tag/v0.16.1）；github/github_release: v0.16.0（https://github.com/openai/openai-agents-python/releases/tag/v0.16.0）；github/github_release: v0.15.3（https://github.com/openai/openai-agents-python/releases/tag/v0.15.3）。这些是项目级外部声音，不作为单独质量证明。",
          "items": [
            {
              "kind": "github_issue",
              "source": "github",
              "title": "Tracing shutdown cannot interrupt exporter retry backoff",
              "url": "https://github.com/openai/openai-agents-python/issues/3354"
            },
            {
              "kind": "github_issue",
              "source": "github",
              "title": "Proposal: per-run BudgetGuard for token / request / cost limits (follow-",
              "url": "https://github.com/openai/openai-agents-python/issues/3353"
            },
            {
              "kind": "github_issue",
              "source": "github",
              "title": "OpenAIConversationsSession persists empty reasoning item {\"type\":\"reason",
              "url": "https://github.com/openai/openai-agents-python/issues/3268"
            },
            {
              "kind": "github_issue",
              "source": "github",
              "title": "Chat Completions converter can send empty tool output for non-text resul",
              "url": "https://github.com/openai/openai-agents-python/issues/3310"
            },
            {
              "kind": "github_issue",
              "source": "github",
              "title": "Clarify whether retry-after delays should respect retry max_delay",
              "url": "https://github.com/openai/openai-agents-python/issues/3266"
            },
            {
              "kind": "github_issue",
              "source": "github",
              "title": "AdvancedSQLiteSession.add_items can report success after structure metad",
              "url": "https://github.com/openai/openai-agents-python/issues/3348"
            },
            {
              "kind": "github_issue",
              "source": "github",
              "title": "AdvancedSQLiteSession.delete_branch() leaves branch-only messages in the",
              "url": "https://github.com/openai/openai-agents-python/issues/3346"
            },
            {
              "kind": "github_release",
              "source": "github",
              "title": "v0.17.1",
              "url": "https://github.com/openai/openai-agents-python/releases/tag/v0.17.1"
            },
            {
              "kind": "github_release",
              "source": "github",
              "title": "v0.17.0",
              "url": "https://github.com/openai/openai-agents-python/releases/tag/v0.17.0"
            },
            {
              "kind": "github_release",
              "source": "github",
              "title": "v0.16.1",
              "url": "https://github.com/openai/openai-agents-python/releases/tag/v0.16.1"
            },
            {
              "kind": "github_release",
              "source": "github",
              "title": "v0.16.0",
              "url": "https://github.com/openai/openai-agents-python/releases/tag/v0.16.0"
            },
            {
              "kind": "github_release",
              "source": "github",
              "title": "v0.15.3",
              "url": "https://github.com/openai/openai-agents-python/releases/tag/v0.15.3"
            }
          ],
          "status": "已收录 12 条来源",
          "title": "社区讨论"
        }
      ]
    },
    "homepage_card": {
      "category": "流程自动化",
      "desc": "A lightweight, powerful framework for multi-agent workflows",
      "effort": "安装已验证",
      "forks": 4016,
      "icon": "bolt",
      "name": "openai-agents-python 能力包",
      "risk": "可发布",
      "slug": "openai-agents-python",
      "stars": 26190,
      "tags": [
        "安全审查与权限治理",
        "网页任务自动化",
        "自然语言网页操作",
        "页面观察与动作规划",
        "结构化数据提取"
      ],
      "thumb": "gray",
      "type": "Skill Pack"
    },
    "manual": {
      "markdown": "# https://github.com/openai/openai-agents-python 项目说明书\n\n生成时间：2026-05-16 04:52:19 UTC\n\n## 目录\n\n- [OpenAI Agents SDK Overview](#overview)\n- [Installation and Setup](#installation)\n- [Examples Index](#examples-index)\n- [Agents](#agents)\n- [Tools](#tools)\n- [Guardrails](#guardrails)\n- [Handoffs](#handoffs)\n- [Agents as Tools](#agents-as-tools)\n- [Run Loop and Execution](#run-loop)\n- [Sessions and Memory](#sessions)\n\n<a id='overview'></a>\n\n## OpenAI Agents SDK Overview\n\n### 相关页面\n\n相关主题：[Installation and Setup](#installation), [Agents](#agents), [Tools](#tools)\n\n<details>\n<summary>相关源码文件</summary>\n\n以下源码文件用于生成本页说明：\n\n- [README.md](https://github.com/openai/openai-agents-python/blob/main/README.md)\n- [src/agents/__init__.py](https://github.com/openai/openai-agents-python/blob/main/src/agents/__init__.py)\n- [src/agents/run.py](https://github.com/openai/openai-agents-python/blob/main/src/agents/run.py)\n- [src/agents/run_internal/turn_resolution.py](https://github.com/openai/openai-agents-python/blob/main/src/agents/run_internal/turn_resolution.py)\n- [src/agents/handoffs/__init__.py](https://github.com/openai/openai-agents-python/blob/main/src/agents/handoffs/__init__.py)\n- [src/agents/items.py](https://github.com/openai/openai-agents-python/blob/main/src/agents/items.py)\n- [src/agents/extensions/visualization.py](https://github.com/openai/openai-agents-python/blob/main/src/agents/extensions/visualization.py)\n- [src/agents/mcp/server.py](https://github.com/openai/openai-agents-python/blob/main/src/agents/mcp/server.py)\n</details>\n\n# OpenAI Agents SDK Overview\n\n## Introduction\n\nThe OpenAI Agents SDK is a Python framework designed to build multi-agent systems that can interact with users, execute tools, and delegate tasks to specialized sub-agents. 
The SDK provides a structured approach to orchestrating agent conversations, managing tool execution, handling handoffs between agents, and maintaining conversation state throughout the execution lifecycle.\n\nThe SDK's core responsibility is to manage the runtime execution of agents, handling the turn-based conversation flow, tool invocations, guardrail evaluations, and multi-agent handoffs within a single unified execution model. 资料来源：[src/agents/__init__.py]()\n\n## Architecture Overview\n\nThe SDK follows a layered architecture that separates concerns between agent definition, runtime execution, and tool/mcp integrations.\n\n```mermaid\ngraph TD\n    A[User Input] --> B[Runner]\n    B --> C[Agent]\n    C --> D[Handoffs]\n    C --> E[Tools]\n    C --> F[Guardrails]\n    D --> C\n    D --> G[Sub-Agent]\n    E --> H[MCP Servers]\n    F --> I[Input/Output Guards]\n    G --> C\n    B --> J[Session Persistence]\n    B --> K[Tracing]\n```\n\n### Core Components\n\n| Component | Purpose | Location |\n|-----------|---------|----------|\n| `Agent` | Defines agent behavior, tools, handoffs, and instructions | `src/agents/__init__.py` |\n| `Runner` | Executes agents and manages conversation flow | `src/agents/run.py` |\n| `Handoff` | Enables transfer of control between agents | `src/agents/handoffs/__init__.py` |\n| `MCPServer` | Provides Model Context Protocol server abstraction | `src/agents/mcp/server.py` |\n| `ItemHelpers` | Utility for extracting content from conversation items | `src/agents/items.py` |\n\n## Agent System\n\n### Agent Definition\n\nAgents are the fundamental unit of computation in the SDK. 
An agent encapsulates:\n\n- **Instructions**: The system prompt that defines the agent's role and behavior\n- **Tools**: A list of callable tools the agent can invoke\n- **Handoffs**: Definitions for transferring control to other agents\n- **Input Guardrails**: Pre-processing validation before agent execution\n- **Output Guardrails**: Post-processing validation of agent responses\n\n```mermaid\ngraph LR\n    A[Agent] --> B[Instructions]\n    A --> C[Tools]\n    A --> D[Handoffs]\n    A --> E[Guardrails]\n```\n\n### Agent Execution Flow\n\nThe execution follows a turn-based model where each turn processes user input, generates model responses, executes tools, and evaluates handoffs until a final response is produced.\n\n```mermaid\nsequenceDiagram\n    participant User\n    participant Runner\n    participant Agent\n    participant Tools\n    participant Handoffs\n\n    User->>Runner: User Input\n    Runner->>Agent: Process Turn\n    Agent->>Agent: Generate Response\n    alt Tool Call\n        Agent->>Tools: Execute Tool\n        Tools-->>Agent: Tool Result\n    end\n    alt Handoff\n        Agent->>Handoffs: Request Handoff\n        Handoffs->>Agent: Switch Agent\n    end\n    Agent-->>Runner: Final Output\n    Runner-->>User: Response\n```\n\n## Handoffs System\n\nThe handoff system enables agents to delegate conversations to other specialized agents while preserving conversation context. 
Each handoff defines:\n\n| Property | Type | Description |\n|----------|------|-------------|\n| `name` | `str` | Unique identifier for the handoff tool |\n| `tool_name` | `str` | Name exposed to the model for invoking |\n| `tool_description` | `str` | Description shown to the model |\n| `input_json_schema` | `dict` | JSON schema for handoff arguments |\n| `on_invoke_handoff` | `Callable` | Function that returns the target agent |\n| `input_filter` | `HandoffInputFilter` | Optional filter for conversation context |\n\n资料来源：[src/agents/handoffs/__init__.py]()\n\n### Handoff Input Filtering\n\nBy default, the new agent receives the entire conversation history. The `input_filter` function allows customization of what context is passed to the target agent:\n\n```python\ninput_filter: HandoffInputFilter | None = None\n\"\"\"A function that filters the inputs that are passed to the next agent.\"\"\"\n```\n\n## Turn Resolution\n\nThe turn resolution system handles the complexity of multi-step agent interactions within a single turn. This includes managing pre-step items, new step items, tool results, guardrail evaluations, and handoff transitions.\n\n### Turn Resolution States\n\n```mermaid\nstateDiagram-v2\n    [*] --> InputGuardrails: Input Received\n    InputGuardrails --> ModelResponse: Passed\n    ModelResponse --> ToolExecution: Tool Call\n    ModelResponse --> Handoff: Agent Switch\n    ModelResponse --> FinalOutput: Direct Response\n    ToolExecution --> ModelResponse: More Tools\n    ToolExecution --> Handoff: Switch During Tool\n    ToolExecution --> FinalOutput: Complete\n    Handoff --> InputGuardrails: New Agent\n    FinalOutput --> [*]\n```\n\n### Key Resolution Functions\n\nThe turn resolution process evaluates several conditions:\n\n1. **Tool Input Guardrail Results**: Validation before tool execution\n2. **Function Results**: Output from tool invocations\n3. **Tool Output Guardrail Results**: Validation after tool execution\n4. 
**Handoff Evaluation**: Check for agent transfer requests\n\n资料来源：[src/agents/run_internal/turn_resolution.py]()\n\n## Tool Execution and Guardrails\n\n### Guardrail System\n\nThe SDK implements a two-layer guardrail system:\n\n| Guardrail Type | Timing | Purpose |\n|----------------|--------|---------|\n| Input Guardrails | Before agent processes input | Validate and sanitize user input |\n| Output Guardrails | After agent generates response | Validate response content |\n\n### Tool Use Tracking\n\nTools are tracked throughout execution to maintain state and enable:\n\n- Streaming output collection\n- Refusal detection\n- Error handling\n- Output validation\n\n```mermaid\ngraph TD\n    A[Tool Call] --> B{Input Guardrails}\n    B -->|Pass| C[Execute Tool]\n    B -->|Fail| D[Reject]\n    C --> E[Tool Result]\n    E --> F{Output Guardrails}\n    F -->|Pass| G[Continue]\n    F -->|Fail| H[Error Response]\n```\n\n## Model Context Protocol (MCP) Integration\n\nThe SDK provides a Python abstraction for MCP servers through the `MCPServer` base class. 
This enables agents to interact with external MCP-capable tools and services.\n\n### MCPServer Base Class\n\nThe `MCPServer` class provides the foundation for MCP protocol implementation with methods for:\n\n- **Resources**: `list_resources()`, `list_resource_templates()`, `read_resource()`\n- **Tools**: Tool invocation and management\n- **Prompts**: Server-provided prompt templates\n\n资料来源：[src/agents/mcp/server.py]()\n\n### Require Approval Settings\n\nMCP tools support granular approval controls:\n\n| Setting | Behavior |\n|---------|----------|\n| `RequireApprovalSetting.NEVER` | Always auto-approve |\n| `RequireApprovalSetting.ALWAYS` | Always require approval |\n| `RequireApprovalSetting.UNDETERMINED` | Use default behavior |\n\n## Session and State Management\n\n### Run State\n\nThe `run_state` object tracks execution context including:\n\n- Current agent\n- Conversation history\n- Generated items\n- Original input\n- Turn counters\n\n### Persistence\n\nThe SDK supports session persistence for maintaining state across multiple interactions:\n\n```python\nsession_persistence_enabled: bool\nstore: StoreSetting\n```\n\n## Tracing and Visualization\n\n### Agent Visualization\n\nThe SDK includes visualization utilities for generating DOT-format diagrams of agent relationships:\n\n| Function | Purpose |\n|----------|---------|\n| `get_all_nodes()` | Generate node definitions for agent graph |\n| `get_all_edges()` | Generate edge definitions for handoff connections |\n\n```mermaid\ngraph TD\n    A[User] --> B[Orchestrator Agent]\n    B --> C[Research Agent]\n    B --> D[Writer Agent]\n    C --> E[Web Search Tool]\n    D --> F[File Write Tool]\n    B --> G[Analytics Agent]\n    G --> H[Data Analysis Tool]\n```\n\n资料来源：[src/agents/extensions/visualization.py]()\n\n## Item Processing\n\n### Message Item Extraction\n\nThe SDK provides utilities for extracting content from conversation items:\n\n| Method | Purpose |\n|--------|---------|\n| `text_message_output()` | 
Extract text from a single message output item |\n| `text_message_outputs()` | Extract concatenated text from multiple items |\n| `extract_refusal()` | Extract refusal content if model refused to respond |\n\n```python\n@classmethod\ndef extract_refusal(cls, message: TResponseOutputItem) -> str | None:\n    \"\"\"Extracts refusal content from a message, if any.\"\"\"\n```\n\n## Run Configuration\n\n### Key Configuration Options\n\n| Parameter | Type | Description |\n|-----------|------|-------------|\n| `max_turns` | `int` | Maximum conversation turns |\n| `tools` | `list[Function]` | Available tools for the run |\n| `input_guardrails` | `list[InputGuardrail]` | Input validation |\n| `output_guardrails` | `list[OutputGuardrail]` | Output validation |\n| `tool_use_tracker` | `ToolUseTracker` | Tracks tool invocations |\n| `run_state` | `RunState` | Mutable execution state |\n\n资料来源：[src/agents/run.py]()\n\n## Example Workflow Patterns\n\n### Research Bot Architecture\n\nA common pattern involves multiple specialized agents:\n\n1. **Planner Agent**: Decomposes user queries into search tasks\n2. **Search Agent**: Executes web searches in parallel\n3. **Writer Agent**: Synthesizes research into final reports\n\n```mermaid\ngraph LR\n    A[User Query] --> B[Planner Agent]\n    B --> C[Search 1]\n    B --> D[Search 2]\n    B --> E[Search N]\n    C --> F[Writer Agent]\n    D --> F\n    E --> F\n    F --> G[Final Report]\n```\n\n### Sandbox Agent Workflow\n\nSandbox agents provide isolated execution environments:\n\n```mermaid\ngraph TD\n    A[SandboxAgent] --> B[Workspace]\n    A --> C[Manifest]\n    C --> D[Skill Loading]\n    B --> E[Artifact Management]\n    E --> F[File System Access]\n    D --> G[Tool Execution]\n```\n\n## SDK Version\n\nCurrent SDK version: `1.0.0` (semantic versioning)\n\n资料来源：[src/agents/version.py]()\n\n## Summary\n\nThe OpenAI Agents SDK provides a comprehensive framework for building sophisticated multi-agent applications. 
Key capabilities include:\n\n- **Multi-Agent Orchestration**: Define and coordinate multiple agents with specialized roles\n- **Handoff System**: Seamlessly transfer control between agents while maintaining context\n- **Tool Execution**: Integrate tools with guardrail validation at input and output\n- **MCP Integration**: Connect to external Model Context Protocol servers\n- **State Management**: Track execution state with persistence support\n- **Tracing**: Monitor and visualize agent interactions and flows\n\nThe SDK abstracts the complexity of turn resolution, tool tracking, and handoff management, allowing developers to focus on defining agent behavior and tool integrations.\n\n---\n\n<a id='installation'></a>\n\n## Installation and Setup\n\n### 相关页面\n\n相关主题：[OpenAI Agents SDK Overview](#overview), [Examples Index](#examples-index)\n\n<details>\n<summary>相关源码文件</summary>\n\n以下源码文件用于生成本页说明：\n\n- [pyproject.toml](https://github.com/openai/openai-agents-python/blob/main/pyproject.toml)\n- [src/agents/_config.py](https://github.com/openai/openai-agents-python/blob/main/src/agents/_config.py)\n- [src/agents/run_config.py](https://github.com/openai/openai-agents-python/blob/main/src/agents/run_config.py)\n- [examples/sandbox/extensions/README.md](https://github.com/openai/openai-agents-python/blob/main/examples/sandbox/extensions/README.md)\n- [examples/sandbox/README.md](https://github.com/openai/openai-agents-python/blob/main/examples/sandbox/README.md)\n- [examples/model_providers/README.md](https://github.com/openai/openai-agents-python/blob/main/examples/model_providers/README.md)\n</details>\n\n# Installation and Setup\n\n## Overview\n\nThe `openai-agents-python` library provides a comprehensive multi-agent framework for building AI-powered applications. 
The installation and setup process involves managing dependencies, configuring environment variables, and optionally setting up sandbox backends for code execution capabilities.\n\nThis page covers the complete setup workflow from initial installation through runtime configuration.\n\n---\n\n## Prerequisites\n\n### System Requirements\n\n| Requirement | Specification |\n|-------------|---------------|\n| Python | 3.10 or higher |\n| Package Manager | pip, uv, or poetry |\n| API Access | OpenAI API key (or compatible provider) |\n\n### Environment Variables\n\nThe library requires the `OPENAI_API_KEY` environment variable for core functionality. Additional provider-specific variables may be needed depending on your use case.\n\n```bash\n# Core requirement\nexport OPENAI_API_KEY=\"sk-...\"\n\n# Optional: Model provider alternatives\nexport OPENROUTER_API_KEY=\"...\"\nexport LITELLM_API_KEY=\"...\"\n```\n\n资料来源：[examples/model_providers/README.md]()\n\n---\n\n## Installation Methods\n\n### Using pip\n\n```bash\npip install openai-agents\n```\n\n### Using uv (Recommended)\n\n```bash\nuv pip install openai-agents\n```\n\nOr with sync for development:\n\n```bash\nuv sync\n```\n\n### With Extras\n\nThe `pyproject.toml` defines optional dependency groups for specific features:\n\n| Extra | Description | Dependencies |\n|-------|-------------|--------------|\n| `sandbox` | Core sandbox functionality | e2b-sdk, modal-client |\n| `e2b` | E2B sandbox backend | e2b-code-interpreter, e2b |\n| `blaxel` | Blaxel sandbox backend | blaxel |\n| `modal` | Modal sandbox backend | modal |\n| `vercel` | Vercel deployment | vercel |\n| `daytona` | Daytona sandbox backend | daytona |\n| `temporal` | Temporal workflow integration | temporal-sdk |\n| `runloop` | Runloop backend | runloop |\n| `dev` | Development dependencies | pytest, ruff, mypy |\n\n安装带所有 sandbox 后端的完整版本：\n\n```bash\nuv sync --extra 
sandbox\n```\n\n资料来源：[pyproject.toml](https://github.com/openai/openai-agents-python/blob/main/pyproject.toml)\n\n---\n\n## Configuration Architecture\n\nThe library uses a layered configuration system:\n\n```mermaid\ngraph TD\n    A[Environment Variables] --> B[DefaultConfig]\n    C[User Code Config] --> D[RunConfig]\n    B --> D\n    E[Agent-specific Config] --> F[Agent]\n    F --> D\n```\n\n### Configuration Loading Order\n\n1. **Environment Variables** - Base API keys and provider settings\n2. **Default Config** - Library defaults from `_config.py`\n3. **RunConfig** - User-provided runtime configuration\n4. **Agent Config** - Per-agent overrides\n\n资料来源：[src/agents/_config.py](https://github.com/openai/openai-agents-python/blob/main/src/agents/_config.py)\n\n---\n\n## Core Configuration\n\n### RunConfig Parameters\n\nThe `RunConfig` class provides runtime configuration for agent execution:\n\n| Parameter | Type | Default | Description |\n|-----------|------|---------|-------------|\n| `model` | `str` | `\"gpt-4o\"` | Model identifier |\n| `model_provider` | `str \\| None` | `None` | Custom model provider |\n| `max_tokens` | `int \\| None` | `None` | Maximum response tokens |\n| `temperature` | `float \\| None` | `None` | Sampling temperature |\n| `parallel_tool_calls` | `bool` | `True` | Enable parallel tool execution |\n| `tool_choice` | `str \\| None` | `None` | Tool selection strategy |\n| `tracing` | `TracingKind` | `\"off\"` | Tracing provider |\n| `trace_include_defaults` | `bool` | `False` | Include default values in traces |\n| `trace_include_raw_model_messages` | `bool` | `False` | Include raw model messages |\n| `session.persistence` | `SessionPersistence` | `None` | Conversation persistence |\n\n资料来源：[src/agents/run_config.py](https://github.com/openai/openai-agents-python/blob/main/src/agents/run_config.py)\n\n### Basic Configuration Example\n\n```python\nfrom agents import Agent, Runner, RunConfig\n\nconfig = RunConfig(\n    model=\"gpt-4o\",\n    
temperature=0.7,\n    tracing=\"console\",  # Enable console tracing\n)\n\nagent = Agent(\n    name=\"assistant\",\n    instructions=\"You are a helpful assistant.\",\n)\n\nresult = await Runner.run(agent, \"Hello!\", run_config=config)\n```\n\n---\n\n## Sandbox Backend Setup\n\nThe library supports multiple sandbox backends for secure code execution. Each backend has specific setup requirements.\n\n### Backend Comparison\n\n| Backend | Use Case | Key Features |\n|---------|----------|--------------|\n| E2B | General-purpose sandbox | Bash/Jupyter interfaces, filesystem access |\n| Blaxel | Cloud development | Persistent storage, cloud bucket mounts |\n| Modal | Serverless execution | GPU support, scalable workloads |\n| Daytona | Containerized dev | Full development environments |\n| Vercel | Deployment | Serverless deployment, edge functions |\n\n资料来源：[examples/sandbox/extensions/README.md](https://github.com/openai/openai-agents-python/blob/main/examples/sandbox/extensions/README.md)\n\n### E2B Setup\n\n```bash\n# Install E2B extra\nuv sync --extra e2b\n\n# Set API key\nexport E2B_API_KEY=\"e2b_...\"\n```\n\n**Run Example:**\n\n```bash\nuv run python examples/sandbox/basic.py --backend e2b\nuv run python examples/sandbox/basic.py --backend e2b_code_interpreter\n```\n\n资料来源：[examples/sandbox/extensions/README.md]()\n\n### Blaxel Setup\n\n```bash\n# Install Blaxel extra\nuv sync --extra blaxel\n\n# Set environment variables\nexport OPENAI_API_KEY=\"...\"\nexport BL_API_KEY=\"...\"\nexport BL_WORKSPACE=\"...\"\n```\n\n**Run Example:**\n\n```bash\nuv run python examples/sandbox/extensions/blaxel_runner.py --stream\n```\n\n**Useful Flags:**\n\n| Flag | Description |\n|------|-------------|\n| `--image blaxel/py-app` | Container image |\n| `--region us-pdx-1` | Deployment region |\n| `--memory 4096` | Memory allocation (MB) |\n| `--ttl 1h` | Session time-to-live |\n\n资料来源：[examples/sandbox/extensions/README.md]()\n\n### Modal Setup\n\n```bash\n# Install Modal 
extra\nuv sync --extra modal\n\n# Authenticate\nuv run modal token set --token-id <token-id> --token-secret <token-secret>\n\n# Or use environment variables\nexport MODAL_TOKEN_ID=\"...\"\nexport MODAL_TOKEN_SECRET=\"...\"\n```\n\n**Run Example:**\n\n```bash\nuv run python examples/sandbox/extensions/modal_runner.py \\\n  --app-name openai-agents-python-sandbox-example \\\n  --stream\n```\n\n**Useful Flags:**\n\n| Flag | Description |\n|------|-------------|\n| `--workspace-persistence tar` | Workspace persistence mode |\n| `--sandbox-create-timeout-s 60` | Sandbox creation timeout |\n| `--runtime node22` | Runtime environment |\n\n资料来源：[examples/sandbox/extensions/README.md]()\n\n### Daytona Setup\n\n```bash\n# Install Daytona extra\nuv sync --extra daytona\n\n# Set API key\nexport OPENAI_API_KEY=\"...\"\nexport DAYTONA_API_KEY=\"...\"\n```\n\n**Run Example:**\n\n```bash\nuv run python examples/sandbox/extensions/daytona/daytona_runner.py --stream\n```\n\n### Vercel Setup\n\n```bash\n# Install Vercel extra\nuv sync --extra vercel\n\n# Option 1: OIDC token (recommended)\nexport OPENAI_API_KEY=\"...\"\nexport VERCEL_OIDC_TOKEN=\"...\"\n\n# Option 2: Explicit tokens\nexport OPENAI_API_KEY=\"...\"\nexport VERCEL_TOKEN=\"...\"\nexport VERCEL_PROJECT_ID=\"...\"\nexport VERCEL_TEAM_ID=\"...\"\n```\n\n**Run Example:**\n\n```bash\nuv run python examples/sandbox/extensions/vercel_runner.py --stream\n```\n\n### Runloop Setup\n\n```bash\n# Install Runloop extra\nuv sync --extra runloop\n\n# Sign up at platform.runloop.ai\n```\n\n资料来源：[examples/sandbox/extensions/README.md]()\n\n---\n\n## Sandbox Basic Examples\n\n### Minimal Sandbox Setup\n\n```python\nfrom agents.sandbox import SandboxAgent, SandboxSession\nfrom agents.sandbox.backends.e2b import E2BBackend\n\n# Create backend\nbackend = E2BBackend(api_key=\"e2b_...\")\n\n# Create sandbox session\nsession = SandboxSession(backend=backend)\n\n# Run agent\nagent = SandboxAgent(\n    name=\"code_assistant\",\n    
instructions=\"Execute Python code in the sandbox.\",\n)\n\nresult = await Runner.run(agent, \"Print hello world\", session=session)\n```\n\n资料来源：[examples/sandbox/README.md](https://github.com/openai/openai-agents-python/blob/main/examples/sandbox/README.md)\n\n### Available Sandbox Examples\n\n| Example | File | Description |\n|---------|------|-------------|\n| Basic sandbox | `examples/sandbox/basic.py` | Create session, run agent, stream results |\n| Handoffs | `examples/sandbox/handoffs.py` | Agent handoffs with sandbox backends |\n| Workspace capabilities | `examples/sandbox/sandbox_agent_capabilities.py` | Configure workspace access |\n| Combined tools | `examples/sandbox/sandbox_agent_with_tools.py` | Sandbox + host-defined tools |\n| Agents as tools | `examples/sandbox/sandbox_agents_as_tools.py` | Expose sandbox agents as tools |\n| Remote snapshots | `examples/sandbox/sandbox_agent_with_remote_snapshot.py` | Start from saved snapshots |\n\n**Run Commands:**\n\n```bash\nuv run python examples/sandbox/basic.py\nuv run python examples/sandbox/handoffs.py\nuv run python examples/sandbox/sandbox_agent_capabilities.py\n```\n\n---\n\n## Model Provider Configuration\n\n### OpenRouter (Default for Examples)\n\n```bash\nexport OPENROUTER_API_KEY=\"...\"\n```\n\n```python\nfrom agents import Agent, Runner, RunConfig\n\nconfig = RunConfig(\n    model=\"openrouter/openai/gpt-4o-mini\",\n)\n\nresult = await Runner.run(agent, \"Hello\", run_config=config)\n```\n\n### LiteLLM Provider\n\n```bash\nuv sync --extra litellm\n```\n\n```python\nfrom agents.model_providers.litellm_provider import LiteLLMProvider\n\nprovider = LiteLLMProvider(model=\"gpt-4o-mini\")\n```\n\n### Any-LLM Provider\n\n```bash\nuv sync --extra any-llm\n```\n\n```python\nfrom agents.model_providers.any_llm_provider import AnyLLMProvider\n\nprovider = AnyLLMProvider(model=\"gpt-4o-mini\")\n```\n\n**Run Examples:**\n\n```bash\nuv run examples/model_providers/litellm_provider.py\nuv run 
examples/model_providers/litellm_auto.py\nuv run examples/model_providers/any_llm_provider.py\nuv run examples/model_providers/any_llm_auto.py\n```\n\n资料来源：[examples/model_providers/README.md]()\n\n---\n\n## Example Project Setup\n\n### Healthcare Support Example\n\n```bash\n# List available scenarios\nuv run python examples/sandbox/healthcare_support/main.py --list-scenarios\n\n# Run specific scenario\nuv run python examples/sandbox/healthcare_support/main.py --scenario blue_cross_pt_benefits\nuv run python examples/sandbox/healthcare_support/main.py --scenario messy_ambiguous_knee_case\n\n# Reset memory state\nuv run python examples/sandbox/healthcare_support/main.py --reset-memory\n```\n\n**For unattended runs:**\n\n```bash\nEXAMPLES_INTERACTIVE_MODE=auto uv run python examples/sandbox/healthcare_support/main.py --scenario messy_ambiguous_knee_case\n```\n\n资料来源：[examples/sandbox/healthcare_support/README.md]()\n\n### Research Bot Example\n\n```bash\npython -m examples.research_bot.main\n```\n\n资料来源：[examples/research_bot/README.md]()\n\n---\n\n## Temporal Integration Setup\n\nFor workflow-based sandbox management:\n\n```bash\n# Install Temporal extra\nuv sync --extra temporal\n\n# Install Temporal CLI and just\n# Start dev server\njust temporal\n\n# In separate terminals\njust worker  # Start worker\njust tui     # Start TUI\n```\n\n**TUI Commands:**\n\n| Command | Description |\n|---------|-------------|\n| `/switch` | Switch to different sandbox backend |\n| `/fork [title]` | Fork session to different backend |\n| `/title <name>` | Rename current session |\n\n资料来源：[examples/sandbox/extensions/temporal/README.md]()\n\n---\n\n## Environment Configuration Files\n\n### Repository Root `.env`\n\nPlace a `.env` file at the repository root:\n\n```\nOPENAI_API_KEY=\"sk-...\"\n```\n\n### Example-Specific `.env`\n\nSome examples support their own `.env` files:\n\n```\n# 
examples/sandbox/extensions/temporal/.env\nOPENAI_API_KEY=\"sk-...\"\nDAYTONA_API_KEY=\"dtn_...\"\nE2B_API_KEY=\"e2b_...\"\n```\n\n---\n\n## Troubleshooting Setup Issues\n\n### Common Issues\n\n| Issue | Solution |\n|-------|----------|\n| Missing API key | Set `OPENAI_API_KEY` environment variable |\n| Backend connection failed | Verify backend API key and network access |\n| Import errors | Run `uv sync` to install all dependencies |\n| Sandbox timeout | Increase `--sandbox-create-timeout-s` parameter |\n\n### Verify Installation\n\n```python\nimport agents\nprint(agents.__version__)\n```\n\n### Check Backend Configuration\n\n```python\nfrom agents.sandbox.backends.e2b import E2BBackend\n\nbackend = E2BBackend()\n# Check if backend is properly configured\n```\n\n---\n\n## Next Steps\n\nAfter completing installation and setup:\n\n1. **Quick Start** - Run `examples/sandbox/basic.py` to verify sandbox functionality\n2. **Agent Development** - Create your first agent with custom instructions\n3. **Tool Integration** - Add custom tools to extend agent capabilities\n4. 
**Multi-Agent Systems** - Implement agent handoffs and orchestration\n\n---\n\n<a id='examples-index'></a>\n\n## Examples Index\n\n### 相关页面\n\n相关主题：[OpenAI Agents SDK Overview](#overview), [Agents](#agents)\n\n<details>\n<summary>相关源码文件</summary>\n\n以下源码文件用于生成本页说明：\n\n- [examples/basic/hello_world.py](https://github.com/openai/openai-agents-python/blob/main/examples/basic/hello_world.py)\n- [examples/agent_patterns/agents_as_tools.py](https://github.com/openai/openai-agents-python/blob/main/examples/agent_patterns/agents_as_tools.py)\n- [examples/sandbox/basic.py](https://github.com/openai/openai-agents-python/blob/main/examples/sandbox/basic.py)\n- [examples/voice/streamed/main.py](https://github.com/openai/openai-agents-python/blob/main/examples/voice/streamed/main.py)\n- [examples/financial_research_agent/README.md](https://github.com/openai/openai-agents-python/blob/main/examples/financial_research_agent/README.md)\n- [examples/research_bot/README.md](https://github.com/openai/openai-agents-python/blob/main/examples/research_bot/README.md)\n- [examples/sandbox/README.md](https://github.com/openai/openai-agents-python/blob/main/examples/sandbox/README.md)\n- [examples/mcp/streamable_http_remote_example/README.md](https://github.com/openai/openai-agents-python/blob/main/examples/mcp/streamable_http_remote_example/README.md)\n- [examples/model_providers/README.md](https://github.com/openai/openai-agents-python/blob/main/examples/model_providers/README.md)\n- [examples/sandbox/extensions/README.md](https://github.com/openai/openai-agents-python/blob/main/examples/sandbox/extensions/README.md)\n</details>\n\n# Examples Index\n\n## Overview\n\nThe Examples Index serves as a comprehensive guide to the sample applications and demonstrations provided in the openai-agents-python repository. 
These examples are designed to showcase the capabilities of the Agents SDK across various use cases, from basic agent interactions to complex multi-agent workflows involving sandboxed execution environments, voice interfaces, and external tool integrations.\n\nThe examples directory structure organizes demonstrations by functional category, allowing developers to quickly locate relevant implementations for their specific requirements. Each example is designed to be runnable with minimal configuration, serving as both documentation and a starting point for custom implementations.\n\n## Example Categories\n\n### Basic Examples\n\nThe basic examples provide the foundational patterns for building agents with the SDK. These examples demonstrate core concepts with minimal complexity.\n\n| Example | File | Purpose |\n|---------|------|---------|\n| Hello World | `examples/basic/hello_world.py` | Simple agent that responds to user input |\n| Agent as Tool | `examples/agent_patterns/agents_as_tools.py` | Demonstrates wrapping agents as tools for other agents |\n\n资料来源：[examples/basic/hello_world.py](examples/basic/hello_world.py)\n资料来源：[examples/agent_patterns/agents_as_tools.py](examples/agent_patterns/agents_as_tools.py)\n\n### Sandbox Examples\n\nSandbox examples demonstrate the isolated workspace capabilities of the Agents SDK, enabling agents to execute code and manipulate files in a secure environment.\n\n#### Small API Examples\n\n| Example | Command | Description |\n|---------|---------|-------------|\n| Basic Sandbox | `uv run python examples/sandbox/basic.py` | Creates a sandbox session from a manifest, runs a `SandboxAgent`, and streams the result |\n| Handoffs | `uv run python examples/sandbox/handoffs.py` | Uses handoffs with sandbox-backed agents |\n| Workspace Capabilities | `uv run python examples/sandbox/sandbox_agent_capabilities.py` | Configures a sandbox agent with workspace capabilities |\n| Sandbox with Tools | `uv run python 
examples/sandbox/sandbox_agent_with_tools.py` | Combines sandbox capabilities with host-defined tools |\n| Agents as Tools | `uv run python examples/sandbox/sandbox_agents_as_tools.py` | Exposes sandbox agents as tools for another agent |\n| Remote Snapshot | `uv run python examples/sandbox/sandbox_agent_with_remote_snapshot.py` | Starts from a remote snapshot |\n\n资料来源：[examples/sandbox/README.md:1-20](examples/sandbox/README.md)\n\n#### Sandbox Extensions\n\nSandbox extensions provide integrations with various cloud sandbox providers:\n\n| Provider | Setup Command | Run Command |\n|----------|---------------|-------------|\n| E2B | `uv sync --extra e2b` | `uv run python examples/sandbox/basic.py --backend e2b` |\n| Modal | `uv sync --extra modal` | `uv run python examples/sandbox/extensions/modal_runner.py --stream` |\n| Blaxel | `uv sync --extra blaxel` | `uv run python examples/sandbox/extensions/blaxel_runner.py --stream` |\n| Vercel | `uv sync --extra vercel` | `uv run python examples/sandbox/extensions/vercel_runner.py --stream` |\n| Daytona | `uv sync --extra daytona` | `uv run python examples/sandbox/extensions/daytona/daytona_runner.py --stream` |\n| Runloop | `uv sync --extra runloop` | Platform-specific setup |\n| Temporal | Temporal CLI + just | `just worker` / `just tui` |\n\n资料来源：[examples/sandbox/extensions/README.md](examples/sandbox/extensions/README.md)\n\n### Multi-Agent Research Examples\n\n#### Research Bot\n\nThe research bot demonstrates a multi-agent system where agents collaborate to perform web research and synthesize findings into reports.\n\n**Architecture Flow:**\n\n```mermaid\ngraph TD\n    A[User Input] --> B[Planner Agent]\n    B --> C[Generate Search Queries]\n    C --> D[Search Agent 1]\n    C --> E[Search Agent 2]\n    C --> F[Search Agent N]\n    D --> G[Parallel Execution]\n    E --> G\n    F --> G\n    G --> H[Writer Agent]\n    H --> I[Final Report]\n```\n\n**Key Components:**\n\n- **Planner Agent**: Creates a research plan 
with search terms and rationale\n- **Search Agent**: Uses Web Search tool to search and summarize results\n- **Writer Agent**: Synthesizes summaries into a long-form markdown report\n\n资料来源：[examples/research_bot/README.md](examples/research_bot/README.md)\n\n#### Financial Research Agent\n\nThe financial research agent demonstrates domain-specific research capabilities with access to specialized analysis tools.\n\n**Agent Configuration:**\n\n```\nYou are a senior financial analyst. You will be provided with the original query\nand a set of raw search summaries. Your job is to synthesize these into a\nlong‑form markdown report with a short executive summary.\n```\n\n**Available Tools:**\n- `fundamentals_analysis` - Specialist write-up for fundamental analysis\n- `risk_analysis` - Specialist write-up for risk assessment\n\n资料来源：[examples/financial_research_agent/README.md](examples/financial_research_agent/README.md)\n\n### Healthcare Support Example\n\nA demonstration workflow that combines sandbox execution with human-in-the-loop approvals for healthcare-related tasks.\n\n**Workflow Components:**\n\n- **Orchestrator Agent**: Coordinates the overall workflow\n- **Benefits Subagent**: Handles benefits-related queries\n- **Sandbox Policy Agent**: Executes policy validation in sandbox\n- **Memory Recap Agent**: Maintains conversation context\n\n**Key Files:**\n\n| File | Purpose |\n|------|---------|\n| `main.py` | Standalone CLI demo runner |\n| `workflow.py` | Shared workflow execution logic, sandbox setup, artifact copying, tracing |\n| `support_agents.py` | Agent definitions |\n| `tools.py` | Local lookup tools and approval-gated human handoff |\n| `skills/prior-auth-packet-builder/SKILL.md` | Sandbox skill definition |\n\n**Available Scenarios:**\n\n```bash\nuv run python examples/sandbox/healthcare_support/main.py --list-scenarios\nuv run python examples/sandbox/healthcare_support/main.py --scenario blue_cross_pt_benefits\nuv run python 
examples/sandbox/healthcare_support/main.py --scenario messy_ambiguous_knee_case\n```\n\n资料来源：[examples/sandbox/healthcare_support/README.md](examples/sandbox/healthcare_support/README.md)\n\n### Voice Examples\n\nVoice examples demonstrate real-time audio interaction capabilities with agents.\n\n**Architecture:**\n\n```mermaid\ngraph LR\n    A[Audio Input] --> B[Voice Agent]\n    B --> C[Streaming Response]\n    C --> D[Audio Output]\n    B --> E[Tool Calls]\n    E --> F[External Services]\n```\n\n**Run Command:**\n\n```bash\nuv run python examples/voice/streamed/main.py\n```\n\n资料来源：[examples/voice/streamed/main.py](examples/voice/streamed/main.py)\n\n### MCP Examples\n\nModel Context Protocol (MCP) examples demonstrate integration with external MCP servers for extended tool capabilities.\n\n#### Streamable HTTP Remote Example\n\nConnects to DeepWiki over the Streamable HTTP transport to leverage external tools.\n\n**Run Command:**\n\n```bash\nuv run python examples/mcp/streamable_http_remote_example/main.py\n```\n\n**Prerequisites:**\n- `OPENAI_API_KEY` set for model calls\n\n资料来源：[examples/mcp/streamable_http_remote_example/README.md](examples/mcp/streamable_http_remote_example/README.md)\n\n### Model Provider Examples\n\nModel provider examples demonstrate routing models through adapter layers for flexibility in model selection.\n\n| Adapter | Direct Run | Auto Mode |\n|---------|------------|-----------|\n| any-llm | `uv run examples/model_providers/any_llm_provider.py` | `uv run examples/model_providers/any_llm_auto.py` |\n| LiteLLM | `uv run examples/model_providers/litellm_provider.py` | `uv run examples/model_providers/litellm_auto.py` |\n\n**Model Override:**\n\n```bash\nuv run examples/model_providers/any_llm_provider.py --model openrouter/openai/gpt-5.4-mini\n```\n\n资料来源：[examples/model_providers/README.md](examples/model_providers/README.md)\n\n## Common Configuration\n\n### Environment Variables\n\nMost examples require the `OPENAI_API_KEY` 
environment variable. Configure it in one of these locations:\n\n1. Repository-root `.env` file\n2. Example's local `.env` file\n3. Shell environment\n\n### Running with uv\n\nThe project uses `uv` for dependency management. Run examples with:\n\n```bash\nuv run python <path-to-example>\n```\n\n### Interactive Mode\n\nFor examples with prompts, set `EXAMPLES_INTERACTIVE_MODE=auto` to auto-answer:\n\n```bash\nEXAMPLES_INTERACTIVE_MODE=auto uv run python examples/sandbox/healthcare_support/main.py --scenario messy_ambiguous_knee_case\n```\n\n## Example Selection Guide\n\n```mermaid\ngraph TD\n    A[Use Case] --> B{Basic Interaction?}\n    B -->|Yes| C[Basic Examples]\n    B -->|No| D{Multi-Agent Workflow?}\n    D -->|Yes| E{Research Domain?}\n    D -->|No| F{Sandbox Required?}\n    E -->|Financial| G[Financial Research Agent]\n    E -->|General| H[Research Bot]\n    F -->|Yes| I{Specialized Provider?}\n    F -->|No| J[Agent Patterns]\n    I -->|E2B| K[E2B Examples]\n    I -->|Modal| L[Modal Examples]\n    I -->|Vercel| M[Vercel Examples]\n    I -->|Daytona| N[Daytona Examples]\n    I -->|Blaxel| O[Blaxel Examples]\n```\n\n## Sandbox Backend Comparison\n\n| Backend | Interface | Workspace Persistence | Cloud Support |\n|---------|-----------|----------------------|---------------|\n| E2B | Bash-style | Snapshot files | Yes |\n| Modal | Bash-style | Tar, snapshot files/directory | Yes |\n| Blaxel | Bash-style + PTY | Drive mount, cloud buckets | Yes (S3, R2, GCS) |\n| Vercel | Command execution | Tar, snapshot | Yes |\n| Daytona | Bash-style | Yes | Yes |\n| Runloop | TBD | Yes | Yes |\n\n资料来源：[examples/sandbox/extensions/README.md](examples/sandbox/extensions/README.md)\n\n---\n\n<a id='agents'></a>\n\n## Agents\n\n### 相关页面\n\n相关主题：[Tools](#tools), [Handoffs](#handoffs), [Guardrails](#guardrails), [Run Loop and Execution](#run-loop)\n\n<details>\n<summary>相关源码文件</summary>\n\n以下源码文件用于生成本页说明：\n\n- 
[src/agents/agent.py](https://github.com/openai/openai-agents-python/blob/main/src/agents/agent.py)\n- [src/agents/lifecycle.py](https://github.com/openai/openai-agents-python/blob/main/src/agents/lifecycle.py)\n- [src/agents/agent_output.py](https://github.com/openai/openai-agents-python/blob/main/src/agents/agent_output.py)\n- [src/agents/items.py](https://github.com/openai/openai-agents-python/blob/main/src/agents/items.py)\n- [src/agents/function_schema.py](https://github.com/openai/openai-agents-python/blob/main/src/agents/function_schema.py)\n- [src/agents/run.py](https://github.com/openai/openai-agents-python/blob/main/src/agents/run.py)\n- [src/agents/run_internal/turn_resolution.py](https://github.com/openai/openai-agents-python/blob/main/src/agents/run_internal/turn_resolution.py)\n- [src/agents/extensions/visualization.py](https://github.com/openai/openai-agents-python/blob/main/src/agents/extensions/visualization.py)\n- [src/agents/handoffs/__init__.py](https://github.com/openai/openai-agents-python/blob/main/src/agents/handoffs/__init__.py)\n- [src/agents/handoffs/history.py](https://github.com/openai/openai-agents-python/blob/main/src/agents/handoffs/history.py)\n</details>\n\n# Agents\n\n## Overview\n\nAgents are the core execution units in the OpenAI Agents SDK. An agent encapsulates an LLM with instructions, tools, handoffs, and guardrails that enable autonomous task completion. Agents process user inputs, make decisions about tool usage, transfer control to other agents, and generate responses.\n\nThe agent system provides a structured approach to building AI-powered applications by separating concerns between orchestration, tool execution, and response generation. 
Agents can be composed hierarchically, where one agent can delegate tasks to sub-agents or hand off control entirely to specialized agents.\n\n## Architecture\n\n### Agent Core Components\n\nAn agent consists of several interconnected components that work together to process requests and generate responses.\n\n```mermaid\ngraph TD\n    A[User Input] --> B[Agent]\n    B --> C[Instructions/Prompt]\n    B --> D[Tools]\n    B --> E[Handoffs]\n    B --> F[Guardrails]\n    C --> G[LLM Decision Engine]\n    D --> H[Tool Execution]\n    E --> I[Agent Transfer]\n    G --> J[Response/Action]\n    H --> J\n    I --> K[Target Agent]\n    K --> G\n```\n\n### Agent Types\n\n| Type | Description | Use Case |\n|------|-------------|----------|\n| `Agent[TContext]` | Base agent type with generic context | General purpose agents |\n| `SandboxAgent` | Agent with isolated workspace | Code execution, file operations |\n| `FunctionAgent` | Agent for function/tool orchestration | Tool-heavy workflows |\n\n### Source File Organization\n\n| File | Purpose |\n|------|---------|\n| `src/agents/agent.py` | Core agent class definition |\n| `src/agents/lifecycle.py` | Agent lifecycle management |\n| `src/agents/agent_output.py` | Output types and responses |\n| `src/agents/items.py` | Run item definitions and helpers |\n| `src/agents/function_schema.py` | Tool schema generation |\n\n## Agent Lifecycle\n\nAgents follow a defined lifecycle from initialization through execution to completion or handoff.\n\n```mermaid\nstateDiagram-v2\n    [*] --> Initialized: Agent Created\n    Initialized --> Running: Input Received\n    Running --> ToolExecution: Tool Call\n    ToolExecution --> Running: Tool Result\n    Running --> Handoff: Transfer Request\n    Handoff --> [*]: Complete\n    Running --> Response: Final Output\n    Response --> [*]: Complete\n    Handoff --> Running: New Agent\n```\n\n### Lifecycle States\n\n| State | Description | Entry Condition |\n|-------|-------------|-----------------|\n| 
`Initialized` | Agent created but not yet processing | Object instantiation |\n| `Running` | Actively processing input | `run()` or `run_sync()` called |\n| `ToolExecution` | Executing one or more tools | LLM requests tool call |\n| `Handoff` | Transferring to another agent | LLM triggers handoff |\n| `Response` | Generating final response | No more actions needed |\n\n资料来源：[src/agents/lifecycle.py:1-50]()\n\n### Turn Resolution\n\nThe turn resolution process handles the core agent loop. Each turn processes input and determines next actions.\n\n```mermaid\nsequenceDiagram\n    participant U as User\n    participant R as Runner\n    participant A as Agent\n    participant T as Tools\n    participant H as Handoffs\n    \n    U->>R: User Input\n    R->>A: Process Turn\n    A->>T: Tool Calls?\n    T-->>A: Results\n    A->>H: Handoff?\n    H-->>A: New Agent\n    A->>R: Response\n    R-->>U: Output\n```\n\n资料来源：[src/agents/run_internal/turn_resolution.py:1-80]()\n\n## Run Items\n\nRun items represent the atomic units of work within an agent execution. 
They capture messages, tool calls, tool results, and handoffs.\n\n### Item Types\n\n| Type | Description | Source |\n|------|-------------|--------|\n| `MessageOutputItem` | LLM generated message | `src/agents/items.py:30-60` |\n| `ToolCallItem` | Tool invocation request | `src/agents/items.py:61-90` |\n| `ToolCallOutputItem` | Tool execution result | `src/agents/items.py:91-120` |\n| `HandoffItem` | Agent transfer | `src/agents/items.py:121-150` |\n| `ToolApprovalItem` | Human approval for tools | `src/agents/handoffs/history.py:50-70` |\n\n### Message Extraction\n\nThe `ItemHelpers` class provides utilities for extracting content from run items:\n\n```python\n# Extract text from message output\ntext = ItemHelpers.text_message_output(message_item)\n\n# Extract refusal if present\nrefusal = ItemHelpers.extract_refusal(message.raw_item)\n\n# Convert string to input list\ninput_list = ItemHelpers.input_to_new_input_list(\"user message\")\n```\n\n资料来源：[src/agents/items.py:40-75]()\n\n## Handoffs\n\nHandoffs enable agent-to-agent transfer, allowing specialized agents to handle specific tasks.\n\n### Handoff Configuration\n\n| Parameter | Type | Description |\n|-----------|------|-------------|\n| `agent` | `Agent` | Target agent |\n| `tool_name_override` | `str` | Override for handoff tool name |\n| `tool_description_override` | `str` | Override for handoff description |\n| `on_handoff` | `Callable` | Callback when handoff occurs |\n| `input_type` | `Type` | Type validation for handoff input |\n| `input_filter` | `Callable` | Filter inputs passed to next agent |\n| `is_enabled` | `bool \\| Callable` | Enable/disable handoff |\n\n资料来源：[src/agents/handoffs/__init__.py:30-80]()\n\n### Handoff History Management\n\nWhen an agent hands off to another, the conversation history is summarized to maintain context:\n\n```python\n# Nested history processing\nnested_history = nest_handoff_history(\n    handoff_input_data,\n    history_mapper=custom_mapper\n)\n```\n\nThe history 
wrapper markers default to `<CONVERSATION HISTORY>` tags but can be customized:\n\n```python\n# Customize history markers\nset_conversation_history_wrappers(\n    start=\"<PREVIOUS_CONTEXT>\",\n    end=\"</PREVIOUS_CONTEXT>\"\n)\n```\n\n资料来源：[src/agents/handoffs/history.py:20-60]()\n\n## Tools and Function Schema\n\nTools extend agent capabilities by providing functions the LLM can call.\n\n### Function Schema Generation\n\nThe `FunctionSchema` class converts Python functions into OpenAI-compatible tool schemas:\n\n```python\nschema = FunctionSchema.from_fn(my_function)\ntool_definition = schema.to_tool_definition()\n```\n\n### Tool Definition Structure\n\n| Field | Type | Description |\n|-------|------|-------------|\n| `name` | `str` | Tool identifier |\n| `description` | `str` | Human-readable description |\n| `parameters` | `dict` | JSON schema for parameters |\n| `strict` | `bool` | Enable strict parameter validation |\n\n资料来源：[src/agents/function_schema.py:1-50]()\n\n## Agent Visualization\n\nThe SDK provides DOT-format visualization for agent graphs:\n\n```mermaid\ngraph TD\n    subgraph AgentGraph\n        A[\"User Input\"] --> B[\"Agent\"]\n        B --> C[\"Tool: search\"]\n        B --> D[\"Tool: calculate\"]\n        B --> E[\"Handoff: specialist\"]\n        E --> F[\"Specialist Agent\"]\n    end\n```\n\n### Graph Components\n\n| Component | Shape | Color | Description |\n|-----------|-------|-------|-------------|\n| Start | Ellipse | lightblue | Entry point |\n| Agent | Box | lightyellow | Agent nodes |\n| Tool | Ellipse | lightgreen | Tool definitions |\n| Handoff | Box | lightgrey | Agent transfer points |\n| End | Ellipse | lightblue | Exit point |\n\n资料来源：[src/agents/extensions/visualization.py:1-60]()\n\n## Agent Output\n\nAgent execution produces structured output containing messages, tool calls, and metadata.\n\n### Output Structure\n\n```python\n@dataclass\nclass AgentOutput:\n    messages: list[MessageOutputItem]\n    tool_calls: 
list[ToolCallItem]\n    tool_results: list[ToolCallOutputItem]\n    handoffs: list[HandoffItem]\n    final_response: str | None\n```\n\n资料来源：[src/agents/agent_output.py:1-40]()\n\n### Response Finalization\n\nAfter tool execution, the system finalizes responses:\n\n```python\ntool_final_output = await _maybe_finalize_from_tool_results(\n    public_agent=agent,\n    original_input=input,\n    new_response=response,\n    pre_step_items=pre_items,\n    new_step_items=new_items,\n    function_results=results\n)\n```\n\nRefusals are extracted and converted to errors:\n\n```python\nrefusal = ItemHelpers.extract_refusal(message_item.raw_item)\nif refusal:\n    raise ModelRefusalError(refusal)\n```\n\n资料来源：[src/agents/run_internal/turn_resolution.py:80-120]()\n\n## Runner Integration\n\nThe `Runner` class orchestrates agent execution, managing the turn loop and state transitions.\n\n### Run Configuration\n\n| Parameter | Type | Default | Description |\n|-----------|------|---------|-------------|\n| `max_turns` | `int` | `10` | Maximum conversation turns |\n| `max_tools` | `int` | `100` | Maximum tool calls |\n| `context_length` | `int` | Model dependent | Context window size |\n| `tool_choice` | `str` | `\"auto\"` | Tool selection strategy |\n\n### State Management\n\nThe runner maintains `RunState` throughout execution:\n\n```python\nrun_state = RunState(\n    current_agent=agent,\n    model_response=response,\n    generated_items=items,\n    run_config=config\n)\n```\n\n资料来源：[src/agents/run.py:100-180]()\n\n## Error Handling\n\n### Model Refusal\n\nWhen the LLM refuses to respond, a `ModelRefusalError` is raised:\n\n```python\nif refusal:\n    refusal_error = ModelRefusalError(refusal)\n    run_error_data = build_run_error_data(...)\n```\n\n### Tool Activity Tracking\n\nThe system tracks tool usage even when no messages are generated:\n\n```python\nhas_tool_activity_without_message = not message_items and bool(\n    processed_response.tools_used\n)\n```\n\n## 
Multi-Agent Patterns\n\n### Hierarchical Agents\n\n```mermaid\ngraph TD\n    O[Orchestrator] --> S[Search Agent]\n    O --> A[Analysis Agent]\n    O --> W[Writer Agent]\n    S --> R[Research Results]\n    A --> R\n    A --> D[Data Insights]\n    W --> R\n    W --> D\n```\n\n### Parallel Execution\n\nAgents can execute in parallel for independent tasks:\n\n```python\n# Multiple search agents running concurrently\nsearch_tasks = [search_agent.run(query) for query in queries]\nresults = await asyncio.gather(*search_tasks)\n```\n\n## Best Practices\n\n1. **Context Management**: Use generic `Agent[TContext]` with custom context classes for type safety\n2. **Handoff Design**: Create focused agents with clear responsibilities and minimal handoffs\n3. **Tool Organization**: Group related tools into toolkits for better organization\n4. **History Filtering**: Use `input_filter` in handoffs to prevent context overflow\n5. **Error Handling**: Always handle `ModelRefusalError` and tool execution failures\n\n## Related Components\n\n| Component | File | Relationship |\n|-----------|------|--------------|\n| MCP Server | `src/agents/mcp/server.py` | Provides external tool access |\n| Guardrails | `src/agents/guardrails.py` | Input/output validation |\n| Streaming | `src/agents/streaming.py` | Real-time output |\n| Tracing | `src/agents/tracing.py` | Execution monitoring |\n\n---\n\n<a id='tools'></a>\n\n## Tools\n\n### 相关页面\n\n相关主题：[Agents](#agents), [Guardrails](#guardrails)\n\n<details>\n<summary>相关源码文件</summary>\n\n以下源码文件用于生成本页说明：\n\n- [src/agents/tool.py](https://github.com/openai/openai-agents-python/blob/main/src/agents/tool.py)\n- [src/agents/tool_context.py](https://github.com/openai/openai-agents-python/blob/main/src/agents/tool_context.py)\n- [src/agents/agent_tool_state.py](https://github.com/openai/openai-agents-python/blob/main/src/agents/agent_tool_state.py)\n- [src/agents/editor.py](https://github.com/openai/openai-agents-python/blob/main/src/agents/editor.py)\n- 
[src/agents/computer.py](https://github.com/openai/openai-agents-python/blob/main/src/agents/computer.py)\n- [src/agents/apply_diff.py](https://github.com/openai/openai-agents-python/blob/main/src/agents/apply_diff.py)\n</details>\n\n# Tools\n\n## Overview\n\nTools in the OpenAI Agents Python SDK enable AI agents to interact with external systems, execute code, manipulate files, and perform actions in isolated environments. The tools system provides a structured way for agents to extend their capabilities beyond pure text generation by calling functions, accessing resources, and performing complex operations.\n\nThe SDK implements a tool abstraction that wraps callable functions with metadata, descriptions, and execution logic. When an agent decides to use a tool, the SDK handles the invocation, manages the context, processes results, and returns responses to the agent for further processing.\n\n工具系统支持多种工具类型，从简单的函数调用到复杂的沙箱执行环境。工具可以在初始化时配置各种选项，包括名称、描述、参数模式等，并且可以与代理的批准机制和防护栏系统集成。\n\n## Core Tool Architecture\n\n### Tool Base Class\n\nThe foundation of the tools system is the `Tool` class defined in `src/agents/tool.py`. This abstract base class defines the interface that all tools must implement, ensuring consistent behavior across different tool types.\n\n```mermaid\ngraph TD\n    A[Tool Base Class] --> B[FunctionTool]\n    A --> C[FileSearchTool]\n    A --> D[ComputerTool]\n    A --> E[WebSearchTool]\n    A --> F[Sandbox Agent Tools]\n```\n\nEach tool implementation must provide:\n- A unique name identifier\n- A description for the LLM to understand tool purpose\n- Parameter schema for function calling\n- Execution logic in an `invoke` or `acall` method\n\n### Tool Interface\n\nThe tool interface follows a standard pattern where each tool is defined with metadata that allows the LLM to understand when and how to use it. 
Tools can be synchronous or asynchronous, supporting both simple function calls and complex operations that require I/O operations.\n\n工具的关键属性包括：\n\n| Property | Type | Description |\n|----------|------|-------------|\n| `name` | `str` | Unique identifier for the tool |\n| `description` | `str` | Natural language description for LLM |\n| `parameters` | `dict` | JSON Schema for tool arguments |\n| `strict` | `bool` | Whether to enforce parameter validation |\n\n资料来源：[src/agents/tool.py:1-50]()\n\n## Built-in Tool Types\n\n### FunctionTool\n\n`FunctionTool` is the most common tool type, wrapping a Python function with tool metadata. It allows developers to expose arbitrary Python functions as tools that agents can call.\n\n```python\nfrom agents import FunctionTool\n\ndef calculate_budget(items: list[str]) -> float:\n    # Implementation\n    return total\n\nbudget_tool = FunctionTool(\n    name=\"calculate_budget\",\n    description=\"Calculate the total budget for a list of items\",\n    params_json_schema={...},\n    handle_invoke=calculate_budget\n)\n```\n\n### File and Editor Tools\n\nThe SDK provides specialized tools for file operations. The `FileSearchTool` enables searching through file contents, while editor tools provide controlled file manipulation capabilities.\n\n资料来源：[src/agents/editor.py:1-100]()\n\n#### Editor Tool Capabilities\n\n| Operation | Description |\n|-----------|-------------|\n| `read` | Read file contents |\n| `write` | Write content to files |\n| `edit` | Modify existing files |\n| `glob` | Find files by pattern |\n| `ls` | List directory contents |\n| `mv` | Move/rename files |\n| `rm` | Delete files |\n\n### Computer Tool\n\nThe `ComputerTool` enables agents to interact with a virtualized computer environment. 
This is particularly useful for tasks requiring UI automation, screenshot analysis, and keyboard/mouse control.\n\n资料来源：[src/agents/computer.py:1-100]()\n\nThe Computer Tool provides:\n\n- **Screen Capture**: Take screenshots of the virtual display\n- **Mouse Control**: Move cursor, click, scroll operations\n- **Keyboard Control**: Type text, press keys and key combinations\n- **Process Management**: Launch and interact with applications\n\n```mermaid\ngraph LR\n    A[Agent Decision] --> B[Computer Tool Action]\n    B --> C{Screen Capture?}\n    C -->|Yes| D[Screenshot Analysis]\n    C -->|No| E[Execute Action]\n    D --> F[Observation Result]\n    E --> G[Action Result]\n    F --> H[Agent Processing]\n    G --> H\n```\n\n### ApplyDiff Tool\n\nThe `ApplyDiff` tool provides efficient file modification capabilities using diff-based operations. Instead of replacing entire files, it applies targeted changes, making it more efficient for large files and reducing the risk of unintended modifications.\n\n资料来源：[src/agents/apply_diff.py:1-100]()\n\n## Tool Context and State Management\n\n### Tool Context\n\nTool context (`tool_context`) provides runtime information to tools during execution. It encapsulates the current run state, session information, and access to shared resources.\n\n资料来源：[src/agents/tool_context.py:1-100]()\n\n```mermaid\ngraph TD\n    A[Tool Execution] --> B[ToolContext]\n    B --> C[RunContext]\n    B --> D[Session]\n    B --> E[Store Settings]\n    C --> F[Current Agent]\n    C --> G[User Context]\n```\n\n### Agent Tool State\n\nThe `AgentToolState` manages tool-related state within an agent's execution context. 
This includes tracking tool usage, maintaining state across tool calls, and managing tool-specific configurations.\n\n资料来源：[src/agents/agent_tool_state.py:1-100]()\n\nKey responsibilities include:\n- Tracking which tools have been invoked\n- Maintaining state between sequential tool calls\n- Managing tool-specific configuration options\n- Handling tool result caching when appropriate\n\n## Tool Configuration\n\n### Tool Parameters\n\nTools are configured with JSON Schema definitions that describe their expected parameters. This schema serves dual purposes:\n\n1. **LLM Understanding**: Helps the model generate correct tool calls\n2. **Validation**: Ensures incoming parameters meet requirements\n\n```python\nparams_json_schema = {\n    \"type\": \"object\",\n    \"properties\": {\n        \"query\": {\n            \"type\": \"string\",\n            \"description\": \"Search query string\"\n        },\n        \"limit\": {\n            \"type\": \"integer\",\n            \"description\": \"Maximum results to return\",\n            \"default\": 10\n        }\n    },\n    \"required\": [\"query\"]\n}\n```\n\n### Tool Options\n\n| Option | Description | Default |\n|--------|-------------|---------|\n| `name` | Tool identifier | Function name |\n| `description` | LLM-facing description | Docstring |\n| `params_json_schema` | Parameter schema | Auto-generated |\n| `strict` | Enforce schema strictly | `False` |\n| `require_approval` | Require human approval | `None` |\n\n## Tool Guardrails\n\n### Input Guardrails\n\nInput guardrails validate tool parameters before execution. 
They provide an opportunity to inspect, modify, or reject tool calls based on custom logic.\n\n```python\nasync def validate_search_params(\n    ctx: RunContextWrapper,\n    tool: MCPTool,\n    params: dict\n) -> InputGuardrailResult:\n    # Custom validation logic\n    if contains_prohibited_terms(params.get(\"query\")):\n        return InputGuardrailResult(\n            did_pass=False,\n            message=\"Query contains prohibited content\"\n        )\n    return InputGuardrailResult(did_pass=True)\n```\n\n### Output Guardrails\n\nOutput guardrails validate tool results after execution. They ensure that tool outputs meet safety, formatting, or content requirements before being returned to the agent.\n\n资料来源：[src/agents/items.py:50-100]()\n\n## Tool Filtering\n\nThe SDK supports filtering which tools are exposed to agents. This is particularly useful when:\n\n- Limiting agent capabilities for security\n- Testing specific tool behaviors\n- Implementing role-based access control\n\n资料来源：[examples/mcp/tool_filter_example/README.md]()\n\n```python\n# Static tool filter\ntool_filter = [\"filesystem_read\", \"filesystem_write\"]\n\n# Dynamic tool filter\nasync def dynamic_filter(\n    ctx: RunContextWrapper,\n    agent: Agent,\n    tool: Tool\n) -> bool:\n    return tool.name in allowed_tools\n```\n\n## Integration with Agents\n\n### Adding Tools to Agents\n\nTools are added to agents through the agent's initialization or configuration:\n\n```python\nagent = Agent(\n    name=\"research_agent\",\n    tools=[\n        web_search_tool,\n        file_search_tool,\n        custom_function_tool\n    ],\n    instructions=\"You are a research assistant...\"\n)\n```\n\n### Tool Execution Flow\n\n```mermaid\nsequenceDiagram\n    participant Agent\n    participant SDK\n    participant Tool\n    participant External\n\n    Agent->>SDK: Request tool execution\n    SDK->>Tool: Validate parameters\n    Tool->>Tool: Apply input guardrails\n    Tool->>External: Execute operation\n    
External-->>Tool: Return result\n    Tool->>Tool: Apply output guardrails\n    Tool-->>SDK: Return processed result\n    SDK-->>Agent: Provide tool result\n```\n\n## Human-in-the-Loop with Tools\n\n### Approval Requirements\n\nTools can be configured to require human approval before execution. When enabled, the SDK pauses tool execution and awaits human confirmation.\n\n```python\ntool = FunctionTool(\n    name=\"send_email\",\n    handle_invoke=send_email,\n    require_approval=\"always\"\n)\n```\n\n资料来源：[src/agents/mcp/server.py:100-150]()\n\n### Approval Resume\n\nAfter human approval or rejection, the SDK resumes execution with the approval result:\n\n```python\nawait runner.resume(\n    run_id=run_id,\n    approval_result=ApprovalResult(approved=True)\n)\n```\n\n## Summary\n\nThe Tools system in the OpenAI Agents Python SDK provides a flexible, extensible framework for adding capabilities to AI agents. Key features include:\n\n- **Abstraction**: Consistent interface for diverse tool types\n- **Composition**: Tools can be combined and filtered dynamically\n- **Safety**: Built-in guardrails and approval mechanisms\n- **Context Awareness**: Runtime context enables stateful tool interactions\n- **Integration**: Seamless integration with the agent execution model\n\nBy leveraging these tools, developers can create sophisticated agents that can search the web, manipulate files, execute code, interact with computer interfaces, and integrate with external services through protocols like MCP.\n\n---\n\n<a id='guardrails'></a>\n\n## Guardrails\n\n### 相关页面\n\n相关主题：[Agents](#agents), [Tools](#tools)\n\n<details>\n<summary>相关源码文件</summary>\n\n以下源码文件用于生成本页说明：\n\n- [src/agents/guardrail.py](https://github.com/openai/openai-agents-python/blob/main/src/agents/guardrail.py)\n- [src/agents/tool_guardrails.py](https://github.com/openai/openai-agents-python/blob/main/src/agents/tool_guardrails.py)\n- 
[src/agents/run_internal/guardrails.py](https://github.com/openai/openai-agents-python/blob/main/src/agents/run_internal/guardrails.py)\n</details>\n\n# Guardrails\n\nGuardrails provide a security and validation layer in the agents framework, enabling developers to intercept, validate, and control both incoming inputs and outgoing outputs at various stages of agent execution. They serve as programmable checkpoints that can enforce policy compliance, prevent data leakage, block harmful content, and ensure operational safety across the entire agent runtime.\n\n## Overview\n\nThe guardrail system operates at multiple checkpoints during agent execution:\n\n```mermaid\ngraph TD\n    A[User Input] --> B[Input Guardrails]\n    B --> C[Agent Processing]\n    C --> D[Tool Call]\n    D --> E[Tool Input Guardrails]\n    E --> F[Tool Execution]\n    F --> G[Tool Output Guardrails]\n    G --> H[Response Generation]\n    H --> I[Output Guardrails]\n    I --> J[Final Output]\n    \n    B -.->|Block/Modify| A\n    E -.->|Block/Modify| D\n    G -.->|Block/Modify| F\n    I -.->|Block/Modify| H\n```\n\nGuardrails are implemented as pluggable components that can be attached to agents, individual tools, or configured globally. Each guardrail can define one of three behavioral responses when triggered:\n\n| Behavior Type | Description |\n|---------------|-------------|\n| `raise_exception` | Throws a tripwire exception, halting execution |\n| `reject_content` | Replaces the content with a custom rejection message |\n| `filter` | Removes or sanitizes the problematic content (planned) |\n\n资料来源：[src/agents/run_internal/tool_execution.py:1-50]()\n\n## Types of Guardrails\n\n### Input Guardrails\n\nInput guardrails validate user-provided input before it reaches the agent. 
They receive the raw input and can inspect, modify, or reject it based on custom logic.\n\n```mermaid\nsequenceDiagram\n    participant User\n    participant Runner\n    participant InputGuardrail\n    participant Agent\n    \n    User->>Runner: User Input\n    Runner->>InputGuardrail: Run input through guardrails\n    alt Guardrail triggers\n        InputGuardrail->>Runner: GuardrailOutput with behavior\n        alt raise_exception\n            Runner-->>User: GuardrailTripwireTriggered Error\n        else reject_content\n            Runner->>Agent: Modified/Sanitized input\n        end\n    else Pass through\n        InputGuardrail->>Runner: GuardrailOutput with pass behavior\n        Runner->>Agent: Original input\n    end\n```\n\n资料来源：[src/agents/run_internal/guardrails.py:1-30]()\n\n### Tool Input Guardrails\n\nTool input guardrails validate the arguments passed to tool calls before execution. They have access to the tool context, agent information, and the raw tool arguments.\n\n```python\n@dataclass\nclass ToolInputGuardrailData:\n    context: ToolContext[Any]\n    agent: Agent[Any]\n    input: Any  # The raw tool arguments\n```\n\n资料来源：[src/agents/tool_guardrails.py:1-20]()\n\n### Tool Output Guardrails\n\nTool output guardrails validate the results returned from tool execution before those results are processed further. They can inspect, filter, or reject tool outputs.\n\n```python\n@dataclass\nclass ToolOutputGuardrailData:\n    context: ToolContext[Any]\n    agent: Agent[Any]\n    output: Any  # The raw tool result\n```\n\n资料来源：[src/agents/tool_guardrails.py:1-20]()\n\n### Output Guardrails\n\nOutput guardrails validate the agent's final response before it is returned to the user. 
These operate on the completed message stream and can perform final content filtering or policy checks.\n\n## GuardrailResult Structure\n\nEach guardrail execution produces a `GuardrailOutput` result that defines the subsequent action:\n\n```python\n@dataclass\nclass GuardrailOutput:\n    content_filtered: bool\n    policy_name: str\n    policy_version: str\n    content: str | None\n    behavior: dict[str, Any]\n```\n\nThe `behavior` dictionary must contain at minimum a `type` key specifying one of the supported behavior types.\n\n资料来源：[src/agents/guardrail.py:1-50]()\n\n## Configuration\n\n### Agent-Level Guardrail Configuration\n\nGuardrails can be attached directly to an agent instance:\n\n```python\nfrom agents import Agent, Guardrail\n\nagent = Agent(\n    name=\"secure_agent\",\n    instructions=\"You are a helpful assistant\",\n    input_guardrails=[\n        Guardrail(guardrail_name=\"content_filter\"),\n        Guardrail(guardrail_name=\"pii_detector\"),\n    ],\n    output_guardrails=[\n        Guardrail(guardrail_name=\"safety_check\"),\n    ],\n)\n```\n\n### Tool-Level Guardrail Configuration\n\nIndividual tools can have their own guardrails:\n\n```python\nfrom agents import function_tool, ToolInputGuardrail, ToolOutputGuardrail\n\n@function_tool(\n    tool_input_guardrails=[input_check_guardrail],\n    tool_output_guardrails=[output_check_guardrail],\n)\ndef sensitive_operation(x: str) -> str:\n    return process(x)\n```\n\n资料来源：[src/agents/tool.py:1-30]()\n\n### Guardrail Behavior Configuration\n\nGuardrails can be configured with different tripwire behaviors:\n\n| Parameter | Type | Description |\n|-----------|------|-------------|\n| `guardrail_name` | `str` | Unique identifier for the guardrail |\n| `on_fail` | `GuardrailFailureMode` | Behavior when triggered |\n| `error_message` | `str` | Custom error message for exceptions |\n| `log` | `bool` | Whether to log guardrail triggers |\n\n## Tracing and Observability\n\nGuardrail execution is 
automatically traced using the observability framework:\n\n```mermaid\ngraph LR\n    A[Guardrail Trigger] --> B[guardrail_span]\n    B --> C[Record triggered status]\n    B --> D[Capture span data]\n    D --> E[Export to trace provider]\n    \n    C -->|True| F[Mark span as triggered]\n    C -->|False| G[Continue normally]\n```\n\nThe `guardrail_span` function creates spans for monitoring:\n\n```python\ndef guardrail_span(\n    name: str,\n    triggered: bool = False,\n    span_id: str | None = None,\n    parent: Trace | Span[Any] | None = None,\n    disabled: bool = False,\n) -> Span[GuardrailSpanData]:\n```\n\n资料来源：[src/agents/tracing/create.py:1-40]()\n\n## Execution Flow\n\n### Tool Guardrail Execution\n\nTool guardrails are executed within the tool execution pipeline:\n\n```mermaid\nflowchart TD\n    A[Tool Call Invoked] --> B{Input Guardrails exist?}\n    B -->|Yes| C[Execute Input Guardrails]\n    C --> D{Any trigger raise_exception?}\n    D -->|Yes| E[Raise ToolInputGuardrailTripwireTriggered]\n    D -->|No| F{Any trigger reject_content?}\n    F -->|Yes| G[Replace input with message]\n    F -->|No| H[Execute Tool]\n    H --> I{Output Guardrails exist?}\n    I -->|Yes| J[Execute Output Guardrails]\n    J --> K{Any trigger raise_exception?}\n    K -->|Yes| L[Raise ToolOutputGuardrailTripwireTriggered]\n    K -->|No| M{Any trigger reject_content?}\n    M -->|Yes| N[Replace output with message]\n    M -->|No| O[Return result]\n```\n\n资料来源：[src/agents/run_internal/tool_execution.py:50-100]()\n\n### Guardrail Tripwire Exceptions\n\nWhen a guardrail triggers with `raise_exception` behavior, specific exception types are raised:\n\n| Exception Type | Triggered By |\n|---------------|--------------|\n| `ToolInputGuardrailTripwireTriggered` | Tool input guardrail rejection |\n| `ToolOutputGuardrailTripwireTriggered` | Tool output guardrail rejection |\n\nThese exceptions contain both the guardrail reference and the output that triggered it, enabling detailed error 
handling and debugging.\n\n## Implementation Pattern\n\n### Creating a Custom Guardrail\n\n```python\nfrom agents import Guardrail, RunContextWrapper\nfrom agents.guardrail import (\n    GuardrailOutput,\n    InputGuardrailOutputData,\n    OutputGuardrailOutputData,\n)\n\nasync def my_guardrail(\n    context: RunContextWrapper,\n    input_data: InputGuardrailOutputData,\n) -> GuardrailOutput:\n    text = input_data.agents_input\n    if contains_problematic_content(text):\n        return GuardrailOutput(\n            content_filtered=True,\n            policy_name=\"my_policy\",\n            policy_version=\"1.0\",\n            content=\"Content filtered due to policy violation\",\n            behavior={\"type\": \"reject_content\", \"message\": \"Content not allowed\"},\n        )\n    return GuardrailOutput(\n        content_filtered=False,\n        policy_name=\"my_policy\",\n        policy_version=\"1.0\",\n        content=None,\n        behavior={\"type\": \"pass\"},\n    )\n\nguardrail = Guardrail(\n    guardrail_name=\"my_custom_guardrail\",\n    guardrail_function=my_guardrail,\n)\n```\n\n### Using with FunctionTool\n\n```python\nfrom agents import function_tool, ToolInputGuardrail, ToolOutputGuardrail\n\n@function_tool(\n    tool_input_guardrails=[\n        ToolInputGuardrail(guardrail_function=validate_json_input),\n    ],\n    tool_output_guardrails=[\n        ToolOutputGuardrail(guardrail_function=validate_output_schema),\n    ],\n)\ndef process_data(input: str) -> dict:\n    # Tool implementation\n    pass\n```\n\n## Best Practices\n\n1. **Defense in Depth**: Layer multiple guardrails at different checkpoints for comprehensive coverage\n2. **Fail-Safe Defaults**: Configure guardrails to fail closed (reject) rather than open (pass) when uncertain\n3. **Logging**: Enable guardrail logging for security auditing and debugging\n4. **Performance**: Keep guardrail logic lightweight to avoid introducing latency\n5. 
**Idempotency**: Ensure guardrails produce consistent results for the same input\n\n## See Also\n\n- [Agents Overview](../agents/overview) — General agent architecture\n- [Tools](../tools/overview) — Tool implementation and configuration\n- [Tracing](../tracing/overview) — Observability and monitoring\n- [Handoffs](../handoffs/overview) — Multi-agent handoff mechanisms\n\n---\n\n<a id='handoffs'></a>\n\n## Handoffs\n\n### 相关页面\n\n相关主题：[Agents](#agents), [Agents as Tools](#agents-as-tools), [Run Loop and Execution](#run-loop)\n\n<details>\n<summary>相关源码文件</summary>\n\n以下源码文件用于生成本页说明：\n\n- [src/agents/handoffs/__init__.py](https://github.com/openai/openai-agents-python/blob/main/src/agents/handoffs/__init__.py)\n- [src/agents/handoffs/history.py](https://github.com/openai/openai-agents-python/blob/main/src/agents/handoffs/history.py)\n- [src/agents/extensions/handoff_filters.py](https://github.com/openai/openai-agents-python/blob/main/src/agents/extensions/handoff_filters.py)\n- [src/agents/extensions/handoff_prompt.py](https://github.com/openai/openai-agents-python/blob/main/src/agents/extensions/handoff_prompt.py)\n</details>\n\n# Handoffs\n\n## Overview\n\nHandoffs in the OpenAI Agents Python SDK enable seamless transfer of control and conversation context between different agents. When an agent determines that a task should be handled by another agent, a handoff executes the transition, optionally filtering or transforming the input data before the receiving agent begins processing.\n\nThe handoff mechanism serves as the backbone for multi-agent architectures, allowing complex workflows where specialized agents handle specific subtasks while maintaining coherent conversation state across transitions.\n\n## Core Concepts\n\n### What is a Handoff?\n\nA handoff is a structured mechanism that transfers control from one agent to another. 
It encapsulates:\n\n- The destination agent\n- Tool configuration for invoking the handoff\n- Optional input filtering logic\n- Optional type validation for handoff arguments\n- Enable/disable conditions\n\n资料来源：[src/agents/handoffs/__init__.py:1-100]()\n\n### The Handoff Class\n\nThe `Handoff` class is the primary abstraction for defining agent-to-agent transfers:\n\n```python\nclass Handoff(Generic[TAgent, TContext]):\n    name: str\n    description: str\n    input_json_schema: dict[str, Any]\n    on_invoke_handoff: Callable[[RunContextWrapper[Any], str], Awaitable[TAgent]]\n    agent_name: str\n    input_filter: HandoffInputFilter | None = None\n    is_enabled: bool | Callable[[RunContextWrapper[Any], Agent[TContext]], bool] = True\n```\n\n资料来源：[src/agents/handoffs/__init__.py:100-130]()\n\n### HandoffInputData\n\nWhen a handoff is invoked, it receives and processes `HandoffInputData`:\n\n| Field | Type | Description |\n|-------|------|-------------|\n| `input_history` | `list[InputItem]` | Conversation history up to the handoff point |\n| `pre_handoff_items` | `list[RunItem]` | Run items generated before handoff |\n| `input_items` | `list[InputItem]` | Input items to pass to the next agent |\n| `new_items` | `list[RunItem]` | New items to add to the receiving agent's context |\n\n资料来源：[src/agents/handoffs/__init__.py:50-80]()\n\n## Architecture\n\n### Handoff Flow\n\n```mermaid\ngraph TD\n    A[Current Agent] -->|Determines handoff needed| B[Handoff Tool Call]\n    B --> C{is_enabled check}\n    C -->|Enabled| D[on_invoke_handoff]\n    C -->|Disabled| E[Hide from LLM]\n    D --> F[Input Filter Processing]\n    F --> G{HandoffInputData}\n    G --> H[Next Agent Context]\n    H --> I[Receiving Agent]\n    \n    J[Type Validation] -.->|if input_type provided| F\n    K[History Nesting] -.->|if nest_handoff_history enabled| G\n```\n\n### Agent Hierarchy with Handoffs\n\n```mermaid\ngraph TD\n    A[Orchestrator Agent] -->|handoff| B[Research Agent]\n    A -->|handoff| 
C[Writer Agent]\n    A -->|handoff| D[Review Agent]\n    B -->|handoff| E[Web Search Agent]\n    B -->|handoff| F[Data Analysis Agent]\n    C -->|handoff| D\n```\n\n## Configuration Options\n\n### Handoff Constructor Parameters\n\n| Parameter | Type | Required | Default | Description |\n|-----------|------|----------|---------|-------------|\n| `agent` | `Agent[TContext]` | Yes | - | The destination agent |\n| `name` | `str` | No | agent.name | Custom name for the handoff tool |\n| `description` | `str` | No | agent.description | Tool description shown to the model |\n| `tool_description_override` | `str` | No | None | Override the tool description |\n| `on_handoff` | `Callable` | No | None | Side effect function executed on handoff |\n| `input_type` | `type` | No | None | Type for validating handoff arguments |\n| `input_filter` | `HandoffInputFilter` | No | None | Function to filter/transform inputs |\n| `nest_handoff_history` | `bool` | No | None | Override run-level history nesting setting |\n| `is_enabled` | `bool \\| Callable` | No | True | Whether the handoff is available |\n\n资料来源：[src/agents/handoffs/__init__.py:150-200]()\n\n### Input Type Validation\n\nWhen `input_type` is provided, the model-generated JSON arguments are validated:\n\n```python\nif input_type is not None and on_handoff is None:\n    raise UserError(\"You must provide on_handoff when input_type is provided\")\n```\n\nThe `on_handoff` callback must accept two parameters for type-validated inputs:\n\n```python\nasync def on_handoff(ctx: RunContext, data: ValidatedInputType) -> Agent:\n    ...\n```\n\n资料来源：[src/agents/handoffs/__init__.py:200-220]()\n\n### Enabling/Disabling Handoffs\n\nHandoffs can be conditionally enabled using the `is_enabled` parameter:\n\n```python\n# Static boolean\nhandoff = Handoff(agent=agent, is_enabled=False)\n\n# Dynamic condition\nhandoff = Handoff(\n    agent=agent,\n    is_enabled=lambda ctx, current_agent: ctx.user_id in ADMIN_USERS\n)\n```\n\nDisabled 
handoffs are hidden from the LLM at runtime.\n\n资料来源：[src/agents/handoffs/__init__.py:180-190]()\n\n## Input Filtering\n\n### HandoffInputFilter\n\nThe `input_filter` function receives the entire conversation history and can modify what the next agent receives:\n\n```python\nHandoffInputFilter = Callable[\n    [HandoffInputData], HandoffInputData | Awaitable[HandoffInputData]\n]\n```\n\n### Common Filtering Patterns\n\n| Pattern | Use Case |\n|---------|----------|\n| Remove sensitive data | Strip user credentials before handoff |\n| Context summarization | Condense long conversations |\n| Tool filtering | Remove tools not needed by next agent |\n| History truncation | Keep only recent relevant items |\n\n### Example Input Filter\n\n```python\ndef filter_sensitive_inputs(data: HandoffInputData) -> HandoffInputData:\n    # Remove tool call outputs containing sensitive info\n    filtered_history = [\n        item for item in data.input_history\n        if not contains_sensitive(item)\n    ]\n    return dataclasses.replace(data, input_history=filtered_history)\n```\n\n资料来源：[src/agents/extensions/handoff_filters.py]()\n\n## History Management\n\n### Nesting Conversation History\n\nWhen `nest_handoff_history=True`, the previous agent's conversation is summarized before being passed to the next agent:\n\n```python\ndef nest_handoff_history(\n    handoff_input_data: HandoffInputData,\n    *,\n    history_mapper: HandoffHistoryMapper | None = None,\n) -> HandoffInputData:\n    \"\"\"Summarize the previous transcript for the next agent.\"\"\"\n```\n\nThis prevents context overflow and provides the new agent with a concise summary rather than full conversation history.\n\n资料来源：[src/agents/handoffs/history.py:40-60]()\n\n### Conversation History Wrappers\n\nDefault markers wrap nested conversation summaries:\n\n| Marker | Default Value |\n|--------|---------------|\n| Start | `<CONVERSATION HISTORY>` |\n| End | `</CONVERSATION HISTORY>` |\n\nThese can be 
customized:\n\n```python\nset_conversation_history_wrappers(\n    start=\"<PREVIOUS AGENT TRANSCRIPT>\",\n    end=\"</PREVIOUS AGENT TRANSCRIPT>\"\n)\n```\n\n资料来源：[src/agents/handoffs/history.py:20-40]()\n\n## Creating Handoffs\n\n### Basic Handoff\n\n```python\nfrom agents import Agent, Handoff, Runner\n\nagent_a = Agent(name=\"Agent A\", instructions=\"...\")\nagent_b = Agent(name=\"Agent B\", instructions=\"...\")\n\n# Create handoff\nhandoff_to_b = Handoff(name=\"transfer_to_b\", agent=agent_b)\n\n# Add to source agent\nagent_a.handoffs.append(handoff_to_b)\n```\n\n### Handoff with Callbacks\n\n```python\nasync def on_transfer_to_b(ctx: RunContext, input_data: str) -> Agent:\n    # Log the handoff\n    logger.info(f\"Handoff triggered by user: {ctx.user_id}\")\n    # Return destination agent\n    return agent_b\n\nhandoff_to_b = Handoff(\n    agent=agent_b,\n    name=\"transfer_to_b\",\n    on_handoff=on_transfer_to_b\n)\n```\n\n### Handoff with Type Validation\n\n```python\nfrom pydantic import BaseModel\n\nclass TransferData(BaseModel):\n    reason: str\n    priority: int = 1\n\nasync def handle_transfer(ctx: RunContext, data: TransferData) -> Agent:\n    if data.priority > 5:\n        return urgent_agent\n    return standard_agent\n\nhandoff = Handoff(\n    agent=standard_agent,\n    input_type=TransferData,\n    on_handoff=handle_transfer\n)\n```\n\n## Handoffs in the Run Loop\n\n### Turn Resolution with Handoffs\n\nWhen a handoff is triggered during agent execution:\n\n```mermaid\nsequenceDiagram\n    participant Agent as Current Agent\n    participant Run as Run Loop\n    participant Handoff as Handoff Handler\n    \n    Agent->>Run: Generate response with handoff tool call\n    Run->>Handoff: Process NextStepHandoff\n    Handoff->>Handoff: Validate input_type if provided\n    Handoff->>Handoff: Execute input_filter\n    Handoff->>Handoff: Call on_handoff callback\n    Handoff-->>Run: Return new agent and filtered input\n    Run->>Run: Reset current 
agent\n    Run->>Run: Start next turn with new agent\n```\n\n资料来源：[src/agents/run.py:200-250]()\n\n### Handoff Result Processing\n\nThe run loop handles handoff transitions:\n\n```python\nelif isinstance(turn_result.next_step, NextStepHandoff):\n    current_agent = cast(Agent[TContext], turn_result.next_step.new_agent)\n    # Next agent starts with the nested/filtered input\n    starting_input = turn_result.original_input\n    original_input = turn_result.original_input\n    should_run_agent_start_hooks = True\n```\n\n资料来源：[src/agents/run.py:230-245]()\n\n## Prompt Integration\n\n### Handoff Tool Representation\n\nHandoffs appear as tools to the LLM with descriptions generated from the handoff configuration:\n\n```python\n# Tool name format\nf\"transfer_to_{agent_name}\"\n\n# Tool description includes\n- Handoff name\n- Agent description\n- Input schema if defined\n- Custom tool_description_override if provided\n```\n\n资料来源：[src/agents/extensions/handoff_prompt.py]()\n\n### Prompt Instructions\n\nThe system prompt can include handoff guidance:\n\n```\n- When a task matches another agent's expertise, use the handoff tool\n- Explain the reason for handoff in your response\n- Preserve relevant context during transfer\n```\n\n## Best Practices\n\n### Design Principles\n\n1. **Clear Agent Specialization**: Each agent should have a distinct responsibility\n2. **Minimal Handoff Arguments**: Pass only essential data, not entire conversations\n3. **Meaningful Handoff Names**: Use descriptive names that indicate the destination\n4. 
**Appropriate History Management**: Enable nesting for long conversations\n\n### Error Handling\n\n| Scenario | Recommended Approach |\n|----------|---------------------|\n| Handoff to unavailable agent | Check `is_enabled` before showing to model |\n| Invalid input type | Use Pydantic validation with clear error messages |\n| Filter failure | Return original input with warning |\n\n### Performance Considerations\n\n- Avoid complex filters that run synchronously on large histories\n- Use `is_enabled` callbacks to prevent unnecessary tool calls\n- Consider disabling history nesting for high-frequency handoffs\n\n## Related Components\n\n| Component | File | Purpose |\n|-----------|------|---------|\n| `Handoff` class | `src/agents/handoffs/__init__.py` | Core handoff definition |\n| `HandoffInputData` | `src/agents/handoffs/__init__.py` | Input data structure |\n| `nest_handoff_history` | `src/agents/handoffs/history.py` | History summarization |\n| `HandoffInputFilter` | `src/agents/extensions/handoff_filters.py` | Input filtering utilities |\n| Handoff prompt integration | `src/agents/extensions/handoff_prompt.py` | Prompt rendering |\n\n## Summary\n\nHandoffs provide a robust mechanism for multi-agent orchestration in the OpenAI Agents Python SDK. 
Key capabilities include:\n\n- **Structured Transfer**: Defined handoff contracts with optional type validation\n- **Flexible Input Management**: Filtering and transformation before agent handoff\n- **History Control**: Nesting or truncating conversation context\n- **Conditional Execution**: Enable/disable based on runtime conditions\n- **Callback Support**: Side effects and logging during transitions\n\nThese mechanisms enable complex agent workflows while maintaining clean separation of concerns and manageable context sizes.\n\n---\n\n<a id='agents-as-tools'></a>\n\n## Agents as Tools\n\n### 相关页面\n\n相关主题：[Handoffs](#handoffs), [Agents](#agents)\n\n<details>\n<summary>相关源码文件</summary>\n\n以下源码文件用于生成本页说明：\n\n- [examples/agent_patterns/agents_as_tools.py](https://github.com/openai/openai-agents-python/blob/main/examples/agent_patterns/agents_as_tools.py)\n- [examples/agent_patterns/agents_as_tools_conditional.py](https://github.com/openai/openai-agents-python/blob/main/examples/agent_patterns/agents_as_tools_conditional.py)\n- [examples/agent_patterns/agents_as_tools_structured.py](https://github.com/openai/openai-agents-python/blob/main/examples/agent_patterns/agents_as_tools_structured.py)\n- [src/agents/agent.py](https://github.com/openai/openai-agents-python/blob/main/src/agents/agent.py)\n- [src/agents/run.py](https://github.com/openai/openai-agents-python/blob/main/src/agents/run.py)\n- [src/agents/extensions/visualization.py](https://github.com/openai/openai-agents-python/blob/main/src/agents/extensions/visualization.py)\n- [examples/sandbox/handoffs.py](https://github.com/openai/openai-agents-python/blob/main/examples/sandbox/handoffs.py)\n</details>\n\n# Agents as Tools\n\nAgents as Tools is a powerful architectural pattern in the openai-agents-python library that enables one agent to be invoked as a callable tool by another agent. 
This pattern allows for sophisticated multi-agent orchestration where specialized agents can be dynamically called with specific inputs, returning structured results to the calling agent.\n\n## Overview\n\nIn the traditional agent architecture, agents operate as standalone units that receive input, execute tasks, and return results. The \"Agents as Tools\" pattern extends this by wrapping agents inside function tool abstractions, enabling:\n\n- **Dynamic Agent Invocation**: Agents can be called like functions within other agents' workflows\n- **Structured Inputs and Outputs**: Typed interfaces ensure consistent data exchange between agents\n- **Conditional Execution**: Agents can be invoked based on specific conditions or input patterns\n- **Parallel Tool Calls**: Multiple agents can be called simultaneously as tools\n- **Nested Architectures**: Complex hierarchies of agents calling sub-agents as tools\n\nThis pattern is particularly valuable for building research assistants, customer service systems, and specialized workflow engines where different capabilities need to be composed dynamically.\n\n## Architecture\n\n```mermaid\ngraph TD\n    subgraph \"Primary Agent\"\n        PA[Main Agent]\n        PA -->|has tools| T1[Agent-as-Tool 1]\n        PA -->|has tools| T2[Agent-as-Tool 2]\n        PA -->|has tools| Tn[Agent-as-Tool N]\n    end\n    \n    subgraph \"Wrapped Agents\"\n        T1 -->|wraps| A1[Specialized Agent 1]\n        T2 -->|wraps| A2[Specialized Agent 2]\n        Tn -->|wraps| An[Specialized Agent N]\n    end\n    \n    A1 -->|returns| T1\n    A2 -->|returns| T2\n    An -->|returns| Tn\n    T1 -->|tool result| PA\n    T2 -->|tool result| PA\n    Tn -->|tool result| PA\n```\n\n### Core Components\n\n| Component | Role | Location |\n|-----------|------|----------|\n| `Agent` | Base agent with instructions, tools, handoffs | `src/agents/agent.py` |\n| `FunctionTool` | Wraps callable functions for agent use | Tool infrastructure |\n| `Runner` | Executes 
agents and manages tool calls | `src/agents/run.py` |\n| `Handoff` | Enables agent-to-agent transfers | `src/agents/handoffs/__init__.py` |\n\n## Implementation Patterns\n\n### Basic Agent-to-Tool Conversion\n\nThe simplest form of this pattern converts an existing agent into a callable tool:\n\n```python\nfrom agents import Agent, Runner, function_tool\n\n# Create a specialized agent\nsearch_agent = Agent(\n    name=\"web_searcher\",\n    instructions=\"You are a web search expert. Search for the given query and summarize results.\",\n    tools=[web_search_tool],\n)\n\n# Convert to a function tool that the primary agent can use\n@function_tool\ndef search_tool(query: str) -> str:\n    \"\"\"Search the web for information.\"\"\"\n    result = Runner.run_sync(search_agent, input=query)\n    return result.final_output\n```\n\n### AgentTool with Structured Output\n\nFor more sophisticated scenarios, agents can be wrapped with explicit input/output schemas:\n\n```python\nfrom agents import Agent\nfrom pydantic import BaseModel\n\nclass SearchResult(BaseModel):\n    title: str\n    url: str\n    summary: str\n\nsearch_agent = Agent(\n    name=\"structured_searcher\",\n    instructions=\"Search for information and return structured results.\",\n    output_type=SearchResult,\n)\n```\n\n### Conditional Agent Invocation\n\nAgents can be configured to only be available under certain conditions:\n\n```python\nfrom agents import Agent\n\nadmin_agent = Agent(\n    name=\"admin_panel\",\n    instructions=\"Handle administrative tasks.\",\n)\n\n# Conditional enabling based on user role\ndef is_admin(context):\n    return context.user_role == \"admin\"\n\nadmin_agent.is_enabled = is_admin\n```\n\n## Usage Examples\n\n### Research Assistant Pattern\n\nA common use case is a research bot with specialized sub-agents:\n\n```mermaid\nsequenceDiagram\n    participant User\n    participant Planner as Planner Agent\n    participant Search as Search Agent (Tool)\n    participant Writer as Writer 
Agent\n    \n    User->>Planner: \"Research topic: AI trends\"\n    Planner->>Planner: Generate search queries\n    Planner->>Search: tool_call(search_queries[0])\n    Planner->>Search: tool_call(search_queries[1])\n    Planner->>Search: tool_call(search_queries[n])\n    Search-->>Planner: SearchResult\n    Planner->>Writer: Pass summaries\n    Writer-->>User: Final report\n```\n\n### Example: Agent Patterns in Code\n\nThe repository includes several agent pattern examples demonstrating this functionality:\n\n**Basic Pattern** (`examples/agent_patterns/agents_as_tools.py`):\n```python\n# Agents are wrapped as tools and called by a primary agent\nprimary_agent = Agent(\n    name=\"orchestrator\",\n    instructions=\"Coordinate specialized agents to answer user queries.\",\n    tools=[search_agent_as_tool, code_agent_as_tool],\n)\n```\n\n**Conditional Pattern** (`examples/agent_patterns/agents_as_tools_conditional.py`):\n```python\n# Agents are conditionally available based on context\nif user.is_premium:\n    primary_agent.tools.append(premium_agent_tool)\n```\n\n**Structured Pattern** (`examples/agent_patterns/agents_as_tools_structured.py`):\n```python\n# Agents return structured data types\n@function_tool\ndef get_weather(location: str) -> WeatherData:\n    \"\"\"Get weather for a location.\"\"\"\n    return Runner.run_sync(weather_agent, input=location).final_output\n```\n\n## Configuration Options\n\n### Tool Metadata Configuration\n\nWhen converting an agent to a tool, you can override the default tool behavior:\n\n| Parameter | Type | Purpose |\n|-----------|------|---------|\n| `name` | `str` | Override the tool name shown to the LLM |\n| `description` | `str` | Human-readable description of what the tool does |\n| `input_type` | `Type[BaseModel]` | Pydantic model for input validation |\n| `output_type` | `Type[BaseModel]` | Pydantic model for output schema |\n| `is_enabled` | `bool \\| Callable` | Condition for tool availability |\n\n### Agent Configuration\n\nAgents used as 
tools support standard agent parameters:\n\n| Parameter | Description |\n|-----------|-------------|\n| `instructions` | System prompt for the agent |\n| `tools` | Additional tools available to the agent |\n| `handoffs` | Agents the sub-agent can transfer to |\n| `output_type` | Expected output type |\n| `model` | Specific model to use |\n\n## Execution Flow\n\n```mermaid\nflowchart LR\n    A[Primary Agent] -->|decides to call| B[Agent-as-Tool]\n    B -->|parses input| C{Input Validation}\n    C -->|valid| D[Execute Wrapped Agent]\n    C -->|invalid| E[Return Error]\n    D -->|run agent| F[Runner.run]\n    F -->|collect results| G[Format Output]\n    G -->|return| B\n    B -->|tool result| A\n```\n\n## Integration with Handoffs\n\nThe Agents as Tools pattern complements the handoff mechanism:\n\n| Aspect | Agents as Tools | Handoffs |\n|--------|-----------------|----------|\n| Control Flow | Agent calls tool, waits for result | Agent transfers control completely |\n| State | Shared context | Fresh context for new agent |\n| Use Case | Parallel specialized tasks | Sequential role switches |\n| Return | Structured result | Handoff message |\n\n**资料来源**：[src/agents/handoffs/__init__.py](https://github.com/openai/openai-agents-python/blob/main/src/agents/handoffs/__init__.py)\n\n## Best Practices\n\n1. **Clear Tool Descriptions**: Provide explicit descriptions so the LLM knows when to invoke the agent\n2. **Typed Interfaces**: Use Pydantic models for input/output to ensure type safety\n3. **Error Handling**: Wrap agent executions in try-catch to handle failures gracefully\n4. **Context Management**: Pass relevant context to sub-agents without overwhelming them\n5. 
**Conditional Enabling**: Use `is_enabled` to control access based on user permissions\n\n## Related Patterns\n\n- **Handoffs**: Complete agent-to-agent transfer for distinct roles\n- **Multi-Agent Orchestration**: Coordinated multi-agent workflows\n- **Sandbox Agents**: Isolated execution environments for agents\n- **Guardrails**: Input/output validation for agent tool calls\n\n**资料来源**：[examples/sandbox/handoffs.py](https://github.com/openai/openai-agents-python/blob/main/examples/sandbox/handoffs.py)\n\n---\n\n<a id='run-loop'></a>\n\n## Run Loop and Execution\n\n### 相关页面\n\n相关主题：[Agents](#agents), [Sessions and Memory](#sessions)\n\n<details>\n<summary>相关源码文件</summary>\n\n以下源码文件用于生成本页说明：\n\n- [src/agents/run.py](https://github.com/openai/openai-agents-python/blob/main/src/agents/run.py)\n- [src/agents/run_config.py](https://github.com/openai/openai-agents-python/blob/main/src/agents/run_config.py)\n- [src/agents/result.py](https://github.com/openai/openai-agents-python/blob/main/src/agents/result.py)\n- [src/agents/run_internal/turn_resolution.py](https://github.com/openai/openai-agents-python/blob/main/src/agents/run_internal/turn_resolution.py)\n- [src/agents/items.py](https://github.com/openai/openai-agents-python/blob/main/src/agents/items.py)\n</details>\n\n# Run Loop and Execution\n\nThe Run Loop and Execution system is the core engine of the openai-agents-python SDK. It orchestrates the interaction between agents, language models, tools, and external systems through an iterative turn-based processing architecture.\n\n## Overview\n\nThe execution model follows a **turn-based loop** where each turn consists of:\n\n1. **Turn Preparation** - Setting up context, hooks, and session state\n2. **Model Invocation** - Calling the language model with the current input\n3. **Response Processing** - Parsing and validating model outputs\n4. **Tool Execution** - Running any tools or side effects requested by the model\n5. 
**Turn Resolution** - Determining the next step (continue, handoff, or finish)\n\n资料来源：[src/agents/run.py:1-50]()\n\n## Architecture Components\n\n### Core Execution Flow\n\n```mermaid\ngraph TD\n    A[User Input] --> B[Run Loop Entry]\n    B --> C[Turn Preparation]\n    C --> D[Call Model]\n    D --> E{Response Type?}\n    E -->|Tool Calls| F[Execute Tools]\n    E -->|Handoff| G[Switch Agent]\n    E -->|Message| H[Finalize Output]\n    F --> C\n    G --> C\n    H --> I[Return RunResult]\n```\n\n### Key Modules\n\n| Module | Purpose | Key Classes/Functions |\n|--------|---------|----------------------|\n| `run.py` | Main entry point | `run()`, `run_sync()` |\n| `run_loop.py` | Core loop logic | `run_loop()` |\n| `turn_preparation.py` | Turn setup | Input filtering, hook invocation |\n| `turn_resolution.py` | Response handling | Tool result processing, output finalization |\n| `tool_execution.py` | Tool runner | `execute_tools_and_side_effects()` |\n| `streaming.py` | Streaming support | Stream handlers |\n\n资料来源：[src/agents/run.py:1-30]()\n\n## Run Configuration\n\n### RunOptions\n\nThe `RunOptions` TypedDict defines all parameters for running an agent:\n\n```python\nclass RunOptions(TypedDict, Generic[TContext]):\n    context: NotRequired[TContext | None]\n    max_turns: NotRequired[int | None]\n    hooks: NotRequired[RunHooks[TContext] | None]\n    run_config: NotRequired[RunConfig | None]\n    previous_response_id: NotRequired[str | None]\n    auto_previous_response_id: NotRequired[bool]\n    conversation_id: NotRequired[str | None]\n    session: NotRequired[Session | None]\n    error_handlers: NotRequired[RunErrorHandlers[TContext] | None]\n```\n\n### Configuration Options\n\n| Parameter | Type | Default | Description |\n|-----------|------|---------|-------------|\n| `max_turns` | `int \\| None` | `None` | Maximum turns; `None` disables limit |\n| `context` | `TContext \\| None` | `None` | Custom context object |\n| `hooks` | `RunHooks[TContext]` | `None` | 
Lifecycle hooks |\n| `run_config` | `RunConfig` | `None` | Runtime configuration |\n| `session` | `Session` | `None` | Session for state persistence |\n| `error_handlers` | `RunErrorHandlers` | `None` | Error callback handlers |\n\n资料来源：[src/agents/run_config.py:50-75]()\n\n## Turn Processing\n\n### Turn Resolution\n\nThe `turn_resolution.py` module handles processing model responses after tool execution:\n\n```python\ntool_final_output = await _maybe_finalize_from_tool_results(\n    public_agent=public_agent,\n    original_input=original_input,\n    new_response=new_response,\n    pre_step_items=pre_step_items,\n    new_step_items=new_step_items,\n    function_results=function_results,\n    hooks=hooks,\n    context_wrapper=context_wrapper,\n    tool_input_guardrail_results=tool_input_guardrail_results,\n    tool_output_guardrail_results=tool_output_guardrail_results,\n)\n```\n\n### Message Output Extraction\n\nThe `ItemHelpers` class provides utilities for extracting content from model responses:\n\n```python\n@classmethod\ndef extract_refusal(cls, message: TResponseOutputItem) -> str | None:\n    \"\"\"Extracts refusal content from a message, if any.\"\"\"\n    if not isinstance(message, ResponseOutputMessage):\n        return None\n    refusal = \"\"\n    for content_item in message.content:\n        if isinstance(content_item, ResponseOutputRefusal):\n            refusal += content_item.refusal or \"\"\n    return refusal or None\n```\n\n### Refusal Handling\n\nWhen the model refuses to respond, a `ModelRefusalError` is raised:\n\n```python\nif refusal:\n    refusal_error = ModelRefusalError(refusal)\n    run_error_data = build_run_error_data(...)\n```\n\n资料来源：[src/agents/run_internal/turn_resolution.py:25-45]()\n\n## Agent Handoffs\n\n### Handoff Processing\n\nThe run loop handles agent handoffs through the `NextStepHandoff` type:\n\n```python\nelif isinstance(turn_result.next_step, NextStepHandoff):\n    current_agent = cast(Agent[TContext], 
turn_result.next_step.new_agent)\n    if run_state is not None:\n        run_state._current_agent = current_agent\n    starting_input = turn_result.original_input\n    original_input = turn_result.original_input\n    current_span.finish(reset_current=True)\n    should_run_agent_start_hooks = True\n```\n\n### Loop Continuation\n\nFor cases requiring another iteration without switching agents:\n\n```python\nelif isinstance(turn_result.next_step, NextStepRunAgain):\n    await save_turn_items_if_needed(\n        session=session,\n        run_state=run_state,\n        session_persistence_enabled=session_persistence_enabled,\n        items=session_items_for_turn(turn_result),\n        response_id=turn_result.model_response.response_id,\n        store=store_setting,\n    )\n    continue\n```\n\n资料来源：[src/agents/run.py:150-180]()\n\n## Result Types\n\n### RunResult Structure\n\n| Field | Type | Description |\n|-------|------|-------------|\n| `last_agent` | `Agent` | Final agent that produced output |\n| `new_items` | `list[RunItem]` | All items from the run |\n| `final_output` | `Response` | Final model response |\n| `raw_responses` | `list[RawResponsesFromModel]` | Raw model outputs |\n\n### Tool Output Handling\n\nTool outputs are processed through multiple stages:\n\n1. **Pre-step items** - State before tool execution\n2. **New step items** - State after tool execution\n3. 
**Function results** - Structured tool call results\n\nThe system tracks tool activity without messages using:\n\n```python\nhas_tool_activity_without_message = not message_items and bool(\n    processed_response.tools_used\n)\n```\n\n资料来源：[src/agents/run_internal/turn_resolution.py:35-40]()\n\n## Input Processing\n\n### Input Conversion\n\nThe `ItemHelpers` class handles input normalization:\n\n```python\n@classmethod\ndef input_to_new_input_list(\n    cls, input: str | list[TResponseInputItem]\n) -> list[TResponseInputItem]:\n    \"\"\"Converts a string or list of input items into a list of input items.\"\"\"\n    if isinstance(input, str):\n        return [{\"content\": input, \"role\": \"user\"}]\n    return cast(list[TResponseInputItem], _to_dump_compatible(input))\n```\n\n### Text Extraction\n\nConcatenate all text content from message output items:\n\n```python\n@classmethod\ndef text_message_outputs(cls, items: list[RunItem]) -> str:\n    \"\"\"Concatenates all the text content from a list of message output items.\"\"\"\n    text = \"\"\n    for item in items:\n        if isinstance(item, MessageOutputItem):\n            text += cls.text_message_output(item)\n    return text\n```\n\n资料来源：[src/agents/items.py:60-90]()\n\n## Error Handling\n\n### Error Flow\n\n```mermaid\ngraph TD\n    A[Error Occurs] --> B{Error Type?}\n    B -->|Refusal| C[ModelRefusalError]\n    B -->|Tool Failure| D[ToolExecutionError]\n    B -->|Max Turns| E[MaxTurnsExceededError]\n    B -->|Other| F[Generic Error Handler]\n    C --> G[Build Error Data]\n    D --> G\n    E --> G\n    F --> G\n    G --> H[Return Error Result]\n```\n\n### Error Handlers Configuration\n\nCustom error handlers can be registered per error kind:\n\n```python\nerror_handlers: RunErrorHandlers[TContext] | None\n```\n\nThe system supports typed error handling where handlers are keyed by error category.\n\n资料来源：[src/agents/run_config.py:60-65]()\n\n## Session Persistence\n\n### Save Turn Items\n\nThe run loop 
persists state after each turn when session is enabled:\n\n```python\nawait save_turn_items_if_needed(\n    session=session,\n    run_state=run_state,\n    session_persistence_enabled=session_persistence_enabled,\n    items=session_items_for_turn(turn_result),\n    response_id=turn_result.model_response.response_id,\n    store=store_setting,\n)\n```\n\n### Parameters\n\n| Parameter | Type | Description |\n|-----------|------|-------------|\n| `session` | `Session \\| None` | Active session instance |\n| `run_state` | `RunState \\| None` | Current run state |\n| `session_persistence_enabled` | `bool` | Whether persistence is active |\n| `items` | `list[RunItem]` | Items to persist |\n| `response_id` | `str` | Model response ID |\n| `store` | `StoreSetting` | Storage configuration |\n\n资料来源：[src/agents/run.py:160-170]()\n\n## Streaming Support\n\nThe system supports streaming model outputs through the streaming module. Streaming is configured via `RunConfig` and allows real-time output handling without waiting for complete responses.\n\n## Lifecycle Hooks\n\n### Available Hooks\n\n| Hook | Trigger | Purpose |\n|------|---------|---------|\n| `on_agent_start` | Agent turn begins | Initialize agent-specific state |\n| `on_agent_end` | Agent turn ends | Cleanup or logging |\n| `on_tool_call` | Tool invocation | Logging or monitoring |\n| `on_handoff` | Agent switch | Track transitions |\n\nHooks receive `RunContextWrapper` and relevant context data, enabling deep customization of the execution flow.\n\n资料来源：[src/agents/run_config.py:35-45]()\n\n## Summary\n\nThe Run Loop and Execution system provides:\n\n- **Iterative Processing**: Turn-based model interaction with tool execution\n- **Flexible Configuration**: Extensive options via `RunOptions` and `RunConfig`\n- **Agent Orchestration**: Seamless handoff between agents\n- **Error Resilience**: Typed error handlers and refusal detection\n- **Session Management**: Persistent state across turns\n- **Lifecycle Hooks**: 
Customization at every execution stage\n\nThe architecture prioritizes extensibility, allowing developers to hook into any phase of execution while maintaining a clear, predictable flow from input to final output.\n\n---\n\n<a id='sessions'></a>\n\n## Sessions and Memory\n\n### 相关页面\n\n相关主题：[Run Loop and Execution](#run-loop)\n\n<details>\n<summary>相关源码文件</summary>\n\n以下源码文件用于生成本页说明：\n\n- [src/agents/sandbox/memory/prompts/rollout_extraction_user_message.md](https://github.com/openai/openai-agents-python/blob/main/src/agents/sandbox/memory/prompts/rollout_extraction_user_message.md)\n- [src/agents/sandbox/capabilities/memory.py](https://github.com/openai/openai-agents-python/blob/main/src/agents/sandbox/capabilities/memory.py)\n- [src/agents/extensions/memory/__init__.py](https://github.com/openai/openai-agents-python/blob/main/src/agents/extensions/memory/__init__.py)\n- [src/agents/sandbox/session/sinks.py](https://github.com/openai/openai-agents-python/blob/main/src/agents/sandbox/session/sinks.py)\n- [src/agents/sandbox/session/base_sandbox_session.py](https://github.com/openai/openai-agents-python/blob/main/src/agents/sandbox/session/base_sandbox_session.py)\n- [src/agents/handoffs/history.py](https://github.com/openai/openai-agents-python/blob/main/src/agents/handoffs/history.py)\n- [src/agents/sandbox/errors.py](https://github.com/openai/openai-agents-python/blob/main/src/agents/sandbox/errors.py)\n</details>\n\n# Sessions and Memory\n\n## Overview\n\nThe Sessions and Memory system in the openai-agents-python library provides persistent conversation state management for AI agents. This system enables agents to maintain context across multiple interactions, store conversation history, and access previously learned information through a flexible session abstraction layer.\n\nThe architecture is built around a **protocol-based design** that allows different storage backends while maintaining a consistent interface. 
Sessions track conversation items, manage agent handoffs, and enable memory persistence for sandboxed agent environments.\n\n资料来源：[src/agents/extensions/memory/__init__.py:1-8]()\n\n## Architecture\n\n### Session Protocol\n\nThe core of the session system is the `Session` protocol, which defines the contract for all session implementations. This allows developers to swap storage backends without changing application code.\n\n```mermaid\ngraph TD\n    A[Agent Run] --> B[Session Protocol]\n    B --> C[SQLiteSession]\n    B --> D[AsyncSQLiteSession]\n    B --> E[AdvancedSQLiteSession]\n    B --> F[EncryptedSession]\n    B --> G[RedisSession]\n    B --> H[SQLAlchemySession]\n    B --> I[MongoDBSession]\n    B --> J[DaprSession]\n```\n\n资料来源：[src/agents/extensions/memory/__init__.py:1-30]()\n\n### Memory Capability in Sandboxes\n\nSandbox agents have a dedicated memory capability that provides context from previous sessions. The `Memory` class in the sandbox capabilities layer enables agents to read and write persistent memory.\n\n```mermaid\ngraph LR\n    A[SandboxAgent] -->|requires| B[Memory Capability]\n    B --> C[read: MemoryReadConfig]\n    B --> D[generate: MemoryGenerateConfig]\n    B --> E[layout: MemoryLayout]\n```\n\nThe memory system requires either `read` or `generate` configuration to be meaningful. 
When `read.live_update` is enabled, the capability requires both `filesystem` and `shell` capabilities; otherwise, only `shell` is required.\n\n资料来源：[src/agents/sandbox/capabilities/memory.py:1-30]()\n\n## Session Persistence Layer\n\n### Session Lifecycle\n\nSessions manage the persistence of conversation state through a structured workflow:\n\n```mermaid\nsequenceDiagram\n    participant Agent as Agent Run\n    participant Session as Session Store\n    participant Sandbox as Sandbox Session\n    \n    Agent->>Session: Create/Resume Session\n    Session-->>Agent: Session ID\n    Agent->>Sandbox: Initialize Workspace\n    loop Turn Processing\n        Agent->>Sandbox: Execute Tool\n        Sandbox-->>Agent: Tool Result\n        Agent->>Session: Save Turn Items\n        Session-->>Agent: Acknowledge\n    end\n    Agent->>Session: Finalize Session\n```\n\n### Turn Item Persistence\n\nDuring agent execution, each turn generates items that must be persisted:\n\n- `input`: Current segment user input\n- `generated_items`: Memory-relevant assistant and tool items\n- `terminal_metadata`: Completion/failure state\n- `final_output`: Final segment output when available\n\n资料来源：[src/agents/sandbox/memory/prompts/rollout_extraction_user_message.md:1-20]()\n\n## Memory Rollout Extraction\n\nWhen an agent session completes, the system can extract a structured memory summary for future reference. 
This process is handled by the rollout extraction prompt system.\n\n### JSON Output Schema\n\nThe extraction produces JSON with three fields:\n\n| Field | Type | Description |\n|-------|------|-------------|\n| `raw_memory` | string | Raw memory content from the session |\n| `rollout_summary` | string | Generated summary of the session |\n| `rollout_slug` | string | Short identifier (empty string if unknown) |\n\n资料来源：[src/agents/sandbox/memory/prompts/rollout_extraction_user_message.md:1-25]()\n\n### Memory Summary Path\n\nThe memory system reads summaries from a configurable path within the sandbox workspace:\n\n```\nmemory_summary_path = Path(layout.memories_dir) / \"memory_summary.md\"\n```\n\nThe memory summary is truncated to a maximum token limit (`_MEMORY_SUMMARY_MAX_TOKENS`) to ensure efficient processing.\n\n资料来源：[src/agents/sandbox/capabilities/memory.py:50-65]()\n\n## Workspace Sink System\n\nThe `WorkspaceSink` class manages buffered writes to the sandbox workspace, providing a layer between agent operations and persistent storage.\n\n### Flush Strategy\n\nThe sink implements intelligent flushing based on several conditions:\n\n```mermaid\ngraph TD\n    A[Should Flush?] --> B{Seen count % flush_every == 0}\n    A --> C{Operation: persist_workspace start}\n    A --> D{Operation: stop}\n    A --> E{Operation: shutdown start}\n    B -->|Yes| F[Flush to workspace]\n    C -->|Yes| F\n    D -->|Yes| F\n    E -->|Yes| F\n    B -->|No| G{Check running state}\n    G -->|Running| F\n    G -->|Not running| H[Defer flush]\n```\n\nFlush conditions include:\n- Periodic flush based on event count\n- Explicit persist workspace operations\n- Session stop and shutdown events\n\n资料来源：[src/agents/sandbox/session/sinks.py:1-40]()\n\n### Workspace Persistence\n\nThe sink handles reading existing outbox content before writing new data, ensuring append-style semantics for workspace files. 
If no existing outbox is found, it marks the outbox as loaded and proceeds with new writes.\n\n资料来源：[src/agents/sandbox/session/sinks.py:60-85]()\n\n## Error Handling\n\nThe session system defines specific error types for workspace operations:\n\n### Error Hierarchy\n\n| Error Class | Code | Purpose |\n|-------------|------|---------|\n| `WorkspaceIOError` | - | Base class for workspace read/write errors |\n| `ApplyPatchPathError` | `APPLY_PATCH_INVALID_PATH` | Invalid path (absolute, escape root, or empty) |\n| `ApplyPatchDiffError` | - | Malformed patch diff |\n| `ExecNonZeroError` | - | Non-zero exit code from exec operations |\n| `InvalidManifestPathError` | - | Path resolution failed in manifest context |\n\n### Path Validation\n\nThe system validates relative paths to prevent directory traversal attacks:\n\n```python\ndef _validate_relative_path(*, name: str, path: Path) -> None:\n    if path.is_absolute():\n        raise ValueError(f\"{name} must be relative\")\n    if \"..\" in path.parts:\n        raise ValueError(f\"{name} must not escape root\")\n    if path.parts in [(), (\".\",)]:\n        raise ValueError(f\"{name} must be non-empty\")\n```\n\n资料来源：[src/agents/sandbox/errors.py:1-50]()\n\n## Session Handoff History\n\nWhen agents hand off to other agents, the system can summarize conversation history for the receiving agent. This is managed by the handoff history module.\n\n### History Normalization\n\nThe system normalizes input history and flattens nested messages before creating summaries. 
Items like `ToolApprovalItem` are filtered out as they shouldn't be forwarded.\n\n```mermaid\ngraph LR\n    A[Handoff Input] --> B[Normalize History]\n    B --> C[Flatten Nested Messages]\n    C --> D[Filter Tool Approvals]\n    D --> E[Convert to Plain Inputs]\n    E --> F[Generate Transcript Summary]\n```\n\n资料来源：[src/agents/handoffs/history.py:1-60]()\n\n### History Markers\n\nThe conversation history uses customizable markers for wrapping summaries:\n\n| Variable | Default |\n|----------|---------|\n| `_conversation_history_start` | `<CONVERSATION HISTORY>` |\n| `_conversation_history_end` | `</CONVERSATION HISTORY>` |\n\nThese can be overridden at runtime using `set_conversation_history_wrappers()`.\n\n资料来源：[src/agents/handoffs/history.py:1-50]()\n\n## Extension Memory Backends\n\nThe library includes several optional session backends that require additional dependencies:\n\n### Available Backends\n\n| Backend | Package | Features |\n|---------|---------|----------|\n| `SQLiteSession` | Built-in | Basic SQLite persistence |\n| `AsyncSQLiteSession` | Built-in | Async SQLite operations |\n| `AdvancedSQLiteSession` | Built-in | Advanced SQLite features |\n| `EncryptedSession` | `cryptography` | Encryption at rest |\n| `RedisSession` | `redis` | Distributed session management |\n| `SQLAlchemySession` | `sqlalchemy` | ORM integration |\n| `MongoDBSession` | `mongodb` | Document store backend |\n| `DaprSession` | `dapr` | Dapr state store integration |\n\n资料来源：[src/agents/extensions/memory/__init__.py:1-50]()\n\n### Lazy Loading\n\nExtensions use lazy imports to avoid requiring all dependencies when not needed:\n\n```python\n_LAZY_EXPORTS: dict[str, tuple[str, tuple[str, str] | None]] = {\n    \"EncryptedSession\": (\".encrypt_session\", (\"cryptography\", \"encrypt\")),\n    \"RedisSession\": (\".redis_session\", (\"redis\", \"redis\")),\n    ...\n}\n```\n\nThis pattern ensures that optional dependencies are only loaded when the specific backend is 
used.\n\n资料来源：[src/agents/extensions/memory/__init__.py:1-50]()\n\n## Configuration\n\n### Session Settings\n\nSessions are configured through `SessionSettings` which control:\n\n- Storage backend selection\n- Connection parameters\n- Persistence strategies\n- Compaction policies (for OpenAI responses backend)\n\n### Memory Layout\n\nFor sandbox memory, the `MemoryLayout` class specifies directory structure:\n\n| Setting | Description |\n|---------|-------------|\n| `memories_dir` | Directory for stored memories |\n| `sessions_dir` | Directory for session data |\n\nBoth paths must be relative to the sandbox workspace root to prevent escape vulnerabilities.\n\n资料来源：[src/agents/sandbox/capabilities/memory.py:20-35]()\n\n## Usage Patterns\n\n### Basic Session Usage\n\n```python\nfrom agents.memory import SQLiteSession\n\nsession = SQLiteSession(session_id=\"user-123\")\nawait session.initialize()\n\n# Run agent with session\nresult = await Runner.run(agent, input, session=session)\n\n# Session automatically persists turn items\n```\n\n### Sandbox Memory Setup\n\n```python\nfrom agents.sandbox.capabilities import Memory, MemoryReadConfig, MemoryLayout\n\nmemory = Memory(\n    read=MemoryReadConfig(live_update=True),\n    layout=MemoryLayout(memories_dir=\"memory\", sessions_dir=\"sessions\"),\n    run_as=\"root\"\n)\n```\n\n### Resume from Session\n\n```python\n# Resume a previous session\nsession = SQLiteSession(session_id=\"user-123\", resume=True)\n\n# Continue the conversation\nresult = await Runner.run(agent, input, session=session)\n```\n\n## Best Practices\n\n1. **Path Validation**: Always use relative paths for memory directories to prevent sandbox escape vulnerabilities.\n\n2. **Session Initialization**: Check `session.is_initialized()` before running agent logic.\n\n3. **Error Handling**: Catch specific session errors rather than generic exceptions for better recovery.\n\n4. 
**Turn Item Management**: Let the session system manage persistence automatically through the `save_turn_items_if_needed()` function.\n\n5. **Live Update Trade-offs**: Enable `live_update` only when agents need real-time file system access; otherwise, rely on shell-only mode for better isolation.\n\n6. **Extension Dependencies**: Use lazy-loading backends to minimize startup time and avoid unnecessary dependency loading.\n\n---\n\n---\n\n## Doramagic 踩坑日志\n\n项目：openai/openai-agents-python\n\n摘要：发现 24 个潜在踩坑项，其中 0 个为 high/blocking；最高优先级：身份坑 - 仓库名和安装名不一致。\n\n## 1. 身份坑 · 仓库名和安装名不一致\n\n- 严重度：medium\n- 证据强度：runtime_trace\n- 发现：仓库名 `openai-agents-python` 与安装入口 `openai-agents` 不完全一致。\n- 对用户的影响：用户照着仓库名搜索包或照着包名找仓库时容易走错入口。\n- 建议检查：在 npm/PyPI/GitHub 上确认包名映射和官方 README 说明。\n- 复现命令：`pip install openai-agents`\n- 防护动作：页面必须同时展示 repo 名和真实安装入口，避免用户搜索错包。\n- 证据：identity.distribution | github_repo:946380199 | https://github.com/openai/openai-agents-python | repo=openai-agents-python; install=openai-agents\n\n## 2. 配置坑 · 来源证据：AdvancedSQLiteSession.delete_branch() leaves branch-only messages in the base table\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：GitHub 社区证据显示该项目存在一个配置相关的待验证问题：AdvancedSQLiteSession.delete_branch() leaves branch-only messages in the base table\n- 对用户的影响：可能增加新用户试用和生产接入成本。\n- 建议检查：来源问题仍为 open，Pack Agent 需要复核是否仍影响当前版本。\n- 防护动作：不得脱离来源链接放大为确定性结论；需要标注适用版本和复核状态。\n- 证据：community_evidence:github | cevd_d867c75f80af49c9968398851ff8bf6a | https://github.com/openai/openai-agents-python/issues/3346 | 来源讨论提到 python 相关条件，需在安装/试用前复核。\n\n## 3. 
配置坑 · 来源证据：Clarify whether retry-after delays should respect retry max_delay\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：GitHub 社区证据显示该项目存在一个配置相关的待验证问题：Clarify whether retry-after delays should respect retry max_delay\n- 对用户的影响：可能增加新用户试用和生产接入成本。\n- 建议检查：来源显示可能已有修复、规避或版本变化，说明书中必须标注适用版本。\n- 防护动作：不得脱离来源链接放大为确定性结论；需要标注适用版本和复核状态。\n- 证据：community_evidence:github | cevd_f486d2247bf24df8bbc7a2bd6fddbd65 | https://github.com/openai/openai-agents-python/issues/3266 | 来源讨论提到 python 相关条件，需在安装/试用前复核。\n\n## 4. 配置坑 · 来源证据：OpenAIConversationsSession persists empty reasoning item {\"type\":\"reasoning\",\"summary\":[]} and Conversations API reject…\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：GitHub 社区证据显示该项目存在一个配置相关的待验证问题：OpenAIConversationsSession persists empty reasoning item {\"type\":\"reasoning\",\"summary\":[]} and Conversations API rejects it as invalid\n- 对用户的影响：可能增加新用户试用和生产接入成本。\n- 建议检查：来源问题仍为 open，Pack Agent 需要复核是否仍影响当前版本。\n- 防护动作：不得脱离来源链接放大为确定性结论；需要标注适用版本和复核状态。\n- 证据：community_evidence:github | cevd_d6bad5c23bf3457eb546c22a1636cc26 | https://github.com/openai/openai-agents-python/issues/3268 | 来源讨论提到 python 相关条件，需在安装/试用前复核。\n\n## 5. 配置坑 · 来源证据：Tracing shutdown cannot interrupt exporter retry backoff\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：GitHub 社区证据显示该项目存在一个配置相关的待验证问题：Tracing shutdown cannot interrupt exporter retry backoff\n- 对用户的影响：可能阻塞安装或首次运行。\n- 建议检查：来源问题仍为 open，Pack Agent 需要复核是否仍影响当前版本。\n- 防护动作：不得脱离来源链接放大为确定性结论；需要标注适用版本和复核状态。\n- 证据：community_evidence:github | cevd_e1ceae098cf84c8aafae7082b13c5345 | https://github.com/openai/openai-agents-python/issues/3354 | 来源讨论提到 python 相关条件，需在安装/试用前复核。\n\n## 6. 
配置坑 · 来源证据：v0.15.2\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：GitHub 社区证据显示该项目存在一个配置相关的待验证问题：v0.15.2\n- 对用户的影响：可能增加新用户试用和生产接入成本。\n- 建议检查：来源显示可能已有修复、规避或版本变化，说明书中必须标注适用版本。\n- 防护动作：不得脱离来源链接放大为确定性结论；需要标注适用版本和复核状态。\n- 证据：community_evidence:github | cevd_b73472b5ae90447199984775aacdca67 | https://github.com/openai/openai-agents-python/releases/tag/v0.15.2 | 来源讨论提到 python 相关条件，需在安装/试用前复核。\n\n## 7. 配置坑 · 来源证据：v0.15.3\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：GitHub 社区证据显示该项目存在一个配置相关的待验证问题：v0.15.3\n- 对用户的影响：可能增加新用户试用和生产接入成本。\n- 建议检查：来源显示可能已有修复、规避或版本变化，说明书中必须标注适用版本。\n- 防护动作：不得脱离来源链接放大为确定性结论；需要标注适用版本和复核状态。\n- 证据：community_evidence:github | cevd_7e05a382001a4d07b74eda1e1316320b | https://github.com/openai/openai-agents-python/releases/tag/v0.15.3 | 来源讨论提到 python 相关条件，需在安装/试用前复核。\n\n## 8. 配置坑 · 来源证据：v0.16.1\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：GitHub 社区证据显示该项目存在一个配置相关的待验证问题：v0.16.1\n- 对用户的影响：可能增加新用户试用和生产接入成本。\n- 建议检查：来源显示可能已有修复、规避或版本变化，说明书中必须标注适用版本。\n- 防护动作：不得脱离来源链接放大为确定性结论；需要标注适用版本和复核状态。\n- 证据：community_evidence:github | cevd_44335088ff52486e9f2f41f72a274c35 | https://github.com/openai/openai-agents-python/releases/tag/v0.16.1 | 来源讨论提到 python 相关条件，需在安装/试用前复核。\n\n## 9. 配置坑 · 来源证据：v0.17.0\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：GitHub 社区证据显示该项目存在一个配置相关的待验证问题：v0.17.0\n- 对用户的影响：可能增加新用户试用和生产接入成本。\n- 建议检查：来源显示可能已有修复、规避或版本变化，说明书中必须标注适用版本。\n- 防护动作：不得脱离来源链接放大为确定性结论；需要标注适用版本和复核状态。\n- 证据：community_evidence:github | cevd_86b81f310a6e45feadc65196a057b23b | https://github.com/openai/openai-agents-python/releases/tag/v0.17.0 | 来源讨论提到 python 相关条件，需在安装/试用前复核。\n\n## 10. 能力坑 · 来源证据：v0.15.1\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：GitHub 社区证据显示该项目存在一个能力理解相关的待验证问题：v0.15.1\n- 对用户的影响：可能增加新用户试用和生产接入成本。\n- 建议检查：来源显示可能已有修复、规避或版本变化，说明书中必须标注适用版本。\n- 防护动作：不得脱离来源链接放大为确定性结论；需要标注适用版本和复核状态。\n- 证据：community_evidence:github | cevd_4c70d563ac704aeaa14b8e2c49976bc5 | https://github.com/openai/openai-agents-python/releases/tag/v0.15.1 | 来源讨论提到 python 相关条件，需在安装/试用前复核。\n\n## 11. 
能力坑 · 能力判断依赖假设\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：README/documentation is current enough for a first validation pass.\n- 对用户的影响：假设不成立时，用户拿不到承诺的能力。\n- 建议检查：将假设转成下游验证清单。\n- 防护动作：假设必须转成验证项；没有验证结果前不能写成事实。\n- 证据：capability.assumptions | github_repo:946380199 | https://github.com/openai/openai-agents-python | README/documentation is current enough for a first validation pass.\n\n## 12. 运行坑 · 来源证据：v0.14.8\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：GitHub 社区证据显示该项目存在一个运行相关的待验证问题：v0.14.8\n- 对用户的影响：可能增加新用户试用和生产接入成本。\n- 建议检查：来源显示可能已有修复、规避或版本变化，说明书中必须标注适用版本。\n- 防护动作：不得脱离来源链接放大为确定性结论；需要标注适用版本和复核状态。\n- 证据：community_evidence:github | cevd_a31947cfee3a4299923f7714bfb54f42 | https://github.com/openai/openai-agents-python/releases/tag/v0.14.8 | 来源讨论提到 python 相关条件，需在安装/试用前复核。\n\n## 13. 维护坑 · 来源证据：AdvancedSQLiteSession.add_items can report success after structure metadata failure\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：GitHub 社区证据显示该项目存在一个维护/版本相关的待验证问题：AdvancedSQLiteSession.add_items can report success after structure metadata failure\n- 对用户的影响：可能增加新用户试用和生产接入成本。\n- 建议检查：来源问题仍为 open，Pack Agent 需要复核是否仍影响当前版本。\n- 防护动作：不得脱离来源链接放大为确定性结论；需要标注适用版本和复核状态。\n- 证据：community_evidence:github | cevd_0fed2dd63d55400d9e0d9adaf08570e5 | https://github.com/openai/openai-agents-python/issues/3348 | 来源讨论提到 python 相关条件，需在安装/试用前复核。\n\n## 14. 维护坑 · 来源证据：Chat Completions converter can send empty tool output for non-text results\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：GitHub 社区证据显示该项目存在一个维护/版本相关的待验证问题：Chat Completions converter can send empty tool output for non-text results\n- 对用户的影响：可能增加新用户试用和生产接入成本。\n- 建议检查：来源问题仍为 open，Pack Agent 需要复核是否仍影响当前版本。\n- 防护动作：不得脱离来源链接放大为确定性结论；需要标注适用版本和复核状态。\n- 证据：community_evidence:github | cevd_34a35e920a01467e957cdd59b4179cc1 | https://github.com/openai/openai-agents-python/issues/3310 | 来源讨论提到 python 相关条件，需在安装/试用前复核。\n\n## 15. 
维护坑 · 来源证据：v0.15.0\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：GitHub 社区证据显示该项目存在一个维护/版本相关的待验证问题：v0.15.0\n- 对用户的影响：可能增加新用户试用和生产接入成本。\n- 建议检查：来源显示可能已有修复、规避或版本变化，说明书中必须标注适用版本。\n- 防护动作：不得脱离来源链接放大为确定性结论；需要标注适用版本和复核状态。\n- 证据：community_evidence:github | cevd_33cd0193aea84f9b82b15a02098d85cd | https://github.com/openai/openai-agents-python/releases/tag/v0.15.0 | 来源讨论提到 python 相关条件，需在安装/试用前复核。\n\n## 16. 维护坑 · 维护活跃度未知\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：未记录 last_activity_observed。\n- 对用户的影响：新项目、停更项目和活跃项目会被混在一起，推荐信任度下降。\n- 建议检查：补 GitHub 最近 commit、release、issue/PR 响应信号。\n- 防护动作：维护活跃度未知时，推荐强度不能标为高信任。\n- 证据：evidence.maintainer_signals | github_repo:946380199 | https://github.com/openai/openai-agents-python | last_activity_observed missing\n\n## 17. 安全/权限坑 · 下游验证发现风险项\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：no_demo\n- 对用户的影响：下游已经要求复核，不能在页面中弱化。\n- 建议检查：进入安全/权限治理复核队列。\n- 防护动作：下游风险存在时必须保持 review/recommendation 降级。\n- 证据：downstream_validation.risk_items | github_repo:946380199 | https://github.com/openai/openai-agents-python | no_demo; severity=medium\n\n## 18. 安全/权限坑 · 存在安全注意事项\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：No sandbox install has been executed yet; downstream must verify before user use.\n- 对用户的影响：用户安装前需要知道权限边界和敏感操作。\n- 建议检查：转成明确权限清单和安全审查提示。\n- 防护动作：安全注意事项必须面向用户前置展示。\n- 证据：risks.safety_notes | github_repo:946380199 | https://github.com/openai/openai-agents-python | No sandbox install has been executed yet; downstream must verify before user use.\n\n## 19. 安全/权限坑 · 存在评分风险\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：no_demo\n- 对用户的影响：风险会影响是否适合普通用户安装。\n- 建议检查：把风险写入边界卡，并确认是否需要人工复核。\n- 防护动作：评分风险必须进入边界卡，不能只作为内部分数。\n- 证据：risks.scoring_risks | github_repo:946380199 | https://github.com/openai/openai-agents-python | no_demo; severity=medium\n\n## 20. 
安全/权限坑 · 来源证据：Proposal: per-run BudgetGuard for token / request / cost limits (follow-up to #2848)\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：GitHub 社区证据显示该项目存在一个安全/权限相关的待验证问题：Proposal: per-run BudgetGuard for token / request / cost limits (follow-up to #2848)\n- 对用户的影响：可能阻塞安装或首次运行。\n- 建议检查：来源问题仍为 open，Pack Agent 需要复核是否仍影响当前版本。\n- 防护动作：不得脱离来源链接放大为确定性结论；需要标注适用版本和复核状态。\n- 证据：community_evidence:github | cevd_00884163bb274aecb62eeff18df12634 | https://github.com/openai/openai-agents-python/issues/3353 | 来源讨论提到 python 相关条件，需在安装/试用前复核。\n\n## 21. 安全/权限坑 · 来源证据：v0.16.0\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：GitHub 社区证据显示该项目存在一个安全/权限相关的待验证问题：v0.16.0\n- 对用户的影响：可能影响授权、密钥配置或安全边界。\n- 建议检查：来源显示可能已有修复、规避或版本变化，说明书中必须标注适用版本。\n- 防护动作：不得脱离来源链接放大为确定性结论；需要标注适用版本和复核状态。\n- 证据：community_evidence:github | cevd_a9d11d6b8fd24b22882ee03998b45d63 | https://github.com/openai/openai-agents-python/releases/tag/v0.16.0 | 来源讨论提到 python 相关条件，需在安装/试用前复核。\n\n## 22. 安全/权限坑 · 来源证据：v0.17.1\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：GitHub 社区证据显示该项目存在一个安全/权限相关的待验证问题：v0.17.1\n- 对用户的影响：可能影响授权、密钥配置或安全边界。\n- 建议检查：来源显示可能已有修复、规避或版本变化，说明书中必须标注适用版本。\n- 防护动作：不得脱离来源链接放大为确定性结论；需要标注适用版本和复核状态。\n- 证据：community_evidence:github | cevd_0d47be3955c747baadea812c5f4c6487 | https://github.com/openai/openai-agents-python/releases/tag/v0.17.1 | 来源讨论提到 python 相关条件，需在安装/试用前复核。\n\n## 23. 维护坑 · issue/PR 响应质量未知\n\n- 严重度：low\n- 证据强度：source_linked\n- 发现：issue_or_pr_quality=unknown。\n- 对用户的影响：用户无法判断遇到问题后是否有人维护。\n- 建议检查：抽样最近 issue/PR，判断是否长期无人处理。\n- 防护动作：issue/PR 响应未知时，必须提示维护风险。\n- 证据：evidence.maintainer_signals | github_repo:946380199 | https://github.com/openai/openai-agents-python | issue_or_pr_quality=unknown\n\n## 24. 
维护坑 · 发布节奏不明确\n\n- 严重度：low\n- 证据强度：source_linked\n- 发现：release_recency=unknown。\n- 对用户的影响：安装命令和文档可能落后于代码，用户踩坑概率升高。\n- 建议检查：确认最近 release/tag 和 README 安装命令是否一致。\n- 防护动作：发布节奏未知或过期时，安装说明必须标注可能漂移。\n- 证据：evidence.maintainer_signals | github_repo:946380199 | https://github.com/openai/openai-agents-python | release_recency=unknown\n\n<!-- canonical_name: openai/openai-agents-python; human_manual_source: deepwiki_human_wiki -->\n",
      "markdown_key": "openai-agents-python",
      "pages": "draft",
      "source_refs": [
        {
          "evidence_id": "github_repo:946380199",
          "kind": "repo",
          "supports_claim_ids": [
            "claim_identity",
            "claim_distribution",
            "claim_capability"
          ],
          "url": "https://github.com/openai/openai-agents-python"
        },
        {
          "evidence_id": "art_6011864ffb544ed1b0e793253ce40dd1",
          "kind": "docs",
          "supports_claim_ids": [
            "claim_identity",
            "claim_distribution",
            "claim_capability"
          ],
          "url": "https://github.com/openai/openai-agents-python#readme"
        }
      ],
      "summary": "DeepWiki/Human Wiki 完整输出，末尾追加 Discovery Agent 踩坑日志。",
      "title": "openai-agents-python 说明书",
      "toc": [
        "https://github.com/openai/openai-agents-python 项目说明书",
        "目录",
        "OpenAI Agents SDK Overview",
        "Introduction",
        "Architecture Overview",
        "Agent System",
        "Handoffs System",
        "Turn Resolution",
        "Doramagic 踩坑日志"
      ]
    }
  },
  "quality_gate": {
    "blocking_gaps": [],
    "category_confidence": "medium",
    "compile_status": "ready_for_review",
    "five_assets_present": true,
    "install_sandbox_verified": true,
    "missing_evidence": [],
    "next_action": "publish to Doramagic.ai project surfaces",
    "prompt_preview_boundary_ok": true,
    "publish_status": "publishable",
    "quick_start_verified": true,
    "repo_clone_verified": true,
    "repo_commit": "4bd459e403ac826c87b17fef8ffcbdf42a70b09a",
    "repo_inspection_error": null,
    "repo_inspection_files": [
      "pyproject.toml",
      "README.md",
      "uv.lock",
      "docs/tracing.md",
      "docs/tools.md",
      "docs/repl.md",
      "docs/human_in_the_loop.md",
      "docs/index.md",
      "docs/context.md",
      "docs/quickstart.md",
      "docs/streaming.md",
      "docs/usage.md",
      "docs/multi_agent.md",
      "docs/examples.md",
      "docs/sandbox_agents.md",
      "docs/results.md",
      "docs/running_agents.md",
      "docs/agents.md",
      "docs/handoffs.md",
      "docs/mcp.md",
      "docs/release.md",
      "docs/guardrails.md",
      "docs/visualization.md",
      "docs/config.md",
      "docs/voice/tracing.md",
      "docs/voice/pipeline.md",
      "docs/voice/quickstart.md",
      "docs/ja/tracing.md",
      "docs/ja/tools.md",
      "docs/ja/repl.md",
      "docs/ja/human_in_the_loop.md",
      "docs/ja/sessions.md",
      "docs/ja/index.md",
      "docs/ja/context.md",
      "docs/ja/quickstart.md",
      "docs/ja/streaming.md",
      "docs/ja/usage.md",
      "docs/ja/multi_agent.md",
      "docs/ja/examples.md",
      "docs/ja/sandbox_agents.md"
    ],
    "repo_inspection_verified": true,
    "review_reasons": [],
    "tag_count_ok": true,
    "unsupported_claims": []
  },
  "schema_version": "0.1",
  "user_assets": {
    "ai_context_pack": {
      "asset_id": "ai_context_pack",
      "filename": "AI_CONTEXT_PACK.md",
      "markdown": "# openai-agents-python - Doramagic AI Context Pack\n\n> 定位：安装前体验与判断资产。它帮助宿主 AI 有一个好的开始，但不代表已经安装、执行或验证目标项目。\n\n## 充分原则\n\n- **充分原则，不是压缩原则**：AI Context Pack 应该充分到让宿主 AI 在开工前理解项目价值、能力边界、使用入口、风险和证据来源；它可以分层组织，但不以最短摘要为目标。\n- **压缩策略**：只压缩噪声和重复内容，不压缩会影响判断和开工质量的上下文。\n\n## 给宿主 AI 的使用方式\n\n你正在读取 Doramagic 为 openai-agents-python 编译的 AI Context Pack。请把它当作开工前上下文：帮助用户理解适合谁、能做什么、如何开始、哪些必须安装后验证、风险在哪里。不要声称你已经安装、运行或执行了目标项目。\n\n## Claim 消费规则\n\n- **事实来源**：Repo Evidence + Claim/Evidence Graph；Human Wiki 只提供显著性、术语和叙事结构。\n- **事实最低状态**：`supported`\n- `supported`：可以作为项目事实使用，但回答中必须引用 claim_id 和证据路径。\n- `weak`：只能作为低置信度线索，必须要求用户继续核实。\n- `inferred`：只能用于风险提示或待确认问题，不能包装成项目事实。\n- `unverified`：不得作为事实使用，应明确说证据不足。\n- `contradicted`：必须展示冲突来源，不得替用户强行选择一个版本。\n\n## 它最适合谁\n\n- **希望把专业流程带进宿主 AI 的用户**：仓库包含 Skill 文档。 证据：`.agents/skills/code-change-verification/SKILL.md`, `.agents/skills/docs-sync/SKILL.md`, `.agents/skills/examples-auto-run/SKILL.md`, `.agents/skills/final-release-review/SKILL.md` 等 Claim：`clm_0003` supported 0.86\n\n## 它能做什么\n\n- **AI Skill / Agent 指令资产库**（可做安装前预览）：项目包含可被宿主 AI 读取的 Skill 或 Agent 指令文件，可用于把专业流程带入 Claude、Codex、Cursor 等宿主。 证据：`.agents/skills/code-change-verification/SKILL.md`, `.agents/skills/docs-sync/SKILL.md`, `.agents/skills/examples-auto-run/SKILL.md`, `.agents/skills/final-release-review/SKILL.md` 等 Claim：`clm_0001` supported 0.86\n- **命令行启动或安装流程**（需要安装后验证）：项目文档中存在可执行命令，真实使用需要在本地或宿主环境中运行这些命令。 证据：`README.md`, `docs/ja/quickstart.md`, `docs/ja/voice/quickstart.md` Claim：`clm_0002` supported 0.86\n\n## 怎么开始\n\n- `pip install openai-agents` 证据：`README.md` Claim：`clm_0004` supported 0.86, `clm_0005` unverified 0.25, `clm_0006` unverified 0.25\n- `pip install 'openai-agents[voice]'` 证据：`docs/ja/voice/quickstart.md` Claim：`clm_0005` unverified 0.25\n- `pip install openai-agents # or` 证据：`docs/ja/quickstart.md` Claim：`clm_0006` unverified 0.25\n\n## 继续前判断卡\n\n- **当前建议**：先做权限沙盒试用\n- **为什么**：项目存在安装命令、宿主配置或本地写入线索，不建议直接进入主力环境，应先在隔离环境试装。\n\n### 30 秒判断\n\n- 
**现在怎么做**：先做权限沙盒试用\n- **最小安全下一步**：先跑 Prompt Preview；若仍要安装，只在隔离环境试装\n- **先别相信**：工具权限边界不能在安装前相信。\n- **继续会触碰**：命令执行、宿主 AI 配置、本地环境或项目文件\n\n### 现在可以相信\n\n- **适合人群线索：希望把专业流程带进宿主 AI 的用户**（supported）：有 supported claim 或项目证据支撑，但仍不等于真实安装效果。 证据：`.agents/skills/code-change-verification/SKILL.md`, `.agents/skills/docs-sync/SKILL.md`, `.agents/skills/examples-auto-run/SKILL.md`, `.agents/skills/final-release-review/SKILL.md` 等 Claim：`clm_0003` supported 0.86\n- **能力存在：AI Skill / Agent 指令资产库**（supported）：可以相信项目包含这类能力线索；是否适合你的具体任务仍要试用或安装后验证。 证据：`.agents/skills/code-change-verification/SKILL.md`, `.agents/skills/docs-sync/SKILL.md`, `.agents/skills/examples-auto-run/SKILL.md`, `.agents/skills/final-release-review/SKILL.md` 等 Claim：`clm_0001` supported 0.86\n- **能力存在：命令行启动或安装流程**（supported）：可以相信项目包含这类能力线索；是否适合你的具体任务仍要试用或安装后验证。 证据：`README.md`, `docs/ja/quickstart.md`, `docs/ja/voice/quickstart.md` Claim：`clm_0002` supported 0.86\n- **存在 Quick Start / 安装命令线索**（supported）：可以相信项目文档出现过启动或安装入口；不要因此直接在主力环境运行。 证据：`README.md` Claim：`clm_0004` supported 0.86, `clm_0005` unverified 0.25, `clm_0006` unverified 0.25\n\n### 现在还不能相信\n\n- **工具权限边界不能在安装前相信。**（unverified）：MCP/tool 类项目通常会触碰文件、网络、浏览器或外部 API，必须真实检查权限和日志。\n- **真实输出质量不能在安装前相信。**（unverified）：Prompt Preview 只能展示引导方式，不能证明真实项目中的结果质量。\n- **宿主 AI 版本兼容性不能在安装前相信。**（unverified）：Claude、Cursor、Codex、Gemini 等宿主加载规则和版本差异必须在真实环境验证。\n- **不会污染现有宿主 AI 行为，不能直接相信。**（inferred）：Skill、plugin、AGENTS/CLAUDE/GEMINI 指令可能改变宿主 AI 的默认行为。 证据：`.agents/skills/code-change-verification/SKILL.md`, `.agents/skills/docs-sync/SKILL.md`, `.agents/skills/examples-auto-run/SKILL.md`, `.agents/skills/final-release-review/SKILL.md` 等\n- **可安全回滚不能默认相信。**（unverified）：除非项目明确提供卸载和恢复说明，否则必须先在隔离环境验证。\n- **真实安装后是否与用户当前宿主 AI 版本兼容？**（unverified）：兼容性只能通过实际宿主环境验证。\n- **项目输出质量是否满足用户具体任务？**（unverified）：安装前预览只能展示流程和边界，不能替代真实评测。\n- **安装命令是否需要网络、权限或全局写入？**（unverified）：这影响企业环境和个人环境的安装风险。 证据：`README.md`\n\n### 继续会触碰什么\n\n- **命令执行**：包管理器、网络下载、本地插件目录、项目配置或用户主目录。 原因：运行第一条命令就可能产生环境改动；必须先判断是否值得跑。 
证据：`README.md`, `docs/ja/quickstart.md`, `docs/ja/voice/quickstart.md`\n- **宿主 AI 配置**：Claude/Codex/Cursor/Gemini/OpenCode 等宿主的 plugin、Skill 或规则加载配置。 原因：宿主配置会改变 AI 后续工作方式，可能和用户已有规则冲突。 证据：`.agents/skills/code-change-verification/SKILL.md`, `.agents/skills/docs-sync/SKILL.md`, `.agents/skills/examples-auto-run/SKILL.md`, `.agents/skills/final-release-review/SKILL.md` 等\n- **本地环境或项目文件**：安装结果、插件缓存、项目配置或本地依赖目录。 原因：安装前无法证明写入范围和回滚方式，需要隔离验证。 证据：`README.md`, `docs/ja/quickstart.md`, `docs/ja/voice/quickstart.md`\n- **宿主 AI 上下文**：AI Context Pack、Prompt Preview、Skill 路由、风险规则和项目事实。 原因：导入上下文会影响宿主 AI 后续判断，必须避免把未验证项包装成事实。\n\n### 最小安全下一步\n\n- **先跑 Prompt Preview**：用安装前交互式试用判断工作方式是否匹配，不需要授权或改环境。（适用：任何项目都适用，尤其是输出质量未知时。）\n- **只在隔离目录或测试账号试装**：避免安装命令污染主力宿主 AI、真实项目或用户主目录。（适用：存在命令执行、插件配置或本地写入线索时。）\n- **先备份宿主 AI 配置**：Skill、plugin、规则文件可能改变 Claude/Cursor/Codex 的默认行为。（适用：存在插件 manifest、Skill 或宿主规则入口时。）\n- **安装后只验证一个最小任务**：先验证加载、兼容、输出质量和回滚，再决定是否深用。（适用：准备从试用进入真实工作流时。）\n\n### 退出方式\n\n- **保留安装前状态**：记录原始宿主配置和项目状态，后续才能判断是否可恢复。\n- **准备移除宿主 plugin / Skill / 规则入口**：如果试装后行为异常，可以把宿主 AI 恢复到试装前状态。\n- **记录安装命令和写入路径**：没有明确卸载说明时，至少要知道哪些目录或配置需要手动清理。\n- **如果没有回滚路径，不进入主力环境**：不可回滚是继续前阻断项，不应靠信任或运气继续。\n\n## 哪些只能预览\n\n- 解释项目适合谁和能做什么\n- 基于项目文档演示典型对话流程\n- 帮助用户判断是否值得安装或继续研究\n\n## 哪些必须安装后验证\n\n- 真实安装 Skill、插件或 CLI\n- 执行脚本、修改本地文件或访问外部服务\n- 验证真实输出质量、性能和兼容性\n\n## 边界与风险判断卡\n\n- **把安装前预览误认为真实运行**：用户可能高估项目已经完成的配置、权限和兼容性验证。 处理方式：明确区分 prompt_preview_can_do 与 runtime_required。 Claim：`clm_0007` inferred 0.45\n- **命令执行会修改本地环境**：安装命令可能写入用户主目录、宿主插件目录或项目配置。 处理方式：先在隔离环境或测试账号中运行。 证据：`README.md`, `docs/ja/quickstart.md`, `docs/ja/voice/quickstart.md` Claim：`clm_0008` supported 0.86\n- **待确认**：真实安装后是否与用户当前宿主 AI 版本兼容？。原因：兼容性只能通过实际宿主环境验证。\n- **待确认**：项目输出质量是否满足用户具体任务？。原因：安装前预览只能展示流程和边界，不能替代真实评测。\n- **待确认**：安装命令是否需要网络、权限或全局写入？。原因：这影响企业环境和个人环境的安装风险。\n\n## 开工前工作上下文\n\n### 加载顺序\n\n- 先读取 how_to_use.host_ai_instruction，建立安装前判断资产的边界。\n- 读取 claim_graph_summary，确认事实来自 Claim/Evidence Graph，而不是 Human Wiki 叙事。\n- 再读取 intended_users、capabilities 和 
quick_start_candidates，判断用户是否匹配。\n- 需要执行具体任务时，优先查 role_skill_index，再查 evidence_index。\n- 遇到真实安装、文件修改、网络访问、性能或兼容性问题时，转入 risk_card 和 boundaries.runtime_required。\n\n### 任务路由\n\n- **AI Skill / Agent 指令资产库**：先基于 role_skill_index / evidence_index 帮用户挑选可用角色、Skill 或工作流。 边界：可做安装前 Prompt 体验。 证据：`.agents/skills/code-change-verification/SKILL.md`, `.agents/skills/docs-sync/SKILL.md`, `.agents/skills/examples-auto-run/SKILL.md`, `.agents/skills/final-release-review/SKILL.md` 等 Claim：`clm_0001` supported 0.86\n- **命令行启动或安装流程**：先说明这是安装后验证能力，再给出安装前检查清单。 边界：必须真实安装或运行后验证。 证据：`README.md`, `docs/ja/quickstart.md`, `docs/ja/voice/quickstart.md` Claim：`clm_0002` supported 0.86\n\n### 上下文规模\n\n- 文件总数：1307\n- 重要文件覆盖：40/1307\n- 证据索引条目：80\n- 角色 / Skill 条目：13\n\n### 证据不足时的处理\n\n- **missing_evidence**：说明证据不足，要求用户提供目标文件、README 段落或安装后验证记录；不要补全事实。\n- **out_of_scope_request**：说明该任务超出当前 AI Context Pack 证据范围，并建议用户先查看 Human Manual 或真实安装后验证。\n- **runtime_request**：给出安装前检查清单和命令来源，但不要替用户执行命令或声称已执行。\n- **source_conflict**：同时展示冲突来源，标记为待核实，不要强行选择一个版本。\n\n## Prompt Recipes\n\n### 适配判断\n\n- 目标：判断这个项目是否适合用户当前任务。\n- 预期输出：适配结论、关键理由、证据引用、安装前可预览内容、必须安装后验证内容、下一步建议。\n\n```text\n请基于 openai-agents-python 的 AI Context Pack，先问我 3 个必要问题，然后判断它是否适合我的任务。回答必须包含：适合谁、能做什么、不能做什么、是否值得安装、证据来自哪里。所有项目事实必须引用 evidence_refs、source_paths 或 claim_id。\n```\n\n### 安装前体验\n\n- 目标：让用户在安装前感受核心工作流，同时避免把预览包装成真实能力或营销承诺。\n- 预期输出：一段带边界标签的体验剧本、安装后验证清单和谨慎建议；不含真实运行承诺或强营销表述。\n\n```text\n请把 openai-agents-python 当作安装前体验资产，而不是已安装工具或真实运行环境。\n\n请严格输出四段：\n1. 先问我 3 个必要问题。\n2. 给出一段“体验剧本”：用 [安装前可预览]、[必须安装后验证]、[证据不足] 三种标签展示它可能如何引导工作流。\n3. 给出安装后验证清单：列出哪些能力只有真实安装、真实宿主加载、真实项目运行后才能确认。\n4. 
给出谨慎建议：只能说“值得继续研究/试装”“先补充信息后再判断”或“不建议继续”，不得替项目背书。\n\n硬性边界：\n- 不要声称已经安装、运行、执行测试、修改文件或产生真实结果。\n- 不要写“自动适配”“确保通过”“完美适配”“强烈建议安装”等承诺性表达。\n- 如果描述安装后的工作方式，必须使用“如果安装成功且宿主正确加载 Skill，它可能会……”这种条件句。\n- 体验剧本只能写成“示例台词/假设流程”：使用“可能会询问/可能会建议/可能会展示”，不要写“已写入、已生成、已通过、正在运行、正在生成”。\n- Prompt Preview 不负责给安装命令；如用户准备试装，只能提示先阅读 Quick Start 和 Risk Card，并在隔离环境验证。\n- 所有项目事实必须来自 supported claim、evidence_refs 或 source_paths；inferred/unverified 只能作风险或待确认项。\n\n```\n\n### 角色 / Skill 选择\n\n- 目标：从项目里的角色或 Skill 中挑选最匹配的资产。\n- 预期输出：候选角色或 Skill 列表，每项包含适用场景、证据路径、风险边界和是否需要安装后验证。\n\n```text\n请读取 role_skill_index，根据我的目标任务推荐 3-5 个最相关的角色或 Skill。每个推荐都要说明适用场景、可能输出、风险边界和 evidence_refs。\n```\n\n### 风险预检\n\n- 目标：安装或引入前识别环境、权限、规则冲突和质量风险。\n- 预期输出：环境、权限、依赖、许可、宿主冲突、质量风险和未知项的检查清单。\n\n```text\n请基于 risk_card、boundaries 和 quick_start_candidates，给我一份安装前风险预检清单。不要替我执行命令，只说明我应该检查什么、为什么检查、失败会有什么影响。\n```\n\n### 宿主 AI 开工指令\n\n- 目标：把项目上下文转成一次对话开始前的宿主 AI 指令。\n- 预期输出：一段边界明确、证据引用明确、适合复制给宿主 AI 的开工前指令。\n\n```text\n请基于 openai-agents-python 的 AI Context Pack，生成一段我可以粘贴给宿主 AI 的开工前指令。这段指令必须遵守 not_runtime=true，不能声称项目已经安装、运行或产生真实结果。\n```\n\n\n## 角色 / Skill 索引\n\n- 共索引 13 个角色 / Skill / 项目文档条目。\n\n- **code-change-verification**（skill）：Run the mandatory verification stack when changes affect runtime code, tests, or build/test behavior in the OpenAI Agents Python repository. 激活提示：当用户任务与“code-change-verification”描述的流程高度相关时，先用它做安装前体验，再决定是否安装。 证据：`.agents/skills/code-change-verification/SKILL.md`\n- **docs-sync**（skill）：Analyze main branch implementation and configuration to find missing, incorrect, or outdated documentation in docs/. Use when asked to audit doc coverage, sync docs with code, or propose doc updates/structure changes. Only update English docs under docs/ and never touch translated docs under docs/ja, docs/ko, or docs/zh. Provide a report and ask for approval before editing docs. 
激活提示：当用户任务与“docs-sync”描述的流程高度相关时，先用它做安装前体验，再决定是否安装。 证据：`.agents/skills/docs-sync/SKILL.md`\n- **examples-auto-run**（skill）：Run python examples in auto mode with logging, rerun helpers, and background control. 激活提示：当用户任务与“examples-auto-run”描述的流程高度相关时，先用它做安装前体验，再决定是否安装。 证据：`.agents/skills/examples-auto-run/SKILL.md`\n- **final-release-review**（skill）：Perform a release-readiness review by locating the previous release tag from remote tags and auditing the diff e.g., v1.2.3... for breaking changes, regressions, improvement opportunities, and risks before releasing openai-agents-python. 激活提示：当用户任务与“final-release-review”描述的流程高度相关时，先用它做安装前体验，再决定是否安装。 证据：`.agents/skills/final-release-review/SKILL.md`\n- **implementation-strategy**（skill）：Decide how to implement runtime and API changes in openai-agents-python before editing code. Use when a task changes exported APIs, runtime behavior, serialized state, tests, or docs and you need to choose the compatibility boundary, whether shims or migrations are warranted, and when unreleased interfaces can be rewritten directly. 激活提示：当用户任务与“implementation-strategy”描述的流程高度相关时，先用它做安装前体验，再决定是否安装。 证据：`.agents/skills/implementation-strategy/SKILL.md`\n- **openai-knowledge**（skill）：Use when working with the OpenAI API Responses API or OpenAI platform features tools, streaming, Realtime API, auth, models, rate limits, MCP and you need authoritative, up-to-date documentation schemas, examples, limits, edge cases . Prefer the OpenAI Developer Documentation MCP server tools when available; otherwise guide the user to enable openaiDeveloperDocs . 激活提示：当用户任务与“openai-knowledge”描述的流程高度相关时，先用它做安装前体验，再决定是否安装。 证据：`.agents/skills/openai-knowledge/SKILL.md`\n- **pr-draft-summary**（skill）：Create the required PR-ready summary block, branch suggestion, title, and draft description for openai-agents-python. 
Use in the final handoff after moderate-or-larger changes to runtime code, tests, examples, build/test configuration, or docs with behavior impact; skip only for trivial or conversation-only tasks, repo-meta/doc-only tasks without behavior impact, or when the user explicitly says not to include the P… 激活提示：当用户任务与“pr-draft-summary”描述的流程高度相关时，先用它做安装前体验，再决定是否安装。 证据：`.agents/skills/pr-draft-summary/SKILL.md`\n- **runtime-behavior-probe**（skill）：Plan and execute runtime-behavior investigations with temporary probe scripts, validation matrices, state controls, and findings-first reports. Use only when the user explicitly invokes this skill to verify actual runtime behavior beyond normal code-level checks, especially to uncover edge cases, undocumented behavior, or common failure modes in local or live integrations. A baseline smoke check is fine as an entry… 激活提示：当用户任务与“runtime-behavior-probe”描述的流程高度相关时，先用它做安装前体验，再决定是否安装。 证据：`.agents/skills/runtime-behavior-probe/SKILL.md`\n- **test-coverage-improver**（skill）：Improve test coverage in the OpenAI Agents Python repository: run make coverage , inspect coverage artifacts, identify low-coverage files, propose high-impact tests, and confirm with the user before writing tests. 激活提示：当用户任务与“test-coverage-improver”描述的流程高度相关时，先用它做安装前体验，再决定是否安装。 证据：`.agents/skills/test-coverage-improver/SKILL.md`\n- **credit-note-fixer**（skill）：Fix the tiny credit-note formatting bug and rerun the exact targeted test command. 激活提示：当用户任务与“credit-note-fixer”描述的流程高度相关时，先用它做安装前体验，再决定是否安装。 证据：`examples/sandbox/docs/skills/credit-note-fixer/SKILL.md`\n- **prior-auth-packet-builder**（skill）：Build a concise prior authorization packet from local case files and payer policy docs. 激活提示：当用户任务与“prior-auth-packet-builder”描述的流程高度相关时，先用它做安装前体验，再决定是否安装。 证据：`examples/sandbox/healthcare_support/skills/prior-auth-packet-builder/SKILL.md`\n- **playwright**（skill）：Use when the task requires capturing or automating a real browser from the terminal. 
激活提示：当用户任务与“playwright”描述的流程高度相关时，先用它做安装前体验，再决定是否安装。 证据：`examples/sandbox/tutorials/vision_website_clone/skills/playwright/SKILL.md`\n- **csv-workbench**（skill）：Analyze CSV files in /mnt/data and return concise numeric summaries. 激活提示：当用户任务与“csv-workbench”描述的流程高度相关时，先用它做安装前体验，再决定是否安装。 证据：`examples/tools/skills/csv-workbench/SKILL.md`\n\n## 证据索引\n\n- 共索引 80 条证据。\n\n- **Agents**（documentation）：Agents are the core building block in your apps. An agent is a large language model LLM configured with instructions, tools, and optional runtime behavior such as handoffs, guardrails, and structured outputs. 证据：`docs/agents.md`\n- **エージェント**（documentation）：エージェントは、アプリにおける中核的な構成要素です。エージェントとは、instructions、ツール、およびハンドオフ、ガードレール、structured outputs などの任意のランタイム動作で設定された大規模言語モデル LLM です。 证据：`docs/ja/agents.md`\n- **에이전트**（documentation）：에이전트는 앱의 핵심 구성 요소입니다. 에이전트는 instructions, tools, 그리고 핸드오프, 가드레일, structured outputs와 같은 선택적 런타임 동작으로 구성된 대규모 언어 모델 LLM 입니다. 证据：`docs/ko/agents.md`\n- **智能体**（documentation）：智能体是应用中的核心构建块。智能体是一个大型语言模型（LLM），配置了instructions、tools，以及可选的运行时行为，例如任务转移、安全防护措施和structured outputs。 证据：`docs/zh/agents.md`\n- **Credit Note Example Repo**（documentation）：This tiny repo exists to support examples/sandbox/docs/coding task.py . 证据：`examples/sandbox/docs/repo/README.md`\n- **Credit Note Fixer**（skill_instruction）：1. Read repo/task.md . 2. Inspect repo/credit note.sh and repo/tests/test credit note.sh . 3. Make the smallest correct change that keeps the output label as credit and the amount positive. If you use apply patch , use workspace-root-relative paths such as repo/credit note.sh and repo/tests/test credit note.sh . 4. Run exactly sh tests/test credit note.sh from repo/ . 5. In the final answer, summarize the bug, the fix, and the exact verification command. 证据：`examples/sandbox/docs/skills/credit-note-fixer/SKILL.md`\n- **Contributor Guide**（documentation）：This guide helps new contributors get started with the OpenAI Agents Python repository. 
It covers repo structure, how to test your work, available utilities, and guidelines for commits and PRs. 证据：`AGENTS.md`\n- **OpenAI Agents SDK ! PyPI https://img.shields.io/pypi/v/openai-agents?label=pypi%20package https://pypi.org/project/open…**（documentation）：OpenAI Agents SDK ! PyPI https://img.shields.io/pypi/v/openai-agents?label=pypi%20package https://pypi.org/project/openai-agents/ 证据：`README.md`\n- **Tests**（documentation）：Before running any tests, make sure you have uv installed and ideally run make sync after . 证据：`tests/README.md`\n- **Common agentic patterns**（documentation）：This folder contains examples of different common patterns for agents. 证据：`examples/agent_patterns/README.md`\n- **Financial Research Agent Example**（documentation）：This example shows how you might compose a richer financial research agent using the Agents SDK. The pattern is similar to the research bot example, but with more specialized sub‑agents and a verification step. 证据：`examples/financial_research_agent/README.md`\n- **MCP Filesystem Example**（documentation）：This example uses the filesystem MCP server https://github.com/modelcontextprotocol/servers/tree/main/src/filesystem , running locally via npx . 证据：`examples/mcp/filesystem_example/README.md`\n- **MCP get all mcp tools Example**（documentation）：Python port of the JS examples/mcp/get-all-mcp-tools-example.ts . It demonstrates: 证据：`examples/mcp/get_all_mcp_tools_example/README.md`\n- **MCP Git Example**（documentation）：This example uses the git MCP server https://github.com/modelcontextprotocol/servers/tree/main/src/git , running locally via uvx . 证据：`examples/mcp/git_example/README.md`\n- **MCP Manager Example FastAPI**（documentation）：This example shows how to use MCPServerManager to keep MCP server lifecycle management in a single task inside a FastAPI app with the Streamable HTTP transport. 
证据：`examples/mcp/manager_example/README.md`\n- **MCP Prompt Server Example**（documentation）：This example uses a local MCP prompt server in server.py server.py . 证据：`examples/mcp/prompt_server/README.md`\n- **MCP SSE Example**（documentation）：This example uses a local SSE server in server.py server.py . 证据：`examples/mcp/sse_example/README.md`\n- **MCP SSE Remote Example**（documentation）：Python port of the JS examples/mcp/sse-example.ts . By default it starts the bundled local SSE MCP server and lets the agent use those tools. Set MCP SSE REMOTE URL to try a compatible remote SSE server instead. 证据：`examples/mcp/sse_remote_example/README.md`\n- **MCP Streamable HTTP Remote Example**（documentation）：Python port of the JS examples/mcp/streamable-http-example.ts . It connects to DeepWiki over the Streamable HTTP transport https://mcp.deepwiki.com/mcp and lets the agent use those tools. 证据：`examples/mcp/streamable_http_remote_example/README.md`\n- **Custom HTTP Client Factory Example**（documentation）：This example demonstrates how to use the new httpx client factory parameter in MCPServerStreamableHttp to configure custom HTTP client behavior for MCP StreamableHTTP connections. 证据：`examples/mcp/streamablehttp_custom_client_example/README.md`\n- **MCP Streamable HTTP Example**（documentation）：This example uses a local Streamable HTTP server in server.py server.py . 证据：`examples/mcp/streamablehttp_example/README.md`\n- **MCP Tool Filter Example**（documentation）：Python port of the JS examples/mcp/tool-filter-example.ts . It shows how to: 证据：`examples/mcp/tool_filter_example/README.md`\n- **Model provider examples**（documentation）：The examples in this directory show how to route models through adapter layers such as LiteLLM and any-llm. The default examples all use OpenRouter so you only need one API key: 证据：`examples/model_providers/README.md`\n- **Realtime Demo App**（documentation）：A web-based realtime voice assistant demo with a FastAPI backend and HTML/JS frontend. 
证据：`examples/realtime/app/README.md`\n- **Realtime Twilio Integration**（documentation）：This example demonstrates how to connect the OpenAI Realtime API to a phone call using Twilio's Media Streams. The server handles incoming phone calls and streams audio between Twilio and the OpenAI Realtime API, enabling real-time voice conversations with an AI agent over the phone. 证据：`examples/realtime/twilio/README.md`\n- **Twilio SIP Realtime Example**（documentation）：This example shows how to handle OpenAI Realtime SIP calls with the Agents SDK. Incoming calls are accepted through the Realtime Calls API, a triage agent answers with a fixed greeting, and handoffs route the caller to specialist agents FAQ lookup and record updates similar to the realtime UI demo. 证据：`examples/realtime/twilio_sip/README.md`\n- **Research bot**（documentation）：This is a simple example of a multi-agent research bot. To run it: 证据：`examples/research_bot/README.md`\n- **Sandbox examples**（documentation）：These examples show how to run agents with an isolated workspace. Start with the small API examples when you want the smallest surface area, or use the tutorial scaffold when you want the shared layout for guided sandbox tutorials. 证据：`examples/sandbox/README.md`\n- **Cloud Sandbox Extension Examples**（documentation）：These examples are for manual verification of the cloud sandbox backends that live under agents.extensions.sandbox . 证据：`examples/sandbox/extensions/README.md`\n- **NASA Spending Text-to-SQL Agent**（documentation）：Multi-turn conversational agent that translates natural-language questions about NASA federal spending into SQL queries, executes them against a local SQLite database, and returns structured tabular results. 证据：`examples/sandbox/extensions/daytona/usaspending_text2sql/README.md`\n- **Temporal Sandbox Agent**（documentation）：A conversational coding agent that runs as a durable Temporal workflow with support for multiple sandbox backends Daytona, Docker, E2B, local unix . 
证据：`examples/sandbox/extensions/temporal/README.md`\n- **Healthcare support**（documentation）：This example shows how to build a healthcare support workflow with Agents SDK using both standard agents and a sandbox agent. The scenario is intentionally synthetic and generic: a patient asks a billing or coverage question, the workflow checks local records, inspects policy documents in an isolated sandbox workspace, writes support artifacts, and optionally routes one ambiguous case to a human reviewer. 证据：`examples/sandbox/healthcare_support/README.md`\n- **Dataroom metric extract**（documentation）：Extract financial metrics from a synthetic 10-K packet, write the resulting table as CSV or JSONL, then validate the generated artifact with a deterministic eval script. 证据：`examples/sandbox/tutorials/dataroom_metric_extract/README.md`\n- **Dataroom Q&A**（documentation）：Answer grounded financial questions over a synthetic 10-K packet. 证据：`examples/sandbox/tutorials/dataroom_qa/README.md`\n- **Repo code review**（documentation）：Review a small public git repository, run its tests, leave line-level review comments in the structured output, and write a patch-oriented review artifact. 证据：`examples/sandbox/tutorials/repo_code_review/README.md`\n- **Sandbox resume**（documentation）：This example shows a small sandbox resume flow with AGENTS.md mounted in the sandbox and loaded into the agent instructions. It runs in two steps: first it builds the app and smoke tests it, then it serializes the sandbox session state, resumes the sandbox, and adds pytest coverage. 证据：`examples/sandbox/tutorials/sandbox_resume/README.md`\n- **Vision UI reproduction**（documentation）：Use the sandbox view image tool to inspect a reference app screenshot, then reproduce the visible screen as a static HTML/CSS artifact. This is a narrow UI repro target for vision and screenshot-debugging; it is not a web-app scaffold. 
证据：`examples/sandbox/tutorials/vision_website_clone/README.md`\n- **Static voice demo**（documentation）：This demo operates by capturing a recording, then running a voice pipeline on it. 证据：`examples/voice/static/README.md`\n- **Streamed voice demo**（documentation）：This is an interactive demo, where you can talk to an Agent conversationally. It uses the voice pipeline's built in turn detection feature, so if you stop speaking the Agent responds. 证据：`examples/voice/streamed/README.md`\n- **Realtime**（documentation）：Realtime agents are in beta: expect some breaking changes over the next few weeks as we find issues and fix them. 证据：`src/agents/realtime/README.md`\n- **Code Change Verification**（skill_instruction）：Ensure work is only marked complete after formatting, linting, type checking, and tests pass. Use this skill when changes affect runtime code, tests, or build/test configuration. You can skip it for docs-only or repository metadata unless a user asks for the full stack. 证据：`.agents/skills/code-change-verification/SKILL.md`\n- **Docs Sync**（skill_instruction）：Identify doc coverage gaps and inaccuracies by comparing main branch features and configuration options against the current docs structure, then propose targeted improvements. 证据：`.agents/skills/docs-sync/SKILL.md`\n- **examples-auto-run**（skill_instruction）：- Runs uv run examples/run examples.py with: - Optional dependency extras enabled by default: litellm , any-llm , sqlalchemy , redis , blaxel , modal , runloop , and temporal . - EXAMPLES INTERACTIVE MODE=auto auto-input/auto-approve . - Per-example logs under .tmp/examples-start-logs/ . - Main summary log path passed via --main-log also under .tmp/examples-start-logs/ . - Generates a rerun list of failures at .tmp/examples-rerun.txt when --write-rerun is set. - Provides start/stop/status/logs/tail/collect/rerun helpers via run.sh . - Background option keeps the process running with a pidfile; stop cleans it up. 
证据：`.agents/skills/examples-auto-run/SKILL.md`\n- **Final Release Review**（skill_instruction）：Use this skill when validating the latest release candidate commit default tip of origin/main for release. It guides you to fetch remote tags, pick the previous release tag, and thoroughly inspect the BASE TAG...TARGET diff for breaking changes, introduced bugs/regressions, improvement opportunities, and release risks. 证据：`.agents/skills/final-release-review/SKILL.md`\n- **Implementation Strategy**（skill_instruction）：Use this skill before editing code when the task changes runtime behavior or anything that might look like a compatibility concern. The goal is to keep implementations simple while protecting real released contracts. 证据：`.agents/skills/implementation-strategy/SKILL.md`\n- **OpenAI Knowledge**（skill_instruction）：Use the OpenAI Developer Documentation MCP server to search and fetch exact docs markdown , then base your answer on that text instead of guessing. 证据：`.agents/skills/openai-knowledge/SKILL.md`\n- **PR Draft Summary**（skill_instruction）：Purpose Produce the PR-ready summary required in this repository after substantive code work is complete: a concise summary plus a PR-ready title and draft description that begins with \"This pull request ...\". The block should be ready to paste into a PR for openai-agents-python. 证据：`.agents/skills/pr-draft-summary/SKILL.md`\n- **Runtime Behavior Probe**（skill_instruction）：Use this skill to investigate real runtime behavior, not to restate code or documentation. Start by planning the investigation, then execute a case matrix, record observed behavior, and report both the findings and the method used to obtain them. 证据：`.agents/skills/runtime-behavior-probe/SKILL.md`\n- **Prior Auth Packet Builder**（skill_instruction）：Use this skill when a case requires prior authorization review, referral validation, imaging review, or payer-specific policy checks. 
证据：`examples/sandbox/healthcare_support/skills/prior-auth-packet-builder/SKILL.md`\n- **Playwright**（skill_instruction）：Use Playwright to capture the static site directly. Do not start a server for this example. 证据：`examples/sandbox/tutorials/vision_website_clone/skills/playwright/SKILL.md`\n- **CSV Workbench**（skill_instruction）：Use this skill when the user asks for quick analysis of tabular data. 证据：`examples/tools/skills/csv-workbench/SKILL.md`\n- **Test Coverage Improver**（skill_instruction）：Use this skill whenever coverage needs assessment or improvement coverage regressions, failing thresholds, or user requests for stronger tests . It runs the coverage suite, analyzes results, highlights the biggest gaps, and prepares test additions while confirming with the user before changing code. 证据：`.agents/skills/test-coverage-improver/SKILL.md`\n- **License**（source_file）：Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files the \"Software\" , to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: 证据：`LICENSE`\n- **Configuration**（documentation）：This page covers SDK-wide defaults that you usually set once during application startup, such as the default OpenAI key or client, the default OpenAI API shape, tracing export defaults, and logging behavior. 证据：`docs/config.md`\n- **Context management**（documentation）：Context is an overloaded term. There are two main classes of context you might care about: 证据：`docs/context.md`\n- **Examples**（documentation）：Check out a variety of sample implementations of the SDK in the examples section of the repo https://github.com/openai/openai-agents-python/tree/main/examples . 
The examples are organized into several categories that demonstrate different patterns and capabilities. 证据：`docs/examples.md`\n- **Guardrails**（documentation）：Guardrails enable you to do checks and validations of user input and agent output. For example, imagine you have an agent that uses a very smart and hence slow/expensive model to help with customer requests. You wouldn't want malicious users to ask the model to help them with their math homework. So, you can run a guardrail with a fast/cheap model. If the guardrail detects malicious usage, it can immediately raise an error and prevent the expensive model from running, saving you time and money when using blocking guardrails; for parallel guardrails, the expensive model may have already started running before the guardrail completes. See \"Execution modes\" below for details . 证据：`docs/guardrails.md`\n- **Handoffs**（documentation）：Handoffs allow an agent to delegate tasks to another agent. This is particularly useful in scenarios where different agents specialize in distinct areas. For example, a customer support app might have agents that each specifically handle tasks like order status, refunds, FAQs, etc. 证据：`docs/handoffs.md`\n- **Human-in-the-loop**（documentation）：Use the human-in-the-loop HITL flow to pause agent execution until a person approves or rejects sensitive tool calls. Tools declare when they need approval, run results surface pending approvals as interruptions, and RunState lets you serialize and resume runs after decisions are made. 证据：`docs/human_in_the_loop.md`\n- **OpenAI Agents SDK**（documentation）：The OpenAI Agents SDK https://github.com/openai/openai-agents-python enables you to build agentic AI apps in a lightweight, easy-to-use package with very few abstractions. It's a production-ready upgrade of our previous experimentation for agents, Swarm https://github.com/openai/swarm/tree/main . 
The Agents SDK has a very small set of primitives: 证据：`docs/index.md`\n- 其余 20 条证据见 `AI_CONTEXT_PACK.json` 或 `EVIDENCE_INDEX.json`。\n\n## 宿主 AI 必须遵守的规则\n\n- **把本资产当作开工前上下文，而不是运行环境。**：AI Context Pack 只包含证据化项目理解，不包含目标项目的可执行状态。 证据：`docs/agents.md`, `docs/ja/agents.md`, `docs/ko/agents.md`\n- **回答用户时区分可预览内容与必须安装后才能验证的内容。**：安装前体验的消费者价值来自降低误装和误判，而不是伪装成真实运行。 证据：`docs/agents.md`, `docs/ja/agents.md`, `docs/ko/agents.md`\n\n## 用户开工前应该回答的问题\n\n- 你准备在哪个宿主 AI 或本地环境中使用它？\n- 你只是想先体验工作流，还是准备真实安装？\n- 你最在意的是安装成本、输出质量、还是和现有规则的冲突？\n\n## 验收标准\n\n- 所有能力声明都能回指到 evidence_refs 中的文件路径。\n- AI_CONTEXT_PACK.md 没有把预览包装成真实运行。\n- 用户能在 3 分钟内看懂适合谁、能做什么、如何开始和风险边界。\n\n---\n\n## Doramagic Context Augmentation\n\n下面内容用于强化 Repomix/AI Context Pack 主体。Human Manual 只提供阅读骨架；踩坑日志会被转成宿主 AI 必须遵守的工作约束。\n\n## Human Manual 骨架\n\n使用规则：这里只是项目阅读路线和显著性信号，不是事实权威。具体事实仍必须回到 repo evidence / Claim Graph。\n\n宿主 AI 硬性规则：\n- 不得把页标题、章节顺序、摘要或 importance 当作项目事实证据。\n- 解释 Human Manual 骨架时，必须明确说它只是阅读路线/显著性信号。\n- 能力、安装、兼容性、运行状态和风险判断必须引用 repo evidence、source path 或 Claim Graph。\n\n- **OpenAI Agents SDK Overview**：importance `high`\n  - source_paths: README.md, src/agents/__init__.py, src/agents/version.py\n- **Installation and Setup**：importance `high`\n  - source_paths: pyproject.toml, src/agents/_config.py, src/agents/run_config.py\n- **Examples Index**：importance `high`\n  - source_paths: examples/basic/hello_world.py, examples/agent_patterns/agents_as_tools.py, examples/sandbox/basic.py, examples/voice/streamed/main.py\n- **Agents**：importance `high`\n  - source_paths: src/agents/agent.py, src/agents/lifecycle.py, src/agents/agent_output.py, src/agents/items.py, src/agents/function_schema.py\n- **Tools**：importance `high`\n  - source_paths: src/agents/tool.py, src/agents/tool_context.py, src/agents/agent_tool_state.py, src/agents/editor.py, src/agents/computer.py\n- **Guardrails**：importance `medium`\n  - source_paths: src/agents/guardrail.py, src/agents/tool_guardrails.py, src/agents/run_internal/guardrails.py\n- 
**Handoffs**：importance `medium`\n  - source_paths: src/agents/handoffs/__init__.py, src/agents/handoffs/history.py, src/agents/extensions/handoff_filters.py, src/agents/extensions/handoff_prompt.py\n- **Agents as Tools**：importance `medium`\n  - source_paths: examples/agent_patterns/agents_as_tools.py, examples/agent_patterns/agents_as_tools_conditional.py, examples/agent_patterns/agents_as_tools_structured.py\n\n## Repo Inspection Evidence / 源码检查证据\n\n- repo_clone_verified: true\n- repo_inspection_verified: true\n- repo_commit: `4bd459e403ac826c87b17fef8ffcbdf42a70b09a`\n- inspected_files: `pyproject.toml`, `README.md`, `uv.lock`, `docs/tracing.md`, `docs/tools.md`, `docs/repl.md`, `docs/human_in_the_loop.md`, `docs/index.md`, `docs/context.md`, `docs/quickstart.md`, `docs/streaming.md`, `docs/usage.md`, `docs/multi_agent.md`, `docs/examples.md`, `docs/sandbox_agents.md`, `docs/results.md`, `docs/running_agents.md`, `docs/agents.md`, `docs/handoffs.md`, `docs/mcp.md`\n\n宿主 AI 硬性规则：\n- 没有 repo_clone_verified=true 时，不得声称已经读过源码。\n- 没有 repo_inspection_verified=true 时，不得把 README/docs/package 文件判断写成事实。\n- 没有 quick_start_verified=true 时，不得声称 Quick Start 已跑通。\n\n## Doramagic Pitfall Constraints / 踩坑约束\n\n这些规则来自 Doramagic 发现、验证或编译过程中的项目专属坑点。宿主 AI 必须把它们当作工作约束，而不是普通说明文字。\n\n### Constraint 1: 仓库名和安装名不一致\n\n- Trigger: 仓库名 `openai-agents-python` 与安装入口 `openai-agents` 不完全一致。\n- Host AI rule: 在 npm/PyPI/GitHub 上确认包名映射和官方 README 说明。\n- Why it matters: 用户照着仓库名搜索包或照着包名找仓库时容易走错入口。\n- Evidence: identity.distribution | github_repo:946380199 | https://github.com/openai/openai-agents-python | repo=openai-agents-python; install=openai-agents\n- Hard boundary: 不要把这个坑点包装成已解决、已验证或可忽略，除非后续验证证据明确证明它已经关闭。\n\n### Constraint 2: 来源证据：AdvancedSQLiteSession.delete_branch() leaves branch-only messages in the base table\n\n- Trigger: GitHub 社区证据显示该项目存在一个配置相关的待验证问题：AdvancedSQLiteSession.delete_branch() leaves branch-only messages in the base table\n- Host AI rule: 来源问题仍为 open，Pack Agent 
需要复核是否仍影响当前版本。\n- Why it matters: 可能增加新用户试用和生产接入成本。\n- Evidence: community_evidence:github | cevd_d867c75f80af49c9968398851ff8bf6a | https://github.com/openai/openai-agents-python/issues/3346 | 来源讨论提到 python 相关条件，需在安装/试用前复核。\n- Hard boundary: 不要把这个坑点包装成已解决、已验证或可忽略，除非后续验证证据明确证明它已经关闭。\n\n### Constraint 3: 来源证据：Clarify whether retry-after delays should respect retry max_delay\n\n- Trigger: GitHub 社区证据显示该项目存在一个配置相关的待验证问题：Clarify whether retry-after delays should respect retry max_delay\n- Host AI rule: 来源显示可能已有修复、规避或版本变化，说明书中必须标注适用版本。\n- Why it matters: 可能增加新用户试用和生产接入成本。\n- Evidence: community_evidence:github | cevd_f486d2247bf24df8bbc7a2bd6fddbd65 | https://github.com/openai/openai-agents-python/issues/3266 | 来源讨论提到 python 相关条件，需在安装/试用前复核。\n- Hard boundary: 不要把这个坑点包装成已解决、已验证或可忽略，除非后续验证证据明确证明它已经关闭。\n\n### Constraint 4: 来源证据：OpenAIConversationsSession persists empty reasoning item {\"type\":\"reasoning\",\"summary\":[]} and Conversations API reject…\n\n- Trigger: GitHub 社区证据显示该项目存在一个配置相关的待验证问题：OpenAIConversationsSession persists empty reasoning item {\"type\":\"reasoning\",\"summary\":[]} and Conversations API rejects it as invalid\n- Host AI rule: 来源问题仍为 open，Pack Agent 需要复核是否仍影响当前版本。\n- Why it matters: 可能增加新用户试用和生产接入成本。\n- Evidence: community_evidence:github | cevd_d6bad5c23bf3457eb546c22a1636cc26 | https://github.com/openai/openai-agents-python/issues/3268 | 来源讨论提到 python 相关条件，需在安装/试用前复核。\n- Hard boundary: 不要把这个坑点包装成已解决、已验证或可忽略，除非后续验证证据明确证明它已经关闭。\n\n### Constraint 5: 来源证据：Tracing shutdown cannot interrupt exporter retry backoff\n\n- Trigger: GitHub 社区证据显示该项目存在一个配置相关的待验证问题：Tracing shutdown cannot interrupt exporter retry backoff\n- Host AI rule: 来源问题仍为 open，Pack Agent 需要复核是否仍影响当前版本。\n- Why it matters: 可能阻塞安装或首次运行。\n- Evidence: community_evidence:github | cevd_e1ceae098cf84c8aafae7082b13c5345 | https://github.com/openai/openai-agents-python/issues/3354 | 来源讨论提到 python 相关条件，需在安装/试用前复核。\n- Hard boundary: 不要把这个坑点包装成已解决、已验证或可忽略，除非后续验证证据明确证明它已经关闭。\n\n### Constraint 6: 
来源证据：v0.15.2\n\n- Trigger: GitHub 社区证据显示该项目存在一个配置相关的待验证问题：v0.15.2\n- Host AI rule: 来源显示可能已有修复、规避或版本变化，说明书中必须标注适用版本。\n- Why it matters: 可能增加新用户试用和生产接入成本。\n- Evidence: community_evidence:github | cevd_b73472b5ae90447199984775aacdca67 | https://github.com/openai/openai-agents-python/releases/tag/v0.15.2 | 来源讨论提到 python 相关条件，需在安装/试用前复核。\n- Hard boundary: 不要把这个坑点包装成已解决、已验证或可忽略，除非后续验证证据明确证明它已经关闭。\n\n### Constraint 7: 来源证据：v0.15.3\n\n- Trigger: GitHub 社区证据显示该项目存在一个配置相关的待验证问题：v0.15.3\n- Host AI rule: 来源显示可能已有修复、规避或版本变化，说明书中必须标注适用版本。\n- Why it matters: 可能增加新用户试用和生产接入成本。\n- Evidence: community_evidence:github | cevd_7e05a382001a4d07b74eda1e1316320b | https://github.com/openai/openai-agents-python/releases/tag/v0.15.3 | 来源讨论提到 python 相关条件，需在安装/试用前复核。\n- Hard boundary: 不要把这个坑点包装成已解决、已验证或可忽略，除非后续验证证据明确证明它已经关闭。\n\n### Constraint 8: 来源证据：v0.16.1\n\n- Trigger: GitHub 社区证据显示该项目存在一个配置相关的待验证问题：v0.16.1\n- Host AI rule: 来源显示可能已有修复、规避或版本变化，说明书中必须标注适用版本。\n- Why it matters: 可能增加新用户试用和生产接入成本。\n- Evidence: community_evidence:github | cevd_44335088ff52486e9f2f41f72a274c35 | https://github.com/openai/openai-agents-python/releases/tag/v0.16.1 | 来源讨论提到 python 相关条件，需在安装/试用前复核。\n- Hard boundary: 不要把这个坑点包装成已解决、已验证或可忽略，除非后续验证证据明确证明它已经关闭。\n\n### Constraint 9: 来源证据：v0.17.0\n\n- Trigger: GitHub 社区证据显示该项目存在一个配置相关的待验证问题：v0.17.0\n- Host AI rule: 来源显示可能已有修复、规避或版本变化，说明书中必须标注适用版本。\n- Why it matters: 可能增加新用户试用和生产接入成本。\n- Evidence: community_evidence:github | cevd_86b81f310a6e45feadc65196a057b23b | https://github.com/openai/openai-agents-python/releases/tag/v0.17.0 | 来源讨论提到 python 相关条件，需在安装/试用前复核。\n- Hard boundary: 不要把这个坑点包装成已解决、已验证或可忽略，除非后续验证证据明确证明它已经关闭。\n\n### Constraint 10: 来源证据：v0.15.1\n\n- Trigger: GitHub 社区证据显示该项目存在一个能力理解相关的待验证问题：v0.15.1\n- Host AI rule: 来源显示可能已有修复、规避或版本变化，说明书中必须标注适用版本。\n- Why it matters: 可能增加新用户试用和生产接入成本。\n- Evidence: community_evidence:github | cevd_4c70d563ac704aeaa14b8e2c49976bc5 | https://github.com/openai/openai-agents-python/releases/tag/v0.15.1 | 来源讨论提到 python 
相关条件，需在安装/试用前复核。\n- Hard boundary: 不要把这个坑点包装成已解决、已验证或可忽略，除非后续验证证据明确证明它已经关闭。\n",
      "summary": "给宿主 AI 的上下文和工作边界。",
      "title": "AI Context Pack / 带给我的 AI"
    },
    "boundary_risk_card": {
      "asset_id": "boundary_risk_card",
      "filename": "BOUNDARY_RISK_CARD.md",
      "markdown": "# Boundary & Risk Card / 安装前决策卡\n\n项目：openai/openai-agents-python\n\n## Doramagic 试用结论\n\n当前结论：可以进入发布前推荐检查；首次使用仍应从最小权限、临时目录和可回滚配置开始。\n\n## 用户现在可以做\n\n- 可以先阅读 Human Manual，理解项目目的和主要工作流。\n- 可以复制 Prompt Preview 做安装前体验；这只验证交互感，不代表真实运行。\n- 可以把官方 Quick Start 命令放到隔离环境中验证，不要直接进主力环境。\n\n## 现在不要做\n\n- 不要把 Prompt Preview 当成项目实际运行结果。\n- 不要把 metadata-only validation 当成沙箱安装验证。\n- 不要把未验证能力写成“已支持、已跑通、可放心安装”。\n- 不要在首次试用时交出生产数据、私人文件、真实密钥或主力配置目录。\n\n## 安装前检查\n\n- 宿主 AI 是否匹配：chatgpt\n- 官方安装入口状态：已发现官方入口\n- 是否在临时目录、临时宿主或容器中验证：必须是\n- 是否能回滚配置改动：必须能\n- 是否需要 API Key、网络访问、读写文件或修改宿主配置：未确认前按高风险处理\n- 是否记录了安装命令、实际输出和失败日志：必须记录\n\n## 当前阻塞项\n\n- 无阻塞项。\n\n## 项目专属踩坑\n\n- 仓库名和安装名不一致（medium）：用户照着仓库名搜索包或照着包名找仓库时容易走错入口。 建议检查：在 npm/PyPI/GitHub 上确认包名映射和官方 README 说明。\n- 来源证据：AdvancedSQLiteSession.delete_branch() leaves branch-only messages in the base table（medium）：可能增加新用户试用和生产接入成本。 建议检查：来源问题仍为 open，Pack Agent 需要复核是否仍影响当前版本。\n- 来源证据：Clarify whether retry-after delays should respect retry max_delay（medium）：可能增加新用户试用和生产接入成本。 建议检查：来源显示可能已有修复、规避或版本变化，说明书中必须标注适用版本。\n- 来源证据：OpenAIConversationsSession persists empty reasoning item {\"type\":\"reasoning\",\"summary\":[]} and Conversations API reject…（medium）：可能增加新用户试用和生产接入成本。 建议检查：来源问题仍为 open，Pack Agent 需要复核是否仍影响当前版本。\n- 来源证据：Tracing shutdown cannot interrupt exporter retry backoff（medium）：可能阻塞安装或首次运行。 建议检查：来源问题仍为 open，Pack Agent 需要复核是否仍影响当前版本。\n\n## 风险与权限提示\n\n- no_demo: medium\n\n## 证据缺口\n\n- 暂未发现结构化证据缺口。\n",
      "summary": "安装、权限、验证和推荐前风险。",
      "title": "Boundary & Risk Card / 边界与风险卡"
    },
    "human_manual": {
      "asset_id": "human_manual",
      "filename": "HUMAN_MANUAL.md",
      "markdown": "# https://github.com/openai/openai-agents-python 项目说明书\n\n生成时间：2026-05-16 04:52:19 UTC\n\n## 目录\n\n- [OpenAI Agents SDK Overview](#overview)\n- [Installation and Setup](#installation)\n- [Examples Index](#examples-index)\n- [Agents](#agents)\n- [Tools](#tools)\n- [Guardrails](#guardrails)\n- [Handoffs](#handoffs)\n- [Agents as Tools](#agents-as-tools)\n- [Run Loop and Execution](#run-loop)\n- [Sessions and Memory](#sessions)\n\n<a id='overview'></a>\n\n## OpenAI Agents SDK Overview\n\n### 相关页面\n\n相关主题：[Installation and Setup](#installation), [Agents](#agents), [Tools](#tools)\n\n<details>\n<summary>相关源码文件</summary>\n\n以下源码文件用于生成本页说明：\n\n- [README.md](https://github.com/openai/openai-agents-python/blob/main/README.md)\n- [src/agents/__init__.py](https://github.com/openai/openai-agents-python/blob/main/src/agents/__init__.py)\n- [src/agents/run.py](https://github.com/openai/openai-agents-python/blob/main/src/agents/run.py)\n- [src/agents/run_internal/turn_resolution.py](https://github.com/openai/openai-agents-python/blob/main/src/agents/run_internal/turn_resolution.py)\n- [src/agents/handoffs/__init__.py](https://github.com/openai/openai-agents-python/blob/main/src/agents/handoffs/__init__.py)\n- [src/agents/items.py](https://github.com/openai/openai-agents-python/blob/main/src/agents/items.py)\n- [src/agents/extensions/visualization.py](https://github.com/openai/openai-agents-python/blob/main/src/agents/extensions/visualization.py)\n- [src/agents/mcp/server.py](https://github.com/openai/openai-agents-python/blob/main/src/agents/mcp/server.py)\n</details>\n\n# OpenAI Agents SDK Overview\n\n## Introduction\n\nThe OpenAI Agents SDK is a Python framework designed to build multi-agent systems that can interact with users, execute tools, and delegate tasks to specialized sub-agents. 
The SDK provides a structured approach to orchestrating agent conversations, managing tool execution, handling handoffs between agents, and maintaining conversation state throughout the execution lifecycle.\n\nThe SDK's core responsibility is to manage the runtime execution of agents, handling the turn-based conversation flow, tool invocations, guardrail evaluations, and multi-agent handoffs within a single unified execution model. 资料来源：[src/agents/__init__.py]()\n\n## Architecture Overview\n\nThe SDK follows a layered architecture that separates concerns between agent definition, runtime execution, and tool/mcp integrations.\n\n```mermaid\ngraph TD\n    A[User Input] --> B[Runner]\n    B --> C[Agent]\n    C --> D[Handoffs]\n    C --> E[Tools]\n    C --> F[Guardrails]\n    D --> C\n    D --> G[Sub-Agent]\n    E --> H[MCP Servers]\n    F --> I[Input/Output Guards]\n    G --> C\n    B --> J[Session Persistence]\n    B --> K[Tracing]\n```\n\n### Core Components\n\n| Component | Purpose | Location |\n|-----------|---------|----------|\n| `Agent` | Defines agent behavior, tools, handoffs, and instructions | `src/agents/__init__.py` |\n| `Runner` | Executes agents and manages conversation flow | `src/agents/run.py` |\n| `Handoff` | Enables transfer of control between agents | `src/agents/handoffs/__init__.py` |\n| `MCPServer` | Provides Model Context Protocol server abstraction | `src/agents/mcp/server.py` |\n| `ItemHelpers` | Utility for extracting content from conversation items | `src/agents/items.py` |\n\n## Agent System\n\n### Agent Definition\n\nAgents are the fundamental unit of computation in the SDK. 
An agent encapsulates:\n\n- **Instructions**: The system prompt that defines the agent's role and behavior\n- **Tools**: A list of callable tools the agent can invoke\n- **Handoffs**: Definitions for transferring control to other agents\n- **Input Guardrails**: Pre-processing validation before agent execution\n- **Output Guardrails**: Post-processing validation of agent responses\n\n```mermaid\ngraph LR\n    A[Agent] --> B[Instructions]\n    A --> C[Tools]\n    A --> D[Handoffs]\n    A --> E[Guardrails]\n```\n\n### Agent Execution Flow\n\nThe execution follows a turn-based model where each turn processes user input, generates model responses, executes tools, and evaluates handoffs until a final response is produced.\n\n```mermaid\nsequenceDiagram\n    participant User\n    participant Runner\n    participant Agent\n    participant Tools\n    participant Handoffs\n\n    User->>Runner: User Input\n    Runner->>Agent: Process Turn\n    Agent->>Agent: Generate Response\n    alt Tool Call\n        Agent->>Tools: Execute Tool\n        Tools-->>Agent: Tool Result\n    end\n    alt Handoff\n        Agent->>Handoffs: Request Handoff\n        Handoffs->>Agent: Switch Agent\n    end\n    Agent-->>Runner: Final Output\n    Runner-->>User: Response\n```\n\n## Handoffs System\n\nThe handoff system enables agents to delegate conversations to other specialized agents while preserving conversation context. 
Each handoff defines:\n\n| Property | Type | Description |\n|----------|------|-------------|\n| `name` | `str` | Unique identifier for the handoff tool |\n| `tool_name` | `str` | Name exposed to the model for invoking |\n| `tool_description` | `str` | Description shown to the model |\n| `input_json_schema` | `dict` | JSON schema for handoff arguments |\n| `on_invoke_handoff` | `Callable` | Function that returns the target agent |\n| `input_filter` | `HandoffInputFilter` | Optional filter for conversation context |\n\n资料来源：[src/agents/handoffs/__init__.py]()\n\n### Handoff Input Filtering\n\nBy default, the new agent receives the entire conversation history. The `input_filter` function allows customization of what context is passed to the target agent:\n\n```python\ninput_filter: HandoffInputFilter | None = None\n\"\"\"A function that filters the inputs that are passed to the next agent.\"\"\"\n```\n\n## Turn Resolution\n\nThe turn resolution system handles the complexity of multi-step agent interactions within a single turn. This includes managing pre-step items, new step items, tool results, guardrail evaluations, and handoff transitions.\n\n### Turn Resolution States\n\n```mermaid\nstateDiagram-v2\n    [*] --> InputGuardrails: Input Received\n    InputGuardrails --> ModelResponse: Passed\n    ModelResponse --> ToolExecution: Tool Call\n    ModelResponse --> Handoff: Agent Switch\n    ModelResponse --> FinalOutput: Direct Response\n    ToolExecution --> ModelResponse: More Tools\n    ToolExecution --> Handoff: Switch During Tool\n    ToolExecution --> FinalOutput: Complete\n    Handoff --> InputGuardrails: New Agent\n    FinalOutput --> [*]\n```\n\n### Key Resolution Functions\n\nThe turn resolution process evaluates several conditions:\n\n1. **Tool Input Guardrail Results**: Validation before tool execution\n2. **Function Results**: Output from tool invocations\n3. **Tool Output Guardrail Results**: Validation after tool execution\n4. 
**Handoff Evaluation**: Check for agent transfer requests\n\n资料来源：[src/agents/run_internal/turn_resolution.py]()\n\n## Tool Execution and Guardrails\n\n### Guardrail System\n\nThe SDK implements a two-layer guardrail system:\n\n| Guardrail Type | Timing | Purpose |\n|----------------|--------|---------|\n| Input Guardrails | Before agent processes input | Validate and sanitize user input |\n| Output Guardrails | After agent generates response | Validate response content |\n\n### Tool Use Tracking\n\nTools are tracked throughout execution to maintain state and enable:\n\n- Streaming output collection\n- Refusal detection\n- Error handling\n- Output validation\n\n```mermaid\ngraph TD\n    A[Tool Call] --> B{Input Guardrails}\n    B -->|Pass| C[Execute Tool]\n    B -->|Fail| D[Reject]\n    C --> E[Tool Result]\n    E --> F{Output Guardrails}\n    F -->|Pass| G[Continue]\n    F -->|Fail| H[Error Response]\n```\n\n## Model Context Protocol (MCP) Integration\n\nThe SDK provides a Python abstraction for MCP servers through the `MCPServer` base class. 
This enables agents to interact with external MCP-capable tools and services.\n\n### MCPServer Base Class\n\nThe `MCPServer` class provides the foundation for MCP protocol implementation with methods for:\n\n- **Resources**: `list_resources()`, `list_resource_templates()`, `read_resource()`\n- **Tools**: Tool invocation and management\n- **Prompts**: Server-provided prompt templates\n\n资料来源：[src/agents/mcp/server.py]()\n\n### Require Approval Settings\n\nMCP tools support granular approval controls:\n\n| Setting | Behavior |\n|---------|----------|\n| `RequireApprovalSetting.NEVER` | Always auto-approve |\n| `RequireApprovalSetting.ALWAYS` | Always require approval |\n| `RequireApprovalSetting.UNDETERMINED` | Use default behavior |\n\n## Session and State Management\n\n### Run State\n\nThe `run_state` object tracks execution context including:\n\n- Current agent\n- Conversation history\n- Generated items\n- Original input\n- Turn counters\n\n### Persistence\n\nThe SDK supports session persistence for maintaining state across multiple interactions:\n\n```python\nsession_persistence_enabled: bool\nstore: StoreSetting\n```\n\n## Tracing and Visualization\n\n### Agent Visualization\n\nThe SDK includes visualization utilities for generating DOT-format diagrams of agent relationships:\n\n| Function | Purpose |\n|----------|---------|\n| `get_all_nodes()` | Generate node definitions for agent graph |\n| `get_all_edges()` | Generate edge definitions for handoff connections |\n\n```mermaid\ngraph TD\n    A[User] --> B[Orchestrator Agent]\n    B --> C[Research Agent]\n    B --> D[Writer Agent]\n    C --> E[Web Search Tool]\n    D --> F[File Write Tool]\n    B --> G[Analytics Agent]\n    G --> H[Data Analysis Tool]\n```\n\n资料来源：[src/agents/extensions/visualization.py]()\n\n## Item Processing\n\n### Message Item Extraction\n\nThe SDK provides utilities for extracting content from conversation items:\n\n| Method | Purpose |\n|--------|---------|\n| `text_message_output()` | 
Extract text from a single message output item |\n| `text_message_outputs()` | Extract concatenated text from multiple items |\n| `extract_refusal()` | Extract refusal content if model refused to respond |\n\n```python\n@classmethod\ndef extract_refusal(cls, message: TResponseOutputItem) -> str | None:\n    \"\"\"Extracts refusal content from a message, if any.\"\"\"\n```\n\n## Run Configuration\n\n### Key Configuration Options\n\n| Parameter | Type | Description |\n|-----------|------|-------------|\n| `max_turns` | `int` | Maximum conversation turns |\n| `tools` | `list[Function]` | Available tools for the run |\n| `input_guardrails` | `list[InputGuardrail]` | Input validation |\n| `output_guardrails` | `list[OutputGuardrail]` | Output validation |\n| `tool_use_tracker` | `ToolUseTracker` | Tracks tool invocations |\n| `run_state` | `RunState` | Mutable execution state |\n\n资料来源：[src/agents/run.py]()\n\n## Example Workflow Patterns\n\n### Research Bot Architecture\n\nA common pattern involves multiple specialized agents:\n\n1. **Planner Agent**: Decomposes user queries into search tasks\n2. **Search Agent**: Executes web searches in parallel\n3. **Writer Agent**: Synthesizes research into final reports\n\n```mermaid\ngraph LR\n    A[User Query] --> B[Planner Agent]\n    B --> C[Search 1]\n    B --> D[Search 2]\n    B --> E[Search N]\n    C --> F[Writer Agent]\n    D --> F\n    E --> F\n    F --> G[Final Report]\n```\n\n### Sandbox Agent Workflow\n\nSandbox agents provide isolated execution environments:\n\n```mermaid\ngraph TD\n    A[SandboxAgent] --> B[Workspace]\n    A --> C[Manifest]\n    C --> D[Skill Loading]\n    B --> E[Artifact Management]\n    E --> F[File System Access]\n    D --> G[Tool Execution]\n```\n\n## SDK Version\n\nCurrent SDK version: `1.0.0` (semantic versioning)\n\n资料来源：[src/agents/version.py]()\n\n## Summary\n\nThe OpenAI Agents SDK provides a comprehensive framework for building sophisticated multi-agent applications. 
Key capabilities include:\n\n- **Multi-Agent Orchestration**: Define and coordinate multiple agents with specialized roles\n- **Handoff System**: Seamlessly transfer control between agents while maintaining context\n- **Tool Execution**: Integrate tools with guardrail validation at input and output\n- **MCP Integration**: Connect to external Model Context Protocol servers\n- **State Management**: Track execution state with persistence support\n- **Tracing**: Monitor and visualize agent interactions and flows\n\nThe SDK abstracts the complexity of turn resolution, tool tracking, and handoff management, allowing developers to focus on defining agent behavior and tool integrations.\n\n---\n\n<a id='installation'></a>\n\n## Installation and Setup\n\n### 相关页面\n\n相关主题：[OpenAI Agents SDK Overview](#overview), [Examples Index](#examples-index)\n\n<details>\n<summary>相关源码文件</summary>\n\n以下源码文件用于生成本页说明：\n\n- [pyproject.toml](https://github.com/openai/openai-agents-python/blob/main/pyproject.toml)\n- [src/agents/_config.py](https://github.com/openai/openai-agents-python/blob/main/src/agents/_config.py)\n- [src/agents/run_config.py](https://github.com/openai/openai-agents-python/blob/main/src/agents/run_config.py)\n- [examples/sandbox/extensions/README.md](https://github.com/openai/openai-agents-python/blob/main/examples/sandbox/extensions/README.md)\n- [examples/sandbox/README.md](https://github.com/openai/openai-agents-python/blob/main/examples/sandbox/README.md)\n- [examples/model_providers/README.md](https://github.com/openai/openai-agents-python/blob/main/examples/model_providers/README.md)\n</details>\n\n# Installation and Setup\n\n## Overview\n\nThe `openai-agents-python` library provides a comprehensive multi-agent framework for building AI-powered applications. 
The installation and setup process involves managing dependencies, configuring environment variables, and optionally setting up sandbox backends for code execution capabilities.\n\nThis page covers the complete setup workflow from initial installation through runtime configuration.\n\n---\n\n## Prerequisites\n\n### System Requirements\n\n| Requirement | Specification |\n|-------------|---------------|\n| Python | 3.10 or higher |\n| Package Manager | pip, uv, or poetry |\n| API Access | OpenAI API key (or compatible provider) |\n\n### Environment Variables\n\nThe library requires the `OPENAI_API_KEY` environment variable for core functionality. Additional provider-specific variables may be needed depending on your use case.\n\n```bash\n# Core requirement\nexport OPENAI_API_KEY=\"sk-...\"\n\n# Optional: Model provider alternatives\nexport OPENROUTER_API_KEY=\"...\"\nexport LITELLM_API_KEY=\"...\"\n```\n\n资料来源：[examples/model_providers/README.md]()\n\n---\n\n## Installation Methods\n\n### Using pip\n\n```bash\npip install openai-agents\n```\n\n### Using uv (Recommended)\n\n```bash\nuv pip install openai-agents\n```\n\nOr with sync for development:\n\n```bash\nuv sync\n```\n\n### With Extras\n\nThe `pyproject.toml` defines optional dependency groups for specific features:\n\n| Extra | Description | Dependencies |\n|-------|-------------|--------------|\n| `sandbox` | Core sandbox functionality | e2b-sdk, modal-client |\n| `e2b` | E2B sandbox backend | e2b-code-interpreter, e2b |\n| `blaxel` | Blaxel sandbox backend | blaxel |\n| `modal` | Modal sandbox backend | modal |\n| `vercel` | Vercel deployment | vercel |\n| `daytona` | Daytona sandbox backend | daytona |\n| `temporal` | Temporal workflow integration | temporal-sdk |\n| `runloop` | Runloop backend | runloop |\n| `dev` | Development dependencies | pytest, ruff, mypy |\n\n安装带所有 sandbox 后端的完整版本：\n\n```bash\nuv sync --extra 
sandbox\n```\n\n资料来源：[pyproject.toml](https://github.com/openai/openai-agents-python/blob/main/pyproject.toml)\n\n---\n\n## Configuration Architecture\n\nThe library uses a layered configuration system:\n\n```mermaid\ngraph TD\n    A[Environment Variables] --> B[DefaultConfig]\n    C[User Code Config] --> D[RunConfig]\n    B --> D\n    E[Agent-specific Config] --> F[Agent]\n    F --> D\n```\n\n### Configuration Loading Order\n\n1. **Environment Variables** - Base API keys and provider settings\n2. **Default Config** - Library defaults from `_config.py`\n3. **RunConfig** - User-provided runtime configuration\n4. **Agent Config** - Per-agent overrides\n\n资料来源：[src/agents/_config.py](https://github.com/openai/openai-agents-python/blob/main/src/agents/_config.py)\n\n---\n\n## Core Configuration\n\n### RunConfig Parameters\n\nThe `RunConfig` class provides runtime configuration for agent execution:\n\n| Parameter | Type | Default | Description |\n|-----------|------|---------|-------------|\n| `model` | `str` | `\"gpt-4o\"` | Model identifier |\n| `model_provider` | `str \\| None` | `None` | Custom model provider |\n| `max_tokens` | `int \\| None` | `None` | Maximum response tokens |\n| `temperature` | `float \\| None` | `None` | Sampling temperature |\n| `parallel_tool_calls` | `bool` | `True` | Enable parallel tool execution |\n| `tool_choice` | `str \\| None` | `None` | Tool selection strategy |\n| `tracing` | `TracingKind` | `\"off\"` | Tracing provider |\n| `trace_include_defaults` | `bool` | `False` | Include default values in traces |\n| `trace_include_raw_model_messages` | `bool` | `False` | Include raw model messages |\n| `session.persistence` | `SessionPersistence` | `None` | Conversation persistence |\n\n资料来源：[src/agents/run_config.py](https://github.com/openai/openai-agents-python/blob/main/src/agents/run_config.py)\n\n### Basic Configuration Example\n\n```python\nfrom agents import Agent, Runner, RunConfig\n\nconfig = RunConfig(\n    model=\"gpt-4o\",\n    
temperature=0.7,\n    tracing=\"console\",  # Enable console tracing\n)\n\nagent = Agent(\n    name=\"assistant\",\n    instructions=\"You are a helpful assistant.\",\n)\n\nresult = await Runner.run(agent, \"Hello!\", run_config=config)\n```\n\n---\n\n## Sandbox Backend Setup\n\nThe library supports multiple sandbox backends for secure code execution. Each backend has specific setup requirements.\n\n### Backend Comparison\n\n| Backend | Use Case | Key Features |\n|---------|----------|--------------|\n| E2B | General-purpose sandbox | Bash/Jupyter interfaces, filesystem access |\n| Blaxel | Cloud development | Persistent storage, cloud bucket mounts |\n| Modal | Serverless execution | GPU support, scalable workloads |\n| Daytona | Containerized dev | Full development environments |\n| Vercel | Deployment | Serverless deployment, edge functions |\n\n资料来源：[examples/sandbox/extensions/README.md](https://github.com/openai/openai-agents-python/blob/main/examples/sandbox/extensions/README.md)\n\n### E2B Setup\n\n```bash\n# Install E2B extra\nuv sync --extra e2b\n\n# Set API key\nexport E2B_API_KEY=\"e2b_...\"\n```\n\n**Run Example:**\n\n```bash\nuv run python examples/sandbox/basic.py --backend e2b\nuv run python examples/sandbox/basic.py --backend e2b_code_interpreter\n```\n\n资料来源：[examples/sandbox/extensions/README.md]()\n\n### Blaxel Setup\n\n```bash\n# Install Blaxel extra\nuv sync --extra blaxel\n\n# Set environment variables\nexport OPENAI_API_KEY=\"...\"\nexport BL_API_KEY=\"...\"\nexport BL_WORKSPACE=\"...\"\n```\n\n**Run Example:**\n\n```bash\nuv run python examples/sandbox/extensions/blaxel_runner.py --stream\n```\n\n**Useful Flags:**\n\n| Flag | Description |\n|------|-------------|\n| `--image blaxel/py-app` | Container image |\n| `--region us-pdx-1` | Deployment region |\n| `--memory 4096` | Memory allocation (MB) |\n| `--ttl 1h` | Session time-to-live |\n\n资料来源：[examples/sandbox/extensions/README.md]()\n\n### Modal Setup\n\n```bash\n# Install Modal 
extra\nuv sync --extra modal\n\n# Authenticate\nuv run modal token set --token-id <token-id> --token-secret <token-secret>\n\n# Or use environment variables\nexport MODAL_TOKEN_ID=\"...\"\nexport MODAL_TOKEN_SECRET=\"...\"\n```\n\n**Run Example:**\n\n```bash\nuv run python examples/sandbox/extensions/modal_runner.py \\\n  --app-name openai-agents-python-sandbox-example \\\n  --stream\n```\n\n**Useful Flags:**\n\n| Flag | Description |\n|------|-------------|\n| `--workspace-persistence tar` | Workspace persistence mode |\n| `--sandbox-create-timeout-s 60` | Sandbox creation timeout |\n| `--runtime node22` | Runtime environment |\n\n资料来源：[examples/sandbox/extensions/README.md]()\n\n### Daytona Setup\n\n```bash\n# Install Daytona extra\nuv sync --extra daytona\n\n# Set API key\nexport OPENAI_API_KEY=\"...\"\nexport DAYTONA_API_KEY=\"...\"\n```\n\n**Run Example:**\n\n```bash\nuv run python examples/sandbox/extensions/daytona/daytona_runner.py --stream\n```\n\n### Vercel Setup\n\n```bash\n# Install Vercel extra\nuv sync --extra vercel\n\n# Option 1: OIDC token (recommended)\nexport OPENAI_API_KEY=\"...\"\nexport VERCEL_OIDC_TOKEN=\"...\"\n\n# Option 2: Explicit tokens\nexport OPENAI_API_KEY=\"...\"\nexport VERCEL_TOKEN=\"...\"\nexport VERCEL_PROJECT_ID=\"...\"\nexport VERCEL_TEAM_ID=\"...\"\n```\n\n**Run Example:**\n\n```bash\nuv run python examples/sandbox/extensions/vercel_runner.py --stream\n```\n\n### Runloop Setup\n\n```bash\n# Install Runloop extra\nuv sync --extra runloop\n\n# Sign up at platform.runloop.ai\n```\n\n资料来源：[examples/sandbox/extensions/README.md]()\n\n---\n\n## Sandbox Basic Examples\n\n### Minimal Sandbox Setup\n\n```python\nfrom agents.sandbox import SandboxAgent, SandboxSession\nfrom agents.sandbox.backends.e2b import E2BBackend\n\n# Create backend\nbackend = E2BBackend(api_key=\"e2b_...\")\n\n# Create sandbox session\nsession = SandboxSession(backend=backend)\n\n# Run agent\nagent = SandboxAgent(\n    name=\"code_assistant\",\n    
instructions=\"Execute Python code in the sandbox.\",\n)\n\nresult = await Runner.run(agent, \"Print hello world\", session=session)\n```\n\n资料来源：[examples/sandbox/README.md](https://github.com/openai/openai-agents-python/blob/main/examples/sandbox/README.md)\n\n### Available Sandbox Examples\n\n| Example | File | Description |\n|---------|------|-------------|\n| Basic sandbox | `examples/sandbox/basic.py` | Create session, run agent, stream results |\n| Handoffs | `examples/sandbox/handoffs.py` | Agent handoffs with sandbox backends |\n| Workspace capabilities | `examples/sandbox/sandbox_agent_capabilities.py` | Configure workspace access |\n| Combined tools | `examples/sandbox/sandbox_agent_with_tools.py` | Sandbox + host-defined tools |\n| Agents as tools | `examples/sandbox/sandbox_agents_as_tools.py` | Expose sandbox agents as tools |\n| Remote snapshots | `examples/sandbox/sandbox_agent_with_remote_snapshot.py` | Start from saved snapshots |\n\n**Run Commands:**\n\n```bash\nuv run python examples/sandbox/basic.py\nuv run python examples/sandbox/handoffs.py\nuv run python examples/sandbox/sandbox_agent_capabilities.py\n```\n\n---\n\n## Model Provider Configuration\n\n### OpenRouter (Default for Examples)\n\n```bash\nexport OPENROUTER_API_KEY=\"...\"\n```\n\n```python\nfrom agents import Agent, Runner, RunConfig\n\nconfig = RunConfig(\n    model=\"openrouter/openai/gpt-4o-mini\",\n)\n\nresult = await Runner.run(agent, \"Hello\", run_config=config)\n```\n\n### LiteLLM Provider\n\n```bash\nuv sync --extra litellm\n```\n\n```python\nfrom agents.model_providers.litellm_provider import LiteLLMProvider\n\nprovider = LiteLLMProvider(model=\"gpt-4o-mini\")\n```\n\n### Any-LLM Provider\n\n```bash\nuv sync --extra any-llm\n```\n\n```python\nfrom agents.model_providers.any_llm_provider import AnyLLMProvider\n\nprovider = AnyLLMProvider(model=\"gpt-4o-mini\")\n```\n\n**Run Examples:**\n\n```bash\nuv run examples/model_providers/litellm_provider.py\nuv run 
examples/model_providers/litellm_auto.py\nuv run examples/model_providers/any_llm_provider.py\nuv run examples/model_providers/any_llm_auto.py\n```\n\n资料来源：[examples/model_providers/README.md]()\n\n---\n\n## Example Project Setup\n\n### Healthcare Support Example\n\n```bash\n# List available scenarios\nuv run python examples/sandbox/healthcare_support/main.py --list-scenarios\n\n# Run specific scenario\nuv run python examples/sandbox/healthcare_support/main.py --scenario blue_cross_pt_benefits\nuv run python examples/sandbox/healthcare_support/main.py --scenario messy_ambiguous_knee_case\n\n# Reset memory state\nuv run python examples/sandbox/healthcare_support/main.py --reset-memory\n```\n\n**For unattended runs:**\n\n```bash\nEXAMPLES_INTERACTIVE_MODE=auto uv run python examples/sandbox/healthcare_support/main.py --scenario messy_ambiguous_knee_case\n```\n\n资料来源：[examples/sandbox/healthcare_support/README.md]()\n\n### Research Bot Example\n\n```bash\npython -m examples.research_bot.main\n```\n\n资料来源：[examples/research_bot/README.md]()\n\n---\n\n## Temporal Integration Setup\n\nFor workflow-based sandbox management:\n\n```bash\n# Install Temporal extra\nuv sync --extra temporal\n\n# Install Temporal CLI and just\n# Start dev server\njust temporal\n\n# In separate terminals\njust worker  # Start worker\njust tui     # Start TUI\n```\n\n**TUI Commands:**\n\n| Command | Description |\n|---------|-------------|\n| `/switch` | Switch to different sandbox backend |\n| `/fork [title]` | Fork session to different backend |\n| `/title <name>` | Rename current session |\n\n资料来源：[examples/sandbox/extensions/temporal/README.md]()\n\n---\n\n## Environment Configuration Files\n\n### Repository Root `.env`\n\nPlace a `.env` file at the repository root:\n\n```\nOPENAI_API_KEY=\"sk-...\"\n```\n\n### Example-Specific `.env`\n\nSome examples support their own `.env` files:\n\n```\n# 
examples/sandbox/extensions/temporal/.env\nOPENAI_API_KEY=\"sk-...\"\nDAYTONA_API_KEY=\"dtn_...\"\nE2B_API_KEY=\"e2b_...\"\n```\n\n---\n\n## Troubleshooting Setup Issues\n\n### Common Issues\n\n| Issue | Solution |\n|-------|----------|\n| Missing API key | Set `OPENAI_API_KEY` environment variable |\n| Backend connection failed | Verify backend API key and network access |\n| Import errors | Run `uv sync` to install all dependencies |\n| Sandbox timeout | Increase `--sandbox-create-timeout-s` parameter |\n\n### Verify Installation\n\n```python\nimport agents\nprint(agents.__version__)\n```\n\n### Check Backend Configuration\n\n```python\nfrom agents.sandbox.backends.e2b import E2BBackend\n\nbackend = E2BBackend()\n# Check if backend is properly configured\n```\n\n---\n\n## Next Steps\n\nAfter completing installation and setup:\n\n1. **Quick Start** - Run `examples/sandbox/basic.py` to verify sandbox functionality\n2. **Agent Development** - Create your first agent with custom instructions\n3. **Tool Integration** - Add custom tools to extend agent capabilities\n4. 
**Multi-Agent Systems** - Implement agent handoffs and orchestration\n\n---\n\n<a id='examples-index'></a>\n\n## Examples Index\n\n### 相关页面\n\n相关主题：[OpenAI Agents SDK Overview](#overview), [Agents](#agents)\n\n<details>\n<summary>相关源码文件</summary>\n\n以下源码文件用于生成本页说明：\n\n- [examples/basic/hello_world.py](https://github.com/openai/openai-agents-python/blob/main/examples/basic/hello_world.py)\n- [examples/agent_patterns/agents_as_tools.py](https://github.com/openai/openai-agents-python/blob/main/examples/agent_patterns/agents_as_tools.py)\n- [examples/sandbox/basic.py](https://github.com/openai/openai-agents-python/blob/main/examples/sandbox/basic.py)\n- [examples/voice/streamed/main.py](https://github.com/openai/openai-agents-python/blob/main/examples/voice/streamed/main.py)\n- [examples/financial_research_agent/README.md](https://github.com/openai/openai-agents-python/blob/main/examples/financial_research_agent/README.md)\n- [examples/research_bot/README.md](https://github.com/openai/openai-agents-python/blob/main/examples/research_bot/README.md)\n- [examples/sandbox/README.md](https://github.com/openai/openai-agents-python/blob/main/examples/sandbox/README.md)\n- [examples/mcp/streamable_http_remote_example/README.md](https://github.com/openai/openai-agents-python/blob/main/examples/mcp/streamable_http_remote_example/README.md)\n- [examples/model_providers/README.md](https://github.com/openai/openai-agents-python/blob/main/examples/model_providers/README.md)\n- [examples/sandbox/extensions/README.md](https://github.com/openai/openai-agents-python/blob/main/examples/sandbox/extensions/README.md)\n</details>\n\n# Examples Index\n\n## Overview\n\nThe Examples Index serves as a comprehensive guide to the sample applications and demonstrations provided in the openai-agents-python repository. 
These examples are designed to showcase the capabilities of the Agents SDK across various use cases, from basic agent interactions to complex multi-agent workflows involving sandboxed execution environments, voice interfaces, and external tool integrations.\n\nThe examples directory structure organizes demonstrations by functional category, allowing developers to quickly locate relevant implementations for their specific requirements. Each example is designed to be runnable with minimal configuration, serving as both documentation and a starting point for custom implementations.\n\n## Example Categories\n\n### Basic Examples\n\nThe basic examples provide the foundational patterns for building agents with the SDK. These examples demonstrate core concepts with minimal complexity.\n\n| Example | File | Purpose |\n|---------|------|---------|\n| Hello World | `examples/basic/hello_world.py` | Simple agent that responds to user input |\n| Agent as Tool | `examples/agent_patterns/agents_as_tools.py` | Demonstrates wrapping agents as tools for other agents |\n\n资料来源：[examples/basic/hello_world.py](examples/basic/hello_world.py)\n资料来源：[examples/agent_patterns/agents_as_tools.py](examples/agent_patterns/agents_as_tools.py)\n\n### Sandbox Examples\n\nSandbox examples demonstrate the isolated workspace capabilities of the Agents SDK, enabling agents to execute code and manipulate files in a secure environment.\n\n#### Small API Examples\n\n| Example | Command | Description |\n|---------|---------|-------------|\n| Basic Sandbox | `uv run python examples/sandbox/basic.py` | Creates a sandbox session from a manifest, runs a `SandboxAgent`, and streams the result |\n| Handoffs | `uv run python examples/sandbox/handoffs.py` | Uses handoffs with sandbox-backed agents |\n| Workspace Capabilities | `uv run python examples/sandbox/sandbox_agent_capabilities.py` | Configures a sandbox agent with workspace capabilities |\n| Sandbox with Tools | `uv run python 
examples/sandbox/sandbox_agent_with_tools.py` | Combines sandbox capabilities with host-defined tools |\n| Agents as Tools | `uv run python examples/sandbox/sandbox_agents_as_tools.py` | Exposes sandbox agents as tools for another agent |\n| Remote Snapshot | `uv run python examples/sandbox/sandbox_agent_with_remote_snapshot.py` | Starts from a remote snapshot |\n\n资料来源：[examples/sandbox/README.md:1-20](examples/sandbox/README.md)\n\n#### Sandbox Extensions\n\nSandbox extensions provide integrations with various cloud sandbox providers:\n\n| Provider | Setup Command | Run Command |\n|----------|---------------|-------------|\n| E2B | `uv sync --extra e2b` | `uv run python examples/sandbox/basic.py --backend e2b` |\n| Modal | `uv sync --extra modal` | `uv run python examples/sandbox/extensions/modal_runner.py --stream` |\n| Blaxel | `uv sync --extra blaxel` | `uv run python examples/sandbox/extensions/blaxel_runner.py --stream` |\n| Vercel | `uv sync --extra vercel` | `uv run python examples/sandbox/extensions/vercel_runner.py --stream` |\n| Daytona | `uv sync --extra daytona` | `uv run python examples/sandbox/extensions/daytona/daytona_runner.py --stream` |\n| Runloop | `uv sync --extra runloop` | Platform-specific setup |\n| Temporal | Temporal CLI + just | `just worker` / `just tui` |\n\n资料来源：[examples/sandbox/extensions/README.md](examples/sandbox/extensions/README.md)\n\n### Multi-Agent Research Examples\n\n#### Research Bot\n\nThe research bot demonstrates a multi-agent system where agents collaborate to perform web research and synthesize findings into reports.\n\n**Architecture Flow:**\n\n```mermaid\ngraph TD\n    A[User Input] --> B[Planner Agent]\n    B --> C[Generate Search Queries]\n    C --> D[Search Agent 1]\n    C --> E[Search Agent 2]\n    C --> F[Search Agent N]\n    D --> G[Parallel Execution]\n    E --> G\n    F --> G\n    G --> H[Writer Agent]\n    H --> I[Final Report]\n```\n\n**Key Components:**\n\n- **Planner Agent**: Creates a research plan 
with search terms and rationale\n- **Search Agent**: Uses Web Search tool to search and summarize results\n- **Writer Agent**: Synthesizes summaries into a long-form markdown report\n\n资料来源：[examples/research_bot/README.md](examples/research_bot/README.md)\n\n#### Financial Research Agent\n\nThe financial research agent demonstrates domain-specific research capabilities with access to specialized analysis tools.\n\n**Agent Configuration:**\n\n```\nYou are a senior financial analyst. You will be provided with the original query\nand a set of raw search summaries. Your job is to synthesize these into a\nlong‑form markdown report with a short executive summary.\n```\n\n**Available Tools:**\n- `fundamentals_analysis` - Specialist write-up for fundamental analysis\n- `risk_analysis` - Specialist write-up for risk assessment\n\n资料来源：[examples/financial_research_agent/README.md](examples/financial_research_agent/README.md)\n\n### Healthcare Support Example\n\nA demonstration workflow that combines sandbox execution with human-in-the-loop approvals for healthcare-related tasks.\n\n**Workflow Components:**\n\n- **Orchestrator Agent**: Coordinates the overall workflow\n- **Benefits Subagent**: Handles benefits-related queries\n- **Sandbox Policy Agent**: Executes policy validation in sandbox\n- **Memory Recap Agent**: Maintains conversation context\n\n**Key Files:**\n\n| File | Purpose |\n|------|---------|\n| `main.py` | Standalone CLI demo runner |\n| `workflow.py` | Shared workflow execution logic, sandbox setup, artifact copying, tracing |\n| `support_agents.py` | Agent definitions |\n| `tools.py` | Local lookup tools and approval-gated human handoff |\n| `skills/prior-auth-packet-builder/SKILL.md` | Sandbox skill definition |\n\n**Available Scenarios:**\n\n```bash\nuv run python examples/sandbox/healthcare_support/main.py --list-scenarios\nuv run python examples/sandbox/healthcare_support/main.py --scenario blue_cross_pt_benefits\nuv run python 
examples/sandbox/healthcare_support/main.py --scenario messy_ambiguous_knee_case\n```\n\n资料来源：[examples/sandbox/healthcare_support/README.md](examples/sandbox/healthcare_support/README.md)\n\n### Voice Examples\n\nVoice examples demonstrate real-time audio interaction capabilities with agents.\n\n**Architecture:**\n\n```mermaid\ngraph LR\n    A[Audio Input] --> B[Voice Agent]\n    B --> C[Streaming Response]\n    C --> D[Audio Output]\n    B --> E[Tool Calls]\n    E --> F[External Services]\n```\n\n**Run Command:**\n\n```bash\nuv run python examples/voice/streamed/main.py\n```\n\n资料来源：[examples/voice/streamed/main.py](examples/voice/streamed/main.py)\n\n### MCP Examples\n\nModel Context Protocol (MCP) examples demonstrate integration with external MCP servers for extended tool capabilities.\n\n#### Streamable HTTP Remote Example\n\nConnects to DeepWiki over the Streamable HTTP transport to leverage external tools.\n\n**Run Command:**\n\n```bash\nuv run python examples/mcp/streamable_http_remote_example/main.py\n```\n\n**Prerequisites:**\n- `OPENAI_API_KEY` set for model calls\n\n资料来源：[examples/mcp/streamable_http_remote_example/README.md](examples/mcp/streamable_http_remote_example/README.md)\n\n### Model Provider Examples\n\nModel provider examples demonstrate routing models through adapter layers for flexibility in model selection.\n\n| Adapter | Direct Run | Auto Mode |\n|---------|------------|-----------|\n| any-llm | `uv run examples/model_providers/any_llm_provider.py` | `uv run examples/model_providers/any_llm_auto.py` |\n| LiteLLM | `uv run examples/model_providers/litellm_provider.py` | `uv run examples/model_providers/litellm_auto.py` |\n\n**Model Override:**\n\n```bash\nuv run examples/model_providers/any_llm_provider.py --model openrouter/openai/gpt-5.4-mini\n```\n\n资料来源：[examples/model_providers/README.md](examples/model_providers/README.md)\n\n## Common Configuration\n\n### Environment Variables\n\nMost examples require the `OPENAI_API_KEY` 
environment variable. Configure it in one of these locations:\n\n1. Repository-root `.env` file\n2. Example's local `.env` file\n3. Shell environment\n\n### Running with uv\n\nThe project uses `uv` for dependency management. Run examples with:\n\n```bash\nuv run python <path-to-example>\n```\n\n### Interactive Mode\n\nFor examples with prompts, set `EXAMPLES_INTERACTIVE_MODE=auto` to auto-answer:\n\n```bash\nEXAMPLES_INTERACTIVE_MODE=auto uv run python examples/sandbox/healthcare_support/main.py --scenario messy_ambiguous_knee_case\n```\n\n## Example Selection Guide\n\n```mermaid\ngraph TD\n    A[Use Case] --> B{Basic Interaction?}\n    B -->|Yes| C[Basic Examples]\n    B -->|No| D{Multi-Agent Workflow?}\n    D -->|Yes| E{Research Domain?}\n    D -->|No| F{Sandbox Required?}\n    E -->|Financial| G[Financial Research Agent]\n    E -->|General| H[Research Bot]\n    F -->|Yes| I{Specialized Provider?}\n    F -->|No| J[Agent Patterns]\n    I -->|E2B| K[E2B Examples]\n    I -->|Modal| L[Modal Examples]\n    I -->|Vercel| M[Vercel Examples]\n    I -->|Daytona| N[Daytona Examples]\n    I -->|Blaxel| O[Blaxel Examples]\n```\n\n## Sandbox Backend Comparison\n\n| Backend | Interface | Workspace Persistence | Cloud Support |\n|---------|-----------|----------------------|---------------|\n| E2B | Bash-style | Snapshot files | Yes |\n| Modal | Bash-style | Tar, snapshot files/directory | Yes |\n| Blaxel | Bash-style + PTY | Drive mount, cloud buckets | Yes (S3, R2, GCS) |\n| Vercel | Command execution | Tar, snapshot | Yes |\n| Daytona | Bash-style | Yes | Yes |\n| Runloop | TBD | Yes | Yes |\n\n资料来源：[examples/sandbox/extensions/README.md](examples/sandbox/extensions/README.md)\n\n---\n\n<a id='agents'></a>\n\n## Agents\n\n### 相关页面\n\n相关主题：[Tools](#tools), [Handoffs](#handoffs), [Guardrails](#guardrails), [Run Loop and Execution](#run-loop)\n\n<details>\n<summary>相关源码文件</summary>\n\n以下源码文件用于生成本页说明：\n\n- 
[src/agents/agent.py](https://github.com/openai/openai-agents-python/blob/main/src/agents/agent.py)\n- [src/agents/lifecycle.py](https://github.com/openai/openai-agents-python/blob/main/src/agents/lifecycle.py)\n- [src/agents/agent_output.py](https://github.com/openai/openai-agents-python/blob/main/src/agents/agent_output.py)\n- [src/agents/items.py](https://github.com/openai/openai-agents-python/blob/main/src/agents/items.py)\n- [src/agents/function_schema.py](https://github.com/openai/openai-agents-python/blob/main/src/agents/function_schema.py)\n- [src/agents/run.py](https://github.com/openai/openai-agents-python/blob/main/src/agents/run.py)\n- [src/agents/run_internal/turn_resolution.py](https://github.com/openai/openai-agents-python/blob/main/src/agents/run_internal/turn_resolution.py)\n- [src/agents/extensions/visualization.py](https://github.com/openai/openai-agents-python/blob/main/src/agents/extensions/visualization.py)\n- [src/agents/handoffs/__init__.py](https://github.com/openai/openai-agents-python/blob/main/src/agents/handoffs/__init__.py)\n- [src/agents/handoffs/history.py](https://github.com/openai/openai-agents-python/blob/main/src/agents/handoffs/history.py)\n</details>\n\n# Agents\n\n## Overview\n\nAgents are the core execution units in the OpenAI Agents SDK. An agent encapsulates an LLM with instructions, tools, handoffs, and guardrails that enable autonomous task completion. Agents process user inputs, make decisions about tool usage, transfer control to other agents, and generate responses.\n\nThe agent system provides a structured approach to building AI-powered applications by separating concerns between orchestration, tool execution, and response generation. 
Agents can be composed hierarchically, where one agent can delegate tasks to sub-agents or hand off control entirely to specialized agents.\n\n## Architecture\n\n### Agent Core Components\n\nAn agent consists of several interconnected components that work together to process requests and generate responses.\n\n```mermaid\ngraph TD\n    A[User Input] --> B[Agent]\n    B --> C[Instructions/Prompt]\n    B --> D[Tools]\n    B --> E[Handoffs]\n    B --> F[Guardrails]\n    C --> G[LLM Decision Engine]\n    D --> H[Tool Execution]\n    E --> I[Agent Transfer]\n    G --> J[Response/Action]\n    H --> J\n    I --> K[Target Agent]\n    K --> G\n```\n\n### Agent Types\n\n| Type | Description | Use Case |\n|------|-------------|----------|\n| `Agent[TContext]` | Base agent type with generic context | General purpose agents |\n| `SandboxAgent` | Agent with isolated workspace | Code execution, file operations |\n| `FunctionAgent` | Agent for function/tool orchestration | Tool-heavy workflows |\n\n### Source File Organization\n\n| File | Purpose |\n|------|---------|\n| `src/agents/agent.py` | Core agent class definition |\n| `src/agents/lifecycle.py` | Agent lifecycle management |\n| `src/agents/agent_output.py` | Output types and responses |\n| `src/agents/items.py` | Run item definitions and helpers |\n| `src/agents/function_schema.py` | Tool schema generation |\n\n## Agent Lifecycle\n\nAgents follow a defined lifecycle from initialization through execution to completion or handoff.\n\n```mermaid\nstateDiagram-v2\n    [*] --> Initialized: Agent Created\n    Initialized --> Running: Input Received\n    Running --> ToolExecution: Tool Call\n    ToolExecution --> Running: Tool Result\n    Running --> Handoff: Transfer Request\n    Handoff --> [*]: Complete\n    Running --> Response: Final Output\n    Response --> [*]: Complete\n    Handoff --> Running: New Agent\n```\n\n### Lifecycle States\n\n| State | Description | Entry Condition |\n|-------|-------------|-----------------|\n| 
`Initialized` | Agent created but not yet processing | Object instantiation |\n| `Running` | Actively processing input | `run()` or `run_sync()` called |\n| `ToolExecution` | Executing one or more tools | LLM requests tool call |\n| `Handoff` | Transferring to another agent | LLM triggers handoff |\n| `Response` | Generating final response | No more actions needed |\n\n资料来源：[src/agents/lifecycle.py:1-50]()\n\n### Turn Resolution\n\nThe turn resolution process handles the core agent loop. Each turn processes input and determines next actions.\n\n```mermaid\nsequenceDiagram\n    participant U as User\n    participant R as Runner\n    participant A as Agent\n    participant T as Tools\n    participant H as Handoffs\n    \n    U->>R: User Input\n    R->>A: Process Turn\n    A->>T: Tool Calls?\n    T-->>A: Results\n    A->>H: Handoff?\n    H-->>A: New Agent\n    A->>R: Response\n    R-->>U: Output\n```\n\n资料来源：[src/agents/run_internal/turn_resolution.py:1-80]()\n\n## Run Items\n\nRun items represent the atomic units of work within an agent execution. 
They capture messages, tool calls, tool results, and handoffs.\n\n### Item Types\n\n| Type | Description | Source |\n|------|-------------|--------|\n| `MessageOutputItem` | LLM generated message | `src/agents/items.py:30-60` |\n| `ToolCallItem` | Tool invocation request | `src/agents/items.py:61-90` |\n| `ToolCallOutputItem` | Tool execution result | `src/agents/items.py:91-120` |\n| `HandoffItem` | Agent transfer | `src/agents/items.py:121-150` |\n| `ToolApprovalItem` | Human approval for tools | `src/agents/handoffs/history.py:50-70` |\n\n### Message Extraction\n\nThe `ItemHelpers` class provides utilities for extracting content from run items:\n\n```python\n# Extract text from message output\ntext = ItemHelpers.text_message_output(message_item)\n\n# Extract refusal if present\nrefusal = ItemHelpers.extract_refusal(message.raw_item)\n\n# Convert string to input list\ninput_list = ItemHelpers.input_to_new_input_list(\"user message\")\n```\n\n资料来源：[src/agents/items.py:40-75]()\n\n## Handoffs\n\nHandoffs enable agent-to-agent transfer, allowing specialized agents to handle specific tasks.\n\n### Handoff Configuration\n\n| Parameter | Type | Description |\n|-----------|------|-------------|\n| `agent` | `Agent` | Target agent |\n| `tool_name_override` | `str` | Override for handoff tool name |\n| `tool_description_override` | `str` | Override for handoff description |\n| `on_handoff` | `Callable` | Callback when handoff occurs |\n| `input_type` | `Type` | Type validation for handoff input |\n| `input_filter` | `Callable` | Filter inputs passed to next agent |\n| `is_enabled` | `bool \\| Callable` | Enable/disable handoff |\n\n资料来源：[src/agents/handoffs/__init__.py:30-80]()\n\n### Handoff History Management\n\nWhen an agent hands off to another, the conversation history is summarized to maintain context:\n\n```python\n# Nested history processing\nnested_history = nest_handoff_history(\n    handoff_input_data,\n    history_mapper=custom_mapper\n)\n```\n\nThe history 
wrapper markers default to `<CONVERSATION HISTORY>` tags but can be customized:\n\n```python\n# Customize history markers\nset_conversation_history_wrappers(\n    start=\"<PREVIOUS_CONTEXT>\",\n    end=\"</PREVIOUS_CONTEXT>\"\n)\n```\n\n资料来源：[src/agents/handoffs/history.py:20-60]()\n\n## Tools and Function Schema\n\nTools extend agent capabilities by providing functions the LLM can call.\n\n### Function Schema Generation\n\nThe `FunctionSchema` class converts Python functions into OpenAI-compatible tool schemas:\n\n```python\nschema = FunctionSchema.from_fn(my_function)\ntool_definition = schema.to_tool_definition()\n```\n\n### Tool Definition Structure\n\n| Field | Type | Description |\n|-------|------|-------------|\n| `name` | `str` | Tool identifier |\n| `description` | `str` | Human-readable description |\n| `parameters` | `dict` | JSON schema for parameters |\n| `strict` | `bool` | Enable strict parameter validation |\n\n资料来源：[src/agents/function_schema.py:1-50]()\n\n## Agent Visualization\n\nThe SDK provides DOT-format visualization for agent graphs:\n\n```mermaid\ngraph TD\n    subgraph AgentGraph\n        A[\"User Input\"] --> B[\"Agent\"]\n        B --> C[\"Tool: search\"]\n        B --> D[\"Tool: calculate\"]\n        B --> E[\"Handoff: specialist\"]\n        E --> F[\"Specialist Agent\"]\n    end\n```\n\n### Graph Components\n\n| Component | Shape | Color | Description |\n|-----------|-------|-------|-------------|\n| Start | Ellipse | lightblue | Entry point |\n| Agent | Box | lightyellow | Agent nodes |\n| Tool | Ellipse | lightgreen | Tool definitions |\n| Handoff | Box | lightgrey | Agent transfer points |\n| End | Ellipse | lightblue | Exit point |\n\n资料来源：[src/agents/extensions/visualization.py:1-60]()\n\n## Agent Output\n\nAgent execution produces structured output containing messages, tool calls, and metadata.\n\n### Output Structure\n\n```python\n@dataclass\nclass AgentOutput:\n    messages: list[MessageOutputItem]\n    tool_calls: 
list[ToolCallItem]\n    tool_results: list[ToolCallOutputItem]\n    handoffs: list[HandoffItem]\n    final_response: str | None\n```\n\n资料来源：[src/agents/agent_output.py:1-40]()\n\n### Response Finalization\n\nAfter tool execution, the system finalizes responses:\n\n```python\ntool_final_output = await _maybe_finalize_from_tool_results(\n    public_agent=agent,\n    original_input=input,\n    new_response=response,\n    pre_step_items=pre_items,\n    new_step_items=new_items,\n    function_results=results\n)\n```\n\nRefusals are extracted and converted to errors:\n\n```python\nrefusal = ItemHelpers.extract_refusal(message_item.raw_item)\nif refusal:\n    raise ModelRefusalError(refusal)\n```\n\n资料来源：[src/agents/run_internal/turn_resolution.py:80-120]()\n\n## Runner Integration\n\nThe `Runner` class orchestrates agent execution, managing the turn loop and state transitions.\n\n### Run Configuration\n\n| Parameter | Type | Default | Description |\n|-----------|------|---------|-------------|\n| `max_turns` | `int` | `10` | Maximum conversation turns |\n| `max_tools` | `int` | `100` | Maximum tool calls |\n| `context_length` | `int` | Model dependent | Context window size |\n| `tool_choice` | `str` | `\"auto\"` | Tool selection strategy |\n\n### State Management\n\nThe runner maintains `RunState` throughout execution:\n\n```python\nrun_state = RunState(\n    current_agent=agent,\n    model_response=response,\n    generated_items=items,\n    run_config=config\n)\n```\n\n资料来源：[src/agents/run.py:100-180]()\n\n## Error Handling\n\n### Model Refusal\n\nWhen the LLM refuses to respond, a `ModelRefusalError` is raised:\n\n```python\nif refusal:\n    refusal_error = ModelRefusalError(refusal)\n    run_error_data = build_run_error_data(...)\n```\n\n### Tool Activity Tracking\n\nThe system tracks tool usage even when no messages are generated:\n\n```python\nhas_tool_activity_without_message = not message_items and bool(\n    processed_response.tools_used\n)\n```\n\n## 
Multi-Agent Patterns\n\n### Hierarchical Agents\n\n```mermaid\ngraph TD\n    O[Orchestrator] --> S[Search Agent]\n    O --> A[Analysis Agent]\n    O --> W[Writer Agent]\n    S --> R[Research Results]\n    A --> R\n    A --> D[Data Insights]\n    W --> R\n    W --> D\n```\n\n### Parallel Execution\n\nAgents can execute in parallel for independent tasks:\n\n```python\n# Multiple search agents running concurrently\nsearch_tasks = [search_agent.run(query) for query in queries]\nresults = await asyncio.gather(*search_tasks)\n```\n\n## Best Practices\n\n1. **Context Management**: Use generic `Agent[TContext]` with custom context classes for type safety\n2. **Handoff Design**: Create focused agents with clear responsibilities and minimal handoffs\n3. **Tool Organization**: Group related tools into toolkits for better organization\n4. **History Filtering**: Use `input_filter` in handoffs to prevent context overflow\n5. **Error Handling**: Always handle `ModelRefusalError` and tool execution failures\n\n## Related Components\n\n| Component | File | Relationship |\n|-----------|------|--------------|\n| MCP Server | `src/agents/mcp/server.py` | Provides external tool access |\n| Guardrails | `src/agents/guardrails.py` | Input/output validation |\n| Streaming | `src/agents/streaming.py` | Real-time output |\n| Tracing | `src/agents/tracing.py` | Execution monitoring |\n\n---\n\n<a id='tools'></a>\n\n## Tools\n\n### 相关页面\n\n相关主题：[Agents](#agents), [Guardrails](#guardrails)\n\n<details>\n<summary>相关源码文件</summary>\n\n以下源码文件用于生成本页说明：\n\n- [src/agents/tool.py](https://github.com/openai/openai-agents-python/blob/main/src/agents/tool.py)\n- [src/agents/tool_context.py](https://github.com/openai/openai-agents-python/blob/main/src/agents/tool_context.py)\n- [src/agents/agent_tool_state.py](https://github.com/openai/openai-agents-python/blob/main/src/agents/agent_tool_state.py)\n- [src/agents/editor.py](https://github.com/openai/openai-agents-python/blob/main/src/agents/editor.py)\n- 
[src/agents/computer.py](https://github.com/openai/openai-agents-python/blob/main/src/agents/computer.py)\n- [src/agents/apply_diff.py](https://github.com/openai/openai-agents-python/blob/main/src/agents/apply_diff.py)\n</details>\n\n# Tools\n\n## Overview\n\nTools in the OpenAI Agents Python SDK enable AI agents to interact with external systems, execute code, manipulate files, and perform actions in isolated environments. The tools system provides a structured way for agents to extend their capabilities beyond pure text generation by calling functions, accessing resources, and performing complex operations.\n\nThe SDK implements a tool abstraction that wraps callable functions with metadata, descriptions, and execution logic. When an agent decides to use a tool, the SDK handles the invocation, manages the context, processes results, and returns responses to the agent for further processing.\n\n工具系统支持多种工具类型，从简单的函数调用到复杂的沙箱执行环境。工具可以在初始化时配置各种选项，包括名称、描述、参数模式等，并且可以与代理的批准机制和防护栏系统集成。\n\n## Core Tool Architecture\n\n### Tool Base Class\n\nThe foundation of the tools system is the `Tool` class defined in `src/agents/tool.py`. This abstract base class defines the interface that all tools must implement, ensuring consistent behavior across different tool types.\n\n```mermaid\ngraph TD\n    A[Tool Base Class] --> B[FunctionTool]\n    A --> C[FileSearchTool]\n    A --> D[ComputerTool]\n    A --> E[WebSearchTool]\n    A --> F[Sandbox Agent Tools]\n```\n\nEach tool implementation must provide:\n- A unique name identifier\n- A description for the LLM to understand tool purpose\n- Parameter schema for function calling\n- Execution logic in an `invoke` or `acall` method\n\n### Tool Interface\n\nThe tool interface follows a standard pattern where each tool is defined with metadata that allows the LLM to understand when and how to use it. 
Tools can be synchronous or asynchronous, supporting both simple function calls and complex operations that require I/O operations.\n\n工具的关键属性包括：\n\n| Property | Type | Description |\n|----------|------|-------------|\n| `name` | `str` | Unique identifier for the tool |\n| `description` | `str` | Natural language description for LLM |\n| `parameters` | `dict` | JSON Schema for tool arguments |\n| `strict` | `bool` | Whether to enforce parameter validation |\n\n资料来源：[src/agents/tool.py:1-50]()\n\n## Built-in Tool Types\n\n### FunctionTool\n\n`FunctionTool` is the most common tool type, wrapping a Python function with tool metadata. It allows developers to expose arbitrary Python functions as tools that agents can call.\n\n```python\nfrom agents import FunctionTool\n\ndef calculate_budget(items: list[str]) -> float:\n    # Implementation\n    return total\n\nbudget_tool = FunctionTool(\n    name=\"calculate_budget\",\n    description=\"Calculate the total budget for a list of items\",\n    params_json_schema={...},\n    handle_invoke=calculate_budget\n)\n```\n\n### File and Editor Tools\n\nThe SDK provides specialized tools for file operations. The `FileSearchTool` enables searching through file contents, while editor tools provide controlled file manipulation capabilities.\n\n资料来源：[src/agents/editor.py:1-100]()\n\n#### Editor Tool Capabilities\n\n| Operation | Description |\n|-----------|-------------|\n| `read` | Read file contents |\n| `write` | Write content to files |\n| `edit` | Modify existing files |\n| `glob` | Find files by pattern |\n| `ls` | List directory contents |\n| `mv` | Move/rename files |\n| `rm` | Delete files |\n\n### Computer Tool\n\nThe `ComputerTool` enables agents to interact with a virtualized computer environment. 
This is particularly useful for tasks requiring UI automation, screenshot analysis, and keyboard/mouse control.\n\n资料来源：[src/agents/computer.py:1-100]()\n\nThe Computer Tool provides:\n\n- **Screen Capture**: Take screenshots of the virtual display\n- **Mouse Control**: Move cursor, click, scroll operations\n- **Keyboard Control**: Type text, press keys and key combinations\n- **Process Management**: Launch and interact with applications\n\n```mermaid\ngraph LR\n    A[Agent Decision] --> B[Computer Tool Action]\n    B --> C{Screen Capture?}\n    C -->|Yes| D[Screenshot Analysis]\n    C -->|No| E[Execute Action]\n    D --> F[Observation Result]\n    E --> G[Action Result]\n    F --> H[Agent Processing]\n    G --> H\n```\n\n### ApplyDiff Tool\n\nThe `ApplyDiff` tool provides efficient file modification capabilities using diff-based operations. Instead of replacing entire files, it applies targeted changes, making it more efficient for large files and reducing the risk of unintended modifications.\n\n资料来源：[src/agents/apply_diff.py:1-100]()\n\n## Tool Context and State Management\n\n### Tool Context\n\nTool context (`tool_context`) provides runtime information to tools during execution. It encapsulates the current run state, session information, and access to shared resources.\n\n资料来源：[src/agents/tool_context.py:1-100]()\n\n```mermaid\ngraph TD\n    A[Tool Execution] --> B[ToolContext]\n    B --> C[RunContext]\n    B --> D[Session]\n    B --> E[Store Settings]\n    C --> F[Current Agent]\n    C --> G[User Context]\n```\n\n### Agent Tool State\n\nThe `AgentToolState` manages tool-related state within an agent's execution context. 
This includes tracking tool usage, maintaining state across tool calls, and managing tool-specific configurations.\n\n资料来源：[src/agents/agent_tool_state.py:1-100]()\n\nKey responsibilities include:\n- Tracking which tools have been invoked\n- Maintaining state between sequential tool calls\n- Managing tool-specific configuration options\n- Handling tool result caching when appropriate\n\n## Tool Configuration\n\n### Tool Parameters\n\nTools are configured with JSON Schema definitions that describe their expected parameters. This schema serves dual purposes:\n\n1. **LLM Understanding**: Helps the model generate correct tool calls\n2. **Validation**: Ensures incoming parameters meet requirements\n\n```python\nparams_json_schema = {\n    \"type\": \"object\",\n    \"properties\": {\n        \"query\": {\n            \"type\": \"string\",\n            \"description\": \"Search query string\"\n        },\n        \"limit\": {\n            \"type\": \"integer\",\n            \"description\": \"Maximum results to return\",\n            \"default\": 10\n        }\n    },\n    \"required\": [\"query\"]\n}\n```\n\n### Tool Options\n\n| Option | Description | Default |\n|--------|-------------|---------|\n| `name` | Tool identifier | Function name |\n| `description` | LLM-facing description | Docstring |\n| `params_json_schema` | Parameter schema | Auto-generated |\n| `strict` | Enforce schema strictly | `False` |\n| `require_approval` | Require human approval | `None` |\n\n## Tool Guardrails\n\n### Input Guardrails\n\nInput guardrails validate tool parameters before execution. 
They provide an opportunity to inspect, modify, or reject tool calls based on custom logic.\n\n```python\nasync def validate_search_params(\n    ctx: RunContextWrapper,\n    tool: MCPTool,\n    params: dict\n) -> InputGuardrailResult:\n    # Custom validation logic\n    if contains_prohibited_terms(params.get(\"query\")):\n        return InputGuardrailResult(\n            did_pass=False,\n            message=\"Query contains prohibited content\"\n        )\n    return InputGuardrailResult(did_pass=True)\n```\n\n### Output Guardrails\n\nOutput guardrails validate tool results after execution. They ensure that tool outputs meet safety, formatting, or content requirements before being returned to the agent.\n\n资料来源：[src/agents/items.py:50-100]()\n\n## Tool Filtering\n\nThe SDK supports filtering which tools are exposed to agents. This is particularly useful when:\n\n- Limiting agent capabilities for security\n- Testing specific tool behaviors\n- Implementing role-based access control\n\n资料来源：[examples/mcp/tool_filter_example/README.md]()\n\n```python\n# Static tool filter\ntool_filter = [\"filesystem_read\", \"filesystem_write\"]\n\n# Dynamic tool filter\nasync def dynamic_filter(\n    ctx: RunContextWrapper,\n    agent: Agent,\n    tool: Tool\n) -> bool:\n    return tool.name in allowed_tools\n```\n\n## Integration with Agents\n\n### Adding Tools to Agents\n\nTools are added to agents through the agent's initialization or configuration:\n\n```python\nagent = Agent(\n    name=\"research_agent\",\n    tools=[\n        web_search_tool,\n        file_search_tool,\n        custom_function_tool\n    ],\n    instructions=\"You are a research assistant...\"\n)\n```\n\n### Tool Execution Flow\n\n```mermaid\nsequenceDiagram\n    participant Agent\n    participant SDK\n    participant Tool\n    participant External\n\n    Agent->>SDK: Request tool execution\n    SDK->>Tool: Validate parameters\n    Tool->>Tool: Apply input guardrails\n    Tool->>External: Execute operation\n    
External-->>Tool: Return result\n    Tool->>Tool: Apply output guardrails\n    Tool-->>SDK: Return processed result\n    SDK-->>Agent: Provide tool result\n```\n\n## Human-in-the-Loop with Tools\n\n### Approval Requirements\n\nTools can be configured to require human approval before execution. When enabled, the SDK pauses tool execution and awaits human confirmation.\n\n```python\ntool = FunctionTool(\n    name=\"send_email\",\n    handle_invoke=send_email,\n    require_approval=\"always\"\n)\n```\n\n资料来源：[src/agents/mcp/server.py:100-150]()\n\n### Approval Resume\n\nAfter human approval or rejection, the SDK resumes execution with the approval result:\n\n```python\nawait runner.resume(\n    run_id=run_id,\n    approval_result=ApprovalResult(approved=True)\n)\n```\n\n## Summary\n\nThe Tools system in the OpenAI Agents Python SDK provides a flexible, extensible framework for adding capabilities to AI agents. Key features include:\n\n- **Abstraction**: Consistent interface for diverse tool types\n- **Composition**: Tools can be combined and filtered dynamically\n- **Safety**: Built-in guardrails and approval mechanisms\n- **Context Awareness**: Runtime context enables stateful tool interactions\n- **Integration**: Seamless integration with the agent execution model\n\nBy leveraging these tools, developers can create sophisticated agents that can search the web, manipulate files, execute code, interact with computer interfaces, and integrate with external services through protocols like MCP.\n\n---\n\n<a id='guardrails'></a>\n\n## Guardrails\n\n### 相关页面\n\n相关主题：[Agents](#agents), [Tools](#tools)\n\n<details>\n<summary>相关源码文件</summary>\n\n以下源码文件用于生成本页说明：\n\n- [src/agents/guardrail.py](https://github.com/openai/openai-agents-python/blob/main/src/agents/guardrail.py)\n- [src/agents/tool_guardrails.py](https://github.com/openai/openai-agents-python/blob/main/src/agents/tool_guardrails.py)\n- 
[src/agents/run_internal/guardrails.py](https://github.com/openai/openai-agents-python/blob/main/src/agents/run_internal/guardrails.py)\n</details>\n\n# Guardrails\n\nGuardrails provide a security and validation layer in the agents framework, enabling developers to intercept, validate, and control both incoming inputs and outgoing outputs at various stages of agent execution. They serve as programmable checkpoints that can enforce policy compliance, prevent data leakage, block harmful content, and ensure operational safety across the entire agent runtime.\n\n## Overview\n\nThe guardrail system operates at multiple checkpoints during agent execution:\n\n```mermaid\ngraph TD\n    A[User Input] --> B[Input Guardrails]\n    B --> C[Agent Processing]\n    C --> D[Tool Call]\n    D --> E[Tool Input Guardrails]\n    E --> F[Tool Execution]\n    F --> G[Tool Output Guardrails]\n    G --> H[Response Generation]\n    H --> I[Output Guardrails]\n    I --> J[Final Output]\n    \n    B -.->|Block/Modify| A\n    E -.->|Block/Modify| D\n    G -.->|Block/Modify| F\n    I -.->|Block/Modify| H\n```\n\nGuardrails are implemented as pluggable components that can be attached to agents, individual tools, or configured globally. Each guardrail can define one of three behavioral responses when triggered:\n\n| Behavior Type | Description |\n|---------------|-------------|\n| `raise_exception` | Throws a tripwire exception, halting execution |\n| `reject_content` | Replaces the content with a custom rejection message |\n| `filter` | Removes or sanitizes the problematic content (planned) |\n\n资料来源：[src/agents/run_internal/tool_execution.py:1-50]()\n\n## Types of Guardrails\n\n### Input Guardrails\n\nInput guardrails validate user-provided input before it reaches the agent. 
They receive the raw input and can inspect, modify, or reject it based on custom logic.\n\n```mermaid\nsequenceDiagram\n    participant User\n    participant Runner\n    participant InputGuardrail\n    participant Agent\n    \n    User->>Runner: User Input\n    Runner->>InputGuardrail: Run input through guardrails\n    alt Guardrail triggers\n        InputGuardrail->>Runner: GuardrailOutput with behavior\n        alt raise_exception\n            Runner-->>User: GuardrailTripwireTriggered Error\n        else reject_content\n            Runner->>Agent: Modified/Sanitized input\n        end\n    else Pass through\n        InputGuardrail->>Runner: GuardrailOutput with pass behavior\n        Runner->>Agent: Original input\n    end\n```\n\n资料来源：[src/agents/run_internal/guardrails.py:1-30]()\n\n### Tool Input Guardrails\n\nTool input guardrails validate the arguments passed to tool calls before execution. They have access to the tool context, agent information, and the raw tool arguments.\n\n```python\n@dataclass\nclass ToolInputGuardrailData:\n    context: ToolContext[Any]\n    agent: Agent[Any]\n    input: Any  # The raw tool arguments\n```\n\n资料来源：[src/agents/tool_guardrails.py:1-20]()\n\n### Tool Output Guardrails\n\nTool output guardrails validate the results returned from tool execution before those results are processed further. They can inspect, filter, or reject tool outputs.\n\n```python\n@dataclass\nclass ToolOutputGuardrailData:\n    context: ToolContext[Any]\n    agent: Agent[Any]\n    output: Any  # The raw tool result\n```\n\n资料来源：[src/agents/tool_guardrails.py:1-20]()\n\n### Output Guardrails\n\nOutput guardrails validate the agent's final response before it is returned to the user. 
These operate on the completed message stream and can perform final content filtering or policy checks.\n\n## GuardrailResult Structure\n\nEach guardrail execution produces a `GuardrailOutput` result that defines the subsequent action:\n\n```python\n@dataclass\nclass GuardrailOutput:\n    content_filtered: bool\n    policy_name: str\n    policy_version: str\n    content: str | None\n    behavior: dict[str, Any]\n```\n\nThe `behavior` dictionary must contain at minimum a `type` key specifying one of the supported behavior types.\n\n资料来源：[src/agents/guardrail.py:1-50]()\n\n## Configuration\n\n### Agent-Level Guardrail Configuration\n\nGuardrails can be attached directly to an agent instance:\n\n```python\nfrom agents import Agent, Guardrail\n\nagent = Agent(\n    name=\"secure_agent\",\n    instructions=\"You are a helpful assistant\",\n    input_guardrails=[\n        Guardrail(guardrail_name=\"content_filter\"),\n        Guardrail(guardrail_name=\"pii_detector\"),\n    ],\n    output_guardrails=[\n        Guardrail(guardrail_name=\"safety_check\"),\n    ],\n)\n```\n\n### Tool-Level Guardrail Configuration\n\nIndividual tools can have their own guardrails:\n\n```python\nfrom agents import function_tool, ToolInputGuardrail, ToolOutputGuardrail\n\n@function_tool(\n    tool_input_guardrails=[input_check_guardrail],\n    tool_output_guardrails=[output_check_guardrail],\n)\ndef sensitive_operation(x: str) -> str:\n    return process(x)\n```\n\n资料来源：[src/agents/tool.py:1-30]()\n\n### Guardrail Behavior Configuration\n\nGuardrails can be configured with different tripwire behaviors:\n\n| Parameter | Type | Description |\n|-----------|------|-------------|\n| `guardrail_name` | `str` | Unique identifier for the guardrail |\n| `on_fail` | `GuardrailFailureMode` | Behavior when triggered |\n| `error_message` | `str` | Custom error message for exceptions |\n| `log` | `bool` | Whether to log guardrail triggers |\n\n## Tracing and Observability\n\nGuardrail execution is 
automatically traced using the observability framework:\n\n```mermaid\ngraph LR\n    A[Guardrail Trigger] --> B[guardrail_span]\n    B --> C[Record triggered status]\n    B --> D[Capture span data]\n    D --> E[Export to trace provider]\n    \n    C -->|True| F[Mark span as triggered]\n    C -->|False| G[Continue normally]\n```\n\nThe `guardrail_span` function creates spans for monitoring:\n\n```python\ndef guardrail_span(\n    name: str,\n    triggered: bool = False,\n    span_id: str | None = None,\n    parent: Trace | Span[Any] | None = None,\n    disabled: bool = False,\n) -> Span[GuardrailSpanData]:\n```\n\n资料来源：[src/agents/tracing/create.py:1-40]()\n\n## Execution Flow\n\n### Tool Guardrail Execution\n\nTool guardrails are executed within the tool execution pipeline:\n\n```mermaid\nflowchart TD\n    A[Tool Call Invoked] --> B{Input Guardrails exist?}\n    B -->|Yes| C[Execute Input Guardrails]\n    C --> D{Any trigger raise_exception?}\n    D -->|Yes| E[Raise ToolInputGuardrailTripwireTriggered]\n    D -->|No| F{Any trigger reject_content?}\n    F -->|Yes| G[Replace input with message]\n    F -->|No| H[Execute Tool]\n    H --> I{Output Guardrails exist?}\n    I -->|Yes| J[Execute Output Guardrails]\n    J --> K{Any trigger raise_exception?}\n    K -->|Yes| L[Raise ToolOutputGuardrailTripwireTriggered]\n    K -->|No| M{Any trigger reject_content?}\n    M -->|Yes| N[Replace output with message]\n    M -->|No| O[Return result]\n```\n\n资料来源：[src/agents/run_internal/tool_execution.py:50-100]()\n\n### Guardrail Tripwire Exceptions\n\nWhen a guardrail triggers with `raise_exception` behavior, specific exception types are raised:\n\n| Exception Type | Triggered By |\n|---------------|--------------|\n| `ToolInputGuardrailTripwireTriggered` | Tool input guardrail rejection |\n| `ToolOutputGuardrailTripwireTriggered` | Tool output guardrail rejection |\n\nThese exceptions contain both the guardrail reference and the output that triggered it, enabling detailed error 
handling and debugging.\n\n## Implementation Pattern\n\n### Creating a Custom Guardrail\n\n```python\nfrom agents import Guardrail, RunContextWrapper\nfrom agents.guardrail import (\n    GuardrailOutput,\n    InputGuardrailOutputData,\n    OutputGuardrailOutputData,\n)\n\nasync def my_guardrail(\n    context: RunContextWrapper,\n    input_data: InputGuardrailOutputData,\n) -> GuardrailOutput:\n    text = input_data.agents_input\n    if contains_problematic_content(text):\n        return GuardrailOutput(\n            content_filtered=True,\n            policy_name=\"my_policy\",\n            policy_version=\"1.0\",\n            content=\"Content filtered due to policy violation\",\n            behavior={\"type\": \"reject_content\", \"message\": \"Content not allowed\"},\n        )\n    return GuardrailOutput(\n        content_filtered=False,\n        policy_name=\"my_policy\",\n        policy_version=\"1.0\",\n        content=None,\n        behavior={\"type\": \"pass\"},\n    )\n\nguardrail = Guardrail(\n    guardrail_name=\"my_custom_guardrail\",\n    guardrail_function=my_guardrail,\n)\n```\n\n### Using with FunctionTool\n\n```python\nfrom agents import function_tool, ToolInputGuardrail, ToolOutputGuardrail\n\n@function_tool(\n    tool_input_guardrails=[\n        ToolInputGuardrail(guardrail_function=validate_json_input),\n    ],\n    tool_output_guardrails=[\n        ToolOutputGuardrail(guardrail_function=validate_output_schema),\n    ],\n)\ndef process_data(input: str) -> dict:\n    # Tool implementation\n    pass\n```\n\n## Best Practices\n\n1. **Defense in Depth**: Layer multiple guardrails at different checkpoints for comprehensive coverage\n2. **Fail-Safe Defaults**: Configure guardrails to fail closed (reject) rather than open (pass) when uncertain\n3. **Logging**: Enable guardrail logging for security auditing and debugging\n4. **Performance**: Keep guardrail logic lightweight to avoid introducing latency\n5. 
**Idempotency**: Ensure guardrails produce consistent results for the same input\n\n## See Also\n\n- [Agents Overview](../agents/overview) — General agent architecture\n- [Tools](../tools/overview) — Tool implementation and configuration\n- [Tracing](../tracing/overview) — Observability and monitoring\n- [Handoffs](../handoffs/overview) — Multi-agent handoff mechanisms\n\n---\n\n<a id='handoffs'></a>\n\n## Handoffs\n\n### 相关页面\n\n相关主题：[Agents](#agents), [Agents as Tools](#agents-as-tools), [Run Loop and Execution](#run-loop)\n\n<details>\n<summary>相关源码文件</summary>\n\n以下源码文件用于生成本页说明：\n\n- [src/agents/handoffs/__init__.py](https://github.com/openai/openai-agents-python/blob/main/src/agents/handoffs/__init__.py)\n- [src/agents/handoffs/history.py](https://github.com/openai/openai-agents-python/blob/main/src/agents/handoffs/history.py)\n- [src/agents/extensions/handoff_filters.py](https://github.com/openai/openai-agents-python/blob/main/src/agents/extensions/handoff_filters.py)\n- [src/agents/extensions/handoff_prompt.py](https://github.com/openai/openai-agents-python/blob/main/src/agents/extensions/handoff_prompt.py)\n</details>\n\n# Handoffs\n\n## Overview\n\nHandoffs in the OpenAI Agents Python SDK enable seamless transfer of control and conversation context between different agents. When an agent determines that a task should be handled by another agent, a handoff executes the transition, optionally filtering or transforming the input data before the receiving agent begins processing.\n\nThe handoff mechanism serves as the backbone for multi-agent architectures, allowing complex workflows where specialized agents handle specific subtasks while maintaining coherent conversation state across transitions.\n\n## Core Concepts\n\n### What is a Handoff?\n\nA handoff is a structured mechanism that transfers control from one agent to another. 
It encapsulates:\n\n- The destination agent\n- Tool configuration for invoking the handoff\n- Optional input filtering logic\n- Optional type validation for handoff arguments\n- Enable/disable conditions\n\n资料来源：[src/agents/handoffs/__init__.py:1-100]()\n\n### The Handoff Class\n\nThe `Handoff` class is the primary abstraction for defining agent-to-agent transfers:\n\n```python\nclass Handoff(Generic[TAgent, TContext]):\n    name: str\n    description: str\n    input_json_schema: dict[str, Any]\n    on_invoke_handoff: Callable[[RunContextWrapper[Any], str], Awaitable[TAgent]]\n    agent_name: str\n    input_filter: HandoffInputFilter | None = None\n    is_enabled: bool | Callable[[RunContextWrapper[Any], Agent[TContext]], bool] = True\n```\n\n资料来源：[src/agents/handoffs/__init__.py:100-130]()\n\n### HandoffInputData\n\nWhen a handoff is invoked, it receives and processes `HandoffInputData`:\n\n| Field | Type | Description |\n|-------|------|-------------|\n| `input_history` | `list[InputItem]` | Conversation history up to the handoff point |\n| `pre_handoff_items` | `list[RunItem]` | Run items generated before handoff |\n| `input_items` | `list[InputItem]` | Input items to pass to the next agent |\n| `new_items` | `list[RunItem]` | New items to add to the receiving agent's context |\n\n资料来源：[src/agents/handoffs/__init__.py:50-80]()\n\n## Architecture\n\n### Handoff Flow\n\n```mermaid\ngraph TD\n    A[Current Agent] -->|Determines handoff needed| B[Handoff Tool Call]\n    B --> C{is_enabled check}\n    C -->|Enabled| D[on_invoke_handoff]\n    C -->|Disabled| E[Hide from LLM]\n    D --> F[Input Filter Processing]\n    F --> G{HandoffInputData}\n    G --> H[Next Agent Context]\n    H --> I[Receiving Agent]\n    \n    J[Type Validation] -.->|if input_type provided| F\n    K[History Nesting] -.->|if nest_handoff_history enabled| G\n```\n\n### Agent Hierarchy with Handoffs\n\n```mermaid\ngraph TD\n    A[Orchestrator Agent] -->|handoff| B[Research Agent]\n    A -->|handoff| 
C[Writer Agent]\n    A -->|handoff| D[Review Agent]\n    B -->|handoff| E[Web Search Agent]\n    B -->|handoff| F[Data Analysis Agent]\n    C -->|handoff| D\n```\n\n## Configuration Options\n\n### Handoff Constructor Parameters\n\n| Parameter | Type | Required | Default | Description |\n|-----------|------|----------|---------|-------------|\n| `agent` | `Agent[TContext]` | Yes | - | The destination agent |\n| `name` | `str` | No | agent.name | Custom name for the handoff tool |\n| `description` | `str` | No | agent.description | Tool description shown to the model |\n| `tool_description_override` | `str` | No | None | Override the tool description |\n| `on_handoff` | `Callable` | No | None | Side effect function executed on handoff |\n| `input_type` | `type` | No | None | Type for validating handoff arguments |\n| `input_filter` | `HandoffInputFilter` | No | None | Function to filter/transform inputs |\n| `nest_handoff_history` | `bool` | No | None | Override run-level history nesting setting |\n| `is_enabled` | `bool \\| Callable` | No | True | Whether the handoff is available |\n\n资料来源：[src/agents/handoffs/__init__.py:150-200]()\n\n### Input Type Validation\n\nWhen `input_type` is provided, the model-generated JSON arguments are validated:\n\n```python\nif input_type is not None and on_handoff is None:\n    raise UserError(\"You must provide on_handoff when input_type is provided\")\n```\n\nThe `on_handoff` callback must accept two parameters for type-validated inputs:\n\n```python\nasync def on_handoff(ctx: RunContext, data: ValidatedInputType) -> Agent:\n    ...\n```\n\n资料来源：[src/agents/handoffs/__init__.py:200-220]()\n\n### Enabling/Disabling Handoffs\n\nHandoffs can be conditionally enabled using the `is_enabled` parameter:\n\n```python\n# Static boolean\nhandoff = Handoff(agent=agent, is_enabled=False)\n\n# Dynamic condition\nhandoff = Handoff(\n    agent=agent,\n    is_enabled=lambda ctx, current_agent: ctx.user_id in ADMIN_USERS\n)\n```\n\nDisabled 
handoffs are hidden from the LLM at runtime.\n\n资料来源：[src/agents/handoffs/__init__.py:180-190]()\n\n## Input Filtering\n\n### HandoffInputFilter\n\nThe `input_filter` function receives the entire conversation history and can modify what the next agent receives:\n\n```python\nHandoffInputFilter = Callable[\n    [HandoffInputData], HandoffInputData | Awaitable[HandoffInputData]\n]\n```\n\n### Common Filtering Patterns\n\n| Pattern | Use Case |\n|---------|----------|\n| Remove sensitive data | Strip user credentials before handoff |\n| Context summarization | Condense long conversations |\n| Tool filtering | Remove tools not needed by next agent |\n| History truncation | Keep only recent relevant items |\n\n### Example Input Filter\n\n```python\ndef filter_sensitive_inputs(data: HandoffInputData) -> HandoffInputData:\n    # Remove tool call outputs containing sensitive info\n    filtered_history = [\n        item for item in data.input_history\n        if not contains_sensitive(item)\n    ]\n    return dataclasses.replace(data, input_history=filtered_history)\n```\n\n资料来源：[src/agents/extensions/handoff_filters.py]()\n\n## History Management\n\n### Nesting Conversation History\n\nWhen `nest_handoff_history=True`, the previous agent's conversation is summarized before being passed to the next agent:\n\n```python\ndef nest_handoff_history(\n    handoff_input_data: HandoffInputData,\n    *,\n    history_mapper: HandoffHistoryMapper | None = None,\n) -> HandoffInputData:\n    \"\"\"Summarize the previous transcript for the next agent.\"\"\"\n```\n\nThis prevents context overflow and provides the new agent with a concise summary rather than full conversation history.\n\n资料来源：[src/agents/handoffs/history.py:40-60]()\n\n### Conversation History Wrappers\n\nDefault markers wrap nested conversation summaries:\n\n| Marker | Default Value |\n|--------|---------------|\n| Start | `<CONVERSATION HISTORY>` |\n| End | `</CONVERSATION HISTORY>` |\n\nThese can be 
customized:\n\n```python\nset_conversation_history_wrappers(\n    start=\"<PREVIOUS AGENT TRANSCRIPT>\",\n    end=\"</PREVIOUS AGENT TRANSCRIPT>\"\n)\n```\n\n资料来源：[src/agents/handoffs/history.py:20-40]()\n\n## Creating Handoffs\n\n### Basic Handoff\n\n```python\nfrom agents import Agent, Handoff, Runner\n\nagent_a = Agent(name=\"Agent A\", instructions=\"...\")\nagent_b = Agent(name=\"Agent B\", instructions=\"...\")\n\n# Create handoff\nhandoff_to_b = Handoff(name=\"transfer_to_b\", agent=agent_b)\n\n# Add to source agent\nagent_a.handoffs.append(handoff_to_b)\n```\n\n### Handoff with Callbacks\n\n```python\nasync def on_transfer_to_b(ctx: RunContext, input_data: str) -> Agent:\n    # Log the handoff\n    logger.info(f\"Handoff triggered by user: {ctx.user_id}\")\n    # Return destination agent\n    return agent_b\n\nhandoff_to_b = Handoff(\n    agent=agent_b,\n    name=\"transfer_to_b\",\n    on_handoff=on_transfer_to_b\n)\n```\n\n### Handoff with Type Validation\n\n```python\nfrom pydantic import BaseModel\n\nclass TransferData(BaseModel):\n    reason: str\n    priority: int = 1\n\nasync def handle_transfer(ctx: RunContext, data: TransferData) -> Agent:\n    if data.priority > 5:\n        return urgent_agent\n    return standard_agent\n\nhandoff = Handoff(\n    agent=standard_agent,\n    input_type=TransferData,\n    on_handoff=handle_transfer\n)\n```\n\n## Handoffs in the Run Loop\n\n### Turn Resolution with Handoffs\n\nWhen a handoff is triggered during agent execution:\n\n```mermaid\nsequenceDiagram\n    participant Agent as Current Agent\n    participant Run as Run Loop\n    participant Handoff as Handoff Handler\n    \n    Agent->>Run: Generate response with handoff tool call\n    Run->>Handoff: Process NextStepHandoff\n    Handoff->>Handoff: Validate input_type if provided\n    Handoff->>Handoff: Execute input_filter\n    Handoff->>Handoff: Call on_handoff callback\n    Handoff-->>Run: Return new agent and filtered input\n    Run->>Run: Reset current 
agent\n    Run->>Run: Start next turn with new agent\n```\n\n资料来源：[src/agents/run.py:200-250]()\n\n### Handoff Result Processing\n\nThe run loop handles handoff transitions:\n\n```python\nelif isinstance(turn_result.next_step, NextStepHandoff):\n    current_agent = cast(Agent[TContext], turn_result.next_step.new_agent)\n    # Next agent starts with the nested/filtered input\n    starting_input = turn_result.original_input\n    original_input = turn_result.original_input\n    should_run_agent_start_hooks = True\n```\n\n资料来源：[src/agents/run.py:230-245]()\n\n## Prompt Integration\n\n### Handoff Tool Representation\n\nHandoffs appear as tools to the LLM with descriptions generated from the handoff configuration:\n\n```python\n# Tool name format\nf\"transfer_to_{agent_name}\"\n\n# Tool description includes\n- Handoff name\n- Agent description\n- Input schema if defined\n- Custom tool_description_override if provided\n```\n\n资料来源：[src/agents/extensions/handoff_prompt.py]()\n\n### Prompt Instructions\n\nThe system prompt can include handoff guidance:\n\n```\n- When a task matches another agent's expertise, use the handoff tool\n- Explain the reason for handoff in your response\n- Preserve relevant context during transfer\n```\n\n## Best Practices\n\n### Design Principles\n\n1. **Clear Agent Specialization**: Each agent should have a distinct responsibility\n2. **Minimal Handoff Arguments**: Pass only essential data, not entire conversations\n3. **Meaningful Handoff Names**: Use descriptive names that indicate the destination\n4. 
**Appropriate History Management**: Enable nesting for long conversations\n\n### Error Handling\n\n| Scenario | Recommended Approach |\n|----------|---------------------|\n| Handoff to unavailable agent | Check `is_enabled` before showing to model |\n| Invalid input type | Use Pydantic validation with clear error messages |\n| Filter failure | Return original input with warning |\n\n### Performance Considerations\n\n- Avoid complex filters that run synchronously on large histories\n- Use `is_enabled` callbacks to prevent unnecessary tool calls\n- Consider disabling history nesting for high-frequency handoffs\n\n## Related Components\n\n| Component | File | Purpose |\n|-----------|------|---------|\n| `Handoff` class | `src/agents/handoffs/__init__.py` | Core handoff definition |\n| `HandoffInputData` | `src/agents/handoffs/__init__.py` | Input data structure |\n| `nest_handoff_history` | `src/agents/handoffs/history.py` | History summarization |\n| `HandoffInputFilter` | `src/agents/extensions/handoff_filters.py` | Input filtering utilities |\n| Handoff prompt integration | `src/agents/extensions/handoff_prompt.py` | Prompt rendering |\n\n## Summary\n\nHandoffs provide a robust mechanism for multi-agent orchestration in the OpenAI Agents Python SDK. 
Key capabilities include:\n\n- **Structured Transfer**: Defined handoff contracts with optional type validation\n- **Flexible Input Management**: Filtering and transformation before agent handoff\n- **History Control**: Nesting or truncating conversation context\n- **Conditional Execution**: Enable/disable based on runtime conditions\n- **Callback Support**: Side effects and logging during transitions\n\nThese mechanisms enable complex agent workflows while maintaining clean separation of concerns and manageable context sizes.\n\n---\n\n<a id='agents-as-tools'></a>\n\n## Agents as Tools\n\n### 相关页面\n\n相关主题：[Handoffs](#handoffs), [Agents](#agents)\n\n<details>\n<summary>相关源码文件</summary>\n\n以下源码文件用于生成本页说明：\n\n- [examples/agent_patterns/agents_as_tools.py](https://github.com/openai/openai-agents-python/blob/main/examples/agent_patterns/agents_as_tools.py)\n- [examples/agent_patterns/agents_as_tools_conditional.py](https://github.com/openai/openai-agents-python/blob/main/examples/agent_patterns/agents_as_tools_conditional.py)\n- [examples/agent_patterns/agents_as_tools_structured.py](https://github.com/openai/openai-agents-python/blob/main/examples/agent_patterns/agents_as_tools_structured.py)\n- [src/agents/agent.py](https://github.com/openai/openai-agents-python/blob/main/src/agents/agent.py)\n- [src/agents/run.py](https://github.com/openai/openai-agents-python/blob/main/src/agents/run.py)\n- [src/agents/extensions/visualization.py](https://github.com/openai/openai-agents-python/blob/main/src/agents/extensions/visualization.py)\n- [examples/sandbox/handoffs.py](https://github.com/openai/openai-agents-python/blob/main/examples/sandbox/handoffs.py)\n</details>\n\n# Agents as Tools\n\nAgents as Tools is a powerful architectural pattern in the openai-agents-python library that enables one agent to be invoked as a callable tool by another agent. 
This pattern allows for sophisticated multi-agent orchestration where specialized agents can be dynamically called with specific inputs, returning structured results to the calling agent.\n\n## Overview\n\nIn the traditional agent architecture, agents operate as standalone units that receive input, execute tasks, and return results. The \"Agents as Tools\" pattern extends this by wrapping agents inside function tool abstractions, enabling:\n\n- **Dynamic Agent Invocation**: Agents can be called like functions within other agents' workflows\n- **Structured Inputs and Outputs**: Typed interfaces ensure consistent data exchange between agents\n- **Conditional Execution**: Agents can be invoked based on specific conditions or input patterns\n- **Parallel Tool Calls**: Multiple agents can be called simultaneously as tools\n- **Nested Architectures**: Complex hierarchies of agents calling sub-agents as tools\n\nThis pattern is particularly valuable for building research assistants, customer service systems, and specialized workflow engines where different capabilities need to be composed dynamically.\n\n## Architecture\n\n```mermaid\ngraph TD\n    subgraph \"Primary Agent\"\n        PA[Main Agent]\n        PA -->|has tools| T1[Agent-as-Tool 1]\n        PA -->|has tools| T2[Agent-as-Tool 2]\n        PA -->|has tools| Tn[Agent-as-Tool N]\n    end\n    \n    subgraph \"Wrapped Agents\"\n        T1 -->|wraps| A1[Specialized Agent 1]\n        T2 -->|wraps| A2[Specialized Agent 2]\n        Tn -->|wraps| An[Specialized Agent N]\n    end\n    \n    A1 -->|returns| T1\n    A2 -->|returns| T2\n    An -->|returns| Tn\n    T1 -->|tool result| PA\n    T2 -->|tool result| PA\n    Tn -->|tool result| PA\n```\n\n### Core Components\n\n| Component | Role | Location |\n|-----------|------|----------|\n| `Agent` | Base agent with instructions, tools, handoffs | `src/agents/agent.py` |\n| `FunctionTool` | Wraps callable functions for agent use | Tool infrastructure |\n| `Runner` | Executes 
agents and manages tool calls | `src/agents/run.py` |\n| `Handoff` | Enables agent-to-agent transfers | `src/agents/handoffs/__init__.py` |\n\n## Implementation Patterns\n\n### Basic Agent-to-Tool Conversion\n\nThe simplest form of this pattern converts an existing agent into a callable tool:\n\n```python\nfrom agents import Agent, function_tool\n\n# Create a specialized agent\nsearch_agent = Agent(\n    name=\"web_searcher\",\n    instructions=\"You are a web search expert. Search for the given query and summarize results.\",\n    tools=[web_search_tool],\n)\n\n# Convert to a function tool that the primary agent can use\n@function_tool\ndef search_tool(query: str) -> str:\n    \"\"\"Search the web for information.\"\"\"\n    result = Runner.run(search_agent, input=query)\n    return result.final_output\n```\n\n### AgentTool with Structured Output\n\nFor more sophisticated scenarios, agents can be wrapped with explicit input/output schemas:\n\n```python\nfrom agents import Agent\nfrom pydantic import BaseModel\n\nclass SearchResult(BaseModel):\n    title: str\n    url: str\n    summary: str\n\nsearch_agent = Agent(\n    name=\"structured_searcher\",\n    instructions=\"Search for information and return structured results.\",\n    output_type=SearchResult,\n)\n```\n\n### Conditional Agent Invocation\n\nAgents can be configured to only be available under certain conditions:\n\n```python\nfrom agents import Agent\n\nadmin_agent = Agent(\n    name=\"admin_panel\",\n    instructions=\"Handle administrative tasks.\",\n)\n\n# Conditional enabling based on user role\ndef is_admin(context):\n    return context.user_role == \"admin\"\n\nadmin_agent.is_enabled = is_admin\n```\n\n## Usage Examples\n\n### Research Assistant Pattern\n\nA common use case is a research bot with specialized sub-agents:\n\n```mermaid\nsequenceDiagram\n    participant User\n    participant Planner as Planner Agent\n    participant Search as Search Agent (Tool)\n    participant Writer as Writer 
Agent\n    \n    User->>Planner: \"Research topic: AI trends\"\n    Planner->>Planner: Generate search queries\n    Planner->>Search: tool_call(search_queries[0])\n    Planner->>Search: tool_call(search_queries[1])\n    Planner->>Search: tool_call(search_queries[n])\n    Search-->>Planner: SearchResult\n    Planner->>Writer: Pass summaries\n    Writer-->>User: Final report\n```\n\n### Example: Agent Patterns in Code\n\nThe repository includes several agent pattern examples demonstrating this functionality:\n\n**Basic Pattern** (`examples/agent_patterns/agents_as_tools.py`):\n```python\n# Agents are wrapped as tools and called by a primary agent\nprimary_agent = Agent(\n    name=\"orchestrator\",\n    instructions=\"Coordinate specialized agents to answer user queries.\",\n    tools=[search_agent_as_tool, code_agent_as_tool],\n)\n```\n\n**Conditional Pattern** (`examples/agent_patterns/agents_as_tools_conditional.py`):\n```python\n# Agents are conditionally available based on context\nif user.is_premium:\n    primary_agent.tools.append(premium_agent_tool)\n```\n\n**Structured Pattern** (`examples/agent_patterns/agents_as_tools_structured.py`):\n```python\n# Agents return structured data types\n@function_tool\ndef get_weather(location: str) -> WeatherData:\n    \"\"\"Get weather for a location.\"\"\"\n    return Runner.run(weather_agent, input=location)\n```\n\n## Configuration Options\n\n### Tool Metadata Configuration\n\nWhen converting an agent to a tool, you can override the default tool behavior:\n\n| Parameter | Type | Purpose |\n|-----------|------|---------|\n| `name` | `str` | Override the tool name shown to the LLM |\n| `description` | `str` | Human-readable description of what the tool does |\n| `input_type` | `Type[BaseModel]` | Pydantic model for input validation |\n| `output_type` | `Type[BaseModel]` | Pydantic model for output schema |\n| `is_enabled` | `bool \\| Callable` | Condition for tool availability |\n\n### Agent Configuration\n\nAgents used as 
tools support standard agent parameters:\n\n| Parameter | Description |\n|-----------|-------------|\n| `instructions` | System prompt for the agent |\n| `tools` | Additional tools available to the agent |\n| `handoffs` | Agents the sub-agent can transfer to |\n| `output_type` | Expected output type |\n| `model` | Specific model to use |\n\n## Execution Flow\n\n```mermaid\nflowchart LR\n    A[Primary Agent] -->|decides to call| B[Agent-as-Tool]\n    B -->|parses input| C{Input Validation}\n    C -->|valid| D[Execute Wrapped Agent]\n    C -->|invalid| E[Return Error]\n    D -->|run agent| F[Runner.run]\n    F -->|collect results| G[Format Output]\n    G -->|return| B\n    B -->|tool result| A\n```\n\n## Integration with Handoffs\n\nThe Agents as Tools pattern complements the handoff mechanism:\n\n| Aspect | Agents as Tools | Handoffs |\n|--------|-----------------|----------|\n| Control Flow | Agent calls tool, waits for result | Agent transfers control completely |\n| State | Shared context | Fresh context for new agent |\n| Use Case | Parallel specialized tasks | Sequential role switches |\n| Return | Structured result | Handoff message |\n\n**资料来源**：[src/agents/handoffs/__init__.py](https://github.com/openai/openai-agents-python/blob/main/src/agents/handoffs/__init__.py)\n\n## Best Practices\n\n1. **Clear Tool Descriptions**: Provide explicit descriptions so the LLM knows when to invoke the agent\n2. **Typed Interfaces**: Use Pydantic models for input/output to ensure type safety\n3. **Error Handling**: Wrap agent executions in try-catch to handle failures gracefully\n4. **Context Management**: Pass relevant context to sub-agents without overwhelming them\n5. 
**Conditional Enabling**: Use `is_enabled` to control access based on user permissions\n\n## Related Patterns\n\n- **Handoffs**: Complete agent-to-agent transfer for distinct roles\n- **Multi-Agent Orchestration**: Coordinated multi-agent workflows\n- **Sandbox Agents**: Isolated execution environments for agents\n- **Guardrails**: Input/output validation for agent tool calls\n\n**资料来源**：[examples/sandbox/handoffs.py](https://github.com/openai/openai-agents-python/blob/main/examples/sandbox/handoffs.py)\n\n---\n\n<a id='run-loop'></a>\n\n## Run Loop and Execution\n\n### 相关页面\n\n相关主题：[Agents](#agents), [Sessions and Memory](#sessions)\n\n<details>\n<summary>相关源码文件</summary>\n\n以下源码文件用于生成本页说明：\n\n- [src/agents/run.py](https://github.com/openai/openai-agents-python/blob/main/src/agents/run.py)\n- [src/agents/run_config.py](https://github.com/openai/openai-agents-python/blob/main/src/agents/run_config.py)\n- [src/agents/result.py](https://github.com/openai/openai-agents-python/blob/main/src/agents/result.py)\n- [src/agents/run_internal/turn_resolution.py](https://github.com/openai/openai-agents-python/blob/main/src/agents/run_internal/turn_resolution.py)\n- [src/agents/items.py](https://github.com/openai/openai-agents-python/blob/main/src/agents/items.py)\n</details>\n\n# Run Loop and Execution\n\nThe Run Loop and Execution system is the core engine of the openai-agents-python SDK. It orchestrates the interaction between agents, language models, tools, and external systems through an iterative turn-based processing architecture.\n\n## Overview\n\nThe execution model follows a **turn-based loop** where each turn consists of:\n\n1. **Turn Preparation** - Setting up context, hooks, and session state\n2. **Model Invocation** - Calling the language model with the current input\n3. **Response Processing** - Parsing and validating model outputs\n4. **Tool Execution** - Running any tools or side effects requested by the model\n5. 
**Turn Resolution** - Determining the next step (continue, handoff, or finish)\n\n资料来源：[src/agents/run.py:1-50]()\n\n## Architecture Components\n\n### Core Execution Flow\n\n```mermaid\ngraph TD\n    A[User Input] --> B[Run Loop Entry]\n    B --> C[Turn Preparation]\n    C --> D[Call Model]\n    D --> E{Response Type?}\n    E -->|Tool Calls| F[Execute Tools]\n    E -->|Handoff| G[Switch Agent]\n    E -->|Message| H[Finalize Output]\n    F --> C\n    G --> C\n    H --> I[Return RunResult]\n```\n\n### Key Modules\n\n| Module | Purpose | Key Classes/Functions |\n|--------|---------|----------------------|\n| `run.py` | Main entry point | `run()`, `run_sync()` |\n| `run_loop.py` | Core loop logic | `run_loop()` |\n| `turn_preparation.py` | Turn setup | Input filtering, hook invocation |\n| `turn_resolution.py` | Response handling | Tool result processing, output finalization |\n| `tool_execution.py` | Tool runner | `execute_tools_and_side_effects()` |\n| `streaming.py` | Streaming support | Stream handlers |\n\n资料来源：[src/agents/run.py:1-30]()\n\n## Run Configuration\n\n### RunOptions\n\nThe `RunOptions` TypedDict defines all parameters for running an agent:\n\n```python\nclass RunOptions(TypedDict, Generic[TContext]):\n    context: NotRequired[TContext | None]\n    max_turns: NotRequired[int | None]\n    hooks: NotRequired[RunHooks[TContext] | None]\n    run_config: NotRequired[RunConfig | None]\n    previous_response_id: NotRequired[str | None]\n    auto_previous_response_id: NotRequired[bool]\n    conversation_id: NotRequired[str | None]\n    session: NotRequired[Session | None]\n    error_handlers: NotRequired[RunErrorHandlers[TContext] | None]\n```\n\n### Configuration Options\n\n| Parameter | Type | Default | Description |\n|-----------|------|---------|-------------|\n| `max_turns` | `int \\| None` | `None` | Maximum turns; `None` disables limit |\n| `context` | `TContext \\| None` | `None` | Custom context object |\n| `hooks` | `RunHooks[TContext]` | `None` | 
Lifecycle hooks |\n| `run_config` | `RunConfig` | `None` | Runtime configuration |\n| `session` | `Session` | `None` | Session for state persistence |\n| `error_handlers` | `RunErrorHandlers` | `None` | Error callback handlers |\n\n资料来源：[src/agents/run_config.py:50-75]()\n\n## Turn Processing\n\n### Turn Resolution\n\nThe `turn_resolution.py` module handles processing model responses after tool execution:\n\n```python\ntool_final_output = await _maybe_finalize_from_tool_results(\n    public_agent=public_agent,\n    original_input=original_input,\n    new_response=new_response,\n    pre_step_items=pre_step_items,\n    new_step_items=new_step_items,\n    function_results=function_results,\n    hooks=hooks,\n    context_wrapper=context_wrapper,\n    tool_input_guardrail_results=tool_input_guardrail_results,\n    tool_output_guardrail_results=tool_output_guardrail_results,\n)\n```\n\n### Message Output Extraction\n\nThe `ItemHelpers` class provides utilities for extracting content from model responses:\n\n```python\n@classmethod\ndef extract_refusal(cls, message: TResponseOutputItem) -> str | None:\n    \"\"\"Extracts refusal content from a message, if any.\"\"\"\n    if not isinstance(message, ResponseOutputMessage):\n        return None\n    refusal = \"\"\n    for content_item in message.content:\n        if isinstance(content_item, ResponseOutputRefusal):\n            refusal += content_item.refusal or \"\"\n    return refusal or None\n```\n\n### Refusal Handling\n\nWhen the model refuses to respond, a `ModelRefusalError` is raised:\n\n```python\nif refusal:\n    refusal_error = ModelRefusalError(refusal)\n    run_error_data = build_run_error_data(...)\n```\n\n资料来源：[src/agents/run_internal/turn_resolution.py:25-45]()\n\n## Agent Handoffs\n\n### Handoff Processing\n\nThe run loop handles agent handoffs through the `NextStepHandoff` type:\n\n```python\nelif isinstance(turn_result.next_step, NextStepHandoff):\n    current_agent = cast(Agent[TContext], 
turn_result.next_step.new_agent)\n    if run_state is not None:\n        run_state._current_agent = current_agent\n    starting_input = turn_result.original_input\n    original_input = turn_result.original_input\n    current_span.finish(reset_current=True)\n    should_run_agent_start_hooks = True\n```\n\n### Loop Continuation\n\nFor cases requiring another iteration without switching agents:\n\n```python\nelif isinstance(turn_result.next_step, NextStepRunAgain):\n    await save_turn_items_if_needed(\n        session=session,\n        run_state=run_state,\n        session_persistence_enabled=session_persistence_enabled,\n        items=session_items_for_turn(turn_result),\n        response_id=turn_result.model_response.response_id,\n        store=store_setting,\n    )\n    continue\n```\n\n资料来源：[src/agents/run.py:150-180]()\n\n## Result Types\n\n### RunResult Structure\n\n| Field | Type | Description |\n|-------|------|-------------|\n| `last_agent` | `Agent` | Final agent that produced output |\n| `new_items` | `list[RunItem]` | All items from the run |\n| `final_output` | `Response` | Final model response |\n| `raw_responses` | `list[RawResponsesFromModel]` | Raw model outputs |\n\n### Tool Output Handling\n\nTool outputs are processed through multiple stages:\n\n1. **Pre-step items** - State before tool execution\n2. **New step items** - State after tool execution\n3. 
**Function results** - Structured tool call results\n\nThe system tracks tool activity without messages using:\n\n```python\nhas_tool_activity_without_message = not message_items and bool(\n    processed_response.tools_used\n)\n```\n\n资料来源：[src/agents/run_internal/turn_resolution.py:35-40]()\n\n## Input Processing\n\n### Input Conversion\n\nThe `ItemHelpers` class handles input normalization:\n\n```python\n@classmethod\ndef input_to_new_input_list(\n    cls, input: str | list[TResponseInputItem]\n) -> list[TResponseInputItem]:\n    \"\"\"Converts a string or list of input items into a list of input items.\"\"\"\n    if isinstance(input, str):\n        return [{\"content\": input, \"role\": \"user\"}]\n    return cast(list[TResponseInputItem], _to_dump_compatible(input))\n```\n\n### Text Extraction\n\nConcatenate all text content from message output items:\n\n```python\n@classmethod\ndef text_message_outputs(cls, items: list[RunItem]) -> str:\n    \"\"\"Concatenates all the text content from a list of message output items.\"\"\"\n    text = \"\"\n    for item in items:\n        if isinstance(item, MessageOutputItem):\n            text += cls.text_message_output(item)\n    return text\n```\n\n资料来源：[src/agents/items.py:60-90]()\n\n## Error Handling\n\n### Error Flow\n\n```mermaid\ngraph TD\n    A[Error Occurs] --> B{Error Type?}\n    B -->|Refusal| C[ModelRefusalError]\n    B -->|Tool Failure| D[ToolExecutionError]\n    B -->|Max Turns| E[MaxTurnsExceededError]\n    B -->|Other| F[Generic Error Handler]\n    C --> G[Build Error Data]\n    D --> G\n    E --> G\n    F --> G\n    G --> H[Return Error Result]\n```\n\n### Error Handlers Configuration\n\nCustom error handlers can be registered per error kind:\n\n```python\nerror_handlers: RunErrorHandlers[TContext] | None\n```\n\nThe system supports typed error handling where handlers are keyed by error category.\n\n资料来源：[src/agents/run_config.py:60-65]()\n\n## Session Persistence\n\n### Save Turn Items\n\nThe run loop 
persists state after each turn when session is enabled:\n\n```python\nawait save_turn_items_if_needed(\n    session=session,\n    run_state=run_state,\n    session_persistence_enabled=session_persistence_enabled,\n    items=session_items_for_turn(turn_result),\n    response_id=turn_result.model_response.response_id,\n    store=store_setting,\n)\n```\n\n### Parameters\n\n| Parameter | Type | Description |\n|-----------|------|-------------|\n| `session` | `Session \\| None` | Active session instance |\n| `run_state` | `RunState \\| None` | Current run state |\n| `session_persistence_enabled` | `bool` | Whether persistence is active |\n| `items` | `list[RunItem]` | Items to persist |\n| `response_id` | `str` | Model response ID |\n| `store` | `StoreSetting` | Storage configuration |\n\n资料来源：[src/agents/run.py:160-170]()\n\n## Streaming Support\n\nThe system supports streaming model outputs through the streaming module. Streaming is configured via `RunConfig` and allows real-time output handling without waiting for complete responses.\n\n## Lifecycle Hooks\n\n### Available Hooks\n\n| Hook | Trigger | Purpose |\n|------|---------|---------|\n| `on_agent_start` | Agent turn begins | Initialize agent-specific state |\n| `on_agent_end` | Agent turn ends | Cleanup or logging |\n| `on_tool_call` | Tool invocation | Logging or monitoring |\n| `on_handoff` | Agent switch | Track transitions |\n\nHooks receive `RunContextWrapper` and relevant context data, enabling deep customization of the execution flow.\n\n资料来源：[src/agents/run_config.py:35-45]()\n\n## Summary\n\nThe Run Loop and Execution system provides:\n\n- **Iterative Processing**: Turn-based model interaction with tool execution\n- **Flexible Configuration**: Extensive options via `RunOptions` and `RunConfig`\n- **Agent Orchestration**: Seamless handoff between agents\n- **Error Resilience**: Typed error handlers and refusal detection\n- **Session Management**: Persistent state across turns\n- **Lifecycle Hooks**: 
Customization at every execution stage\n\nThe architecture prioritizes extensibility, allowing developers to hook into any phase of execution while maintaining a clear, predictable flow from input to final output.\n\n---\n\n<a id='sessions'></a>\n\n## Sessions and Memory\n\n### 相关页面\n\n相关主题：[Run Loop and Execution](#run-loop)\n\n<details>\n<summary>相关源码文件</summary>\n\n以下源码文件用于生成本页说明：\n\n- [src/agents/sandbox/memory/prompts/rollout_extraction_user_message.md](https://github.com/openai/openai-agents-python/blob/main/src/agents/sandbox/memory/prompts/rollout_extraction_user_message.md)\n- [src/agents/sandbox/capabilities/memory.py](https://github.com/openai/openai-agents-python/blob/main/src/agents/sandbox/capabilities/memory.py)\n- [src/agents/extensions/memory/__init__.py](https://github.com/openai/openai-agents-python/blob/main/src/agents/extensions/memory/__init__.py)\n- [src/agents/sandbox/session/sinks.py](https://github.com/openai/openai-agents-python/blob/main/src/agents/sandbox/session/sinks.py)\n- [src/agents/sandbox/session/base_sandbox_session.py](https://github.com/openai/openai-agents-python/blob/main/src/agents/sandbox/session/base_sandbox_session.py)\n- [src/agents/handoffs/history.py](https://github.com/openai/openai-agents-python/blob/main/src/agents/handoffs/history.py)\n- [src/agents/sandbox/errors.py](https://github.com/openai/openai-agents-python/blob/main/src/agents/sandbox/errors.py)\n</details>\n\n# Sessions and Memory\n\n## Overview\n\nThe Sessions and Memory system in the openai-agents-python library provides persistent conversation state management for AI agents. This system enables agents to maintain context across multiple interactions, store conversation history, and access previously learned information through a flexible session abstraction layer.\n\nThe architecture is built around a **protocol-based design** that allows different storage backends while maintaining a consistent interface. 
Sessions track conversation items, manage agent handoffs, and enable memory persistence for sandboxed agent environments.\n\n资料来源：[src/agents/extensions/memory/__init__.py:1-8]()\n\n## Architecture\n\n### Session Protocol\n\nThe core of the session system is the `Session` protocol, which defines the contract for all session implementations. This allows developers to swap storage backends without changing application code.\n\n```mermaid\ngraph TD\n    A[Agent Run] --> B[Session Protocol]\n    B --> C[SQLiteSession]\n    B --> D[AsyncSQLiteSession]\n    B --> E[AdvancedSQLiteSession]\n    B --> F[EncryptedSession]\n    B --> G[RedisSession]\n    B --> H[SQLAlchemySession]\n    B --> I[MongoDBSession]\n    B --> J[DaprSession]\n```\n\n资料来源：[src/agents/extensions/memory/__init__.py:1-30]()\n\n### Memory Capability in Sandboxes\n\nSandbox agents have a dedicated memory capability that provides context from previous sessions. The `Memory` class in the sandbox capabilities layer enables agents to read and write persistent memory.\n\n```mermaid\ngraph LR\n    A[SandboxAgent] -->|requires| B[Memory Capability]\n    B --> C[read: MemoryReadConfig]\n    B --> D[generate: MemoryGenerateConfig]\n    B --> E[layout: MemoryLayout]\n```\n\nThe memory system requires either `read` or `generate` configuration to be meaningful. 
When `read.live_update` is enabled, the capability requires both `filesystem` and `shell` capabilities; otherwise, only `shell` is required.\n\n资料来源：[src/agents/sandbox/capabilities/memory.py:1-30]()\n\n## Session Persistence Layer\n\n### Session Lifecycle\n\nSessions manage the persistence of conversation state through a structured workflow:\n\n```mermaid\nsequenceDiagram\n    participant Agent as Agent Run\n    participant Session as Session Store\n    participant Sandbox as Sandbox Session\n    \n    Agent->>Session: Create/Resume Session\n    Session-->>Agent: Session ID\n    Agent->>Sandbox: Initialize Workspace\n    loop Turn Processing\n        Agent->>Sandbox: Execute Tool\n        Sandbox-->>Agent: Tool Result\n        Agent->>Session: Save Turn Items\n        Session-->>Agent: Acknowledge\n    end\n    Agent->>Session: Finalize Session\n```\n\n### Turn Item Persistence\n\nDuring agent execution, each turn generates items that must be persisted:\n\n- `input`: Current segment user input\n- `generated_items`: Memory-relevant assistant and tool items\n- `terminal_metadata`: Completion/failure state\n- `final_output`: Final segment output when available\n\n资料来源：[src/agents/sandbox/memory/prompts/rollout_extraction_user_message.md:1-20]()\n\n## Memory Rollout Extraction\n\nWhen an agent session completes, the system can extract a structured memory summary for future reference. 
This process is handled by the rollout extraction prompt system.\n\n### JSON Output Schema\n\nThe extraction produces JSON with three fields:\n\n| Field | Type | Description |\n|-------|------|-------------|\n| `raw_memory` | string | Raw memory content from the session |\n| `rollout_summary` | string | Generated summary of the session |\n| `rollout_slug` | string | Short identifier (empty string if unknown) |\n\n资料来源：[src/agents/sandbox/memory/prompts/rollout_extraction_user_message.md:1-25]()\n\n### Memory Summary Path\n\nThe memory system reads summaries from a configurable path within the sandbox workspace:\n\n```\nmemory_summary_path = Path(layout.memories_dir) / \"memory_summary.md\"\n```\n\nThe memory summary is truncated to a maximum token limit (`_MEMORY_SUMMARY_MAX_TOKENS`) to ensure efficient processing.\n\n资料来源：[src/agents/sandbox/capabilities/memory.py:50-65]()\n\n## Workspace Sink System\n\nThe `WorkspaceSink` class manages buffered writes to the sandbox workspace, providing a layer between agent operations and persistent storage.\n\n### Flush Strategy\n\nThe sink implements intelligent flushing based on several conditions:\n\n```mermaid\ngraph TD\n    A[Should Flush?] --> B{Seen count % flush_every == 0}\n    A --> C{Operation: persist_workspace start}\n    A --> D{Operation: stop}\n    A --> E{Operation: shutdown start}\n    B -->|Yes| F[Flush to workspace]\n    C -->|Yes| F\n    D -->|Yes| F\n    E -->|Yes| F\n    B -->|No| G{Check running state}\n    G -->|Running| F\n    G -->|Not running| H[Defer flush]\n```\n\nFlush conditions include:\n- Periodic flush based on event count\n- Explicit persist workspace operations\n- Session stop and shutdown events\n\n资料来源：[src/agents/sandbox/session/sinks.py:1-40]()\n\n### Workspace Persistence\n\nThe sink handles reading existing outbox content before writing new data, ensuring append-style semantics for workspace files. 
If no existing outbox is found, it marks the outbox as loaded and proceeds with new writes.\n\n资料来源：[src/agents/sandbox/session/sinks.py:60-85]()\n\n## Error Handling\n\nThe session system defines specific error types for workspace operations:\n\n### Error Hierarchy\n\n| Error Class | Code | Purpose |\n|-------------|------|---------|\n| `WorkspaceIOError` | - | Base class for workspace read/write errors |\n| `ApplyPatchPathError` | `APPLY_PATCH_INVALID_PATH` | Invalid path (absolute, escape root, or empty) |\n| `ApplyPatchDiffError` | - | Malformed patch diff |\n| `ExecNonZeroError` | - | Non-zero exit code from exec operations |\n| `InvalidManifestPathError` | - | Path resolution failed in manifest context |\n\n### Path Validation\n\nThe system validates relative paths to prevent directory traversal attacks:\n\n```python\ndef _validate_relative_path(*, name: str, path: Path) -> None:\n    if path.is_absolute():\n        raise ValueError(f\"{name} must be relative\")\n    if \"..\" in path.parts:\n        raise ValueError(f\"{name} must not escape root\")\n    if path.parts in [(), (\".\",)]:\n        raise ValueError(f\"{name} must be non-empty\")\n```\n\n资料来源：[src/agents/sandbox/errors.py:1-50]()\n\n## Session Handoff History\n\nWhen agents hand off to other agents, the system can summarize conversation history for the receiving agent. This is managed by the handoff history module.\n\n### History Normalization\n\nThe system normalizes input history and flattens nested messages before creating summaries. 
Items like `ToolApprovalItem` are filtered out as they shouldn't be forwarded.\n\n```mermaid\ngraph LR\n    A[Handoff Input] --> B[Normalize History]\n    B --> C[Flatten Nested Messages]\n    C --> D[Filter Tool Approvals]\n    D --> E[Convert to Plain Inputs]\n    E --> F[Generate Transcript Summary]\n```\n\n资料来源：[src/agents/handoffs/history.py:1-60]()\n\n### History Markers\n\nThe conversation history uses customizable markers for wrapping summaries:\n\n| Variable | Default |\n|----------|---------|\n| `_conversation_history_start` | `<CONVERSATION HISTORY>` |\n| `_conversation_history_end` | `</CONVERSATION HISTORY>` |\n\nThese can be overridden at runtime using `set_conversation_history_wrappers()`.\n\n资料来源：[src/agents/handoffs/history.py:1-50]()\n\n## Extension Memory Backends\n\nThe library includes several optional session backends that require additional dependencies:\n\n### Available Backends\n\n| Backend | Package | Features |\n|---------|---------|----------|\n| `SQLiteSession` | Built-in | Basic SQLite persistence |\n| `AsyncSQLiteSession` | Built-in | Async SQLite operations |\n| `AdvancedSQLiteSession` | Built-in | Advanced SQLite features |\n| `EncryptedSession` | `cryptography` | Encryption at rest |\n| `RedisSession` | `redis` | Distributed session management |\n| `SQLAlchemySession` | `sqlalchemy` | ORM integration |\n| `MongoDBSession` | `mongodb` | Document store backend |\n| `DaprSession` | `dapr` | Dapr state store integration |\n\n资料来源：[src/agents/extensions/memory/__init__.py:1-50]()\n\n### Lazy Loading\n\nExtensions use lazy imports to avoid requiring all dependencies when not needed:\n\n```python\n_LAZY_EXPORTS: dict[str, tuple[str, tuple[str, str] | None]] = {\n    \"EncryptedSession\": (\".encrypt_session\", (\"cryptography\", \"encrypt\")),\n    \"RedisSession\": (\".redis_session\", (\"redis\", \"redis\")),\n    ...\n}\n```\n\nThis pattern ensures that optional dependencies are only loaded when the specific backend is 
used.\n\n资料来源：[src/agents/extensions/memory/__init__.py:1-50]()\n\n## Configuration\n\n### Session Settings\n\nSessions are configured through `SessionSettings` which control:\n\n- Storage backend selection\n- Connection parameters\n- Persistence strategies\n- Compaction policies (for OpenAI responses backend)\n\n### Memory Layout\n\nFor sandbox memory, the `MemoryLayout` class specifies directory structure:\n\n| Setting | Description |\n|---------|-------------|\n| `memories_dir` | Directory for stored memories |\n| `sessions_dir` | Directory for session data |\n\nBoth paths must be relative to the sandbox workspace root to prevent escape vulnerabilities.\n\n资料来源：[src/agents/sandbox/capabilities/memory.py:20-35]()\n\n## Usage Patterns\n\n### Basic Session Usage\n\n```python\nfrom agents.memory import SQLiteSession\n\nsession = SQLiteSession(session_id=\"user-123\")\nawait session.initialize()\n\n# Run agent with session\nresult = await Runner.run(agent, input, session=session)\n\n# Session automatically persists turn items\n```\n\n### Sandbox Memory Setup\n\n```python\nfrom agents.sandbox.capabilities import Memory, MemoryReadConfig, MemoryLayout\n\nmemory = Memory(\n    read=MemoryReadConfig(live_update=True),\n    layout=MemoryLayout(memories_dir=\"memory\", sessions_dir=\"sessions\"),\n    run_as=\"root\"\n)\n```\n\n### Resume from Session\n\n```python\n# Resume a previous session\nsession = SQLiteSession(session_id=\"user-123\", resume=True)\n\n# Continue the conversation\nresult = await Runner.run(agent, input, session=session)\n```\n\n## Best Practices\n\n1. **Path Validation**: Always use relative paths for memory directories to prevent sandbox escape vulnerabilities.\n\n2. **Session Initialization**: Check `session.is_initialized()` before running agent logic.\n\n3. **Error Handling**: Catch specific session errors rather than generic exceptions for better recovery.\n\n4. 
**Turn Item Management**: Let the session system manage persistence automatically through the `save_turn_items_if_needed()` function.\n\n5. **Live Update Trade-offs**: Enable `live_update` only when agents need real-time file system access; otherwise, rely on shell-only mode for better isolation.\n\n6. **Extension Dependencies**: Use lazy-loading backends to minimize startup time and avoid unnecessary dependency loading.\n\n---\n\n---\n\n## Doramagic 踩坑日志\n\n项目：openai/openai-agents-python\n\n摘要：发现 24 个潜在踩坑项，其中 0 个为 high/blocking；最高优先级：身份坑 - 仓库名和安装名不一致。\n\n## 1. 身份坑 · 仓库名和安装名不一致\n\n- 严重度：medium\n- 证据强度：runtime_trace\n- 发现：仓库名 `openai-agents-python` 与安装入口 `openai-agents` 不完全一致。\n- 对用户的影响：用户照着仓库名搜索包或照着包名找仓库时容易走错入口。\n- 建议检查：在 npm/PyPI/GitHub 上确认包名映射和官方 README 说明。\n- 复现命令：`pip install openai-agents`\n- 防护动作：页面必须同时展示 repo 名和真实安装入口，避免用户搜索错包。\n- 证据：identity.distribution | github_repo:946380199 | https://github.com/openai/openai-agents-python | repo=openai-agents-python; install=openai-agents\n\n## 2. 配置坑 · 来源证据：AdvancedSQLiteSession.delete_branch() leaves branch-only messages in the base table\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：GitHub 社区证据显示该项目存在一个配置相关的待验证问题：AdvancedSQLiteSession.delete_branch() leaves branch-only messages in the base table\n- 对用户的影响：可能增加新用户试用和生产接入成本。\n- 建议检查：来源问题仍为 open，Pack Agent 需要复核是否仍影响当前版本。\n- 防护动作：不得脱离来源链接放大为确定性结论；需要标注适用版本和复核状态。\n- 证据：community_evidence:github | cevd_d867c75f80af49c9968398851ff8bf6a | https://github.com/openai/openai-agents-python/issues/3346 | 来源讨论提到 python 相关条件，需在安装/试用前复核。\n\n## 3. 
配置坑 · 来源证据：Clarify whether retry-after delays should respect retry max_delay\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：GitHub 社区证据显示该项目存在一个配置相关的待验证问题：Clarify whether retry-after delays should respect retry max_delay\n- 对用户的影响：可能增加新用户试用和生产接入成本。\n- 建议检查：来源显示可能已有修复、规避或版本变化，说明书中必须标注适用版本。\n- 防护动作：不得脱离来源链接放大为确定性结论；需要标注适用版本和复核状态。\n- 证据：community_evidence:github | cevd_f486d2247bf24df8bbc7a2bd6fddbd65 | https://github.com/openai/openai-agents-python/issues/3266 | 来源讨论提到 python 相关条件，需在安装/试用前复核。\n\n## 4. 配置坑 · 来源证据：OpenAIConversationsSession persists empty reasoning item {\"type\":\"reasoning\",\"summary\":[]} and Conversations API reject…\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：GitHub 社区证据显示该项目存在一个配置相关的待验证问题：OpenAIConversationsSession persists empty reasoning item {\"type\":\"reasoning\",\"summary\":[]} and Conversations API rejects it as invalid\n- 对用户的影响：可能增加新用户试用和生产接入成本。\n- 建议检查：来源问题仍为 open，Pack Agent 需要复核是否仍影响当前版本。\n- 防护动作：不得脱离来源链接放大为确定性结论；需要标注适用版本和复核状态。\n- 证据：community_evidence:github | cevd_d6bad5c23bf3457eb546c22a1636cc26 | https://github.com/openai/openai-agents-python/issues/3268 | 来源讨论提到 python 相关条件，需在安装/试用前复核。\n\n## 5. 配置坑 · 来源证据：Tracing shutdown cannot interrupt exporter retry backoff\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：GitHub 社区证据显示该项目存在一个配置相关的待验证问题：Tracing shutdown cannot interrupt exporter retry backoff\n- 对用户的影响：可能阻塞安装或首次运行。\n- 建议检查：来源问题仍为 open，Pack Agent 需要复核是否仍影响当前版本。\n- 防护动作：不得脱离来源链接放大为确定性结论；需要标注适用版本和复核状态。\n- 证据：community_evidence:github | cevd_e1ceae098cf84c8aafae7082b13c5345 | https://github.com/openai/openai-agents-python/issues/3354 | 来源讨论提到 python 相关条件，需在安装/试用前复核。\n\n## 6. 
配置坑 · 来源证据：v0.15.2\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：GitHub 社区证据显示该项目存在一个配置相关的待验证问题：v0.15.2\n- 对用户的影响：可能增加新用户试用和生产接入成本。\n- 建议检查：来源显示可能已有修复、规避或版本变化，说明书中必须标注适用版本。\n- 防护动作：不得脱离来源链接放大为确定性结论；需要标注适用版本和复核状态。\n- 证据：community_evidence:github | cevd_b73472b5ae90447199984775aacdca67 | https://github.com/openai/openai-agents-python/releases/tag/v0.15.2 | 来源讨论提到 python 相关条件，需在安装/试用前复核。\n\n## 7. 配置坑 · 来源证据：v0.15.3\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：GitHub 社区证据显示该项目存在一个配置相关的待验证问题：v0.15.3\n- 对用户的影响：可能增加新用户试用和生产接入成本。\n- 建议检查：来源显示可能已有修复、规避或版本变化，说明书中必须标注适用版本。\n- 防护动作：不得脱离来源链接放大为确定性结论；需要标注适用版本和复核状态。\n- 证据：community_evidence:github | cevd_7e05a382001a4d07b74eda1e1316320b | https://github.com/openai/openai-agents-python/releases/tag/v0.15.3 | 来源讨论提到 python 相关条件，需在安装/试用前复核。\n\n## 8. 配置坑 · 来源证据：v0.16.1\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：GitHub 社区证据显示该项目存在一个配置相关的待验证问题：v0.16.1\n- 对用户的影响：可能增加新用户试用和生产接入成本。\n- 建议检查：来源显示可能已有修复、规避或版本变化，说明书中必须标注适用版本。\n- 防护动作：不得脱离来源链接放大为确定性结论；需要标注适用版本和复核状态。\n- 证据：community_evidence:github | cevd_44335088ff52486e9f2f41f72a274c35 | https://github.com/openai/openai-agents-python/releases/tag/v0.16.1 | 来源讨论提到 python 相关条件，需在安装/试用前复核。\n\n## 9. 配置坑 · 来源证据：v0.17.0\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：GitHub 社区证据显示该项目存在一个配置相关的待验证问题：v0.17.0\n- 对用户的影响：可能增加新用户试用和生产接入成本。\n- 建议检查：来源显示可能已有修复、规避或版本变化，说明书中必须标注适用版本。\n- 防护动作：不得脱离来源链接放大为确定性结论；需要标注适用版本和复核状态。\n- 证据：community_evidence:github | cevd_86b81f310a6e45feadc65196a057b23b | https://github.com/openai/openai-agents-python/releases/tag/v0.17.0 | 来源讨论提到 python 相关条件，需在安装/试用前复核。\n\n## 10. 能力坑 · 来源证据：v0.15.1\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：GitHub 社区证据显示该项目存在一个能力理解相关的待验证问题：v0.15.1\n- 对用户的影响：可能增加新用户试用和生产接入成本。\n- 建议检查：来源显示可能已有修复、规避或版本变化，说明书中必须标注适用版本。\n- 防护动作：不得脱离来源链接放大为确定性结论；需要标注适用版本和复核状态。\n- 证据：community_evidence:github | cevd_4c70d563ac704aeaa14b8e2c49976bc5 | https://github.com/openai/openai-agents-python/releases/tag/v0.15.1 | 来源讨论提到 python 相关条件，需在安装/试用前复核。\n\n## 11. 
能力坑 · 能力判断依赖假设\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：README/documentation is current enough for a first validation pass.\n- 对用户的影响：假设不成立时，用户拿不到承诺的能力。\n- 建议检查：将假设转成下游验证清单。\n- 防护动作：假设必须转成验证项；没有验证结果前不能写成事实。\n- 证据：capability.assumptions | github_repo:946380199 | https://github.com/openai/openai-agents-python | README/documentation is current enough for a first validation pass.\n\n## 12. 运行坑 · 来源证据：v0.14.8\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：GitHub 社区证据显示该项目存在一个运行相关的待验证问题：v0.14.8\n- 对用户的影响：可能增加新用户试用和生产接入成本。\n- 建议检查：来源显示可能已有修复、规避或版本变化，说明书中必须标注适用版本。\n- 防护动作：不得脱离来源链接放大为确定性结论；需要标注适用版本和复核状态。\n- 证据：community_evidence:github | cevd_a31947cfee3a4299923f7714bfb54f42 | https://github.com/openai/openai-agents-python/releases/tag/v0.14.8 | 来源讨论提到 python 相关条件，需在安装/试用前复核。\n\n## 13. 维护坑 · 来源证据：AdvancedSQLiteSession.add_items can report success after structure metadata failure\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：GitHub 社区证据显示该项目存在一个维护/版本相关的待验证问题：AdvancedSQLiteSession.add_items can report success after structure metadata failure\n- 对用户的影响：可能增加新用户试用和生产接入成本。\n- 建议检查：来源问题仍为 open，Pack Agent 需要复核是否仍影响当前版本。\n- 防护动作：不得脱离来源链接放大为确定性结论；需要标注适用版本和复核状态。\n- 证据：community_evidence:github | cevd_0fed2dd63d55400d9e0d9adaf08570e5 | https://github.com/openai/openai-agents-python/issues/3348 | 来源讨论提到 python 相关条件，需在安装/试用前复核。\n\n## 14. 维护坑 · 来源证据：Chat Completions converter can send empty tool output for non-text results\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：GitHub 社区证据显示该项目存在一个维护/版本相关的待验证问题：Chat Completions converter can send empty tool output for non-text results\n- 对用户的影响：可能增加新用户试用和生产接入成本。\n- 建议检查：来源问题仍为 open，Pack Agent 需要复核是否仍影响当前版本。\n- 防护动作：不得脱离来源链接放大为确定性结论；需要标注适用版本和复核状态。\n- 证据：community_evidence:github | cevd_34a35e920a01467e957cdd59b4179cc1 | https://github.com/openai/openai-agents-python/issues/3310 | 来源讨论提到 python 相关条件，需在安装/试用前复核。\n\n## 15. 
维护坑 · 来源证据：v0.15.0\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：GitHub 社区证据显示该项目存在一个维护/版本相关的待验证问题：v0.15.0\n- 对用户的影响：可能增加新用户试用和生产接入成本。\n- 建议检查：来源显示可能已有修复、规避或版本变化，说明书中必须标注适用版本。\n- 防护动作：不得脱离来源链接放大为确定性结论；需要标注适用版本和复核状态。\n- 证据：community_evidence:github | cevd_33cd0193aea84f9b82b15a02098d85cd | https://github.com/openai/openai-agents-python/releases/tag/v0.15.0 | 来源讨论提到 python 相关条件，需在安装/试用前复核。\n\n## 16. 维护坑 · 维护活跃度未知\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：未记录 last_activity_observed。\n- 对用户的影响：新项目、停更项目和活跃项目会被混在一起，推荐信任度下降。\n- 建议检查：补 GitHub 最近 commit、release、issue/PR 响应信号。\n- 防护动作：维护活跃度未知时，推荐强度不能标为高信任。\n- 证据：evidence.maintainer_signals | github_repo:946380199 | https://github.com/openai/openai-agents-python | last_activity_observed missing\n\n## 17. 安全/权限坑 · 下游验证发现风险项\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：no_demo\n- 对用户的影响：下游已经要求复核，不能在页面中弱化。\n- 建议检查：进入安全/权限治理复核队列。\n- 防护动作：下游风险存在时必须保持 review/recommendation 降级。\n- 证据：downstream_validation.risk_items | github_repo:946380199 | https://github.com/openai/openai-agents-python | no_demo; severity=medium\n\n## 18. 安全/权限坑 · 存在安全注意事项\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：No sandbox install has been executed yet; downstream must verify before user use.\n- 对用户的影响：用户安装前需要知道权限边界和敏感操作。\n- 建议检查：转成明确权限清单和安全审查提示。\n- 防护动作：安全注意事项必须面向用户前置展示。\n- 证据：risks.safety_notes | github_repo:946380199 | https://github.com/openai/openai-agents-python | No sandbox install has been executed yet; downstream must verify before user use.\n\n## 19. 安全/权限坑 · 存在评分风险\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：no_demo\n- 对用户的影响：风险会影响是否适合普通用户安装。\n- 建议检查：把风险写入边界卡，并确认是否需要人工复核。\n- 防护动作：评分风险必须进入边界卡，不能只作为内部分数。\n- 证据：risks.scoring_risks | github_repo:946380199 | https://github.com/openai/openai-agents-python | no_demo; severity=medium\n\n## 20. 
安全/权限坑 · 来源证据：Proposal: per-run BudgetGuard for token / request / cost limits (follow-up to #2848)\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：GitHub 社区证据显示该项目存在一个安全/权限相关的待验证问题：Proposal: per-run BudgetGuard for token / request / cost limits (follow-up to #2848)\n- 对用户的影响：可能阻塞安装或首次运行。\n- 建议检查：来源问题仍为 open，Pack Agent 需要复核是否仍影响当前版本。\n- 防护动作：不得脱离来源链接放大为确定性结论；需要标注适用版本和复核状态。\n- 证据：community_evidence:github | cevd_00884163bb274aecb62eeff18df12634 | https://github.com/openai/openai-agents-python/issues/3353 | 来源讨论提到 python 相关条件，需在安装/试用前复核。\n\n## 21. 安全/权限坑 · 来源证据：v0.16.0\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：GitHub 社区证据显示该项目存在一个安全/权限相关的待验证问题：v0.16.0\n- 对用户的影响：可能影响授权、密钥配置或安全边界。\n- 建议检查：来源显示可能已有修复、规避或版本变化，说明书中必须标注适用版本。\n- 防护动作：不得脱离来源链接放大为确定性结论；需要标注适用版本和复核状态。\n- 证据：community_evidence:github | cevd_a9d11d6b8fd24b22882ee03998b45d63 | https://github.com/openai/openai-agents-python/releases/tag/v0.16.0 | 来源讨论提到 python 相关条件，需在安装/试用前复核。\n\n## 22. 安全/权限坑 · 来源证据：v0.17.1\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：GitHub 社区证据显示该项目存在一个安全/权限相关的待验证问题：v0.17.1\n- 对用户的影响：可能影响授权、密钥配置或安全边界。\n- 建议检查：来源显示可能已有修复、规避或版本变化，说明书中必须标注适用版本。\n- 防护动作：不得脱离来源链接放大为确定性结论；需要标注适用版本和复核状态。\n- 证据：community_evidence:github | cevd_0d47be3955c747baadea812c5f4c6487 | https://github.com/openai/openai-agents-python/releases/tag/v0.17.1 | 来源讨论提到 python 相关条件，需在安装/试用前复核。\n\n## 23. 维护坑 · issue/PR 响应质量未知\n\n- 严重度：low\n- 证据强度：source_linked\n- 发现：issue_or_pr_quality=unknown。\n- 对用户的影响：用户无法判断遇到问题后是否有人维护。\n- 建议检查：抽样最近 issue/PR，判断是否长期无人处理。\n- 防护动作：issue/PR 响应未知时，必须提示维护风险。\n- 证据：evidence.maintainer_signals | github_repo:946380199 | https://github.com/openai/openai-agents-python | issue_or_pr_quality=unknown\n\n## 24. 
维护坑 · 发布节奏不明确\n\n- 严重度：low\n- 证据强度：source_linked\n- 发现：release_recency=unknown。\n- 对用户的影响：安装命令和文档可能落后于代码，用户踩坑概率升高。\n- 建议检查：确认最近 release/tag 和 README 安装命令是否一致。\n- 防护动作：发布节奏未知或过期时，安装说明必须标注可能漂移。\n- 证据：evidence.maintainer_signals | github_repo:946380199 | https://github.com/openai/openai-agents-python | release_recency=unknown\n\n<!-- canonical_name: openai/openai-agents-python; human_manual_source: deepwiki_human_wiki -->\n",
      "summary": "DeepWiki/Human Wiki 完整输出，末尾追加 Discovery Agent 踩坑日志。",
      "title": "Human Manual / 人类版说明书"
    },
    "pitfall_log": {
      "asset_id": "pitfall_log",
      "filename": "PITFALL_LOG.md",
      "markdown": "# Pitfall Log / 踩坑日志\n\n项目：openai/openai-agents-python\n\n摘要：发现 24 个潜在踩坑项，其中 0 个为 high/blocking；最高优先级：身份坑 - 仓库名和安装名不一致。\n\n## 1. 身份坑 · 仓库名和安装名不一致\n\n- 严重度：medium\n- 证据强度：runtime_trace\n- 发现：仓库名 `openai-agents-python` 与安装入口 `openai-agents` 不完全一致。\n- 对用户的影响：用户照着仓库名搜索包或照着包名找仓库时容易走错入口。\n- 建议检查：在 PyPI/GitHub 上确认包名映射和官方 README 说明。\n- 复现命令：`pip install openai-agents`\n- 防护动作：页面必须同时展示 repo 名和真实安装入口，避免用户搜索错包。\n- 证据：identity.distribution | github_repo:946380199 | https://github.com/openai/openai-agents-python | repo=openai-agents-python; install=openai-agents\n\n## 2. 配置坑 · 来源证据：AdvancedSQLiteSession.delete_branch() leaves branch-only messages in the base table\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：GitHub 社区证据显示该项目存在一个配置相关的待验证问题：AdvancedSQLiteSession.delete_branch() leaves branch-only messages in the base table\n- 对用户的影响：可能增加新用户试用和生产接入成本。\n- 建议检查：来源问题仍为 open，Pack Agent 需要复核是否仍影响当前版本。\n- 防护动作：不得脱离来源链接放大为确定性结论；需要标注适用版本和复核状态。\n- 证据：community_evidence:github | cevd_d867c75f80af49c9968398851ff8bf6a | https://github.com/openai/openai-agents-python/issues/3346 | 来源讨论提到 python 相关条件，需在安装/试用前复核。\n\n## 3. 配置坑 · 来源证据：Clarify whether retry-after delays should respect retry max_delay\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：GitHub 社区证据显示该项目存在一个配置相关的待验证问题：Clarify whether retry-after delays should respect retry max_delay\n- 对用户的影响：可能增加新用户试用和生产接入成本。\n- 建议检查：来源显示可能已有修复、规避或版本变化，说明书中必须标注适用版本。\n- 防护动作：不得脱离来源链接放大为确定性结论；需要标注适用版本和复核状态。\n- 证据：community_evidence:github | cevd_f486d2247bf24df8bbc7a2bd6fddbd65 | https://github.com/openai/openai-agents-python/issues/3266 | 来源讨论提到 python 相关条件，需在安装/试用前复核。\n\n## 4. 配置坑 · 来源证据：OpenAIConversationsSession persists empty reasoning item {\"type\":\"reasoning\",\"summary\":[]} and Conversations API reject…\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：GitHub 社区证据显示该项目存在一个配置相关的待验证问题：OpenAIConversationsSession persists empty reasoning item {\"type\":\"reasoning\",\"summary\":[]} and Conversations API rejects it as invalid\n- 对用户的影响：可能增加新用户试用和生产接入成本。\n- 建议检查：来源问题仍为 open，Pack Agent 需要复核是否仍影响当前版本。\n- 防护动作：不得脱离来源链接放大为确定性结论；需要标注适用版本和复核状态。\n- 证据：community_evidence:github | cevd_d6bad5c23bf3457eb546c22a1636cc26 | https://github.com/openai/openai-agents-python/issues/3268 | 来源讨论提到 python 相关条件，需在安装/试用前复核。\n\n## 5. 配置坑 · 来源证据：Tracing shutdown cannot interrupt exporter retry backoff\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：GitHub 社区证据显示该项目存在一个配置相关的待验证问题：Tracing shutdown cannot interrupt exporter retry backoff\n- 对用户的影响：可能阻塞安装或首次运行。\n- 建议检查：来源问题仍为 open，Pack Agent 需要复核是否仍影响当前版本。\n- 防护动作：不得脱离来源链接放大为确定性结论；需要标注适用版本和复核状态。\n- 证据：community_evidence:github | cevd_e1ceae098cf84c8aafae7082b13c5345 | https://github.com/openai/openai-agents-python/issues/3354 | 来源讨论提到 python 相关条件，需在安装/试用前复核。\n\n## 6. 配置坑 · 来源证据：v0.15.2\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：GitHub 社区证据显示该项目存在一个配置相关的待验证问题：v0.15.2\n- 对用户的影响：可能增加新用户试用和生产接入成本。\n- 建议检查：来源显示可能已有修复、规避或版本变化，说明书中必须标注适用版本。\n- 防护动作：不得脱离来源链接放大为确定性结论；需要标注适用版本和复核状态。\n- 证据：community_evidence:github | cevd_b73472b5ae90447199984775aacdca67 | https://github.com/openai/openai-agents-python/releases/tag/v0.15.2 | 来源讨论提到 python 相关条件，需在安装/试用前复核。\n\n## 7. 配置坑 · 来源证据：v0.15.3\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：GitHub 社区证据显示该项目存在一个配置相关的待验证问题：v0.15.3\n- 对用户的影响：可能增加新用户试用和生产接入成本。\n- 建议检查：来源显示可能已有修复、规避或版本变化，说明书中必须标注适用版本。\n- 防护动作：不得脱离来源链接放大为确定性结论；需要标注适用版本和复核状态。\n- 证据：community_evidence:github | cevd_7e05a382001a4d07b74eda1e1316320b | https://github.com/openai/openai-agents-python/releases/tag/v0.15.3 | 来源讨论提到 python 相关条件，需在安装/试用前复核。\n\n## 8. 配置坑 · 来源证据：v0.16.1\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：GitHub 社区证据显示该项目存在一个配置相关的待验证问题：v0.16.1\n- 对用户的影响：可能增加新用户试用和生产接入成本。\n- 建议检查：来源显示可能已有修复、规避或版本变化，说明书中必须标注适用版本。\n- 防护动作：不得脱离来源链接放大为确定性结论；需要标注适用版本和复核状态。\n- 证据：community_evidence:github | cevd_44335088ff52486e9f2f41f72a274c35 | https://github.com/openai/openai-agents-python/releases/tag/v0.16.1 | 来源讨论提到 python 相关条件，需在安装/试用前复核。\n\n## 9. 配置坑 · 来源证据：v0.17.0\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：GitHub 社区证据显示该项目存在一个配置相关的待验证问题：v0.17.0\n- 对用户的影响：可能增加新用户试用和生产接入成本。\n- 建议检查：来源显示可能已有修复、规避或版本变化，说明书中必须标注适用版本。\n- 防护动作：不得脱离来源链接放大为确定性结论；需要标注适用版本和复核状态。\n- 证据：community_evidence:github | cevd_86b81f310a6e45feadc65196a057b23b | https://github.com/openai/openai-agents-python/releases/tag/v0.17.0 | 来源讨论提到 python 相关条件，需在安装/试用前复核。\n\n## 10. 能力坑 · 来源证据：v0.15.1\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：GitHub 社区证据显示该项目存在一个能力理解相关的待验证问题：v0.15.1\n- 对用户的影响：可能增加新用户试用和生产接入成本。\n- 建议检查：来源显示可能已有修复、规避或版本变化，说明书中必须标注适用版本。\n- 防护动作：不得脱离来源链接放大为确定性结论；需要标注适用版本和复核状态。\n- 证据：community_evidence:github | cevd_4c70d563ac704aeaa14b8e2c49976bc5 | https://github.com/openai/openai-agents-python/releases/tag/v0.15.1 | 来源讨论提到 python 相关条件，需在安装/试用前复核。\n\n## 11. 能力坑 · 能力判断依赖假设\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：README/documentation is current enough for a first validation pass.\n- 对用户的影响：假设不成立时，用户拿不到承诺的能力。\n- 建议检查：将假设转成下游验证清单。\n- 防护动作：假设必须转成验证项；没有验证结果前不能写成事实。\n- 证据：capability.assumptions | github_repo:946380199 | https://github.com/openai/openai-agents-python | README/documentation is current enough for a first validation pass.\n\n## 12. 运行坑 · 来源证据：v0.14.8\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：GitHub 社区证据显示该项目存在一个运行相关的待验证问题：v0.14.8\n- 对用户的影响：可能增加新用户试用和生产接入成本。\n- 建议检查：来源显示可能已有修复、规避或版本变化，说明书中必须标注适用版本。\n- 防护动作：不得脱离来源链接放大为确定性结论；需要标注适用版本和复核状态。\n- 证据：community_evidence:github | cevd_a31947cfee3a4299923f7714bfb54f42 | https://github.com/openai/openai-agents-python/releases/tag/v0.14.8 | 来源讨论提到 python 相关条件，需在安装/试用前复核。\n\n## 13. 维护坑 · 来源证据：AdvancedSQLiteSession.add_items can report success after structure metadata failure\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：GitHub 社区证据显示该项目存在一个维护/版本相关的待验证问题：AdvancedSQLiteSession.add_items can report success after structure metadata failure\n- 对用户的影响：可能增加新用户试用和生产接入成本。\n- 建议检查：来源问题仍为 open，Pack Agent 需要复核是否仍影响当前版本。\n- 防护动作：不得脱离来源链接放大为确定性结论；需要标注适用版本和复核状态。\n- 证据：community_evidence:github | cevd_0fed2dd63d55400d9e0d9adaf08570e5 | https://github.com/openai/openai-agents-python/issues/3348 | 来源讨论提到 python 相关条件，需在安装/试用前复核。\n\n## 14. 维护坑 · 来源证据：Chat Completions converter can send empty tool output for non-text results\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：GitHub 社区证据显示该项目存在一个维护/版本相关的待验证问题：Chat Completions converter can send empty tool output for non-text results\n- 对用户的影响：可能增加新用户试用和生产接入成本。\n- 建议检查：来源问题仍为 open，Pack Agent 需要复核是否仍影响当前版本。\n- 防护动作：不得脱离来源链接放大为确定性结论；需要标注适用版本和复核状态。\n- 证据：community_evidence:github | cevd_34a35e920a01467e957cdd59b4179cc1 | https://github.com/openai/openai-agents-python/issues/3310 | 来源讨论提到 python 相关条件，需在安装/试用前复核。\n\n## 15. 维护坑 · 来源证据：v0.15.0\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：GitHub 社区证据显示该项目存在一个维护/版本相关的待验证问题：v0.15.0\n- 对用户的影响：可能增加新用户试用和生产接入成本。\n- 建议检查：来源显示可能已有修复、规避或版本变化，说明书中必须标注适用版本。\n- 防护动作：不得脱离来源链接放大为确定性结论；需要标注适用版本和复核状态。\n- 证据：community_evidence:github | cevd_33cd0193aea84f9b82b15a02098d85cd | https://github.com/openai/openai-agents-python/releases/tag/v0.15.0 | 来源讨论提到 python 相关条件，需在安装/试用前复核。\n\n## 16. 维护坑 · 维护活跃度未知\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：未记录 last_activity_observed。\n- 对用户的影响：新项目、停更项目和活跃项目会被混在一起，推荐信任度下降。\n- 建议检查：补 GitHub 最近 commit、release、issue/PR 响应信号。\n- 防护动作：维护活跃度未知时，推荐强度不能标为高信任。\n- 证据：evidence.maintainer_signals | github_repo:946380199 | https://github.com/openai/openai-agents-python | last_activity_observed missing\n\n## 17. 安全/权限坑 · 下游验证发现风险项\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：no_demo\n- 对用户的影响：下游已经要求复核，不能在页面中弱化。\n- 建议检查：进入安全/权限治理复核队列。\n- 防护动作：下游风险存在时必须保持 review/recommendation 降级。\n- 证据：downstream_validation.risk_items | github_repo:946380199 | https://github.com/openai/openai-agents-python | no_demo; severity=medium\n\n## 18. 安全/权限坑 · 存在安全注意事项\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：No sandbox install has been executed yet; downstream must verify before user use.\n- 对用户的影响：用户安装前需要知道权限边界和敏感操作。\n- 建议检查：转成明确权限清单和安全审查提示。\n- 防护动作：安全注意事项必须面向用户前置展示。\n- 证据：risks.safety_notes | github_repo:946380199 | https://github.com/openai/openai-agents-python | No sandbox install has been executed yet; downstream must verify before user use.\n\n## 19. 安全/权限坑 · 存在评分风险\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：no_demo\n- 对用户的影响：风险会影响是否适合普通用户安装。\n- 建议检查：把风险写入边界卡，并确认是否需要人工复核。\n- 防护动作：评分风险必须进入边界卡，不能只作为内部分数。\n- 证据：risks.scoring_risks | github_repo:946380199 | https://github.com/openai/openai-agents-python | no_demo; severity=medium\n\n## 20. 安全/权限坑 · 来源证据：Proposal: per-run BudgetGuard for token / request / cost limits (follow-up to #2848)\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：GitHub 社区证据显示该项目存在一个安全/权限相关的待验证问题：Proposal: per-run BudgetGuard for token / request / cost limits (follow-up to #2848)\n- 对用户的影响：可能阻塞安装或首次运行。\n- 建议检查：来源问题仍为 open，Pack Agent 需要复核是否仍影响当前版本。\n- 防护动作：不得脱离来源链接放大为确定性结论；需要标注适用版本和复核状态。\n- 证据：community_evidence:github | cevd_00884163bb274aecb62eeff18df12634 | https://github.com/openai/openai-agents-python/issues/3353 | 来源讨论提到 python 相关条件，需在安装/试用前复核。\n\n## 21. 安全/权限坑 · 来源证据：v0.16.0\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：GitHub 社区证据显示该项目存在一个安全/权限相关的待验证问题：v0.16.0\n- 对用户的影响：可能影响授权、密钥配置或安全边界。\n- 建议检查：来源显示可能已有修复、规避或版本变化，说明书中必须标注适用版本。\n- 防护动作：不得脱离来源链接放大为确定性结论；需要标注适用版本和复核状态。\n- 证据：community_evidence:github | cevd_a9d11d6b8fd24b22882ee03998b45d63 | https://github.com/openai/openai-agents-python/releases/tag/v0.16.0 | 来源讨论提到 python 相关条件，需在安装/试用前复核。\n\n## 22. 安全/权限坑 · 来源证据：v0.17.1\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：GitHub 社区证据显示该项目存在一个安全/权限相关的待验证问题：v0.17.1\n- 对用户的影响：可能影响授权、密钥配置或安全边界。\n- 建议检查：来源显示可能已有修复、规避或版本变化，说明书中必须标注适用版本。\n- 防护动作：不得脱离来源链接放大为确定性结论；需要标注适用版本和复核状态。\n- 证据：community_evidence:github | cevd_0d47be3955c747baadea812c5f4c6487 | https://github.com/openai/openai-agents-python/releases/tag/v0.17.1 | 来源讨论提到 python 相关条件，需在安装/试用前复核。\n\n## 23. 维护坑 · issue/PR 响应质量未知\n\n- 严重度：low\n- 证据强度：source_linked\n- 发现：issue_or_pr_quality=unknown。\n- 对用户的影响：用户无法判断遇到问题后是否有人维护。\n- 建议检查：抽样最近 issue/PR，判断是否长期无人处理。\n- 防护动作：issue/PR 响应未知时，必须提示维护风险。\n- 证据：evidence.maintainer_signals | github_repo:946380199 | https://github.com/openai/openai-agents-python | issue_or_pr_quality=unknown\n\n## 24. 维护坑 · 发布节奏不明确\n\n- 严重度：low\n- 证据强度：source_linked\n- 发现：release_recency=unknown。\n- 对用户的影响：安装命令和文档可能落后于代码，用户踩坑概率升高。\n- 建议检查：确认最近 release/tag 和 README 安装命令是否一致。\n- 防护动作：发布节奏未知或过期时，安装说明必须标注可能漂移。\n- 证据：evidence.maintainer_signals | github_repo:946380199 | https://github.com/openai/openai-agents-python | release_recency=unknown\n",
      "summary": "用户实践前最可能遇到的身份、安装、配置、运行和安全坑。",
      "title": "Pitfall Log / 踩坑日志"
    },
    "prompt_preview": {
      "asset_id": "prompt_preview",
      "filename": "PROMPT_PREVIEW.md",
      "markdown": "# openai-agents-python - Prompt Preview\n\n> Copy the prompt below into your AI host before installing anything.\n> Its purpose is to let you safely feel the project's workflow, not to claim the project has already run.\n\n## Copy this prompt\n\n```text\nYou are using an independent Doramagic capability pack for openai/openai-agents-python.\n\nProject:\n- Name: openai-agents-python\n- Repository: https://github.com/openai/openai-agents-python\n- Summary: A lightweight, powerful framework for multi-agent workflows\n- Host target: chatgpt\n\nGoal:\nHelp me evaluate this project for the following task without installing it yet: A lightweight, powerful framework for multi-agent workflows\n\nBefore taking action:\n1. Restate my task, success standard, and boundary.\n2. Identify whether the next step requires tools, browser access, network access, filesystem access, credentials, package installation, or host configuration.\n3. Use only the Doramagic Project Pack, the upstream repository, and the source-linked evidence listed below.\n4. If a real command, install step, API call, file write, or host integration is required, mark it as \"requires post-install verification\" and ask for approval first.\n5. If evidence is missing, say \"evidence is missing\" instead of filling the gap.\n\nPreviewable capabilities:\n- Capability 1: Use the source-backed project context to guide one small, checkable workflow step.\n\nCapabilities that require post-install verification:\n- Capability 1: Use the source-backed project context to guide one small, checkable workflow step.\n\nCore service flow:\n1. overview: OpenAI Agents SDK Overview. Produce one small intermediate artifact and wait for confirmation.\n2. examples-index: Examples Index. Produce one small intermediate artifact and wait for confirmation.\n3. agents: Agents. Produce one small intermediate artifact and wait for confirmation.\n4. tools: Tools. Produce one small intermediate artifact and wait for confirmation.\n5. run-loop: Run Loop and Execution. Produce one small intermediate artifact and wait for confirmation.\n\nSource-backed evidence to keep in mind:\n- https://github.com/openai/openai-agents-python\n- https://github.com/openai/openai-agents-python#readme\n- .agents/skills/code-change-verification/SKILL.md\n- .agents/skills/docs-sync/SKILL.md\n- .agents/skills/examples-auto-run/SKILL.md\n- .agents/skills/final-release-review/SKILL.md\n- .agents/skills/implementation-strategy/SKILL.md\n- .agents/skills/openai-knowledge/SKILL.md\n- .agents/skills/pr-draft-summary/SKILL.md\n- .agents/skills/runtime-behavior-probe/SKILL.md\n\nFirst response rules:\n1. Start Step 1 only.\n2. Explain the one service action you will perform first.\n3. Ask exactly three questions about my target workflow, success standard, and sandbox boundary.\n4. Stop and wait for my answers.\n\nStep 1 follow-up protocol:\n- After I answer the first three questions, stay in Step 1.\n- Produce six parts only: clarified task, success standard, boundary conditions, two or three options, tradeoffs for each option, and one recommendation.\n- End by asking whether I confirm the recommendation.\n- Do not move to Step 2 until I explicitly confirm.\n\nConversation rules:\n- Advance one step at a time and wait for confirmation after each small artifact.\n- Write outputs as recommendations or planned checks, not as completed execution.\n- Do not claim tests passed, files changed, commands ran, APIs were called, or the project was installed.\n- If the user asks for execution, first provide the sandbox setup, expected output, rollback, and approval checkpoint.\n```\n",
      "summary": "不安装项目也能感受能力节奏的安全试用 Prompt。",
      "title": "Prompt Preview / 安装前试用 Prompt"
    },
    "quick_start": {
      "asset_id": "quick_start",
      "filename": "QUICK_START.md",
      "markdown": "# Quick Start / 官方入口\n\n项目：openai/openai-agents-python\n\n## 官方安装入口\n\n### Python / pip · 官方安装入口\n\n```bash\npip install openai-agents\n```\n\n来源：https://github.com/openai/openai-agents-python#readme\n\n## 来源\n\n- repo: https://github.com/openai/openai-agents-python\n- docs: https://github.com/openai/openai-agents-python#readme\n",
      "summary": "从项目官方 README 或安装文档提取的开工入口。",
      "title": "Quick Start / 官方入口"
    }
  },
  "validation_id": "dval_a44f7347bf66437e874a64a265e0fceb"
}
