{
  "canonical_name": "openlit/openlit",
  "compilation_id": "pack_8ee0e331091d47a9aee3382ee14c1fd1",
  "created_at": "2026-05-16T21:35:12.653400+00:00",
  "created_by": "project-pack-compiler",
  "feedback": {
    "carrier_selection_notes": [
      "viable_asset_types=prompt, recipe, host_instruction, eval, preflight",
      "recommended_asset_types=prompt, recipe, host_instruction, eval, preflight"
    ],
    "evidence_delta": {
      "confirmed_claims": [
        "identity_anchor_present",
        "capability_and_host_targets_present",
        "install_path_declared_or_better"
      ],
      "missing_required_fields": [],
      "must_verify_forwarded": [
        "Run or inspect `pip install openlit` in an isolated environment.",
        "Confirm the project exposes the claimed capability to at least one target host."
      ],
      "quickstart_execution_scope": "allowlisted_sandbox_smoke",
      "sandbox_command": "pip install openlit",
      "sandbox_container_image": "python:3.12-slim",
      "sandbox_execution_backend": "docker",
      "sandbox_planner_decision": "llm_execute_isolated_install",
      "sandbox_validation_id": "sbx_7a347252a92446b598c1b1d2c454d187"
    },
    "feedback_event_type": "project_pack_compilation_feedback",
    "learning_candidate_reasons": [],
    "template_gaps": []
  },
  "identity": {
    "canonical_id": "project_f5114bcff7bb9c22130b8491cafb6ec8",
    "canonical_name": "openlit/openlit",
    "homepage_url": null,
    "license": "unknown",
    "repo_url": "https://github.com/openlit/openlit",
    "slug": "openlit",
    "source_packet_id": "phit_89ffc7f190094e568bb47dc3666592de",
    "source_validation_id": "dval_bc021b9f126d4942b10ce3ce4d4df599"
  },
  "merchandising": {
    "best_for": "需要软件开发与交付能力，并使用 local_cli的用户",
    "github_forks": 276,
    "github_stars": 2446,
    "one_liner_en": "Open source platform for AI Engineering: OpenTelemetry-native LLM Observability, GPU Monitoring, Guardrails, Evaluations, Prompt Management, Vault, Playground. 🚀💻 Integrates with 50+ LLM Providers, VectorDBs, Agent Frameworks and GPUs.",
    "one_liner_zh": "Open source platform for AI Engineering: OpenTelemetry-native LLM Observability, GPU Monitoring, Guardrails, Evaluations, Prompt Management, Vault, Playground. 🚀💻 Integrates with 50+ LLM Providers, VectorDBs, Agent Frameworks and GPUs.",
    "primary_category": {
      "category_id": "software-development",
      "confidence": "medium",
      "name_en": "Software Development",
      "name_zh": "软件开发与交付",
      "reason": "matched_keywords:git, cli"
    },
    "target_user": "使用 local_cli 等宿主 AI 的用户",
    "title_en": "openlit",
    "title_zh": "openlit 能力包",
    "visible_tags": [
      {
        "label_en": "Browser Agents",
        "label_zh": "浏览器 Agent",
        "source": "repo_evidence_project_characteristics",
        "tag_id": "product_domain-browser-agents",
        "type": "product_domain"
      },
      {
        "label_en": "Web Task Automation",
        "label_zh": "网页任务自动化",
        "source": "repo_evidence_project_characteristics",
        "tag_id": "user_job-web-task-automation",
        "type": "user_job"
      },
      {
        "label_en": "Browser Automation",
        "label_zh": "浏览器自动化",
        "source": "repo_evidence_project_characteristics",
        "tag_id": "core_capability-browser-automation",
        "type": "core_capability"
      },
      {
        "label_en": "Checkpoint Resume",
        "label_zh": "断点恢复流程",
        "source": "repo_evidence_project_characteristics",
        "tag_id": "workflow_pattern-checkpoint-resume",
        "type": "workflow_pattern"
      },
      {
        "label_en": "Evaluation Suite",
        "label_zh": "评测体系",
        "source": "repo_evidence_project_characteristics",
        "tag_id": "selection_signal-evaluation-suite",
        "type": "selection_signal"
      }
    ]
  },
  "packet_id": "phit_89ffc7f190094e568bb47dc3666592de",
  "page_model": {
    "artifacts": {
      "artifact_slug": "openlit",
      "files": [
        "PROJECT_PACK.json",
        "QUICK_START.md",
        "PROMPT_PREVIEW.md",
        "HUMAN_MANUAL.md",
        "AI_CONTEXT_PACK.md",
        "BOUNDARY_RISK_CARD.md",
        "PITFALL_LOG.md",
        "REPO_INSPECTION.json",
        "REPO_INSPECTION.md",
        "CAPABILITY_CONTRACT.json",
        "EVIDENCE_INDEX.json",
        "CLAIM_GRAPH.json"
      ],
      "required_files": [
        "PROJECT_PACK.json",
        "QUICK_START.md",
        "PROMPT_PREVIEW.md",
        "HUMAN_MANUAL.md",
        "AI_CONTEXT_PACK.md",
        "BOUNDARY_RISK_CARD.md",
        "PITFALL_LOG.md",
        "REPO_INSPECTION.json"
      ]
    },
    "detail": {
      "capability_source": "Project Hit Packet + DownstreamValidationResult",
      "commands": [
        {
          "command": "pip install openlit",
          "label": "Python / pip · 官方安装入口",
          "source": "https://github.com/openlit/openlit#readme",
          "verified": true
        }
      ],
      "display_tags": [
        "浏览器 Agent",
        "网页任务自动化",
        "浏览器自动化",
        "断点恢复流程",
        "评测体系"
      ],
      "eyebrow": "软件开发与交付",
      "glance": [
        {
          "body": "判断自己是不是目标用户。",
          "label": "最适合谁",
          "value": "需要软件开发与交付能力，并使用 local_cli的用户"
        },
        {
          "body": "先理解能力边界，再决定是否继续。",
          "label": "核心价值",
          "value": "Open source platform for AI Engineering: OpenTelemetry-native LLM Observability, GPU Monitoring, Guardrails, Evaluations, Prompt Management, Vault, Playground. 🚀💻 Integrates with 50+ LLM Providers, VectorDBs, Agent Frameworks and GPUs."
        },
        {
          "body": "未完成验证前保持审慎。",
          "label": "继续前",
          "value": "publish to Doramagic.ai project surfaces"
        }
      ],
      "guardrail_source": "Boundary & Risk Card",
      "guardrails": [
        {
          "body": "Prompt Preview 只展示流程，不证明项目已安装或运行。",
          "label": "Check 1",
          "value": "不要把试用当真实运行"
        },
        {
          "body": "local_cli",
          "label": "Check 2",
          "value": "确认宿主兼容"
        },
        {
          "body": "publish to Doramagic.ai project surfaces",
          "label": "Check 3",
          "value": "先隔离验证"
        }
      ],
      "mode": "prompt, recipe, host_instruction, eval, preflight",
      "pitfall_log": {
        "items": [
          {
            "body": "GitHub 社区证据显示该项目存在一个安装相关的待验证问题：Integration: Governance and compliance signals for LLM observability",
            "category": "安装坑",
            "evidence": [
              "community_evidence:github | cevd_16e8a1979e4646f18ae6d36da1fd46fe | https://github.com/openlit/openlit/issues/1106 | 来源类型 github_issue 暴露的待验证使用条件。"
            ],
            "severity": "medium",
            "suggested_check": "来源显示可能已有修复、规避或版本变化，说明书中必须标注适用版本。",
            "title": "来源证据：Integration: Governance and compliance signals for LLM observability",
            "user_impact": "可能增加新用户试用和生产接入成本。"
          },
          {
            "body": "GitHub 社区证据显示该项目存在一个安装相关的待验证问题：Proposal: gen_ai.agent.threat_detected span event helper for OTel-shaped detection observability",
            "category": "安装坑",
            "evidence": [
              "community_evidence:github | cevd_9788255c9fb34a7eae64ba6413a52030 | https://github.com/openlit/openlit/issues/1186 | 来源讨论提到 python 相关条件，需在安装/试用前复核。"
            ],
            "severity": "medium",
            "suggested_check": "来源问题仍为 open，Pack Agent 需要复核是否仍影响当前版本。",
            "title": "来源证据：Proposal: gen_ai.agent.threat_detected span event helper for OTel-shaped detection observability",
            "user_impact": "可能增加新用户试用和生产接入成本。"
          },
          {
            "body": "GitHub 社区证据显示该项目存在一个安装相关的待验证问题：[Bug]: Docker Image doesn't run on windows 64bit",
            "category": "安装坑",
            "evidence": [
              "community_evidence:github | cevd_e25a08120daf4deb81b9193aeab1f929 | https://github.com/openlit/openlit/issues/786 | 来源讨论提到 docker 相关条件，需在安装/试用前复核。"
            ],
            "severity": "medium",
            "suggested_check": "来源显示可能已有修复、规避或版本变化，说明书中必须标注适用版本。",
            "title": "来源证据：[Bug]: Docker Image doesn't run on windows 64bit",
            "user_impact": "可能增加新用户试用和生产接入成本。"
          },
          {
            "body": "GitHub 社区证据显示该项目存在一个安装相关的待验证问题：openlit-1.19.0",
            "category": "安装坑",
            "evidence": [
              "community_evidence:github | cevd_0504e467960f4bbe919ff101c6a14d7b | https://github.com/openlit/openlit/releases/tag/openlit-1.19.0 | 来源类型 github_release 暴露的待验证使用条件。"
            ],
            "severity": "medium",
            "suggested_check": "来源显示可能已有修复、规避或版本变化，说明书中必须标注适用版本。",
            "title": "来源证据：openlit-1.19.0",
            "user_impact": "可能增加新用户试用和生产接入成本。"
          },
          {
            "body": "GitHub 社区证据显示该项目存在一个配置相关的待验证问题：controller-0.2.0",
            "category": "配置坑",
            "evidence": [
              "community_evidence:github | cevd_addec19eec37420da207487d5a685eaa | https://github.com/openlit/openlit/releases/tag/controller-0.2.0 | 来源类型 github_release 暴露的待验证使用条件。"
            ],
            "severity": "medium",
            "suggested_check": "来源显示可能已有修复、规避或版本变化，说明书中必须标注适用版本。",
            "title": "来源证据：controller-0.2.0",
            "user_impact": "可能影响升级、迁移或版本选择。"
          },
          {
            "body": "GitHub 社区证据显示该项目存在一个配置相关的待验证问题：openlit-1.20.0",
            "category": "配置坑",
            "evidence": [
              "community_evidence:github | cevd_217968c917e9426f9f8fbb4b50bebdb5 | https://github.com/openlit/openlit/releases/tag/openlit-1.20.0 | 来源类型 github_release 暴露的待验证使用条件。"
            ],
            "severity": "medium",
            "suggested_check": "来源显示可能已有修复、规避或版本变化，说明书中必须标注适用版本。",
            "title": "来源证据：openlit-1.20.0",
            "user_impact": "可能影响升级、迁移或版本选择。"
          },
          {
            "body": "README/documentation is current enough for a first validation pass.",
            "category": "能力坑",
            "evidence": [
              "capability.assumptions | github_repo:747319327 | https://github.com/openlit/openlit | README/documentation is current enough for a first validation pass."
            ],
            "severity": "medium",
            "suggested_check": "将假设转成下游验证清单。",
            "title": "能力判断依赖假设",
            "user_impact": "假设不成立时，用户拿不到承诺的能力。"
          },
          {
            "body": "未记录 last_activity_observed。",
            "category": "维护坑",
            "evidence": [
              "evidence.maintainer_signals | github_repo:747319327 | https://github.com/openlit/openlit | last_activity_observed missing"
            ],
            "severity": "medium",
            "suggested_check": "补 GitHub 最近 commit、release、issue/PR 响应信号。",
            "title": "维护活跃度未知",
            "user_impact": "新项目、停更项目和活跃项目会被混在一起，推荐信任度下降。"
          },
          {
            "body": "no_demo",
            "category": "安全/权限坑",
            "evidence": [
              "downstream_validation.risk_items | github_repo:747319327 | https://github.com/openlit/openlit | no_demo; severity=medium"
            ],
            "severity": "medium",
            "suggested_check": "进入安全/权限治理复核队列。",
            "title": "下游验证发现风险项",
            "user_impact": "下游已经要求复核，不能在页面中弱化。"
          },
          {
            "body": "no_demo",
            "category": "安全/权限坑",
            "evidence": [
              "risks.scoring_risks | github_repo:747319327 | https://github.com/openlit/openlit | no_demo; severity=medium"
            ],
            "severity": "medium",
            "suggested_check": "把风险写入边界卡，并确认是否需要人工复核。",
            "title": "存在评分风险",
            "user_impact": "风险会影响是否适合普通用户安装。"
          },
          {
            "body": "GitHub 社区证据显示该项目存在一个安全/权限相关的待验证问题：Bug: OpenAI API key in operator example test-application is not using OPENAI_API_KEY env var",
            "category": "安全/权限坑",
            "evidence": [
              "community_evidence:github | cevd_bfba0945570d4cbbaead1257e8f70dfe | https://github.com/openlit/openlit/issues/1135 | 来源讨论提到 python 相关条件，需在安装/试用前复核。"
            ],
            "severity": "medium",
            "suggested_check": "来源显示可能已有修复、规避或版本变化，说明书中必须标注适用版本。",
            "title": "来源证据：Bug: OpenAI API key in operator example test-application is not using OPENAI_API_KEY env var",
            "user_impact": "可能影响授权、密钥配置或安全边界。"
          },
          {
            "body": "GitHub 社区证据显示该项目存在一个安全/权限相关的待验证问题：openlit-1.19.1",
            "category": "安全/权限坑",
            "evidence": [
              "community_evidence:github | cevd_b5088506959947828f2d740f9297d5b5 | https://github.com/openlit/openlit/releases/tag/openlit-1.19.1 | 来源类型 github_release 暴露的待验证使用条件。"
            ],
            "severity": "medium",
            "suggested_check": "来源显示可能已有修复、规避或版本变化，说明书中必须标注适用版本。",
            "title": "来源证据：openlit-1.19.1",
            "user_impact": "可能增加新用户试用和生产接入成本。"
          },
          {
            "body": "GitHub 社区证据显示该项目存在一个安全/权限相关的待验证问题：py-1.41.2",
            "category": "安全/权限坑",
            "evidence": [
              "community_evidence:github | cevd_ff3f4dfa2dc04616be73482b2145ac5c | https://github.com/openlit/openlit/releases/tag/py-1.41.2 | 来源讨论提到 docker 相关条件，需在安装/试用前复核。"
            ],
            "severity": "medium",
            "suggested_check": "来源显示可能已有修复、规避或版本变化，说明书中必须标注适用版本。",
            "title": "来源证据：py-1.41.2",
            "user_impact": "可能影响授权、密钥配置或安全边界。"
          },
          {
            "body": "issue_or_pr_quality=unknown。",
            "category": "维护坑",
            "evidence": [
              "evidence.maintainer_signals | github_repo:747319327 | https://github.com/openlit/openlit | issue_or_pr_quality=unknown"
            ],
            "severity": "low",
            "suggested_check": "抽样最近 issue/PR，判断是否长期无人处理。",
            "title": "issue/PR 响应质量未知",
            "user_impact": "用户无法判断遇到问题后是否有人维护。"
          },
          {
            "body": "release_recency=unknown。",
            "category": "维护坑",
            "evidence": [
              "evidence.maintainer_signals | github_repo:747319327 | https://github.com/openlit/openlit | release_recency=unknown"
            ],
            "severity": "low",
            "suggested_check": "确认最近 release/tag 和 README 安装命令是否一致。",
            "title": "发布节奏不明确",
            "user_impact": "安装命令和文档可能落后于代码，用户踩坑概率升高。"
          }
        ],
        "source": "ProjectPitfallLog + ProjectHitPacket + validation + community signals",
        "summary": "发现 15 个潜在踩坑项，其中 0 个为 high/blocking；最高优先级：安装坑 - 来源证据：Integration: Governance and compliance signals for LLM observability。",
        "title": "踩坑日志"
      },
      "snapshot": {
        "contributors": 69,
        "forks": 276,
        "license": "unknown",
        "note": "站点快照，非实时质量证明；用于开工前背景判断。",
        "stars": 2446
      },
      "source_url": "https://github.com/openlit/openlit",
      "steps": [
        {
          "body": "不安装项目，先体验能力节奏。",
          "code": "preview",
          "title": "先试 Prompt"
        },
        {
          "body": "理解输入、输出、失败模式和边界。",
          "code": "manual",
          "title": "读说明书"
        },
        {
          "body": "把上下文交给宿主 AI 继续工作。",
          "code": "context",
          "title": "带给 AI"
        },
        {
          "body": "进入主力环境前先完成安装入口与风险边界验证。",
          "code": "verify",
          "title": "沙箱验证"
        }
      ],
      "subtitle": "Open source platform for AI Engineering: OpenTelemetry-native LLM Observability, GPU Monitoring, Guardrails, Evaluations, Prompt Management, Vault, Playground. 🚀💻 Integrates with 50+ LLM Providers, VectorDBs, Agent Frameworks and GPUs.",
      "title": "openlit 能力包",
      "trial_prompt": "# openlit - Prompt Preview\n\n> Copy the prompt below into your AI host before installing anything.\n> Its purpose is to let you safely feel the project's workflow, not to claim the project has already run.\n\n## Copy this prompt\n\n```text\nYou are using an independent Doramagic capability pack for openlit/openlit.\n\nProject:\n- Name: openlit\n- Repository: https://github.com/openlit/openlit\n- Summary: Open source platform for AI Engineering: OpenTelemetry-native LLM Observability, GPU Monitoring, Guardrails, Evaluations, Prompt Management, Vault, Playground. 🚀💻 Integrates with 50+ LLM Providers, VectorDBs, Agent Frameworks and GPUs.\n- Host target: local_cli\n\nGoal:\nHelp me evaluate this project for the following task without installing it yet: Open source platform for AI Engineering: OpenTelemetry-native LLM Observability, GPU Monitoring, Guardrails, Evaluations, Prompt Management, Vault, Playground. 🚀💻 Integrates with 50+ LLM Providers, VectorDBs, Agent Frameworks and GPUs.\n\nBefore taking action:\n1. Restate my task, success standard, and boundary.\n2. Identify whether the next step requires tools, browser access, network access, filesystem access, credentials, package installation, or host configuration.\n3. Use only the Doramagic Project Pack, the upstream repository, and the source-linked evidence listed below.\n4. If a real command, install step, API call, file write, or host integration is required, mark it as \"requires post-install verification\" and ask for approval first.\n5. If evidence is missing, say \"evidence is missing\" instead of filling the gap.\n\nPreviewable capabilities:\n- Capability 1: Open source platform for AI Engineering: OpenTelemetry-native LLM Observability, GPU Monitoring, Guardrails, Evaluations, Prompt Management, Vault, Playground. 
🚀💻 Integrates with 50+ LLM Providers, VectorDBs, Agent Frameworks and GPUs.\n\nCapabilities that require post-install verification:\n- Capability 1: Use the source-backed project context to guide one small, checkable workflow step.\n\nCore service flow:\n1. overview: OpenLIT Overview. Produce one small intermediate artifact and wait for confirmation.\n2. quickstart: Quick Start Guide. Produce one small intermediate artifact and wait for confirmation.\n3. architecture: System Architecture. Produce one small intermediate artifact and wait for confirmation.\n4. data-flow: Data Flow and Management. Produce one small intermediate artifact and wait for confirmation.\n5. python-sdk: Python SDK Architecture. Produce one small intermediate artifact and wait for confirmation.\n\nSource-backed evidence to keep in mind:\n- https://github.com/openlit/openlit\n- https://github.com/openlit/openlit#readme\n- README.md\n- src/README.md\n- docker-compose.yml\n- examples/linux/docker-compose.yaml\n- examples/kubernetes/setup.sh\n- src/client/src/lib/platform/clickhouse/clickhouse-client.ts\n- src/dev-docker-compose.yml\n- sdk/python/src/openlit/otel/tracing.py\n\nFirst response rules:\n1. Start Step 1 only.\n2. Explain the one service action you will perform first.\n3. Ask exactly three questions about my target workflow, success standard, and sandbox boundary.\n4. 
Stop and wait for my answers.\n\nStep 1 follow-up protocol:\n- After I answer the first three questions, stay in Step 1.\n- Produce six parts only: clarified task, success standard, boundary conditions, two or three options, tradeoffs for each option, and one recommendation.\n- End by asking whether I confirm the recommendation.\n- Do not move to Step 2 until I explicitly confirm.\n\nConversation rules:\n- Advance one step at a time and wait for confirmation after each small artifact.\n- Write outputs as recommendations or planned checks, not as completed execution.\n- Do not claim tests passed, files changed, commands ran, APIs were called, or the project was installed.\n- If the user asks for execution, first provide the sandbox setup, expected output, rollback, and approval checkpoint.\n```\n",
      "voices": [
        {
          "body": "来源平台：github。github/github_issue: Proposal: gen_ai.agent.threat_detected span event helper for OTel-shaped（https://github.com/openlit/openlit/issues/1186）；github/github_issue: [Bug]: Docker Image doesn't run on windows 64bit（https://github.com/openlit/openlit/issues/786）；github/github_issue: Bug: OpenAI API key in operator example test-application is not using OP（https://github.com/openlit/openlit/issues/1135）；github/github_issue: Integration: Governance and compliance signals for LLM observability（https://github.com/openlit/openlit/issues/1106）；github/github_release: openlit-1.20.0（https://github.com/openlit/openlit/releases/tag/openlit-1.20.0）；github/github_release: controller-0.2.0（https://github.com/openlit/openlit/releases/tag/controller-0.2.0）；github/github_release: openlit-1.19.1（https://github.com/openlit/openlit/releases/tag/openlit-1.19.1）；github/github_release: controller-0.1.0（https://github.com/openlit/openlit/releases/tag/controller-0.1.0）；github/github_release: openlit-1.19.0（https://github.com/openlit/openlit/releases/tag/openlit-1.19.0）；github/github_release: py-1.41.2（https://github.com/openlit/openlit/releases/tag/py-1.41.2）。这些是项目级外部声音，不作为单独质量证明。",
          "items": [
            {
              "kind": "github_issue",
              "source": "github",
              "title": "Proposal: gen_ai.agent.threat_detected span event helper for OTel-shaped",
              "url": "https://github.com/openlit/openlit/issues/1186"
            },
            {
              "kind": "github_issue",
              "source": "github",
              "title": "[Bug]: Docker Image doesn't run on windows 64bit",
              "url": "https://github.com/openlit/openlit/issues/786"
            },
            {
              "kind": "github_issue",
              "source": "github",
              "title": "Bug: OpenAI API key in operator example test-application is not using OP",
              "url": "https://github.com/openlit/openlit/issues/1135"
            },
            {
              "kind": "github_issue",
              "source": "github",
              "title": "Integration: Governance and compliance signals for LLM observability",
              "url": "https://github.com/openlit/openlit/issues/1106"
            },
            {
              "kind": "github_release",
              "source": "github",
              "title": "openlit-1.20.0",
              "url": "https://github.com/openlit/openlit/releases/tag/openlit-1.20.0"
            },
            {
              "kind": "github_release",
              "source": "github",
              "title": "controller-0.2.0",
              "url": "https://github.com/openlit/openlit/releases/tag/controller-0.2.0"
            },
            {
              "kind": "github_release",
              "source": "github",
              "title": "openlit-1.19.1",
              "url": "https://github.com/openlit/openlit/releases/tag/openlit-1.19.1"
            },
            {
              "kind": "github_release",
              "source": "github",
              "title": "controller-0.1.0",
              "url": "https://github.com/openlit/openlit/releases/tag/controller-0.1.0"
            },
            {
              "kind": "github_release",
              "source": "github",
              "title": "openlit-1.19.0",
              "url": "https://github.com/openlit/openlit/releases/tag/openlit-1.19.0"
            },
            {
              "kind": "github_release",
              "source": "github",
              "title": "py-1.41.2",
              "url": "https://github.com/openlit/openlit/releases/tag/py-1.41.2"
            }
          ],
          "status": "已收录 10 条来源",
          "title": "社区讨论"
        }
      ]
    },
    "homepage_card": {
      "category": "软件开发与交付",
      "desc": "Open source platform for AI Engineering: OpenTelemetry-native LLM Observability, GPU Monitoring, Guardrails, Evaluations, Prompt Management, Vault, Playground. 🚀💻 Integrates with 50+ LLM Providers, VectorDBs, Agent Frameworks and GPUs.",
      "effort": "安装已验证",
      "forks": 276,
      "icon": "code",
      "name": "openlit 能力包",
      "risk": "可发布",
      "slug": "openlit",
      "stars": 2446,
      "tags": [
        "浏览器 Agent",
        "网页任务自动化",
        "浏览器自动化",
        "断点恢复流程",
        "评测体系"
      ],
      "thumb": "gray",
      "type": "Prompt Preview"
    },
    "manual": {
      "markdown": "# https://github.com/openlit/openlit 项目说明书\n\n生成时间：2026-05-16 21:33:24 UTC\n\n## 目录\n\n- [OpenLIT Overview](#overview)\n- [Quick Start Guide](#quickstart)\n- [System Architecture](#architecture)\n- [Data Flow and Management](#data-flow)\n- [Python SDK Architecture](#python-sdk)\n- [TypeScript SDK Architecture](#typescript-sdk)\n- [Go SDK Architecture](#go-sdk)\n- [LLM and Framework Integrations](#integrations)\n- [OpenLIT Controller](#controller)\n- [GPU Collector](#gpu-collector)\n\n<a id='overview'></a>\n\n## OpenLIT Overview\n\n### 相关页面\n\n相关主题：[Quick Start Guide](#quickstart), [System Architecture](#architecture)\n\n<details>\n<summary>相关源码文件</summary>\n\n以下源码文件用于生成本页说明：\n\n- [src/client/README.md](https://github.com/openlit/openlit/blob/main/src/client/README.md)\n- [src/client/src/app/(playground)/getting-started/page.tsx](https://github.com/openlit/openlit/blob/main/src/client/src/app/(playground)/getting-started/page.tsx)\n- [src/client/src/components/(playground)/getting-started/tracing/index.tsx](https://github.com/openlit/openlit/blob/main/src/client/src/components/(playground)/getting-started/tracing/index.tsx)\n- [src/client/src/components/(playground)/getting-started/secrets/index.tsx](https://github.com/openlit/openlit/blob/main/src/client/src/components/(playground)/getting-started/secrets/index.tsx)\n- [src/client/src/components/(playground)/getting-started/prompts/index.tsx](https://github.com/openlit/openlit/blob/main/src/client/src/components/(playground)/getting-started/prompts/index.tsx)\n</details>\n\n# OpenLIT Overview\n\n## What is OpenLIT?\n\nOpenLIT is an **OpenTelemetry-native GenAI and LLM Application Observability tool** designed to simplify the integration process for sending OpenTelemetry traces and metrics from your LLM applications. 
It provides comprehensive monitoring capabilities for both GenAI and LLM applications.\n\n资料来源：[src/client/src/app/(playground)/getting-started/page.tsx:127]()\n\n## Key Features\n\nOpenLIT offers several core capabilities for observability:\n\n| Feature Category | Description |\n|------------------|-------------|\n| Tracing | Capture detailed traces of LLM application requests |\n| Metrics | Collect and analyze performance metrics |\n| Evaluations | Assess response quality and model performance |\n| Context Management | Manage evaluation contexts and prompts |\n| Secrets Management | Securely store and manage API keys and credentials |\n\n资料来源：[src/client/src/components/(playground)/getting-started/tracing/index.tsx]()\n资料来源：[src/client/src/components/(playground)/getting-started/secrets/index.tsx]()\n资料来源：[src/client/src/components/(playground)/getting-started/prompts/index.tsx]()\n\n## Architecture Overview\n\n```mermaid\ngraph TD\n    A[LLM Application] --> B[OpenLIT SDK]\n    B --> C[OTLP Endpoint<br/>127.0.0.1:4318]\n    C --> D[OpenLIT Backend]\n    D --> E[OpenLIT UI<br/>127.0.0.1:3000]\n    F[Database] <--> D\n```\n\n## SDK Support\n\nOpenLIT provides official SDKs for multiple programming languages:\n\n### Python SDK\n\nThe Python SDK enables Python-based LLM applications to send telemetry data to OpenLIT.\n\n```python\nimport openlit\n\nopenlit.init()\n```\n\n资料来源：[src/client/src/app/(playground)/getting-started/page.tsx]()\n\n### TypeScript/JavaScript SDK\n\nThe TypeScript SDK provides similar functionality for Node.js and browser-based applications.\n\n```typescript\nimport openlit from 'openlit';\n\nopenlit.init({\n  otlpEndpoint: \"http://127.0.0.1:4318\"\n});\n```\n\n**Example Usage with OpenAI:**\n\n```typescript\nimport OpenAI from 'openai';\nimport openlit from 'openlit';\n\nopenlit.init({ otlpEndpoint: \"http://127.0.0.1:4318\" });\n\nconst client = new OpenAI({\n  apiKey: process.env.OPENAI_API_KEY\n});\n\nconst chatCompletion = await 
client.chat.completions.create({\n  messages: [{ role: 'user', content: 'What is LLM Observability?' }],\n  model: 'gpt-3.5-turbo',\n});\n```\n\n资料来源：[src/client/src/components/(playground)/getting-started/tracing/index.tsx]()\n\n## Configuration Options\n\n### OTLP Endpoint Configuration\n\nYou can configure the OTLP endpoint in two ways:\n\n| Method | Configuration |\n|--------|---------------|\n| Code | `openlit.init({ otlpEndpoint: \"http://127.0.0.1:4318\" })` |\n| Environment Variable | `OTEL_EXPORTER_OTLP_ENDPOINT=\"http://127.0.0.1:4318\"` |\n\n资料来源：[src/client/src/app/(playground)/getting-started/page.tsx]()\n\n### Environment Variables\n\n| Variable | Purpose | Default Value |\n|----------|---------|---------------|\n| `OTEL_EXPORTER_OTLP_ENDPOINT` | OTLP collector endpoint | http://127.0.0.1:4318 |\n\n## Deployment\n\n### Docker Compose Deployment\n\nOpenLIT can be deployed using Docker Compose from the root directory:\n\n```bash\ngit clone git@github.com:openlit/openlit.git\ndocker compose up -d\n```\n\n资料来源：[src/client/src/app/(playground)/getting-started/page.tsx]()\n\n### Default Ports\n\n| Service | Default Address |\n|---------|-----------------|\n| OpenLIT UI | http://127.0.0.1:3000 |\n| OTLP Endpoint | http://127.0.0.1:4318 |\n\n## Default Credentials\n\nAfter deployment, access the OpenLIT UI using the following default credentials:\n\n| Field | Default Value |\n|-------|---------------|\n| Email | user@openlit.io |\n| Password | openlituser |\n\n资料来源：[src/client/src/app/(playground)/getting-started/page.tsx]()\n\n## SDK Repository Locations\n\n| SDK | Repository Path |\n|-----|-----------------|\n| Python SDK | `sdk/python` |\n| TypeScript SDK | `sdk/typescript` |\n\n资料来源：[src/client/src/app/(playground)/getting-started/page.tsx]()\n\n## Community and Support\n\nOpenLIT maintains active community channels for support and discussions:\n\n| Platform | Link |\n|----------|------|\n| GitHub | https://github.com/openlit/openlit |\n| Documentation | 
https://docs.openlit.io |\n| Slack | Join via invitation link |\n| X (Twitter) | @openlit_io |\n\n资料来源：[src/client/README.md]()\n\n## Evaluation Features\n\nOpenLIT supports custom evaluation types with configurable prompts and context:\n\n```typescript\n// Evaluation prompt format example\n[Domain Accuracy evaluation context]\nConsider: whether the response aligns with domain-specific knowledge and terminology.\nLook for incorrect use of domain terms, inaccurate domain-specific claims, and deviations from established domain practices.\n```\n\nEvaluations provide the following metrics:\n- **Score**: Numerical rating\n- **Classification**: Categorical classification\n- **Explanation**: Detailed reasoning\n- **Verdict**: Pass/fail determination\n\n资料来源：[src/client/src/app/(playground)/evaluations/types/new/page.tsx]()\n资料来源：[src/client/src/components/(playground)/request/components/evaluations.tsx]()\n\n## Pricing Integration\n\nOpenLIT can calculate costs for LLM usage based on token consumption:\n\n```\ncost = (input_tokens / 1M) × input_price + (output_tokens / 1M) × output_price\n```\n\nThis includes:\n- Input token pricing per million tokens\n- Output token pricing per million tokens\n- Context window size tracking\n\n资料来源：[src/client/src/components/(playground)/chat/chat-settings-form.tsx]()\n\n---\n\n<a id='quickstart'></a>\n\n## Quick Start Guide\n\n### 相关页面\n\n相关主题：[Python SDK Architecture](#python-sdk)\n\n<details>\n<summary>相关源码文件</summary>\n\n以下源码文件用于生成本页说明：\n\n- [src/client/src/components/(playground)/getting-started/tracing/index.tsx](https://github.com/openlit/openlit/blob/main/src/client/src/components/(playground)/getting-started/tracing/index.tsx)\n- [src/client/src/app/(playground)/getting-started/page.tsx](https://github.com/openlit/openlit/blob/main/src/client/src/app/(playground)/getting-started/page.tsx)\n- 
[src/client/src/app/(playground)/agents/no-controller.tsx](https://github.com/openlit/openlit/blob/main/src/client/src/app/(playground)/agents/no-controller.tsx)\n- [src/client/src/app/(playground)/context/new/page.tsx](https://github.com/openlit/openlit/blob/main/src/client/src/app/(playground)/context/new/page.tsx)\n- [src/client/src/app/(playground)/context/[id]/page.tsx](https://github.com/openlit/openlit/blob/main/src/client/src/app/(playground)/context/[id]/page.tsx)\n- [src/client/src/components/(playground)/openground/sdk-usage-dialog.tsx](https://github.com/openlit/openlit/blob/main/src/client/src/components/(playground)/openground/sdk-usage-dialog.tsx)\n- [src/client/src/app/not-found.tsx](https://github.com/openlit/openlit/blob/main/src/client/src/app/not-found.tsx)\n</details>\n\n# Quick Start Guide\n\nOpenLIT is an OpenTelemetry-native GenAI and LLM Application Observability tool designed to simplify the integration of tracing and metrics collection for AI applications. This guide provides comprehensive instructions for deploying OpenLIT and instrumenting your applications using the Python and TypeScript SDKs.\n\n## Prerequisites\n\nBefore beginning, ensure you have the following installed:\n\n| Requirement | Version | Purpose |\n|-------------|---------|---------|\n| Docker | Latest | Container runtime for OpenLIT deployment |\n| Docker Compose | Latest | Orchestration tool |\n| Node.js | 18+ | Required for TypeScript SDK |\n| Python | 3.8+ | Required for Python SDK |\n| npm/pip | Latest | Package managers |\n\n## Deployment Options\n\nOpenLIT can be deployed using multiple methods depending on your infrastructure requirements.\n\n### Docker Compose Deployment\n\nThe recommended approach for local development and testing is Docker Compose.\n\n```bash\ngit clone git@github.com:openlit/openlit.git\ncd openlit\ndocker compose up -d\n```\n\nOnce deployed, access the OpenLIT UI at `http://127.0.0.1:3000` using the default credentials:\n\n- **Email:** 
user@openlit.io\n- **Password:** openlituser\n\n资料来源：[src/client/src/app/(playground)/getting-started/page.tsx:50-55]()\n\n### Controller Deployment\n\nFor infrastructure-level observability, the OpenLIT Controller can be deployed as a system service or containerized application.\n\n#### Linux System Service\n\n```bash\nsudo tee /etc/systemd/system/openlit-controller.service <<EOF\n[Unit]\nDescription=OpenLIT Controller\nAfter=network.target\n\n[Service]\nType=simple\nUser=root\nWorkingDirectory=/opt/openlit\nExecStart=/opt/openlit/openlit-controller\nRestart=always\nRestartSec=5\n\n[Install]\nWantedBy=multi-user.target\nEOF\n\nsystemctl daemon-reload\nsystemctl enable --now openlit-controller\n```\n\n资料来源：[src/client/src/app/(playground)/agents/no-controller.tsx:12-25]()\n\n#### Docker Deployment\n\n```bash\ndocker run -d --privileged --pid=host \\\n  -e OPENLIT_URL=\"<openlit-url>\" \\\n  -e OTEL_EXPORTER_OTLP_ENDPOINT=\"<openlit-url>:4318\" \\\n  -v /proc:/host/proc:ro \\\n  -v /sys/kernel/debug:/sys/kernel/debug:ro \\\n  -v /sys/fs/bpf:/sys/fs/bpf:rw \\\n  -v /var/run/docker.sock:/var/run/docker.sock \\\n  -e OPENLIT_PROC_ROOT=\"/host/proc\" \\\n  ghcr.io/openlit/controller:latest\n```\n\n#### Kubernetes Deployment\n\n```bash\nhelm repo add openlit https://openlit.github.io/helm\nhelm repo update\nhelm upgrade --install openlit openlit/openlit \\\n  --set openlit-controller.enabled=true\n```\n\n资料来源：[src/client/src/app/(playground)/agents/no-controller.tsx:27-45]()\n\n## SDK Integration\n\nOpenLIT provides SDKs for both Python and TypeScript environments to enable application-level observability.\n\n### Python SDK\n\n#### Installation\n\nInstall the Python SDK using pip:\n\n```bash\npip install openlit\n```\n\n资料来源：[src/client/src/app/(playground)/getting-started/page.tsx:85-92]()\n\n#### Initialization\n\nAdd the following initialization code to your application:\n\n```python\nimport 
openlit\n\nopenlit.init(otlp_endpoint=\"http://127.0.0.1:4318\")\n```\n\nAlternatively, set the endpoint using the environment variable:\n\n```bash\nexport OTEL_EXPORTER_OTLP_ENDPOINT=\"http://127.0.0.1:4318\"\n```\n\n#### Complete Example with OpenAI\n\n```python\nimport openlit\nfrom openai import OpenAI\n\nopenlit.init(otlp_endpoint=\"http://127.0.0.1:4318\")\n\nclient = OpenAI(api_key=os.environ.get(\"OPENAI_API_KEY\"))\n\nresponse = client.chat.completions.create(\n    model=\"gpt-3.5-turbo\",\n    messages=[{\"role\": \"user\", \"content\": \"What is LLM Observability?\"}]\n)\n```\n\n资料来源：[src/client/src/components/(playground)/getting-started/tracing/index.tsx:45-65]()\n\n### TypeScript SDK\n\n#### Installation\n\nInstall the TypeScript SDK using npm:\n\n```bash\nnpm install openlit\n```\n\n#### Initialization\n\nAdd the following initialization code to your application:\n\n```typescript\nimport openlit from 'openlit';\n\nopenlit.init({\n  otlpEndpoint: \"http://127.0.0.1:4318\"\n});\n```\n\nAlternatively, set the endpoint using the environment variable `OTEL_EXPORTER_OTLP_ENDPOINT`.\n\n#### Complete Example with OpenAI\n\n```typescript\nimport OpenAI from 'openai';\nimport openlit from 'openlit';\n\nopenlit.init({ otlpEndpoint: \"http://127.0.0.1:4318\" });\n\nconst client = new OpenAI({\n  apiKey: process.env.OPENAI_API_KEY\n});\n\nconst chatCompletion = await client.chat.completions.create({\n  messages: [{ role: 'user', content: 'What is LLM Observability?' 
}],\n  model: 'gpt-3.5-turbo',\n});\n```\n\n资料来源：[src/client/src/components/(playground)/getting-started/tracing/index.tsx:95-120]()\n\n## Configuration Reference\n\n### SDK Configuration Options\n\n| Parameter | Type | Environment Variable | Description |\n|-----------|------|---------------------|-------------|\n| `otlp_endpoint` | string | `OTEL_EXPORTER_OTLP_ENDPOINT` | OTLP exporter endpoint URL |\n| `api_key` | string | `OPENLIT_API_KEY` | API key for authenticated endpoints |\n\n### Controller Environment Variables\n\n| Variable | Description |\n|----------|-------------|\n| `OPENLIT_URL` | Base URL for the OpenLIT instance |\n| `OTEL_EXPORTER_OTLP_ENDPOINT` | OTLP endpoint for trace export |\n| `OPENLIT_API_KEY` | API key for OpenLIT authentication |\n| `OPENLIT_PROC_ROOT` | Root path for process information (default: `/host/proc`) |\n\n## Application Workflow\n\n```mermaid\ngraph TD\n    A[Deploy OpenLIT with Docker Compose] --> B[Access OpenLIT UI]\n    B --> C{Choose Deployment Mode}\n    C -->|Local Development| D[Install SDK in Application]\n    C -->|System-wide| E[Deploy Controller]\n    D --> F[Initialize SDK]\n    F --> G[Instrument LLM Calls]\n    G --> H[View Traces & Metrics in UI]\n    E --> I[Auto-discover Services]\n    I --> J[View Infrastructure Metrics]\n```\n\n## Additional Resources\n\nFor more advanced configurations and use cases, refer to the following repositories:\n\n- [OpenLIT Python SDK](https://github.com/openlit/openlit/tree/main/sdk/python)\n- [OpenLIT TypeScript SDK](https://github.com/openlit/openlit/tree/main/sdk/typescript)\n- [Official Documentation](https://docs.openlit.io)\n- [GitHub Repository](https://github.com/openlit/openlit)\n\n资料来源：[src/client/src/app/(playground)/getting-started/page.tsx:100-115]()\n资料来源：[src/client/src/app/not-found.tsx:20-35]()\n\n---\n\n<a id='architecture'></a>\n\n## System Architecture\n\n### 相关页面\n\n相关主题：[Data Flow and Management](#data-flow), [Python SDK 
Architecture](#python-sdk)\n\n<details>\n<summary>相关源码文件</summary>\n\n以下源码文件用于生成本页说明：\n\n- [src/client/src/lib/platform/clickhouse/clickhouse-client.ts](https://github.com/openlit/openlit/blob/main/src/client/src/lib/platform/clickhouse/clickhouse-client.ts)\n- [docker-compose.yml](https://github.com/openlit/openlit/blob/main/docker-compose.yml)\n- [src/dev-docker-compose.yml](https://github.com/openlit/openlit/blob/main/src/dev-docker-compose.yml)\n</details>\n\n# System Architecture\n\n## Overview\n\nOpenLIT is an **OpenTelemetry-native GenAI and LLM Application Observability tool** designed to simplify the integration of observability into AI applications. The system enables developers to send OpenTelemetry traces and metrics from their LLM applications with minimal configuration changes.\n\nThe architecture follows a distributed microservices pattern with clear separation between data collection (SDK instrumentation), data transmission (OTLP protocol), and data visualization (frontend dashboard).\n\n## High-Level Architecture\n\n```mermaid\ngraph TB\n    subgraph \"Client Applications\"\n        PythonApp[\"Python Application\"]\n        TypeScriptApp[\"TypeScript/JS Application\"]\n    end\n\n    subgraph \"OpenLIT SDKs\"\n        PythonSDK[\"Python SDK<br/>pip install openlit\"]\n        TSSDK[\"TypeScript SDK<br/>npm install openlit\"]\n    end\n\n    subgraph \"Data Transport\"\n        OTLP[\"OTLP Endpoint<br/>:4318\"]\n    end\n\n    subgraph \"OpenLIT Backend\"\n        Frontend[\"Web Dashboard<br/>Port 3000\"]\n        API[\"API Services\"]\n        DB[( \"ClickHouse<br/>Database\" )]\n    end\n\n    PythonApp --> PythonSDK\n    TypeScriptApp --> TSSDK\n    PythonSDK --> OTLP\n    TSSDK --> OTLP\n    OTLP --> API\n    API --> DB\n    Frontend --> API\n```\n\n## Core Components\n\n### SDK Layer\n\nOpenLIT provides language-specific SDKs for instrumenting AI applications:\n\n| SDK | Package Manager | Installation | Repository 
|\n|-----|-----------------|--------------|------------|\n| Python | pip | `pip install openlit` | [sdk/python](https://github.com/openlit/openlit/tree/main/sdk/python) |\n| TypeScript | npm | `npm install openlit` | [sdk/typescript](https://github.com/openlit/openlit/tree/main/sdk/typescript) |\n\n**Python SDK Initialization**\n\n```python\nimport openlit\n\nopenlit.init(otlp_endpoint=\"http://127.0.0.1:4318\")\n```\n资料来源：[src/client/src/app/(playground)/getting-started/page.tsx:73-74]()\n\n**TypeScript SDK Initialization**\n\n```typescript\nimport openlit from 'openlit';\n\nopenlit.init({\n  otlpEndpoint: \"http://127.0.0.1:4318\"\n});\n```\n资料来源：[src/client/src/app/(playground)/getting-started/page.tsx:115-118]()\n\n### Data Transport Layer\n\nThe system uses the **OpenTelemetry Protocol (OTLP)** for transmitting telemetry data:\n\n| Parameter | Default Value | Description |\n|-----------|---------------|-------------|\n| OTLP Endpoint | `http://127.0.0.1:4318` | gRPC/HTTP endpoint for traces |\n| Environment Variable | `OTEL_EXPORTER_OTLP_ENDPOINT` | Alternative endpoint configuration |\n\nThe OTLP endpoint can be configured either programmatically via SDK initialization or through environment variables.\n\n### Backend Services\n\n#### Web Dashboard (Frontend)\n\nThe frontend is a Next.js application providing the user interface for:\n\n- **Tracing View** - Visualize request traces and spans\n- **Agents Management** - Configure and monitor AI agents\n- **Model Management** - Configure AI model providers and pricing\n- **Getting Started** - Onboarding documentation\n- **Chat Interface** - Interactive testing environment\n\nThe application runs on **port 3000** by default and provides a login interface with default credentials:\n\n- **Email:** user@openlit.io\n- **Password:** openlituser\n\n资料来源：[src/client/src/app/(playground)/getting-started/page.tsx:40-44]()\n\n#### Agent Lifecycle Management\n\nOpenLIT supports managing AI agents with lifecycle 
operations:\n\n```mermaid\nstateDiagram-v2\n    [*] --> Starting\n    Starting --> Running\n    Running --> Restarting\n    Restarting --> Running\n    Running --> Stopping\n    Stopping --> [*]\n```\n\nLifecycle actions include:\n- **Start** - Initialize the agent service\n- **Stop** - Terminate with confirmation dialog\n- **Restart** - Restart the agent process\n\n资料来源：[src/client/src/app/(playground)/agents/lifecycle-actions.tsx:1-60]()\n\n### Controller Services\n\nThe OpenLIT Controller provides infrastructure-level observability for containerized and orchestrated environments:\n\n| Deployment Method | Command/Configuration |\n|-------------------|----------------------|\n| Docker | `docker run -d --privileged --pid=host ... ghcr.io/openlit/controller:latest` |\n| Kubernetes | `helm upgrade --install openlit openlit/openlit --set openlit-controller.enabled=true` |\n| Systemd | Service unit file with systemctl enable |\n\n资料来源：[src/client/src/app/(playground)/agents/no-controller.tsx:45-60]()\n\n#### Controller Environment Variables\n\n| Variable | Purpose |\n|----------|---------|\n| `OPENLIT_URL` | Main OpenLIT instance URL |\n| `OTEL_EXPORTER_OTLP_ENDPOINT` | OTLP endpoint for telemetry |\n| `OPENLIT_API_KEY` | Authentication key (optional) |\n| `OPENLIT_PROC_ROOT` | Process root for host monitoring |\n\n## Deployment Architecture\n\n### Docker Compose Deployment\n\nFor development and testing, OpenLIT can be deployed using Docker Compose:\n\n```bash\ngit clone git@github.com:openlit/openlit.git\ncd openlit\ndocker compose up -d\n```\n\n资料来源：[src/client/src/app/(playground)/getting-started/page.tsx:50-55]()\n\n### Multi-Platform Support\n\n```mermaid\ngraph LR\n    subgraph \"Deployment Platforms\"\n        Docker[\"Docker\"]\n        K8s[\"Kubernetes\"]\n        SystemD[\"Systemd\"]\n    end\n\n    subgraph \"Monitoring Targets\"\n        Containers[\"Containers\"]\n        Processes[\"Host Processes\"]\n        Services[\"System Services\"]\n    end\n\n    
Docker --> Containers\n    K8s --> Containers\n    K8s --> Services\n    SystemD --> Services\n    SystemD --> Processes\n```\n\n## Feature Architecture\n\n### Tracing Integration\n\nOpenLIT's tracing feature provides comprehensive observability:\n\n| Feature | Description |\n|---------|-------------|\n| **Auto-Instrumentation** | Automatic capture of LLM calls |\n| **Span Attributes** | Model, provider, token usage, latency |\n| **Context Propagation** | Request tracing across services |\n| **Error Tracking** | Exception and failure monitoring |\n\n资料来源：[src/client/src/components/(playground)/getting-started/tracing/index.tsx:1-100]()\n\n### Agent Schema Capture\n\nThe system captures tool schemas from agents for documentation and analysis:\n\n```typescript\ninterface ToolSchema {\n  name: string;\n  description?: string;\n  schema: object;\n}\n```\n\nSchemas are displayed in an expandable accordion format with JSON visualization.\n\n资料来源：[src/client/src/components/(playground)/agents/tools-card.tsx:35-55]()\n\n### Model Configuration\n\nOpenLIT supports custom model configurations with pricing information:\n\n| Field | Type | Description |\n|-------|------|-------------|\n| `providerName` | string | AI provider name |\n| `modelId` | string | Model identifier |\n| `modelName` | string | Display name |\n| `inputPricePerMToken` | number | Input cost per million tokens |\n| `outputPricePerMToken` | number | Output cost per million tokens |\n| `contextWindow` | number | Maximum context length |\n\n资料来源：[src/client/src/components/(playground)/chat/message-input.tsx:25-45]()\n\n## Data Flow\n\n```mermaid\nsequenceDiagram\n    participant App as Application\n    participant SDK as OpenLIT SDK\n    participant OTLP as OTLP Endpoint\n    participant API as OpenLIT API\n    participant CH as ClickHouse\n    participant UI as Web Dashboard\n\n    App->>SDK: Initialize with config\n    App->>SDK: LLM API Call\n    SDK->>SDK: Capture trace/metrics\n    SDK->>OTLP: Export 
telemetry\n    OTLP->>API: Process spans\n    API->>CH: Store data\n    UI->>API: Query traces\n    API->>UI: Return results\n    UI->>UI: Render dashboard\n```\n\n## Configuration Reference\n\n### SDK Configuration Options\n\n| Parameter | Type | Default | Description |\n|-----------|------|---------|-------------|\n| `otlp_endpoint` | string | `http://127.0.0.1:4318` | OTLP collector endpoint |\n| `service_name` | string | auto-detect | Service identifier |\n| `api_key` | string | none | Authentication for hosted services |\n\n### Environment Variables\n\n| Variable | SDK Support | Description |\n|----------|-------------|-------------|\n| `OTEL_EXPORTER_OTLP_ENDPOINT` | Python, TS | Global OTLP endpoint override |\n| `OPENLIT_API_KEY` | All | API authentication key |\n| `OPENLIT_SERVICE_NAME` | All | Override service name |\n\n## Security Considerations\n\n### Authentication\n\nThe system supports multiple authentication providers:\n\n- **Email/Password** - Local authentication with default credentials\n- **OAuth Providers** - Google and GitHub SSO integration\n\n资料来源：[src/client/src/components/(auth)/auth-form.tsx:1-50]()\n\n### API Security\n\nAPI endpoints are protected and require valid session tokens. 
The controller service supports optional API key authentication:\n\n```bash\n-e OPENLIT_API_KEY=\"your-api-key\"\n```\n\n## Technology Stack\n\n| Layer | Technology |\n|-------|------------|\n| Frontend | Next.js, React, TypeScript, TailwindCSS |\n| SDKs | Python, TypeScript |\n| Telemetry | OpenTelemetry Protocol (OTLP) |\n| Database | ClickHouse |\n| Containerization | Docker, Kubernetes |\n| Service Management | Systemd |\n\n## External Resources\n\n| Resource | URL |\n|----------|-----|\n| Documentation | https://docs.openlit.io |\n| GitHub Repository | https://github.com/openlit/openlit |\n| TypeScript SDK | https://github.com/openlit/openlit/tree/main/sdk/typescript |\n| Python SDK | https://github.com/openlit/openlit/tree/main/sdk/python |\n\n---\n\n*Last updated: Based on repository state at main branch*\n\n---\n\n<a id='data-flow'></a>\n\n## Data Flow and Management\n\n### 相关页面\n\n相关主题：[System Architecture](#architecture), [Python SDK Architecture](#python-sdk)\n\n<details>\n<summary>相关源码文件</summary>\n\n以下源码文件用于生成本页说明：\n\n- [sdk/python/src/openlit/otel/tracing.py](https://github.com/openlit/openlit/blob/main/sdk/python/src/openlit/otel/tracing.py)\n- [sdk/python/src/openlit/otel/metrics.py](https://github.com/openlit/openlit/blob/main/sdk/python/src/openlit/otel/metrics.py)\n- [src/client/src/lib/platform/clickhouse/helpers.ts](https://github.com/openlit/openlit/blob/main/src/client/src/lib/platform/clickhouse/helpers.ts)\n- [src/client/src/lib/platform/request/index.ts](https://github.com/openlit/openlit/blob/main/src/client/src/lib/platform/request/index.ts)\n- [sdk/python/src/openlit/instrumentation/claude_agent_sdk/__init__.py](https://github.com/openlit/openlit/blob/main/sdk/python/src/openlit/instrumentation/claude_agent_sdk/__init__.py)\n- [sdk/python/src/openlit/instrumentation/langgraph/__init__.py](https://github.com/openlit/openlit/blob/main/sdk/python/src/openlit/instrumentation/langgraph/__init__.py)\n- 
[sdk/typescript/src/instrumentation/llamaindex/index.ts](https://github.com/openlit/openlit/blob/main/sdk/typescript/src/instrumentation/llamaindex/index.ts)\n</details>\n\n# Data Flow and Management\n\n## Overview\n\nOpenLIT is an OpenTelemetry-native observability platform designed for GenAI and LLM applications. The data flow architecture encompasses the entire lifecycle of telemetry data—from instrumentation at the application level through processing, storage, and visualization in the frontend UI.\n\nThe system follows a standard OpenTelemetry Collector pattern with platform-specific optimizations for handling GenAI-specific semantic conventions and metrics. Data flows through multiple layers: SDK instrumentation, OTLP export, backend processing, ClickHouse storage, and client-side data management for the playground UI.\n\n## Architecture Overview\n\n```mermaid\ngraph TD\n    subgraph Application_Layer[\"Application Layer\"]\n        PySDK[\"Python SDK\"]\n        TsSDK[\"TypeScript SDK\"]\n    end\n    \n    subgraph Instrumentation[\"Instrumentation\"]\n        LangGraph[\"LangGraph\"]\n        ClaudeAgent[\"Claude Agent SDK\"]\n        LlamaIndex[\"LlamaIndex\"]\n        OpenAI[\"OpenAI\"]\n    end\n    \n    subgraph Export[\"OTLP Export\"]\n        OTLP[\"OTLP Endpoint<br/>:4318\"]\n    end\n    \n    subgraph Backend[\"OpenLIT Backend\"]\n        Processor[\"Data Processor\"]\n        Storage[\"ClickHouse\"]\n    end\n    \n    subgraph Frontend[\"Frontend Client\"]\n        Client[\"Playground UI\"]\n        APIClient[\"API Client\"]\n    end\n    \n    PySDK -->|HTTP/gRPC| OTLP\n    TsSDK -->|HTTP/gRPC| OTLP\n    LangGraph --> PySDK\n    ClaudeAgent --> PySDK\n    OpenAI --> PySDK\n    LlamaIndex --> TsSDK\n    OTLP --> Processor\n    Processor --> Storage\n    Storage --> APIClient\n    APIClient --> Client\n```\n\n## Tracing Data Flow\n\n### Python SDK Tracing Architecture\n\nThe Python SDK provides comprehensive tracing capabilities through the 
OpenTelemetry SDK integration. The tracing module (`tracing.py`) establishes the foundation for all trace collection and export operations.\n\n**Core Tracing Components:**\n\n| Component | Purpose | Location |\n|-----------|---------|----------|\n| `TracerProvider` | Manages trace creation and propagation | `sdk/python/src/openlit/otel/tracing.py` |\n| `SpanProcessor` | Processes individual spans before export | `sdk/python/src/openlit/otel/tracing.py` |\n| `OTLPExporter` | Exports spans to OTLP endpoint | `sdk/python/src/openlit/otel/tracing.py` |\n| `ContextPropagation` | Maintains trace context across async operations | `sdk/python/src/openlit/otel/tracing.py` |\n\nThe tracing initialization follows a standard pattern:\n\n```python\nimport openlit\n\nopenlit.init(otlp_endpoint=\"http://127.0.0.1:4318\")\n```\n\nThis initialization configures the tracer provider with the specified OTLP endpoint, enabling automatic span collection from all instrumented LLM frameworks.\n\n**资料来源：** [sdk/python/src/openlit/otel/tracing.py](https://github.com/openlit/openlit/blob/main/sdk/python/src/openlit/otel/tracing.py)\n\n### Span Lifecycle\n\nSpans are created and managed through a structured lifecycle that ensures complete telemetry capture:\n\n```mermaid\nsequenceDiagram\n    participant App as Application Code\n    participant SDK as OpenLIT SDK\n    participant Inst as Instrumentation\n    participant Exporter as OTLP Exporter\n    participant Backend as OpenLIT Backend\n    \n    App->>Inst: LLM/Framework Call\n    Inst->>SDK: Create Span\n    SDK->>SDK: Set Attributes\n    SDK->>SDK: Record Metrics\n    App->>SDK: Response Received\n    SDK->>SDK: Complete Span\n    SDK->>Exporter: Export Span\n    Exporter->>Backend: OTLP Stream\n```\n\nThe span lifecycle includes:\n1. **Creation**: Span is initialized with parent context\n2. **Attribute Setting**: GenAI-specific attributes (model, tokens, cost) are attached\n3. 
**Timing**: Start and end times are recorded for duration calculation\n4. **Status**: Span status is set based on success/failure\n5. **Export**: Spans are batched and exported to OTLP endpoint\n\n**资料来源：** [sdk/python/src/openlit/instrumentation/langgraph/__init__.py](https://github.com/openlit/openlit/blob/main/sdk/python/src/openlit/instrumentation/langgraph/__init__.py)\n\n### Instrumentation Framework Integration\n\nOpenLIT provides instrumentation for multiple LLM frameworks, each with framework-specific span attributes:\n\n**Supported Instrumentations:**\n\n| Framework | Operations Traced | Semantic Convention |\n|-----------|-------------------|---------------------|\n| OpenAI | chat completions, embeddings | `gen_ai.operation.type` |\n| LangGraph | execution, checkpointing, construction | `framework` + `gen_ai` |\n| Claude Agent SDK | invoke_agent, execute_tool | `gen_ai.operation.type` |\n| LlamaIndex | query_engine, retriever, document | `retrieve` + `framework` |\n\n**LangGraph Instrumentation Pattern:**\n\nThe LangGraph instrumentation wraps execution operations with both sync and async variants:\n\n```python\n# From langgraph/__init__.py\ndef _wrap_execution_operations(self, operations, ...):\n    for module, method, operation_type, sync_type in operations:\n        if sync_type == \"async\":\n            wrapper = async_general_wrap(operation_type, ...)\n        else:\n            wrapper = general_wrap(operation_type, ...)\n```\n\nThis pattern ensures consistent telemetry regardless of whether the underlying framework uses synchronous or asynchronous execution models.\n\n**资料来源：** [sdk/python/src/openlit/instrumentation/langgraph/__init__.py](https://github.com/openlit/openlit/blob/main/sdk/python/src/openlit/instrumentation/langgraph/__init__.py)\n\n## Metrics Data Flow\n\n### Metrics Collection Architecture\n\nThe metrics module handles quantitative measurements that complement trace data. 
Metrics provide aggregated views of system performance, cost, and usage patterns.\n\n**Metrics Data Points:**\n\n| Metric Type | Description | Aggregation |\n|-------------|-------------|-------------|\n| Request Count | Total number of LLM requests | Count |\n| Token Usage | Input/output tokens consumed | Sum |\n| Cost | Calculated cost based on pricing | Sum |\n| Latency | Request duration in milliseconds | Histogram |\n| Error Rate | Failed requests percentage | Ratio |\n\n**资料来源：** [sdk/python/src/openlit/otel/metrics.py](https://github.com/openlit/openlit/blob/main/sdk/python/src/openlit/otel/metrics.py)\n\n### Metric Recording Flow\n\nMetrics are recorded during span processing using the OpenTelemetry Metrics API:\n\n```mermaid\ngraph LR\n    A[LLM Request] --> B[Create Span]\n    B --> C[Extract Request Data]\n    C --> D[Calculate Pricing]\n    D --> E[Record Metrics]\n    E --> F[Complete Span]\n    \n    G[Pricing Info] --> D\n    H[Model Config] --> D\n```\n\nThe metric recording includes:\n- `start_time` and `end_time` for duration calculation\n- `request_model` for token and pricing lookup\n- `environment` and `application_name` for filtering\n- `pricing_info` dictionary for cost calculation\n\n**资料来源：** [sdk/python/src/openlit/instrumentation/openai/async_openai.py](https://github.com/openlit/openlit/blob/main/sdk/python/src/openlit/instrumentation/openai/async_openai.py)\n\n## Client-Side Data Management\n\n### Frontend API Client Architecture\n\nThe frontend client manages data fetching and state management for the playground UI. 
The API client layer provides a typed interface to the backend services.\n\n**API Client Structure:**\n\n```typescript\n// Simplified from request/index.ts\nexport class RequestClient {\n  async fetchTraces(params: TraceParams): Promise<Trace[]>;\n  async fetchMetrics(params: MetricParams): Promise<Metrics>;\n  async fetchSpans(traceId: string): Promise<Span[]>;\n}\n```\n\n**Key Data Operations:**\n\n| Operation | Endpoint | Purpose |\n|-----------|----------|---------|\n| Fetch Traces | `/api/traces` | List traces with filtering |\n| Fetch Spans | `/api/traces/:id/spans` | Get detailed span data |\n| Fetch Metrics | `/api/metrics` | Aggregated metrics data |\n| Export Data | `/api/openground/models/export` | Export pricing data |\n\n**资料来源：** [src/client/src/lib/platform/request/index.ts](https://github.com/openlit/openlit/blob/main/src/client/src/lib/platform/request/index.ts)\n\n### ClickHouse Data Access\n\nThe client uses ClickHouse as the primary data store and accesses it through helper functions that construct and execute queries.\n\n**Query Helper Functions:**\n\n| Function | Purpose |\n|----------|---------|\n| `buildTraceQuery()` | Construct trace listing query |\n| `buildSpanQuery()` | Construct span detail query |\n| `applyFilters()` | Apply time range and attribute filters |\n| `parseResponse()` | Parse ClickHouse response format |\n\n**资料来源：** [src/client/src/lib/platform/clickhouse/helpers.ts](https://github.com/openlit/openlit/blob/main/src/client/src/lib/platform/clickhouse/helpers.ts)\n\n### State Management Pattern\n\nThe frontend uses React Query or similar state management for data fetching:\n\n```mermaid\ngraph TD\n    A[Component Mount] --> B[Trigger Query]\n    B --> C[Show Loading State]\n    C --> D{Request Complete?}\n    D -->|Yes| E[Update Cache]\n    E --> F[Render Data]\n    D -->|No| G[Show Error]\n    G --> H[Retry Option]\n```\n\nThe state management includes:\n- **Loading states**: Visual feedback during data fetch\n- **Error 
handling**: Graceful degradation on failures\n- **Cache invalidation**: Automatic refresh on mutations\n- **Pagination**: Support for large result sets with \"Load More\" patterns\n\n**资料来源：** [src/client/src/components/(playground)/agents/version-drawer.tsx](https://github.com/openlit/openlit/blob/main/src/client/src/components/(playground)/agents/version-drawer.tsx)\n\n## Timeline View Data Structure\n\n### Span Timeline Rendering\n\nThe timeline view component renders trace data as a visual timeline, parsing span data into a hierarchical structure.\n\n**Span Data Model:**\n\n```typescript\ninterface SpanData {\n  spanId: string;\n  parentSpanId?: string;\n  startTime: number;\n  endTime: number;\n  name: string;\n  kind: 'client' | 'server' | 'producer' | 'consumer';\n  status: 'ok' | 'error';\n  attributes: Record<string, any>;\n  duration: number;\n  cost?: number;\n}\n```\n\n**Timeline Calculation:**\n\n| Column | Width | Content |\n|--------|-------|---------|\n| Name Column | 30% | Span name and kind indicator |\n| Timeline Column | 60% | Visual timeline bar |\n| Stats Column | 10% | Duration and cost |\n\nThe timeline calculates relative positions using `traceWindowMs` to determine the overall trace window, then positions each span proportionally within that window.\n\n**资料来源：** [src/client/src/components/(playground)/request/components/timeline-view.tsx](https://github.com/openlit/openlit/blob/main/src/client/src/components/(playground)/request/components/timeline-view.tsx)\n\n## TypeScript SDK Data Flow\n\n### LlamaIndex Instrumentation\n\nThe TypeScript SDK provides similar capabilities for JavaScript/TypeScript applications, particularly for LlamaIndex integration.\n\n**LlamaIndex Traced Operations:**\n\n| Operation | Semantic Convention | Description |\n|-----------|---------------------|-------------|\n| `document_load` | `retrieve` | Document loading operations |\n| `document_split` | `framework` | Text splitting/splitting |\n| `retriever_retrieve` 
| `retrieve` | Retrieval operations |\n| `query_engine_query` | `retrieve` | Query execution |\n| `response_synthesize` | `chat` | Response generation |\n\n**资料来源：** [sdk/typescript/src/instrumentation/llamaindex/index.ts](https://github.com/openlit/openlit/blob/main/sdk/typescript/src/instrumentation/llamaindex/index.ts)\n\n### TypeScript Initialization Pattern\n\n```typescript\nimport openlit from 'openlit';\n\n// Initialize with OTLP endpoint\nopenlit.init({\n  otlpEndpoint: \"http://127.0.0.1:4318\"\n});\n\n// Or use environment variable\n// OTEL_EXPORTER_OTLP_ENDPOINT=\"http://127.0.0.1:4318\"\n```\n\n## Environment Configuration\n\n### Data Flow Configuration Options\n\n| Environment Variable | Default | Purpose |\n|---------------------|---------|---------|\n| `OTEL_EXPORTER_OTLP_ENDPOINT` | `http://127.0.0.1:4318` | OTLP gRPC endpoint |\n| `OTEL_EXPORTER_OTLP_PROTOCOL` | `grpc` | Protocol (grpc/http/proto) |\n| `OTEL_SERVICE_NAME` | `default` | Service identification |\n| `OTEL_EXPORTER_OTLP_HEADERS` | - | Authentication headers |\n\n**资料来源：** [src/client/src/app/(playground)/getting-started/page.tsx](https://github.com/openlit/openlit/blob/main/src/client/src/app/(playground)/getting-started/page.tsx)\n\n## Data Management Best Practices\n\n### Efficient Data Handling\n\n1. **Batching**: Spans are batched before export to reduce network overhead\n2. **Sampling**: Configure appropriate sampling rates for high-volume applications\n3. **Filtering**: Apply attribute filters at the query layer to reduce data transfer\n4. 
**Pagination**: Use paginated queries for large result sets\n\n### Error Handling Flow\n\n```mermaid\ngraph TD\n    A[Span Error] --> B[Record Exception]\n    B --> C[Set Span Status ERROR]\n    C --> D[Record Error Metrics]\n    D --> E[Export Span]\n    E --> F{Backend Available?}\n    F -->|Yes| G[Store Data]\n    F -->|No| H[Retry Queue]\n    H -->|Retry| G\n```\n\nThe error handling ensures that even when backend connectivity fails, error information is preserved for debugging.\n\n## Summary\n\nThe data flow in OpenLIT follows a well-structured pipeline from SDK instrumentation through to frontend visualization. Key aspects include:\n\n- **Unified Telemetry**: Both traces and metrics are collected through OpenTelemetry SDKs\n- **Framework Integration**: Multiple LLM frameworks are automatically instrumented\n- **Efficient Export**: OTLP protocol ensures standardized data transfer\n- **Flexible Storage**: ClickHouse provides scalable storage and querying\n- **Responsive UI**: The playground client efficiently fetches and displays telemetry data\n\nThis architecture enables comprehensive observability for GenAI applications while maintaining performance and scalability through batching, caching, and pagination strategies.\n\n---\n\n<a id='python-sdk'></a>\n\n## Python SDK Architecture\n\n### 相关页面\n\n相关主题：[TypeScript SDK Architecture](#typescript-sdk), [Go SDK Architecture](#go-sdk), [LLM and Framework Integrations](#integrations)\n\n<details>\n<summary>相关源码文件</summary>\n\n以下源码文件用于生成本页说明：\n\n- [sdk/python/src/openlit/__init__.py](https://github.com/openlit/openlit/blob/main/sdk/python/src/openlit/__init__.py)\n- [sdk/python/src/openlit/instrumentation/claude_agent_sdk/__init__.py](https://github.com/openlit/openlit/blob/main/sdk/python/src/openlit/instrumentation/claude_agent_sdk/__init__.py)\n- 
[sdk/python/src/openlit/instrumentation/claude_agent_sdk/claude_agent_sdk.py](https://github.com/openlit/openlit/blob/main/sdk/python/src/openlit/instrumentation/claude_agent_sdk/claude_agent_sdk.py)\n- [sdk/python/src/openlit/instrumentation/agent_framework/utils.py](https://github.com/openlit/openlit/blob/main/sdk/python/src/openlit/instrumentation/agent_framework/utils.py)\n- [sdk/python/src/openlit/guard/__init__.py](https://github.com/openlit/openlit/blob/main/sdk/python/src/openlit/guard/__init__.py)\n- [sdk/python/src/openlit/instrumentation/google_adk/utils.py](https://github.com/openlit/openlit/blob/main/sdk/python/src/openlit/instrumentation/google_adk/utils.py)\n</details>\n\n# Python SDK Architecture\n\n## 概述\n\nOpenLIT Python SDK 是一个 OpenTelemetry 原生的 GenAI 和 LLM 应用可观测性工具。该 SDK 通过自动插桩框架集成到各种 AI 应用中，自动捕获 OpenTelemetry traces 和 metrics，无需手动埋点。\n\n核心职责包括：\n\n- 自动插桩主流 AI SDK（OpenAI、Anthropic、LangChain、CrewAI 等）\n- 遵循 OTel GenAI 语义约定（Semantic Conventions）\n- 提供基于 OpenTelemetry 的 tracing 和 metrics 收集\n- 实现生产级 guardrails（内容安全、审计）\n\n资料来源：[sdk/python/src/openlit/instrumentation/claude_agent_sdk/__init__.py:1-15]()\n\n## 核心架构组件\n\n```mermaid\ngraph TD\n    subgraph \"OpenLIT Python SDK\"\n        A[\"openlit.init()\"]\n        B[\"Instrumentors<br/>BaseInstrumentor\"]\n        C[\"Guard System\"]\n        D[\"OTel Layer\"]\n    end\n    \n    subgraph \"Instrumented Frameworks\"\n        E[\"OpenAI\"]\n        F[\"Anthropic\"]\n        G[\"Claude Agent SDK\"]\n        H[\"LangChain / CrewAI\"]\n        I[\"Google ADK\"]\n        J[\"Agent Framework\"]\n    end\n    \n    subgraph \"OpenTelemetry Backend\"\n        K[\"OTLP Exporter\"]\n        L[\"Traces\"]\n        M[\"Metrics\"]\n    end\n    \n    A --> B\n    A --> C\n    B --> D\n    C --> D\n    D --> K\n    K --> L\n    K --> M\n    \n    B --> E\n    B --> F\n    B --> G\n    B --> H\n    B --> I\n    B --> J\n```\n\n### 组件说明\n\n| 组件 | 位置 | 职责 |\n|------|------|------|\n| **Instrumentors** | 
`openlit.instrumentation.*` | 各 AI 框架的自动插桩实现 |\n| **Guard System** | `openlit.guard.*` | 内容安全、审计和合规检查 |\n| **OTel Layer** | `openlit.otel.*` | OpenTelemetry traces 和 metrics 的核心实现 |\n| **Config** | `openlit._config` | 全局配置管理和指标字典 |\n| **Semcov** | `openlit.semcov` | GenAI 语义约定常量定义 |\n\n## 初始化流程\n\n### Python SDK 初始化\n\n```python\nimport openlit\n\nopenlit.init(otlp_endpoint=\"http://127.0.0.1:4318\")\n```\n\n初始化时 SDK 执行以下操作：\n\n1. 配置 OpenTelemetry tracer provider\n2. 加载全局配置（环境、应用名称、指标开关）\n3. 注入所有依赖的 instrumentors\n4. 初始化 guard pipeline（如配置）\n\n资料来源：[sdk/python/src/openlit/instrumentation/claude_agent_sdk/__init__.py:30-42]()\n\n### 配置参数\n\n| 参数 | 类型 | 默认值 | 说明 |\n|------|------|--------|------|\n| `otlp_endpoint` | str | `\"http://127.0.0.1:4318\"` | OTLP HTTP endpoint |\n| `environment` | str | `\"default\"` | 部署环境标识 |\n| `application_name` | str | `\"default\"` | 应用名称 |\n| `pricing_info` | dict | `{}` | 模型定价信息 |\n| `capture_message_content` | bool | `True` | 是否捕获消息内容 |\n| `metrics` | dict | None | 指标配置字典 |\n| `disable_metrics` | bool | None | 禁用指标收集 |\n| `guards` | list | None | Guard 配置列表 |\n\n## 插桩系统架构\n\n### BaseInstrumentor 模式\n\n所有框架插桩器继承自 `BaseInstrumentor`，采用统一模式：\n\n```python\nclass ClaudeAgentSDKInstrumentor(BaseInstrumentor):\n    def instrumentation_dependencies(self) -> Collection[str]:\n        return _instruments  # 如 (\"claude-agent-sdk >= 0.1.0\",)\n    \n    def _instrument(self, **kwargs):\n        # 1. 获取 tracer 和配置\n        tracer = trace.get_tracer(__name__)\n        \n        # 2. 
使用 wrapt 包装目标函数\n        wrap_function_wrapper(\n            \"module.path\",\n            \"function_name\",\n            wrap_query\n        )\n```\n\n资料来源：[sdk/python/src/openlit/instrumentation/claude_agent_sdk/__init__.py:27-45]()\n\n### 插桩覆盖范围\n\n| 框架 | 支持版本 | 追踪操作 |\n|------|----------|----------|\n| Claude Agent SDK | >= 0.1.0 | `invoke_agent`, `execute_tool` |\n| Google ADK | - | `execute_tool` |\n| Agent Framework | - | `agent_init`, `agent_run`, `tool_execute`, `workflow_run` |\n| CrewAI | - | Agent 和 Tool 调用 |\n| LangGraph | - | Graph 节点执行 |\n\n### Span 命名规范\n\n遵循 OTel GenAI 语义约定生成规范化的 span 名称：\n\n| 操作类型 | Span 名称格式 | 示例 |\n|----------|---------------|------|\n| Agent 创建 | `create_agent {name}` | `create_agent my_agent` |\n| Agent 调用 | `invoke_agent {name}` | `invoke_agent my_agent` |\n| Tool 执行 | `execute_tool {name}` | `execute_tool calculator` |\n| Workflow | `invoke_workflow {name}` | `invoke_workflow pipeline` |\n\n资料来源：[sdk/python/src/openlit/instrumentation/agent_framework/utils.py:1-60]()\n\n### 语义约定属性\n\n所有 span 遵循 `gen_ai.*` 语义约定：\n\n| 属性键 | 说明 | 示例值 |\n|--------|------|--------|\n| `gen_ai.operation.name` | 操作类型 | `invoke_agent`, `execute_tool` |\n| `gen_ai.operation.type` | 操作分类 | `agent`, `tool` |\n| `gen_ai.system` | AI 系统 | `openai`, `anthropic`, `google.adk` |\n| `gen_ai.provider.name` | 提供商名称 | `google` |\n| `gen_ai.tool.name` | 工具名称 | `calculator` |\n| `gen_ai.tool.type` | 工具类型 | `function` |\n| `gen_ai.tool.description` | 工具描述 | Truncated 描述文本 |\n| `gen_ai.tool.call.arguments` | 工具调用参数 | JSON 字符串 |\n\n资料来源：[sdk/python/src/openlit/instrumentation/google_adk/utils.py:1-50]()\n\n## Guard 系统\n\nOpenLIT 提供生产级 guardrails 用于 LLM 应用安全：\n\n```python\nimport openlit\n\nopenlit.init(guards=[openlit.PII(action=\"redact\")])\n```\n\n### 可用 Guard 类型\n\n| Guard 类 | 位置 | 功能 |\n|----------|------|------|\n| `PII` | `openlit.guard.pii` | 个人身份信息检测和脱敏 |\n| `PromptInjection` | `openlit.guard.prompt_injection` | 提示注入攻击检测 |\n| `SensitiveTopic` | 
`openlit.guard.sensitive_topic` | 敏感话题检测 |\n| `TopicRestriction` | `openlit.guard.topic_restriction` | 话题限制 |\n| `Moderation` | `openlit.guard.moderation` | 内容审核 |\n| `Schema` | `openlit.guard.schema` | 输出结构验证 |\n| `Custom` | `openlit.guard.custom` | 自定义 guard 逻辑 |\n\n### Guard 核心类型\n\n```python\nfrom openlit.guard import (\n    Guard,\n    GuardAction,\n    GuardConfigError,\n    GuardDeniedError,\n    GuardPhase,\n    GuardResult,\n    GuardTimeoutError,\n    PipelineResult,\n)\n```\n\n| 类型 | 说明 |\n|------|------|\n| `Guard` | Base guard 类 |\n| `GuardAction` | Guard 执行动作 |\n| `GuardPhase` | 执行阶段（pre/post） |\n| `GuardResult` | Guard 执行结果 |\n| `PipelineResult` | Pipeline 聚合结果 |\n\n资料来源：[sdk/python/src/openlit/guard/__init__.py:1-60]()\n\n### Pipeline 机制\n\nGuard 使用 Pipeline 模式按序执行多个 guard：\n\n```python\nfrom openlit.guard import Pipeline\n\npipeline = Pipeline([\n    PII(action=\"redact\"),\n    PromptInjection(threshold=0.8),\n    Moderation()\n])\n```\n\n## Claude Agent SDK 插桩详解\n\n### 架构设计\n\n```mermaid\nsequenceDiagram\n    participant User as User Code\n    participant SDK as Claude Agent SDK\n    participant Wrap as wrap_query\n    participant Hook as _ToolSpanTracker\n    participant Span as OTel Span\n    \n    User->>SDK: query(...)\n    SDK->>Wrap: invoke wrapper\n    Wrap->>Span: create invoke_agent span\n    Wrap->>SDK: proceed with query\n    SDK->>Hook: PreToolUse event\n    Hook->>Span: create execute_tool span\n    SDK->>Hook: PostToolUse event\n    Hook->>Span: finalize tool span\n    SDK-->>Wrap: response\n    Wrap->>Span: finalize agent span\n    Wrap-->>User: return response\n```\n\n### Tool Span 追踪\n\n使用 `_ToolSpanTracker` 管理 in-flight tool spans：\n\n```python\nclass _ToolSpanTracker:\n    \"\"\"Manages in-flight tool spans created by SDK hooks.\"\"\"\n    \n    def __init__(\n        self,\n        tracer,\n        parent_span,\n        version,\n        environment,\n        application_name,\n        capture_message_content\n    ):\n        
# 初始化追踪器\n```\n\n### Fallback 机制\n\n当 SDK hooks 无法注入时，使用消息流回退方案：\n\n```python\n# 检查 hooks 是否已注入\nif hasattr(client, _HOOKS_INJECTED_ATTR):\n    # 使用 hooks 追踪\nelse:\n    # 使用消息流追踪\n```\n\n资料来源：[sdk/python/src/openlit/instrumentation/claude_agent_sdk/claude_agent_sdk.py:1-80]()\n\n## OpenTelemetry 集成\n\n### Tracing 实现\n\nSDK 使用 OpenTelemetry Python API 创建 spans：\n\n```python\nfrom opentelemetry import trace as trace_api\nfrom opentelemetry.trace import SpanKind, Status, StatusCode\n\ntracer = trace.get_tracer(__name__)\n\nwith tracer.start_as_current_span(\n    name=\"invoke_agent\",\n    kind=SpanKind.CLIENT\n) as span:\n    span.set_attribute(...)\n    # 执行操作\n```\n\n### Metrics 实现\n\n支持以下指标类型：\n\n| 指标类型 | 指标名称 | 说明 |\n|----------|----------|------|\n| Counter | `gen_ai.*.token_usage` | Token 使用计数 |\n| Histogram | `gen_ai.*.duration` | 请求耗时分布 |\n| Gauge | - | 当前活跃请求数 |\n\n### 语义约定常量\n\n所有语义约定常量集中定义在 `openlit.semcov` 模块：\n\n```python\nclass SemanticConvention:\n    GEN_AI_OPERATION = \"gen_ai.operation.name\"\n    GEN_AI_SYSTEM = \"gen_ai.system\"\n    GEN_AI_TOOL_NAME = \"gen_ai.tool.name\"\n    GEN_AI_TOOL_TYPE = \"gen_ai.tool.type\"\n    GEN_AI_SYSTEM_VALUE = \"gen_ai.system.openai\"\n```\n\n## 错误处理\n\n### Exception 传播\n\nSDK 使用统一的异常处理机制：\n\n```python\nfrom openlit.__helpers import handle_exception\n\ndef some_wrapper(func, *args, **kwargs):\n    try:\n        return func(*args, **kwargs)\n    except Exception as e:\n        handle_exception(span, e)\n        raise\n```\n\n### Guard 特定错误\n\n| 错误类型 | 说明 |\n|----------|------|\n| `GuardError` | 基础 guard 错误 |\n| `GuardDeniedError` | Guard 拒绝请求 |\n| `GuardTimeoutError` | Guard 执行超时 |\n| `GuardConfigError` | Guard 配置错误 |\n\n## 使用示例\n\n### 基础集成\n\n```python\nfrom openai import OpenAI\nimport openlit\n\nopenlit.init(otlp_endpoint=\"http://127.0.0.1:4318\")\n\nclient = OpenAI(api_key=\"YOUR_OPENAI_KEY\")\n\nchat_completion = client.chat.completions.create(\n    messages=[{\"role\": \"user\", \"content\": \"Hello!\"}],\n 
   model=\"gpt-3.5-turbo\"\n)\n```\n\n### 带 Guard 的集成\n\n```python\nimport openlit\nfrom openlit.guard import PII, PromptInjection\n\nopenlit.init(\n    otlp_endpoint=\"http://127.0.0.1:4318\",\n    guards=[\n        PII(action=\"redact\"),\n        PromptInjection(threshold=0.7)\n    ]\n)\n```\n\n### 环境变量配置\n\n```bash\nexport OTEL_EXPORTER_OTLP_ENDPOINT=\"http://127.0.0.1:4318\"\n```\n\n```python\nimport openlit\n\nopenlit.init()  # 自动读取环境变量\n```\n\n## 扩展开发\n\n### 自定义 Instrumentor\n\n```python\nfrom opentelemetry.instrumentation.instrumentor import BaseInstrumentor\nfrom wrapt import wrap_function_wrapper\n\nclass CustomSDKInstrumentor(BaseInstrumentor):\n    def instrumentation_dependencies(self):\n        return (\"custom-sdk >= 1.0.0\",)\n    \n    def _instrument(self, **kwargs):\n        tracer = kwargs.get(\"tracer\")\n        wrap_function_wrapper(\n            \"custom_sdk\",\n            \"Client.query\",\n            wrap_custom_query\n        )\n```\n\n### 自定义 Guard\n\n```python\nfrom openlit.guard import Guard, GuardAction, GuardResult\n\nclass CustomGuard(Guard):\n    def _evaluate(self, text: str) -> GuardResult:\n        # 自定义检测逻辑\n        if \"forbidden\" in text.lower():\n            return GuardResult(\n                action=GuardAction.DENY,\n                reason=\"Forbidden content detected\"\n            )\n        return GuardResult(action=GuardAction.ALLOW)\n```\n\n---\n\n<a id='typescript-sdk'></a>\n\n## TypeScript SDK Architecture\n\n### 相关页面\n\n相关主题：[Python SDK Architecture](#python-sdk), [Go SDK Architecture](#go-sdk), [LLM and Framework Integrations](#integrations)\n\n<details>\n<summary>Related Source Files</summary>\n\n以下源码文件用于生成本页说明：\n\n- [sdk/typescript/src/index.ts](https://github.com/openlit/openlit/blob/main/sdk/typescript/src/index.ts)\n- [sdk/typescript/src/config.ts](https://github.com/openlit/openlit/blob/main/sdk/typescript/src/config.ts)\n- 
[sdk/typescript/src/instrumentation/index.ts](https://github.com/openlit/openlit/blob/main/sdk/typescript/src/instrumentation/index.ts)\n- [sdk/typescript/src/guard/index.ts](https://github.com/openlit/openlit/blob/main/sdk/typescript/src/guard/index.ts)\n- [sdk/typescript/package.json](https://github.com/openlit/openlit/blob/main/sdk/typescript/package.json)\n\n</details>\n\n# TypeScript SDK Architecture\n\n## Overview\n\nThe OpenLIT TypeScript SDK provides an OpenTelemetry-native observability solution for GenAI and LLM applications. It enables developers to instrument their TypeScript/JavaScript applications with automatic tracing and metrics collection, forwarding telemetry data to OpenLIT or any OTLP-compatible backend.\n\n**Key Characteristics:**\n\n| Attribute | Value |\n|-----------|-------|\n| Package Name | `openlit` |\n| Installation | `npm install openlit` |\n| Entry Point | `sdk/typescript/src/index.ts` |\n| Primary Dependency | OpenTelemetry SDK |\n| Transport Protocol | OTLP (OpenTelemetry Protocol) |\n\n资料来源：[sdk/typescript/package.json](https://github.com/openlit/openlit/blob/main/sdk/typescript/package.json)\n\n## Core Architecture\n\nThe SDK follows a modular architecture with clear separation of concerns:\n\n```mermaid\ngraph TD\n    A[Application Code] --> B[openlit.init]\n    B --> C[Config Module]\n    C --> D[Instrumentation Module]\n    D --> E[Guard Module]\n    E --> F[OTLP Exporter]\n    F --> G[OpenLIT Backend / OTEL Collector]\n    \n    C --> C1[OTLP Endpoint]\n    C --> C2[Custom Attributes]\n    C --> C3[Service Name]\n    \n    D --> D1[LLM Instrumentation]\n    D --> D2[Vector DB Instrumentation]\n    D --> D3[Framework Hooks]\n```\n\n### Entry Point Module\n\nThe main entry point (`index.ts`) exposes a simple initialization API:\n\n```typescript\nimport openlit from 'openlit';\n\nopenlit.init({\n  otlpEndpoint: 
\"http://127.0.0.1:4318\"\n});\n```\n\n资料来源：[sdk/typescript/src/index.ts](https://github.com/openlit/openlit/blob/main/sdk/typescript/src/index.ts)\n\n### Configuration Module\n\nThe config module (`config.ts`) handles SDK configuration including:\n\n| Parameter | Type | Default | Description |\n|-----------|------|---------|-------------|\n| `otlpEndpoint` | `string` | Environment variable `OTEL_EXPORTER_OTLP_ENDPOINT` | OTLP-compatible endpoint URL |\n| `serviceName` | `string` | Application-defined | Name of the instrumented service |\n| `resourceAttributes` | `Record<string, string>` | `{}` | Custom resource attributes |\n\n资料来源：[sdk/typescript/src/config.ts](https://github.com/openlit/openlit/blob/main/sdk/typescript/src/config.ts)\n\n## Instrumentation Subsystem\n\nThe instrumentation module (`instrumentation/index.ts`) provides automatic observability for AI workloads:\n\n### Supported Integrations\n\n| Category | Instrumented Components |\n|----------|-------------------------|\n| LLM Providers | OpenAI, Anthropic, Azure OpenAI, Google AI, AWS Bedrock, Cohere, Ollama |\n| Vector Databases | ChromaDB, Pinecone, Weaviate, Qdrant, Milvus, PGVector |\n| Frameworks | LangChain, LlamaIndex, LangFlow, AutoGen |\n\n资料来源：[sdk/typescript/src/instrumentation/index.ts](https://github.com/openlit/openlit/blob/main/sdk/typescript/src/instrumentation/index.ts)\n\n### Tracing Capabilities\n\nThe SDK automatically captures:\n\n- **LLM Request/Response traces** with prompt and completion data\n- **Token usage metrics** (prompt tokens, completion tokens, total tokens)\n- **Latency measurements** for API calls\n- **Embeddings generation traces** with vector dimensions\n- **Tool/function calling traces** with parameters and results\n\n## Guard Module\n\nThe guard module (`guard/index.ts`) provides safety and compliance features:\n\n```typescript\nimport { openlit } from 'openlit';\n\n// Initialize with guardrails\nopenlit.init({\n  otlpEndpoint: 
\"http://127.0.0.1:4318\"\n});\n```\n\nGuard capabilities include:\n\n- Input/output validation for LLM interactions\n- Content filtering hooks\n- Rate limiting enforcement\n- Custom rule application\n\n资料来源：[sdk/typescript/src/guard/index.ts](https://github.com/openlit/openlit/blob/main/sdk/typescript/src/guard/index.ts)\n\n## Initialization Flow\n\n```mermaid\nsequenceDiagram\n    participant App as Application\n    participant SDK as OpenLIT SDK\n    participant Config as Config Module\n    participant Inst as Instrumentation\n    participant OTEL as OTEL SDK\n    \n    App->>SDK: openlit.init(options)\n    SDK->>Config: Validate & merge config\n    Config->>Config: Check env vars\n    Config-->>SDK: Resolved config\n    SDK->>OTEL: Initialize OTEL SDK\n    SDK->>Inst: Register instrumentations\n    Inst->>OTEL: Add span processors\n    OTEL-->>SDK: Ready\n    SDK-->>App: Initialization complete\n```\n\n## Environment Variable Support\n\nThe SDK supports configuration via environment variables as an alternative to programmatic configuration:\n\n| Environment Variable | Description |\n|----------------------|-------------|\n| `OTEL_EXPORTER_OTLP_ENDPOINT` | OTLP endpoint URL |\n| `OTEL_SERVICE_NAME` | Service name for traces |\n\n资料来源：[src/client/src/components/(playground)/getting-started/tracing/index.tsx:42](https://github.com/openlit/openlit/blob/main/src/client/src/components/(playground)/getting-started/tracing/index.tsx)\n\n## Usage Patterns\n\n### Basic Initialization\n\n```typescript\nimport openlit from 'openlit';\n\nopenlit.init({\n  otlpEndpoint: \"http://127.0.0.1:4318\"\n});\n```\n\n### OpenAI Integration Example\n\n```typescript\nimport OpenAI from 'openai';\nimport openlit from 'openlit';\n\nopenlit.init({ otlpEndpoint: \"http://127.0.0.1:4318\" });\n\nconst client = new OpenAI({\n  apiKey: process.env.OPENAI_API_KEY\n});\n\nconst chatCompletion = await client.chat.completions.create({\n  messages: [{ role: 'user', content: 'What is LLM 
Observability?' }],\n  model: 'gpt-3.5-turbo',\n});\n```\n\n资料来源：[src/client/src/components/(playground)/getting-started/tracing/index.tsx:28-39](https://github.com/openlit/openlit/blob/main/src/client/src/components/(playground)/getting-started/tracing/index.tsx)\n\n## Package Dependencies\n\nKey dependencies in `package.json`:\n\n```json\n{\n  \"dependencies\": {\n    \"@opentelemetry/sdk-node\": \"^0.50.0\",\n    \"@opentelemetry/exporter-trace-otlp-http\": \"^0.50.0\",\n    \"@opentelemetry/resources\": \"^1.22.0\",\n    \"@opentelemetry/semantic-conventions\": \"^1.22.0\"\n  }\n}\n```\n\n资料来源：[sdk/typescript/package.json](https://github.com/openlit/openlit/blob/main/sdk/typescript/package.json)\n\n## Design Principles\n\n1. **Zero-Configuration Defaults**: The SDK works out-of-the-box with sensible defaults\n2. **OpenTelemetry Native**: Built on OTEL SDK for vendor-agnostic telemetry export\n3. **Automatic Instrumentation**: No code changes required for supported libraries\n4. **Environment Variable Fallback**: Configuration can be entirely environment-based\n5. **Minimal Footprint**: Instrumentation adds minimal latency overhead\n\n## Summary\n\nThe OpenLIT TypeScript SDK architecture provides a developer-friendly interface for adding observability to GenAI applications. By abstracting OpenTelemetry complexity and providing automatic instrumentation for popular LLM providers and vector databases, it enables comprehensive telemetry collection with minimal configuration. 
The SDK exports all data via OTLP, ensuring compatibility with OpenLIT's backend as well as any other OTEL-compatible observability platform.\n\n---\n\n<a id='go-sdk'></a>\n\n## Go SDK Architecture\n\n### 相关页面\n\n相关主题：[Python SDK Architecture](#python-sdk), [TypeScript SDK Architecture](#typescript-sdk)\n\n<details>\n<summary>相关源码文件</summary>\n\n以下源码文件用于生成本页说明：\n\n- [sdk/go/README.md](https://github.com/openlit/openlit/blob/main/sdk/go/README.md)\n- [sdk/go/go.mod](https://github.com/openlit/openlit/blob/main/sdk/go/go.mod)\n</details>\n\n# Go SDK Architecture\n\n## Overview\n\nThe OpenLIT Go SDK is a lightweight instrumentation library that enables observability for GenAI applications built with Go. It provides automatic tracing and metrics collection for LLM calls, supporting OpenAI and Anthropic providers out of the box. The SDK follows OpenTelemetry-native principles, allowing seamless integration with the OpenLIT observability platform.\n\n## Core Components\n\nThe Go SDK is organized into several key packages:\n\n| Component | Purpose |\n|-----------|---------|\n| `openlit` | Core initialization, configuration, and shutdown |\n| `openlit.Config` | Central configuration struct for SDK settings |\n| `openlit.EvaluateRule()` | Standalone rule engine evaluation function |\n| `instrumentation/openai` | OpenAI client instrumentation |\n| `instrumentation/anthropic` | Anthropic client instrumentation |\n\n## Initialization Flow\n\nThe SDK must be initialized before instrumenting any LLM clients. 
The initialization process configures the OTLP endpoint and establishes the connection to the OpenLIT backend.\n\n```go\nerr := openlit.Init(openlit.Config{\n    OtlpEndpoint:    \"http://127.0.0.1:4318\",\n    Environment:     \"production\",\n    ApplicationName: \"my-go-app\",\n})\nif err != nil {\n    log.Fatalf(\"Failed to initialize OpenLIT: %v\", err)\n}\ndefer openlit.Shutdown(context.Background())\n```\n\n资料来源：[sdk/go/README.md](https://github.com/openlit/openlit/blob/main/sdk/go/README.md)\n\n## Configuration Options\n\nThe `openlit.Config` struct provides the following configuration parameters:\n\n| Parameter | Type | Description |\n|-----------|------|-------------|\n| `OtlpEndpoint` | `string` | OTLP collector endpoint (default: `http://127.0.0.1:4318`) |\n| `Environment` | `string` | Deployment environment name |\n| `ApplicationName` | `string` | Application identifier for grouping traces |\n| `PricingInfo` | `map[string]ModelPricing` | Custom pricing configuration per model |\n| `OtlpHeaders` | `map[string]string` | Custom headers for OTLP exports |\n\n### Custom Pricing Configuration\n\nThe SDK supports custom pricing information for models that require non-default cost calculations:\n\n```go\nconfig := openlit.Config{\n    PricingInfo: map[string]openlit.ModelPricing{\n        \"gpt-4-custom\": {\n            InputCostPerToken:  0.00003,\n            OutputCostPerToken: 0.00006,\n        },\n    },\n}\n```\n\n资料来源：[sdk/go/README.md](https://github.com/openlit/openlit/blob/main/sdk/go/README.md)\n\n### Custom Headers for OTLP Exports\n\nAuthentication and custom headers can be added to OTLP exports:\n\n```go\nconfig := openlit.Config{\n    OtlpHeaders: map[string]string{\n        \"Authorization\": \"Bearer token\",\n        \"X-Custom-Header\": \"value\",\n    },\n}\n```\n\n资料来源：[sdk/go/README.md](https://github.com/openlit/openlit/blob/main/sdk/go/README.md)\n\n## Instrumentation Architecture\n\nThe SDK uses a decorator/wrapper pattern for 
instrumenting LLM clients. This approach allows automatic tracing without modifying the original client interface.\n\n```mermaid\ngraph TD\n    A[User Application] --> B[Instrumented Client]\n    B --> C[Original SDK Client]\n    B --> D[OpenLIT Tracer]\n    D --> E[OTLP Exporter]\n    E --> F[OpenLIT Backend]\n    C --> G[LLM Provider API]\n    G --> C\n```\n\n### OpenAI Instrumentation\n\nThe OpenAI instrumentation wraps the `sashabaranov/go-openai` client:\n\n```go\nimport (\n    \"github.com/openlit/openlit/sdk/go/instrumentation/openai\"\n    openai_sdk \"github.com/sashabaranov/go-openai\"\n)\n\n// Create and instrument OpenAI client\nclient := openai_sdk.NewClient(\"your-api-key\")\ninstrumentedClient := openai.Instrument(client)\n\n// Use as normal - automatically traced!\nresp, err := instrumentedClient.CreateChatCompletion(ctx, openai_sdk.ChatCompletionRequest{\n    Model: openai_sdk.GPT4,\n    Messages: []openai_sdk.ChatCompletionMessage{\n        {\n            Role:    openai_sdk.ChatMessageRoleUser,\n            Content: \"Hello!\",\n        },\n    },\n})\n```\n\n资料来源：[sdk/go/README.md](https://github.com/openlit/openlit/blob/main/sdk/go/README.md)\n\n### Anthropic Instrumentation\n\nThe Anthropic instrumentation follows the same pattern:\n\n```go\nimport (\n    \"github.com/openlit/openlit/sdk/go/instrumentation/anthropic\"\n)\n\n// Create and instrument Anthropic client\nclient := anthropic.NewClient(\"your-api-key\")\ninstrumentedClient := anthropic.Instrument(client)\n```\n\n## Rule Engine Integration\n\nThe SDK provides a standalone rule evaluation function that does not require initialization:\n\n```go\n// EvaluateRule does NOT require openlit.Init()\nrules, err := openlit.EvaluateRule(ctx, &openlit.EvaluateRuleRequest{\n    TraceAttributes: attributes,\n})\n```\n\nThis function evaluates trace attributes against the OpenLIT Rule Engine to retrieve matching rules and associated entities including contexts, prompts, and evaluation 
configurations.\n\n资料来源：[sdk/go/README.md](https://github.com/openlit/openlit/blob/main/sdk/go/README.md)\n\n## Integration with OpenLIT Dashboard\n\nThe complete observability workflow involves:\n\n1. **Start OpenLIT Stack**: Deploy using Docker Compose\n   ```bash\n   docker compose up -d\n   ```\n\n2. **Configure SDK**: Initialize the Go SDK with the OTLP endpoint\n   ```go\n   openlit.Init(openlit.Config{\n       OtlpEndpoint: \"http://localhost:4318\",\n   })\n   ```\n\n3. **View Traces**: Access the dashboard at `http://localhost:3000`\n\n资料来源：[sdk/go/README.md](https://github.com/openlit/openlit/blob/main/sdk/go/README.md)\n\n## Example Projects\n\nThe SDK includes complete working examples in the `examples/` directory:\n\n| Example | Path |\n|---------|------|\n| OpenAI Chat Completion | `examples/openai/chat/` |\n| OpenAI Streaming | `examples/openai/streaming/` |\n| Anthropic Messages | `examples/anthropic/messages/` |\n| Anthropic Streaming | `examples/anthropic/streaming/` |\n\n## Module Dependencies\n\nThe Go SDK depends on core OpenTelemetry packages for trace export and propagation:\n\n- OpenTelemetry OTLP exporter\n- OpenTelemetry trace propagation\n- Context propagation utilities\n\n资料来源：[sdk/go/go.mod](https://github.com/openlit/openlit/blob/main/sdk/go/go.mod)\n\n---\n\n<a id='integrations'></a>\n\n## LLM and Framework Integrations\n\n### 相关页面\n\n相关主题：[Python SDK Architecture](#python-sdk), [TypeScript SDK Architecture](#typescript-sdk)\n\n<details>\n<summary>Relevant Source Files</summary>\n\n以下源码文件用于生成本页说明：\n\n- [sdk/python/src/openlit/instrumentation/claude_agent_sdk/__init__.py](https://github.com/openlit/openlit/blob/main/sdk/python/src/openlit/instrumentation/claude_agent_sdk/__init__.py)\n- [sdk/python/src/openlit/instrumentation/llamaindex/utils.py](https://github.com/openlit/openlit/blob/main/sdk/python/src/openlit/instrumentation/llamaindex/utils.py)\n- 
[sdk/python/src/openlit/_config.py](https://github.com/openlit/openlit/blob/main/sdk/python/src/openlit/_config.py)\n- [sdk/python/src/openlit/__helpers.py](https://github.com/openlit/openlit/blob/main/sdk/python/src/openlit/__helpers.py)\n- [sdk/python/src/openlit/guard/__init__.py](https://github.com/openlit/openlit/blob/main/sdk/python/src/openlit/guard/__init__.py)\n</details>\n\n# LLM and Framework Integrations\n\nOpenLIT provides comprehensive instrumentation for a wide range of LLMs and AI frameworks, enabling automatic OpenTelemetry-native observability for GenAI applications. This page documents the architecture, supported integrations, and implementation patterns.\n\n## Overview\n\nOpenLIT's instrumentation layer wraps SDK calls from various LLM providers and AI frameworks to automatically capture traces and metrics without requiring manual instrumentation code.\n\n### Supported Integrations\n\n| Category | Integration | Python SDK | TypeScript SDK | Go SDK |\n|----------|-------------|------------|----------------|--------|\n| **LLM Providers** | OpenAI | ✅ | ✅ | ✅ |\n| **LLM Providers** | Anthropic | ✅ | ✅ | ✅ |\n| **LLM Providers** | Azure OpenAI | ✅ | ✅ | ✅ |\n| **LLM Providers** | Vertex AI | ✅ | ✅ | ✅ |\n| **LLM Providers** | Mistral AI | ✅ | ✅ | ✅ |\n| **LLM Providers** | Cohere | ✅ | ✅ | ✅ |\n| **LLM Providers** | HuggingFace | ✅ | ✅ | ✅ |\n| **AI Frameworks** | LangChain | ✅ | ✅ | - |\n| **AI Frameworks** | LlamaIndex | ✅ | - | - |\n| **AI Frameworks** | CrewAI | ✅ | - | - |\n| **AI Frameworks** | LangGraph | ✅ | - | - |\n| **AI Frameworks** | Claude Agent SDK | ✅ | - | - |\n| **Vector Stores** | Pinecone | ✅ | - | - |\n| **Vector Stores** | Chroma | ✅ | - | - |\n| **Vector Stores** | Qdrant | ✅ | - | - |\n| **Vector Stores** | Weaviate | ✅ | - | - |\n\n资料来源：[sdk/python/README.md](https://github.com/openlit/openlit/blob/main/sdk/python/README.md)\n\n## Architecture\n\n### Instrumentation Pattern\n\nAll 
instrumentations follow a consistent pattern based on OpenTelemetry's `BaseInstrumentor` class:\n\n```mermaid\ngraph TD\n    A[Application Code] --> B[Instrumented SDK]\n    B --> C[Wrapper Function]\n    C --> D[OpenTelemetry Tracer]\n    C --> E[Metrics Recorder]\n    D --> F[OTLP Exporter]\n    E --> F\n    F --> G[OpenLIT Backend]\n```\n\n### Core Components\n\n| Component | Purpose | Location |\n|-----------|---------|----------|\n| `BaseInstrumentor` | Base class for all instrumentors | `opentelemetry.instrumentation.instrumentor` |\n| `wrap_function_wrapper` | Wraps SDK functions dynamically | `wrapt` library |\n| `OpenlitConfig` | Singleton configuration management | `sdk/python/src/openlit/_config.py` |\n| Semantic Conventions | Standardized attribute naming | `openlit.semcov` module |\n\n资料来源：[sdk/python/src/openlit/instrumentation/claude_agent_sdk/__init__.py:17-21](https://github.com/openlit/openlit/blob/main/sdk/python/src/openlit/instrumentation/claude_agent_sdk/__init__.py)\n\n## Python SDK Instrumentation\n\n### Instrumentor Base Class\n\nAll Python SDK instrumentors extend `BaseInstrumentor` and implement two required methods:\n\n```python\nclass ClaudeAgentSDKInstrumentor(BaseInstrumentor):\n    \"\"\"OTel GenAI semantic convention compliant instrumentor for Claude Agent SDK.\"\"\"\n\n    def instrumentation_dependencies(self) -> Collection[str]:\n        return _instruments  # e.g., (\"claude-agent-sdk >= 0.1.0\",)\n\n    def _instrument(self, **kwargs):\n        # Initialize tracer, config, and wrap functions\n```\n\n资料来源：[sdk/python/src/openlit/instrumentation/claude_agent_sdk/__init__.py:26-35](https://github.com/openlit/openlit/blob/main/sdk/python/src/openlit/instrumentation/claude_agent_sdk/__init__.py)\n\n### Initialization Parameters\n\nWhen calling `openlit.init()`, the following parameters are passed to all instrumentors:\n\n| Parameter | Type | Description | Default |\n|-----------|------|-------------|---------|\n| `environment` | 
`str` | Deployment environment name | `\"default\"` |\n| `application_name` | `str` | Application identifier | `\"default\"` |\n| `pricing_info` | `Dict[str, ModelPricing]` | Custom model pricing | `{}` |\n| `capture_message_content` | `bool` | Enable/disable content tracing | `True` |\n| `disable_metrics` | `bool` | Disable metrics collection | `None` |\n| `otlp_endpoint` | `str` | OTLP exporter endpoint | Configured endpoint |\n\n资料来源：[sdk/python/src/openlit/_config.py:20-35](https://github.com/openlit/openlit/blob/main/sdk/python/src/openlit/_config.py)\n\n### OpenlitConfig Singleton\n\nThe `OpenlitConfig` class manages centralized configuration:\n\n```python\nclass OpenlitConfig:\n    \"\"\"Singleton configuration class for OpenLIT.\"\"\"\n    \n    _instance = None\n    \n    # Class-level attributes\n    environment = \"default\"\n    application_name = \"default\"\n    pricing_info = {}\n    metrics_dict = {}\n    otlp_endpoint = None\n    otlp_headers = None\n    disable_batch = False\n    capture_message_content = True\n```\n\n资料来源：[sdk/python/src/openlit/_config.py:18-42](https://github.com/openlit/openlit/blob/main/sdk/python/src/openlit/_config.py)\n\n## LlamaIndex Integration\n\n### Operation Type Mapping\n\nThe LlamaIndex instrumentation uses a semantic convention-based operation mapping system:\n\n```mermaid\ngraph LR\n    A[Document Operations] --> B[RETRIEVE]\n    A --> C[FRAMEWORK]\n    D[Index Operations] --> C\n    E[Query Operations] --> B\n    F[Retriever Operations] --> B\n```\n\n### Supported Operations\n\n| Operation | Semantic Convention | Category |\n|-----------|---------------------|----------|\n| `document_load` | `RETRIEVE` | Document Loading |\n| `document_transform` | `FRAMEWORK` | Document Processing |\n| `document_split` | `FRAMEWORK` | Document Processing |\n| `index_construct` | `FRAMEWORK` | Index Management |\n| `index_insert` | `FRAMEWORK` | Index Management |\n| `query_engine_query` | `RETRIEVE` | Query Engine |\n| 
`retriever_retrieve` | `RETRIEVE` | Retrieval |\n\n资料来源：[sdk/python/src/openlit/instrumentation/llamaindex/utils.py:1-30](https://github.com/openlit/openlit/blob/main/sdk/python/src/openlit/instrumentation/llamaindex/utils.py)\n\n## Helper Functions\n\n### Building Tool Definitions\n\nThe `__helpers.py` module provides utilities for extracting tool definitions from chat requests:\n\n```python\ndef build_tool_definitions(tools):\n    \"\"\"\n    Extract tool/function definitions from a chat request's ``tools`` parameter.\n    \n    Supports both OpenAI-style schema and flat schema formats.\n    \"\"\"\n```\n\nSupported formats:\n\n| Format | Structure |\n|--------|-----------|\n| OpenAI-style | `{\"type\": \"function\", \"function\": {...}}` |\n| Flat (dict) | `{\"name\": ..., \"description\": ..., \"parameters\": ...}` |\n| Flat (object) | Object with `name`, `description`, `input_schema` attributes |\n\n资料来源：[sdk/python/src/openlit/__helpers.py:1-40](https://github.com/openlit/openlit/blob/main/sdk/python/src/openlit/__helpers.py)\n\n### System Instructions Builder\n\nExtracts and formats system instructions from various input formats:\n\n```python\ndef build_system_instructions(instructions, **kwargs):\n    \"\"\"Builds system instructions from various input formats.\"\"\"\n```\n\n## Guardrails Integration\n\nOpenLIT includes a production-grade guardrails system:\n\n### Available Guards\n\n| Guard Class | Purpose |\n|-------------|---------|\n| `PII` | Detect and redact Personally Identifiable Information |\n| `PromptInjection` | Detect prompt injection attacks |\n| `SensitiveTopic` | Filter sensitive topics |\n| `TopicRestriction` | Restrict to allowed topics |\n| `Moderation` | Content moderation |\n| `Schema` | Output schema validation |\n| `Custom` | Custom guard implementation |\n\n资料来源：[sdk/python/src/openlit/guard/__init__.py:1-30](https://github.com/openlit/openlit/blob/main/sdk/python/src/openlit/guard/__init__.py)\n\n### Guard 
Architecture\n\n```mermaid\ngraph TD\n    A[User Input] --> B[Pipeline]\n    B --> C[Guard 1: PII]\n    C --> D[Guard 2: PromptInjection]\n    D --> E[Guard N: Custom]\n    E --> F[GuardResult]\n    C -.->|Denied| G[GuardDeniedError]\n    D -.->|Timeout| H[GuardTimeoutError]\n```\n\n### Usage Example\n\n```python\nimport openlit\n\n# Initialize with guards\nopenlit.init(guards=[openlit.PII(action=\"redact\")])\n\n# Or with direct imports\nfrom openlit import PII, PromptInjection, Moderation\n\nguards = [PII(), PromptInjection(), Moderation()]\nopenlit.init(guards=guards)\n```\n\n## TypeScript SDK Instrumentation\n\n### Wrapper Pattern\n\nThe TypeScript SDK uses a similar wrapping pattern:\n\n```typescript\n// Wrapped in wrapper.ts for each integration\nexport function wrapOpenAI() {\n  // Wrap OpenAI SDK methods\n}\n```\n\n资料来源：[sdk/typescript/src/instrumentation/openai/wrapper.ts](https://github.com/openlit/openlit/blob/main/sdk/typescript/src/instrumentation/openai/wrapper.ts)\n\n### Initialization\n\n```typescript\nimport openlit from 'openlit';\n\nopenlit.init({\n  otlpEndpoint: \"http://127.0.0.1:4318\"\n});\n```\n\n## Configuration Reference\n\n### Environment Variables\n\n| Variable | Description | Example |\n|----------|-------------|---------|\n| `OTEL_EXPORTER_OTLP_ENDPOINT` | OTLP endpoint URL | `http://127.0.0.1:4318` |\n| `OTEL_EXPORTER_OTLP_HEADERS` | Authentication headers | `Authorization=Bearer token` |\n\n### SDK Configuration Options\n\n```python\nimport openlit\n\nopenlit.init(\n    otlp_endpoint=\"http://127.0.0.1:4318\",\n    otlp_headers={\"Authorization\": \"Bearer token\"},\n    environment=\"production\",\n    application_name=\"my-llm-app\",\n    pricing_info={\n        \"gpt-4\": {\"input_cost_per_token\": 0.00003, \"output_cost_per_token\": 0.00006}\n    },\n    capture_message_content=True\n)\n```\n\n## Best Practices\n\n### 1. 
Instrument Before Usage\n\nAlways initialize OpenLIT before importing instrumented SDKs:\n\n```python\n# Correct order\nimport openlit\nopenlit.init(otlp_endpoint=\"http://127.0.0.1:4318\")\n\nfrom openai import OpenAI  # Now automatically instrumented\n```\n\n### 2. Custom Pricing\n\nDefine custom pricing for accurate cost tracking:\n\n```python\nopenlit.init(\n    pricing_info={\n        \"custom-model\": {\n            \"input_cost_per_token\": 0.00001,\n            \"output_cost_per_token\": 0.00002\n        }\n    }\n)\n```\n\n### 3. Selective Content Capture\n\nDisable content capture for sensitive data:\n\n```python\nopenlit.init(\n    capture_message_content=False  # Won't trace message content\n)\n```\n\n## See Also\n\n- [OpenLIT Python SDK Documentation](https://github.com/openlit/openlit/tree/main/sdk/python)\n- [OpenLIT TypeScript SDK Documentation](https://github.com/openlit/openlit/tree/main/sdk/typescript)\n- [OpenLIT Go SDK Documentation](https://github.com/openlit/openlit/tree/main/sdk/go)\n- [OpenTelemetry Semantic Conventions](https://opentelemetry.io/docs/specs/otel/trace/semantic_conventions/)\n\n---\n\n<a id='controller'></a>\n\n## OpenLIT Controller\n\n### 相关页面\n\n相关主题：[GPU Collector](#gpu-collector)\n\n<details>\n<summary>相关源码文件</summary>\n\n以下源码文件用于生成本页说明：\n\n- [openlit-controller/cmd/controller/main.go](https://github.com/openlit/openlit/blob/main/openlit-controller/cmd/controller/main.go)\n- [openlit-controller/internal/engine/engine.go](https://github.com/openlit/openlit/blob/main/openlit-controller/internal/engine/engine.go)\n- [openlit-controller/internal/engine/lifecycle.go](https://github.com/openlit/openlit/blob/main/openlit-controller/internal/engine/lifecycle.go)\n- [openlit-controller/internal/engine/python_sdk_runtime.go](https://github.com/openlit/openlit/blob/main/openlit-controller/internal/engine/python_sdk_runtime.go)\n- 
[openlit-controller/internal/server/handlers.go](https://github.com/openlit/openlit/blob/main/openlit-controller/internal/server/handlers.go)\n- [openlit-controller/internal/scanner/scanner.go](https://github.com/openlit/openlit/blob/main/openlit-controller/internal/scanner/scanner.go)\n</details>\n\n# OpenLIT Controller\n\nThe OpenLIT Controller is a standalone, lightweight binary agent designed to automatically instrument Python-based LLM applications with OpenLIT's observability SDK. It operates as a background service that runs alongside your application, providing seamless OpenTelemetry-native tracing and metrics collection without requiring code modifications.\n\n## Overview\n\nThe Controller serves as an autonomous agent that:\n\n- **Discovers** Python applications running in various environments (bare metal, containers, Kubernetes)\n- **Injects** the OpenLIT Python SDK into target applications at runtime\n- **Manages** the lifecycle of instrumentation (enable, disable, status monitoring)\n- **Reports** service metadata back to the OpenLIT platform\n\n资料来源：[src/client/src/lib/platform/controller/features/agent.ts:1-60]()\n\n## Architecture\n\n```mermaid\ngraph TD\n    A[OpenLIT Platform] -->|Manage & Monitor| B[OpenLIT Controller]\n    B -->|Discover Services| C[Scanner Module]\n    B -->|Instrument Apps| D[Engine Module]\n    D -->|Python SDK Injection| E[Python Runtime]\n    E -->|Traces & Metrics| F[OpenTelemetry Collector]\n    \n    G[Kubernetes Pod] -->|Contains| H[Python Application]\n    H -->|Auto-instrumented by| D\n    \n    I[Linux Host] -->|Systemd Service| B\n```\n\n### Core Components\n\n| Component | Location | Responsibility |\n|-----------|----------|----------------|\n| **cmd/controller** | `cmd/controller/main.go` | Entry point, configuration, signal handling |\n| **Server** | `internal/server/handlers.go` | HTTP API for platform communication |\n| **Engine** | `internal/engine/engine.go` | Orchestrates instrumentation operations |\n| 
**Lifecycle** | `internal/engine/lifecycle.go` | Manages enable/disable transitions |\n| **Python SDK Runtime** | `internal/engine/python_sdk_runtime.go` | Runtime injection of Python SDK |\n| **Scanner** | `internal/scanner/scanner.go` | Discovers Python applications |\n\n资料来源：[src/client/src/lib/platform/controller/features/agent.ts:1-25]()\n\n## Supported Environments\n\nThe Controller supports multiple deployment scenarios:\n\n| Environment | Installation Method | Status |\n|-------------|---------------------|--------|\n| **Linux (systemd)** | Direct binary download + systemd service | ✅ Primary |\n| **Docker** | Privileged container with PID host mode | ✅ Supported |\n| **Kubernetes** | DaemonSet or sidecar pattern | ✅ Supported |\n\n资料来源：[src/client/src/app/(playground)/agents/no-controller.tsx:1-50]()\n\n## Installation\n\n### Linux (systemd)\n\nDownload the latest binary and configure as a systemd service:\n\n```bash\ncurl -fsSL https://github.com/openlit/openlit/releases/latest/download/openlit-controller-linux-amd64 \\\n  -o /usr/local/bin/openlit-controller\nchmod +x /usr/local/bin/openlit-controller\n\n# Create systemd service (replace the placeholder host/key values with your deployment's)\ncat > /etc/systemd/system/openlit-controller.service << 'EOF'\n[Unit]\nDescription=OpenLIT Controller\nAfter=network.target\n\n[Service]\nEnvironment=\"OPENLIT_URL=http://openlit:3000\"\nEnvironment=\"OTEL_EXPORTER_OTLP_ENDPOINT=http://openlit:4318\"\nEnvironment=\"OPENLIT_API_KEY=<your-api-key>\"\nExecStart=/usr/local/bin/openlit-controller\nRestart=always\n\n[Install]\nWantedBy=multi-user.target\nEOF\n\nsystemctl daemon-reload\nsystemctl enable --now openlit-controller\n```\n\n资料来源：[src/client/src/app/(playground)/agents/no-controller.tsx:10-35]()\n\n### Docker\n\n```bash\ndocker run -d --privileged --pid=host \\\n  -e OPENLIT_URL=http://openlit:3000 \\\n  -e OTEL_EXPORTER_OTLP_ENDPOINT=http://openlit:4318 \\\n  openlit-controller\n```\n\n## Configuration\n\nThe Controller is configured via environment 
variables:\n\n| Environment Variable | Description | Required |\n|---------------------|-------------|----------|\n| `OPENLIT_URL` | URL of the OpenLIT platform | Yes |\n| `OPENLIT_API_KEY` | API key for authentication | No |\n| `OTEL_EXPORTER_OTLP_ENDPOINT` | OTLP endpoint for telemetry | Yes |\n\n资料来源：[src/client/src/app/(playground)/agents/no-controller.tsx:15-25]()\n\n## Agent Operations\n\nThe Controller exposes three primary operations:\n\n### Enable Instrumentation\n\nActivates OpenLIT SDK injection for target Python applications.\n\n```json\n{\n  \"operation\": \"enable\",\n  \"serviceId\": \"string\"\n}\n```\n\n### Disable Instrumentation\n\nDeactivates SDK injection and removes runtime hooks.\n\n```json\n{\n  \"operation\": \"disable\",\n  \"serviceId\": \"string\"\n}\n```\n\n### Status Check\n\nRetrieves current instrumentation state for a service.\n\n```json\n{\n  \"operation\": \"status\",\n  \"serviceId\": \"string\"\n}\n```\n\n资料来源：[src/client/src/lib/platform/controller/features/agent.ts:25-45]()\n\n## Service State Model\n\n```mermaid\nstateDiagram-v2\n    [*] --> disabled: Initial State\n    disabled --> enabled: enable operation\n    enabled --> disabled: disable operation\n    enabled --> manual: explicit override\n    manual --> enabled: resume auto\n    disabled --> manual: partial config\n    manual --> disabled: full removal\n```\n\n### State Definitions\n\n| State | Description |\n|-------|-------------|\n| `enabled` | SDK actively injecting traces |\n| `disabled` | No instrumentation active |\n| `manual` | User-controlled state (not auto-managed) |\n| `automatable` | Service eligible for auto-instrumentation |\n\n资料来源：[src/client/src/lib/platform/controller/features/agent.ts:15-30]()\n\n## Python SDK Runtime Integration\n\nThe Controller's Python SDK Runtime module handles the actual SDK injection:\n\n1. **Process Discovery**: Identifies Python processes running user applications\n2. 
**Runtime Injection**: Injects OpenLIT SDK using Python's import hooks\n3. **Configuration Propagation**: Sets OTLP endpoint and API keys via environment\n4. **Health Monitoring**: Ensures instrumentation remains active\n\nThe runtime is specifically optimized for **Python-only** services:\n\n```typescript\nsupported: service.language_runtime === \"python\"\n```\n\n资料来源：[src/client/src/lib/platform/controller/features/agent.ts:20]()\n\n## Kubernetes Integration\n\nWhen running in Kubernetes, the Controller respects workload metadata:\n\n| Attribute | Description |\n|-----------|-------------|\n| `k8s.workload.kind` | Workload type (Deployment, StatefulSet, etc.) |\n| `service.service_name` | Name of the service |\n| `service.namespace` | Kubernetes namespace |\n\n### Naked Pod Handling\n\nThe Controller automatically detects and handles \"naked pods\" (pods without a workload controller):\n\n```typescript\nconst isNakedPod = mode === \"kubernetes\" && (!workloadKind || workloadKind === \"Pod\");\n```\n\n资料来源：[src/client/src/lib/platform/controller/features/agent.ts:8-12]()\n\n## Validation\n\nOperations are validated before execution:\n\n```typescript\nvalidatePayload(operation: string, _payload: Record<string, unknown>) {\n    if (\n        operation !== \"enable\" &&\n        operation !== \"disable\" &&\n        operation !== \"status\"\n    ) {\n        return `Unknown operation \"${operation}\" for feature \"${FEATURE}\". \n                Expected \"enable\", \"disable\", or \"status\".`;\n    }\n    return null;\n}\n```\n\n资料来源：[src/client/src/lib/platform/controller/features/agent.ts:28-40]()\n\n## Summary\n\nThe OpenLIT Controller is a critical component for zero-code instrumentation of Python LLM applications. 
It provides:\n\n- **Automated Discovery**: Scans and identifies Python services automatically\n- **Runtime Injection**: Injects observability SDK without application restarts\n- **Multi-Platform Support**: Works on Linux, Docker, and Kubernetes\n- **Platform Integration**: Connects to OpenLIT platform for centralized management\n- **Lifecycle Management**: Full control over enable/disable operations\n\n---\n\n<a id='gpu-collector'></a>\n\n## GPU Collector\n\n### 相关页面\n\n相关主题：[OpenLIT Controller](#controller), [System Architecture](#architecture)\n\n<details>\n<summary>Relevant Source Files</summary>\n\n以下源码文件用于生成本页说明：\n\n- [opentelemetry-gpu-collector/README.md](https://github.com/openlit/openlit/blob/main/opentelemetry-gpu-collector/README.md)\n- [opentelemetry-gpu-collector/cmd/collector/main.go](https://github.com/openlit/openlit/blob/main/opentelemetry-gpu-collector/cmd/collector/main.go)\n- [opentelemetry-gpu-collector/internal/gpu/nvidia/nvidia.go](https://github.com/openlit/openlit/blob/main/opentelemetry-gpu-collector/internal/gpu/nvidia/nvidia.go)\n- [opentelemetry-gpu-collector/internal/gpu/amd/amd.go](https://github.com/openlit/openlit/blob/main/opentelemetry-gpu-collector/internal/gpu/amd/amd.go)\n- [opentelemetry-gpu-collector/internal/gpu/intel/intel.go](https://github.com/openlit/openlit/blob/main/opentelemetry-gpu-collector/internal/gpu/intel/intel.go)\n- [opentelemetry-gpu-collector/internal/ebpf/tracer.go](https://github.com/openlit/openlit/blob/main/opentelemetry-gpu-collector/internal/ebpf/tracer.go)\n- [opentelemetry-gpu-collector/internal/export/metrics.go](https://github.com/openlit/openlit/blob/main/opentelemetry-gpu-collector/internal/export/metrics.go)\n</details>\n\n# GPU Collector\n\nThe **OpenTelemetry GPU Collector** (also referred to as `opentelemetry-gpu-collector`) is a specialized telemetry agent built and maintained by OpenLIT. 
It provides real-time GPU hardware telemetry collection for NVIDIA, AMD, and Intel GPUs, emitting metrics in compliance with the OpenTelemetry semantic conventions under the `hw.gpu.*` namespace.\n\n## Overview\n\nThe GPU Collector serves as a standalone service that monitors GPU hardware metrics and exports them via the OTLP protocol to any OpenTelemetry-compatible backend, including the OpenLIT observability platform.\n\n**Key Responsibilities:**\n\n- Collect GPU hardware telemetry from NVIDIA GPUs via NVML (NVIDIA Management Library)\n- Collect GPU hardware telemetry from AMD and Intel GPUs via `sysfs/hwmon` interfaces\n- Perform eBPF-based CUDA kernel tracing for detailed operation insights\n- Emit metrics following OpenTelemetry semantic conventions (`hw.gpu.*`)\n- Export metrics over OTLP for integration with observability platforms\n\n**License:** Apache-2.0\n\n资料来源：[opentelemetry-gpu-collector/README.md](https://github.com/openlit/openlit/blob/main/opentelemetry-gpu-collector/README.md)\n\n---\n\n## Architecture\n\nThe GPU Collector follows a modular architecture with distinct internal components for GPU detection, metric collection, and telemetry export.\n\n```mermaid\ngraph TD\n    subgraph GPU Collector\n        A[main.go] --> B[GPU Detection Layer]\n        B --> C[NVIDIA Provider]\n        B --> D[AMD Provider]\n        B --> E[Intel Provider]\n        C --> F[NVML Interface]\n        D --> G[sysfs/hwmon]\n        E --> G\n        C --> H[Metrics Processor]\n        D --> H\n        E --> H\n        F --> H\n        G --> H\n        H --> I[eBPF Tracer]\n        H --> J[OTLP Exporter]\n        I --> J\n    end\n    \n    K[OpenTelemetry Backend] --> J\n    L[OpenLIT Dashboard] --> K\n```\n\n### Core Components\n\n| Component | Path | Purpose |\n|-----------|------|---------|\n| Entry Point | `cmd/collector/main.go` | Application initialization and configuration |\n| NVIDIA Provider | `internal/gpu/nvidia/nvidia.go` | NVML-based telemetry collection for 
NVIDIA GPUs |\n| AMD Provider | `internal/gpu/amd/amd.go` | sysfs/hwmon-based telemetry for AMD GPUs |\n| Intel Provider | `internal/gpu/intel/intel.go` | sysfs/hwmon-based telemetry for Intel GPUs |\n| eBPF Tracer | `internal/ebpf/tracer.go` | CUDA kernel tracing via eBPF |\n| Metrics Exporter | `internal/export/metrics.go` | OTLP metric export logic |\n\n资料来源：[opentelemetry-gpu-collector/README.md](https://github.com/openlit/openlit/blob/main/opentelemetry-gpu-collector/README.md)\n\n---\n\n## Supported Hardware and Vendors\n\nThe GPU Collector supports GPU telemetry collection from three major hardware vendors.\n\n### Vendor Support Matrix\n\n| Vendor | Collection Method | Status | Features |\n|--------|------------------|--------|----------|\n| **NVIDIA** | NVML (NVIDIA Management Library) | Done | Power, energy, clock, utilization, errors |\n| **AMD** | sysfs/hwmon | Done | Power, energy, clock, utilization |\n| **Intel** | sysfs/hwmon | Done | Power, clock, utilization* |\n\n*Intel support depends on driver (i915/Xe) and kernel version.\n\n资料来源：[opentelemetry-gpu-collector/README.md](https://github.com/openlit/openlit/blob/main/opentelemetry-gpu-collector/README.md)\n\n### Hardware Telemetry Features\n\n| Feature | Status |\n|---------|--------|\n| NVIDIA GPU hardware telemetry (NVML) | Done |\n| AMD GPU hardware telemetry (sysfs/hwmon) | Done |\n| Intel GPU hardware telemetry (sysfs/hwmon) | Done |\n| eBPF CUDA kernel tracing | Done |\n| OTel semantic convention compliance (`hw.gpu.*`) | Done |\n| Prometheus `/metrics` endpoint | Planned |\n| ROCm HIP tracing (AMD eBPF) | Planned |\n| Per-process GPU utilization (DRM fdinfo) | Planned |\n\n资料来源：[opentelemetry-gpu-collector/README.md](https://github.com/openlit/openlit/blob/main/opentelemetry-gpu-collector/README.md)\n\n---\n\n## Metrics Reference\n\nAll GPU metrics follow the OpenTelemetry semantic conventions with the `hw.gpu.*` prefix.\n\n### Metric Definitions\n\n| Metric Name | Type | Unit | Description 
| NVIDIA | AMD | Intel |\n|-------------|------|------|-------------|--------|-----|-------|\n| `hw.gpu.power.draw` | Gauge | W | Current power draw | Yes | Yes | Yes |\n| `hw.gpu.power.limit` | Gauge | W | Power limit/cap | Yes | Yes | Yes |\n| `hw.gpu.energy.consumed` | Counter | J | Cumulative energy consumed | Yes | Yes | Yes |\n| `hw.gpu.clock.graphics` | Gauge | MHz | Graphics/SM clock frequency | Yes | Yes | —* |\n| `hw.gpu.clock.memory` | Gauge | MHz | Memory clock frequency | Yes | Yes | — |\n| `hw.errors` | Counter | {error} | ECC and PCIe errors via `error.type` + `hw.type=gpu` | Yes | — | — |\n\n*Intel support depends on driver (i915/Xe) and kernel version.\n\n### Utilization Metrics\n\n| Metric | Extra Attribute | Values |\n|--------|-----------------|--------|\n| `hw.gpu.utilization` | `hw.gpu.task` | `general`, `encoder`, `decoder` |\n\n资料来源：[opentelemetry-gpu-collector/README.md](https://github.com/openlit/openlit/blob/main/opentelemetry-gpu-collector/README.md)\n\n---\n\n## Attributes\n\nAll GPU metrics include the following attributes for device identification and categorization.\n\n### Common Attributes\n\n| Attribute | Description | Example |\n|-----------|-------------|---------|\n| `hw.id` | Unique device identifier (required by spec) | `GPU-a1b2c3d4-...` |\n| `hw.name` | Product name | `NVIDIA A100-SXM4-80GB` |\n| `hw.vendor` | Vendor name | `nvidia`, `amd`, `intel` |\n| `gpu.index` | Device index | `0`, `1` |\n| `gpu.pci_address` | PCI bus address | `0000:01:00.0` |\n\n### Error Attributes\n\n| Attribute | Description |\n|-----------|-------------|\n| `error.type` | Type of hardware error |\n| `hw.type` | Set to `gpu` for GPU-specific errors |\n\n资料来源：[opentelemetry-gpu-collector/README.md](https://github.com/openlit/openlit/blob/main/opentelemetry-gpu-collector/README.md)\n\n---\n\n## Deployment Options\n\nThe GPU Collector can be deployed using multiple methods based on infrastructure requirements.\n\n### Docker\n\n```bash\ndocker run -d 
\\\n    --name otel-gpu-collector \\\n    --restart always \\\n    --gpus all \\\n    -e OTEL_SERVICE_NAME=my-gpu-app \\\n    -e OTEL_RESOURCE_ATTRIBUTES=deployment.environment=production \\\n    -e OTEL_EXPORTER_OTLP_ENDPOINT=\"http://otel-collector:4317\" \\\n    ghcr.io/openlit/otel-gpu-collector:latest\n```\n\n### Docker Compose\n\n```yaml\nservices:\n  otel-gpu-collector:\n    image: ghcr.io/openlit/otel-gpu-collector:latest\n    environment:\n      OTEL_SERVICE_NAME: my-app\n      OTEL_RESOURCE_ATTRIBUTES: \"deployment.environment=production\"\n      OTEL_EXPORTER_OTLP_ENDPOINT: \"http://otel-collector:4317\"\n    deploy:\n      resources:\n        reservations:\n          devices:\n            - driver: nvidia\n              count: all\n              capabilities: [gpu]\n    depends_on:\n      - otel-collector\n    restart: always\n```\n\n### Pre-built Binary\n\n```sh\n# Linux amd64\ncurl -L https://github.com/openlit/openlit/releases/latest/download/opentelemetry-gpu-collector-<version>-linux-amd64 \\\n    -o opentelemetry-gpu-collector\nchmod +x opentelemetry-gpu-collector\n\nOTEL_EXPORTER_OTLP_ENDPOINT=http://localhost:4317 ./opentelemetry-gpu-collector\n```\n\n### Build from Source\n\n```sh\ngit clone https://github.com/openlit/openlit.git\ncd openlit/opentelemetry-gpu-collector\nmake build\n./opentelemetry-gpu-collector\n```\n\n资料来源：[opentelemetry-gpu-collector/README.md](https://github.com/openlit/openlit/blob/main/opentelemetry-gpu-collector/README.md)\n\n---\n\n## Configuration\n\nThe GPU Collector uses standard OpenTelemetry environment variables for configuration.\n\n### Configuration Variables\n\n| Variable | Default | Description |\n|----------|---------|-------------|\n| `OTEL_EXPORTER_OTLP_ENDPOINT` | *(required)* | OTLP exporter endpoint |\n| `OTEL_SERVICE_NAME` | — | Service name for telemetry |\n| `OTEL_RESOURCE_ATTRIBUTES` | — | Additional resource attributes 
|\n\n资料来源：[opentelemetry-gpu-collector/README.md](https://github.com/openlit/openlit/blob/main/opentelemetry-gpu-collector/README.md)\n\n---\n\n## Data Flow\n\n```mermaid\ngraph LR\n    A[GPU Hardware] -->|NVML/sysfs| B[GPU Provider]\n    B -->|Raw Metrics| C[Metrics Processor]\n    D[eBPF Kernel Tracer] -->|Kernel Events| C\n    C -->|Structured Metrics| E[OTLP Exporter]\n    E -->|OTLP Protocol| F[OpenTelemetry Backend]\n    F --> G[OpenLIT Dashboard]\n```\n\n### Collection Pipeline\n\n1. **GPU Detection**: The collector detects available GPUs on the host system\n2. **Vendor-specific Collection**: Each GPU type uses its native interface:\n   - NVIDIA: NVML API calls\n   - AMD/Intel: Reading from `/sys/class/hwmon/`\n3. **Metric Processing**: Raw values are transformed into OpenTelemetry metric format\n4. **eBPF Enrichment**: CUDA kernel tracing data enriches the telemetry\n5. **OTLP Export**: Metrics are exported to the configured endpoint\n\n---\n\n## Integration with OpenLIT\n\nThe GPU Collector integrates seamlessly with the OpenLIT observability platform for GPU monitoring.\n\n```mermaid\ngraph TD\n    subgraph Collection Layer\n        A[GPU Collector] -->|OTLP|gRPC[OTLP gRPC]\n        A -->|OTLP|HTTP[OTLP HTTP]\n    end\n    \n    subgraph OpenLIT Stack\n        B[OpenLIT Backend] --> C[PostgreSQL]\n        B --> D[ClickHouse]\n        B --> E[Redis]\n    end\n    \n    gRPC --> B\n    HTTP --> B\n    B --> F[OpenLIT Dashboard:3000]\n```\n\n### Prerequisites\n\n1. Deploy the OpenLIT stack using Docker Compose:\n   ```bash\n   docker compose up -d\n   ```\n\n2. Configure the GPU Collector endpoint:\n   ```bash\n   OTEL_EXPORTER_OTLP_ENDPOINT=http://localhost:4317 ./opentelemetry-gpu-collector\n   ```\n\n3. 
Access the OpenLIT Dashboard at `http://localhost:3000`\n\n资料来源：[src/client/src/app/(playground)/getting-started/page.tsx](https://github.com/openlit/openlit/blob/main/src/client/src/app/(playground)/getting-started/page.tsx)\n\n---\n\n## Project Structure\n\n```\nopentelemetry-gpu-collector/\n├── cmd/\n│   └── collector/\n│       └── main.go              # Application entry point\n├── internal/\n│   ├── gpu/\n│   │   ├── nvidia/\n│   │   │   └── nvidia.go        # NVIDIA GPU provider (NVML)\n│   │   ├── amd/\n│   │   │   └── amd.go           # AMD GPU provider (sysfs)\n│   │   └── intel/\n│   │       └── intel.go        # Intel GPU provider (sysfs)\n│   ├── ebpf/\n│   │   └── tracer.go           # eBPF CUDA kernel tracer\n│   └── export/\n│       └── metrics.go           # OTLP metrics exporter\n├── Dockerfile\n├── Makefile\n└── README.md\n```\n\n---\n\n## See Also\n\n- [OpenLIT Documentation](https://docs.openlit.io)\n- [OpenLIT GitHub Repository](https://github.com/openlit/openlit)\n- [OpenTelemetry Semantic Conventions - Hardware Metrics](https://opentelemetry.io/docs/specs/semconv/hardware-metrics/)\n\n---\n\n---\n\n## Doramagic 踩坑日志\n\n项目：openlit/openlit\n\n摘要：发现 15 个潜在踩坑项，其中 0 个为 high/blocking；最高优先级：安装坑 - 来源证据：Integration: Governance and compliance signals for LLM observability。\n\n## 1. 安装坑 · 来源证据：Integration: Governance and compliance signals for LLM observability\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：GitHub 社区证据显示该项目存在一个安装相关的待验证问题：Integration: Governance and compliance signals for LLM observability\n- 对用户的影响：可能增加新用户试用和生产接入成本。\n- 建议检查：来源显示可能已有修复、规避或版本变化，说明书中必须标注适用版本。\n- 防护动作：不得脱离来源链接放大为确定性结论；需要标注适用版本和复核状态。\n- 证据：community_evidence:github | cevd_16e8a1979e4646f18ae6d36da1fd46fe | https://github.com/openlit/openlit/issues/1106 | 来源类型 github_issue 暴露的待验证使用条件。\n\n## 2. 
安装坑 · 来源证据：Proposal: gen_ai.agent.threat_detected span event helper for OTel-shaped detection observability\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：GitHub 社区证据显示该项目存在一个安装相关的待验证问题：Proposal: gen_ai.agent.threat_detected span event helper for OTel-shaped detection observability\n- 对用户的影响：可能增加新用户试用和生产接入成本。\n- 建议检查：来源问题仍为 open，Pack Agent 需要复核是否仍影响当前版本。\n- 防护动作：不得脱离来源链接放大为确定性结论；需要标注适用版本和复核状态。\n- 证据：community_evidence:github | cevd_9788255c9fb34a7eae64ba6413a52030 | https://github.com/openlit/openlit/issues/1186 | 来源讨论提到 python 相关条件，需在安装/试用前复核。\n\n## 3. 安装坑 · 来源证据：[Bug]: Docker Image doesn't run on windows 64bit\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：GitHub 社区证据显示该项目存在一个安装相关的待验证问题：[Bug]: Docker Image doesn't run on windows 64bit\n- 对用户的影响：可能增加新用户试用和生产接入成本。\n- 建议检查：来源显示可能已有修复、规避或版本变化，说明书中必须标注适用版本。\n- 防护动作：不得脱离来源链接放大为确定性结论；需要标注适用版本和复核状态。\n- 证据：community_evidence:github | cevd_e25a08120daf4deb81b9193aeab1f929 | https://github.com/openlit/openlit/issues/786 | 来源讨论提到 docker 相关条件，需在安装/试用前复核。\n\n## 4. 安装坑 · 来源证据：openlit-1.19.0\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：GitHub 社区证据显示该项目存在一个安装相关的待验证问题：openlit-1.19.0\n- 对用户的影响：可能增加新用户试用和生产接入成本。\n- 建议检查：来源显示可能已有修复、规避或版本变化，说明书中必须标注适用版本。\n- 防护动作：不得脱离来源链接放大为确定性结论；需要标注适用版本和复核状态。\n- 证据：community_evidence:github | cevd_0504e467960f4bbe919ff101c6a14d7b | https://github.com/openlit/openlit/releases/tag/openlit-1.19.0 | 来源类型 github_release 暴露的待验证使用条件。\n\n## 5. 配置坑 · 来源证据：controller-0.2.0\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：GitHub 社区证据显示该项目存在一个配置相关的待验证问题：controller-0.2.0\n- 对用户的影响：可能影响升级、迁移或版本选择。\n- 建议检查：来源显示可能已有修复、规避或版本变化，说明书中必须标注适用版本。\n- 防护动作：不得脱离来源链接放大为确定性结论；需要标注适用版本和复核状态。\n- 证据：community_evidence:github | cevd_addec19eec37420da207487d5a685eaa | https://github.com/openlit/openlit/releases/tag/controller-0.2.0 | 来源类型 github_release 暴露的待验证使用条件。\n\n## 6. 
配置坑 · 来源证据：openlit-1.20.0\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：GitHub 社区证据显示该项目存在一个配置相关的待验证问题：openlit-1.20.0\n- 对用户的影响：可能影响升级、迁移或版本选择。\n- 建议检查：来源显示可能已有修复、规避或版本变化，说明书中必须标注适用版本。\n- 防护动作：不得脱离来源链接放大为确定性结论；需要标注适用版本和复核状态。\n- 证据：community_evidence:github | cevd_217968c917e9426f9f8fbb4b50bebdb5 | https://github.com/openlit/openlit/releases/tag/openlit-1.20.0 | 来源类型 github_release 暴露的待验证使用条件。\n\n## 7. 能力坑 · 能力判断依赖假设\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：README/documentation is current enough for a first validation pass.\n- 对用户的影响：假设不成立时，用户拿不到承诺的能力。\n- 建议检查：将假设转成下游验证清单。\n- 防护动作：假设必须转成验证项；没有验证结果前不能写成事实。\n- 证据：capability.assumptions | github_repo:747319327 | https://github.com/openlit/openlit | README/documentation is current enough for a first validation pass.\n\n## 8. 维护坑 · 维护活跃度未知\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：未记录 last_activity_observed。\n- 对用户的影响：新项目、停更项目和活跃项目会被混在一起，推荐信任度下降。\n- 建议检查：补 GitHub 最近 commit、release、issue/PR 响应信号。\n- 防护动作：维护活跃度未知时，推荐强度不能标为高信任。\n- 证据：evidence.maintainer_signals | github_repo:747319327 | https://github.com/openlit/openlit | last_activity_observed missing\n\n## 9. 安全/权限坑 · 下游验证发现风险项\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：no_demo\n- 对用户的影响：下游已经要求复核，不能在页面中弱化。\n- 建议检查：进入安全/权限治理复核队列。\n- 防护动作：下游风险存在时必须保持 review/recommendation 降级。\n- 证据：downstream_validation.risk_items | github_repo:747319327 | https://github.com/openlit/openlit | no_demo; severity=medium\n\n## 10. 安全/权限坑 · 存在评分风险\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：no_demo\n- 对用户的影响：风险会影响是否适合普通用户安装。\n- 建议检查：把风险写入边界卡，并确认是否需要人工复核。\n- 防护动作：评分风险必须进入边界卡，不能只作为内部分数。\n- 证据：risks.scoring_risks | github_repo:747319327 | https://github.com/openlit/openlit | no_demo; severity=medium\n\n## 11. 
安全/权限坑 · 来源证据：Bug: OpenAI API key in operator example test-application is not using OPENAI_API_KEY env var\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：GitHub 社区证据显示该项目存在一个安全/权限相关的待验证问题：Bug: OpenAI API key in operator example test-application is not using OPENAI_API_KEY env var\n- 对用户的影响：可能影响授权、密钥配置或安全边界。\n- 建议检查：来源显示可能已有修复、规避或版本变化，说明书中必须标注适用版本。\n- 防护动作：不得脱离来源链接放大为确定性结论；需要标注适用版本和复核状态。\n- 证据：community_evidence:github | cevd_bfba0945570d4cbbaead1257e8f70dfe | https://github.com/openlit/openlit/issues/1135 | 来源讨论提到 python 相关条件，需在安装/试用前复核。\n\n## 12. 安全/权限坑 · 来源证据：openlit-1.19.1\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：GitHub 社区证据显示该项目存在一个安全/权限相关的待验证问题：openlit-1.19.1\n- 对用户的影响：可能增加新用户试用和生产接入成本。\n- 建议检查：来源显示可能已有修复、规避或版本变化，说明书中必须标注适用版本。\n- 防护动作：不得脱离来源链接放大为确定性结论；需要标注适用版本和复核状态。\n- 证据：community_evidence:github | cevd_b5088506959947828f2d740f9297d5b5 | https://github.com/openlit/openlit/releases/tag/openlit-1.19.1 | 来源类型 github_release 暴露的待验证使用条件。\n\n## 13. 安全/权限坑 · 来源证据：py-1.41.2\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：GitHub 社区证据显示该项目存在一个安全/权限相关的待验证问题：py-1.41.2\n- 对用户的影响：可能影响授权、密钥配置或安全边界。\n- 建议检查：来源显示可能已有修复、规避或版本变化，说明书中必须标注适用版本。\n- 防护动作：不得脱离来源链接放大为确定性结论；需要标注适用版本和复核状态。\n- 证据：community_evidence:github | cevd_ff3f4dfa2dc04616be73482b2145ac5c | https://github.com/openlit/openlit/releases/tag/py-1.41.2 | 来源讨论提到 docker 相关条件，需在安装/试用前复核。\n\n## 14. 维护坑 · issue/PR 响应质量未知\n\n- 严重度：low\n- 证据强度：source_linked\n- 发现：issue_or_pr_quality=unknown。\n- 对用户的影响：用户无法判断遇到问题后是否有人维护。\n- 建议检查：抽样最近 issue/PR，判断是否长期无人处理。\n- 防护动作：issue/PR 响应未知时，必须提示维护风险。\n- 证据：evidence.maintainer_signals | github_repo:747319327 | https://github.com/openlit/openlit | issue_or_pr_quality=unknown\n\n## 15. 
维护坑 · 发布节奏不明确\n\n- 严重度：low\n- 证据强度：source_linked\n- 发现：release_recency=unknown。\n- 对用户的影响：安装命令和文档可能落后于代码，用户踩坑概率升高。\n- 建议检查：确认最近 release/tag 和 README 安装命令是否一致。\n- 防护动作：发布节奏未知或过期时，安装说明必须标注可能漂移。\n- 证据：evidence.maintainer_signals | github_repo:747319327 | https://github.com/openlit/openlit | release_recency=unknown\n\n<!-- canonical_name: openlit/openlit; human_manual_source: deepwiki_human_wiki -->\n",
      "markdown_key": "openlit",
      "pages": "draft",
      "source_refs": [
        {
          "evidence_id": "github_repo:747319327",
          "kind": "repo",
          "supports_claim_ids": [
            "claim_identity",
            "claim_distribution",
            "claim_capability"
          ],
          "url": "https://github.com/openlit/openlit"
        },
        {
          "evidence_id": "art_5748a4a0423c40c19b67f0f72e239443",
          "kind": "docs",
          "supports_claim_ids": [
            "claim_identity",
            "claim_distribution",
            "claim_capability"
          ],
          "url": "https://github.com/openlit/openlit#readme"
        }
      ],
      "summary": "DeepWiki/Human Wiki 完整输出，末尾追加 Discovery Agent 踩坑日志。",
      "title": "openlit 说明书",
      "toc": [
        "https://github.com/openlit/openlit 项目说明书",
        "目录",
        "OpenLIT Overview",
        "What is OpenLIT?",
        "Key Features",
        "Architecture Overview",
        "SDK Support",
        "Configuration Options",
        "Doramagic 踩坑日志"
      ]
    }
  },
  "quality_gate": {
    "blocking_gaps": [],
    "category_confidence": "medium",
    "compile_status": "ready_for_review",
    "five_assets_present": true,
    "install_sandbox_verified": true,
    "missing_evidence": [],
    "next_action": "publish to Doramagic.ai project surfaces",
    "prompt_preview_boundary_ok": true,
    "publish_status": "publishable",
    "quick_start_verified": true,
    "repo_clone_verified": true,
    "repo_commit": "7ca59852f63177cdfd8f5b40924b6126c7b37fcc",
    "repo_inspection_error": null,
    "repo_inspection_files": [
      "README.md",
      "docker-compose.yml",
      "docs/docs.json",
      "docs/snippets/helm-repo-setup.mdx",
      "docs/snippets/llm-as-a-judge.mdx",
      "docs/snippets/quickstart-gpu.mdx",
      "docs/snippets/integration-methods-python.mdx",
      "docs/snippets/quickstart-programmatic-evals.mdx",
      "docs/snippets/quickstart-mcp.mdx",
      "docs/snippets/quickstart-vectordb.mdx",
      "docs/snippets/quickstart-guard.mdx",
      "docs/snippets/integration-methods.mdx",
      "docs/snippets/quickstart-observability.mdx",
      "docs/snippets/openlit-platform-install.mdx",
      "docs/latest/overview.mdx",
      "docs/snippets/destinations/victoriametrics-stack/conclusion.mdx",
      "docs/snippets/destinations/victoriametrics-stack/intro.mdx",
      "docs/snippets/destinations/victoriametrics-stack/sdk.mdx",
      "docs/snippets/destinations/signoz/conclusion.mdx",
      "docs/snippets/destinations/signoz/intro.mdx",
      "docs/snippets/destinations/signoz/sdk.mdx",
      "docs/snippets/destinations/langfuse/conclusion.mdx",
      "docs/snippets/destinations/langfuse/intro.mdx",
      "docs/snippets/destinations/langfuse/sdk.mdx",
      "docs/snippets/destinations/highlight/conclusion.mdx",
      "docs/snippets/destinations/highlight/intro.mdx",
      "docs/snippets/destinations/highlight/sdk.mdx",
      "docs/snippets/destinations/dynatrace/conclusion.mdx",
      "docs/snippets/destinations/dynatrace/intro.mdx",
      "docs/snippets/destinations/dynatrace/sdk.mdx",
      "docs/snippets/destinations/middleware/conclusion.mdx",
      "docs/snippets/destinations/middleware/intro.mdx",
      "docs/snippets/destinations/middleware/sdk.mdx",
      "docs/snippets/destinations/hyperdx/conclusion.mdx",
      "docs/snippets/destinations/hyperdx/intro.mdx",
      "docs/snippets/destinations/hyperdx/sdk.mdx",
      "docs/snippets/destinations/elastic/conclusion.mdx",
      "docs/snippets/destinations/elastic/intro.mdx",
      "docs/snippets/destinations/elastic/sdk.mdx",
      "docs/snippets/destinations/datadog/conclusion.mdx"
    ],
    "repo_inspection_verified": true,
    "review_reasons": [],
    "tag_count_ok": true,
    "unsupported_claims": []
  },
  "schema_version": "0.1",
  "user_assets": {
    "ai_context_pack": {
      "asset_id": "ai_context_pack",
      "filename": "AI_CONTEXT_PACK.md",
      "markdown": "# openlit - Doramagic AI Context Pack\n\n> 定位：安装前体验与判断资产。它帮助宿主 AI 有一个好的开始，但不代表已经安装、执行或验证目标项目。\n\n## 充分原则\n\n- **充分原则，不是压缩原则**：AI Context Pack 应该充分到让宿主 AI 在开工前理解项目价值、能力边界、使用入口、风险和证据来源；它可以分层组织，但不以最短摘要为目标。\n- **压缩策略**：只压缩噪声和重复内容，不压缩会影响判断和开工质量的上下文。\n\n## 给宿主 AI 的使用方式\n\n你正在读取 Doramagic 为 openlit 编译的 AI Context Pack。请把它当作开工前上下文：帮助用户理解适合谁、能做什么、如何开始、哪些必须安装后验证、风险在哪里。不要声称你已经安装、运行或执行了目标项目。\n\n## Claim 消费规则\n\n- **事实来源**：Repo Evidence + Claim/Evidence Graph；Human Wiki 只提供显著性、术语和叙事结构。\n- **事实最低状态**：`supported`\n- `supported`：可以作为项目事实使用，但回答中必须引用 claim_id 和证据路径。\n- `weak`：只能作为低置信度线索，必须要求用户继续核实。\n- `inferred`：只能用于风险提示或待确认问题，不能包装成项目事实。\n- `unverified`：不得作为事实使用，应明确说证据不足。\n- `contradicted`：必须展示冲突来源，不得替用户强行选择一个版本。\n\n## 它最适合谁\n\n- **正在使用 Claude/Codex/Cursor/Gemini 等宿主 AI 的开发者**：README 或插件配置提到多个宿主 AI。 证据：`README.md` Claim：`clm_0002` supported 0.86\n\n## 它能做什么\n\n- **命令行启动或安装流程**（需要安装后验证）：项目文档中存在可执行命令，真实使用需要在本地或宿主环境中运行这些命令。 证据：`README.md` Claim：`clm_0001` supported 0.86\n\n## 怎么开始\n\n- `git clone git@github.com:openlit/openlit.git` 证据：`README.md` Claim：`clm_0003` supported 0.86\n- `pip install openlit` 证据：`README.md` Claim：`clm_0004` supported 0.86\n\n## 继续前判断卡\n\n- **当前建议**：先做权限沙盒试用\n- **为什么**：项目存在安装命令、宿主配置或本地写入线索，不建议直接进入主力环境，应先在隔离环境试装。\n\n### 30 秒判断\n\n- **现在怎么做**：先做权限沙盒试用\n- **最小安全下一步**：先跑 Prompt Preview；若仍要安装，只在隔离环境试装\n- **先别相信**：工具权限边界不能在安装前相信。\n- **继续会触碰**：命令执行、本地环境或项目文件、宿主 AI 上下文\n\n### 现在可以相信\n\n- **适合人群线索：正在使用 Claude/Codex/Cursor/Gemini 等宿主 AI 的开发者**（supported）：有 supported claim 或项目证据支撑，但仍不等于真实安装效果。 证据：`README.md` Claim：`clm_0002` supported 0.86\n- **能力存在：命令行启动或安装流程**（supported）：可以相信项目包含这类能力线索；是否适合你的具体任务仍要试用或安装后验证。 证据：`README.md` Claim：`clm_0001` supported 0.86\n- **存在 Quick Start / 安装命令线索**（supported）：可以相信项目文档出现过启动或安装入口；不要因此直接在主力环境运行。 证据：`README.md` Claim：`clm_0003` supported 0.86\n\n### 现在还不能相信\n\n- **工具权限边界不能在安装前相信。**（unverified）：MCP/tool 类项目通常会触碰文件、网络、浏览器或外部 API，必须真实检查权限和日志。\n- **真实输出质量不能在安装前相信。**（unverified）：Prompt Preview 只能展示引导方式，不能证明真实项目中的结果质量。\n- 
**宿主 AI 版本兼容性不能在安装前相信。**（unverified）：Claude、Cursor、Codex、Gemini 等宿主加载规则和版本差异必须在真实环境验证。\n- **不会污染现有宿主 AI 行为，不能直接相信。**（inferred）：Skill、plugin、AGENTS/CLAUDE/GEMINI 指令可能改变宿主 AI 的默认行为。\n- **可安全回滚不能默认相信。**（unverified）：除非项目明确提供卸载和恢复说明，否则必须先在隔离环境验证。\n- **真实安装后是否与用户当前宿主 AI 版本兼容？**（unverified）：兼容性只能通过实际宿主环境验证。\n- **项目输出质量是否满足用户具体任务？**（unverified）：安装前预览只能展示流程和边界，不能替代真实评测。\n- **安装命令是否需要网络、权限或全局写入？**（unverified）：这影响企业环境和个人环境的安装风险。 证据：`README.md`\n\n### 继续会触碰什么\n\n- **命令执行**：包管理器、网络下载、本地插件目录、项目配置或用户主目录。 原因：运行第一条命令就可能产生环境改动；必须先判断是否值得跑。 证据：`README.md`\n- **本地环境或项目文件**：安装结果、插件缓存、项目配置或本地依赖目录。 原因：安装前无法证明写入范围和回滚方式，需要隔离验证。 证据：`README.md`\n- **宿主 AI 上下文**：AI Context Pack、Prompt Preview、Skill 路由、风险规则和项目事实。 原因：导入上下文会影响宿主 AI 后续判断，必须避免把未验证项包装成事实。\n\n### 最小安全下一步\n\n- **先跑 Prompt Preview**：用安装前交互式试用判断工作方式是否匹配，不需要授权或改环境。（适用：任何项目都适用，尤其是输出质量未知时。）\n- **只在隔离目录或测试账号试装**：避免安装命令污染主力宿主 AI、真实项目或用户主目录。（适用：存在命令执行、插件配置或本地写入线索时。）\n- **安装后只验证一个最小任务**：先验证加载、兼容、输出质量和回滚，再决定是否深用。（适用：准备从试用进入真实工作流时。）\n\n### 退出方式\n\n- **保留安装前状态**：记录原始宿主配置和项目状态，后续才能判断是否可恢复。\n- **记录安装命令和写入路径**：没有明确卸载说明时，至少要知道哪些目录或配置需要手动清理。\n- **如果没有回滚路径，不进入主力环境**：不可回滚是继续前阻断项，不应靠信任或运气继续。\n\n## 哪些只能预览\n\n- 解释项目适合谁和能做什么\n- 基于项目文档演示典型对话流程\n- 帮助用户判断是否值得安装或继续研究\n\n## 哪些必须安装后验证\n\n- 真实安装 Skill、插件或 CLI\n- 执行脚本、修改本地文件或访问外部服务\n- 验证真实输出质量、性能和兼容性\n\n## 边界与风险判断卡\n\n- **把安装前预览误认为真实运行**：用户可能高估项目已经完成的配置、权限和兼容性验证。 处理方式：明确区分 prompt_preview_can_do 与 runtime_required。 Claim：`clm_0005` inferred 0.45\n- **命令执行会修改本地环境**：安装命令可能写入用户主目录、宿主插件目录或项目配置。 处理方式：先在隔离环境或测试账号中运行。 证据：`README.md` Claim：`clm_0006` supported 0.86\n- **待确认**：真实安装后是否与用户当前宿主 AI 版本兼容？。原因：兼容性只能通过实际宿主环境验证。\n- **待确认**：项目输出质量是否满足用户具体任务？。原因：安装前预览只能展示流程和边界，不能替代真实评测。\n- **待确认**：安装命令是否需要网络、权限或全局写入？。原因：这影响企业环境和个人环境的安装风险。\n\n## 开工前工作上下文\n\n### 加载顺序\n\n- 先读取 how_to_use.host_ai_instruction，建立安装前判断资产的边界。\n- 读取 claim_graph_summary，确认事实来自 Claim/Evidence Graph，而不是 Human Wiki 叙事。\n- 再读取 intended_users、capabilities 和 quick_start_candidates，判断用户是否匹配。\n- 需要执行具体任务时，优先查 role_skill_index，再查 evidence_index。\n- 
遇到真实安装、文件修改、网络访问、性能或兼容性问题时，转入 risk_card 和 boundaries.runtime_required。\n\n### 任务路由\n\n- **命令行启动或安装流程**：先说明这是安装后验证能力，再给出安装前检查清单。 边界：必须真实安装或运行后验证。 证据：`README.md` Claim：`clm_0001` supported 0.86\n\n### 上下文规模\n\n- 文件总数：1727\n- 重要文件覆盖：40/1727\n- 证据索引条目：62\n- 角色 / Skill 条目：17\n\n### 证据不足时的处理\n\n- **missing_evidence**：说明证据不足，要求用户提供目标文件、README 段落或安装后验证记录；不要补全事实。\n- **out_of_scope_request**：说明该任务超出当前 AI Context Pack 证据范围，并建议用户先查看 Human Manual 或真实安装后验证。\n- **runtime_request**：给出安装前检查清单和命令来源，但不要替用户执行命令或声称已执行。\n- **source_conflict**：同时展示冲突来源，标记为待核实，不要强行选择一个版本。\n\n## Prompt Recipes\n\n### 适配判断\n\n- 目标：判断这个项目是否适合用户当前任务。\n- 预期输出：适配结论、关键理由、证据引用、安装前可预览内容、必须安装后验证内容、下一步建议。\n\n```text\n请基于 openlit 的 AI Context Pack，先问我 3 个必要问题，然后判断它是否适合我的任务。回答必须包含：适合谁、能做什么、不能做什么、是否值得安装、证据来自哪里。所有项目事实必须引用 evidence_refs、source_paths 或 claim_id。\n```\n\n### 安装前体验\n\n- 目标：让用户在安装前感受核心工作流，同时避免把预览包装成真实能力或营销承诺。\n- 预期输出：一段带边界标签的体验剧本、安装后验证清单和谨慎建议；不含真实运行承诺或强营销表述。\n\n```text\n请把 openlit 当作安装前体验资产，而不是已安装工具或真实运行环境。\n\n请严格输出四段：\n1. 先问我 3 个必要问题。\n2. 给出一段“体验剧本”：用 [安装前可预览]、[必须安装后验证]、[证据不足] 三种标签展示它可能如何引导工作流。\n3. 给出安装后验证清单：列出哪些能力只有真实安装、真实宿主加载、真实项目运行后才能确认。\n4. 
给出谨慎建议：只能说“值得继续研究/试装”“先补充信息后再判断”或“不建议继续”，不得替项目背书。\n\n硬性边界：\n- 不要声称已经安装、运行、执行测试、修改文件或产生真实结果。\n- 不要写“自动适配”“确保通过”“完美适配”“强烈建议安装”等承诺性表达。\n- 如果描述安装后的工作方式，必须使用“如果安装成功且宿主正确加载 Skill，它可能会……”这种条件句。\n- 体验剧本只能写成“示例台词/假设流程”：使用“可能会询问/可能会建议/可能会展示”，不要写“已写入、已生成、已通过、正在运行、正在生成”。\n- Prompt Preview 不负责给安装命令；如用户准备试装，只能提示先阅读 Quick Start 和 Risk Card，并在隔离环境验证。\n- 所有项目事实必须来自 supported claim、evidence_refs 或 source_paths；inferred/unverified 只能作风险或待确认项。\n\n```\n\n### 角色 / Skill 选择\n\n- 目标：从项目里的角色或 Skill 中挑选最匹配的资产。\n- 预期输出：候选角色或 Skill 列表，每项包含适用场景、证据路径、风险边界和是否需要安装后验证。\n\n```text\n请读取 role_skill_index，根据我的目标任务推荐 3-5 个最相关的角色或 Skill。每个推荐都要说明适用场景、可能输出、风险边界和 evidence_refs。\n```\n\n### 风险预检\n\n- 目标：安装或引入前识别环境、权限、规则冲突和质量风险。\n- 预期输出：环境、权限、依赖、许可、宿主冲突、质量风险和未知项的检查清单。\n\n```text\n请基于 risk_card、boundaries 和 quick_start_candidates，给我一份安装前风险预检清单。不要替我执行命令，只说明我应该检查什么、为什么检查、失败会有什么影响。\n```\n\n### 宿主 AI 开工指令\n\n- 目标：把项目上下文转成一次对话开始前的宿主 AI 指令。\n- 预期输出：一段边界明确、证据引用明确、适合复制给宿主 AI 的开工前指令。\n\n```text\n请基于 openlit 的 AI Context Pack，生成一段我可以粘贴给宿主 AI 的开工前指令。这段指令必须遵守 not_runtime=true，不能声称项目已经安装、运行或产生真实结果。\n```\n\n\n## 角色 / Skill 索引\n\n- 共索引 17 个角色 / Skill / 项目文档条目。\n\n- **Observability, Evaluations, Rule Engine, Guardrails, Prompts, Vault, Playground, FleetHub**（project_doc）：Observability, Evaluations, Rule Engine, Guardrails, Prompts, Vault, Playground, FleetHub 激活提示：当用户需要理解项目结构、安装方式或边界时参考。 证据：`README.md`\n- **Features**（project_doc）：Documentation https://docs.openlit.io/latest/features/gpu Quickstart -getting-started Metrics -metrics Configuration -configuration 激活提示：当用户需要理解项目结构、安装方式或边界时参考。 证据：`opentelemetry-gpu-collector/README.md`\n- **Testing and Developing Locally**（project_doc）：This guide covers the steps needed to set up the development environment for OpenLIT using Docker Compose. 
The setup is orchestrated by dev-docker-compose.yml and supports two modes: 激活提示：当用户需要理解项目结构、安装方式或边界时参考。 证据：`src/README.md`\n- **OpenLIT Kubernetes Example**（project_doc）：A complete local Kubernetes setup with all OpenLIT components and sample LLM apps on a 3-node k3d cluster. 激活提示：当用户需要理解项目结构、安装方式或边界时参考。 证据：`examples/kubernetes/README.md`\n- **OBI Patches**（project_doc）：This directory contains patch files applied on top of the upstream OBI OpenTelemetry eBPF Instrumentation v0.8.0 during Docker build. 激活提示：当用户需要理解项目结构、安装方式或边界时参考。 证据：`openlit-controller/patches/README.md`\n- **OpenLIT Go SDK**（project_doc）：OpenTelemetry-native observability SDK for LLM applications in Go. Monitor your AI applications with automatic instrumentation for popular LLM providers. 激活提示：当用户需要理解项目结构、安装方式或边界时参考。 证据：`sdk/go/README.md`\n- **⚡ Features**（project_doc）：OpenTelemetry-native AI Observability, Evaluation and Guardrails Framework 激活提示：当用户需要理解项目结构、安装方式或边界时参考。 证据：`sdk/python/README.md`\n- **⚡ Features**（project_doc）：OpenTelemetry-native AI Observability, Monitoring and Evaluation Framework 激活提示：当用户需要理解项目结构、安装方式或边界时参考。 证据：`sdk/typescript/README.md`\n- **Observability, Evaluations, Rule Engine, Guardrails, Prompts, Vault, Playground, FleetHub**（project_doc）：Observability, Evaluations, Rule Engine, Guardrails, Prompts, Vault, Playground, FleetHub 激活提示：当用户需要理解项目结构、安装方式或边界时参考。 证据：`src/client/README.md`\n- **Standalone OpAMP Server**（project_doc）：This is a simplified version of the OpenTelemetry OpAMP server that can be used independently. It provides the core functionality of the OpAMP protocol without the UI server component. 激活提示：当用户需要理解项目结构、安装方式或边界时参考。 证据：`src/opamp-server/README.md`\n- **Contributing to OpenLIT**（project_doc）：We welcome contributions to the OpenLIT project and are grateful for every contribution from bug reports to new features. 
If you are looking to contribute to the codebase, improve documentation, report issues, or suggest new features, this document is a set of guidelines to help you get started. 激活提示：当用户需要理解项目结构、安装方式或边界时参考。 证据：`CONTRIBUTING.md`\n- **Change description:**（project_doc）：!IMPORTANT 1. We strictly follow a issue-first approach, please first open an issue https://github.com/openlit/openlit/issues relating to this Pull Request. 2. PR name follows conventional commit format: feat: ... or fix: .... 激活提示：当用户需要理解项目结构、安装方式或边界时参考。 证据：`.github/PULL_REQUEST_TEMPLATE.md`\n- **Contributor Covenant Code of Conduct**（project_doc）：Contributor Covenant Code of Conduct 激活提示：当用户需要理解项目结构、安装方式或边界时参考。 证据：`CODE_OF_CONDUCT.md`\n- **OpenLIT OpAMP Server Deployment Guide**（project_doc）：OpenLIT OpAMP Server Deployment Guide 激活提示：当用户需要理解项目结构、安装方式或边界时参考。 证据：`OPAMP_DEPLOYMENT.md`\n- **Security Policy**（project_doc）：We are committed to maintaining the security of OpenLIT and provide updates to address vulnerabilities. The following table shows which versions of our software currently receive security updates: 激活提示：当用户需要理解项目结构、安装方式或边界时参考。 证据：`SECURITY.md`\n- **Changelog**（project_doc）：All notable changes to the OpenLIT Go SDK will be documented in this file. 
激活提示：当用户需要理解项目结构、安装方式或边界时参考。 证据：`sdk/go/CHANGELOG.md`\n- **OpenLIT OpAMP Server - TLS Certificate Management**（project_doc）：OpenLIT OpAMP Server - TLS Certificate Management 激活提示：当用户需要理解项目结构、安装方式或边界时参考。 证据：`src/opamp-server/CERTIFICATES.md`\n\n## 证据索引\n\n- 共索引 62 条证据。\n\n- **Observability, Evaluations, Rule Engine, Guardrails, Prompts, Vault, Playground, FleetHub**（documentation）：Observability, Evaluations, Rule Engine, Guardrails, Prompts, Vault, Playground, FleetHub 证据：`README.md`\n- **Features**（documentation）：Documentation https://docs.openlit.io/latest/features/gpu Quickstart -getting-started Metrics -metrics Configuration -configuration 证据：`opentelemetry-gpu-collector/README.md`\n- **Testing and Developing Locally**（documentation）：This guide covers the steps needed to set up the development environment for OpenLIT using Docker Compose. The setup is orchestrated by dev-docker-compose.yml and supports two modes: 证据：`src/README.md`\n- **OpenLIT Kubernetes Example**（documentation）：A complete local Kubernetes setup with all OpenLIT components and sample LLM apps on a 3-node k3d cluster. 证据：`examples/kubernetes/README.md`\n- **OBI Patches**（documentation）：This directory contains patch files applied on top of the upstream OBI OpenTelemetry eBPF Instrumentation v0.8.0 during Docker build. 证据：`openlit-controller/patches/README.md`\n- **OpenLIT Go SDK**（documentation）：OpenTelemetry-native observability SDK for LLM applications in Go. Monitor your AI applications with automatic instrumentation for popular LLM providers. 
证据：`sdk/go/README.md`\n- **⚡ Features**（documentation）：OpenTelemetry-native AI Observability, Evaluation and Guardrails Framework 证据：`sdk/python/README.md`\n- **⚡ Features**（documentation）：OpenTelemetry-native AI Observability, Monitoring and Evaluation Framework 证据：`sdk/typescript/README.md`\n- **Observability, Evaluations, Rule Engine, Guardrails, Prompts, Vault, Playground, FleetHub**（documentation）：Observability, Evaluations, Rule Engine, Guardrails, Prompts, Vault, Playground, FleetHub 证据：`src/client/README.md`\n- **Standalone OpAMP Server**（documentation）：This is a simplified version of the OpenTelemetry OpAMP server that can be used independently. It provides the core functionality of the OpAMP protocol without the UI server component. 证据：`src/opamp-server/README.md`\n- **Contributing to OpenLIT**（documentation）：We welcome contributions to the OpenLIT project and are grateful for every contribution from bug reports to new features. If you are looking to contribute to the codebase, improve documentation, report issues, or suggest new features, this document is a set of guidelines to help you get started. 
证据：`CONTRIBUTING.md`\n- **Package**（package_manifest）：{ \"name\": \"openlit\", \"version\": \"1.13.0\", \"homepage\": \"https://github.com/openlit/openlit readme\", \"bugs\": { \"url\": \"https://github.com/openlit/openlit/issues\", \"email\": \"developer@openlit.io\" }, \"repository\": { \"type\": \"git\", \"url\": \"https://github.com/openlit/openlit\" }, \"scripts\": { \"build\": \"tsc --build\", \"lint\": \"eslint src/\", \"test\": \"jest\" }, \"files\": \"dist\", \"README.md\", \"LICENSE\", \"package.json\" , \"description\": \"OpenTelemetry-native Auto instrumentation library for monitoring LLM Applications, facilitating the integration of observability into your GenAI-driven projects\", \"main\": \"dist/index.js\", \"types\": \"dist/index.d.ts\", \"keywords\": \"OpenTelemetry\", \"otel\", \"otlp\", \"llm\", \"trac… 证据：`sdk/typescript/package.json`\n- **Package**（package_manifest）：{ \"name\": \"openlit\", \"version\": \"1.20.0\", \"private\": true, \"scripts\": { \"dev\": \"next dev\", \"build\": \"next build\", \"start\": \"next start\", \"lint\": \"next lint\", \"seed\": \"npx prisma db seed\", \"test\": \"jest\", \"test:watch\": \"jest --watch\", \"test:coverage\": \"jest --coverage\" }, \"dependencies\": { \"@ai-sdk/anthropic\": \"^3.0.6\", \"@ai-sdk/cohere\": \"^3.0.3\", \"@ai-sdk/google\": \"^3.0.3\", \"@ai-sdk/mistral\": \"^3.0.4\", \"@ai-sdk/openai\": \"^3.0.4\", \"@anthropic-ai/sdk\": \"^0.21.1\", \"@clickhouse/client\": \"^1.1.0\", \"@dhmk/zustand-lens\": \"^4.0.0\", \"@kubernetes/client-node\": \"^1.4.0\", \"@mistralai/mistralai\": \"^0.4.0\", \"@monaco-editor/react\": \"^4.7.0\", \"@next-auth/prisma-adapter\": \"^1.0.7\", \"@prisma/client\": \"^5.15.… 证据：`src/client/package.json`\n- **License**（source_file）：Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ 证据：`LICENSE`\n- **License**（source_file）：Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ 证据：`sdk/python/LICENSE`\n- 
**License**（source_file）：Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ 证据：`sdk/typescript/LICENSE`\n- **Change description:**（documentation）：!IMPORTANT 1. We strictly follow a issue-first approach, please first open an issue https://github.com/openlit/openlit/issues relating to this Pull Request. 2. PR name follows conventional commit format: feat: ... or fix: .... 证据：`.github/PULL_REQUEST_TEMPLATE.md`\n- **Contributor Covenant Code of Conduct**（documentation）：Contributor Covenant Code of Conduct 证据：`CODE_OF_CONDUCT.md`\n- **OpenLIT OpAMP Server Deployment Guide**（documentation）：OpenLIT OpAMP Server Deployment Guide 证据：`OPAMP_DEPLOYMENT.md`\n- **Security Policy**（documentation）：We are committed to maintaining the security of OpenLIT and provide updates to address vulnerabilities. The following table shows which versions of our software currently receive security updates: 证据：`SECURITY.md`\n- **Changelog**（documentation）：All notable changes to the OpenLIT Go SDK will be documented in this file. 
证据：`sdk/go/CHANGELOG.md`\n- **OpenLIT OpAMP Server - TLS Certificate Management**（documentation）：OpenLIT OpAMP Server - TLS Certificate Management 证据：`src/opamp-server/CERTIFICATES.md`\n- **Pricing**（structured_config）：{ \"embeddings\": { \"text-embedding-ada-002\": 0.0001, \"text-embedding-3-small\": 0.00002, \"text-embedding-3-large\": 0.00013, \"ada\": 0.0001, \"ada-v2\": 0.00010, \"text-ada-001\": 0.0001, \"azure text-embedding-ada-002\": 0.0001, \"azure text-embedding-3-small\": 0.00002, \"azure text-embedding-3-large\": 0.00013, \"azure ada\": 0.0001, \"azure ada-v2\": 0.00010, \"azure text-ada-001\": 0.0001, \"embed-english-v3.0\": 0.0001, \"embed-multilingual-v3.0\": 0.0001, \"embed-english-light-v3.0\": 0.0001, \"embed-multilingual-light-v3.0\": 0.0001, \"embed-english-v2.0\": 0.0001, \"embed-english-light-v2.0\": 0.0001, \"embed-multilingual-v2.0\": 0.0001, \"mistral-embed\": 0.0001, \"amazon.titan-embed-text-v1\": 0.0001, \"amazon.titan-e… 证据：`assets/pricing.json`\n- **Signoz Gpu Dashboard**（structured_config）：{ \"description\": \"This dashboard displays the GPU performance metrics like Utilization, Temperature, Power Consumption, Memory and more using the OpenTelemetry Metrics generated using OpenLIT SDK https://github.com/openlit/openlit or the OTel GPU Collector https://github.com/openlit/openlit/tree/main/otel-gpu-collector .\\n\", \"image\": \"data:image/svg+xml;base64,PHN2ZyB3aWR0aD0iMTgiIGhlaWdodD0iMTgiIHZpZXdCb3g9IjAgMCAxOCAxOCIgZmlsbD0ibm9uZSIgeG1sbnM9Imh0dHA6Ly93d3cudzMub3JnLzIwMDAvc3ZnIj4KPHBhdGggZD0iTTE0Ljk0OTQgMTMuOTQyQzE2LjIzMTggMTIuNDI1OCAxNy4zMjY4IDkuNzAyMiAxNi4xOTU2IDYuNTc0ODdDMTUuNjQ0MyA1LjA1MjQ1IDE1LjAyMTkgNC4yMDI0OSAxNC4yOTY5IDMuNjYyNTJDMTMuODU1NyAzLjMzMzc5IDEyLjA5MzMgMi41MDYzMyA5Ljc1… 证据：`assets/signoz-gpu-dashboard.json`\n- **Signoz Openlit**（structured_config）：{ \"description\": \"This dashboard displays the usage of LLMs and VectorDBs, tracking OpenTelemetry Traces and Metrics sent using OpenLIT https://github.com/openlit/openlit 
.\", \"layout\": { \"h\": 2, \"i\": \"25009fee-69ae-46e0-9f7b-5570f56c66c0\", \"moved\": false, \"static\": false, \"w\": 4, \"x\": 0, \"y\": 0 }, { \"h\": 2, \"i\": \"7ba9f26e-5c17-4dcf-89c5-c81a6ce67330\", \"moved\": false, \"static\": false, \"w\": 4, \"x\": 4, \"y\": 0 }, { \"h\": 2, \"i\": \"1794131c-c3bd-44b9-abf8-bac0d6909900\", \"moved\": false, \"static\": false, \"w\": 4, \"x\": 8, \"y\": 0 }, { \"h\": 2, \"i\": \"be5be8ce-c6fb-4b99-b62b-58030ea09f67\", \"moved\": false, \"static\": false, \"w\": 8, \"x\": 0, \"y\": 2 }, { \"h\": 4, \"i\": \"38e3d7fa-db21-4900-9219-8e29ef624db5\", \"mo… 证据：`assets/signoz-openlit.json`\n- **Docs**（structured_config）：{ \"$schema\": \"https://mintlify.com/docs.json\", \"theme\": \"aspen\", \"name\": \"OpenLIT\", \"description\": \"OpenLIT is the leading open-source AI observability platform with zero-code instrumentation for LLMs, vector databases, and AI frameworks. Monitor OpenAI, Anthropic, LangChain, LlamaIndex with OpenTelemetry. Features include cost tracking, performance metrics, prompt management, and enterprise-grade security for production AI applications.\", \"colors\": { \"primary\": \" FFA500\", \"light\": \" FFA500\", \"dark\": \" FFA500\" }, \"favicon\": \"favicon.png\", \"feedback\": { \"thumbsRating\": true, \"suggestEdit\": true }, \"navigation\": { \"global\": { \"anchors\": { \"anchor\": \"GitHub\", \"href\": \"https://github.com/openli… 证据：`docs/docs.json`\n- **.Prettierrc**（structured_config）：{ \"printWidth\": 100, \"tabWidth\": 2, \"singleQuote\": true, \"jsxBracketSameLine\": true, \"trailingComma\": \"es5\" } 证据：`sdk/typescript/.prettierrc.json`\n- **Tsconfig**（structured_config）：{ \"compilerOptions\": { \"target\": \"ES2020\", \"resolveJsonModule\": true, \"module\": \"CommonJS\", \"outDir\": \"./dist\", \"strict\": true, \"esModuleInterop\": true, \"declaration\": true, \"skipLibCheck\": true, \"sourceMap\": true, \"forceConsistentCasingInFileNames\": true, \"baseUrl\": \"./src\", \"rootDir\": 
\"./src\", }, \"include\": \"src/ / \" , \"exclude\": \"node modules\", \" / .spec.ts\" } 证据：`sdk/typescript/tsconfig.json`\n- **.Eslintrc**（structured_config）：{ \"extends\": \"next/core-web-vitals\", \"rules\": { \"react-hooks/exhaustive-deps\": \"off\" } } 证据：`src/client/.eslintrc.json`\n- **Components**（structured_config）：{ \"$schema\": \"https://ui.shadcn.com/schema.json\", \"style\": \"default\", \"rsc\": true, \"tsx\": true, \"tailwind\": { \"config\": \"tailwind.config.ts\", \"css\": \"src/app/globals.css\", \"baseColor\": \"stone\", \"cssVariables\": false, \"prefix\": \"\" }, \"aliases\": { \"components\": \"@/components\", \"utils\": \"@/lib/utils\" } } 证据：`src/client/components.json`\n- **Openlit Dashboard Gpu Dashboard Layout**（structured_config）：{ \"id\": \"70072a30-a88c-4eb5-8058-a5dca6536b8d\", \"title\": \"GPU dashboard\", \"description\": \"The dashboard helps detect GPU performance bottlenecks, thermal or power inefficiencies, and optimize resource utilization across your infrastructure.\", \"parentId\": null, \"isMainDashboard\": false, \"isPinned\": true, \"createdAt\": \"2025-06-27 05:46:43\", \"updatedAt\": \"2025-06-27 05:46:43\", \"widgets\": { \"953b08e7-5158-4806-b52e-8b58880b0cfd\": { \"id\": \"953b08e7-5158-4806-b52e-8b58880b0cfd\", \"title\": \"Power Watt \", \"description\": \"Visualization to identify the Power over time\", \"type\": \"AREA CHART\", \"properties\": { \"xAxis\": \"request time\", \"yAxes\": { \"key\": \"power draw\", \"color\": \" F36C06\" }, { \"key\": \"power… 证据：`src/client/src/clickhouse/seed-data/openlit-dashboard-GPU-dashboard-layout.json`\n- **Openlit Dashboard Llm Dashboard Layout**（structured_config）：{ \"id\": \"f36c60be-5133-4727-b6f8-8999f0692943\", \"title\": \"LLM dashboard\", \"description\": \"The LLM dashboard helps monitor usage patterns, detect anomalies, and optimize performance and cost across large language model workloads.\", \"parentId\": null, \"isMainDashboard\": true, \"isPinned\": false, 
\"createdAt\": \"2025-06-16 07:39:17\", \"updatedAt\": \"2025-06-16 07:39:17\", \"widgets\": { \"3a680898-76da-4932-9499-ac8f13b0324d\": { \"id\": \"3a680898-76da-4932-9499-ac8f13b0324d\", \"title\": \"Total hallucination detected\", \"description\": \"This defines the total hallucination detected all in all for requests for the time period selected\", \"type\": \"STAT CARD\", \"properties\": { \"value\": \"0.total evaluation detected\"… 证据：`src/client/src/clickhouse/seed-data/openlit-dashboard-LLM-dashboard-layout.json`\n- **Openlit Dashboard Vector Db Layout**（structured_config）：{ \"id\": \"4b153228-45fb-46d9-88c9-5a2aa3674251\", \"title\": \"Vector DB\", \"description\": \"The vector DB dashboard helps track query performance, memory usage, and index efficiency to optimize similarity search and ensure reliable vector database operations.\", \"parentId\": null, \"isMainDashboard\": false, \"isPinned\": true, \"createdAt\": \"2025-06-25 08:41:39\", \"updatedAt\": \"2025-06-25 08:41:39\", \"widgets\": { \"af3786ef-5dd8-4b6c-929b-99005b27a784\": { \"id\": \"af3786ef-5dd8-4b6c-929b-99005b27a784\", \"title\": \"Generation by environment\", \"description\": \"This defines total number of request grouped by environment name\", \"type\": \"PIE CHART\", \"properties\": { \"labelPath\": \"environment\", \"valuePath\": \"count\",… 证据：`src/client/src/clickhouse/seed-data/openlit-dashboard-Vector-DB-layout.json`\n- **Tsconfig**（structured_config）：{ \"compilerOptions\": { \"target\": \"es5\", \"lib\": \"dom\", \"dom.iterable\", \"esnext\" , \"allowJs\": true, \"skipLibCheck\": true, \"strict\": true, \"noEmit\": true, \"esModuleInterop\": true, \"module\": \"esnext\", \"moduleResolution\": \"bundler\", \"resolveJsonModule\": true, \"isolatedModules\": true, \"jsx\": \"preserve\", \"incremental\": true, \"plugins\": { \"name\": \"next\" } , \"paths\": { \"@/ \": \"./src/ \" , \"@/lib/ \": \"./src/lib/ \" , \"@/components/ \": \"./src/components/ \" , \"@/components/ui/ \": 
\"./src/components/ui/ \" , \"@/prisma/ \": \"./prisma/ \" , \"@/utils/ \": \"./src/utils/ \" , \"@/constants/ \": \"./src/constants/ \" , \"@/store/ \": \"./src/store/ \" , \"@/helpers/ \": \"./src/helpers/ \" , \"@/selectors/ \": \"./src/selectors/ \"… 证据：`src/client/tsconfig.json`\n- **These are supported funding model platforms**（source_file）：These are supported funding model platforms 证据：`.github/FUNDING.yml`\n- **Dependabot**（source_file）：version: 2 updates: - package-ecosystem: \"github-actions\" directory: \"/\" schedule: interval: \"weekly\" - package-ecosystem: \"npm\" directory: \"/src/client/\" schedule: interval: \"weekly\" - package-ecosystem: \"pip\" directory: \"/sdk/python\" schedule: interval: \"weekly\" - package-ecosystem: \"pip\" directory: \"/sdk/python/tests/\" schedule: interval: \"weekly\" 证据：`.github/dependabot.yml`\n- **Labeler**（source_file）：python-sdk: - changed-files: - any-glob-to-any-file: 'sdk/python/ ' 证据：`.github/labeler.yml`\n- **.gitignore**（source_file）：node modules test.py .DS Store yala.py yala.yml speech.mp3 venv/ path/ pycache / bun.lockb logs/ .env .vscode/ .idea/ .cursor-rules/ .cursorrules .pem certs/index.txt .csr sdk/typescript-test/ 证据：`.gitignore`\n- **.gitmodules**（source_file）：submodule \"openlit-controller/.obi-src\" path = openlit-controller/.obi-src url = https://github.com/open-telemetry/opentelemetry-ebpf-instrumentation.git branch = main 证据：`.gitmodules`\n- **.Vale**（source_file）：formats mdx = md 证据：`.vale.ini`\n- **Default owners for everything in the repo, unless a later match takes precedence.**（source_file）：Default owners for everything in the repo, unless a later match takes precedence. 
@openlit/litters 证据：`CODEOWNERS`\n- **.Openlit.Env**（source_file）：export INIT DB HOST=\"127.0.0.1\" export INIT DB PORT=\"9000\" export INIT DB DATABASE=\" \" export INIT DB USERNAME=\" \" export INIT DB PASSWORD=\" \" export SQLITE DATABASE URL=\"file:/app/client/data/data.db\" 证据：`assets/.openlit.env.example`\n- **Clickhouse Config**（source_file）：warning true 6 120000 604800 warning 604800 120000 1000 60000 604800 warning 60000 120000 604800 60000 120000 604800 warning 120000 604800 120000 604800 证据：`assets/clickhouse-config.xml`\n- **!/bin/bash**（source_file）：echo \"==================== ClickHouse Initialization ====================\" 证据：`assets/clickhouse-init.sh`\n- **80% of maximum memory up to 2G**（source_file）：receivers: otlp: protocols: grpc: endpoint: 0.0.0.0:4317 http: endpoint: 0.0.0.0:4318 证据：`assets/otel-collector-config.yaml`\n- **Supervisor Dynamic**（source_file）：server: endpoint: wss://localhost:4320/v1/opamp 证据：`assets/supervisor-dynamic.yaml`\n- **The endpoint of the OpAMP server**（source_file）：server: The endpoint of the OpAMP server endpoint: wss://your-openlit-server:4320/v1/opamp tls: Production configuration with proper certificate verification insecure skip verify: false 证据：`assets/supervisor-production.yaml`\n- **The endpoint of the OpAMP server**（source_file）：server: The endpoint of the OpAMP server endpoint: wss://0.0.0.0:4320/v1/opamp tls: For development: set to true to skip certificate verification For production: set to false and provide ca file insecure skip verify: true 证据：`assets/supervisor.yaml`\n- **OAuth Environment Variables**（source_file）：services: clickhouse: image: clickhouse/clickhouse-server:24.4.1 container name: clickhouse environment: CLICKHOUSE PASSWORD: ${OPENLIT DB PASSWORD:-OPENLIT} CLICKHOUSE USER: ${OPENLIT DB USER:-default} CLICKHOUSE DATABASE: ${OPENLIT DB NAME:-openlit} CLICKHOUSE ALWAYS RUN INITDB SCRIPTS: true volumes: - clickhouse-data:/var/lib/clickhouse - 
./assets/clickhouse-config.xml:/etc/clickhouse-server/config.d/custom-config.xml:ro - ./assets/clickhouse-init.sh:/docker-entrypoint-initdb.d/init.sh:ro ports: - \"9000:9000\" - \"8123:8123\" healthcheck: test: \"CMD-SHELL\", \"clickhouse-client --user=$${CLICKHOUSE USER} --password=$${CLICKHOUSE PASSWORD} --query='SELECT 1' exit 1\" interval: 5s timeout: 3s re… 证据：`docker-compose.yml`\n- **OpenLIT Environment Configuration**（source_file）：OpenLIT Environment Configuration Copy this file to .env and customize the values for your deployment 证据：`env.example`\n- **libbpf headers copied by make setup-bpf-headers vmlinux.h is committed**（source_file）：openlit-controller .obi-src/ .tar.gz dist/ 证据：`openlit-controller/.gitignore`\n- **--- Build OBI from source with multi-provider GenAI support ---**（source_file）：--- Build OBI from source with multi-provider GenAI support --- FROM ghcr.io/open-telemetry/obi-generator:0.2.11 AS obi-builder 证据：`openlit-controller/Dockerfile`\n- **Copy libbpf headers needed for BPF compilation.**（source_file）：VERSION ?= $ shell git describe --tags --always --dirty 2 /dev/null echo dev LDFLAGS := -ldflags \"-X github.com/openlit/openlit/openlit-controller/internal/server.Version=$ VERSION \" SCANNER DIR := ./internal/scanner 证据：`openlit-controller/Makefile`\n- **Go**（source_file）：module github.com/openlit/openlit/openlit-controller 证据：`openlit-controller/go.mod`\n- **Binary**（source_file）：Generated eBPF Go bindings output of go generate / bpf2go internal/ebpf/gpuevent bpfel .go internal/ebpf/gpuevent bpfel .o 证据：`opentelemetry-gpu-collector/.gitignore`\n- **Stage 1: Build the eBPF programs and Go binary**（source_file）：Stage 1: Build the eBPF programs and Go binary FROM golang:1.25-bookworm AS builder 证据：`opentelemetry-gpu-collector/Dockerfile`\n- **Makefile**（source_file）：.PHONY: all build generate setup-bpf test lint clean docker 证据：`opentelemetry-gpu-collector/Makefile`\n- **Go**（source_file）：module 
github.com/openlit/openlit/opentelemetry-gpu-collector 证据：`opentelemetry-gpu-collector/go.mod`\n- **Git**（source_file）：Node modules will be installed in container client/node modules client/.next client/.env .local 证据：`src/.dockerignore`\n- **Node.js builder - pinned to alpine 3.21 with digest**（source_file）：Node.js builder - pinned to alpine 3.21 with digest FROM alpine:3.21@sha256:c3f8e73fdb79deaebaa2037150150191b9dcbfba68b4a46d70103204c53f4709 AS builder 证据：`src/Dockerfile`\n- 其余 2 条证据见 `AI_CONTEXT_PACK.json` 或 `EVIDENCE_INDEX.json`。\n\n## 宿主 AI 必须遵守的规则\n\n- **把本资产当作开工前上下文，而不是运行环境。**：AI Context Pack 只包含证据化项目理解，不包含目标项目的可执行状态。 证据：`README.md`, `opentelemetry-gpu-collector/README.md`, `src/README.md`\n- **回答用户时区分可预览内容与必须安装后才能验证的内容。**：安装前体验的消费者价值来自降低误装和误判，而不是伪装成真实运行。 证据：`README.md`, `opentelemetry-gpu-collector/README.md`, `src/README.md`\n\n## 用户开工前应该回答的问题\n\n- 你准备在哪个宿主 AI 或本地环境中使用它？\n- 你只是想先体验工作流，还是准备真实安装？\n- 你最在意的是安装成本、输出质量、还是和现有规则的冲突？\n\n## 验收标准\n\n- 所有能力声明都能回指到 evidence_refs 中的文件路径。\n- AI_CONTEXT_PACK.md 没有把预览包装成真实运行。\n- 用户能在 3 分钟内看懂适合谁、能做什么、如何开始和风险边界。\n\n---\n\n## Doramagic Context Augmentation\n\n下面内容用于强化 Repomix/AI Context Pack 主体。Human Manual 只提供阅读骨架；踩坑日志会被转成宿主 AI 必须遵守的工作约束。\n\n## Human Manual 骨架\n\n使用规则：这里只是项目阅读路线和显著性信号，不是事实权威。具体事实仍必须回到 repo evidence / Claim Graph。\n\n宿主 AI 硬性规则：\n- 不得把页标题、章节顺序、摘要或 importance 当作项目事实证据。\n- 解释 Human Manual 骨架时，必须明确说它只是阅读路线/显著性信号。\n- 能力、安装、兼容性、运行状态和风险判断必须引用 repo evidence、source path 或 Claim Graph。\n\n- **OpenLIT Overview**：importance `high`\n  - source_paths: README.md, src/README.md\n- **Quick Start Guide**：importance `high`\n  - source_paths: docker-compose.yml, examples/linux/docker-compose.yaml, examples/kubernetes/setup.sh\n- **System Architecture**：importance `high`\n  - source_paths: src/client/src/lib/platform/clickhouse/clickhouse-client.ts, docker-compose.yml, src/dev-docker-compose.yml\n- **Data Flow and Management**：importance `high`\n  - source_paths: sdk/python/src/openlit/otel/tracing.py, 
sdk/python/src/openlit/otel/metrics.py, src/client/src/lib/platform/clickhouse/helpers.ts, src/client/src/lib/platform/request/index.ts\n- **Python SDK Architecture**：importance `high`\n  - source_paths: sdk/python/src/openlit/__init__.py, sdk/python/src/openlit/_instrumentors.py, sdk/python/src/openlit/_config.py, sdk/python/src/openlit/otel/tracing.py, sdk/python/src/openlit/otel/metrics.py\n- **TypeScript SDK Architecture**：importance `medium`\n  - source_paths: sdk/typescript/src/index.ts, sdk/typescript/src/config.ts, sdk/typescript/src/instrumentation/index.ts, sdk/typescript/src/guard/index.ts, sdk/typescript/package.json\n- **Go SDK Architecture**：importance `medium`\n  - source_paths: sdk/go/openlit.go, sdk/go/config.go, sdk/go/instrumentation/openai/instrumentor.go, sdk/go/instrumentation/anthropic/instrumentor.go, sdk/go/rule_engine.go\n- **LLM and Framework Integrations**：importance `high`\n  - source_paths: sdk/python/src/openlit/instrumentation/openai/openai.py, sdk/python/src/openlit/instrumentation/langchain/langchain.py, sdk/python/src/openlit/instrumentation/llamaindex/llamaindex.py, sdk/python/src/openlit/instrumentation/pinecone/pinecone.py, sdk/typescript/src/instrumentation/openai/wrapper.ts\n\n## Repo Inspection Evidence / 源码检查证据\n\n- repo_clone_verified: true\n- repo_inspection_verified: true\n- repo_commit: `7ca59852f63177cdfd8f5b40924b6126c7b37fcc`\n- inspected_files: `README.md`, `docker-compose.yml`, `docs/docs.json`, `docs/snippets/helm-repo-setup.mdx`, `docs/snippets/llm-as-a-judge.mdx`, `docs/snippets/quickstart-gpu.mdx`, `docs/snippets/integration-methods-python.mdx`, `docs/snippets/quickstart-programmatic-evals.mdx`, `docs/snippets/quickstart-mcp.mdx`, `docs/snippets/quickstart-vectordb.mdx`, `docs/snippets/quickstart-guard.mdx`, `docs/snippets/integration-methods.mdx`, `docs/snippets/quickstart-observability.mdx`, `docs/snippets/openlit-platform-install.mdx`, `docs/latest/overview.mdx`, 
`docs/snippets/destinations/victoriametrics-stack/conclusion.mdx`, `docs/snippets/destinations/victoriametrics-stack/intro.mdx`, `docs/snippets/destinations/victoriametrics-stack/sdk.mdx`, `docs/snippets/destinations/signoz/conclusion.mdx`, `docs/snippets/destinations/signoz/intro.mdx`\n\n宿主 AI 硬性规则：\n- 没有 repo_clone_verified=true 时，不得声称已经读过源码。\n- 没有 repo_inspection_verified=true 时，不得把 README/docs/package 文件判断写成事实。\n- 没有 quick_start_verified=true 时，不得声称 Quick Start 已跑通。\n\n## Doramagic Pitfall Constraints / 踩坑约束\n\n这些规则来自 Doramagic 发现、验证或编译过程中的项目专属坑点。宿主 AI 必须把它们当作工作约束，而不是普通说明文字。\n\n### Constraint 1: 来源证据：Integration: Governance and compliance signals for LLM observability\n\n- Trigger: GitHub 社区证据显示该项目存在一个安装相关的待验证问题：Integration: Governance and compliance signals for LLM observability\n- Host AI rule: 来源显示可能已有修复、规避或版本变化，说明书中必须标注适用版本。\n- Why it matters: 可能增加新用户试用和生产接入成本。\n- Evidence: community_evidence:github | cevd_16e8a1979e4646f18ae6d36da1fd46fe | https://github.com/openlit/openlit/issues/1106 | 来源类型 github_issue 暴露的待验证使用条件。\n- Hard boundary: 不要把这个坑点包装成已解决、已验证或可忽略，除非后续验证证据明确证明它已经关闭。\n\n### Constraint 2: 来源证据：Proposal: gen_ai.agent.threat_detected span event helper for OTel-shaped detection observability\n\n- Trigger: GitHub 社区证据显示该项目存在一个安装相关的待验证问题：Proposal: gen_ai.agent.threat_detected span event helper for OTel-shaped detection observability\n- Host AI rule: 来源问题仍为 open，Pack Agent 需要复核是否仍影响当前版本。\n- Why it matters: 可能增加新用户试用和生产接入成本。\n- Evidence: community_evidence:github | cevd_9788255c9fb34a7eae64ba6413a52030 | https://github.com/openlit/openlit/issues/1186 | 来源讨论提到 python 相关条件，需在安装/试用前复核。\n- Hard boundary: 不要把这个坑点包装成已解决、已验证或可忽略，除非后续验证证据明确证明它已经关闭。\n\n### Constraint 3: 来源证据：[Bug]: Docker Image doesn't run on windows 64bit\n\n- Trigger: GitHub 社区证据显示该项目存在一个安装相关的待验证问题：[Bug]: Docker Image doesn't run on windows 64bit\n- Host AI rule: 来源显示可能已有修复、规避或版本变化，说明书中必须标注适用版本。\n- Why it matters: 可能增加新用户试用和生产接入成本。\n- Evidence: community_evidence:github | 
cevd_e25a08120daf4deb81b9193aeab1f929 | https://github.com/openlit/openlit/issues/786 | 来源讨论提到 docker 相关条件，需在安装/试用前复核。\n- Hard boundary: 不要把这个坑点包装成已解决、已验证或可忽略，除非后续验证证据明确证明它已经关闭。\n\n### Constraint 4: 来源证据：openlit-1.19.0\n\n- Trigger: GitHub 社区证据显示该项目存在一个安装相关的待验证问题：openlit-1.19.0\n- Host AI rule: 来源显示可能已有修复、规避或版本变化，说明书中必须标注适用版本。\n- Why it matters: 可能增加新用户试用和生产接入成本。\n- Evidence: community_evidence:github | cevd_0504e467960f4bbe919ff101c6a14d7b | https://github.com/openlit/openlit/releases/tag/openlit-1.19.0 | 来源类型 github_release 暴露的待验证使用条件。\n- Hard boundary: 不要把这个坑点包装成已解决、已验证或可忽略，除非后续验证证据明确证明它已经关闭。\n\n### Constraint 5: 来源证据：controller-0.2.0\n\n- Trigger: GitHub 社区证据显示该项目存在一个配置相关的待验证问题：controller-0.2.0\n- Host AI rule: 来源显示可能已有修复、规避或版本变化，说明书中必须标注适用版本。\n- Why it matters: 可能影响升级、迁移或版本选择。\n- Evidence: community_evidence:github | cevd_addec19eec37420da207487d5a685eaa | https://github.com/openlit/openlit/releases/tag/controller-0.2.0 | 来源类型 github_release 暴露的待验证使用条件。\n- Hard boundary: 不要把这个坑点包装成已解决、已验证或可忽略，除非后续验证证据明确证明它已经关闭。\n\n### Constraint 6: 来源证据：openlit-1.20.0\n\n- Trigger: GitHub 社区证据显示该项目存在一个配置相关的待验证问题：openlit-1.20.0\n- Host AI rule: 来源显示可能已有修复、规避或版本变化，说明书中必须标注适用版本。\n- Why it matters: 可能影响升级、迁移或版本选择。\n- Evidence: community_evidence:github | cevd_217968c917e9426f9f8fbb4b50bebdb5 | https://github.com/openlit/openlit/releases/tag/openlit-1.20.0 | 来源类型 github_release 暴露的待验证使用条件。\n- Hard boundary: 不要把这个坑点包装成已解决、已验证或可忽略，除非后续验证证据明确证明它已经关闭。\n\n### Constraint 7: 能力判断依赖假设\n\n- Trigger: README/documentation is current enough for a first validation pass.\n- Host AI rule: 将假设转成下游验证清单。\n- Why it matters: 假设不成立时，用户拿不到承诺的能力。\n- Evidence: capability.assumptions | github_repo:747319327 | https://github.com/openlit/openlit | README/documentation is current enough for a first validation pass.\n- Hard boundary: 不要把这个坑点包装成已解决、已验证或可忽略，除非后续验证证据明确证明它已经关闭。\n\n### Constraint 8: 维护活跃度未知\n\n- Trigger: 未记录 last_activity_observed。\n- Host AI rule: 补 GitHub 最近 commit、release、issue/PR 响应信号。\n- Why 
it matters: 新项目、停更项目和活跃项目会被混在一起，推荐信任度下降。\n- Evidence: evidence.maintainer_signals | github_repo:747319327 | https://github.com/openlit/openlit | last_activity_observed missing\n- Hard boundary: 不要把这个坑点包装成已解决、已验证或可忽略，除非后续验证证据明确证明它已经关闭。\n\n### Constraint 9: 下游验证发现风险项\n\n- Trigger: no_demo\n- Host AI rule: 进入安全/权限治理复核队列。\n- Why it matters: 下游已经要求复核，不能在页面中弱化。\n- Evidence: downstream_validation.risk_items | github_repo:747319327 | https://github.com/openlit/openlit | no_demo; severity=medium\n- Hard boundary: 不要把这个坑点包装成已解决、已验证或可忽略，除非后续验证证据明确证明它已经关闭。\n\n### Constraint 10: 存在评分风险\n\n- Trigger: no_demo\n- Host AI rule: 把风险写入边界卡，并确认是否需要人工复核。\n- Why it matters: 风险会影响是否适合普通用户安装。\n- Evidence: risks.scoring_risks | github_repo:747319327 | https://github.com/openlit/openlit | no_demo; severity=medium\n- Hard boundary: 不要把这个坑点包装成已解决、已验证或可忽略，除非后续验证证据明确证明它已经关闭。\n",
      "summary": "给宿主 AI 的上下文和工作边界。",
      "title": "AI Context Pack / 带给我的 AI"
    },
    "boundary_risk_card": {
      "asset_id": "boundary_risk_card",
      "filename": "BOUNDARY_RISK_CARD.md",
      "markdown": "# Boundary & Risk Card / 安装前决策卡\n\n项目：openlit/openlit\n\n## Doramagic 试用结论\n\n当前结论：可以进入发布前推荐检查；首次使用仍应从最小权限、临时目录和可回滚配置开始。\n\n## 用户现在可以做\n\n- 可以先阅读 Human Manual，理解项目目的和主要工作流。\n- 可以复制 Prompt Preview 做安装前体验；这只验证交互感，不代表真实运行。\n- 可以把官方 Quick Start 命令放到隔离环境中验证，不要直接进主力环境。\n\n## 现在不要做\n\n- 不要把 Prompt Preview 当成项目实际运行结果。\n- 不要把 metadata-only validation 当成沙箱安装验证。\n- 不要把未验证能力写成“已支持、已跑通、可放心安装”。\n- 不要在首次试用时交出生产数据、私人文件、真实密钥或主力配置目录。\n\n## 安装前检查\n\n- 宿主 AI 是否匹配：local_cli\n- 官方安装入口状态：已发现官方入口\n- 是否在临时目录、临时宿主或容器中验证：必须是\n- 是否能回滚配置改动：必须能\n- 是否需要 API Key、网络访问、读写文件或修改宿主配置：未确认前按高风险处理\n- 是否记录了安装命令、实际输出和失败日志：必须记录\n\n## 当前阻塞项\n\n- 无阻塞项。\n\n## 项目专属踩坑\n\n- 来源证据：Integration: Governance and compliance signals for LLM observability（medium）：可能增加新用户试用和生产接入成本。 建议检查：来源显示可能已有修复、规避或版本变化，说明书中必须标注适用版本。\n- 来源证据：Proposal: gen_ai.agent.threat_detected span event helper for OTel-shaped detection observability（medium）：可能增加新用户试用和生产接入成本。 建议检查：来源问题仍为 open，Pack Agent 需要复核是否仍影响当前版本。\n- 来源证据：[Bug]: Docker Image doesn't run on windows 64bit（medium）：可能增加新用户试用和生产接入成本。 建议检查：来源显示可能已有修复、规避或版本变化，说明书中必须标注适用版本。\n- 来源证据：openlit-1.19.0（medium）：可能增加新用户试用和生产接入成本。 建议检查：来源显示可能已有修复、规避或版本变化，说明书中必须标注适用版本。\n- 来源证据：controller-0.2.0（medium）：可能影响升级、迁移或版本选择。 建议检查：来源显示可能已有修复、规避或版本变化，说明书中必须标注适用版本。\n\n## 风险与权限提示\n\n- no_demo: medium\n\n## 证据缺口\n\n- 暂未发现结构化证据缺口。\n",
      "summary": "安装、权限、验证和推荐前风险。",
      "title": "Boundary & Risk Card / 边界与风险卡"
    },
    "human_manual": {
      "asset_id": "human_manual",
      "filename": "HUMAN_MANUAL.md",
      "markdown": "# https://github.com/openlit/openlit 项目说明书\n\n生成时间：2026-05-16 21:33:24 UTC\n\n## 目录\n\n- [OpenLIT Overview](#overview)\n- [Quick Start Guide](#quickstart)\n- [System Architecture](#architecture)\n- [Data Flow and Management](#data-flow)\n- [Python SDK Architecture](#python-sdk)\n- [TypeScript SDK Architecture](#typescript-sdk)\n- [Go SDK Architecture](#go-sdk)\n- [LLM and Framework Integrations](#integrations)\n- [OpenLIT Controller](#controller)\n- [GPU Collector](#gpu-collector)\n\n<a id='overview'></a>\n\n## OpenLIT Overview\n\n### 相关页面\n\n相关主题：[Quick Start Guide](#quickstart), [System Architecture](#architecture)\n\n<details>\n<summary>相关源码文件</summary>\n\n以下源码文件用于生成本页说明：\n\n- [src/client/README.md](https://github.com/openlit/openlit/blob/main/src/client/README.md)\n- [src/client/src/app/(playground)/getting-started/page.tsx](https://github.com/openlit/openlit/blob/main/src/client/src/app/(playground)/getting-started/page.tsx)\n- [src/client/src/components/(playground)/getting-started/tracing/index.tsx](https://github.com/openlit/openlit/blob/main/src/client/src/components/(playground)/getting-started/tracing/index.tsx)\n- [src/client/src/components/(playground)/getting-started/secrets/index.tsx](https://github.com/openlit/openlit/blob/main/src/client/src/components/(playground)/getting-started/secrets/index.tsx)\n- [src/client/src/components/(playground)/getting-started/prompts/index.tsx](https://github.com/openlit/openlit/blob/main/src/client/src/components/(playground)/getting-started/prompts/index.tsx)\n</details>\n\n# OpenLIT Overview\n\n## What is OpenLIT?\n\nOpenLIT is an **OpenTelemetry-native GenAI and LLM Application Observability tool** designed to simplify the integration process for sending OpenTelemetry traces and metrics from your LLM applications. 
It provides comprehensive monitoring capabilities for both GenAI and LLM applications.\n\n资料来源：[src/client/src/app/(playground)/getting-started/page.tsx:127]()\n\n## Key Features\n\nOpenLIT offers several core capabilities for observability:\n\n| Feature Category | Description |\n|------------------|-------------|\n| Tracing | Capture detailed traces of LLM application requests |\n| Metrics | Collect and analyze performance metrics |\n| Evaluations | Assess response quality and model performance |\n| Context Management | Manage evaluation contexts and prompts |\n| Secrets Management | Securely store and manage API keys and credentials |\n\n资料来源：[src/client/src/components/(playground)/getting-started/tracing/index.tsx]()\n资料来源：[src/client/src/components/(playground)/getting-started/secrets/index.tsx]()\n资料来源：[src/client/src/components/(playground)/getting-started/prompts/index.tsx]()\n\n## Architecture Overview\n\n```mermaid\ngraph TD\n    A[LLM Application] --> B[OpenLIT SDK]\n    B --> C[OTLP Endpoint<br/>127.0.0.1:4318]\n    C --> D[OpenLIT Backend]\n    D --> E[OpenLIT UI<br/>127.0.0.1:3000]\n    F[Database] <--> D\n```\n\n## SDK Support\n\nOpenLIT provides official SDKs for multiple programming languages:\n\n### Python SDK\n\nThe Python SDK enables Python-based LLM applications to send telemetry data to OpenLIT.\n\n```python\nimport openlit\n\nopenlit.init()\n```\n\n资料来源：[src/client/src/app/(playground)/getting-started/page.tsx]()\n\n### TypeScript/JavaScript SDK\n\nThe TypeScript SDK provides similar functionality for Node.js and browser-based applications.\n\n```typescript\nimport openlit from 'openlit';\n\nopenlit.init({\n  otlpEndpoint: \"http://127.0.0.1:4318\"\n});\n```\n\n**Example Usage with OpenAI:**\n\n```typescript\nimport OpenAI from 'openai';\nimport openlit from 'openlit';\n\nopenlit.init({ otlpEndpoint: \"http://127.0.0.1:4318\" });\n\nconst client = new OpenAI({\n  apiKey: process.env.OPENAI_API_KEY\n});\n\nconst chatCompletion = await 
client.chat.completions.create({\n  messages: [{ role: 'user', content: 'What is LLM Observability?' }],\n  model: 'gpt-3.5-turbo',\n});\n```\n\n资料来源：[src/client/src/components/(playground)/getting-started/tracing/index.tsx]()\n\n## Configuration Options\n\n### OTLP Endpoint Configuration\n\nYou can configure the OTLP endpoint in two ways:\n\n| Method | Configuration |\n|--------|---------------|\n| Code | `openlit.init({ otlpEndpoint: \"http://127.0.0.1:4318\" })` |\n| Environment Variable | `OTEL_EXPORTER_OTLP_ENDPOINT=\"http://127.0.0.1:4318\"` |\n\n资料来源：[src/client/src/app/(playground)/getting-started/page.tsx]()\n\n### Environment Variables\n\n| Variable | Purpose | Default Value |\n|----------|---------|---------------|\n| `OTEL_EXPORTER_OTLP_ENDPOINT` | OTLP collector endpoint | http://127.0.0.1:4318 |\n\n## Deployment\n\n### Docker Compose Deployment\n\nOpenLIT can be deployed using Docker Compose from the root directory:\n\n```bash\ngit clone git@github.com:openlit/openlit.git\ndocker compose up -d\n```\n\n资料来源：[src/client/src/app/(playground)/getting-started/page.tsx]()\n\n### Default Ports\n\n| Service | Default Address |\n|---------|-----------------|\n| OpenLIT UI | http://127.0.0.1:3000 |\n| OTLP Endpoint | http://127.0.0.1:4318 |\n\n## Default Credentials\n\nAfter deployment, access the OpenLIT UI using the following default credentials:\n\n| Field | Default Value |\n|-------|---------------|\n| Email | user@openlit.io |\n| Password | openlituser |\n\n资料来源：[src/client/src/app/(playground)/getting-started/page.tsx]()\n\n## SDK Repository Locations\n\n| SDK | Repository Path |\n|-----|-----------------|\n| Python SDK | `sdk/python` |\n| TypeScript SDK | `sdk/typescript` |\n\n资料来源：[src/client/src/app/(playground)/getting-started/page.tsx]()\n\n## Community and Support\n\nOpenLIT maintains active community channels for support and discussions:\n\n| Platform | Link |\n|----------|------|\n| GitHub | https://github.com/openlit/openlit |\n| Documentation | 
https://docs.openlit.io |\n| Slack | Join via invitation link |\n| X (Twitter) | @openlit_io |\n\n资料来源：[src/client/README.md]()\n\n## Evaluation Features\n\nOpenLIT supports custom evaluation types with configurable prompts and context:\n\n```typescript\n// Evaluation prompt format example\n[Domain Accuracy evaluation context]\nConsider: whether the response aligns with domain-specific knowledge and terminology.\nLook for incorrect use of domain terms, inaccurate domain-specific claims, and deviations from established domain practices.\n```\n\nEvaluations provide the following metrics:\n- **Score**: Numerical rating\n- **Classification**: Categorical classification\n- **Explanation**: Detailed reasoning\n- **Verdict**: Pass/fail determination\n\n资料来源：[src/client/src/app/(playground)/evaluations/types/new/page.tsx]()\n资料来源：[src/client/src/components/(playground)/request/components/evaluations.tsx]()\n\n## Pricing Integration\n\nOpenLIT can calculate costs for LLM usage based on token consumption:\n\n```\ncost = (input_tokens / 1M) × input_price + (output_tokens / 1M) × output_price\n```\n\nThis includes:\n- Input token pricing per million tokens\n- Output token pricing per million tokens\n- Context window size tracking\n\n资料来源：[src/client/src/components/(playground)/chat/chat-settings-form.tsx]()\n\n---\n\n<a id='quickstart'></a>\n\n## Quick Start Guide\n\n### 相关页面\n\n相关主题：[Python SDK Architecture](#python-sdk)\n\n<details>\n<summary>相关源码文件</summary>\n\n以下源码文件用于生成本页说明：\n\n- [src/client/src/components/(playground)/getting-started/tracing/index.tsx](https://github.com/openlit/openlit/blob/main/src/client/src/components/(playground)/getting-started/tracing/index.tsx)\n- [src/client/src/app/(playground)/getting-started/page.tsx](https://github.com/openlit/openlit/blob/main/src/client/src/app/(playground)/getting-started/page.tsx)\n- 
[src/client/src/app/(playground)/agents/no-controller.tsx](https://github.com/openlit/openlit/blob/main/src/client/src/app/(playground)/agents/no-controller.tsx)\n- [src/client/src/app/(playground)/context/new/page.tsx](https://github.com/openlit/openlit/blob/main/src/client/src/app/(playground)/context/new/page.tsx)\n- [src/client/src/app/(playground)/context/[id]/page.tsx](https://github.com/openlit/openlit/blob/main/src/client/src/app/(playground)/context/[id]/page.tsx)\n- [src/client/src/components/(playground)/openground/sdk-usage-dialog.tsx](https://github.com/openlit/openlit/blob/main/src/client/src/components/(playground)/openground/sdk-usage-dialog.tsx)\n- [src/client/src/app/not-found.tsx](https://github.com/openlit/openlit/blob/main/src/client/src/app/not-found.tsx)\n</details>\n\n# Quick Start Guide\n\nOpenLIT is an OpenTelemetry-native GenAI and LLM Application Observability tool designed to simplify the integration of tracing and metrics collection for AI applications. This guide provides comprehensive instructions for deploying OpenLIT and instrumenting your applications using the Python and TypeScript SDKs.\n\n## Prerequisites\n\nBefore beginning, ensure you have the following installed:\n\n| Requirement | Version | Purpose |\n|-------------|---------|---------|\n| Docker | Latest | Container runtime for OpenLIT deployment |\n| Docker Compose | Latest | Orchestration tool |\n| Node.js | 18+ | Required for TypeScript SDK |\n| Python | 3.8+ | Required for Python SDK |\n| npm/pip | Latest | Package managers |\n\n## Deployment Options\n\nOpenLIT can be deployed using multiple methods depending on your infrastructure requirements.\n\n### Docker Compose Deployment\n\nThe recommended approach for local development and testing is Docker Compose.\n\n```bash\ngit clone git@github.com:openlit/openlit.git\ncd openlit\ndocker compose up -d\n```\n\nOnce deployed, access the OpenLIT UI at `http://127.0.0.1:3000` using the default credentials:\n\n- **Email:** 
user@openlit.io\n- **Password:** openlituser\n\n资料来源：[src/client/src/app/(playground)/getting-started/page.tsx:50-55]()\n\n### Controller Deployment\n\nFor infrastructure-level observability, the OpenLIT Controller can be deployed as a system service or containerized application.\n\n#### Linux System Service\n\n```bash\nsudo tee /etc/systemd/system/openlit-controller.service <<EOF\n[Unit]\nDescription=OpenLIT Controller\nAfter=network.target\n\n[Service]\nType=simple\nUser=root\nWorkingDirectory=/opt/openlit\nExecStart=/opt/openlit/openlit-controller\nRestart=always\nRestartSec=5\n\n[Install]\nWantedBy=multi-user.target\nEOF\n\nsystemctl daemon-reload\nsystemctl enable --now openlit-controller\n```\n\n资料来源：[src/client/src/app/(playground)/agents/no-controller.tsx:12-25]()\n\n#### Docker Deployment\n\n```bash\ndocker run -d --privileged --pid=host \\\n  -e OPENLIT_URL=\"<openlit-url>\" \\\n  -e OTEL_EXPORTER_OTLP_ENDPOINT=\"<openlit-url>:4318\" \\\n  -v /proc:/host/proc:ro \\\n  -v /sys/kernel/debug:/sys/kernel/debug:ro \\\n  -v /sys/fs/bpf:/sys/fs/bpf:rw \\\n  -v /var/run/docker.sock:/var/run/docker.sock \\\n  -e OPENLIT_PROC_ROOT=\"/host/proc\" \\\n  ghcr.io/openlit/controller:latest\n```\n\n#### Kubernetes Deployment\n\n```bash\nhelm repo add openlit https://openlit.github.io/helm\nhelm repo update\nhelm upgrade --install openlit openlit/openlit \\\n  --set openlit-controller.enabled=true\n```\n\n资料来源：[src/client/src/app/(playground)/agents/no-controller.tsx:27-45]()\n\n## SDK Integration\n\nOpenLIT provides SDKs for both Python and TypeScript environments to enable application-level observability.\n\n### Python SDK\n\n#### Installation\n\nInstall the Python SDK using pip:\n\n```bash\npip install openlit\n```\n\n资料来源：[src/client/src/app/(playground)/getting-started/page.tsx:85-92]()\n\n#### Initialization\n\nAdd the following initialization code to your application:\n\n```python\nimport 
openlit\n\nopenlit.init(otlp_endpoint=\"http://127.0.0.1:4318\")\n```\n\nAlternatively, set the endpoint using the environment variable:\n\n```bash\nexport OTEL_EXPORTER_OTLP_ENDPOINT=\"http://127.0.0.1:4318\"\n```\n\n#### Complete Example with OpenAI\n\n```python\nimport openlit\nfrom openai import OpenAI\n\nopenlit.init(otlp_endpoint=\"http://127.0.0.1:4318\")\n\nclient = OpenAI(api_key=os.environ.get(\"OPENAI_API_KEY\"))\n\nresponse = client.chat.completions.create(\n    model=\"gpt-3.5-turbo\",\n    messages=[{\"role\": \"user\", \"content\": \"What is LLM Observability?\"}]\n)\n```\n\n资料来源：[src/client/src/components/(playground)/getting-started/tracing/index.tsx:45-65]()\n\n### TypeScript SDK\n\n#### Installation\n\nInstall the TypeScript SDK using npm:\n\n```bash\nnpm install openlit\n```\n\n#### Initialization\n\nAdd the following initialization code to your application:\n\n```typescript\nimport openlit from 'openlit';\n\nopenlit.init({\n  otlpEndpoint: \"http://127.0.0.1:4318\"\n});\n```\n\nAlternatively, set the endpoint using the environment variable `OTEL_EXPORTER_OTLP_ENDPOINT`.\n\n#### Complete Example with OpenAI\n\n```typescript\nimport OpenAI from 'openai';\nimport openlit from 'openlit';\n\nopenlit.init({ otlpEndpoint: \"http://127.0.0.1:4318\" });\n\nconst client = new OpenAI({\n  apiKey: process.env.OPENAI_API_KEY\n});\n\nconst chatCompletion = await client.chat.completions.create({\n  messages: [{ role: 'user', content: 'What is LLM Observability?' 
}],\n  model: 'gpt-3.5-turbo',\n});\n```\n\n资料来源：[src/client/src/components/(playground)/getting-started/tracing/index.tsx:95-120]()\n\n## Configuration Reference\n\n### SDK Configuration Options\n\n| Parameter | Type | Environment Variable | Description |\n|-----------|------|---------------------|-------------|\n| `otlp_endpoint` | string | `OTEL_EXPORTER_OTLP_ENDPOINT` | OTLP exporter endpoint URL |\n| `api_key` | string | `OPENLIT_API_KEY` | API key for authenticated endpoints |\n\n### Controller Environment Variables\n\n| Variable | Description |\n|----------|-------------|\n| `OPENLIT_URL` | Base URL for the OpenLIT instance |\n| `OTEL_EXPORTER_OTLP_ENDPOINT` | OTLP endpoint for trace export |\n| `OPENLIT_API_KEY` | API key for OpenLIT authentication |\n| `OPENLIT_PROC_ROOT` | Root path for process information (default: `/host/proc`) |\n\n## Application Workflow\n\n```mermaid\ngraph TD\n    A[Deploy OpenLIT with Docker Compose] --> B[Access OpenLIT UI]\n    B --> C{Choose Deployment Mode}\n    C -->|Local Development| D[Install SDK in Application]\n    C -->|System-wide| E[Deploy Controller]\n    D --> F[Initialize SDK]\n    F --> G[Instrument LLM Calls]\n    G --> H[View Traces & Metrics in UI]\n    E --> I[Auto-discover Services]\n    I --> J[View Infrastructure Metrics]\n```\n\n## Additional Resources\n\nFor more advanced configurations and use cases, refer to the following repositories:\n\n- [OpenLIT Python SDK](https://github.com/openlit/openlit/tree/main/sdk/python)\n- [OpenLIT TypeScript SDK](https://github.com/openlit/openlit/tree/main/sdk/typescript)\n- [Official Documentation](https://docs.openlit.io)\n- [GitHub Repository](https://github.com/openlit/openlit)\n\n资料来源：[src/client/src/app/(playground)/getting-started/page.tsx:100-115]()\n资料来源：[src/client/src/app/not-found.tsx:20-35]()\n\n---\n\n<a id='architecture'></a>\n\n## System Architecture\n\n### 相关页面\n\n相关主题：[Data Flow and Management](#data-flow), [Python SDK 
Architecture](#python-sdk)\n\n<details>\n<summary>相关源码文件</summary>\n\n以下源码文件用于生成本页说明：\n\n- [src/client/src/lib/platform/clickhouse/clickhouse-client.ts](https://github.com/openlit/openlit/blob/main/src/client/src/lib/platform/clickhouse/clickhouse-client.ts)\n- [docker-compose.yml](https://github.com/openlit/openlit/blob/main/docker-compose.yml)\n- [src/dev-docker-compose.yml](https://github.com/openlit/openlit/blob/main/src/dev-docker-compose.yml)\n</details>\n\n# System Architecture\n\n## Overview\n\nOpenLIT is an **OpenTelemetry-native GenAI and LLM Application Observability tool** designed to simplify the integration of observability into AI applications. The system enables developers to send OpenTelemetry traces and metrics from their LLM applications with minimal configuration changes.\n\nThe architecture follows a distributed microservices pattern with clear separation between data collection (SDK instrumentation), data transmission (OTLP protocol), and data visualization (frontend dashboard).\n\n## High-Level Architecture\n\n```mermaid\ngraph TB\n    subgraph \"Client Applications\"\n        PythonApp[\"Python Application\"]\n        TypeScriptApp[\"TypeScript/JS Application\"]\n    end\n\n    subgraph \"OpenLIT SDKs\"\n        PythonSDK[\"Python SDK<br/>pip install openlit\"]\n        TSSDK[\"TypeScript SDK<br/>npm install openlit\"]\n    end\n\n    subgraph \"Data Transport\"\n        OTLP[\"OTLP Endpoint<br/>:4318\"]\n    end\n\n    subgraph \"OpenLIT Backend\"\n        Frontend[\"Web Dashboard<br/>Port 3000\"]\n        API[\"API Services\"]\n        DB[( \"ClickHouse<br/>Database\" )]\n    end\n\n    PythonApp --> PythonSDK\n    TypeScriptApp --> TSSDK\n    PythonSDK --> OTLP\n    TSSDK --> OTLP\n    OTLP --> API\n    API --> DB\n    Frontend --> API\n```\n\n## Core Components\n\n### SDK Layer\n\nOpenLIT provides language-specific SDKs for instrumenting AI applications:\n\n| SDK | Package Manager | Installation | Repository 
|\n|-----|-----------------|--------------|------------|\n| Python | pip | `pip install openlit` | [sdk/python](https://github.com/openlit/openlit/tree/main/sdk/python) |\n| TypeScript | npm | `npm install openlit` | [sdk/typescript](https://github.com/openlit/openlit/tree/main/sdk/typescript) |\n\n**Python SDK Initialization**\n\n```python\nimport openlit\n\nopenlit.init(otlp_endpoint=\"http://127.0.0.1:4318\")\n```\n资料来源：[src/client/src/app/(playground)/getting-started/page.tsx:73-74]()\n\n**TypeScript SDK Initialization**\n\n```typescript\nimport openlit from 'openlit';\n\nopenlit.init({\n  otlpEndpoint: \"http://127.0.0.1:4318\"\n});\n```\n资料来源：[src/client/src/app/(playground)/getting-started/page.tsx:115-118]()\n\n### Data Transport Layer\n\nThe system uses the **OpenTelemetry Protocol (OTLP)** for transmitting telemetry data:\n\n| Parameter | Default Value | Description |\n|-----------|---------------|-------------|\n| OTLP Endpoint | `http://127.0.0.1:4318` | gRPC/HTTP endpoint for traces |\n| Environment Variable | `OTEL_EXPORTER_OTLP_ENDPOINT` | Alternative endpoint configuration |\n\nThe OTLP endpoint can be configured either programmatically via SDK initialization or through environment variables.\n\n### Backend Services\n\n#### Web Dashboard (Frontend)\n\nThe frontend is a Next.js application providing the user interface for:\n\n- **Tracing View** - Visualize request traces and spans\n- **Agents Management** - Configure and monitor AI agents\n- **Model Management** - Configure AI model providers and pricing\n- **Getting Started** - Onboarding documentation\n- **Chat Interface** - Interactive testing environment\n\nThe application runs on **port 3000** by default and provides a login interface with default credentials:\n\n- **Email:** user@openlit.io\n- **Password:** openlituser\n\n资料来源：[src/client/src/app/(playground)/getting-started/page.tsx:40-44]()\n\n#### Agent Lifecycle Management\n\nOpenLIT supports managing AI agents with lifecycle 
operations:\n\n```mermaid\nstateDiagram-v2\n    [*] --> Starting\n    Starting --> Running\n    Running --> Restarting\n    Restarting --> Running\n    Running --> Stopping\n    Stopping --> [*]\n```\n\nLifecycle actions include:\n- **Start** - Initialize the agent service\n- **Stop** - Terminate with confirmation dialog\n- **Restart** - Restart the agent process\n\n资料来源：[src/client/src/app/(playground)/agents/lifecycle-actions.tsx:1-60]()\n\n### Controller Services\n\nThe OpenLIT Controller provides infrastructure-level observability for containerized and orchestrated environments:\n\n| Deployment Method | Command/Configuration |\n|-------------------|----------------------|\n| Docker | `docker run -d --privileged --pid=host ... ghcr.io/openlit/controller:latest` |\n| Kubernetes | `helm upgrade --install openlit openlit/openlit --set openlit-controller.enabled=true` |\n| Systemd | Service unit file with systemctl enable |\n\n资料来源：[src/client/src/app/(playground)/agents/no-controller.tsx:45-60]()\n\n#### Controller Environment Variables\n\n| Variable | Purpose |\n|----------|---------|\n| `OPENLIT_URL` | Main OpenLIT instance URL |\n| `OTEL_EXPORTER_OTLP_ENDPOINT` | OTLP endpoint for telemetry |\n| `OPENLIT_API_KEY` | Authentication key (optional) |\n| `OPENLIT_PROC_ROOT` | Process root for host monitoring |\n\n## Deployment Architecture\n\n### Docker Compose Deployment\n\nFor development and testing, OpenLIT can be deployed using Docker Compose:\n\n```bash\ngit clone git@github.com:openlit/openlit.git\ncd openlit\ndocker compose up -d\n```\n\n资料来源：[src/client/src/app/(playground)/getting-started/page.tsx:50-55]()\n\n### Multi-Platform Support\n\n```mermaid\ngraph LR\n    subgraph \"Deployment Platforms\"\n        Docker[\"Docker\"]\n        K8s[\"Kubernetes\"]\n        SystemD[\"Systemd\"]\n    end\n\n    subgraph \"Monitoring Targets\"\n        Containers[\"Containers\"]\n        Processes[\"Host Processes\"]\n        Services[\"System Services\"]\n    end\n\n    
Docker --> Containers\n    K8s --> Containers\n    K8s --> Services\n    SystemD --> Services\n    SystemD --> Processes\n```\n\n## Feature Architecture\n\n### Tracing Integration\n\nOpenLIT's tracing feature provides comprehensive observability:\n\n| Feature | Description |\n|---------|-------------|\n| **Auto-Instrumentation** | Automatic capture of LLM calls |\n| **Span Attributes** | Model, provider, token usage, latency |\n| **Context Propagation** | Request tracing across services |\n| **Error Tracking** | Exception and failure monitoring |\n\n资料来源：[src/client/src/components/(playground)/getting-started/tracing/index.tsx:1-100]()\n\n### Agent Schema Capture\n\nThe system captures tool schemas from agents for documentation and analysis:\n\n```typescript\ninterface ToolSchema {\n  name: string;\n  description?: string;\n  schema: object;\n}\n```\n\nSchemas are displayed in an expandable accordion format with JSON visualization.\n\n资料来源：[src/client/src/components/(playground)/agents/tools-card.tsx:35-55]()\n\n### Model Configuration\n\nOpenLIT supports custom model configurations with pricing information:\n\n| Field | Type | Description |\n|-------|------|-------------|\n| `providerName` | string | AI provider name |\n| `modelId` | string | Model identifier |\n| `modelName` | string | Display name |\n| `inputPricePerMToken` | number | Input cost per million tokens |\n| `outputPricePerMToken` | number | Output cost per million tokens |\n| `contextWindow` | number | Maximum context length |\n\n资料来源：[src/client/src/components/(playground)/chat/message-input.tsx:25-45]()\n\n## Data Flow\n\n```mermaid\nsequenceDiagram\n    participant App as Application\n    participant SDK as OpenLIT SDK\n    participant OTLP as OTLP Endpoint\n    participant API as OpenLIT API\n    participant CH as ClickHouse\n    participant UI as Web Dashboard\n\n    App->>SDK: Initialize with config\n    App->>SDK: LLM API Call\n    SDK->>SDK: Capture trace/metrics\n    SDK->>OTLP: Export 
telemetry\n    OTLP->>API: Process spans\n    API->>CH: Store data\n    UI->>API: Query traces\n    API->>UI: Return results\n    UI->>UI: Render dashboard\n```\n\n## Configuration Reference\n\n### SDK Configuration Options\n\n| Parameter | Type | Default | Description |\n|-----------|------|---------|-------------|\n| `otlp_endpoint` | string | `http://127.0.0.1:4318` | OTLP collector endpoint |\n| `service_name` | string | auto-detect | Service identifier |\n| `api_key` | string | none | Authentication for hosted services |\n\n### Environment Variables\n\n| Variable | SDK Support | Description |\n|----------|-------------|-------------|\n| `OTEL_EXPORTER_OTLP_ENDPOINT` | Python, TS | Global OTLP endpoint override |\n| `OPENLIT_API_KEY` | All | API authentication key |\n| `OPENLIT_SERVICE_NAME` | All | Override service name |\n\n## Security Considerations\n\n### Authentication\n\nThe system supports multiple authentication providers:\n\n- **Email/Password** - Local authentication with default credentials\n- **OAuth Providers** - Google and GitHub SSO integration\n\n资料来源：[src/client/src/components/(auth)/auth-form.tsx:1-50]()\n\n### API Security\n\nAPI endpoints are protected and require valid session tokens. 
The controller service supports optional API key authentication:\n\n```bash\n-e OPENLIT_API_KEY=\"your-api-key\"\n```\n\n## Technology Stack\n\n| Layer | Technology |\n|-------|------------|\n| Frontend | Next.js, React, TypeScript, TailwindCSS |\n| SDKs | Python, TypeScript |\n| Telemetry | OpenTelemetry Protocol (OTLP) |\n| Database | ClickHouse |\n| Containerization | Docker, Kubernetes |\n| Service Management | Systemd |\n\n## External Resources\n\n| Resource | URL |\n|----------|-----|\n| Documentation | https://docs.openlit.io |\n| GitHub Repository | https://github.com/openlit/openlit |\n| TypeScript SDK | https://github.com/openlit/openlit/tree/main/sdk/typescript |\n| Python SDK | https://github.com/openlit/openlit/tree/main/sdk/python |\n\n---\n\n*Last updated: Based on repository state at main branch*\n\n---\n\n<a id='data-flow'></a>\n\n## Data Flow and Management\n\n### 相关页面\n\n相关主题：[System Architecture](#architecture), [Python SDK Architecture](#python-sdk)\n\n<details>\n<summary>相关源码文件</summary>\n\n以下源码文件用于生成本页说明：\n\n- [sdk/python/src/openlit/otel/tracing.py](https://github.com/openlit/openlit/blob/main/sdk/python/src/openlit/otel/tracing.py)\n- [sdk/python/src/openlit/otel/metrics.py](https://github.com/openlit/openlit/blob/main/sdk/python/src/openlit/otel/metrics.py)\n- [src/client/src/lib/platform/clickhouse/helpers.ts](https://github.com/openlit/openlit/blob/main/src/client/src/lib/platform/clickhouse/helpers.ts)\n- [src/client/src/lib/platform/request/index.ts](https://github.com/openlit/openlit/blob/main/src/client/src/lib/platform/request/index.ts)\n- [sdk/python/src/openlit/instrumentation/claude_agent_sdk/__init__.py](https://github.com/openlit/openlit/blob/main/sdk/python/src/openlit/instrumentation/claude_agent_sdk/__init__.py)\n- [sdk/python/src/openlit/instrumentation/langgraph/__init__.py](https://github.com/openlit/openlit/blob/main/sdk/python/src/openlit/instrumentation/langgraph/__init__.py)\n- 
[sdk/typescript/src/instrumentation/llamaindex/index.ts](https://github.com/openlit/openlit/blob/main/sdk/typescript/src/instrumentation/llamaindex/index.ts)\n</details>\n\n# Data Flow and Management\n\n## Overview\n\nOpenLIT is an OpenTelemetry-native observability platform designed for GenAI and LLM applications. The data flow architecture encompasses the entire lifecycle of telemetry data—from instrumentation at the application level through processing, storage, and visualization in the frontend UI.\n\nThe system follows a standard OpenTelemetry Collector pattern with platform-specific optimizations for handling GenAI-specific semantic conventions and metrics. Data flows through multiple layers: SDK instrumentation, OTLP export, backend processing, ClickHouse storage, and client-side data management for the playground UI.\n\n## Architecture Overview\n\n```mermaid\ngraph TD\n    subgraph Application_Layer[\"Application Layer\"]\n        PySDK[\"Python SDK\"]\n        TsSDK[\"TypeScript SDK\"]\n    end\n    \n    subgraph Instrumentation[\"Instrumentation\"]\n        LangGraph[\"LangGraph\"]\n        ClaudeAgent[\"Claude Agent SDK\"]\n        LlamaIndex[\"LlamaIndex\"]\n        OpenAI[\"OpenAI\"]\n    end\n    \n    subgraph Export[\"OTLP Export\"]\n        OTLP[\"OTLP Endpoint<br/>:4318\"]\n    end\n    \n    subgraph Backend[\"OpenLIT Backend\"]\n        Processor[\"Data Processor\"]\n        Storage[\"ClickHouse\"]\n    end\n    \n    subgraph Frontend[\"Frontend Client\"]\n        Client[\"Playground UI\"]\n        APIClient[\"API Client\"]\n    end\n    \n    PySDK -->|HTTP/gRPC| OTLP\n    TsSDK -->|HTTP/gRPC| OTLP\n    LangGraph --> PySDK\n    ClaudeAgent --> PySDK\n    OpenAI --> PySDK\n    LlamaIndex --> TsSDK\n    OTLP --> Processor\n    Processor --> Storage\n    Storage --> APIClient\n    APIClient --> Client\n```\n\n## Tracing Data Flow\n\n### Python SDK Tracing Architecture\n\nThe Python SDK provides comprehensive tracing capabilities through the 
OpenTelemetry SDK integration. The tracing module (`tracing.py`) establishes the foundation for all trace collection and export operations.\n\n**Core Tracing Components:**\n\n| Component | Purpose | Location |\n|-----------|---------|----------|\n| `TracerProvider` | Manages trace creation and propagation | `sdk/python/src/openlit/otel/tracing.py` |\n| `SpanProcessor` | Processes individual spans before export | `sdk/python/src/openlit/otel/tracing.py` |\n| `OTLPExporter` | Exports spans to OTLP endpoint | `sdk/python/src/openlit/otel/tracing.py` |\n| `ContextPropagation` | Maintains trace context across async operations | `sdk/python/src/openlit/otel/tracing.py` |\n\nThe tracing initialization follows a standard pattern:\n\n```python\nimport openlit\n\nopenlit.init(otlp_endpoint=\"http://127.0.0.1:4318\")\n```\n\nThis initialization configures the tracer provider with the specified OTLP endpoint, enabling automatic span collection from all instrumented LLM frameworks.\n\n**资料来源：** [sdk/python/src/openlit/otel/tracing.py](https://github.com/openlit/openlit/blob/main/sdk/python/src/openlit/otel/tracing.py)\n\n### Span Lifecycle\n\nSpans are created and managed through a structured lifecycle that ensures complete telemetry capture:\n\n```mermaid\nsequenceDiagram\n    participant App as Application Code\n    participant SDK as OpenLIT SDK\n    participant Inst as Instrumentation\n    participant Exporter as OTLP Exporter\n    participant Backend as OpenLIT Backend\n    \n    App->>Inst: LLM/Framework Call\n    Inst->>SDK: Create Span\n    SDK->>SDK: Set Attributes\n    SDK->>SDK: Record Metrics\n    App->>SDK: Response Received\n    SDK->>SDK: Complete Span\n    SDK->>Exporter: Export Span\n    Exporter->>Backend: OTLP Stream\n```\n\nThe span lifecycle includes:\n1. **Creation**: Span is initialized with parent context\n2. **Attribute Setting**: GenAI-specific attributes (model, tokens, cost) are attached\n3. 
**Timing**: Start and end times are recorded for duration calculation\n4. **Status**: Span status is set based on success/failure\n5. **Export**: Spans are batched and exported to OTLP endpoint\n\n**资料来源：** [sdk/python/src/openlit/instrumentation/langgraph/__init__.py](https://github.com/openlit/openlit/blob/main/sdk/python/src/openlit/instrumentation/langgraph/__init__.py)\n\n### Instrumentation Framework Integration\n\nOpenLIT provides instrumentation for multiple LLM frameworks, each with framework-specific span attributes:\n\n**Supported Instrumentations:**\n\n| Framework | Operations Traced | Semantic Convention |\n|-----------|-------------------|---------------------|\n| OpenAI | chat completions, embeddings | `gen_ai.operation.type` |\n| LangGraph | execution, checkpointing, construction | `framework` + `gen_ai` |\n| Claude Agent SDK | invoke_agent, execute_tool | `gen_ai.operation.type` |\n| LlamaIndex | query_engine, retriever, document | `retrieve` + `framework` |\n\n**LangGraph Instrumentation Pattern:**\n\nThe LangGraph instrumentation wraps execution operations with both sync and async variants:\n\n```python\n# From langgraph/__init__.py\ndef _wrap_execution_operations(self, operations, ...):\n    for module, method, operation_type, sync_type in operations:\n        if sync_type == \"async\":\n            wrapper = async_general_wrap(operation_type, ...)\n        else:\n            wrapper = general_wrap(operation_type, ...)\n```\n\nThis pattern ensures consistent telemetry regardless of whether the underlying framework uses synchronous or asynchronous execution models.\n\n**资料来源：** [sdk/python/src/openlit/instrumentation/langgraph/__init__.py](https://github.com/openlit/openlit/blob/main/sdk/python/src/openlit/instrumentation/langgraph/__init__.py)\n\n## Metrics Data Flow\n\n### Metrics Collection Architecture\n\nThe metrics module handles quantitative measurements that complement trace data. 
Metrics provide aggregated views of system performance, cost, and usage patterns.\n\n**Metrics Data Points:**\n\n| Metric Type | Description | Aggregation |\n|-------------|-------------|-------------|\n| Request Count | Total number of LLM requests | Count |\n| Token Usage | Input/output tokens consumed | Sum |\n| Cost | Calculated cost based on pricing | Sum |\n| Latency | Request duration in milliseconds | Histogram |\n| Error Rate | Failed requests percentage | Ratio |\n\n**资料来源：** [sdk/python/src/openlit/otel/metrics.py](https://github.com/openlit/openlit/blob/main/sdk/python/src/openlit/otel/metrics.py)\n\n### Metric Recording Flow\n\nMetrics are recorded during span processing using the OpenTelemetry Metrics API:\n\n```mermaid\ngraph LR\n    A[LLM Request] --> B[Create Span]\n    B --> C[Extract Request Data]\n    C --> D[Calculate Pricing]\n    D --> E[Record Metrics]\n    E --> F[Complete Span]\n    \n    G[Pricing Info] --> D\n    H[Model Config] --> D\n```\n\nThe metric recording includes:\n- `start_time` and `end_time` for duration calculation\n- `request_model` for token and pricing lookup\n- `environment` and `application_name` for filtering\n- `pricing_info` dictionary for cost calculation\n\n**资料来源：** [sdk/python/src/openlit/instrumentation/openai/async_openai.py](https://github.com/openlit/openlit/blob/main/sdk/python/src/openlit/instrumentation/openai/async_openai.py)\n\n## Client-Side Data Management\n\n### Frontend API Client Architecture\n\nThe frontend client manages data fetching and state management for the playground UI. 
The API client layer provides a typed interface to the backend services.\n\n**API Client Structure:**\n\n```typescript\n// Simplified from request/index.ts\nexport class RequestClient {\n  async fetchTraces(params: TraceParams): Promise<Trace[]>;\n  async fetchMetrics(params: MetricParams): Promise<Metrics>;\n  async fetchSpans(traceId: string): Promise<Span[]>;\n}\n```\n\n**Key Data Operations:**\n\n| Operation | Endpoint | Purpose |\n|-----------|----------|---------|\n| Fetch Traces | `/api/traces` | List traces with filtering |\n| Fetch Spans | `/api/traces/:id/spans` | Get detailed span data |\n| Fetch Metrics | `/api/metrics` | Aggregated metrics data |\n| Export Data | `/api/openground/models/export` | Export pricing data |\n\n**资料来源：** [src/client/src/lib/platform/request/index.ts](https://github.com/openlit/openlit/blob/main/src/client/src/lib/platform/request/index.ts)\n\n### ClickHouse Data Access\n\nThe client uses ClickHouse as the primary data store and accesses it through helper functions that construct and execute queries.\n\n**Query Helper Functions:**\n\n| Function | Purpose |\n|----------|---------|\n| `buildTraceQuery()` | Construct trace listing query |\n| `buildSpanQuery()` | Construct span detail query |\n| `applyFilters()` | Apply time range and attribute filters |\n| `parseResponse()` | Parse ClickHouse response format |\n\n**资料来源：** [src/client/src/lib/platform/clickhouse/helpers.ts](https://github.com/openlit/openlit/blob/main/src/client/src/lib/platform/clickhouse/helpers.ts)\n\n### State Management Pattern\n\nThe frontend uses React Query or similar state management for data fetching:\n\n```mermaid\ngraph TD\n    A[Component Mount] --> B[Trigger Query]\n    B --> C[Show Loading State]\n    C --> D{Request Complete?}\n    D -->|Yes| E[Update Cache]\n    E --> F[Render Data]\n    D -->|No| G[Show Error]\n    G --> H[Retry Option]\n```\n\nThe state management includes:\n- **Loading states**: Visual feedback during data fetch\n- **Error 
 handling**: Graceful degradation on failures\n- **Cache invalidation**: Automatic refresh on mutations\n- **Pagination**: Support for large result sets with \"Load More\" patterns\n\n**资料来源：** [src/client/src/components/(playground)/agents/version-drawer.tsx](https://github.com/openlit/openlit/blob/main/src/client/src/components/(playground)/agents/version-drawer.tsx)\n\n## Timeline View Data Structure\n\n### Span Timeline Rendering\n\nThe timeline view component renders trace data as a visual timeline, parsing span data into a hierarchical structure.\n\n**Span Data Model:**\n\n```typescript\ninterface SpanData {\n  spanId: string;\n  parentSpanId?: string;\n  startTime: number;\n  endTime: number;\n  name: string;\n  kind: 'client' | 'server' | 'producer' | 'consumer';\n  status: 'ok' | 'error';\n  attributes: Record<string, any>;\n  duration: number;\n  cost?: number;\n}\n```\n\n**Timeline Calculation:**\n\n| Column | Width | Content |\n|--------|-------|---------|\n| Name Column | 30% | Span name and kind indicator |\n| Timeline Column | 60% | Visual timeline bar |\n| Stats Column | 10% | Duration and cost |\n\nThe timeline calculates relative positions using `traceWindowMs` to determine the overall trace window, then positions each span proportionally within that window.\n\n**资料来源：** [src/client/src/components/(playground)/request/components/timeline-view.tsx](https://github.com/openlit/openlit/blob/main/src/client/src/components/(playground)/request/components/timeline-view.tsx)\n\n## TypeScript SDK Data Flow\n\n### LlamaIndex Instrumentation\n\nThe TypeScript SDK provides similar capabilities for JavaScript/TypeScript applications, particularly for LlamaIndex integration.\n\n**LlamaIndex Traced Operations:**\n\n| Operation | Semantic Convention | Description |\n|-----------|---------------------|-------------|\n| `document_load` | `retrieve` | Document loading operations |\n| `document_split` | `framework` | Text splitting operations |\n| `retriever_retrieve` 
| `retrieve` | Retrieval operations |\n| `query_engine_query` | `retrieve` | Query execution |\n| `response_synthesize` | `chat` | Response generation |\n\n**资料来源：** [sdk/typescript/src/instrumentation/llamaindex/index.ts](https://github.com/openlit/openlit/blob/main/sdk/typescript/src/instrumentation/llamaindex/index.ts)\n\n### TypeScript Initialization Pattern\n\n```typescript\nimport openlit from 'openlit';\n\n// Initialize with OTLP endpoint\nopenlit.init({\n  otlpEndpoint: \"http://127.0.0.1:4318\"\n});\n\n// Or use environment variable\n// OTEL_EXPORTER_OTLP_ENDPOINT=\"http://127.0.0.1:4318\"\n```\n\n## Environment Configuration\n\n### Data Flow Configuration Options\n\n| Environment Variable | Default | Purpose |\n|---------------------|---------|---------|\n| `OTEL_EXPORTER_OTLP_ENDPOINT` | `http://127.0.0.1:4318` | OTLP endpoint (port 4318 is the OTLP/HTTP default; 4317 is gRPC) |\n| `OTEL_EXPORTER_OTLP_PROTOCOL` | `grpc` | Protocol (grpc/http/protobuf) |\n| `OTEL_SERVICE_NAME` | `default` | Service identification |\n| `OTEL_EXPORTER_OTLP_HEADERS` | - | Authentication headers |\n\n**资料来源：** [src/client/src/app/(playground)/getting-started/page.tsx](https://github.com/openlit/openlit/blob/main/src/client/src/app/(playground)/getting-started/page.tsx)\n\n## Data Management Best Practices\n\n### Efficient Data Handling\n\n1. **Batching**: Spans are batched before export to reduce network overhead\n2. **Sampling**: Configure appropriate sampling rates for high-volume applications\n3. **Filtering**: Apply attribute filters at the query layer to reduce data transfer\n4. 
**Pagination**: Use paginated queries for large result sets\n\n### Error Handling Flow\n\n```mermaid\ngraph TD\n    A[Span Error] --> B[Record Exception]\n    B --> C[Set Span Status ERROR]\n    C --> D[Record Error Metrics]\n    D --> E[Export Span]\n    E --> F{Backend Available?}\n    F -->|Yes| G[Store Data]\n    F -->|No| H[Retry Queue]\n    H -->|Retry| G\n```\n\nThe error handling ensures that even when backend connectivity fails, error information is preserved for debugging.\n\n## Summary\n\nThe data flow in OpenLIT follows a well-structured pipeline from SDK instrumentation through to frontend visualization. Key aspects include:\n\n- **Unified Telemetry**: Both traces and metrics are collected through OpenTelemetry SDKs\n- **Framework Integration**: Multiple LLM frameworks are automatically instrumented\n- **Efficient Export**: OTLP protocol ensures standardized data transfer\n- **Flexible Storage**: ClickHouse provides scalable storage and querying\n- **Responsive UI**: The playground client efficiently fetches and displays telemetry data\n\nThis architecture enables comprehensive observability for GenAI applications while maintaining performance and scalability through batching, caching, and pagination strategies.\n\n---\n\n<a id='python-sdk'></a>\n\n## Python SDK Architecture\n\n### 相关页面\n\n相关主题：[TypeScript SDK Architecture](#typescript-sdk), [Go SDK Architecture](#go-sdk), [LLM and Framework Integrations](#integrations)\n\n<details>\n<summary>相关源码文件</summary>\n\n以下源码文件用于生成本页说明：\n\n- [sdk/python/src/openlit/__init__.py](https://github.com/openlit/openlit/blob/main/sdk/python/src/openlit/__init__.py)\n- [sdk/python/src/openlit/instrumentation/claude_agent_sdk/__init__.py](https://github.com/openlit/openlit/blob/main/sdk/python/src/openlit/instrumentation/claude_agent_sdk/__init__.py)\n- 
[sdk/python/src/openlit/instrumentation/claude_agent_sdk/claude_agent_sdk.py](https://github.com/openlit/openlit/blob/main/sdk/python/src/openlit/instrumentation/claude_agent_sdk/claude_agent_sdk.py)\n- [sdk/python/src/openlit/instrumentation/agent_framework/utils.py](https://github.com/openlit/openlit/blob/main/sdk/python/src/openlit/instrumentation/agent_framework/utils.py)\n- [sdk/python/src/openlit/guard/__init__.py](https://github.com/openlit/openlit/blob/main/sdk/python/src/openlit/guard/__init__.py)\n- [sdk/python/src/openlit/instrumentation/google_adk/utils.py](https://github.com/openlit/openlit/blob/main/sdk/python/src/openlit/instrumentation/google_adk/utils.py)\n</details>\n\n# Python SDK Architecture\n\n## 概述\n\nOpenLIT Python SDK 是一个 OpenTelemetry 原生的 GenAI 和 LLM 应用可观测性工具。该 SDK 通过自动插桩框架集成到各种 AI 应用中，自动捕获 OpenTelemetry traces 和 metrics，无需手动埋点。\n\n核心职责包括：\n\n- 自动插桩主流 AI SDK（OpenAI、Anthropic、LangChain、CrewAI 等）\n- 遵循 OTel GenAI 语义约定（Semantic Conventions）\n- 提供基于 OpenTelemetry 的 tracing 和 metrics 收集\n- 实现生产级 guardrails（内容安全、审计）\n\n资料来源：[sdk/python/src/openlit/instrumentation/claude_agent_sdk/__init__.py:1-15]()\n\n## 核心架构组件\n\n```mermaid\ngraph TD\n    subgraph \"OpenLIT Python SDK\"\n        A[\"openlit.init()\"]\n        B[\"Instrumentors<br/>BaseInstrumentor\"]\n        C[\"Guard System\"]\n        D[\"OTel Layer\"]\n    end\n    \n    subgraph \"Instrumented Frameworks\"\n        E[\"OpenAI\"]\n        F[\"Anthropic\"]\n        G[\"Claude Agent SDK\"]\n        H[\"LangChain / CrewAI\"]\n        I[\"Google ADK\"]\n        J[\"Agent Framework\"]\n    end\n    \n    subgraph \"OpenTelemetry Backend\"\n        K[\"OTLP Exporter\"]\n        L[\"Traces\"]\n        M[\"Metrics\"]\n    end\n    \n    A --> B\n    A --> C\n    B --> D\n    C --> D\n    D --> K\n    K --> L\n    K --> M\n    \n    B --> E\n    B --> F\n    B --> G\n    B --> H\n    B --> I\n    B --> J\n```\n\n### 组件说明\n\n| 组件 | 位置 | 职责 |\n|------|------|------|\n| **Instrumentors** | 
`openlit.instrumentation.*` | 各 AI 框架的自动插桩实现 |\n| **Guard System** | `openlit.guard.*` | 内容安全、审计和合规检查 |\n| **OTel Layer** | `openlit.otel.*` | OpenTelemetry traces 和 metrics 的核心实现 |\n| **Config** | `openlit._config` | 全局配置管理和指标字典 |\n| **Semcov** | `openlit.semcov` | GenAI 语义约定常量定义 |\n\n## 初始化流程\n\n### Python SDK 初始化\n\n```python\nimport openlit\n\nopenlit.init(otlp_endpoint=\"http://127.0.0.1:4318\")\n```\n\n初始化时 SDK 执行以下操作：\n\n1. 配置 OpenTelemetry tracer provider\n2. 加载全局配置（环境、应用名称、指标开关）\n3. 注入所有依赖的 instrumentors\n4. 初始化 guard pipeline（如配置）\n\n资料来源：[sdk/python/src/openlit/instrumentation/claude_agent_sdk/__init__.py:30-42]()\n\n### 配置参数\n\n| 参数 | 类型 | 默认值 | 说明 |\n|------|------|--------|------|\n| `otlp_endpoint` | str | `\"http://127.0.0.1:4318\"` | OTLP gRPC endpoint |\n| `environment` | str | `\"default\"` | 部署环境标识 |\n| `application_name` | str | `\"default\"` | 应用名称 |\n| `pricing_info` | dict | `{}` | 模型定价信息 |\n| `capture_message_content` | bool | `False` | 是否捕获消息内容 |\n| `metrics` | dict | None | 指标配置字典 |\n| `disable_metrics` | bool | None | 禁用指标收集 |\n| `guards` | list | None | Guard 配置列表 |\n\n## 插桩系统架构\n\n### BaseInstrumentor 模式\n\n所有框架插桩器继承自 `BaseInstrumentor`，采用统一模式：\n\n```python\nclass ClaudeAgentSDKInstrumentor(BaseInstrumentor):\n    def instrumentation_dependencies(self) -> Collection[str]:\n        return _instruments  # 如 (\"claude-agent-sdk >= 0.1.0\",)\n    \n    def _instrument(self, **kwargs):\n        # 1. 获取 tracer 和配置\n        tracer = trace.get_tracer(__name__)\n        \n        # 2. 
使用 wrapt 包装目标函数\n        wrap_function_wrapper(\n            \"module.path\",\n            \"function_name\",\n            wrap_query\n        )\n```\n\n资料来源：[sdk/python/src/openlit/instrumentation/claude_agent_sdk/__init__.py:27-45]()\n\n### 插桩覆盖范围\n\n| 框架 | 支持版本 | 追踪操作 |\n|------|----------|----------|\n| Claude Agent SDK | >= 0.1.0 | `invoke_agent`, `execute_tool` |\n| Google ADK | - | `execute_tool` |\n| Agent Framework | - | `agent_init`, `agent_run`, `tool_execute`, `workflow_run` |\n| CrewAI | - | Agent 和 Tool 调用 |\n| LangGraph | - | Graph 节点执行 |\n\n### Span 命名规范\n\n遵循 OTel GenAI 语义约定生成规范化的 span 名称：\n\n| 操作类型 | Span 名称格式 | 示例 |\n|----------|---------------|------|\n| Agent 创建 | `create_agent {name}` | `create_agent my_agent` |\n| Agent 调用 | `invoke_agent {name}` | `invoke_agent my_agent` |\n| Tool 执行 | `execute_tool {name}` | `execute_tool calculator` |\n| Workflow | `invoke_workflow {name}` | `invoke_workflow pipeline` |\n\n资料来源：[sdk/python/src/openlit/instrumentation/agent_framework/utils.py:1-60]()\n\n### 语义约定属性\n\n所有 span 遵循 `gen_ai.*` 语义约定：\n\n| 属性键 | 说明 | 示例值 |\n|--------|------|--------|\n| `gen_ai.operation.name` | 操作类型 | `invoke_agent`, `execute_tool` |\n| `gen_ai.operation.type` | 操作分类 | `agent`, `tool` |\n| `gen_ai.system` | AI 系统 | `openai`, `anthropic`, `google.adk` |\n| `gen_ai.provider.name` | 提供商名称 | `google` |\n| `gen_ai.tool.name` | 工具名称 | `calculator` |\n| `gen_ai.tool.type` | 工具类型 | `function` |\n| `gen_ai.tool.description` | 工具描述 | Truncated 描述文本 |\n| `gen_ai.tool.call.arguments` | 工具调用参数 | JSON 字符串 |\n\n资料来源：[sdk/python/src/openlit/instrumentation/google_adk/utils.py:1-50]()\n\n## Guard 系统\n\nOpenLIT 提供生产级 guardrails 用于 LLM 应用安全：\n\n```python\nimport openlit\n\nopenlit.init(guards=[openlit.PII(action=\"redact\")])\n```\n\n### 可用 Guard 类型\n\n| Guard 类 | 位置 | 功能 |\n|----------|------|------|\n| `PII` | `openlit.guard.pii` | 个人身份信息检测和脱敏 |\n| `PromptInjection` | `openlit.guard.prompt_injection` | 提示注入攻击检测 |\n| `SensitiveTopic` | 
`openlit.guard.sensitive_topic` | 敏感话题检测 |\n| `TopicRestriction` | `openlit.guard.topic_restriction` | 话题限制 |\n| `Moderation` | `openlit.guard.moderation` | 内容审核 |\n| `Schema` | `openlit.guard.schema` | 输出结构验证 |\n| `Custom` | `openlit.guard.custom` | 自定义 guard 逻辑 |\n\n### Guard 核心类型\n\n```python\nfrom openlit.guard import (\n    Guard,\n    GuardAction,\n    GuardConfigError,\n    GuardDeniedError,\n    GuardPhase,\n    GuardResult,\n    GuardTimeoutError,\n    PipelineResult,\n)\n```\n\n| 类型 | 说明 |\n|------|------|\n| `Guard` | Base guard 类 |\n| `GuardAction` | Guard 执行动作 |\n| `GuardPhase` | 执行阶段（pre/post） |\n| `GuardResult` | Guard 执行结果 |\n| `PipelineResult` | Pipeline 聚合结果 |\n\n资料来源：[sdk/python/src/openlit/guard/__init__.py:1-60]()\n\n### Pipeline 机制\n\nGuard 使用 Pipeline 模式按序执行多个 guard：\n\n```python\nfrom openlit.guard import Pipeline\n\npipeline = Pipeline([\n    PII(action=\"redact\"),\n    PromptInjection(threshold=0.8),\n    Moderation()\n])\n```\n\n## Claude Agent SDK 插桩详解\n\n### 架构设计\n\n```mermaid\nsequenceDiagram\n    participant User as User Code\n    participant SDK as Claude Agent SDK\n    participant Wrap as wrap_query\n    participant Hook as _ToolSpanTracker\n    participant Span as OTel Span\n    \n    User->>SDK: query(...)\n    SDK->>Wrap: invoke wrapper\n    Wrap->>Span: create invoke_agent span\n    Wrap->>SDK: proceed with query\n    SDK->>Hook: PreToolUse event\n    Hook->>Span: create execute_tool span\n    SDK->>Hook: PostToolUse event\n    Hook->>Span: finalize tool span\n    SDK-->>Wrap: response\n    Wrap->>Span: finalize agent span\n    Wrap-->>User: return response\n```\n\n### Tool Span 追踪\n\n使用 `_ToolSpanTracker` 管理 in-flight tool spans：\n\n```python\nclass _ToolSpanTracker:\n    \"\"\"Manages in-flight tool spans created by SDK hooks.\"\"\"\n    \n    def __init__(\n        self,\n        tracer,\n        parent_span,\n        version,\n        environment,\n        application_name,\n        capture_message_content\n    ):\n        
# 初始化追踪器\n```\n\n### Fallback 机制\n\n当 SDK hooks 无法注入时，使用消息流回退方案：\n\n```python\n# 检查 hooks 是否已注入\nif hasattr(client, _HOOKS_INJECTED_ATTR):\n    # 使用 hooks 追踪\nelse:\n    # 使用消息流追踪\n```\n\n资料来源：[sdk/python/src/openlit/instrumentation/claude_agent_sdk/claude_agent_sdk.py:1-80]()\n\n## OpenTelemetry 集成\n\n### Tracing 实现\n\nSDK 使用 OpenTelemetry Python API 创建 spans：\n\n```python\nfrom opentelemetry import trace as trace_api\nfrom opentelemetry.trace import SpanKind, Status, StatusCode\n\ntracer = trace.get_tracer(__name__)\n\nwith tracer.start_as_current_span(\n    name=\"invoke_agent\",\n    kind=SpanKind.CLIENT\n) as span:\n    span.set_attribute(...)\n    # 执行操作\n```\n\n### Metrics 实现\n\n支持以下指标类型：\n\n| 指标类型 | 指标名称 | 说明 |\n|----------|----------|------|\n| Counter | `gen_ai.*.token_usage` | Token 使用计数 |\n| Histogram | `gen_ai.*.duration` | 请求耗时分布 |\n| Gauge | - | 当前活跃请求数 |\n\n### 语义约定常量\n\n所有语义约定常量集中定义在 `openlit.semcov` 模块：\n\n```python\nclass SemanticConvention:\n    GEN_AI_OPERATION = \"gen_ai.operation.name\"\n    GEN_AI_SYSTEM = \"gen_ai.system\"\n    GEN_AI_TOOL_NAME = \"gen_ai.tool.name\"\n    GEN_AI_TOOL_TYPE = \"gen_ai.tool.type\"\n    GEN_AI_SYSTEM_VALUE = \"gen_ai.system.openai\"\n```\n\n## 错误处理\n\n### Exception 传播\n\nSDK 使用统一的异常处理机制：\n\n```python\nfrom openlit.__helpers import handle_exception\n\ndef some_wrapper(func, *args, **kwargs):\n    try:\n        return func(*args, **kwargs)\n    except Exception as e:\n        handle_exception(span, e)\n        raise\n```\n\n### Guard 特定错误\n\n| 错误类型 | 说明 |\n|----------|------|\n| `GuardError` | 基础 guard 错误 |\n| `GuardDeniedError` | Guard 拒绝请求 |\n| `GuardTimeoutError` | Guard 执行超时 |\n| `GuardConfigError` | Guard 配置错误 |\n\n## 使用示例\n\n### 基础集成\n\n```python\nfrom openai import OpenAI\nimport openlit\n\nopenlit.init(otlp_endpoint=\"http://127.0.0.1:4318\")\n\nclient = OpenAI(api_key=\"YOUR_OPENAI_KEY\")\n\nchat_completion = client.chat.completions.create(\n    messages=[{\"role\": \"user\", \"content\": \"Hello!\"}],\n 
   model=\"gpt-3.5-turbo\"\n)\n```\n\n### 带 Guard 的集成\n\n```python\nimport openlit\nfrom openlit.guard import PII, PromptInjection\n\nopenlit.init(\n    otlp_endpoint=\"http://127.0.0.1:4318\",\n    guards=[\n        PII(action=\"redact\"),\n        PromptInjection(threshold=0.7)\n    ]\n)\n```\n\n### 环境变量配置\n\n```bash\nexport OTEL_EXPORTER_OTLP_ENDPOINT=\"http://127.0.0.1:4318\"\n```\n\n```python\nimport openlit\n\nopenlit.init()  # 自动读取环境变量\n```\n\n## 扩展开发\n\n### 自定义 Instrumentor\n\n```python\nfrom opentelemetry.instrumentation.instrumentor import BaseInstrumentor\nfrom wrapt import wrap_function_wrapper\n\nclass CustomSDKInstrumentor(BaseInstrumentor):\n    def instrumentation_dependencies(self):\n        return (\"custom-sdk >= 1.0.0\",)\n    \n    def _instrument(self, **kwargs):\n        tracer = kwargs.get(\"tracer\")\n        wrap_function_wrapper(\n            \"custom_sdk\",\n            \"Client.query\",\n            wrap_custom_query\n        )\n```\n\n### 自定义 Guard\n\n```python\nfrom openlit.guard import Guard, GuardAction, GuardResult\n\nclass CustomGuard(Guard):\n    def _evaluate(self, text: str) -> GuardResult:\n        # 自定义检测逻辑\n        if \"forbidden\" in text.lower():\n            return GuardResult(\n                action=GuardAction.DENY,\n                reason=\"Forbidden content detected\"\n            )\n        return GuardResult(action=GuardAction.ALLOW)\n```\n\n---\n\n<a id='typescript-sdk'></a>\n\n## TypeScript SDK Architecture\n\n### 相关页面\n\n相关主题：[Python SDK Architecture](#python-sdk), [Go SDK Architecture](#go-sdk), [LLM and Framework Integrations](#integrations)\n\n<details>\n<summary>相关源码文件</summary>\n\n以下源码文件用于生成本页说明：\n\n- [sdk/typescript/src/index.ts](https://github.com/openlit/openlit/blob/main/sdk/typescript/src/index.ts)\n- [sdk/typescript/src/config.ts](https://github.com/openlit/openlit/blob/main/sdk/typescript/src/config.ts)\n- 
[sdk/typescript/src/instrumentation/index.ts](https://github.com/openlit/openlit/blob/main/sdk/typescript/src/instrumentation/index.ts)\n- [sdk/typescript/src/guard/index.ts](https://github.com/openlit/openlit/blob/main/sdk/typescript/src/guard/index.ts)\n- [sdk/typescript/package.json](https://github.com/openlit/openlit/blob/main/sdk/typescript/package.json)\n\n</details>\n\n# TypeScript SDK Architecture\n\n## Overview\n\nThe OpenLIT TypeScript SDK provides an OpenTelemetry-native observability solution for GenAI and LLM applications. It enables developers to instrument their TypeScript/JavaScript applications with automatic tracing and metrics collection, forwarding telemetry data to OpenLIT or any OTLP-compatible backend.\n\n**Key Characteristics:**\n\n| Attribute | Value |\n|-----------|-------|\n| Package Name | `openlit` |\n| Installation | `npm install openlit` |\n| Entry Point | `sdk/typescript/src/index.ts` |\n| Primary Dependency | OpenTelemetry SDK |\n| Transport Protocol | OTLP (OpenTelemetry Protocol) |\n\n资料来源：[sdk/typescript/package.json](https://github.com/openlit/openlit/blob/main/sdk/typescript/package.json)\n\n## Core Architecture\n\nThe SDK follows a modular architecture with clear separation of concerns:\n\n```mermaid\ngraph TD\n    A[Application Code] --> B[openlit.init]\n    B --> C[Config Module]\n    C --> D[Instrumentation Module]\n    D --> E[Guard Module]\n    E --> F[OTLP Exporter]\n    F --> G[OpenLIT Backend / OTEL Collector]\n    \n    C --> C1[OTLP Endpoint]\n    C --> C2[Custom Attributes]\n    C --> C3[Service Name]\n    \n    D --> D1[LLM Instrumentation]\n    D --> D2[Vector DB Instrumentation]\n    D --> D3[Framework Hooks]\n```\n\n### Entry Point Module\n\nThe main entry point (`index.ts`) exposes a simple initialization API:\n\n```typescript\nimport openlit from 'openlit';\n\nopenlit.init({\n  otlpEndpoint: 
\"http://127.0.0.1:4318\"\n});\n```\n\n资料来源：[sdk/typescript/src/index.ts](https://github.com/openlit/openlit/blob/main/sdk/typescript/src/index.ts)\n\n### Configuration Module\n\nThe config module (`config.ts`) handles SDK configuration including:\n\n| Parameter | Type | Default | Description |\n|-----------|------|---------|-------------|\n| `otlpEndpoint` | `string` | Environment variable `OTEL_EXPORTER_OTLP_ENDPOINT` | OTLP-compatible endpoint URL |\n| `serviceName` | `string` | Application-defined | Name of the instrumented service |\n| `resourceAttributes` | `Record<string, string>` | `{}` | Custom resource attributes |\n\n资料来源：[sdk/typescript/src/config.ts](https://github.com/openlit/openlit/blob/main/sdk/typescript/src/config.ts)\n\n## Instrumentation Subsystem\n\nThe instrumentation module (`instrumentation/index.ts`) provides automatic observability for AI workloads:\n\n### Supported Integrations\n\n| Category | Instrumented Components |\n|----------|-------------------------|\n| LLM Providers | OpenAI, Anthropic, Azure OpenAI, Google AI, AWS Bedrock, Cohere, Ollama |\n| Vector Databases | ChromaDB, Pinecone, Weaviate, Qdrant, Milvus, PGVector |\n| Frameworks | LangChain, LlamaIndex, LangFlow, AutoGen |\n\n资料来源：[sdk/typescript/src/instrumentation/index.ts](https://github.com/openlit/openlit/blob/main/sdk/typescript/src/instrumentation/index.ts)\n\n### Tracing Capabilities\n\nThe SDK automatically captures:\n\n- **LLM Request/Response traces** with prompt and completion data\n- **Token usage metrics** (prompt tokens, completion tokens, total tokens)\n- **Latency measurements** for API calls\n- **Embeddings generation traces** with vector dimensions\n- **Tool/function calling traces** with parameters and results\n\n## Guard Module\n\nThe guard module (`guard/index.ts`) provides safety and compliance features:\n\n```typescript\nimport { openlit } from 'openlit';\n\n// Initialize with guardrails\nopenlit.init({\n  otlpEndpoint: 
\"http://127.0.0.1:4318\"\n});\n```\n\nGuard capabilities include:\n\n- Input/output validation for LLM interactions\n- Content filtering hooks\n- Rate limiting enforcement\n- Custom rule application\n\n资料来源：[sdk/typescript/src/guard/index.ts](https://github.com/openlit/openlit/blob/main/sdk/typescript/src/guard/index.ts)\n\n## Initialization Flow\n\n```mermaid\nsequenceDiagram\n    participant App as Application\n    participant SDK as OpenLIT SDK\n    participant Config as Config Module\n    participant Inst as Instrumentation\n    participant OTEL as OTEL SDK\n    \n    App->>SDK: openlit.init(options)\n    SDK->>Config: Validate & merge config\n    Config->>Config: Check env vars\n    Config-->>SDK: Resolved config\n    SDK->>OTEL: Initialize OTEL SDK\n    SDK->>Inst: Register instrumentations\n    Inst->>OTEL: Add span processors\n    OTEL-->>SDK: Ready\n    SDK-->>App: Initialization complete\n```\n\n## Environment Variable Support\n\nThe SDK supports configuration via environment variables as an alternative to programmatic configuration:\n\n| Environment Variable | Description |\n|----------------------|-------------|\n| `OTEL_EXPORTER_OTLP_ENDPOINT` | OTLP endpoint URL |\n| `OTEL_SERVICE_NAME` | Service name for traces |\n\n资料来源：[src/client/src/components/(playground)/getting-started/tracing/index.tsx:42](https://github.com/openlit/openlit/blob/main/src/client/src/components/(playground)/getting-started/tracing/index.tsx)\n\n## Usage Patterns\n\n### Basic Initialization\n\n```typescript\nimport openlit from 'openlit';\n\nopenlit.init({\n  otlpEndpoint: \"http://127.0.0.1:4318\"\n});\n```\n\n### OpenAI Integration Example\n\n```typescript\nimport OpenAI from 'openai';\nimport openlit from 'openlit';\n\nopenlit.init({ otlpEndpoint: \"http://127.0.0.1:4318\" });\n\nconst client = new OpenAI({\n  apiKey: process.env.OPENAI_API_KEY\n});\n\nconst chatCompletion = await client.chat.completions.create({\n  messages: [{ role: 'user', content: 'What is LLM 
Observability?' }],\n  model: 'gpt-3.5-turbo',\n});\n```\n\n资料来源：[src/client/src/components/(playground)/getting-started/tracing/index.tsx:28-39](https://github.com/openlit/openlit/blob/main/src/client/src/components/(playground)/getting-started/tracing/index.tsx)\n\n## Package Dependencies\n\nKey dependencies in `package.json`:\n\n```json\n{\n  \"dependencies\": {\n    \"@opentelemetry/sdk-node\": \"^0.50.0\",\n    \"@opentelemetry/exporter-trace-otlp-http\": \"^0.50.0\",\n    \"@opentelemetry/resources\": \"^1.22.0\",\n    \"@opentelemetry/semantic-conventions\": \"^1.22.0\"\n  }\n}\n```\n\n资料来源：[sdk/typescript/package.json](https://github.com/openlit/openlit/blob/main/sdk/typescript/package.json)\n\n## Design Principles\n\n1. **Zero-Configuration Defaults**: The SDK works out-of-the-box with sensible defaults\n2. **OpenTelemetry Native**: Built on OTEL SDK for vendor-agnostic telemetry export\n3. **Automatic Instrumentation**: No code changes required for supported libraries\n4. **Environment Variable Fallback**: Configuration can be entirely environment-based\n5. **Minimal Footprint**: Instrumentation adds minimal latency overhead\n\n## Summary\n\nThe OpenLIT TypeScript SDK architecture provides a developer-friendly interface for adding observability to GenAI applications. By abstracting OpenTelemetry complexity and providing automatic instrumentation for popular LLM providers and vector databases, it enables comprehensive telemetry collection with minimal configuration. 
The SDK exports all data via OTLP, ensuring compatibility with OpenLIT's backend as well as any other OTEL-compatible observability platform.\n\n---\n\n<a id='go-sdk'></a>\n\n## Go SDK Architecture\n\n### 相关页面\n\n相关主题：[Python SDK Architecture](#python-sdk), [TypeScript SDK Architecture](#typescript-sdk)\n\n<details>\n<summary>相关源码文件</summary>\n\n以下源码文件用于生成本页说明：\n\n- [sdk/go/README.md](https://github.com/openlit/openlit/blob/main/sdk/go/README.md)\n- [sdk/go/go.mod](https://github.com/openlit/openlit/blob/main/sdk/go/go.mod)\n</details>\n\n# Go SDK Architecture\n\n## Overview\n\nThe OpenLIT Go SDK is a lightweight instrumentation library that enables observability for GenAI applications built with Go. It provides automatic tracing and metrics collection for LLM calls, supporting OpenAI and Anthropic providers out of the box. The SDK follows OpenTelemetry-native principles, allowing seamless integration with the OpenLIT observability platform.\n\n## Core Components\n\nThe Go SDK is organized into several key packages:\n\n| Component | Purpose |\n|-----------|---------|\n| `openlit` | Core initialization, configuration, and shutdown |\n| `openlit.Config` | Central configuration struct for SDK settings |\n| `openlit.EvaluateRule()` | Standalone rule engine evaluation function |\n| `instrumentation/openai` | OpenAI client instrumentation |\n| `instrumentation/anthropic` | Anthropic client instrumentation |\n\n## Initialization Flow\n\nThe SDK must be initialized before instrumenting any LLM clients. 
The initialization process configures the OTLP endpoint and establishes the connection to the OpenLIT backend.\n\n```go\nerr := openlit.Init(openlit.Config{\n    OtlpEndpoint:    \"http://127.0.0.1:4318\",\n    Environment:     \"production\",\n    ApplicationName: \"my-go-app\",\n})\nif err != nil {\n    log.Fatalf(\"Failed to initialize OpenLIT: %v\", err)\n}\ndefer openlit.Shutdown(context.Background())\n```\n\n资料来源：[sdk/go/README.md](https://github.com/openlit/openlit/blob/main/sdk/go/README.md)\n\n## Configuration Options\n\nThe `openlit.Config` struct provides the following configuration parameters:\n\n| Parameter | Type | Description |\n|-----------|------|-------------|\n| `OtlpEndpoint` | `string` | OTLP collector endpoint (default: `http://127.0.0.1:4318`) |\n| `Environment` | `string` | Deployment environment name |\n| `ApplicationName` | `string` | Application identifier for grouping traces |\n| `PricingInfo` | `map[string]ModelPricing` | Custom pricing configuration per model |\n| `OtlpHeaders` | `map[string]string` | Custom headers for OTLP exports |\n\n### Custom Pricing Configuration\n\nThe SDK supports custom pricing information for models that require non-default cost calculations:\n\n```go\nconfig := openlit.Config{\n    PricingInfo: map[string]openlit.ModelPricing{\n        \"gpt-4-custom\": {\n            InputCostPerToken:  0.00003,\n            OutputCostPerToken: 0.00006,\n        },\n    },\n}\n```\n\n资料来源：[sdk/go/README.md](https://github.com/openlit/openlit/blob/main/sdk/go/README.md)\n\n### Custom Headers for OTLP Exports\n\nAuthentication and custom headers can be added to OTLP exports:\n\n```go\nconfig := openlit.Config{\n    OtlpHeaders: map[string]string{\n        \"Authorization\": \"Bearer token\",\n        \"X-Custom-Header\": \"value\",\n    },\n}\n```\n\n资料来源：[sdk/go/README.md](https://github.com/openlit/openlit/blob/main/sdk/go/README.md)\n\n## Instrumentation Architecture\n\nThe SDK uses a decorator/wrapper pattern for 
instrumenting LLM clients. This approach allows automatic tracing without modifying the original client interface.\n\n```mermaid\ngraph TD\n    A[User Application] --> B[Instrumented Client]\n    B --> C[Original SDK Client]\n    B --> D[OpenLIT Tracer]\n    D --> E[OTLP Exporter]\n    E --> F[OpenLIT Backend]\n    C --> G[LLM Provider API]\n    G --> C\n```\n\n### OpenAI Instrumentation\n\nThe OpenAI instrumentation wraps the `sashabaranov/go-openai` client:\n\n```go\nimport (\n    \"github.com/openlit/openlit/sdk/go/instrumentation/openai\"\n    openai_sdk \"github.com/sashabaranov/go-openai\"\n)\n\n// Create and instrument OpenAI client\nclient := openai_sdk.NewClient(\"your-api-key\")\ninstrumentedClient := openai.Instrument(client)\n\n// Use as normal - automatically traced!\nresp, err := instrumentedClient.CreateChatCompletion(ctx, openai_sdk.ChatCompletionRequest{\n    Model: openai_sdk.GPT4,\n    Messages: []openai_sdk.ChatCompletionMessage{\n        {\n            Role:    openai_sdk.ChatMessageRoleUser,\n            Content: \"Hello!\",\n        },\n    },\n})\n```\n\n资料来源：[sdk/go/README.md](https://github.com/openlit/openlit/blob/main/sdk/go/README.md)\n\n### Anthropic Instrumentation\n\nThe Anthropic instrumentation follows the same pattern:\n\n```go\nimport (\n    \"github.com/openlit/openlit/sdk/go/instrumentation/anthropic\"\n)\n\n// Create and instrument Anthropic client\nclient := anthropic.NewClient(\"your-api-key\")\ninstrumentedClient := anthropic.Instrument(client)\n```\n\n## Rule Engine Integration\n\nThe SDK provides a standalone rule evaluation function that does not require initialization:\n\n```go\n// EvaluateRule does NOT require openlit.Init()\nrules, err := openlit.EvaluateRule(ctx, &openlit.EvaluateRuleRequest{\n    TraceAttributes: attributes,\n})\n```\n\nThis function evaluates trace attributes against the OpenLIT Rule Engine to retrieve matching rules and associated entities including contexts, prompts, and evaluation 
configurations.\n\n资料来源：[sdk/go/README.md](https://github.com/openlit/openlit/blob/main/sdk/go/README.md)\n\n## Integration with OpenLIT Dashboard\n\nThe complete observability workflow involves:\n\n1. **Start OpenLIT Stack**: Deploy using Docker Compose\n   ```bash\n   docker compose up -d\n   ```\n\n2. **Configure SDK**: Initialize the Go SDK with the OTLP endpoint\n   ```go\n   openlit.Init(openlit.Config{\n       OtlpEndpoint: \"http://localhost:4318\",\n   })\n   ```\n\n3. **View Traces**: Access the dashboard at `http://localhost:3000`\n\n资料来源：[sdk/go/README.md](https://github.com/openlit/openlit/blob/main/sdk/go/README.md)\n\n## Example Projects\n\nThe SDK includes complete working examples in the `examples/` directory:\n\n| Example | Path |\n|---------|------|\n| OpenAI Chat Completion | `examples/openai/chat/` |\n| OpenAI Streaming | `examples/openai/streaming/` |\n| Anthropic Messages | `examples/anthropic/messages/` |\n| Anthropic Streaming | `examples/anthropic/streaming/` |\n\n## Module Dependencies\n\nThe Go SDK depends on core OpenTelemetry packages for trace export and propagation:\n\n- OpenTelemetry OTLP exporter\n- OpenTelemetry trace propagation\n- Context propagation utilities\n\n资料来源：[sdk/go/go.mod](https://github.com/openlit/openlit/blob/main/sdk/go/go.mod)\n\n---\n\n<a id='integrations'></a>\n\n## LLM and Framework Integrations\n\n### 相关页面\n\n相关主题：[Python SDK Architecture](#python-sdk), [TypeScript SDK Architecture](#typescript-sdk)\n\n<details>\n<summary>Relevant Source Files</summary>\n\n以下源码文件用于生成本页说明：\n\n- [sdk/python/src/openlit/instrumentation/claude_agent_sdk/__init__.py](https://github.com/openlit/openlit/blob/main/sdk/python/src/openlit/instrumentation/claude_agent_sdk/__init__.py)\n- [sdk/python/src/openlit/instrumentation/llamaindex/utils.py](https://github.com/openlit/openlit/blob/main/sdk/python/src/openlit/instrumentation/llamaindex/utils.py)\n- 
[sdk/python/src/openlit/_config.py](https://github.com/openlit/openlit/blob/main/sdk/python/src/openlit/_config.py)\n- [sdk/python/src/openlit/__helpers.py](https://github.com/openlit/openlit/blob/main/sdk/python/src/openlit/__helpers.py)\n- [sdk/python/src/openlit/guard/__init__.py](https://github.com/openlit/openlit/blob/main/sdk/python/src/openlit/guard/__init__.py)\n</details>\n\n# LLM and Framework Integrations\n\nOpenLIT provides comprehensive instrumentation for a wide range of LLMs and AI frameworks, enabling automatic OpenTelemetry-native observability for GenAI applications. This page documents the architecture, supported integrations, and implementation patterns.\n\n## Overview\n\nOpenLIT's instrumentation layer wraps SDK calls from various LLM providers and AI frameworks to automatically capture traces and metrics without requiring manual instrumentation code.\n\n### Supported Integrations\n\n| Category | Integration | Python SDK | TypeScript SDK | Go SDK |\n|----------|-------------|------------|----------------|--------|\n| **LLM Providers** | OpenAI | ✅ | ✅ | ✅ |\n| **LLM Providers** | Anthropic | ✅ | ✅ | ✅ |\n| **LLM Providers** | Azure OpenAI | ✅ | ✅ | ✅ |\n| **LLM Providers** | Vertex AI | ✅ | ✅ | ✅ |\n| **LLM Providers** | Mistral AI | ✅ | ✅ | ✅ |\n| **LLM Providers** | Cohere | ✅ | ✅ | ✅ |\n| **LLM Providers** | HuggingFace | ✅ | ✅ | ✅ |\n| **AI Frameworks** | LangChain | ✅ | ✅ | - |\n| **AI Frameworks** | LlamaIndex | ✅ | - | - |\n| **AI Frameworks** | CrewAI | ✅ | - | - |\n| **AI Frameworks** | LangGraph | ✅ | - | - |\n| **AI Frameworks** | Claude Agent SDK | ✅ | - | - |\n| **Vector Stores** | Pinecone | ✅ | - | - |\n| **Vector Stores** | Chroma | ✅ | - | - |\n| **Vector Stores** | Qdrant | ✅ | - | - |\n| **Vector Stores** | Weaviate | ✅ | - | - |\n\n资料来源：[sdk/python/README.md](https://github.com/openlit/openlit/blob/main/sdk/python/README.md)\n\n## Architecture\n\n### Instrumentation Pattern\n\nAll 
instrumentations follow a consistent pattern based on OpenTelemetry's `BaseInstrumentor` class:\n\n```mermaid\ngraph TD\n    A[Application Code] --> B[Instrumented SDK]\n    B --> C[Wrapper Function]\n    C --> D[OpenTelemetry Tracer]\n    C --> E[Metrics Recorder]\n    D --> F[OTLP Exporter]\n    E --> F\n    F --> G[OpenLIT Backend]\n```\n\n### Core Components\n\n| Component | Purpose | Location |\n|-----------|---------|----------|\n| `BaseInstrumentor` | Base class for all instrumentors | `opentelemetry.instrumentation.instrumentor` |\n| `wrap_function_wrapper` | Wraps SDK functions dynamically | `wrapt` library |\n| `OpenlitConfig` | Singleton configuration management | `sdk/python/src/openlit/_config.py` |\n| Semantic Conventions | Standardized attribute naming | `openlit.semcov` module |\n\n资料来源：[sdk/python/src/openlit/instrumentation/claude_agent_sdk/__init__.py:17-21](https://github.com/openlit/openlit/blob/main/sdk/python/src/openlit/instrumentation/claude_agent_sdk/__init__.py)\n\n## Python SDK Instrumentation\n\n### Instrumentor Base Class\n\nAll Python SDK instrumentors extend `BaseInstrumentor` and implement two required methods:\n\n```python\nclass ClaudeAgentSDKInstrumentor(BaseInstrumentor):\n    \"\"\"OTel GenAI semantic convention compliant instrumentor for Claude Agent SDK.\"\"\"\n\n    def instrumentation_dependencies(self) -> Collection[str]:\n        return _instruments  # e.g., (\"claude-agent-sdk >= 0.1.0\",)\n\n    def _instrument(self, **kwargs):\n        # Initialize tracer, config, and wrap functions\n```\n\n资料来源：[sdk/python/src/openlit/instrumentation/claude_agent_sdk/__init__.py:26-35](https://github.com/openlit/openlit/blob/main/sdk/python/src/openlit/instrumentation/claude_agent_sdk/__init__.py)\n\n### Initialization Parameters\n\nWhen calling `openlit.init()`, the following parameters are passed to all instrumentors:\n\n| Parameter | Type | Description | Default |\n|-----------|------|-------------|---------|\n| `environment` | 
`str` | Deployment environment name | `\"default\"` |\n| `application_name` | `str` | Application identifier | `\"default\"` |\n| `pricing_info` | `Dict[str, ModelPricing]` | Custom model pricing | `{}` |\n| `capture_message_content` | `bool` | Enable/disable content tracing | `True` |\n| `disable_metrics` | `bool` | Disable metrics collection | `None` |\n| `otlp_endpoint` | `str` | OTLP exporter endpoint | Configured endpoint |\n\n资料来源：[sdk/python/src/openlit/_config.py:20-35](https://github.com/openlit/openlit/blob/main/sdk/python/src/openlit/_config.py)\n\n### OpenlitConfig Singleton\n\nThe `OpenlitConfig` class manages centralized configuration:\n\n```python\nclass OpenlitConfig:\n    \"\"\"Singleton configuration class for OpenLIT.\"\"\"\n    \n    _instance = None\n    \n    # Class-level attributes\n    environment = \"default\"\n    application_name = \"default\"\n    pricing_info = {}\n    metrics_dict = {}\n    otlp_endpoint = None\n    otlp_headers = None\n    disable_batch = False\n    capture_message_content = True\n```\n\n资料来源：[sdk/python/src/openlit/_config.py:18-42](https://github.com/openlit/openlit/blob/main/sdk/python/src/openlit/_config.py)\n\n## LlamaIndex Integration\n\n### Operation Type Mapping\n\nThe LlamaIndex instrumentation uses a semantic convention-based operation mapping system:\n\n```mermaid\ngraph LR\n    A[Document Operations] --> B[RETRIEVE]\n    A --> C[FRAMEWORK]\n    D[Index Operations] --> C\n    E[Query Operations] --> B\n    F[Retriever Operations] --> B\n```\n\n### Supported Operations\n\n| Operation | Semantic Convention | Category |\n|-----------|---------------------|----------|\n| `document_load` | `RETRIEVE` | Document Loading |\n| `document_transform` | `FRAMEWORK` | Document Processing |\n| `document_split` | `FRAMEWORK` | Document Processing |\n| `index_construct` | `FRAMEWORK` | Index Management |\n| `index_insert` | `FRAMEWORK` | Index Management |\n| `query_engine_query` | `RETRIEVE` | Query Engine |\n| 
`retriever_retrieve` | `RETRIEVE` | Retrieval |\n\n资料来源：[sdk/python/src/openlit/instrumentation/llamaindex/utils.py:1-30](https://github.com/openlit/openlit/blob/main/sdk/python/src/openlit/instrumentation/llamaindex/utils.py)\n\n## Helper Functions\n\n### Building Tool Definitions\n\nThe `__helpers.py` module provides utilities for extracting tool definitions from chat requests:\n\n```python\ndef build_tool_definitions(tools):\n    \"\"\"\n    Extract tool/function definitions from a chat request's ``tools`` parameter.\n    \n    Supports both OpenAI-style schema and flat schema formats.\n    \"\"\"\n```\n\nSupported formats:\n\n| Format | Structure |\n|--------|-----------|\n| OpenAI-style | `{\"type\": \"function\", \"function\": {...}}` |\n| Flat (dict) | `{\"name\": ..., \"description\": ..., \"parameters\": ...}` |\n| Flat (object) | Object with `name`, `description`, `input_schema` attributes |\n\n资料来源：[sdk/python/src/openlit/__helpers.py:1-40](https://github.com/openlit/openlit/blob/main/sdk/python/src/openlit/__helpers.py)\n\n### System Instructions Builder\n\nExtracts and formats system instructions from various input formats:\n\n```python\ndef build_system_instructions(instructions, **kwargs):\n    \"\"\"Builds system instructions from various input formats.\"\"\"\n```\n\n## Guardrails Integration\n\nOpenLIT includes a production-grade guardrails system:\n\n### Available Guards\n\n| Guard Class | Purpose |\n|-------------|---------|\n| `PII` | Detect and redact Personally Identifiable Information |\n| `PromptInjection` | Detect prompt injection attacks |\n| `SensitiveTopic` | Filter sensitive topics |\n| `TopicRestriction` | Restrict to allowed topics |\n| `Moderation` | Content moderation |\n| `Schema` | Output schema validation |\n| `Custom` | Custom guard implementation |\n\n资料来源：[sdk/python/src/openlit/guard/__init__.py:1-30](https://github.com/openlit/openlit/blob/main/sdk/python/src/openlit/guard/__init__.py)\n\n### Guard 
Architecture\n\n```mermaid\ngraph TD\n    A[User Input] --> B[Pipeline]\n    B --> C[Guard 1: PII]\n    C --> D[Guard 2: PromptInjection]\n    D --> E[Guard N: Custom]\n    E --> F[GuardResult]\n    C -.->|Denied| G[GuardDeniedError]\n    D -.->|Timeout| H[GuardTimeoutError]\n```\n\n### Usage Example\n\n```python\nimport openlit\n\n# Initialize with guards\nopenlit.init(guards=[openlit.PII(action=\"redact\")])\n\n# Or with direct imports\nfrom openlit import PII, PromptInjection, Moderation\n\nguards = [PII(), PromptInjection(), Moderation()]\nopenlit.init(guards=guards)\n```\n\n## TypeScript SDK Instrumentation\n\n### Wrapper Pattern\n\nThe TypeScript SDK uses a similar wrapping pattern:\n\n```typescript\n// Wrapped in wrapper.ts for each integration\nexport function wrapOpenAI() {\n  // Wrap OpenAI SDK methods\n}\n```\n\n资料来源：[sdk/typescript/src/instrumentation/openai/wrapper.ts](https://github.com/openlit/openlit/blob/main/sdk/typescript/src/instrumentation/openai/wrapper.ts)\n\n### Initialization\n\n```typescript\nimport openlit from 'openlit';\n\nopenlit.init({\n  otlpEndpoint: \"http://127.0.0.1:4318\"\n});\n```\n\n## Configuration Reference\n\n### Environment Variables\n\n| Variable | Description | Example |\n|----------|-------------|---------|\n| `OTEL_EXPORTER_OTLP_ENDPOINT` | OTLP endpoint URL | `http://127.0.0.1:4318` |\n| `OTEL_EXPORTER_OTLP_HEADERS` | Authentication headers | `Authorization=Bearer token` |\n\n### SDK Configuration Options\n\n```python\nimport openlit\n\nopenlit.init(\n    otlp_endpoint=\"http://127.0.0.1:4318\",\n    otlp_headers={\"Authorization\": \"Bearer token\"},\n    environment=\"production\",\n    application_name=\"my-llm-app\",\n    pricing_info={\n        \"gpt-4\": {\"input_cost_per_token\": 0.00003, \"output_cost_per_token\": 0.00006}\n    },\n    capture_message_content=True\n)\n```\n\n## Best Practices\n\n### 1. 
Instrument Before Usage\n\nAlways initialize OpenLIT before importing instrumented SDKs:\n\n```python\n# Correct order\nimport openlit\nopenlit.init(otlp_endpoint=\"http://127.0.0.1:4318\")\n\nfrom openai import OpenAI  # Now automatically instrumented\n```\n\n### 2. Custom Pricing\n\nDefine custom pricing for accurate cost tracking:\n\n```python\nopenlit.init(\n    pricing_info={\n        \"custom-model\": {\n            \"input_cost_per_token\": 0.00001,\n            \"output_cost_per_token\": 0.00002\n        }\n    }\n)\n```\n\n### 3. Selective Content Capture\n\nDisable content capture for sensitive data:\n\n```python\nopenlit.init(\n    capture_message_content=False  # Won't trace message content\n)\n```\n\n## See Also\n\n- [OpenLIT Python SDK Documentation](https://github.com/openlit/openlit/tree/main/sdk/python)\n- [OpenLIT TypeScript SDK Documentation](https://github.com/openlit/openlit/tree/main/sdk/typescript)\n- [OpenLIT Go SDK Documentation](https://github.com/openlit/openlit/tree/main/sdk/go)\n- [OpenTelemetry Semantic Conventions](https://opentelemetry.io/docs/specs/otel/trace/semantic_conventions/)\n\n---\n\n<a id='controller'></a>\n\n## OpenLIT Controller\n\n### 相关页面\n\n相关主题：[GPU Collector](#gpu-collector)\n\n<details>\n<summary>相关源码文件</summary>\n\n以下源码文件用于生成本页说明：\n\n- [openlit-controller/cmd/controller/main.go](https://github.com/openlit/openlit/blob/main/openlit-controller/cmd/controller/main.go)\n- [openlit-controller/internal/engine/engine.go](https://github.com/openlit/openlit/blob/main/openlit-controller/internal/engine/engine.go)\n- [openlit-controller/internal/engine/lifecycle.go](https://github.com/openlit/openlit/blob/main/openlit-controller/internal/engine/lifecycle.go)\n- [openlit-controller/internal/engine/python_sdk_runtime.go](https://github.com/openlit/openlit/blob/main/openlit-controller/internal/engine/python_sdk_runtime.go)\n- 
[openlit-controller/internal/server/handlers.go](https://github.com/openlit/openlit/blob/main/openlit-controller/internal/server/handlers.go)\n- [openlit-controller/internal/scanner/scanner.go](https://github.com/openlit/openlit/blob/main/openlit-controller/internal/scanner/scanner.go)\n</details>\n\n# OpenLIT Controller\n\nThe OpenLIT Controller is a standalone, lightweight binary agent designed to automatically instrument Python-based LLM applications with OpenLIT's observability SDK. It operates as a background service that runs alongside your application, providing seamless OpenTelemetry-native tracing and metrics collection without requiring code modifications.\n\n## Overview\n\nThe Controller serves as an autonomous agent that:\n\n- **Discovers** Python applications running in various environments (bare metal, containers, Kubernetes)\n- **Injects** the OpenLIT Python SDK into target applications at runtime\n- **Manages** the lifecycle of instrumentation (enable, disable, status monitoring)\n- **Reports** service metadata back to the OpenLIT platform\n\n资料来源：[src/client/src/lib/platform/controller/features/agent.ts:1-60]()\n\n## Architecture\n\n```mermaid\ngraph TD\n    A[OpenLIT Platform] -->|Manage & Monitor| B[OpenLIT Controller]\n    B -->|Discover Services| C[Scanner Module]\n    B -->|Instrument Apps| D[Engine Module]\n    D -->|Python SDK Injection| E[Python Runtime]\n    E -->|Traces & Metrics| F[OpenTelemetry Collector]\n    \n    G[Kubernetes Pod] -->|Contains| H[Python Application]\n    H -->|Auto-instrumented by| D\n    \n    I[Linux Host] -->|Systemd Service| B\n```\n\n### Core Components\n\n| Component | Location | Responsibility |\n|-----------|----------|----------------|\n| **cmd/controller** | `cmd/controller/main.go` | Entry point, configuration, signal handling |\n| **Server** | `internal/server/handlers.go` | HTTP API for platform communication |\n| **Engine** | `internal/engine/engine.go` | Orchestrates instrumentation operations |\n| 
**Lifecycle** | `internal/engine/lifecycle.go` | Manages enable/disable transitions |\n| **Python SDK Runtime** | `internal/engine/python_sdk_runtime.go` | Runtime injection of Python SDK |\n| **Scanner** | `internal/scanner/scanner.go` | Discovers Python applications |\n\n资料来源：[src/client/src/lib/platform/controller/features/agent.ts:1-25]()\n\n## Supported Environments\n\nThe Controller supports multiple deployment scenarios:\n\n| Environment | Installation Method | Status |\n|-------------|---------------------|--------|\n| **Linux (systemd)** | Direct binary download + systemd service | ✅ Primary |\n| **Docker** | Privileged container with PID host mode | ✅ Supported |\n| **Kubernetes** | DaemonSet or sidecar pattern | ✅ Supported |\n\n资料来源：[src/client/src/app/(playground)/agents/no-controller.tsx:1-50]()\n\n## Installation\n\n### Linux (systemd)\n\nDownload the latest binary and configure as a systemd service:\n\n```bash\ncurl -fsSL https://github.com/openlit/openlit/releases/latest/download/openlit-controller-linux-amd64 \\\n  -o /usr/local/bin/openlit-controller\nchmod +x /usr/local/bin/openlit-controller\n\n# Create systemd service\ncat > /etc/systemd/system/openlit-controller.service << 'EOF'\n[Unit]\nDescription=OpenLIT Controller\nAfter=network.target\n\n[Service]\nEnvironment=\"OPENLIT_URL=http://<openlit-host>:3000\"\nEnvironment=\"OTEL_EXPORTER_OTLP_ENDPOINT=http://<openlit-host>:4318\"\nEnvironment=\"OPENLIT_API_KEY=<your-api-key>\"\nExecStart=/usr/local/bin/openlit-controller\nRestart=always\n\n[Install]\nWantedBy=multi-user.target\nEOF\n\nsystemctl daemon-reload\nsystemctl enable --now openlit-controller\n```\n\n资料来源：[src/client/src/app/(playground)/agents/no-controller.tsx:10-35]()\n\n### Docker\n\n```bash\ndocker run -d --privileged --pid=host \\\n  -e OPENLIT_URL=http://openlit:3000 \\\n  -e OTEL_EXPORTER_OTLP_ENDPOINT=http://openlit:4318 \\\n  openlit-controller\n```\n\n## Configuration\n\nThe Controller is configured via environment 
variables:\n\n| Environment Variable | Description | Required |\n|---------------------|-------------|----------|\n| `OPENLIT_URL` | URL of the OpenLIT platform | Yes |\n| `OPENLIT_API_KEY` | API key for authentication | No |\n| `OTEL_EXPORTER_OTLP_ENDPOINT` | OTLP endpoint for telemetry | Yes |\n\n资料来源：[src/client/src/app/(playground)/agents/no-controller.tsx:15-25]()\n\n## Agent Operations\n\nThe Controller exposes three primary operations:\n\n### Enable Instrumentation\n\nActivates OpenLIT SDK injection for target Python applications.\n\n```json\n{\n  \"operation\": \"enable\",\n  \"serviceId\": \"string\"\n}\n```\n\n### Disable Instrumentation\n\nDeactivates SDK injection and removes runtime hooks.\n\n```json\n{\n  \"operation\": \"disable\",\n  \"serviceId\": \"string\"\n}\n```\n\n### Status Check\n\nRetrieves current instrumentation state for a service.\n\n```json\n{\n  \"operation\": \"status\",\n  \"serviceId\": \"string\"\n}\n```\n\n资料来源：[src/client/src/lib/platform/controller/features/agent.ts:25-45]()\n\n## Service State Model\n\n```mermaid\nstateDiagram-v2\n    [*] --> disabled: Initial State\n    disabled --> enabled: enable operation\n    enabled --> disabled: disable operation\n    enabled --> manual: explicit override\n    manual --> enabled: resume auto\n    disabled --> manual: partial config\n    manual --> disabled: full removal\n```\n\n### State Definitions\n\n| State | Description |\n|-------|-------------|\n| `enabled` | SDK actively injecting traces |\n| `disabled` | No instrumentation active |\n| `manual` | User-controlled state (not auto-managed) |\n| `automatable` | Service eligible for auto-instrumentation |\n\n资料来源：[src/client/src/lib/platform/controller/features/agent.ts:15-30]()\n\n## Python SDK Runtime Integration\n\nThe Controller's Python SDK Runtime module handles the actual SDK injection:\n\n1. **Process Discovery**: Identifies Python processes running user applications\n2. 
**Runtime Injection**: Injects OpenLIT SDK using Python's import hooks\n3. **Configuration Propagation**: Sets OTLP endpoint and API keys via environment\n4. **Health Monitoring**: Ensures instrumentation remains active\n\nThe runtime is specifically optimized for **Python-only** services:\n\n```typescript\nsupported: service.language_runtime === \"python\"\n```\n\n资料来源：[src/client/src/lib/platform/controller/features/agent.ts:20]()\n\n## Kubernetes Integration\n\nWhen running in Kubernetes, the Controller respects workload metadata:\n\n| Attribute | Description |\n|-----------|-------------|\n| `k8s.workload.kind` | Workload type (Deployment, StatefulSet, etc.) |\n| `service.service_name` | Name of the service |\n| `service.namespace` | Kubernetes namespace |\n\n### Naked Pod Handling\n\nThe Controller automatically detects and handles \"naked pods\" (pods without a workload controller):\n\n```typescript\nconst isNakedPod = mode === \"kubernetes\" && (!workloadKind || workloadKind === \"Pod\");\n```\n\n资料来源：[src/client/src/lib/platform/controller/features/agent.ts:8-12]()\n\n## Validation\n\nOperations are validated before execution:\n\n```typescript\nvalidatePayload(operation: string, _payload: Record<string, unknown>) {\n    if (\n        operation !== \"enable\" &&\n        operation !== \"disable\" &&\n        operation !== \"status\"\n    ) {\n        return `Unknown operation \"${operation}\" for feature \"${FEATURE}\". \n                Expected \"enable\", \"disable\", or \"status\".`;\n    }\n    return null;\n}\n```\n\n资料来源：[src/client/src/lib/platform/controller/features/agent.ts:28-40]()\n\n## Summary\n\nThe OpenLIT Controller is a critical component for zero-code instrumentation of Python LLM applications. 
It provides:\n\n- **Automated Discovery**: Scans and identifies Python services automatically\n- **Runtime Injection**: Injects observability SDK without application restarts\n- **Multi-Platform Support**: Works on Linux, Docker, and Kubernetes\n- **Platform Integration**: Connects to OpenLIT platform for centralized management\n- **Lifecycle Management**: Full control over enable/disable operations\n\n---\n\n<a id='gpu-collector'></a>\n\n## GPU Collector\n\n### 相关页面\n\n相关主题：[OpenLIT Controller](#controller), [System Architecture](#architecture)\n\n<details>\n<summary>Relevant Source Files</summary>\n\n以下源码文件用于生成本页说明：\n\n- [opentelemetry-gpu-collector/README.md](https://github.com/openlit/openlit/blob/main/opentelemetry-gpu-collector/README.md)\n- [opentelemetry-gpu-collector/cmd/collector/main.go](https://github.com/openlit/openlit/blob/main/opentelemetry-gpu-collector/cmd/collector/main.go)\n- [opentelemetry-gpu-collector/internal/gpu/nvidia/nvidia.go](https://github.com/openlit/openlit/blob/main/opentelemetry-gpu-collector/internal/gpu/nvidia/nvidia.go)\n- [opentelemetry-gpu-collector/internal/gpu/amd/amd.go](https://github.com/openlit/openlit/blob/main/opentelemetry-gpu-collector/internal/gpu/amd/amd.go)\n- [opentelemetry-gpu-collector/internal/gpu/intel/intel.go](https://github.com/openlit/openlit/blob/main/opentelemetry-gpu-collector/internal/gpu/intel/intel.go)\n- [opentelemetry-gpu-collector/internal/ebpf/tracer.go](https://github.com/openlit/openlit/blob/main/opentelemetry-gpu-collector/internal/ebpf/tracer.go)\n- [opentelemetry-gpu-collector/internal/export/metrics.go](https://github.com/openlit/openlit/blob/main/opentelemetry-gpu-collector/internal/export/metrics.go)\n</details>\n\n# GPU Collector\n\nThe **OpenTelemetry GPU Collector** (also referred to as `opentelemetry-gpu-collector`) is a specialized telemetry agent built and maintained by OpenLIT. 
It provides real-time GPU hardware telemetry collection for NVIDIA, AMD, and Intel GPUs, emitting metrics in compliance with the OpenTelemetry semantic conventions under the `hw.gpu.*` namespace.\n\n## Overview\n\nThe GPU Collector serves as a standalone service that monitors GPU hardware metrics and exports them via the OTLP protocol to any OpenTelemetry-compatible backend, including the OpenLIT observability platform.\n\n**Key Responsibilities:**\n\n- Collect GPU hardware telemetry from NVIDIA GPUs via NVML (NVIDIA Management Library)\n- Collect GPU hardware telemetry from AMD and Intel GPUs via `sysfs/hwmon` interfaces\n- Perform eBPF-based CUDA kernel tracing for detailed operation insights\n- Emit metrics following OpenTelemetry semantic conventions (`hw.gpu.*`)\n- Export metrics over OTLP for integration with observability platforms\n\n**License:** Apache-2.0\n\n资料来源：[opentelemetry-gpu-collector/README.md](https://github.com/openlit/openlit/blob/main/opentelemetry-gpu-collector/README.md)\n\n---\n\n## Architecture\n\nThe GPU Collector follows a modular architecture with distinct internal components for GPU detection, metric collection, and telemetry export.\n\n```mermaid\ngraph TD\n    subgraph GPU Collector\n        A[main.go] --> B[GPU Detection Layer]\n        B --> C[NVIDIA Provider]\n        B --> D[AMD Provider]\n        B --> E[Intel Provider]\n        C --> F[NVML Interface]\n        D --> G[sysfs/hwmon]\n        E --> G\n        C --> H[Metrics Processor]\n        D --> H\n        E --> H\n        F --> H\n        G --> H\n        H --> I[eBPF Tracer]\n        H --> J[OTLP Exporter]\n        I --> J\n    end\n    \n    K[OpenTelemetry Backend] --> J\n    L[OpenLIT Dashboard] --> K\n```\n\n### Core Components\n\n| Component | Path | Purpose |\n|-----------|------|---------|\n| Entry Point | `cmd/collector/main.go` | Application initialization and configuration |\n| NVIDIA Provider | `internal/gpu/nvidia/nvidia.go` | NVML-based telemetry collection for 
NVIDIA GPUs |\n| AMD Provider | `internal/gpu/amd/amd.go` | sysfs/hwmon-based telemetry for AMD GPUs |\n| Intel Provider | `internal/gpu/intel/intel.go` | sysfs/hwmon-based telemetry for Intel GPUs |\n| eBPF Tracer | `internal/ebpf/tracer.go` | CUDA kernel tracing via eBPF |\n| Metrics Exporter | `internal/export/metrics.go` | OTLP metric export logic |\n\n资料来源：[opentelemetry-gpu-collector/README.md](https://github.com/openlit/openlit/blob/main/opentelemetry-gpu-collector/README.md)\n\n---\n\n## Supported Hardware and Vendors\n\nThe GPU Collector supports GPU telemetry collection from three major hardware vendors.\n\n### Vendor Support Matrix\n\n| Vendor | Collection Method | Status | Features |\n|--------|------------------|--------|----------|\n| **NVIDIA** | NVML (NVIDIA Management Library) | Done | Power, energy, clock, utilization, errors |\n| **AMD** | sysfs/hwmon | Done | Power, energy, clock, utilization |\n| **Intel** | sysfs/hwmon | Done | Power, clock, utilization* |\n\n*Intel support depends on driver (i915/Xe) and kernel version.\n\n资料来源：[opentelemetry-gpu-collector/README.md](https://github.com/openlit/openlit/blob/main/opentelemetry-gpu-collector/README.md)\n\n### Hardware Telemetry Features\n\n| Feature | Status |\n|---------|--------|\n| NVIDIA GPU hardware telemetry (NVML) | Done |\n| AMD GPU hardware telemetry (sysfs/hwmon) | Done |\n| Intel GPU hardware telemetry (sysfs/hwmon) | Done |\n| eBPF CUDA kernel tracing | Done |\n| OTel semantic convention compliance (`hw.gpu.*`) | Done |\n| Prometheus `/metrics` endpoint | Planned |\n| ROCm HIP tracing (AMD eBPF) | Planned |\n| Per-process GPU utilization (DRM fdinfo) | Planned |\n\n资料来源：[opentelemetry-gpu-collector/README.md](https://github.com/openlit/openlit/blob/main/opentelemetry-gpu-collector/README.md)\n\n---\n\n## Metrics Reference\n\nAll GPU metrics follow the OpenTelemetry semantic conventions with the `hw.gpu.*` prefix.\n\n### Metric Definitions\n\n| Metric Name | Type | Unit | Description 
| NVIDIA | AMD | Intel |\n|-------------|------|------|-------------|--------|-----|-------|\n| `hw.gpu.power.draw` | Gauge | W | Current power draw | Yes | Yes | Yes |\n| `hw.gpu.power.limit` | Gauge | W | Power limit/cap | Yes | Yes | Yes |\n| `hw.gpu.energy.consumed` | Counter | J | Cumulative energy consumed | Yes | Yes | Yes |\n| `hw.gpu.clock.graphics` | Gauge | MHz | Graphics/SM clock frequency | Yes | Yes | —* |\n| `hw.gpu.clock.memory` | Gauge | MHz | Memory clock frequency | Yes | Yes | — |\n| `hw.errors` | Counter | {error} | ECC and PCIe errors via `error.type` + `hw.type=gpu` | Yes | — | — |\n\n*Intel support depends on driver (i915/Xe) and kernel version.\n\n### Utilization Metrics\n\n| Metric | Extra Attribute | Values |\n|--------|-----------------|--------|\n| `hw.gpu.utilization` | `hw.gpu.task` | `general`, `encoder`, `decoder` |\n\n资料来源：[opentelemetry-gpu-collector/README.md](https://github.com/openlit/openlit/blob/main/opentelemetry-gpu-collector/README.md)\n\n---\n\n## Attributes\n\nAll GPU metrics include the following attributes for device identification and categorization.\n\n### Common Attributes\n\n| Attribute | Description | Example |\n|-----------|-------------|---------|\n| `hw.id` | Unique device identifier (required by spec) | `GPU-a1b2c3d4-...` |\n| `hw.name` | Product name | `NVIDIA A100-SXM4-80GB` |\n| `hw.vendor` | Vendor name | `nvidia`, `amd`, `intel` |\n| `gpu.index` | Device index | `0`, `1` |\n| `gpu.pci_address` | PCI bus address | `0000:01:00.0` |\n\n### Error Attributes\n\n| Attribute | Description |\n|-----------|-------------|\n| `error.type` | Type of hardware error |\n| `hw.type` | Set to `gpu` for GPU-specific errors |\n\n资料来源：[opentelemetry-gpu-collector/README.md](https://github.com/openlit/openlit/blob/main/opentelemetry-gpu-collector/README.md)\n\n---\n\n## Deployment Options\n\nThe GPU Collector can be deployed using multiple methods based on infrastructure requirements.\n\n### Docker\n\n```bash\ndocker run -d 
\\\n    --name otel-gpu-collector \\\n    --restart always \\\n    --gpus all \\\n    -e OTEL_SERVICE_NAME=my-gpu-app \\\n    -e OTEL_RESOURCE_ATTRIBUTES=deployment.environment=production \\\n    -e OTEL_EXPORTER_OTLP_ENDPOINT=\"http://otel-collector:4317\" \\\n    ghcr.io/openlit/otel-gpu-collector:latest\n```\n\n### Docker Compose\n\n```yaml\nservices:\n  otel-gpu-collector:\n    image: ghcr.io/openlit/otel-gpu-collector:latest\n    environment:\n      OTEL_SERVICE_NAME: my-app\n      OTEL_RESOURCE_ATTRIBUTES: \"deployment.environment=production\"\n      OTEL_EXPORTER_OTLP_ENDPOINT: \"http://otel-collector:4317\"\n    deploy:\n      resources:\n        reservations:\n          devices:\n            - driver: nvidia\n              count: all\n              capabilities: [gpu]\n    depends_on:\n      - otel-collector\n    restart: always\n```\n\n### Pre-built Binary\n\n```sh\n# Linux amd64\ncurl -L https://github.com/openlit/openlit/releases/latest/download/opentelemetry-gpu-collector-<version>-linux-amd64 \\\n    -o opentelemetry-gpu-collector\nchmod +x opentelemetry-gpu-collector\n\nOTEL_EXPORTER_OTLP_ENDPOINT=http://localhost:4317 ./opentelemetry-gpu-collector\n```\n\n### Build from Source\n\n```sh\ngit clone https://github.com/openlit/openlit.git\ncd openlit/opentelemetry-gpu-collector\nmake build\n./opentelemetry-gpu-collector\n```\n\n资料来源：[opentelemetry-gpu-collector/README.md](https://github.com/openlit/openlit/blob/main/opentelemetry-gpu-collector/README.md)\n\n---\n\n## Configuration\n\nThe GPU Collector uses standard OpenTelemetry environment variables for configuration.\n\n### Configuration Variables\n\n| Variable | Default | Description |\n|----------|---------|-------------|\n| `OTEL_EXPORTER_OTLP_ENDPOINT` | *(required)* | OTLP exporter endpoint |\n| `OTEL_SERVICE_NAME` | — | Service name for telemetry |\n| `OTEL_RESOURCE_ATTRIBUTES` | — | Additional resource attributes 
|\n\n资料来源：[opentelemetry-gpu-collector/README.md](https://github.com/openlit/openlit/blob/main/opentelemetry-gpu-collector/README.md)\n\n---\n\n## Data Flow\n\n```mermaid\ngraph LR\n    A[GPU Hardware] -->|NVML/sysfs| B[GPU Provider]\n    B -->|Raw Metrics| C[Metrics Processor]\n    D[eBPF Kernel Tracer] -->|Kernel Events| C\n    C -->|Structured Metrics| E[OTLP Exporter]\n    E -->|OTLP Protocol| F[OpenTelemetry Backend]\n    F --> G[OpenLIT Dashboard]\n```\n\n### Collection Pipeline\n\n1. **GPU Detection**: The collector detects available GPUs on the host system\n2. **Vendor-specific Collection**: Each GPU type uses its native interface:\n   - NVIDIA: NVML API calls\n   - AMD/Intel: Reading from `/sys/class/hwmon/`\n3. **Metric Processing**: Raw values are transformed into OpenTelemetry metric format\n4. **eBPF Enrichment**: CUDA kernel tracing data enriches the telemetry\n5. **OTLP Export**: Metrics are exported to the configured endpoint\n\n---\n\n## Integration with OpenLIT\n\nThe GPU Collector integrates seamlessly with the OpenLIT observability platform for GPU monitoring.\n\n```mermaid\ngraph TD\n    subgraph Collection Layer\n        A[GPU Collector] -->|OTLP|gRPC[OTLP gRPC]\n        A -->|OTLP|HTTP[OTLP HTTP]\n    end\n    \n    subgraph OpenLIT Stack\n        B[OpenLIT Backend] --> C[PostgreSQL]\n        B --> D[ClickHouse]\n        B --> E[Redis]\n    end\n    \n    gRPC --> B\n    HTTP --> B\n    B --> F[OpenLIT Dashboard:3000]\n```\n\n### Prerequisites\n\n1. Deploy the OpenLIT stack using Docker Compose:\n   ```bash\n   docker compose up -d\n   ```\n\n2. Configure the GPU Collector endpoint:\n   ```bash\n   OTEL_EXPORTER_OTLP_ENDPOINT=http://localhost:4317 ./opentelemetry-gpu-collector\n   ```\n\n3. 
Access the OpenLIT Dashboard at `http://localhost:3000`\n\n资料来源：[src/client/src/app/(playground)/getting-started/page.tsx](https://github.com/openlit/openlit/blob/main/src/client/src/app/(playground)/getting-started/page.tsx)\n\n---\n\n## Project Structure\n\n```\nopentelemetry-gpu-collector/\n├── cmd/\n│   └── collector/\n│       └── main.go              # Application entry point\n├── internal/\n│   ├── gpu/\n│   │   ├── nvidia/\n│   │   │   └── nvidia.go        # NVIDIA GPU provider (NVML)\n│   │   ├── amd/\n│   │   │   └── amd.go           # AMD GPU provider (sysfs)\n│   │   └── intel/\n│   │       └── intel.go        # Intel GPU provider (sysfs)\n│   ├── ebpf/\n│   │   └── tracer.go           # eBPF CUDA kernel tracer\n│   └── export/\n│       └── metrics.go           # OTLP metrics exporter\n├── Dockerfile\n├── Makefile\n└── README.md\n```\n\n---\n\n## See Also\n\n- [OpenLIT Documentation](https://docs.openlit.io)\n- [OpenLIT GitHub Repository](https://github.com/openlit/openlit)\n- [OpenTelemetry Semantic Conventions - Hardware Metrics](https://opentelemetry.io/docs/specs/semconv/hardware-metrics/)\n\n---\n\n---\n\n## Doramagic 踩坑日志\n\n项目：openlit/openlit\n\n摘要：发现 15 个潜在踩坑项，其中 0 个为 high/blocking；最高优先级：安装坑 - 来源证据：Integration: Governance and compliance signals for LLM observability。\n\n## 1. 安装坑 · 来源证据：Integration: Governance and compliance signals for LLM observability\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：GitHub 社区证据显示该项目存在一个安装相关的待验证问题：Integration: Governance and compliance signals for LLM observability\n- 对用户的影响：可能增加新用户试用和生产接入成本。\n- 建议检查：来源显示可能已有修复、规避或版本变化，说明书中必须标注适用版本。\n- 防护动作：不得脱离来源链接放大为确定性结论；需要标注适用版本和复核状态。\n- 证据：community_evidence:github | cevd_16e8a1979e4646f18ae6d36da1fd46fe | https://github.com/openlit/openlit/issues/1106 | 来源类型 github_issue 暴露的待验证使用条件。\n\n## 2. 
安装坑 · 来源证据：Proposal: gen_ai.agent.threat_detected span event helper for OTel-shaped detection observability\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：GitHub 社区证据显示该项目存在一个安装相关的待验证问题：Proposal: gen_ai.agent.threat_detected span event helper for OTel-shaped detection observability\n- 对用户的影响：可能增加新用户试用和生产接入成本。\n- 建议检查：来源问题仍为 open，Pack Agent 需要复核是否仍影响当前版本。\n- 防护动作：不得脱离来源链接放大为确定性结论；需要标注适用版本和复核状态。\n- 证据：community_evidence:github | cevd_9788255c9fb34a7eae64ba6413a52030 | https://github.com/openlit/openlit/issues/1186 | 来源讨论提到 python 相关条件，需在安装/试用前复核。\n\n## 3. 安装坑 · 来源证据：[Bug]: Docker Image doesn't run on windows 64bit\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：GitHub 社区证据显示该项目存在一个安装相关的待验证问题：[Bug]: Docker Image doesn't run on windows 64bit\n- 对用户的影响：可能增加新用户试用和生产接入成本。\n- 建议检查：来源显示可能已有修复、规避或版本变化，说明书中必须标注适用版本。\n- 防护动作：不得脱离来源链接放大为确定性结论；需要标注适用版本和复核状态。\n- 证据：community_evidence:github | cevd_e25a08120daf4deb81b9193aeab1f929 | https://github.com/openlit/openlit/issues/786 | 来源讨论提到 docker 相关条件，需在安装/试用前复核。\n\n## 4. 安装坑 · 来源证据：openlit-1.19.0\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：GitHub 社区证据显示该项目存在一个安装相关的待验证问题：openlit-1.19.0\n- 对用户的影响：可能增加新用户试用和生产接入成本。\n- 建议检查：来源显示可能已有修复、规避或版本变化，说明书中必须标注适用版本。\n- 防护动作：不得脱离来源链接放大为确定性结论；需要标注适用版本和复核状态。\n- 证据：community_evidence:github | cevd_0504e467960f4bbe919ff101c6a14d7b | https://github.com/openlit/openlit/releases/tag/openlit-1.19.0 | 来源类型 github_release 暴露的待验证使用条件。\n\n## 5. 配置坑 · 来源证据：controller-0.2.0\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：GitHub 社区证据显示该项目存在一个配置相关的待验证问题：controller-0.2.0\n- 对用户的影响：可能影响升级、迁移或版本选择。\n- 建议检查：来源显示可能已有修复、规避或版本变化，说明书中必须标注适用版本。\n- 防护动作：不得脱离来源链接放大为确定性结论；需要标注适用版本和复核状态。\n- 证据：community_evidence:github | cevd_addec19eec37420da207487d5a685eaa | https://github.com/openlit/openlit/releases/tag/controller-0.2.0 | 来源类型 github_release 暴露的待验证使用条件。\n\n## 6. 
配置坑 · 来源证据：openlit-1.20.0\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：GitHub 社区证据显示该项目存在一个配置相关的待验证问题：openlit-1.20.0\n- 对用户的影响：可能影响升级、迁移或版本选择。\n- 建议检查：来源显示可能已有修复、规避或版本变化，说明书中必须标注适用版本。\n- 防护动作：不得脱离来源链接放大为确定性结论；需要标注适用版本和复核状态。\n- 证据：community_evidence:github | cevd_217968c917e9426f9f8fbb4b50bebdb5 | https://github.com/openlit/openlit/releases/tag/openlit-1.20.0 | 来源类型 github_release 暴露的待验证使用条件。\n\n## 7. 能力坑 · 能力判断依赖假设\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：README/documentation is current enough for a first validation pass.\n- 对用户的影响：假设不成立时，用户拿不到承诺的能力。\n- 建议检查：将假设转成下游验证清单。\n- 防护动作：假设必须转成验证项；没有验证结果前不能写成事实。\n- 证据：capability.assumptions | github_repo:747319327 | https://github.com/openlit/openlit | README/documentation is current enough for a first validation pass.\n\n## 8. 维护坑 · 维护活跃度未知\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：未记录 last_activity_observed。\n- 对用户的影响：新项目、停更项目和活跃项目会被混在一起，推荐信任度下降。\n- 建议检查：补 GitHub 最近 commit、release、issue/PR 响应信号。\n- 防护动作：维护活跃度未知时，推荐强度不能标为高信任。\n- 证据：evidence.maintainer_signals | github_repo:747319327 | https://github.com/openlit/openlit | last_activity_observed missing\n\n## 9. 安全/权限坑 · 下游验证发现风险项\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：no_demo\n- 对用户的影响：下游已经要求复核，不能在页面中弱化。\n- 建议检查：进入安全/权限治理复核队列。\n- 防护动作：下游风险存在时必须保持 review/recommendation 降级。\n- 证据：downstream_validation.risk_items | github_repo:747319327 | https://github.com/openlit/openlit | no_demo; severity=medium\n\n## 10. 安全/权限坑 · 存在评分风险\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：no_demo\n- 对用户的影响：风险会影响是否适合普通用户安装。\n- 建议检查：把风险写入边界卡，并确认是否需要人工复核。\n- 防护动作：评分风险必须进入边界卡，不能只作为内部分数。\n- 证据：risks.scoring_risks | github_repo:747319327 | https://github.com/openlit/openlit | no_demo; severity=medium\n\n## 11. 
安全/权限坑 · 来源证据：Bug: OpenAI API key in operator example test-application is not using OPENAI_API_KEY env var\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：GitHub 社区证据显示该项目存在一个安全/权限相关的待验证问题：Bug: OpenAI API key in operator example test-application is not using OPENAI_API_KEY env var\n- 对用户的影响：可能影响授权、密钥配置或安全边界。\n- 建议检查：来源显示可能已有修复、规避或版本变化，说明书中必须标注适用版本。\n- 防护动作：不得脱离来源链接放大为确定性结论；需要标注适用版本和复核状态。\n- 证据：community_evidence:github | cevd_bfba0945570d4cbbaead1257e8f70dfe | https://github.com/openlit/openlit/issues/1135 | 来源讨论提到 python 相关条件，需在安装/试用前复核。\n\n## 12. 安全/权限坑 · 来源证据：openlit-1.19.1\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：GitHub 社区证据显示该项目存在一个安全/权限相关的待验证问题：openlit-1.19.1\n- 对用户的影响：可能增加新用户试用和生产接入成本。\n- 建议检查：来源显示可能已有修复、规避或版本变化，说明书中必须标注适用版本。\n- 防护动作：不得脱离来源链接放大为确定性结论；需要标注适用版本和复核状态。\n- 证据：community_evidence:github | cevd_b5088506959947828f2d740f9297d5b5 | https://github.com/openlit/openlit/releases/tag/openlit-1.19.1 | 来源类型 github_release 暴露的待验证使用条件。\n\n## 13. 安全/权限坑 · 来源证据：py-1.41.2\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：GitHub 社区证据显示该项目存在一个安全/权限相关的待验证问题：py-1.41.2\n- 对用户的影响：可能影响授权、密钥配置或安全边界。\n- 建议检查：来源显示可能已有修复、规避或版本变化，说明书中必须标注适用版本。\n- 防护动作：不得脱离来源链接放大为确定性结论；需要标注适用版本和复核状态。\n- 证据：community_evidence:github | cevd_ff3f4dfa2dc04616be73482b2145ac5c | https://github.com/openlit/openlit/releases/tag/py-1.41.2 | 来源讨论提到 docker 相关条件，需在安装/试用前复核。\n\n## 14. 维护坑 · issue/PR 响应质量未知\n\n- 严重度：low\n- 证据强度：source_linked\n- 发现：issue_or_pr_quality=unknown。\n- 对用户的影响：用户无法判断遇到问题后是否有人维护。\n- 建议检查：抽样最近 issue/PR，判断是否长期无人处理。\n- 防护动作：issue/PR 响应未知时，必须提示维护风险。\n- 证据：evidence.maintainer_signals | github_repo:747319327 | https://github.com/openlit/openlit | issue_or_pr_quality=unknown\n\n## 15. 
维护坑 · 发布节奏不明确\n\n- 严重度：low\n- 证据强度：source_linked\n- 发现：release_recency=unknown。\n- 对用户的影响：安装命令和文档可能落后于代码，用户踩坑概率升高。\n- 建议检查：确认最近 release/tag 和 README 安装命令是否一致。\n- 防护动作：发布节奏未知或过期时，安装说明必须标注可能漂移。\n- 证据：evidence.maintainer_signals | github_repo:747319327 | https://github.com/openlit/openlit | release_recency=unknown\n\n<!-- canonical_name: openlit/openlit; human_manual_source: deepwiki_human_wiki -->\n",
      "summary": "DeepWiki/Human Wiki 完整输出，末尾追加 Discovery Agent 踩坑日志。",
      "title": "Human Manual / 人类版说明书"
    },
    "pitfall_log": {
      "asset_id": "pitfall_log",
      "filename": "PITFALL_LOG.md",
      "markdown": "# Pitfall Log / 踩坑日志\n\n项目：openlit/openlit\n\n摘要：发现 15 个潜在踩坑项，其中 0 个为 high/blocking；最高优先级：安装坑 - 来源证据：Integration: Governance and compliance signals for LLM observability。\n\n## 1. 安装坑 · 来源证据：Integration: Governance and compliance signals for LLM observability\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：GitHub 社区证据显示该项目存在一个安装相关的待验证问题：Integration: Governance and compliance signals for LLM observability\n- 对用户的影响：可能增加新用户试用和生产接入成本。\n- 建议检查：来源显示可能已有修复、规避或版本变化，说明书中必须标注适用版本。\n- 防护动作：不得脱离来源链接放大为确定性结论；需要标注适用版本和复核状态。\n- 证据：community_evidence:github | cevd_16e8a1979e4646f18ae6d36da1fd46fe | https://github.com/openlit/openlit/issues/1106 | 来源类型 github_issue 暴露的待验证使用条件。\n\n## 2. 安装坑 · 来源证据：Proposal: gen_ai.agent.threat_detected span event helper for OTel-shaped detection observability\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：GitHub 社区证据显示该项目存在一个安装相关的待验证问题：Proposal: gen_ai.agent.threat_detected span event helper for OTel-shaped detection observability\n- 对用户的影响：可能增加新用户试用和生产接入成本。\n- 建议检查：来源问题仍为 open，Pack Agent 需要复核是否仍影响当前版本。\n- 防护动作：不得脱离来源链接放大为确定性结论；需要标注适用版本和复核状态。\n- 证据：community_evidence:github | cevd_9788255c9fb34a7eae64ba6413a52030 | https://github.com/openlit/openlit/issues/1186 | 来源讨论提到 python 相关条件，需在安装/试用前复核。\n\n## 3. 安装坑 · 来源证据：[Bug]: Docker Image doesn't run on windows 64bit\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：GitHub 社区证据显示该项目存在一个安装相关的待验证问题：[Bug]: Docker Image doesn't run on windows 64bit\n- 对用户的影响：可能增加新用户试用和生产接入成本。\n- 建议检查：来源显示可能已有修复、规避或版本变化，说明书中必须标注适用版本。\n- 防护动作：不得脱离来源链接放大为确定性结论；需要标注适用版本和复核状态。\n- 证据：community_evidence:github | cevd_e25a08120daf4deb81b9193aeab1f929 | https://github.com/openlit/openlit/issues/786 | 来源讨论提到 docker 相关条件，需在安装/试用前复核。\n\n## 4. 
安装坑 · 来源证据：openlit-1.19.0\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：GitHub 社区证据显示该项目存在一个安装相关的待验证问题：openlit-1.19.0\n- 对用户的影响：可能增加新用户试用和生产接入成本。\n- 建议检查：来源显示可能已有修复、规避或版本变化，说明书中必须标注适用版本。\n- 防护动作：不得脱离来源链接放大为确定性结论；需要标注适用版本和复核状态。\n- 证据：community_evidence:github | cevd_0504e467960f4bbe919ff101c6a14d7b | https://github.com/openlit/openlit/releases/tag/openlit-1.19.0 | 来源类型 github_release 暴露的待验证使用条件。\n\n## 5. 配置坑 · 来源证据：controller-0.2.0\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：GitHub 社区证据显示该项目存在一个配置相关的待验证问题：controller-0.2.0\n- 对用户的影响：可能影响升级、迁移或版本选择。\n- 建议检查：来源显示可能已有修复、规避或版本变化，说明书中必须标注适用版本。\n- 防护动作：不得脱离来源链接放大为确定性结论；需要标注适用版本和复核状态。\n- 证据：community_evidence:github | cevd_addec19eec37420da207487d5a685eaa | https://github.com/openlit/openlit/releases/tag/controller-0.2.0 | 来源类型 github_release 暴露的待验证使用条件。\n\n## 6. 配置坑 · 来源证据：openlit-1.20.0\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：GitHub 社区证据显示该项目存在一个配置相关的待验证问题：openlit-1.20.0\n- 对用户的影响：可能影响升级、迁移或版本选择。\n- 建议检查：来源显示可能已有修复、规避或版本变化，说明书中必须标注适用版本。\n- 防护动作：不得脱离来源链接放大为确定性结论；需要标注适用版本和复核状态。\n- 证据：community_evidence:github | cevd_217968c917e9426f9f8fbb4b50bebdb5 | https://github.com/openlit/openlit/releases/tag/openlit-1.20.0 | 来源类型 github_release 暴露的待验证使用条件。\n\n## 7. 能力坑 · 能力判断依赖假设\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：README/documentation is current enough for a first validation pass.\n- 对用户的影响：假设不成立时，用户拿不到承诺的能力。\n- 建议检查：将假设转成下游验证清单。\n- 防护动作：假设必须转成验证项；没有验证结果前不能写成事实。\n- 证据：capability.assumptions | github_repo:747319327 | https://github.com/openlit/openlit | README/documentation is current enough for a first validation pass.\n\n## 8. 维护坑 · 维护活跃度未知\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：未记录 last_activity_observed。\n- 对用户的影响：新项目、停更项目和活跃项目会被混在一起，推荐信任度下降。\n- 建议检查：补 GitHub 最近 commit、release、issue/PR 响应信号。\n- 防护动作：维护活跃度未知时，推荐强度不能标为高信任。\n- 证据：evidence.maintainer_signals | github_repo:747319327 | https://github.com/openlit/openlit | last_activity_observed missing\n\n## 9. 
安全/权限坑 · 下游验证发现风险项\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：no_demo\n- 对用户的影响：下游已经要求复核，不能在页面中弱化。\n- 建议检查：进入安全/权限治理复核队列。\n- 防护动作：下游风险存在时必须保持 review/recommendation 降级。\n- 证据：downstream_validation.risk_items | github_repo:747319327 | https://github.com/openlit/openlit | no_demo; severity=medium\n\n## 10. 安全/权限坑 · 存在评分风险\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：no_demo\n- 对用户的影响：风险会影响是否适合普通用户安装。\n- 建议检查：把风险写入边界卡，并确认是否需要人工复核。\n- 防护动作：评分风险必须进入边界卡，不能只作为内部分数。\n- 证据：risks.scoring_risks | github_repo:747319327 | https://github.com/openlit/openlit | no_demo; severity=medium\n\n## 11. 安全/权限坑 · 来源证据：Bug: OpenAI API key in operator example test-application is not using OPENAI_API_KEY env var\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：GitHub 社区证据显示该项目存在一个安全/权限相关的待验证问题：Bug: OpenAI API key in operator example test-application is not using OPENAI_API_KEY env var\n- 对用户的影响：可能影响授权、密钥配置或安全边界。\n- 建议检查：来源显示可能已有修复、规避或版本变化，说明书中必须标注适用版本。\n- 防护动作：不得脱离来源链接放大为确定性结论；需要标注适用版本和复核状态。\n- 证据：community_evidence:github | cevd_bfba0945570d4cbbaead1257e8f70dfe | https://github.com/openlit/openlit/issues/1135 | 来源讨论提到 python 相关条件，需在安装/试用前复核。\n\n## 12. 安全/权限坑 · 来源证据：openlit-1.19.1\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：GitHub 社区证据显示该项目存在一个安全/权限相关的待验证问题：openlit-1.19.1\n- 对用户的影响：可能增加新用户试用和生产接入成本。\n- 建议检查：来源显示可能已有修复、规避或版本变化，说明书中必须标注适用版本。\n- 防护动作：不得脱离来源链接放大为确定性结论；需要标注适用版本和复核状态。\n- 证据：community_evidence:github | cevd_b5088506959947828f2d740f9297d5b5 | https://github.com/openlit/openlit/releases/tag/openlit-1.19.1 | 来源类型 github_release 暴露的待验证使用条件。\n\n## 13. 安全/权限坑 · 来源证据：py-1.41.2\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：GitHub 社区证据显示该项目存在一个安全/权限相关的待验证问题：py-1.41.2\n- 对用户的影响：可能影响授权、密钥配置或安全边界。\n- 建议检查：来源显示可能已有修复、规避或版本变化，说明书中必须标注适用版本。\n- 防护动作：不得脱离来源链接放大为确定性结论；需要标注适用版本和复核状态。\n- 证据：community_evidence:github | cevd_ff3f4dfa2dc04616be73482b2145ac5c | https://github.com/openlit/openlit/releases/tag/py-1.41.2 | 来源讨论提到 docker 相关条件，需在安装/试用前复核。\n\n## 14. 
维护坑 · issue/PR 响应质量未知\n\n- 严重度：low\n- 证据强度：source_linked\n- 发现：issue_or_pr_quality=unknown。\n- 对用户的影响：用户无法判断遇到问题后是否有人维护。\n- 建议检查：抽样最近 issue/PR，判断是否长期无人处理。\n- 防护动作：issue/PR 响应未知时，必须提示维护风险。\n- 证据：evidence.maintainer_signals | github_repo:747319327 | https://github.com/openlit/openlit | issue_or_pr_quality=unknown\n\n## 15. 维护坑 · 发布节奏不明确\n\n- 严重度：low\n- 证据强度：source_linked\n- 发现：release_recency=unknown。\n- 对用户的影响：安装命令和文档可能落后于代码，用户踩坑概率升高。\n- 建议检查：确认最近 release/tag 和 README 安装命令是否一致。\n- 防护动作：发布节奏未知或过期时，安装说明必须标注可能漂移。\n- 证据：evidence.maintainer_signals | github_repo:747319327 | https://github.com/openlit/openlit | release_recency=unknown\n",
      "summary": "用户实践前最可能遇到的身份、安装、配置、运行和安全坑。",
      "title": "Pitfall Log / 踩坑日志"
    },
    "prompt_preview": {
      "asset_id": "prompt_preview",
      "filename": "PROMPT_PREVIEW.md",
      "markdown": "# openlit - Prompt Preview\n\n> Copy the prompt below into your AI host before installing anything.\n> Its purpose is to let you safely feel the project's workflow, not to claim the project has already run.\n\n## Copy this prompt\n\n```text\nYou are using an independent Doramagic capability pack for openlit/openlit.\n\nProject:\n- Name: openlit\n- Repository: https://github.com/openlit/openlit\n- Summary: Open source platform for AI Engineering: OpenTelemetry-native LLM Observability, GPU Monitoring, Guardrails, Evaluations, Prompt Management, Vault, Playground. 🚀💻 Integrates with 50+ LLM Providers, VectorDBs, Agent Frameworks and GPUs.\n- Host target: local_cli\n\nGoal:\nHelp me evaluate this project for the following task without installing it yet: Open source platform for AI Engineering: OpenTelemetry-native LLM Observability, GPU Monitoring, Guardrails, Evaluations, Prompt Management, Vault, Playground. 🚀💻 Integrates with 50+ LLM Providers, VectorDBs, Agent Frameworks and GPUs.\n\nBefore taking action:\n1. Restate my task, success standard, and boundary.\n2. Identify whether the next step requires tools, browser access, network access, filesystem access, credentials, package installation, or host configuration.\n3. Use only the Doramagic Project Pack, the upstream repository, and the source-linked evidence listed below.\n4. If a real command, install step, API call, file write, or host integration is required, mark it as \"requires post-install verification\" and ask for approval first.\n5. If evidence is missing, say \"evidence is missing\" instead of filling the gap.\n\nPreviewable capabilities:\n- Capability 1: Open source platform for AI Engineering: OpenTelemetry-native LLM Observability, GPU Monitoring, Guardrails, Evaluations, Prompt Management, Vault, Playground. 
🚀💻 Integrates with 50+ LLM Providers, VectorDBs, Agent Frameworks and GPUs.\n\nCapabilities that require post-install verification:\n- Capability 1: Use the source-backed project context to guide one small, checkable workflow step.\n\nCore service flow:\n1. overview: OpenLIT Overview. Produce one small intermediate artifact and wait for confirmation.\n2. quickstart: Quick Start Guide. Produce one small intermediate artifact and wait for confirmation.\n3. architecture: System Architecture. Produce one small intermediate artifact and wait for confirmation.\n4. data-flow: Data Flow and Management. Produce one small intermediate artifact and wait for confirmation.\n5. python-sdk: Python SDK Architecture. Produce one small intermediate artifact and wait for confirmation.\n\nSource-backed evidence to keep in mind:\n- https://github.com/openlit/openlit\n- https://github.com/openlit/openlit#readme\n- README.md\n- src/README.md\n- docker-compose.yml\n- examples/linux/docker-compose.yaml\n- examples/kubernetes/setup.sh\n- src/client/src/lib/platform/clickhouse/clickhouse-client.ts\n- src/dev-docker-compose.yml\n- sdk/python/src/openlit/otel/tracing.py\n\nFirst response rules:\n1. Start Step 1 only.\n2. Explain the one service action you will perform first.\n3. Ask exactly three questions about my target workflow, success standard, and sandbox boundary.\n4. 
Stop and wait for my answers.\n\nStep 1 follow-up protocol:\n- After I answer the first three questions, stay in Step 1.\n- Produce six parts only: clarified task, success standard, boundary conditions, two or three options, tradeoffs for each option, and one recommendation.\n- End by asking whether I confirm the recommendation.\n- Do not move to Step 2 until I explicitly confirm.\n\nConversation rules:\n- Advance one step at a time and wait for confirmation after each small artifact.\n- Write outputs as recommendations or planned checks, not as completed execution.\n- Do not claim tests passed, files changed, commands ran, APIs were called, or the project was installed.\n- If the user asks for execution, first provide the sandbox setup, expected output, rollback, and approval checkpoint.\n```\n",
      "summary": "不安装项目也能感受能力节奏的安全试用 Prompt。",
      "title": "Prompt Preview / 安装前试用 Prompt"
    },
    "quick_start": {
      "asset_id": "quick_start",
      "filename": "QUICK_START.md",
      "markdown": "# Quick Start / 官方入口\n\n项目：openlit/openlit\n\n## 官方安装入口\n\n### Python / pip · 官方安装入口\n\n```bash\npip install openlit\n```\n\n来源：https://github.com/openlit/openlit#readme\n\n## 来源\n\n- repo: https://github.com/openlit/openlit\n- docs: https://github.com/openlit/openlit#readme\n",
      "summary": "从项目官方 README 或安装文档提取的开工入口。",
      "title": "Quick Start / 官方入口"
    }
  },
  "validation_id": "dval_bc021b9f126d4942b10ce3ce4d4df599"
}
